 Documentation/ABI/testing/sysfs-platform-dell-laptop      |   60
 Documentation/devicetree/bindings/arm/arm-boards           |    2
 Documentation/devicetree/bindings/arm/fw-cfg.txt           |   72
 Documentation/devicetree/bindings/graph.txt                |    2
 Documentation/devicetree/bindings/vendor-prefixes.txt      |    4
 MAINTAINERS                                                |   15
 Makefile                                                   |    2
 arch/alpha/kernel/pci.c                                    |    8
 arch/arm/boot/dts/dra7.dtsi                                |    6
 arch/arm/boot/dts/imx25.dtsi                               |    8
 arch/arm/boot/dts/imx6sx-sdb.dts                           |    8
 arch/arm/boot/dts/tegra20-seaboard.dts                     |    2
 arch/arm/kernel/entry-header.S                             |   13
 arch/arm/kernel/perf_event.c                               |   10
 arch/arm/kernel/setup.c                                    |    7
 arch/arm/mach-mvebu/coherency.c                            |    7
 arch/arm/mach-omap2/common.h                               |    1
 arch/arm/mach-omap2/omap4-common.c                         |   32
 arch/arm/mach-omap2/omap_hwmod.c                           |   10
 arch/arm/mach-omap2/omap_hwmod.h                           |    1
 arch/arm/mach-omap2/omap_hwmod_44xx_data.c                 |    5
 arch/arm/mach-omap2/omap_hwmod_54xx_data.c                 |    1
 arch/arm/mach-omap2/prcm-common.h                          |    1
 arch/arm/mach-omap2/prm44xx.c                              |    5
 arch/arm/mach-omap2/prm_common.c                           |   14
 arch/arm/mach-omap2/twl-common.c                           |    7
 arch/arm/mach-shmobile/setup-r8a7778.c                     |    9
 arch/arm/mach-shmobile/setup-r8a7779.c                     |    9
 arch/arm64/Makefile                                        |    1
 arch/arm64/boot/dts/Makefile                               |    2
 arch/arm64/boot/dts/arm/juno.dts                           |    2
 arch/arm64/mm/dump.c                                       |    1
 arch/avr32/kernel/module.c                                 |   13
 arch/cris/arch-v32/drivers/sync_serial.c                   |    2
 arch/cris/kernel/module.c                                  |    2
 arch/frv/mb93090-mb00/pci-frv.c                            |    2
 arch/ia64/kernel/module.c                                  |    6
 arch/ia64/pci/pci.c                                        |   48
 arch/microblaze/pci/pci-common.c                           |   13
 arch/mips/net/bpf_jit.c                                    |    2
 arch/mn10300/unit-asb2305/pci-asb2305.c                    |    2
 arch/mn10300/unit-asb2305/pci.c                            |   47
 arch/nios2/kernel/module.c                                 |    2
 arch/nios2/kernel/signal.c                                 |    2
 arch/parisc/kernel/module.c                                |    6
 arch/powerpc/kernel/pci-common.c                           |   12
 arch/powerpc/net/bpf_jit_comp.c                            |    2
 arch/powerpc/platforms/powernv/setup.c                     |    2
 arch/powerpc/xmon/xmon.c                                   |    1
 arch/s390/kernel/module.c                                  |   10
 arch/s390/net/bpf_jit.S                                    |   28
 arch/s390/net/bpf_jit_comp.c                               |    9
 arch/sparc/kernel/pci.c                                    |    5
 arch/sparc/net/bpf_jit_comp.c                              |    4
 arch/tile/kernel/module.c                                  |    4
 arch/x86/Kconfig                                           |    6
 arch/x86/boot/compressed/Makefile                          |    2
 arch/x86/boot/compressed/misc.c                            |    9
 arch/x86/include/asm/acpi.h                                |    1
 arch/x86/include/asm/desc.h                                |   20
 arch/x86/include/asm/mmu_context.h                         |   20
 arch/x86/kernel/acpi/boot.c                                |   26
 arch/x86/kernel/cpu/mshyperv.c                             |    1
 arch/x86/kernel/ftrace.c                                   |    2
 arch/x86/kernel/irq.c                                      |    2
 arch/x86/kernel/tls.c                                      |   25
 arch/x86/kernel/tsc.c                                      |    2
 arch/x86/kvm/emulate.c                                     |   31
 arch/x86/mm/init.c                                         |    4
 arch/x86/mm/mpx.c                                          |    6
 arch/x86/mm/pat.c                                          |    7
 arch/x86/pci/i386.c                                        |    2
 arch/x86/pci/xen.c                                         |   49
 arch/x86/tools/calc_run_size.pl                            |   39
 arch/x86/tools/calc_run_size.sh                            |   42
 block/blk-mq-sysfs.c                                       |   25
 block/blk-mq.c                                             |    6
 drivers/acpi/pci_irq.c                                     |    1
 drivers/block/nvme-core.c                                  |    2
 drivers/bus/mvebu-mbus.c                                   |   13
 drivers/clocksource/bcm_kona_timer.c                       |    9
 drivers/clocksource/exynos_mct.c                           |    4
 drivers/clocksource/sh_tmu.c                               |    2
 drivers/gpu/drm/amd/amdkfd/kfd_device.c                    |    9
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c      |   80
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h      |    1
 drivers/gpu/drm/amd/amdkfd/kfd_module.c                    |   27
 drivers/gpu/drm/amd/amdkfd/kfd_pasid.c                     |    2
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h                      |   17
 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c     |   12
 drivers/gpu/drm/i2c/tda998x_drv.c                          |   52
 drivers/gpu/drm/radeon/cik_sdma.c                          |    1
 drivers/gpu/drm/radeon/ni_dma.c                            |    1
 drivers/gpu/drm/radeon/r100.c                              |   10
 drivers/gpu/drm/radeon/r300.c                              |   16
 drivers/gpu/drm/radeon/radeon.h                            |    9
 drivers/gpu/drm/radeon/radeon_asic.c                       |   24
 drivers/gpu/drm/radeon/radeon_asic.h                       |   12
 drivers/gpu/drm/radeon/radeon_device.c                     |    2
 drivers/gpu/drm/radeon/radeon_gart.c                       |   54
 drivers/gpu/drm/radeon/radeon_kfd.c                        |    2
 drivers/gpu/drm/radeon/radeon_vm.c                         |    6
 drivers/gpu/drm/radeon/rs400.c                             |   14
 drivers/gpu/drm/radeon/rs600.c                             |   14
 drivers/gpu/drm/radeon/si_dma.c                            |    1
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c                        |   28
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h                        |   25
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c                      |   18
 drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c                       |   36
 drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c                      |    8
 drivers/gpu/drm/vmwgfx/vmwgfx_irq.c                        |   25
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c                        |    2
 drivers/hwmon/Kconfig                                      |   10
 drivers/hwmon/Makefile                                     |    1
 drivers/hwmon/i5500_temp.c                                 |  149
 drivers/irqchip/irq-atmel-aic-common.c                     |    4
 drivers/irqchip/irq-gic-v3-its.c                           |    2
 drivers/irqchip/irq-hip04.c                                |    2
 drivers/irqchip/irq-mtk-sysirq.c                           |    4
 drivers/irqchip/irq-omap-intc.c                            |   26
 drivers/md/dm-cache-metadata.c                             |  101
 drivers/md/dm-cache-target.c                               |   89
 drivers/md/dm.c                                            |    9
 drivers/media/pci/cx23885/cx23885-cards.c                  |   23
 drivers/media/pci/cx23885/cx23885-core.c                   |    4
 drivers/media/pci/cx23885/cx23885-dvb.c                    |   11
 drivers/media/pci/cx23885/cx23885.h                        |    1
 drivers/media/platform/omap3isp/ispvideo.c                 |    7
 drivers/media/platform/soc_camera/atmel-isi.c              |    5
 drivers/media/platform/soc_camera/mx2_camera.c             |    3
 drivers/media/platform/soc_camera/mx3_camera.c             |    3
 drivers/media/platform/soc_camera/omap1_camera.c           |    3
 drivers/media/platform/soc_camera/pxa_camera.c             |    3
 drivers/media/platform/soc_camera/rcar_vin.c               |    4
 drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c   |    4
 drivers/media/usb/dvb-usb/cxusb.c                          |    2
 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c                   |   24
 drivers/media/v4l2-core/videobuf2-core.c                   |   19
 drivers/net/can/c_can/c_can.c                              |    3
 drivers/net/can/usb/kvaser_usb.c                           |   28
 drivers/net/ethernet/amd/xgbe/xgbe-common.h                |    9
 drivers/net/ethernet/amd/xgbe/xgbe-dev.c                   |    4
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c            |    2
 drivers/net/ethernet/cisco/enic/enic_main.c                |    2
 drivers/net/ethernet/marvell/mv643xx_eth.c                 |   59
 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c       |    5
 drivers/net/ethernet/renesas/sh_eth.c                      |  164
 drivers/net/ethernet/renesas/sh_eth.h                      |    1
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c          |    5
 drivers/net/ethernet/ti/cpsw.c                             |   22
 drivers/net/ipvlan/ipvlan_core.c                           |    6
 drivers/net/wireless/ath/ath9k/main.c                      |    7
 drivers/net/wireless/iwlwifi/iwl-fw-file.h                 |    2
 drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h             |    7
 drivers/net/wireless/iwlwifi/mvm/mac80211.c                |   20
 drivers/net/wireless/iwlwifi/mvm/scan.c                    |   53
 drivers/net/wireless/iwlwifi/mvm/tx.c                      |   11
 drivers/of/overlay.c                                       |   11
 drivers/of/platform.c                                      |   11
 drivers/of/unittest-data/tests-overlay.dtsi                |   55
 drivers/of/unittest.c                                      |   39
 drivers/parisc/lba_pci.c                                   |    5
 drivers/pci/bus.c                                          |   43
 drivers/pci/pci.c                                          |   40
 drivers/pci/pci.h                                          |    1
 drivers/pci/quirks.c                                       |   14
 drivers/pci/setup-bus.c                                    |   56
 drivers/platform/x86/dell-laptop.c                         | 1055
 drivers/regulator/core.c                                   |    4
 drivers/regulator/s2mps11.c                                |   42
 drivers/rtc/rtc-s5m.c                                      |    1
 drivers/s390/net/qeth_core_main.c                          |  117
 drivers/s390/net/qeth_l2_main.c                            |  220
 drivers/s390/net/qeth_l3_main.c                            |   50
 drivers/scsi/ipr.c                                         |   92
 drivers/scsi/ipr.h                                         |    1
 drivers/scsi/scsi.c                                        |   13
 drivers/scsi/scsi_debug.c                                  |    4
 drivers/scsi/scsi_lib.c                                    |   12
 drivers/spi/spi-dw-mid.c                                   |    1
 drivers/spi/spi-dw.c                                       |    6
 drivers/spi/spi-pxa2xx.c                                   |    2
 drivers/spi/spi-sh-msiof.c                                 |    2
 drivers/staging/media/tlg2300/Kconfig                      |    1
 drivers/watchdog/cadence_wdt.c                             |    1
 drivers/watchdog/imx2_wdt.c                                |   40
 drivers/watchdog/meson_wdt.c                               |    1
 fs/btrfs/ctree.h                                           |    1
 fs/btrfs/extent-tree.c                                     |    2
 fs/btrfs/extent_io.c                                       |    2
 fs/btrfs/scrub.c                                           |    2
 fs/btrfs/super.c                                           |   14
 fs/btrfs/transaction.c                                     |    2
 fs/cifs/ioctl.c                                            |   21
 include/dt-bindings/interrupt-controller/arm-gic.h         |    4
 include/linux/mfd/samsung/s2mps13.h                        |    2
 include/linux/module.h                                     |    2
 include/linux/moduleloader.h                               |    4
 include/linux/oom.h                                        |    5
 include/linux/pci.h                                        |    3
 include/linux/printk.h                                     |   15
 include/linux/time.h                                       |   13
 include/net/ip.h                                           |   11
 include/trace/events/kvm.h                                 |   16
 kernel/bpf/core.c                                          |    2
 kernel/bpf/syscall.c                                       |   25
 kernel/cgroup.c                                            |    2
 kernel/debug/kdb/kdb_main.c                                |    2
 kernel/kprobes.c                                           |    2
 kernel/module.c                                            |   91
 kernel/params.c                                            |    3
 kernel/sys.c                                               |    4
 kernel/time/ntp.c                                          |    7
 kernel/time/time.c                                         |    4
 mm/memcontrol.c                                            |    4
 mm/page_alloc.c                                            |   82
 mm/vmscan.c                                                |    2
 net/dsa/slave.c                                            |    1
 net/ipv4/ip_forward.c                                      |    3
 net/ipv4/ping.c                                            |    5
 net/ipv4/route.c                                           |    9
 net/ipv4/udp_diag.c                                        |    4
 net/ipv6/ip6_fib.c                                         |   45
 net/ipv6/route.c                                           |    6
 net/ipv6/xfrm6_policy.c                                    |   10
 net/llc/sysctl_net_llc.c                                   |    8
 net/mac80211/pm.c                                          |   29
 net/mac80211/rx.c                                          |    2
 net/sched/cls_bpf.c                                        |   15
 net/sctp/associola.c                                       |    1
 net/socket.c                                               |    3
 net/wireless/nl80211.c                                     |    9
 net/wireless/util.c                                        |    6
 samples/bpf/test_maps.c                                    |    4
 234 files changed, 2560 insertions(+), 2229 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-platform-dell-laptop b/Documentation/ABI/testing/sysfs-platform-dell-laptop
deleted file mode 100644
index 7969443ef0ef..000000000000
--- a/Documentation/ABI/testing/sysfs-platform-dell-laptop
+++ /dev/null
@@ -1,60 +0,0 @@
-What:		/sys/class/leds/dell::kbd_backlight/als_setting
-Date:		December 2014
-KernelVersion:	3.19
-Contact:	Gabriele Mazzotta <gabriele.mzt@gmail.com>,
-		Pali Rohár <pali.rohar@gmail.com>
-Description:
-		This file allows to control the automatic keyboard
-		illumination mode on some systems that have an ambient
-		light sensor. Write 1 to this file to enable the auto
-		mode, 0 to disable it.
-
-What:		/sys/class/leds/dell::kbd_backlight/start_triggers
-Date:		December 2014
-KernelVersion:	3.19
-Contact:	Gabriele Mazzotta <gabriele.mzt@gmail.com>,
-		Pali Rohár <pali.rohar@gmail.com>
-Description:
-		This file allows to control the input triggers that
-		turn on the keyboard backlight illumination that is
-		disabled because of inactivity.
-		Read the file to see the triggers available. The ones
-		enabled are preceded by '+', those disabled by '-'.
-
-		To enable a trigger, write its name preceded by '+' to
-		this file. To disable a trigger, write its name preceded
-		by '-' instead.
-
-		For example, to enable the keyboard as trigger run:
-		    echo +keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
-		To disable it:
-		    echo -keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
-
-		Note that not all the available triggers can be configured.
-
-What:		/sys/class/leds/dell::kbd_backlight/stop_timeout
-Date:		December 2014
-KernelVersion:	3.19
-Contact:	Gabriele Mazzotta <gabriele.mzt@gmail.com>,
-		Pali Rohár <pali.rohar@gmail.com>
-Description:
-		This file allows to specify the interval after which the
-		keyboard illumination is disabled because of inactivity.
-		The timeouts are expressed in seconds, minutes, hours and
-		days, for which the symbols are 's', 'm', 'h' and 'd'
-		respectively.
-
-		To configure the timeout, write to this file a value along
-		with any the above units. If no unit is specified, the value
-		is assumed to be expressed in seconds.
-
-		For example, to set the timeout to 10 minutes run:
-		    echo 10m > /sys/class/leds/dell::kbd_backlight/stop_timeout
-
-		Note that when this file is read, the returned value might be
-		expressed in a different unit than the one used when the timeout
-		was set.
-
-		Also note that only some timeouts are supported and that
-		some systems might fall back to a specific timeout in case
-		an invalid timeout is written to this file.
diff --git a/Documentation/devicetree/bindings/arm/arm-boards b/Documentation/devicetree/bindings/arm/arm-boards
index 556c8665fdbf..b78564b2b201 100644
--- a/Documentation/devicetree/bindings/arm/arm-boards
+++ b/Documentation/devicetree/bindings/arm/arm-boards
@@ -23,7 +23,7 @@ Required nodes:
 	      range of 0x200 bytes.
 
 - syscon: the root node of the Integrator platforms must have a
-  system controller node pointong to the control registers,
+  system controller node pointing to the control registers,
   with the compatible string
   "arm,integrator-ap-syscon"
   "arm,integrator-cp-syscon"
diff --git a/Documentation/devicetree/bindings/arm/fw-cfg.txt b/Documentation/devicetree/bindings/arm/fw-cfg.txt
new file mode 100644
index 000000000000..953fb640d9c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/fw-cfg.txt
@@ -0,0 +1,72 @@
+* QEMU Firmware Configuration bindings for ARM
+
+QEMU's arm-softmmu and aarch64-softmmu emulation / virtualization targets
+provide the following Firmware Configuration interface on the "virt" machine
+type:
+
+- A write-only, 16-bit wide selector (or control) register,
+- a read-write, 64-bit wide data register.
+
+QEMU exposes the control and data register to ARM guests as memory mapped
+registers; their location is communicated to the guest's UEFI firmware in the
+DTB that QEMU places at the bottom of the guest's DRAM.
+
+The guest writes a selector value (a key) to the selector register, and then
+can read the corresponding data (produced by QEMU) via the data register. If
+the selected entry is writable, the guest can rewrite it through the data
+register.
+
+The selector register takes keys in big endian byte order.
+
+The data register allows accesses with 8, 16, 32 and 64-bit width (only at
+offset 0 of the register). Accesses larger than a byte are interpreted as
+arrays, bundled together only for better performance. The bytes constituting
+such a word, in increasing address order, correspond to the bytes that would
+have been transferred by byte-wide accesses in chronological order.
+
+The interface allows guest firmware to download various parameters and blobs
+that affect how the firmware works and what tables it installs for the guest
+OS. For example, boot order of devices, ACPI tables, SMBIOS tables, kernel and
+initrd images for direct kernel booting, virtual machine UUID, SMP information,
+virtual NUMA topology, and so on.
+
+The authoritative registry of the valid selector values and their meanings is
+the QEMU source code; the structure of the data blobs corresponding to the
+individual key values is also defined in the QEMU source code.
+
+The presence of the registers can be verified by selecting the "signature" blob
+with key 0x0000, and reading four bytes from the data register. The returned
+signature is "QEMU".
+
+The outermost protocol (involving the write / read sequences of the control and
+data registers) is expected to be versioned, and/or described by feature bits.
+The interface revision / feature bitmap can be retrieved with key 0x0001. The
+blob to be read from the data register has size 4, and it is to be interpreted
+as a uint32_t value in little endian byte order. The current value
+(corresponding to the above outer protocol) is zero.
+
+The guest kernel is not expected to use these registers (although it is
+certainly allowed to); the device tree bindings are documented here because
+this is where device tree bindings reside in general.
+
+Required properties:
+
+- compatible: "qemu,fw-cfg-mmio".
+
+- reg: the MMIO region used by the device.
+  * Bytes 0x0 to 0x7 cover the data register.
+  * Bytes 0x8 to 0x9 cover the selector register.
+  * Further registers may be appended to the region in case of future interface
+    revisions / feature bits.
+
+Example:
+
+/ {
+	#size-cells = <0x2>;
+	#address-cells = <0x2>;
+
+	fw-cfg@9020000 {
+		compatible = "qemu,fw-cfg-mmio";
+		reg = <0x0 0x9020000 0x0 0xa>;
+	};
+};
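
[Editor's note: the binding above fully describes the access protocol, so a short illustration may help. Probing for the device amounts to writing key 0x0000 to the big-endian selector register at offset 0x8 and then reading four bytes back from the data register at offset 0x0. Below is a minimal, hypothetical guest-side sketch in freestanding C; the base address is taken from the example node above, and the helper names are illustrative only, not part of this patch or of any kernel API.]

#include <stdint.h>

/* Hypothetical MMIO base, from the example node above (reg = <0x9020000>). */
#define FW_CFG_BASE		0x9020000UL
#define FW_CFG_DATA		((volatile uint8_t  *)(FW_CFG_BASE + 0x0))
#define FW_CFG_SELECTOR		((volatile uint16_t *)(FW_CFG_BASE + 0x8))

#define FW_CFG_KEY_SIGNATURE	0x0000	/* the "signature" blob key */

/*
 * The selector takes keys in big-endian byte order; assume a
 * little-endian guest CPU here, so swap the bytes before writing.
 */
static uint16_t fw_cfg_key_to_be16(uint16_t key)
{
	return (uint16_t)((key >> 8) | (key << 8));
}

/* Returns 1 if the fw-cfg registers respond with the "QEMU" signature. */
static int fw_cfg_present(void)
{
	char sig[4];
	int i;

	*FW_CFG_SELECTOR = fw_cfg_key_to_be16(FW_CFG_KEY_SIGNATURE);

	/*
	 * Byte-wide reads from offset 0 return the selected blob's bytes
	 * in chronological order, as the binding describes.
	 */
	for (i = 0; i < 4; i++)
		sig[i] = *FW_CFG_DATA;

	return sig[0] == 'Q' && sig[1] == 'E' && sig[2] == 'M' && sig[3] == 'U';
}
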
diff --git a/Documentation/devicetree/bindings/graph.txt b/Documentation/devicetree/bindings/graph.txt
index 1a69c078adf2..fcb1c6a4787b 100644
--- a/Documentation/devicetree/bindings/graph.txt
+++ b/Documentation/devicetree/bindings/graph.txt
@@ -19,7 +19,7 @@ type of the connections, they just map their existence. Specific properties
 may be described by specialized bindings depending on the type of connection.
 
 To see how this binding applies to video pipelines, for example, see
-Documentation/device-tree/bindings/media/video-interfaces.txt.
+Documentation/devicetree/bindings/media/video-interfaces.txt.
 Here the ports describe data interfaces, and the links between them are
 the connecting data buses. A single port with multiple connections can
 correspond to multiple devices being connected to the same physical bus.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index c0333a97c47a..a78a2a619ed0 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -9,7 +9,6 @@ ad Avionic Design GmbH
 adapteva	Adapteva, Inc.
 adi	Analog Devices, Inc.
 aeroflexgaisler	Aeroflex Gaisler AB
-ak	Asahi Kasei Corp.
 allwinner	Allwinner Technology Co., Ltd.
 altr	Altera Corp.
 amcc	Applied Micro Circuits Corporation (APM, formally AMCC)
@@ -20,6 +19,7 @@ amstaos AMS-Taos Inc.
 apm	Applied Micro Circuits Corporation (APM)
 arm	ARM Ltd.
 armadeus	ARMadeus Systems SARL
+asahi-kasei	Asahi Kasei Corp.
 atmel	Atmel Corporation
 auo	AU Optronics Corporation
 avago	Avago Technologies
@@ -130,6 +130,7 @@ pixcir PIXCIR MICROELECTRONICS Co., Ltd
 powervr	PowerVR (deprecated, use img)
 qca	Qualcomm Atheros, Inc.
 qcom	Qualcomm Technologies, Inc
+qemu	QEMU, a generic and open source machine emulator and virtualizer
 qnap	QNAP Systems, Inc.
 radxa	Radxa
 raidsonic	RaidSonic Technology GmbH
@@ -171,6 +172,7 @@ usi Universal Scientific Industrial Co., Ltd.
 v3	V3 Semiconductor
 variscite	Variscite Ltd.
 via	VIA Technologies, Inc.
+virtio	Virtual I/O Device Specification, developed by the OASIS consortium
 voipac	Voipac Technologies s.r.o.
 winbond	Winbond Electronics corp.
 wlf	Wolfson Microelectronics
diff --git a/MAINTAINERS b/MAINTAINERS
index aa97dffe59e1..f5c4567b46ba 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -698,7 +698,7 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:	http://blackfin.uclinux.org/
 S:	Supported
 F:	sound/soc/blackfin/*
 
 ANALOG DEVICES INC IIO DRIVERS
 M:	Lars-Peter Clausen <lars@metafoo.de>
 M:	Michael Hennerich <Michael.Hennerich@analog.com>
@@ -4752,14 +4752,14 @@ S: Supported
 F:	drivers/net/ethernet/ibm/ibmveth.*
 
 IBM Power Virtual SCSI Device Drivers
-M:	Nathan Fontenot <nfont@linux.vnet.ibm.com>
+M:	Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/ibmvscsi/ibmvscsi*
 F:	drivers/scsi/ibmvscsi/viosrp.h
 
 IBM Power Virtual FC Device Drivers
-M:	Brian King <brking@linux.vnet.ibm.com>
+M:	Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/ibmvscsi/ibmvfc*
@@ -4948,7 +4948,6 @@ K: \b(ABS|SYN)_MT_
 INTEL C600 SERIES SAS CONTROLLER DRIVER
 M:	Intel SCU Linux support <intel-linux-scu@intel.com>
 M:	Artur Paszkiewicz <artur.paszkiewicz@intel.com>
-M:	Dave Jiang <dave.jiang@intel.com>
 L:	linux-scsi@vger.kernel.org
 T:	git git://git.code.sf.net/p/intel-sas/isci
 S:	Supported
@@ -7026,14 +7025,12 @@ OPEN FIRMWARE AND FLATTENED DEVICE TREE
 M:	Grant Likely <grant.likely@linaro.org>
 M:	Rob Herring <robh+dt@kernel.org>
 L:	devicetree@vger.kernel.org
-W:	http://fdt.secretlab.ca
-T:	git git://git.secretlab.ca/git/linux-2.6.git
+W:	http://www.devicetree.org/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git
 S:	Maintained
 F:	drivers/of/
 F:	include/linux/of*.h
 F:	scripts/dtc/
-K:	of_get_property
-K:	of_match_table
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
 M:	Rob Herring <robh+dt@kernel.org>
@@ -7278,7 +7275,7 @@ S: Maintained
 F:	drivers/pci/host/*layerscape*
 
 PCI DRIVER FOR IMX6
-M:	Richard Zhu <r65037@freescale.com>
+M:	Richard Zhu <Richard.Zhu@freescale.com>
 M:	Lucas Stach <l.stach@pengutronix.de>
 L:	linux-pci@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
diff --git a/Makefile b/Makefile
index fb93350cf645..95a0e827ecd3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 076c35cd6cde..98a1525fa164 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -285,8 +285,12 @@ pcibios_claim_one_bus(struct pci_bus *b)
 		if (r->parent || !r->start || !r->flags)
 			continue;
 		if (pci_has_flag(PCI_PROBE_ONLY) ||
-		    (r->flags & IORESOURCE_PCI_FIXED))
-			pci_claim_resource(dev, i);
+		    (r->flags & IORESOURCE_PCI_FIXED)) {
+			if (pci_claim_resource(dev, i) == 0)
+				continue;
+
+			pci_claim_bridge_resource(dev, i);
+		}
 	}
 }
 
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 22771bc1643a..63f8b007bdc5 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1257,6 +1257,8 @@
 			tx-fifo-resize;
 			maximum-speed = "super-speed";
 			dr_mode = "otg";
+			snps,dis_u3_susphy_quirk;
+			snps,dis_u2_susphy_quirk;
 		};
 	};
 
@@ -1278,6 +1280,8 @@
 			tx-fifo-resize;
 			maximum-speed = "high-speed";
 			dr_mode = "otg";
+			snps,dis_u3_susphy_quirk;
+			snps,dis_u2_susphy_quirk;
 		};
 	};
 
@@ -1299,6 +1303,8 @@
 			tx-fifo-resize;
 			maximum-speed = "high-speed";
 			dr_mode = "otg";
+			snps,dis_u3_susphy_quirk;
+			snps,dis_u2_susphy_quirk;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index d238676a9107..e4d3aecc4ed2 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -369,7 +369,7 @@
 			compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
 			#pwm-cells = <2>;
 			reg = <0x53fa0000 0x4000>;
-			clocks = <&clks 106>, <&clks 36>;
+			clocks = <&clks 106>, <&clks 52>;
 			clock-names = "ipg", "per";
 			interrupts = <36>;
 		};
@@ -388,7 +388,7 @@
 			compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
 			#pwm-cells = <2>;
 			reg = <0x53fa8000 0x4000>;
-			clocks = <&clks 107>, <&clks 36>;
+			clocks = <&clks 107>, <&clks 52>;
 			clock-names = "ipg", "per";
 			interrupts = <41>;
 		};
@@ -429,7 +429,7 @@
 		pwm4: pwm@53fc8000 {
 			compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
 			reg = <0x53fc8000 0x4000>;
-			clocks = <&clks 108>, <&clks 36>;
+			clocks = <&clks 108>, <&clks 52>;
 			clock-names = "ipg", "per";
 			interrupts = <42>;
 		};
@@ -476,7 +476,7 @@
 			compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
 			#pwm-cells = <2>;
 			reg = <0x53fe0000 0x4000>;
-			clocks = <&clks 105>, <&clks 36>;
+			clocks = <&clks 105>, <&clks 52>;
 			clock-names = "ipg", "per";
 			interrupts = <26>;
 		};
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 8c1febd7e3f2..c108bb451337 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -166,12 +166,12 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		ethphy1: ethernet-phy@0 {
-			reg = <0>;
+		ethphy1: ethernet-phy@1 {
+			reg = <1>;
 		};
 
-		ethphy2: ethernet-phy@1 {
-			reg = <1>;
+		ethphy2: ethernet-phy@2 {
+			reg = <2>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index ea282c7c0ca5..e2fed2712249 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -406,7 +406,7 @@
 		clock-frequency = <400000>;
 
 		magnetometer@c {
-			compatible = "ak,ak8975";
+			compatible = "asahi-kasei,ak8975";
 			reg = <0xc>;
 			interrupt-parent = <&gpio>;
 			interrupts = <TEGRA_GPIO(N, 5) IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 4176df721bf0..1a0045abead7 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -253,21 +253,22 @@
 	.endm
 
 	.macro	restore_user_regs, fast = 0, offset = 0
-	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
-	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
+	mov	r2, sp
+	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
-	strex	r1, r2, [sp]			@ clear the exclusive monitor
+	strex	r1, r2, [r2]			@ clear the exclusive monitor
 #endif
 	.if	\fast
-	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
+	ldmdb	r2, {r1 - lr}^			@ get calling r1 - lr
 	.else
-	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
+	ldmdb	r2, {r0 - lr}^			@ get calling r0 - lr
 	.endif
 	mov	r0, r0				@ ARMv5T and earlier require a nop
 						@ after ldm {}^
-	add	sp, sp, #S_FRAME_SIZE - S_PC
+	add	sp, sp, #\offset + S_FRAME_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f7c65adaa428..557e128e4df0 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -116,8 +116,14 @@ int armpmu_event_set_period(struct perf_event *event)
 		ret = 1;
 	}
 
-	if (left > (s64)armpmu->max_period)
-		left = armpmu->max_period;
+	/*
+	 * Limit the maximum period to prevent the counter value
+	 * from overtaking the one we are about to program. In
+	 * effect we are reducing max_period to account for
+	 * interrupt latency (and we are being very conservative).
+	 */
+	if (left > (armpmu->max_period >> 1))
+		left = armpmu->max_period >> 1;
 
 	local64_set(&hwc->prev_count, (u64)-left);
 
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 715ae19bc7c8..e55408e96559 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -657,10 +657,13 @@ int __init arm_add_memory(u64 start, u64 size)
 
 	/*
 	 * Ensure that start/size are aligned to a page boundary.
-	 * Size is appropriately rounded down, start is rounded up.
+	 * Size is rounded down, start is rounded up.
 	 */
-	size -= start & ~PAGE_MASK;
 	aligned_start = PAGE_ALIGN(start);
+	if (aligned_start > start + size)
+		size = 0;
+	else
+		size -= aligned_start - start;
 
 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
 	if (aligned_start > ULONG_MAX) {
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 3585cb394e9b..caa21e9b8cd9 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -246,9 +246,14 @@ static int coherency_type(void)
 	return type;
 }
 
+/*
+ * As a precaution, we currently completely disable hardware I/O
+ * coherency, until enough testing is done with automatic I/O
+ * synchronization barriers to validate that it is a proper solution.
+ */
 int coherency_available(void)
 {
-	return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
+	return false;
 }
 
 int __init coherency_init(void)
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index db57741c9c8a..64e44d6d07c0 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -211,6 +211,7 @@ extern struct device *omap2_get_iva_device(void);
 extern struct device *omap2_get_l3_device(void);
 extern struct device *omap4_get_dsp_device(void);
 
+unsigned int omap4_xlate_irq(unsigned int hwirq);
 void omap_gic_of_init(void);
 
 #ifdef CONFIG_CACHE_L2X0
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index b7cb44abe49b..cc30e49a4cc2 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -256,6 +256,38 @@ static int __init omap4_sar_ram_init(void)
 }
 omap_early_initcall(omap4_sar_ram_init);
 
+static struct of_device_id gic_match[] = {
+	{ .compatible = "arm,cortex-a9-gic", },
+	{ .compatible = "arm,cortex-a15-gic", },
+	{ },
+};
+
+static struct device_node *gic_node;
+
+unsigned int omap4_xlate_irq(unsigned int hwirq)
+{
+	struct of_phandle_args irq_data;
+	unsigned int irq;
+
+	if (!gic_node)
+		gic_node = of_find_matching_node(NULL, gic_match);
+
+	if (WARN_ON(!gic_node))
+		return hwirq;
+
+	irq_data.np = gic_node;
+	irq_data.args_count = 3;
+	irq_data.args[0] = 0;
+	irq_data.args[1] = hwirq - OMAP44XX_IRQ_GIC_START;
+	irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;
+
+	irq = irq_create_of_mapping(&irq_data);
+	if (WARN_ON(!irq))
+		irq = hwirq;
+
+	return irq;
+}
+
 void __init omap_gic_of_init(void)
 {
 	struct device_node *np;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index cbb908dc5cf0..9025ffffd2dc 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -3534,9 +3534,15 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
 
 	mpu_irqs_cnt = _count_mpu_irqs(oh);
 	for (i = 0; i < mpu_irqs_cnt; i++) {
+		unsigned int irq;
+
+		if (oh->xlate_irq)
+			irq = oh->xlate_irq((oh->mpu_irqs + i)->irq);
+		else
+			irq = (oh->mpu_irqs + i)->irq;
 		(res + r)->name = (oh->mpu_irqs + i)->name;
-		(res + r)->start = (oh->mpu_irqs + i)->irq;
-		(res + r)->end = (oh->mpu_irqs + i)->irq;
+		(res + r)->start = irq;
+		(res + r)->end = irq;
 		(res + r)->flags = IORESOURCE_IRQ;
 		r++;
 	}
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 35ca6efbec31..5b42fafcaf55 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -676,6 +676,7 @@ struct omap_hwmod {
 	spinlock_t		_lock;
 	struct list_head	node;
 	struct omap_hwmod_ocp_if	*_mpu_port;
+	unsigned int		(*xlate_irq)(unsigned int);
 	u16			flags;
 	u8			mpu_rt_idx;
 	u8			response_lat;
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index c314b3c31117..f5e68a782025 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -479,6 +479,7 @@ static struct omap_hwmod omap44xx_dma_system_hwmod = {
 	.class		= &omap44xx_dma_hwmod_class,
 	.clkdm_name	= "l3_dma_clkdm",
 	.mpu_irqs	= omap44xx_dma_system_irqs,
+	.xlate_irq	= omap4_xlate_irq,
 	.main_clk	= "l3_div_ck",
 	.prcm = {
 		.omap4 = {
@@ -640,6 +641,7 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
 	.class		= &omap44xx_dispc_hwmod_class,
 	.clkdm_name	= "l3_dss_clkdm",
 	.mpu_irqs	= omap44xx_dss_dispc_irqs,
+	.xlate_irq	= omap4_xlate_irq,
 	.sdma_reqs	= omap44xx_dss_dispc_sdma_reqs,
 	.main_clk	= "dss_dss_clk",
 	.prcm = {
@@ -693,6 +695,7 @@ static struct omap_hwmod omap44xx_dss_dsi1_hwmod = {
 	.class		= &omap44xx_dsi_hwmod_class,
 	.clkdm_name	= "l3_dss_clkdm",
 	.mpu_irqs	= omap44xx_dss_dsi1_irqs,
+	.xlate_irq	= omap4_xlate_irq,
 	.sdma_reqs	= omap44xx_dss_dsi1_sdma_reqs,
 	.main_clk	= "dss_dss_clk",
 	.prcm = {
@@ -726,6 +729,7 @@ static struct omap_hwmod omap44xx_dss_dsi2_hwmod = {
 	.class		= &omap44xx_dsi_hwmod_class,
 	.clkdm_name	= "l3_dss_clkdm",
 	.mpu_irqs	= omap44xx_dss_dsi2_irqs,
+	.xlate_irq	= omap4_xlate_irq,
 	.sdma_reqs	= omap44xx_dss_dsi2_sdma_reqs,
 	.main_clk	= "dss_dss_clk",
 	.prcm = {
@@ -784,6 +788,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
 	 */
 	.flags		= HWMOD_SWSUP_SIDLE,
 	.mpu_irqs	= omap44xx_dss_hdmi_irqs,
+	.xlate_irq	= omap4_xlate_irq,
 	.sdma_reqs	= omap44xx_dss_hdmi_sdma_reqs,
 	.main_clk	= "dss_48mhz_clk",
 	.prcm = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 3e9523084b2a..7c3fac035e93 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -288,6 +288,7 @@ static struct omap_hwmod omap54xx_dma_system_hwmod = {
 	.class		= &omap54xx_dma_hwmod_class,
 	.clkdm_name	= "dma_clkdm",
 	.mpu_irqs	= omap54xx_dma_system_irqs,
+	.xlate_irq	= omap4_xlate_irq,
 	.main_clk	= "l3_iclk_div",
 	.prcm = {
 		.omap4 = {
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index a8e4b582c527..6163d66102a3 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -498,6 +498,7 @@ struct omap_prcm_irq_setup {
 	u8 nr_irqs;
 	const struct omap_prcm_irq *irqs;
 	int irq;
+	unsigned int (*xlate_irq)(unsigned int);
 	void (*read_pending_irqs)(unsigned long *events);
 	void (*ocp_barrier)(void);
 	void (*save_and_clear_irqen)(u32 *saved_mask);
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index cc170fb81ff7..408c64efb807 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -49,6 +49,7 @@ static struct omap_prcm_irq_setup omap4_prcm_irq_setup = {
 	.irqs			= omap4_prcm_irqs,
 	.nr_irqs		= ARRAY_SIZE(omap4_prcm_irqs),
 	.irq			= 11 + OMAP44XX_IRQ_GIC_START,
+	.xlate_irq		= omap4_xlate_irq,
 	.read_pending_irqs	= &omap44xx_prm_read_pending_irqs,
 	.ocp_barrier		= &omap44xx_prm_ocp_barrier,
 	.save_and_clear_irqen	= &omap44xx_prm_save_and_clear_irqen,
@@ -751,8 +752,10 @@ static int omap44xx_prm_late_init(void)
 	}
 
 	/* Once OMAP4 DT is filled as well */
-	if (irq_num >= 0)
+	if (irq_num >= 0) {
 		omap4_prcm_irq_setup.irq = irq_num;
+		omap4_prcm_irq_setup.xlate_irq = NULL;
+	}
 	}
 
 	omap44xx_prm_enable_io_wakeup();
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 779940cb6e56..dea2833ca627 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -187,6 +187,7 @@ int omap_prcm_event_to_irq(const char *name)
  */
 void omap_prcm_irq_cleanup(void)
 {
+	unsigned int irq;
 	int i;
 
 	if (!prcm_irq_setup) {
@@ -211,7 +212,11 @@ void omap_prcm_irq_cleanup(void)
 	kfree(prcm_irq_setup->priority_mask);
 	prcm_irq_setup->priority_mask = NULL;
 
-	irq_set_chained_handler(prcm_irq_setup->irq, NULL);
+	if (prcm_irq_setup->xlate_irq)
+		irq = prcm_irq_setup->xlate_irq(prcm_irq_setup->irq);
+	else
+		irq = prcm_irq_setup->irq;
+	irq_set_chained_handler(irq, NULL);
 
 	if (prcm_irq_setup->base_irq > 0)
 		irq_free_descs(prcm_irq_setup->base_irq,
@@ -259,6 +264,7 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
 	int offset, i;
 	struct irq_chip_generic *gc;
 	struct irq_chip_type *ct;
+	unsigned int irq;
 
 	if (!irq_setup)
 		return -EINVAL;
@@ -298,7 +304,11 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
 			1 << (offset & 0x1f);
 	}
 
-	irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler);
+	if (irq_setup->xlate_irq)
+		irq = irq_setup->xlate_irq(irq_setup->irq);
+	else
+		irq = irq_setup->irq;
+	irq_set_chained_handler(irq, omap_prcm_irq_handler);
 
 	irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
 					      0);
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 4457e731f7a4..292eca0e78ed 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -66,19 +66,24 @@ void __init omap_pmic_init(int bus, u32 clkrate,
 	omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
 }
 
+#ifdef CONFIG_ARCH_OMAP4
 void __init omap4_pmic_init(const char *pmic_type,
 		    struct twl4030_platform_data *pmic_data,
 		    struct i2c_board_info *devices, int nr_devices)
 {
 	/* PMIC part*/
+	unsigned int irq;
+
 	omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
 	omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT);
-	omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data);
+	irq = omap4_xlate_irq(7 + OMAP44XX_IRQ_GIC_START);
+	omap_pmic_init(1, 400, pmic_type, irq, pmic_data);
 
 	/* Register additional devices on i2c1 bus if needed */
 	if (devices)
 		i2c_register_board_info(1, devices, nr_devices);
 }
+#endif
 
 void __init omap_pmic_late_init(void)
 {
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c
index 170bd146ba17..cef8895a9b82 100644
--- a/arch/arm/mach-shmobile/setup-r8a7778.c
+++ b/arch/arm/mach-shmobile/setup-r8a7778.c
@@ -576,11 +576,18 @@ void __init r8a7778_init_irq_extpin(int irlm)
 void __init r8a7778_init_irq_dt(void)
 {
 	void __iomem *base = ioremap_nocache(0xfe700000, 0x00100000);
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+	void __iomem *gic_dist_base = ioremap_nocache(0xfe438000, 0x1000);
+	void __iomem *gic_cpu_base = ioremap_nocache(0xfe430000, 0x1000);
+#endif
 
 	BUG_ON(!base);
 
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+	gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
 	irqchip_init();
-
+#endif
 	/* route all interrupts to ARM */
 	__raw_writel(0x73ffffff, base + INT2NTSR0);
 	__raw_writel(0xffffffff, base + INT2NTSR1);
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 6156d172cf31..27dceaf9e688 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -720,10 +720,17 @@ static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
 
 void __init r8a7779_init_irq_dt(void)
 {
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+	void __iomem *gic_dist_base = ioremap_nocache(0xf0001000, 0x1000);
+	void __iomem *gic_cpu_base = ioremap_nocache(0xf0000100, 0x1000);
+#endif
 	gic_arch_extn.irq_set_wake = r8a7779_set_wake;
 
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+	gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
 	irqchip_init();
-
+#endif
 	/* route all interrupts to ARM */
 	__raw_writel(0xffffffff, INT2NTSR0);
 	__raw_writel(0x3fffffff, INT2NTSR1);
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 1c43cec971b5..066688863920 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -85,6 +85,7 @@ vdso_install:
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
+	$(Q)$(MAKE) $(clean)=$(boot)/dts
 
 define archhelp
   echo '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 3b8d427c3985..c62b0f4d9ef6 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -3,6 +3,4 @@ dts-dirs += apm
 dts-dirs += arm
 dts-dirs += cavium
 
-always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
-clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index cb3073e4e7a8..d429129ecb3d 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -22,7 +22,7 @@
 	};
 
 	chosen {
-		stdout-path = &soc_uart0;
+		stdout-path = "serial0:115200n8";
 	};
 
 	psci {
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index cf33f33333cc..d54dc9ac4b70 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -15,6 +15,7 @@
  */
 #include <linux/debugfs.h>
 #include <linux/fs.h>
+#include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index 2c9412908024..164efa009e5b 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
@@ -19,12 +19,10 @@
 #include <linux/moduleloader.h>
 #include <linux/vmalloc.h>
 
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
 {
 	vfree(mod->arch.syminfo);
 	mod->arch.syminfo = NULL;
-
-	vfree(module_region);
 }
 
 static inline int check_rela(Elf32_Rela *rela, struct module *module,
@@ -291,12 +289,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
 
 	return ret;
 }
-
-int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
-		    struct module *module)
-{
-	vfree(module->arch.syminfo);
-	module->arch.syminfo = NULL;
-
-	return 0;
-}
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 08a313fc2241..f772068d9e79 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -604,7 +604,7 @@ static ssize_t __sync_serial_read(struct file *file,
 				  struct timespec *ts)
 {
 	unsigned long flags;
-	int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+	int dev = MINOR(file_inode(file)->i_rdev);
 	int avail;
 	struct sync_port *port;
 	unsigned char *start;
diff --git a/arch/cris/kernel/module.c b/arch/cris/kernel/module.c
index 51123f985eb5..af04cb6b6dc9 100644
--- a/arch/cris/kernel/module.c
+++ b/arch/cris/kernel/module.c
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
 }
 
 /* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
 {
 	kfree(module_region);
 }
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 67b1d1685759..0635bd6c2af3 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -94,7 +94,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
 			r = &dev->resource[idx];
 			if (!r->start)
 				continue;
-			pci_claim_resource(dev, idx);
+			pci_claim_bridge_resource(dev, idx);
 		}
 	}
 	pcibios_allocate_bus_resources(&bus->children);
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 24603be24c14..29754aae5177 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -305,14 +305,12 @@ plt_target (struct plt_entry *plt)
 #endif /* !USE_BRL */
 
 void
-module_free (struct module *mod, void *module_region)
+module_arch_freeing_init (struct module *mod)
 {
-	if (mod && mod->arch.init_unw_table &&
-	    module_region == mod->module_init) {
+	if (mod->arch.init_unw_table) {
 		unw_remove_unwind_table(mod->arch.init_unw_table);
 		mod->arch.init_unw_table = NULL;
 	}
-	vfree(module_region);
 }
 
 /* Have we already seen one of these relocations? */
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 291a582777cf..900cc93e5409 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -487,45 +487,39 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 	return 0;
 }
 
-static int is_valid_resource(struct pci_dev *dev, int idx)
-{
-	unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
-	struct resource *devr = &dev->resource[idx], *busr;
-
-	if (!dev->bus)
-		return 0;
-
-	pci_bus_for_each_resource(dev->bus, busr, i) {
-		if (!busr || ((busr->flags ^ devr->flags) & type_mask))
-			continue;
-		if ((devr->start) && (devr->start >= busr->start) &&
-		    (devr->end <= busr->end))
-			return 1;
-	}
-	return 0;
-}
-
-static void pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
-{
-	int i;
-
-	for (i = start; i < limit; i++) {
-		if (!dev->resource[i].flags)
-			continue;
-		if ((is_valid_resource(dev, i)))
-			pci_claim_resource(dev, i);
-	}
-}
-
 void pcibios_fixup_device_resources(struct pci_dev *dev)
 {
-	pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
+	int idx;
+
+	if (!dev->bus)
+		return;
+
+	for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
+		struct resource *r = &dev->resource[idx];
+
+		if (!r->flags || r->parent || !r->start)
+			continue;
+
+		pci_claim_resource(dev, idx);
+	}
 }
 EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
 
 static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
 {
-	pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
+	int idx;
+
+	if (!dev->bus)
+		return;
+
+	for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
+		struct resource *r = &dev->resource[idx];
+
+		if (!r->flags || r->parent || !r->start)
+			continue;
+
+		pci_claim_bridge_resource(dev, idx);
+	}
 }
 
 /*
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index b30e41c0c033..48528fb81eff 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -1026,6 +1026,8 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
 			       pr, (pr && pr->name) ? pr->name : "nil");
 
 		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
+			struct pci_dev *dev = bus->self;
+
 			if (request_resource(pr, res) == 0)
 				continue;
 			/*
@@ -1035,6 +1037,12 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
 			 */
 			if (reparent_resources(pr, res) == 0)
 				continue;
+
+			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
+			    pci_claim_bridge_resource(dev,
+						i + PCI_BRIDGE_RESOURCES) == 0)
+				continue;
+
 		}
 		pr_warn("PCI: Cannot allocate resource region ");
 		pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
@@ -1227,7 +1235,10 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
 				 (unsigned long long)r->end,
 				 (unsigned int)r->flags);
 
-			pci_claim_resource(dev, i);
+			if (pci_claim_resource(dev, i) == 0)
+				continue;
+
+			pci_claim_bridge_resource(dev, i);
 		}
 	}
 
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 9fd6834a2172..5d6139390bf8 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -1388,7 +1388,7 @@ out:
1388void bpf_jit_free(struct bpf_prog *fp) 1388void bpf_jit_free(struct bpf_prog *fp)
1389{ 1389{
1390 if (fp->jited) 1390 if (fp->jited)
1391 module_free(NULL, fp->bpf_func); 1391 module_memfree(fp->bpf_func);
1392 1392
1393 bpf_prog_unlock_free(fp); 1393 bpf_prog_unlock_free(fp);
1394} 1394}
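The module_free(NULL, ...) call sites in the BPF JITs (here and in the powerpc/sparc hunks below, plus ftrace) become module_memfree(); the struct module argument was always NULL at these sites, so the rename drops it. A sketch of the renamed generic default, assuming the companion core patch keeps the old vfree() behavior:

    /* kernel/module.c (sketch of the renamed default) */
    void __weak module_memfree(void *module_region)
    {
            vfree(module_region);
    }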
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c
index febb9cd83177..b5b036f64275 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.c
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.c
@@ -106,7 +106,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
106 if (!r->flags) 106 if (!r->flags)
107 continue; 107 continue;
108 if (!r->start || 108 if (!r->start ||
109 pci_claim_resource(dev, idx) < 0) { 109 pci_claim_bridge_resource(dev, idx) < 0) {
110 printk(KERN_ERR "PCI:" 110 printk(KERN_ERR "PCI:"
111 " Cannot allocate resource" 111 " Cannot allocate resource"
112 " region %d of bridge %s\n", 112 " region %d of bridge %s\n",
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 6b4339f8c9c2..471ff398090c 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -281,42 +281,37 @@ static int __init pci_check_direct(void)
281 return -ENODEV; 281 return -ENODEV;
282} 282}
283 283
284static int is_valid_resource(struct pci_dev *dev, int idx) 284static void pcibios_fixup_device_resources(struct pci_dev *dev)
285{ 285{
286 unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM; 286 int idx;
287 struct resource *devr = &dev->resource[idx], *busr;
288
289 if (dev->bus) {
290 pci_bus_for_each_resource(dev->bus, busr, i) {
291 if (!busr || (busr->flags ^ devr->flags) & type_mask)
292 continue;
293
294 if (devr->start &&
295 devr->start >= busr->start &&
296 devr->end <= busr->end)
297 return 1;
298 }
299 }
300 287
301 return 0; 288 if (!dev->bus)
289 return;
290
291 for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
292 struct resource *r = &dev->resource[idx];
293
294 if (!r->flags || r->parent || !r->start)
295 continue;
296
297 pci_claim_resource(dev, idx);
298 }
302} 299}
303 300
304static void pcibios_fixup_device_resources(struct pci_dev *dev) 301static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
305{ 302{
306 int limit, i; 303 int idx;
307 304
308 if (dev->bus->number != 0) 305 if (!dev->bus)
309 return; 306 return;
310 307
311 limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ? 308 for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
312 PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES; 309 struct resource *r = &dev->resource[idx];
313 310
314 for (i = 0; i < limit; i++) { 311 if (!r->flags || r->parent || !r->start)
315 if (!dev->resource[i].flags)
316 continue; 312 continue;
317 313
318 if (is_valid_resource(dev, i)) 314 pci_claim_bridge_resource(dev, idx);
319 pci_claim_resource(dev, i);
320 } 315 }
321} 316}
322 317
@@ -330,7 +325,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
330 325
331 if (bus->self) { 326 if (bus->self) {
332 pci_read_bridge_bases(bus); 327 pci_read_bridge_bases(bus);
333 pcibios_fixup_device_resources(bus->self); 328 pcibios_fixup_bridge_resources(bus->self);
334 } 329 }
335 330
336 list_for_each_entry(dev, &bus->devices, bus_list) 331 list_for_each_entry(dev, &bus->devices, bus_list)
diff --git a/arch/nios2/kernel/module.c b/arch/nios2/kernel/module.c
index cc924a38f22a..e2e3f13f98d5 100644
--- a/arch/nios2/kernel/module.c
+++ b/arch/nios2/kernel/module.c
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
36} 36}
37 37
38/* Free memory returned from module_alloc */ 38/* Free memory returned from module_alloc */
39void module_free(struct module *mod, void *module_region) 39void module_memfree(void *module_region)
40{ 40{
41 kfree(module_region); 41 kfree(module_region);
42} 42}
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index f9d27883a714..2d0ea25be171 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -200,7 +200,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
200 200
201 /* Set up to return from userspace; jump to fixed address sigreturn 201 /* Set up to return from userspace; jump to fixed address sigreturn
202 trampoline on kuser page. */ 202 trampoline on kuser page. */
203 regs->ra = (unsigned long) (0x1040); 203 regs->ra = (unsigned long) (0x1044);
204 204
205 /* Set up registers for signal handler */ 205 /* Set up registers for signal handler */
206 regs->sp = (unsigned long) frame; 206 regs->sp = (unsigned long) frame;
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 50dfafc3f2c1..5822e8e200e6 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -298,14 +298,10 @@ static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
298} 298}
299#endif 299#endif
300 300
301 301void module_arch_freeing_init(struct module *mod)
302/* Free memory returned from module_alloc */
303void module_free(struct module *mod, void *module_region)
304{ 302{
305 kfree(mod->arch.section); 303 kfree(mod->arch.section);
306 mod->arch.section = NULL; 304 mod->arch.section = NULL;
307
308 vfree(module_region);
309} 305}
310 306
311/* Additional bytes needed in front of individual sections */ 307/* Additional bytes needed in front of individual sections */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 37d512d35943..2a525c938158 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1184,6 +1184,8 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
1184 pr, (pr && pr->name) ? pr->name : "nil"); 1184 pr, (pr && pr->name) ? pr->name : "nil");
1185 1185
1186 if (pr && !(pr->flags & IORESOURCE_UNSET)) { 1186 if (pr && !(pr->flags & IORESOURCE_UNSET)) {
1187 struct pci_dev *dev = bus->self;
1188
1187 if (request_resource(pr, res) == 0) 1189 if (request_resource(pr, res) == 0)
1188 continue; 1190 continue;
1189 /* 1191 /*
@@ -1193,6 +1195,11 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
1193 */ 1195 */
1194 if (reparent_resources(pr, res) == 0) 1196 if (reparent_resources(pr, res) == 0)
1195 continue; 1197 continue;
1198
1199 if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
1200 pci_claim_bridge_resource(dev,
1201 i + PCI_BRIDGE_RESOURCES) == 0)
1202 continue;
1196 } 1203 }
1197 pr_warning("PCI: Cannot allocate resource region " 1204 pr_warning("PCI: Cannot allocate resource region "
1198 "%d of PCI bridge %d, will remap\n", i, bus->number); 1205 "%d of PCI bridge %d, will remap\n", i, bus->number);
@@ -1401,7 +1408,10 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
1401 (unsigned long long)r->end, 1408 (unsigned long long)r->end,
1402 (unsigned int)r->flags); 1409 (unsigned int)r->flags);
1403 1410
1404 pci_claim_resource(dev, i); 1411 if (pci_claim_resource(dev, i) == 0)
1412 continue;
1413
1414 pci_claim_bridge_resource(dev, i);
1405 } 1415 }
1406 } 1416 }
1407 1417
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 1ca125b9c226..d1916b577f2c 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -699,7 +699,7 @@ out:
699void bpf_jit_free(struct bpf_prog *fp) 699void bpf_jit_free(struct bpf_prog *fp)
700{ 700{
701 if (fp->jited) 701 if (fp->jited)
702 module_free(NULL, fp->bpf_func); 702 module_memfree(fp->bpf_func);
703 703
704 bpf_prog_unlock_free(fp); 704 bpf_prog_unlock_free(fp);
705} 705}
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index b700a329c31d..d2de7d5d7574 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -304,7 +304,7 @@ int pnv_save_sprs_for_winkle(void)
304 * all cpus at boot. Get these reg values of current cpu and use the 304 * all cpus at boot. Get these reg values of current cpu and use the
305 * same accross all cpus. 305 * same accross all cpus.
306 */ 306 */
307 uint64_t lpcr_val = mfspr(SPRN_LPCR); 307 uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
308 uint64_t hid0_val = mfspr(SPRN_HID0); 308 uint64_t hid0_val = mfspr(SPRN_HID0);
309 uint64_t hid1_val = mfspr(SPRN_HID1); 309 uint64_t hid1_val = mfspr(SPRN_HID1);
310 uint64_t hid4_val = mfspr(SPRN_HID4); 310 uint64_t hid4_val = mfspr(SPRN_HID4);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 5b150f0c5df9..13c6e200b24e 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -337,6 +337,7 @@ static inline void disable_surveillance(void)
337 args.token = rtas_token("set-indicator"); 337 args.token = rtas_token("set-indicator");
338 if (args.token == RTAS_UNKNOWN_SERVICE) 338 if (args.token == RTAS_UNKNOWN_SERVICE)
339 return; 339 return;
340 args.token = cpu_to_be32(args.token);
340 args.nargs = cpu_to_be32(3); 341 args.nargs = cpu_to_be32(3);
341 args.nret = cpu_to_be32(1); 342 args.nret = cpu_to_be32(1);
342 args.rets = &args.args[3]; 343 args.rets = &args.args[3];
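rtas_token() returns a CPU-endian value, while every field of struct rtas_args is big-endian; without the added cpu_to_be32() the token reached firmware byte-swapped on little-endian kernels. The layout this fix relies on, as declared in asm/rtas.h:

    struct rtas_args {
            __be32 token;
            __be32 nargs;
            __be32 nret;
            __be32 args[16];
            __be32 *rets;   /* points into args[] */
    };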
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index b89b59158b95..409d152585be 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -55,14 +55,10 @@ void *module_alloc(unsigned long size)
55} 55}
56#endif 56#endif
57 57
58/* Free memory returned from module_alloc */ 58void module_arch_freeing_init(struct module *mod)
59void module_free(struct module *mod, void *module_region)
60{ 59{
61 if (mod) { 60 vfree(mod->arch.syminfo);
62 vfree(mod->arch.syminfo); 61 mod->arch.syminfo = NULL;
63 mod->arch.syminfo = NULL;
64 }
65 vfree(module_region);
66} 62}
67 63
68static void check_rela(Elf_Rela *rela, struct module *me) 64static void check_rela(Elf_Rela *rela, struct module *me)
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
index 7e45d13816c1..ba44c9f55346 100644
--- a/arch/s390/net/bpf_jit.S
+++ b/arch/s390/net/bpf_jit.S
@@ -22,8 +22,8 @@
22 * skb_copy_bits takes 4 parameters: 22 * skb_copy_bits takes 4 parameters:
23 * %r2 = skb pointer 23 * %r2 = skb pointer
24 * %r3 = offset into skb data 24 * %r3 = offset into skb data
25 * %r4 = length to copy 25 * %r4 = pointer to temp buffer
26 * %r5 = pointer to temp buffer 26 * %r5 = length to copy
27 */ 27 */
28#define SKBDATA %r8 28#define SKBDATA %r8
29 29
@@ -44,8 +44,9 @@ ENTRY(sk_load_word)
44 44
45sk_load_word_slow: 45sk_load_word_slow:
46 lgr %r9,%r2 # save %r2 46 lgr %r9,%r2 # save %r2
47 lhi %r4,4 # 4 bytes 47 lgr %r3,%r1 # offset
48 la %r5,160(%r15) # pointer to temp buffer 48 la %r4,160(%r15) # pointer to temp buffer
49 lghi %r5,4 # 4 bytes
49 brasl %r14,skb_copy_bits # get data from skb 50 brasl %r14,skb_copy_bits # get data from skb
50 l %r5,160(%r15) # load result from temp buffer 51 l %r5,160(%r15) # load result from temp buffer
51 ltgr %r2,%r2 # set cc to (%r2 != 0) 52 ltgr %r2,%r2 # set cc to (%r2 != 0)
@@ -69,8 +70,9 @@ ENTRY(sk_load_half)
69 70
70sk_load_half_slow: 71sk_load_half_slow:
71 lgr %r9,%r2 # save %r2 72 lgr %r9,%r2 # save %r2
72 lhi %r4,2 # 2 bytes 73 lgr %r3,%r1 # offset
73 la %r5,162(%r15) # pointer to temp buffer 74 la %r4,162(%r15) # pointer to temp buffer
75 lghi %r5,2 # 2 bytes
74 brasl %r14,skb_copy_bits # get data from skb 76 brasl %r14,skb_copy_bits # get data from skb
75 xc 160(2,%r15),160(%r15) 77 xc 160(2,%r15),160(%r15)
76 l %r5,160(%r15) # load result from temp buffer 78 l %r5,160(%r15) # load result from temp buffer
@@ -95,8 +97,9 @@ ENTRY(sk_load_byte)
95 97
96sk_load_byte_slow: 98sk_load_byte_slow:
97 lgr %r9,%r2 # save %r2 99 lgr %r9,%r2 # save %r2
98 lhi %r4,1 # 1 bytes 100 lgr %r3,%r1 # offset
99 la %r5,163(%r15) # pointer to temp buffer 101 la %r4,163(%r15) # pointer to temp buffer
102 lghi %r5,1 # 1 byte
100 brasl %r14,skb_copy_bits # get data from skb 103 brasl %r14,skb_copy_bits # get data from skb
101 xc 160(3,%r15),160(%r15) 104 xc 160(3,%r15),160(%r15)
102 l %r5,160(%r15) # load result from temp buffer 105 l %r5,160(%r15) # load result from temp buffer
@@ -104,11 +107,11 @@ sk_load_byte_slow:
104 lgr %r2,%r9 # restore %r2 107 lgr %r2,%r9 # restore %r2
105 br %r8 108 br %r8
106 109
107 /* A = (*(u8 *)(skb->data+K) & 0xf) << 2 */ 110 /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
108ENTRY(sk_load_byte_msh) 111ENTRY(sk_load_byte_msh)
109 llgfr %r1,%r3 # extend offset 112 llgfr %r1,%r3 # extend offset
110 clr %r11,%r3 # hlen < offset ? 113 clr %r11,%r3 # hlen < offset ?
111 jle sk_load_byte_slow 114 jle sk_load_byte_msh_slow
112 lhi %r12,0 115 lhi %r12,0
113 ic %r12,0(%r1,%r10) # get byte from skb 116 ic %r12,0(%r1,%r10) # get byte from skb
114 nill %r12,0x0f 117 nill %r12,0x0f
@@ -118,8 +121,9 @@ ENTRY(sk_load_byte_msh)
118 121
119sk_load_byte_msh_slow: 122sk_load_byte_msh_slow:
120 lgr %r9,%r2 # save %r2 123 lgr %r9,%r2 # save %r2
121 lhi %r4,2 # 2 bytes 124 lgr %r3,%r1 # offset
122 la %r5,162(%r15) # pointer to temp buffer 125 la %r4,163(%r15) # pointer to temp buffer
126 lghi %r5,1 # 1 byte
123 brasl %r14,skb_copy_bits # get data from skb 127 brasl %r14,skb_copy_bits # get data from skb
124 xc 160(3,%r15),160(%r15) 128 xc 160(3,%r15),160(%r15)
125 l %r12,160(%r15) # load result from temp buffer 129 l %r12,160(%r15) # load result from temp buffer
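The slow paths above call the C helper skb_copy_bits(); on s390 the first arguments travel in %r2-%r5, so the buffer pointer belongs in %r4 and the length in %r5 — the pre-fix code had them swapped. (sk_load_byte_msh additionally jumped into the wrong slow path and copied 2 bytes instead of 1.) The prototype the asm must match:

    /* include/linux/skbuff.h */
    int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);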
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 524496d47ef5..bbd1981cc150 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -448,15 +448,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
448 mask = 0x800000; /* je */ 448 mask = 0x800000; /* je */
449kbranch: /* Emit compare if the branch targets are different */ 449kbranch: /* Emit compare if the branch targets are different */
450 if (filter->jt != filter->jf) { 450 if (filter->jt != filter->jf) {
451 if (K <= 16383) 451 if (test_facility(21))
452 /* chi %r5,<K> */
453 EMIT4_IMM(0xa75e0000, K);
454 else if (test_facility(21))
455 /* clfi %r5,<K> */ 452 /* clfi %r5,<K> */
456 EMIT6_IMM(0xc25f0000, K); 453 EMIT6_IMM(0xc25f0000, K);
457 else 454 else
458 /* c %r5,<d(K)>(%r13) */ 455 /* cl %r5,<d(K)>(%r13) */
459 EMIT4_DISP(0x5950d000, EMIT_CONST(K)); 456 EMIT4_DISP(0x5550d000, EMIT_CONST(K));
460 } 457 }
461branch: if (filter->jt == filter->jf) { 458branch: if (filter->jt == filter->jf) {
462 if (filter->jt == 0) 459 if (filter->jt == 0)
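Classic BPF compares the accumulator against K as unsigned 32-bit values, so the JIT must always emit unsigned compares (clfi, or cl against the constant pool); the removed chi/c path was signed and mis-ordered any K with the top bit set. A minimal C analogy:

    /* why the compare must be unsigned */
    static int jgt_taken(u32 A, u32 K)
    {
            return A > K;   /* unsigned, like clfi/cl: correct */
            /* (s32)A > (s32)K would flip the result for K >= 0x80000000 */
    }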
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index b36365f49478..9ce5afe167ff 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -639,7 +639,10 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
639 (unsigned long long)r->end, 639 (unsigned long long)r->end,
640 (unsigned int)r->flags); 640 (unsigned int)r->flags);
641 641
642 pci_claim_resource(dev, i); 642 if (pci_claim_resource(dev, i) == 0)
643 continue;
644
645 pci_claim_bridge_resource(dev, i);
643 } 646 }
644 } 647 }
645 648
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index f33e7c7a3bf7..7931eeeb649a 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -776,7 +776,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
776 if (unlikely(proglen + ilen > oldproglen)) { 776 if (unlikely(proglen + ilen > oldproglen)) {
777 pr_err("bpb_jit_compile fatal error\n"); 777 pr_err("bpb_jit_compile fatal error\n");
778 kfree(addrs); 778 kfree(addrs);
779 module_free(NULL, image); 779 module_memfree(image);
780 return; 780 return;
781 } 781 }
782 memcpy(image + proglen, temp, ilen); 782 memcpy(image + proglen, temp, ilen);
@@ -822,7 +822,7 @@ out:
822void bpf_jit_free(struct bpf_prog *fp) 822void bpf_jit_free(struct bpf_prog *fp)
823{ 823{
824 if (fp->jited) 824 if (fp->jited)
825 module_free(NULL, fp->bpf_func); 825 module_memfree(fp->bpf_func);
826 826
827 bpf_prog_unlock_free(fp); 827 bpf_prog_unlock_free(fp);
828} 828}
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 96447c9160a0..2305084c9b93 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -74,7 +74,7 @@ error:
74 74
75 75
76/* Free memory returned from module_alloc */ 76/* Free memory returned from module_alloc */
77void module_free(struct module *mod, void *module_region) 77void module_memfree(void *module_region)
78{ 78{
79 vfree(module_region); 79 vfree(module_region);
80 80
@@ -83,7 +83,7 @@ void module_free(struct module *mod, void *module_region)
83 0, 0, 0, NULL, NULL, 0); 83 0, 0, 0, NULL, NULL, 0);
84 84
85 /* 85 /*
86 * FIXME: If module_region == mod->module_init, trim exception 86 * FIXME: Add module_arch_freeing_init to trim exception
87 * table entries. 87 * table entries.
88 */ 88 */
89} 89}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ba397bde7948..0dc9d0144a27 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -857,7 +857,7 @@ source "kernel/Kconfig.preempt"
857 857
858config X86_UP_APIC 858config X86_UP_APIC
859 bool "Local APIC support on uniprocessors" 859 bool "Local APIC support on uniprocessors"
860 depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI 860 depends on X86_32 && !SMP && !X86_32_NON_STANDARD
861 ---help--- 861 ---help---
862 A local APIC (Advanced Programmable Interrupt Controller) is an 862 A local APIC (Advanced Programmable Interrupt Controller) is an
863 integrated interrupt controller in the CPU. If you have a single-CPU 863 integrated interrupt controller in the CPU. If you have a single-CPU
@@ -868,6 +868,10 @@ config X86_UP_APIC
868 performance counters), and the NMI watchdog which detects hard 868 performance counters), and the NMI watchdog which detects hard
869 lockups. 869 lockups.
870 870
871config X86_UP_APIC_MSI
872 def_bool y
873 select X86_UP_APIC if X86_32 && !SMP && !X86_32_NON_STANDARD && PCI_MSI
874
871config X86_UP_IOAPIC 875config X86_UP_IOAPIC
872 bool "IO-APIC support on uniprocessors" 876 bool "IO-APIC support on uniprocessors"
873 depends on X86_UP_APIC 877 depends on X86_UP_APIC
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index d999398928bc..ad754b4411f7 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -90,7 +90,7 @@ suffix-$(CONFIG_KERNEL_LZO) := lzo
90suffix-$(CONFIG_KERNEL_LZ4) := lz4 90suffix-$(CONFIG_KERNEL_LZ4) := lz4
91 91
92RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \ 92RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
93 perl $(srctree)/arch/x86/tools/calc_run_size.pl) 93 $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
94quiet_cmd_mkpiggy = MKPIGGY $@ 94quiet_cmd_mkpiggy = MKPIGGY $@
95 cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false ) 95 cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
96 96
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index dcc1c536cc21..a950864a64da 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -373,6 +373,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
373 unsigned long output_len, 373 unsigned long output_len,
374 unsigned long run_size) 374 unsigned long run_size)
375{ 375{
376 unsigned char *output_orig = output;
377
376 real_mode = rmode; 378 real_mode = rmode;
377 379
378 sanitize_boot_params(real_mode); 380 sanitize_boot_params(real_mode);
@@ -421,7 +423,12 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
421 debug_putstr("\nDecompressing Linux... "); 423 debug_putstr("\nDecompressing Linux... ");
422 decompress(input_data, input_len, NULL, NULL, output, NULL, error); 424 decompress(input_data, input_len, NULL, NULL, output, NULL, error);
423 parse_elf(output); 425 parse_elf(output);
424 handle_relocations(output, output_len); 426 /*
427 * 32-bit always performs relocations. 64-bit relocations are only
428 * needed if kASLR has chosen a different load address.
429 */
430 if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
431 handle_relocations(output, output_len);
425 debug_putstr("done.\nBooting the kernel.\n"); 432 debug_putstr("done.\nBooting the kernel.\n");
426 return output; 433 return output;
427} 434}
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 0ab4f9fd2687..3a45668f6dc3 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -50,6 +50,7 @@ void acpi_pic_sci_set_trigger(unsigned int, u16);
50 50
51extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi, 51extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
52 int trigger, int polarity); 52 int trigger, int polarity);
53extern void (*__acpi_unregister_gsi)(u32 gsi);
53 54
54static inline void disable_acpi(void) 55static inline void disable_acpi(void)
55{ 56{
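__acpi_unregister_gsi mirrors the existing __acpi_register_gsi hook so platforms (Xen, below) can override or disable GSI unregistration. A hedged sketch of the dispatcher this implies on the acpi_unregister_gsi() side; the exact core wiring is not shown in this diff:

    /* sketch of the assumed dispatcher */
    void acpi_unregister_gsi(u32 gsi)
    {
            if (__acpi_unregister_gsi)
                    __acpi_unregister_gsi(gsi);
    }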
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 50d033a8947d..a94b82e8f156 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -251,7 +251,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
251 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; 251 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
252} 252}
253 253
254#define _LDT_empty(info) \ 254/* This intentionally ignores lm, since 32-bit apps don't have that field. */
255#define LDT_empty(info) \
255 ((info)->base_addr == 0 && \ 256 ((info)->base_addr == 0 && \
256 (info)->limit == 0 && \ 257 (info)->limit == 0 && \
257 (info)->contents == 0 && \ 258 (info)->contents == 0 && \
@@ -261,11 +262,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
261 (info)->seg_not_present == 1 && \ 262 (info)->seg_not_present == 1 && \
262 (info)->useable == 0) 263 (info)->useable == 0)
263 264
264#ifdef CONFIG_X86_64 265/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
265#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0)) 266static inline bool LDT_zero(const struct user_desc *info)
266#else 267{
267#define LDT_empty(info) (_LDT_empty(info)) 268 return (info->base_addr == 0 &&
268#endif 269 info->limit == 0 &&
270 info->contents == 0 &&
271 info->read_exec_only == 0 &&
272 info->seg_32bit == 0 &&
273 info->limit_in_pages == 0 &&
274 info->seg_not_present == 0 &&
275 info->useable == 0);
276}
269 277
270static inline void clear_LDT(void) 278static inline void clear_LDT(void)
271{ 279{
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 40269a2bf6f9..4b75d591eb5e 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -130,7 +130,25 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
130static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma, 130static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
131 unsigned long start, unsigned long end) 131 unsigned long start, unsigned long end)
132{ 132{
133 mpx_notify_unmap(mm, vma, start, end); 133 /*
134 * mpx_notify_unmap() goes and reads a rarely-hot
135 * cacheline in the mm_struct. That can be expensive
136 * enough to be seen in profiles.
137 *
138 * The mpx_notify_unmap() call and its contents have been
139 * observed to affect munmap() performance on hardware
140 * where MPX is not present.
141 *
142 * The unlikely() optimizes for the fast case: no MPX
143 * in the CPU, or no MPX use in the process. Even if
144 * we get this wrong (in the unlikely event that MPX
145 * is widely enabled on some system) the overhead of
146 * MPX itself (reading bounds tables) is expected to
147 * overwhelm the overhead of getting this unlikely()
148 * consistently wrong.
149 */
150 if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
151 mpx_notify_unmap(mm, vma, start, end);
134} 152}
135 153
136#endif /* _ASM_X86_MMU_CONTEXT_H */ 154#endif /* _ASM_X86_MMU_CONTEXT_H */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index d1626364a28a..b9e30daa0881 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -611,20 +611,20 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
611 611
612int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp) 612int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
613{ 613{
614 int irq; 614 int rc, irq, trigger, polarity;
615 615
616 if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { 616 rc = acpi_get_override_irq(gsi, &trigger, &polarity);
617 *irqp = gsi; 617 if (rc == 0) {
618 } else { 618 trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
619 mutex_lock(&acpi_ioapic_lock); 619 polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
620 irq = mp_map_gsi_to_irq(gsi, 620 irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
621 IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK); 621 if (irq >= 0) {
622 mutex_unlock(&acpi_ioapic_lock); 622 *irqp = irq;
623 if (irq < 0) 623 return 0;
624 return -1; 624 }
625 *irqp = irq;
626 } 625 }
627 return 0; 626
627 return -1;
628} 628}
629EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); 629EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
630 630
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index a450373e8e91..939155ffdece 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -107,6 +107,7 @@ static struct clocksource hyperv_cs = {
107 .rating = 400, /* use this when running on Hyperv*/ 107 .rating = 400, /* use this when running on Hyperv*/
108 .read = read_hv_clock, 108 .read = read_hv_clock,
109 .mask = CLOCKSOURCE_MASK(64), 109 .mask = CLOCKSOURCE_MASK(64),
110 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
110}; 111};
111 112
112static void __init ms_hyperv_init_platform(void) 113static void __init ms_hyperv_init_platform(void)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 2142376dc8c6..8b7b0a51e742 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -674,7 +674,7 @@ static inline void *alloc_tramp(unsigned long size)
674} 674}
675static inline void tramp_free(void *tramp) 675static inline void tramp_free(void *tramp)
676{ 676{
677 module_free(NULL, tramp); 677 module_memfree(tramp);
678} 678}
679#else 679#else
680/* Trampolines can only be created if modules are supported */ 680/* Trampolines can only be created if modules are supported */
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 6307a0f0cf17..705ef8d48e2d 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -127,7 +127,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
127 seq_puts(p, " Machine check polls\n"); 127 seq_puts(p, " Machine check polls\n");
128#endif 128#endif
129#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN) 129#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
130 seq_printf(p, "%*s: ", prec, "THR"); 130 seq_printf(p, "%*s: ", prec, "HYP");
131 for_each_online_cpu(j) 131 for_each_online_cpu(j)
132 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count); 132 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
133 seq_puts(p, " Hypervisor callback interrupts\n"); 133 seq_puts(p, " Hypervisor callback interrupts\n");
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 4e942f31b1a7..7fc5e843f247 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -29,7 +29,28 @@ static int get_free_idx(void)
29 29
30static bool tls_desc_okay(const struct user_desc *info) 30static bool tls_desc_okay(const struct user_desc *info)
31{ 31{
32 if (LDT_empty(info)) 32 /*
33 * For historical reasons (i.e. no one ever documented how any
34 * of the segmentation APIs work), user programs can and do
35 * assume that a struct user_desc that's all zeros except for
36 * entry_number means "no segment at all". This never actually
37 * worked. In fact, up to Linux 3.19, a struct user_desc like
38 * this would create a 16-bit read-write segment with base and
39 * limit both equal to zero.
40 *
41 * That was close enough to "no segment at all" until we
42 * hardened this function to disallow 16-bit TLS segments. Fix
43 * it up by interpreting these zeroed segments the way that they
44 * were almost certainly intended to be interpreted.
45 *
46 * The correct way to ask for "no segment at all" is to specify
47 * a user_desc that satisfies LDT_empty. To keep everything
48 * working, we accept both.
49 *
50 * Note that there's a similar kludge in modify_ldt -- look at
51 * the distinction between modes 1 and 0x11.
52 */
53 if (LDT_empty(info) || LDT_zero(info))
33 return true; 54 return true;
34 55
35 /* 56 /*
@@ -71,7 +92,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
71 cpu = get_cpu(); 92 cpu = get_cpu();
72 93
73 while (n-- > 0) { 94 while (n-- > 0) {
74 if (LDT_empty(info)) 95 if (LDT_empty(info) || LDT_zero(info))
75 desc->a = desc->b = 0; 96 desc->a = desc->b = 0;
76 else 97 else
77 fill_ldt(desc, info); 98 fill_ldt(desc, info);
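LDT_empty() and LDT_zero() differ in exactly one field: an "empty" descriptor has seg_not_present == 1, while the all-zero descriptor that programs actually pass has seg_not_present == 0. A userspace sketch of the zeroed-descriptor idiom this change re-accepts:

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/ldt.h>

    /* clear TLS slot idx the way existing programs do: an all-zero user_desc */
    static long clear_tls_slot(int idx)
    {
            struct user_desc desc;

            memset(&desc, 0, sizeof(desc)); /* satisfies LDT_zero(), not LDT_empty() */
            desc.entry_number = idx;
            return syscall(SYS_set_thread_area, &desc);
    }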
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index b7e50bba3bbb..505449700e0c 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -617,7 +617,7 @@ static unsigned long quick_pit_calibrate(void)
617 goto success; 617 goto success;
618 } 618 }
619 } 619 }
620 pr_err("Fast TSC calibration failed\n"); 620 pr_info("Fast TSC calibration failed\n");
621 return 0; 621 return 0;
622 622
623success: 623success:
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 169b09d76ddd..de12c1d379f1 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2348,7 +2348,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2348 * Not recognized on AMD in compat mode (but is recognized in legacy 2348 * Not recognized on AMD in compat mode (but is recognized in legacy
2349 * mode). 2349 * mode).
2350 */ 2350 */
2351 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) 2351 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2352 && !vendor_intel(ctxt)) 2352 && !vendor_intel(ctxt))
2353 return emulate_ud(ctxt); 2353 return emulate_ud(ctxt);
2354 2354
@@ -2359,25 +2359,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2359 setup_syscalls_segments(ctxt, &cs, &ss); 2359 setup_syscalls_segments(ctxt, &cs, &ss);
2360 2360
2361 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); 2361 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2362 switch (ctxt->mode) { 2362 if ((msr_data & 0xfffc) == 0x0)
2363 case X86EMUL_MODE_PROT32: 2363 return emulate_gp(ctxt, 0);
2364 if ((msr_data & 0xfffc) == 0x0)
2365 return emulate_gp(ctxt, 0);
2366 break;
2367 case X86EMUL_MODE_PROT64:
2368 if (msr_data == 0x0)
2369 return emulate_gp(ctxt, 0);
2370 break;
2371 default:
2372 break;
2373 }
2374 2364
2375 ctxt->eflags &= ~(EFLG_VM | EFLG_IF); 2365 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2376 cs_sel = (u16)msr_data; 2366 cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
2377 cs_sel &= ~SELECTOR_RPL_MASK;
2378 ss_sel = cs_sel + 8; 2367 ss_sel = cs_sel + 8;
2379 ss_sel &= ~SELECTOR_RPL_MASK; 2368 if (efer & EFER_LMA) {
2380 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2381 cs.d = 0; 2369 cs.d = 0;
2382 cs.l = 1; 2370 cs.l = 1;
2383 } 2371 }
@@ -2386,10 +2374,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2386 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2374 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2387 2375
2388 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); 2376 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2389 ctxt->_eip = msr_data; 2377 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2390 2378
2391 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); 2379 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2392 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data; 2380 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2381 (u32)msr_data;
2393 2382
2394 return X86EMUL_CONTINUE; 2383 return X86EMUL_CONTINUE;
2395} 2384}
@@ -3791,8 +3780,8 @@ static const struct opcode group5[] = {
3791}; 3780};
3792 3781
3793static const struct opcode group6[] = { 3782static const struct opcode group6[] = {
3794 DI(Prot, sldt), 3783 DI(Prot | DstMem, sldt),
3795 DI(Prot, str), 3784 DI(Prot | DstMem, str),
3796 II(Prot | Priv | SrcMem16, em_lldt, lldt), 3785 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3797 II(Prot | Priv | SrcMem16, em_ltr, ltr), 3786 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3798 N, N, N, N, 3787 N, N, N, N,
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 08a7d313538a..079c3b6a3ff1 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -43,7 +43,7 @@ uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
43 [_PAGE_CACHE_MODE_WT] = _PAGE_PCD, 43 [_PAGE_CACHE_MODE_WT] = _PAGE_PCD,
44 [_PAGE_CACHE_MODE_WP] = _PAGE_PCD, 44 [_PAGE_CACHE_MODE_WP] = _PAGE_PCD,
45}; 45};
46EXPORT_SYMBOL_GPL(__cachemode2pte_tbl); 46EXPORT_SYMBOL(__cachemode2pte_tbl);
47uint8_t __pte2cachemode_tbl[8] = { 47uint8_t __pte2cachemode_tbl[8] = {
48 [__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB, 48 [__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB,
49 [__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC, 49 [__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC,
@@ -54,7 +54,7 @@ uint8_t __pte2cachemode_tbl[8] = {
54 [__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS, 54 [__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
55 [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC, 55 [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
56}; 56};
57EXPORT_SYMBOL_GPL(__pte2cachemode_tbl); 57EXPORT_SYMBOL(__pte2cachemode_tbl);
58 58
59static unsigned long __initdata pgt_buf_start; 59static unsigned long __initdata pgt_buf_start;
60static unsigned long __initdata pgt_buf_end; 60static unsigned long __initdata pgt_buf_end;
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 67ebf5751222..c439ec478216 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -349,6 +349,12 @@ static __user void *task_get_bounds_dir(struct task_struct *tsk)
349 return MPX_INVALID_BOUNDS_DIR; 349 return MPX_INVALID_BOUNDS_DIR;
350 350
351 /* 351 /*
352 * 32-bit binaries on 64-bit kernels are currently
353 * unsupported.
354 */
355 if (IS_ENABLED(CONFIG_X86_64) && test_thread_flag(TIF_IA32))
356 return MPX_INVALID_BOUNDS_DIR;
357 /*
352 * The bounds directory pointer is stored in a register 358 * The bounds directory pointer is stored in a register
353 * only accessible if we first do an xsave. 359 * only accessible if we first do an xsave.
354 */ 360 */
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index edf299c8ff6c..7ac68698406c 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -234,8 +234,13 @@ void pat_init(void)
234 PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC); 234 PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
235 235
236 /* Boot CPU check */ 236 /* Boot CPU check */
237 if (!boot_pat_state) 237 if (!boot_pat_state) {
238 rdmsrl(MSR_IA32_CR_PAT, boot_pat_state); 238 rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
239 if (!boot_pat_state) {
240 pat_disable("PAT read returns always zero, disabled.");
241 return;
242 }
243 }
239 244
240 wrmsrl(MSR_IA32_CR_PAT, pat); 245 wrmsrl(MSR_IA32_CR_PAT, pat);
241 246
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 9b18ef315a55..349c0d32cc0b 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -216,7 +216,7 @@ static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
216 continue; 216 continue;
217 if (r->parent) /* Already allocated */ 217 if (r->parent) /* Already allocated */
218 continue; 218 continue;
219 if (!r->start || pci_claim_resource(dev, idx) < 0) { 219 if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) {
220 /* 220 /*
221 * Something is wrong with the region. 221 * Something is wrong with the region.
222 * Invalidate the resource to prevent 222 * Invalidate the resource to prevent
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index c489ef2c1a39..9098d880c476 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -458,6 +458,7 @@ int __init pci_xen_hvm_init(void)
458 * just how GSIs get registered. 458 * just how GSIs get registered.
459 */ 459 */
460 __acpi_register_gsi = acpi_register_gsi_xen_hvm; 460 __acpi_register_gsi = acpi_register_gsi_xen_hvm;
461 __acpi_unregister_gsi = NULL;
461#endif 462#endif
462 463
463#ifdef CONFIG_PCI_MSI 464#ifdef CONFIG_PCI_MSI
@@ -471,52 +472,6 @@ int __init pci_xen_hvm_init(void)
471} 472}
472 473
473#ifdef CONFIG_XEN_DOM0 474#ifdef CONFIG_XEN_DOM0
474static __init void xen_setup_acpi_sci(void)
475{
476 int rc;
477 int trigger, polarity;
478 int gsi = acpi_sci_override_gsi;
479 int irq = -1;
480 int gsi_override = -1;
481
482 if (!gsi)
483 return;
484
485 rc = acpi_get_override_irq(gsi, &trigger, &polarity);
486 if (rc) {
487 printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi"
488 " sci, rc=%d\n", rc);
489 return;
490 }
491 trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
492 polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
493
494 printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
495 "polarity=%d\n", gsi, trigger, polarity);
496
497 /* Before we bind the GSI to a Linux IRQ, check whether
498 * we need to override it with bus_irq (IRQ) value. Usually for
499 * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
500 * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
501 * but there are oddballs where the IRQ != GSI:
502 * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
503 * which ends up being: gsi_to_irq[9] == 20
504 * (which is what acpi_gsi_to_irq ends up calling when starting the
505 * the ACPI interpreter and keels over since IRQ 9 has not been
506 * setup as we had setup IRQ 20 for it).
507 */
508 if (acpi_gsi_to_irq(gsi, &irq) == 0) {
509 /* Use the provided value if it's valid. */
510 if (irq >= 0)
511 gsi_override = irq;
512 }
513
514 gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
515 printk(KERN_INFO "xen: acpi sci %d\n", gsi);
516
517 return;
518}
519
520int __init pci_xen_initial_domain(void) 475int __init pci_xen_initial_domain(void)
521{ 476{
522 int irq; 477 int irq;
@@ -527,8 +482,8 @@ int __init pci_xen_initial_domain(void)
527 x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; 482 x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
528 pci_msi_ignore_mask = 1; 483 pci_msi_ignore_mask = 1;
529#endif 484#endif
530 xen_setup_acpi_sci();
531 __acpi_register_gsi = acpi_register_gsi_xen; 485 __acpi_register_gsi = acpi_register_gsi_xen;
486 __acpi_unregister_gsi = NULL;
532 /* Pre-allocate legacy irqs */ 487 /* Pre-allocate legacy irqs */
533 for (irq = 0; irq < nr_legacy_irqs(); irq++) { 488 for (irq = 0; irq < nr_legacy_irqs(); irq++) {
534 int trigger, polarity; 489 int trigger, polarity;
diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl
deleted file mode 100644
index 23210baade2d..000000000000
--- a/arch/x86/tools/calc_run_size.pl
+++ /dev/null
@@ -1,39 +0,0 @@
1#!/usr/bin/perl
2#
3# Calculate the amount of space needed to run the kernel, including room for
4# the .bss and .brk sections.
5#
6# Usage:
7# objdump -h a.out | perl calc_run_size.pl
8use strict;
9
10my $mem_size = 0;
11my $file_offset = 0;
12
13my $sections=" *[0-9]+ \.(?:bss|brk) +";
14while (<>) {
15 if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
16 my $size = hex($1);
17 my $offset = hex($2);
18 $mem_size += $size;
19 if ($file_offset == 0) {
20 $file_offset = $offset;
21 } elsif ($file_offset != $offset) {
22 # BFD linker shows the same file offset in ELF.
23 # Gold linker shows them as consecutive.
24 next if ($file_offset + $mem_size == $offset + $size);
25
26 printf STDERR "file_offset: 0x%lx\n", $file_offset;
27 printf STDERR "mem_size: 0x%lx\n", $mem_size;
28 printf STDERR "offset: 0x%lx\n", $offset;
29 printf STDERR "size: 0x%lx\n", $size;
30
31 die ".bss and .brk are non-contiguous\n";
32 }
33 }
34}
35
36if ($file_offset == 0) {
37 die "Never found .bss or .brk file offset\n";
38}
39printf("%d\n", $mem_size + $file_offset);
diff --git a/arch/x86/tools/calc_run_size.sh b/arch/x86/tools/calc_run_size.sh
new file mode 100644
index 000000000000..1a4c17bb3910
--- /dev/null
+++ b/arch/x86/tools/calc_run_size.sh
@@ -0,0 +1,42 @@
1#!/bin/sh
2#
3# Calculate the amount of space needed to run the kernel, including room for
4# the .bss and .brk sections.
5#
6# Usage:
7# objdump -h a.out | sh calc_run_size.sh
8
9NUM='\([0-9a-fA-F]*[ \t]*\)'
10OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p')
11if [ -z "$OUT" ] ; then
12 echo "Never found .bss or .brk file offset" >&2
13 exit 1
14fi
15
16OUT=$(echo ${OUT# })
17sizeA=$(printf "%d" 0x${OUT%% *})
18OUT=${OUT#* }
19offsetA=$(printf "%d" 0x${OUT%% *})
20OUT=${OUT#* }
21sizeB=$(printf "%d" 0x${OUT%% *})
22OUT=${OUT#* }
23offsetB=$(printf "%d" 0x${OUT%% *})
24
25run_size=$(( $offsetA + $sizeA + $sizeB ))
26
27# BFD linker shows the same file offset in ELF.
28if [ "$offsetA" -ne "$offsetB" ] ; then
29 # Gold linker shows them as consecutive.
30 endB=$(( $offsetB + $sizeB ))
31 if [ "$endB" != "$run_size" ] ; then
32 printf "sizeA: 0x%x\n" $sizeA >&2
33 printf "offsetA: 0x%x\n" $offsetA >&2
34 printf "sizeB: 0x%x\n" $sizeB >&2
35 printf "offsetB: 0x%x\n" $offsetB >&2
36 echo ".bss and .brk are non-contiguous" >&2
37 exit 1
38 fi
39fi
40
41printf "%d\n" $run_size
42exit 0
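Worked example of the script's arithmetic: if objdump reports .bss at file offset 0x1000 with size 0x100 and .brk at offset 0x1100 with size 0x200 (gold-style consecutive offsets), then run_size = 0x1000 + 0x100 + 0x200 = 0x1300, and the contiguity check passes because offsetB + sizeB = 0x1300 equals run_size. With the BFD linker both sections report offset 0x1000, so offsetA == offsetB and the check is skipped entirely.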
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 1630a20d5dcf..6774a0e69867 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -15,6 +15,26 @@
15 15
16static void blk_mq_sysfs_release(struct kobject *kobj) 16static void blk_mq_sysfs_release(struct kobject *kobj)
17{ 17{
18 struct request_queue *q;
19
20 q = container_of(kobj, struct request_queue, mq_kobj);
21 free_percpu(q->queue_ctx);
22}
23
24static void blk_mq_ctx_release(struct kobject *kobj)
25{
26 struct blk_mq_ctx *ctx;
27
28 ctx = container_of(kobj, struct blk_mq_ctx, kobj);
29 kobject_put(&ctx->queue->mq_kobj);
30}
31
32static void blk_mq_hctx_release(struct kobject *kobj)
33{
34 struct blk_mq_hw_ctx *hctx;
35
36 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
37 kfree(hctx);
18} 38}
19 39
20struct blk_mq_ctx_sysfs_entry { 40struct blk_mq_ctx_sysfs_entry {
@@ -318,13 +338,13 @@ static struct kobj_type blk_mq_ktype = {
318static struct kobj_type blk_mq_ctx_ktype = { 338static struct kobj_type blk_mq_ctx_ktype = {
319 .sysfs_ops = &blk_mq_sysfs_ops, 339 .sysfs_ops = &blk_mq_sysfs_ops,
320 .default_attrs = default_ctx_attrs, 340 .default_attrs = default_ctx_attrs,
321 .release = blk_mq_sysfs_release, 341 .release = blk_mq_ctx_release,
322}; 342};
323 343
324static struct kobj_type blk_mq_hw_ktype = { 344static struct kobj_type blk_mq_hw_ktype = {
325 .sysfs_ops = &blk_mq_hw_sysfs_ops, 345 .sysfs_ops = &blk_mq_hw_sysfs_ops,
326 .default_attrs = default_hw_ctx_attrs, 346 .default_attrs = default_hw_ctx_attrs,
327 .release = blk_mq_sysfs_release, 347 .release = blk_mq_hctx_release,
328}; 348};
329 349
330static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) 350static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -355,6 +375,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
355 return ret; 375 return ret;
356 376
357 hctx_for_each_ctx(hctx, ctx, i) { 377 hctx_for_each_ctx(hctx, ctx, i) {
378 kobject_get(&q->mq_kobj);
358 ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); 379 ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
359 if (ret) 380 if (ret)
360 break; 381 break;
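The release split pairs each reference with its owner: blk_mq_register_hctx() now takes a reference on the queue's mq_kobj for every per-cpu ctx kobject it adds, blk_mq_ctx_release() drops it, and only once the last such reference is gone does blk_mq_sysfs_release() run and free q->queue_ctx. Sketched as a chain:

    /* lifetime chain introduced above
     *   kobject_add(&ctx->kobj, ...)  preceded by  kobject_get(&q->mq_kobj)
     *   ctx release                   ->           kobject_put(&q->mq_kobj)
     *   mq_kobj release               ->           free_percpu(q->queue_ctx)
     */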
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2f95747c287e..9ee3b87c4498 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1641,10 +1641,8 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
1641 struct blk_mq_hw_ctx *hctx; 1641 struct blk_mq_hw_ctx *hctx;
1642 unsigned int i; 1642 unsigned int i;
1643 1643
1644 queue_for_each_hw_ctx(q, hctx, i) { 1644 queue_for_each_hw_ctx(q, hctx, i)
1645 free_cpumask_var(hctx->cpumask); 1645 free_cpumask_var(hctx->cpumask);
1646 kfree(hctx);
1647 }
1648} 1646}
1649 1647
1650static int blk_mq_init_hctx(struct request_queue *q, 1648static int blk_mq_init_hctx(struct request_queue *q,
@@ -2002,11 +2000,9 @@ void blk_mq_free_queue(struct request_queue *q)
2002 2000
2003 percpu_ref_exit(&q->mq_usage_counter); 2001 percpu_ref_exit(&q->mq_usage_counter);
2004 2002
2005 free_percpu(q->queue_ctx);
2006 kfree(q->queue_hw_ctx); 2003 kfree(q->queue_hw_ctx);
2007 kfree(q->mq_map); 2004 kfree(q->mq_map);
2008 2005
2009 q->queue_ctx = NULL;
2010 q->queue_hw_ctx = NULL; 2006 q->queue_hw_ctx = NULL;
2011 q->mq_map = NULL; 2007 q->mq_map = NULL;
2012 2008
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 5277a0ee5704..b1def411c0b8 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -512,7 +512,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
512 dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); 512 dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
513 if (gsi >= 0) { 513 if (gsi >= 0) {
514 acpi_unregister_gsi(gsi); 514 acpi_unregister_gsi(gsi);
515 dev->irq = 0;
516 dev->irq_managed = 0; 515 dev->irq_managed = 0;
517 } 516 }
518} 517}
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index cb529e9a82dd..d826bf3e62c8 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -106,7 +106,7 @@ struct nvme_queue {
106 dma_addr_t cq_dma_addr; 106 dma_addr_t cq_dma_addr;
107 u32 __iomem *q_db; 107 u32 __iomem *q_db;
108 u16 q_depth; 108 u16 q_depth;
109 u16 cq_vector; 109 s16 cq_vector;
110 u16 sq_head; 110 u16 sq_head;
111 u16 sq_tail; 111 u16 sq_tail;
112 u16 cq_head; 112 u16 cq_head;
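Making cq_vector signed lets -1 serve as a "no vector assigned" sentinel. With u16, storing -1 wrapped to 65535 and any test against -1 was always false, because the u16 promotes to a non-negative int before the comparison. Roughly the pattern the driver relies on:

    s16 cq_vector = -1;             /* sentinel: queue has no IRQ vector */

    if (cq_vector == -1)            /* true with s16 */
            return;
    /* with u16, cq_vector promotes to 0..65535, so the test never fires */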
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index eb7682dc123b..81bf297f1034 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -210,12 +210,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
210} 210}
211 211
212/* Checks whether the given window number is available */ 212/* Checks whether the given window number is available */
213
214/* On Armada XP, 375 and 38x the MBus window 13 has the remap
215 * capability, like windows 0 to 7. However, the mvebu-mbus driver
216 * isn't currently taking into account this special case, which means
217 * that when window 13 is actually used, the remap registers are left
218 * to 0, making the device using this MBus window unavailable. The
219 * quick fix for stable is to not use window 13. A follow up patch
220 * will correctly handle this window.
221*/
213static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus, 222static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
214 const int win) 223 const int win)
215{ 224{
216 void __iomem *addr = mbus->mbuswins_base + 225 void __iomem *addr = mbus->mbuswins_base +
217 mbus->soc->win_cfg_offset(win); 226 mbus->soc->win_cfg_offset(win);
218 u32 ctrl = readl(addr + WIN_CTRL_OFF); 227 u32 ctrl = readl(addr + WIN_CTRL_OFF);
228
229 if (win == 13)
230 return false;
231
219 return !(ctrl & WIN_CTRL_ENABLE); 232 return !(ctrl & WIN_CTRL_ENABLE);
220} 233}
221 234
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 0595dc6c453e..f1e33d08dd83 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -68,9 +68,8 @@ static void kona_timer_disable_and_clear(void __iomem *base)
68} 68}
69 69
70static void 70static void
71kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw) 71kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
72{ 72{
73 void __iomem *base = IOMEM(timer_base);
74 int loop_limit = 4; 73 int loop_limit = 4;
75 74
76 /* 75 /*
@@ -86,9 +85,9 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
86 */ 85 */
87 86
88 while (--loop_limit) { 87 while (--loop_limit) {
89 *msw = readl(base + KONA_GPTIMER_STCHI_OFFSET); 88 *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
90 *lsw = readl(base + KONA_GPTIMER_STCLO_OFFSET); 89 *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
91 if (*msw == readl(base + KONA_GPTIMER_STCHI_OFFSET)) 90 if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
92 break; 91 break;
93 } 92 }
94 if (!loop_limit) { 93 if (!loop_limit) {
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 9403061a2acc..83564c9cfdbe 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -97,8 +97,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
97 writel_relaxed(value, reg_base + offset); 97 writel_relaxed(value, reg_base + offset);
98 98
99 if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) { 99 if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
100 stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; 100 stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
101 switch (offset & EXYNOS4_MCT_L_MASK) { 101 switch (offset & ~EXYNOS4_MCT_L_MASK) {
102 case MCT_L_TCON_OFFSET: 102 case MCT_L_TCON_OFFSET:
103 mask = 1 << 3; /* L_TCON write status */ 103 mask = 1 << 3; /* L_TCON write status */
104 break; 104 break;
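The two masks were swapped: offset & EXYNOS4_MCT_L_MASK selects the per-timer block base (used to locate the write-status register), while offset & ~EXYNOS4_MCT_L_MASK is the register offset inside that block (what the switch must dispatch on). A worked example, assuming the driver's mask of 0xffffff00:

    /* offset 0x320 = local-timer block at 0x300, register 0x20 inside it
     *   offset &  EXYNOS4_MCT_L_MASK = 0x300  -> block base, for stat_addr
     *   offset & ~EXYNOS4_MCT_L_MASK = 0x020  -> register, for the switch
     * the pre-fix code used each mask where the other belonged
     */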
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 0f665b8f2461..f150ca82bfaf 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -428,7 +428,7 @@ static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
428 ced->features = CLOCK_EVT_FEAT_PERIODIC; 428 ced->features = CLOCK_EVT_FEAT_PERIODIC;
429 ced->features |= CLOCK_EVT_FEAT_ONESHOT; 429 ced->features |= CLOCK_EVT_FEAT_ONESHOT;
430 ced->rating = 200; 430 ced->rating = 200;
431 ced->cpumask = cpumask_of(0); 431 ced->cpumask = cpu_possible_mask;
432 ced->set_next_event = sh_tmu_clock_event_next; 432 ced->set_next_event = sh_tmu_clock_event_next;
433 ced->set_mode = sh_tmu_clock_event_mode; 433 ced->set_mode = sh_tmu_clock_event_mode;
434 ced->suspend = sh_tmu_clock_event_suspend; 434 ced->suspend = sh_tmu_clock_event_suspend;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 1ba8332419fa..5bc32c26b989 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -183,16 +183,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
183 kfd->shared_resources = *gpu_resources; 183 kfd->shared_resources = *gpu_resources;
184 184
185 /* calculate max size of mqds needed for queues */ 185 /* calculate max size of mqds needed for queues */
186 size = max_num_of_processes * 186 size = max_num_of_queues_per_device *
187 max_num_of_queues_per_process * 187 kfd->device_info->mqd_size_aligned;
188 kfd->device_info->mqd_size_aligned;
189 188
190 /* 189 /*
191 * calculate max size of runlist packet. 190 * calculate max size of runlist packet.
192 * There can be only 2 packets at once 191 * There can be only 2 packets at once
193 */ 192 */
194 size += (max_num_of_processes * sizeof(struct pm4_map_process) + 193 size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_map_process) +
195 max_num_of_processes * max_num_of_queues_per_process * 194 max_num_of_queues_per_device *
196 sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2; 195 sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2;
197 196
198 /* Add size of HIQ & DIQ */ 197 /* Add size of HIQ & DIQ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index b189f9791c90..ecc78ece634c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -135,6 +135,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
135 135
136 mutex_lock(&dqm->lock); 136 mutex_lock(&dqm->lock);
137 137
138 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
139 pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
140 dqm->total_queue_count);
141 mutex_unlock(&dqm->lock);
142 return -EPERM;
143 }
144
138 if (list_empty(&qpd->queues_list)) { 145 if (list_empty(&qpd->queues_list)) {
139 retval = allocate_vmid(dqm, qpd, q); 146 retval = allocate_vmid(dqm, qpd, q);
140 if (retval != 0) { 147 if (retval != 0) {
@@ -161,8 +168,18 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
161 168
162 list_add(&q->list, &qpd->queues_list); 169 list_add(&q->list, &qpd->queues_list);
163 dqm->queue_count++; 170 dqm->queue_count++;
171
164 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) 172 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
165 dqm->sdma_queue_count++; 173 dqm->sdma_queue_count++;
174
175 /*
176 * Unconditionally increment this counter, regardless of the queue's
177 * type or whether the queue is active.
178 */
179 dqm->total_queue_count++;
180 pr_debug("Total of %d queues are accountable so far\n",
181 dqm->total_queue_count);
182
166 mutex_unlock(&dqm->lock); 183 mutex_unlock(&dqm->lock);
167 return 0; 184 return 0;
168} 185}
@@ -297,6 +314,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
297 if (list_empty(&qpd->queues_list)) 314 if (list_empty(&qpd->queues_list))
298 deallocate_vmid(dqm, qpd, q); 315 deallocate_vmid(dqm, qpd, q);
299 dqm->queue_count--; 316 dqm->queue_count--;
317
318 /*
319 * Unconditionally decrement this counter, regardless of the queue's
320 * type
321 */
322 dqm->total_queue_count--;
323 pr_debug("Total of %d queues are accountable so far\n",
324 dqm->total_queue_count);
325
300out: 326out:
301 mutex_unlock(&dqm->lock); 327 mutex_unlock(&dqm->lock);
302 return retval; 328 return retval;
@@ -470,10 +496,14 @@ int init_pipelines(struct device_queue_manager *dqm,
470 496
471 for (i = 0; i < pipes_num; i++) { 497 for (i = 0; i < pipes_num; i++) {
472 inx = i + first_pipe; 498 inx = i + first_pipe;
499 /*
500 * HPD buffer on GTT is allocated by amdkfd, no need to waste
501 * space in GTT for pipelines we don't initialize
502 */
473 pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; 503 pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
474 pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); 504 pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
475 /* = log2(bytes/4)-1 */ 505 /* = log2(bytes/4)-1 */
476 kfd2kgd->init_pipeline(dqm->dev->kgd, i, 506 kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
477 CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); 507 CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
478 } 508 }
479 509
@@ -488,8 +518,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
488 518
489 pr_debug("kfd: In %s\n", __func__); 519 pr_debug("kfd: In %s\n", __func__);
490 520
491 retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE); 521 retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
492
493 return retval; 522 return retval;
494} 523}
495 524
@@ -744,6 +773,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
744 pr_debug("kfd: In func %s\n", __func__); 773 pr_debug("kfd: In func %s\n", __func__);
745 774
746 mutex_lock(&dqm->lock); 775 mutex_lock(&dqm->lock);
776 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
777 pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
778 dqm->total_queue_count);
779 mutex_unlock(&dqm->lock);
780 return -EPERM;
781 }
782
783 /*
784 * Unconditionally increment this counter, regardless of the queue's
785 * type or whether the queue is active.
786 */
787 dqm->total_queue_count++;
788 pr_debug("Total of %d queues are accountable so far\n",
789 dqm->total_queue_count);
790
747 list_add(&kq->list, &qpd->priv_queue_list); 791 list_add(&kq->list, &qpd->priv_queue_list);
748 dqm->queue_count++; 792 dqm->queue_count++;
749 qpd->is_debug = true; 793 qpd->is_debug = true;
@@ -767,6 +811,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
767 dqm->queue_count--; 811 dqm->queue_count--;
768 qpd->is_debug = false; 812 qpd->is_debug = false;
769 execute_queues_cpsch(dqm, false); 813 execute_queues_cpsch(dqm, false);
814 /*
815 * Unconditionally decrement this counter, regardless of the queue's
816 * type.
817 */
 818 dqm->total_queue_count--;
819 pr_debug("Total of %d queues are accountable so far\n",
820 dqm->total_queue_count);
770 mutex_unlock(&dqm->lock); 821 mutex_unlock(&dqm->lock);
771} 822}
772 823
@@ -793,6 +844,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
793 844
794 mutex_lock(&dqm->lock); 845 mutex_lock(&dqm->lock);
795 846
847 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
848 pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
849 dqm->total_queue_count);
850 retval = -EPERM;
851 goto out;
852 }
853
796 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) 854 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
797 select_sdma_engine_id(q); 855 select_sdma_engine_id(q);
798 856
@@ -817,6 +875,14 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
817 875
818 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) 876 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
819 dqm->sdma_queue_count++; 877 dqm->sdma_queue_count++;
878 /*
879 * Unconditionally increment this counter, regardless of the queue's
880 * type or whether the queue is active.
881 */
882 dqm->total_queue_count++;
883
884 pr_debug("Total of %d queues are accountable so far\n",
885 dqm->total_queue_count);
820 886
821out: 887out:
822 mutex_unlock(&dqm->lock); 888 mutex_unlock(&dqm->lock);
@@ -958,6 +1024,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
958 1024
959 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); 1025 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
960 1026
1027 /*
1028 * Unconditionally decrement this counter, regardless of the queue's
1029 * type
1030 */
1031 dqm->total_queue_count--;
1032 pr_debug("Total of %d queues are accountable so far\n",
1033 dqm->total_queue_count);
1034
961 mutex_unlock(&dqm->lock); 1035 mutex_unlock(&dqm->lock);
962 1036
963 return 0; 1037 return 0;
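
The accounting hunks above all enforce one invariant: every create path checks the per-device cap and bumps total_queue_count under dqm->lock, and every destroy path drops it under the same lock, regardless of queue type. A minimal userspace sketch of the pattern follows; queue_tracker, MAX_QUEUES and the pthread locking are illustrative stand-ins, not the amdkfd API.

    #include <pthread.h>
    #include <stdbool.h>

    #define MAX_QUEUES 4096

    struct queue_tracker {
        pthread_mutex_t lock;           /* plays the role of dqm->lock */
        unsigned int total_queue_count;
    };

    /* Fails before touching any other state once the cap is reached. */
    static bool tracker_create(struct queue_tracker *t)
    {
        bool ok = false;

        pthread_mutex_lock(&t->lock);
        if (t->total_queue_count < MAX_QUEUES) {
            t->total_queue_count++;     /* counted regardless of type */
            ok = true;
        }
        pthread_mutex_unlock(&t->lock);
        return ok;
    }

    static void tracker_destroy(struct queue_tracker *t)
    {
        pthread_mutex_lock(&t->lock);
        t->total_queue_count--;         /* mirrors every create path */
        pthread_mutex_unlock(&t->lock);
    }
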
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index e7b17b28330e..d64f86cda34f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -144,6 +144,7 @@ struct device_queue_manager {
144 unsigned int processes_count; 144 unsigned int processes_count;
145 unsigned int queue_count; 145 unsigned int queue_count;
146 unsigned int sdma_queue_count; 146 unsigned int sdma_queue_count;
147 unsigned int total_queue_count;
147 unsigned int next_pipe_to_allocate; 148 unsigned int next_pipe_to_allocate;
148 unsigned int *allocated_queues; 149 unsigned int *allocated_queues;
149 unsigned int sdma_bitmap; 150 unsigned int sdma_bitmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index ac5445415667..3c6221905bc4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
50MODULE_PARM_DESC(sched_policy, 50MODULE_PARM_DESC(sched_policy,
51 "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)"); 51 "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
52 52
53int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT; 53int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
54module_param(max_num_of_processes, int, 0444); 54module_param(max_num_of_queues_per_device, int, 0444);
55MODULE_PARM_DESC(max_num_of_processes, 55MODULE_PARM_DESC(max_num_of_queues_per_device,
56 "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes"); 56 "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
57
58int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
59module_param(max_num_of_queues_per_process, int, 0444);
60MODULE_PARM_DESC(max_num_of_queues_per_process,
61 "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
62 57
63bool kgd2kfd_init(unsigned interface_version, 58bool kgd2kfd_init(unsigned interface_version,
64 const struct kfd2kgd_calls *f2g, 59 const struct kfd2kgd_calls *f2g,
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
100 } 95 }
101 96
102 /* Verify module parameters */ 97 /* Verify module parameters */
103 if ((max_num_of_processes < 0) || 98 if ((max_num_of_queues_per_device < 0) ||
104 (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) { 99 (max_num_of_queues_per_device >
105 pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n"); 100 KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
106 return -1; 101 pr_err("kfd: max_num_of_queues_per_device must be between 0 and KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
107 }
108
109 if ((max_num_of_queues_per_process < 0) ||
110 (max_num_of_queues_per_process >
111 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
112 pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
113 return -1; 102 return -1;
114 } 103 }
115 104
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 4c25ef504f79..6cfe7f1f18cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex);
30 30
31int kfd_pasid_init(void) 31int kfd_pasid_init(void)
32{ 32{
33 pasid_limit = max_num_of_processes; 33 pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
34 34
35 pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); 35 pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
36 if (!pasid_bitmap) 36 if (!pasid_bitmap)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 1b35a9c87437..5a44f2fecf38 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -52,20 +52,19 @@
52#define kfd_alloc_struct(ptr_to_struct) \ 52#define kfd_alloc_struct(ptr_to_struct) \
53 ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) 53 ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
54 54
55/* Kernel module parameter to specify maximum number of supported processes */
56extern int max_num_of_processes;
57
58#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
59#define KFD_MAX_NUM_OF_PROCESSES 512 55#define KFD_MAX_NUM_OF_PROCESSES 512
56#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
60 57
61/* 58/*
62 * Kernel module parameter to specify maximum number of supported queues 59 * Kernel module parameter to specify maximum number of supported queues per
63 * per process 60 * device
64 */ 61 */
65extern int max_num_of_queues_per_process; 62extern int max_num_of_queues_per_device;
66 63
67#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128 64#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
68#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 65#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
66 (KFD_MAX_NUM_OF_PROCESSES * \
67 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
69 68
70#define KFD_KERNEL_QUEUE_SIZE 2048 69#define KFD_KERNEL_QUEUE_SIZE 2048
71 70
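
With the two compile-time limits above, the derived ceiling is KFD_MAX_NUM_OF_PROCESSES * KFD_MAX_NUM_OF_QUEUES_PER_PROCESS = 512 * 1024 = 524288 queues per device; the module parameter may be set anywhere up to that bound, while its default stays at 4096.
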
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 513eeb6e402a..ca93ab0449c8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
54 pr_debug("kfd: in %s\n", __func__); 54 pr_debug("kfd: in %s\n", __func__);
55 55
56 found = find_first_zero_bit(pqm->queue_slot_bitmap, 56 found = find_first_zero_bit(pqm->queue_slot_bitmap,
57 max_num_of_queues_per_process); 57 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
58 58
59 pr_debug("kfd: the new slot id %lu\n", found); 59 pr_debug("kfd: the new slot id %lu\n", found);
60 60
61 if (found >= max_num_of_queues_per_process) { 61 if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
62 pr_info("amdkfd: Can not open more queues for process with pasid %d\n", 62 pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
63 pqm->process->pasid); 63 pqm->process->pasid);
64 return -ENOMEM; 64 return -ENOMEM;
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
76 76
77 INIT_LIST_HEAD(&pqm->queues); 77 INIT_LIST_HEAD(&pqm->queues);
78 pqm->queue_slot_bitmap = 78 pqm->queue_slot_bitmap =
79 kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process, 79 kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
80 BITS_PER_BYTE), GFP_KERNEL); 80 BITS_PER_BYTE), GFP_KERNEL);
81 if (pqm->queue_slot_bitmap == NULL) 81 if (pqm->queue_slot_bitmap == NULL)
82 return -ENOMEM; 82 return -ENOMEM;
@@ -206,6 +206,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
206 pqn->kq = NULL; 206 pqn->kq = NULL;
207 retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, 207 retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
208 &q->properties.vmid); 208 &q->properties.vmid);
209 pr_debug("DQM returned %d for create_queue\n", retval);
209 print_queue(q); 210 print_queue(q);
210 break; 211 break;
211 case KFD_QUEUE_TYPE_DIQ: 212 case KFD_QUEUE_TYPE_DIQ:
@@ -226,7 +227,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
226 } 227 }
227 228
228 if (retval != 0) { 229 if (retval != 0) {
229 pr_err("kfd: error dqm create queue\n"); 230 pr_debug("Error dqm create queue\n");
230 goto err_create_queue; 231 goto err_create_queue;
231 } 232 }
232 233
@@ -245,7 +246,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
245err_create_queue: 246err_create_queue:
246 kfree(pqn); 247 kfree(pqn);
247err_allocate_pqn: 248err_allocate_pqn:
249 /* if queues list is empty, unregister process from device */
248 clear_bit(*qid, pqm->queue_slot_bitmap); 250 clear_bit(*qid, pqm->queue_slot_bitmap);
251 if (list_empty(&pqm->queues))
252 dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
249 return retval; 253 return retval;
250} 254}
251 255
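
pqm hands out queue ids from a fixed-size bitmap: find_first_zero_bit() claims the first free slot on create and clear_bit() returns it on the error path. A self-contained sketch of that allocator is below; SLOTS, slot_alloc and slot_free are illustrative names, and the kernel helpers scan word-at-a-time rather than bit-by-bit.

    #include <limits.h>

    #define SLOTS 1024
    #define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long slot_bitmap[SLOTS / BITS_PER_WORD];

    static int slot_alloc(void)
    {
        unsigned int i;

        for (i = 0; i < SLOTS; i++) {
            unsigned long *word = &slot_bitmap[i / BITS_PER_WORD];
            unsigned long mask = 1UL << (i % BITS_PER_WORD);

            if (!(*word & mask)) {      /* first zero bit wins */
                *word |= mask;
                return (int)i;
            }
        }
        return -1;                      /* all slots taken: -ENOMEM above */
    }

    static void slot_free(unsigned int i)
    {
        slot_bitmap[i / BITS_PER_WORD] &= ~(1UL << (i % BITS_PER_WORD));
    }
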
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index d4762799351d..a9041d1a8ff0 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -32,6 +32,8 @@
32struct tda998x_priv { 32struct tda998x_priv {
33 struct i2c_client *cec; 33 struct i2c_client *cec;
34 struct i2c_client *hdmi; 34 struct i2c_client *hdmi;
35 struct mutex mutex;
36 struct delayed_work dwork;
35 uint16_t rev; 37 uint16_t rev;
36 uint8_t current_page; 38 uint8_t current_page;
37 int dpms; 39 int dpms;
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
402 uint8_t addr = REG2ADDR(reg); 404 uint8_t addr = REG2ADDR(reg);
403 int ret; 405 int ret;
404 406
407 mutex_lock(&priv->mutex);
405 ret = set_page(priv, reg); 408 ret = set_page(priv, reg);
406 if (ret < 0) 409 if (ret < 0)
407 return ret; 410 goto out;
408 411
409 ret = i2c_master_send(client, &addr, sizeof(addr)); 412 ret = i2c_master_send(client, &addr, sizeof(addr));
410 if (ret < 0) 413 if (ret < 0)
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
414 if (ret < 0) 417 if (ret < 0)
415 goto fail; 418 goto fail;
416 419
417 return ret; 420 goto out;
418 421
419fail: 422fail:
420 dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg); 423 dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
424out:
425 mutex_unlock(&priv->mutex);
421 return ret; 426 return ret;
422} 427}
423 428
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
431 buf[0] = REG2ADDR(reg); 436 buf[0] = REG2ADDR(reg);
432 memcpy(&buf[1], p, cnt); 437 memcpy(&buf[1], p, cnt);
433 438
439 mutex_lock(&priv->mutex);
434 ret = set_page(priv, reg); 440 ret = set_page(priv, reg);
435 if (ret < 0) 441 if (ret < 0)
436 return; 442 goto out;
437 443
438 ret = i2c_master_send(client, buf, cnt + 1); 444 ret = i2c_master_send(client, buf, cnt + 1);
439 if (ret < 0) 445 if (ret < 0)
440 dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); 446 dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
447out:
448 mutex_unlock(&priv->mutex);
441} 449}
442 450
443static int 451static int
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
459 uint8_t buf[] = {REG2ADDR(reg), val}; 467 uint8_t buf[] = {REG2ADDR(reg), val};
460 int ret; 468 int ret;
461 469
470 mutex_lock(&priv->mutex);
462 ret = set_page(priv, reg); 471 ret = set_page(priv, reg);
463 if (ret < 0) 472 if (ret < 0)
464 return; 473 goto out;
465 474
466 ret = i2c_master_send(client, buf, sizeof(buf)); 475 ret = i2c_master_send(client, buf, sizeof(buf));
467 if (ret < 0) 476 if (ret < 0)
468 dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); 477 dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
478out:
479 mutex_unlock(&priv->mutex);
469} 480}
470 481
471static void 482static void
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
475 uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; 486 uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
476 int ret; 487 int ret;
477 488
489 mutex_lock(&priv->mutex);
478 ret = set_page(priv, reg); 490 ret = set_page(priv, reg);
479 if (ret < 0) 491 if (ret < 0)
480 return; 492 goto out;
481 493
482 ret = i2c_master_send(client, buf, sizeof(buf)); 494 ret = i2c_master_send(client, buf, sizeof(buf));
483 if (ret < 0) 495 if (ret < 0)
484 dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); 496 dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
497out:
498 mutex_unlock(&priv->mutex);
485} 499}
486 500
487static void 501static void
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
536 reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24); 550 reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
537} 551}
538 552
553/* handle HDMI connect/disconnect */
554static void tda998x_hpd(struct work_struct *work)
555{
556 struct delayed_work *dwork = to_delayed_work(work);
557 struct tda998x_priv *priv =
558 container_of(dwork, struct tda998x_priv, dwork);
559
560 if (priv->encoder && priv->encoder->dev)
561 drm_kms_helper_hotplug_event(priv->encoder->dev);
562}
563
539/* 564/*
540 * only 2 interrupts may occur: screen plug/unplug and EDID read 565 * only 2 interrupts may occur: screen plug/unplug and EDID read
541 */ 566 */
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
559 priv->wq_edid_wait = 0; 584 priv->wq_edid_wait = 0;
560 wake_up(&priv->wq_edid); 585 wake_up(&priv->wq_edid);
561 } else if (cec != 0) { /* HPD change */ 586 } else if (cec != 0) { /* HPD change */
562 if (priv->encoder && priv->encoder->dev) 587 schedule_delayed_work(&priv->dwork, HZ/10);
563 drm_helper_hpd_irq_event(priv->encoder->dev);
564 } 588 }
565 return IRQ_HANDLED; 589 return IRQ_HANDLED;
566} 590}
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
1170 /* disable all IRQs and free the IRQ handler */ 1194 /* disable all IRQs and free the IRQ handler */
1171 cec_write(priv, REG_CEC_RXSHPDINTENA, 0); 1195 cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
1172 reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); 1196 reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
1173 if (priv->hdmi->irq) 1197 if (priv->hdmi->irq) {
1174 free_irq(priv->hdmi->irq, priv); 1198 free_irq(priv->hdmi->irq, priv);
1199 cancel_delayed_work_sync(&priv->dwork);
1200 }
1175 1201
1176 i2c_unregister_device(priv->cec); 1202 i2c_unregister_device(priv->cec);
1177} 1203}
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
1255 struct device_node *np = client->dev.of_node; 1281 struct device_node *np = client->dev.of_node;
1256 u32 video; 1282 u32 video;
1257 int rev_lo, rev_hi, ret; 1283 int rev_lo, rev_hi, ret;
1284 unsigned short cec_addr;
1258 1285
1259 priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); 1286 priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
1260 priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); 1287 priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
1262 1289
1263 priv->current_page = 0xff; 1290 priv->current_page = 0xff;
1264 priv->hdmi = client; 1291 priv->hdmi = client;
1265 priv->cec = i2c_new_dummy(client->adapter, 0x34); 1292 /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
1293 cec_addr = 0x34 + (client->addr & 0x03);
1294 priv->cec = i2c_new_dummy(client->adapter, cec_addr);
1266 if (!priv->cec) 1295 if (!priv->cec)
1267 return -ENODEV; 1296 return -ENODEV;
1268 1297
1269 priv->dpms = DRM_MODE_DPMS_OFF; 1298 priv->dpms = DRM_MODE_DPMS_OFF;
1270 1299
1300 mutex_init(&priv->mutex); /* protect the page access */
1301
1271 /* wake up the device: */ 1302 /* wake up the device: */
1272 cec_write(priv, REG_CEC_ENAMODS, 1303 cec_write(priv, REG_CEC_ENAMODS,
1273 CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); 1304 CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
1323 if (client->irq) { 1354 if (client->irq) {
1324 int irqf_trigger; 1355 int irqf_trigger;
1325 1356
1326 /* init read EDID waitqueue */ 1357 /* init read EDID waitqueue and HPD work */
1327 init_waitqueue_head(&priv->wq_edid); 1358 init_waitqueue_head(&priv->wq_edid);
1359 INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
1328 1360
1329 /* clear pending interrupts */ 1361 /* clear pending interrupts */
1330 reg_read(priv, REG_INT_FLAGS_0); 1362 reg_read(priv, REG_INT_FLAGS_0);
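
Every register helper above now takes the same shape: acquire priv->mutex, select the register page, perform the transfer, and leave through a single unlock label so no early return can escape with the lock held. A condensed, self-contained sketch of that shape; paged_dev, select_page and do_io are hypothetical stand-ins for the driver's I2C accessors.

    #include <pthread.h>
    #include <stdint.h>

    struct paged_dev {
        pthread_mutex_t lock;           /* plays the role of priv->mutex */
        uint8_t current_page;
    };

    /* Hypothetical stand-ins for the driver's real I2C transfers. */
    static int select_page(struct paged_dev *d, uint16_t reg)
    {
        d->current_page = (uint8_t)(reg >> 8);
        return 0;
    }

    static int do_io(struct paged_dev *d, uint16_t reg)
    {
        (void)d;
        (void)reg;
        return 0;
    }

    /*
     * Page select and the access that depends on it form one critical
     * section; every exit funnels through "out" so the lock cannot leak.
     */
    static int paged_io(struct paged_dev *d, uint16_t reg)
    {
        int ret;

        pthread_mutex_lock(&d->lock);
        ret = select_page(d, reg);
        if (ret < 0)
            goto out;
        ret = do_io(d, reg);
    out:
        pthread_mutex_unlock(&d->lock);
        return ret;
    }
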
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 7d2ff31c35a5..f86eb54e7763 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -845,7 +845,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
845 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 845 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
846 if (flags & R600_PTE_SYSTEM) { 846 if (flags & R600_PTE_SYSTEM) {
847 value = radeon_vm_map_gart(rdev, addr); 847 value = radeon_vm_map_gart(rdev, addr);
848 value &= 0xFFFFFFFFFFFFF000ULL;
849 } else if (flags & R600_PTE_VALID) { 848 } else if (flags & R600_PTE_VALID) {
850 value = addr; 849 value = addr;
851 } else { 850 } else {
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 4be2bb7cbef3..ce787a9f12c0 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
372 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 372 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
373 if (flags & R600_PTE_SYSTEM) { 373 if (flags & R600_PTE_SYSTEM) {
374 value = radeon_vm_map_gart(rdev, addr); 374 value = radeon_vm_map_gart(rdev, addr);
375 value &= 0xFFFFFFFFFFFFF000ULL;
376 } else if (flags & R600_PTE_VALID) { 375 } else if (flags & R600_PTE_VALID) {
377 value = addr; 376 value = addr;
378 } else { 377 } else {
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 74f06d540591..279801ca5110 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
644 return r; 644 return r;
645 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; 645 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
646 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; 646 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
647 rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
647 rdev->asic->gart.set_page = &r100_pci_gart_set_page; 648 rdev->asic->gart.set_page = &r100_pci_gart_set_page;
648 return radeon_gart_table_ram_alloc(rdev); 649 return radeon_gart_table_ram_alloc(rdev);
649} 650}
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
681 WREG32(RADEON_AIC_HI_ADDR, 0); 682 WREG32(RADEON_AIC_HI_ADDR, 0);
682} 683}
683 684
685uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
686{
687 return addr;
688}
689
684void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, 690void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
685 uint64_t addr, uint32_t flags) 691 uint64_t entry)
686{ 692{
687 u32 *gtt = rdev->gart.ptr; 693 u32 *gtt = rdev->gart.ptr;
688 gtt[i] = cpu_to_le32(lower_32_bits(addr)); 694 gtt[i] = cpu_to_le32(lower_32_bits(entry));
689} 695}
690 696
691void r100_pci_gart_fini(struct radeon_device *rdev) 697void r100_pci_gart_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 064ad5569cca..08d68f3e13e9 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
73#define R300_PTE_WRITEABLE (1 << 2) 73#define R300_PTE_WRITEABLE (1 << 2)
74#define R300_PTE_READABLE (1 << 3) 74#define R300_PTE_READABLE (1 << 3)
75 75
76void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, 76uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
77 uint64_t addr, uint32_t flags)
78{ 77{
79 void __iomem *ptr = rdev->gart.ptr;
80
81 addr = (lower_32_bits(addr) >> 8) | 78 addr = (lower_32_bits(addr) >> 8) |
82 ((upper_32_bits(addr) & 0xff) << 24); 79 ((upper_32_bits(addr) & 0xff) << 24);
83 if (flags & RADEON_GART_PAGE_READ) 80 if (flags & RADEON_GART_PAGE_READ)
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
86 addr |= R300_PTE_WRITEABLE; 83 addr |= R300_PTE_WRITEABLE;
87 if (!(flags & RADEON_GART_PAGE_SNOOP)) 84 if (!(flags & RADEON_GART_PAGE_SNOOP))
88 addr |= R300_PTE_UNSNOOPED; 85 addr |= R300_PTE_UNSNOOPED;
86 return addr;
87}
88
89void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
90 uint64_t entry)
91{
92 void __iomem *ptr = rdev->gart.ptr;
93
89 /* on x86 we want this to be CPU endian; on powerpc 94 /* on x86 we want this to be CPU endian; on powerpc
90 * without HW swappers, it'll get swapped on the way 95 * without HW swappers, it'll get swapped on the way
91 * into VRAM - so no need for cpu_to_le32 on VRAM tables */ 96 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
92 writel(addr, ((void __iomem *)ptr) + (i * 4)); 97 writel(entry, ((void __iomem *)ptr) + (i * 4));
93} 98}
94 99
95int rv370_pcie_gart_init(struct radeon_device *rdev) 100int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
109 DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); 114 DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
110 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; 115 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
111 rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; 116 rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
117 rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
112 rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; 118 rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
113 return radeon_gart_table_vram_alloc(rdev); 119 return radeon_gart_table_vram_alloc(rdev);
114} 120}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 93e407b7e7a7..5587603b4a89 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
242 * Dummy page 242 * Dummy page
243 */ 243 */
244struct radeon_dummy_page { 244struct radeon_dummy_page {
245 uint64_t entry;
245 struct page *page; 246 struct page *page;
246 dma_addr_t addr; 247 dma_addr_t addr;
247}; 248};
@@ -645,7 +646,7 @@ struct radeon_gart {
645 unsigned num_cpu_pages; 646 unsigned num_cpu_pages;
646 unsigned table_size; 647 unsigned table_size;
647 struct page **pages; 648 struct page **pages;
648 dma_addr_t *pages_addr; 649 uint64_t *pages_entry;
649 bool ready; 650 bool ready;
650}; 651};
651 652
@@ -1858,8 +1859,9 @@ struct radeon_asic {
1858 /* gart */ 1859 /* gart */
1859 struct { 1860 struct {
1860 void (*tlb_flush)(struct radeon_device *rdev); 1861 void (*tlb_flush)(struct radeon_device *rdev);
1862 uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
1861 void (*set_page)(struct radeon_device *rdev, unsigned i, 1863 void (*set_page)(struct radeon_device *rdev, unsigned i,
1862 uint64_t addr, uint32_t flags); 1864 uint64_t entry);
1863 } gart; 1865 } gart;
1864 struct { 1866 struct {
1865 int (*init)(struct radeon_device *rdev); 1867 int (*init)(struct radeon_device *rdev);
@@ -2867,7 +2869,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
2867#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) 2869#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
2868#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) 2870#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
2869#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) 2871#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
2870#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) 2872#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
2873#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
2871#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) 2874#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
2872#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) 2875#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
2873#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) 2876#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f811ee14a237..c0ecd128b14b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
159 DRM_INFO("Forcing AGP to PCIE mode\n"); 159 DRM_INFO("Forcing AGP to PCIE mode\n");
160 rdev->flags |= RADEON_IS_PCIE; 160 rdev->flags |= RADEON_IS_PCIE;
161 rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; 161 rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
162 rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
162 rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; 163 rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
163 } else { 164 } else {
164 DRM_INFO("Forcing AGP to PCI mode\n"); 165 DRM_INFO("Forcing AGP to PCI mode\n");
165 rdev->flags |= RADEON_IS_PCI; 166 rdev->flags |= RADEON_IS_PCI;
166 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; 167 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
168 rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
167 rdev->asic->gart.set_page = &r100_pci_gart_set_page; 169 rdev->asic->gart.set_page = &r100_pci_gart_set_page;
168 } 170 }
169 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 171 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
199 .mc_wait_for_idle = &r100_mc_wait_for_idle, 201 .mc_wait_for_idle = &r100_mc_wait_for_idle,
200 .gart = { 202 .gart = {
201 .tlb_flush = &r100_pci_gart_tlb_flush, 203 .tlb_flush = &r100_pci_gart_tlb_flush,
204 .get_page_entry = &r100_pci_gart_get_page_entry,
202 .set_page = &r100_pci_gart_set_page, 205 .set_page = &r100_pci_gart_set_page,
203 }, 206 },
204 .ring = { 207 .ring = {
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
265 .mc_wait_for_idle = &r100_mc_wait_for_idle, 268 .mc_wait_for_idle = &r100_mc_wait_for_idle,
266 .gart = { 269 .gart = {
267 .tlb_flush = &r100_pci_gart_tlb_flush, 270 .tlb_flush = &r100_pci_gart_tlb_flush,
271 .get_page_entry = &r100_pci_gart_get_page_entry,
268 .set_page = &r100_pci_gart_set_page, 272 .set_page = &r100_pci_gart_set_page,
269 }, 273 },
270 .ring = { 274 .ring = {
@@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = {
359 .mc_wait_for_idle = &r300_mc_wait_for_idle, 363 .mc_wait_for_idle = &r300_mc_wait_for_idle,
360 .gart = { 364 .gart = {
361 .tlb_flush = &r100_pci_gart_tlb_flush, 365 .tlb_flush = &r100_pci_gart_tlb_flush,
366 .get_page_entry = &r100_pci_gart_get_page_entry,
362 .set_page = &r100_pci_gart_set_page, 367 .set_page = &r100_pci_gart_set_page,
363 }, 368 },
364 .ring = { 369 .ring = {
@@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
425 .mc_wait_for_idle = &r300_mc_wait_for_idle, 430 .mc_wait_for_idle = &r300_mc_wait_for_idle,
426 .gart = { 431 .gart = {
427 .tlb_flush = &rv370_pcie_gart_tlb_flush, 432 .tlb_flush = &rv370_pcie_gart_tlb_flush,
433 .get_page_entry = &rv370_pcie_gart_get_page_entry,
428 .set_page = &rv370_pcie_gart_set_page, 434 .set_page = &rv370_pcie_gart_set_page,
429 }, 435 },
430 .ring = { 436 .ring = {
@@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = {
491 .mc_wait_for_idle = &r300_mc_wait_for_idle, 497 .mc_wait_for_idle = &r300_mc_wait_for_idle,
492 .gart = { 498 .gart = {
493 .tlb_flush = &rv370_pcie_gart_tlb_flush, 499 .tlb_flush = &rv370_pcie_gart_tlb_flush,
500 .get_page_entry = &rv370_pcie_gart_get_page_entry,
494 .set_page = &rv370_pcie_gart_set_page, 501 .set_page = &rv370_pcie_gart_set_page,
495 }, 502 },
496 .ring = { 503 .ring = {
@@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = {
557 .mc_wait_for_idle = &rs400_mc_wait_for_idle, 564 .mc_wait_for_idle = &rs400_mc_wait_for_idle,
558 .gart = { 565 .gart = {
559 .tlb_flush = &rs400_gart_tlb_flush, 566 .tlb_flush = &rs400_gart_tlb_flush,
567 .get_page_entry = &rs400_gart_get_page_entry,
560 .set_page = &rs400_gart_set_page, 568 .set_page = &rs400_gart_set_page,
561 }, 569 },
562 .ring = { 570 .ring = {
@@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = {
623 .mc_wait_for_idle = &rs600_mc_wait_for_idle, 631 .mc_wait_for_idle = &rs600_mc_wait_for_idle,
624 .gart = { 632 .gart = {
625 .tlb_flush = &rs600_gart_tlb_flush, 633 .tlb_flush = &rs600_gart_tlb_flush,
634 .get_page_entry = &rs600_gart_get_page_entry,
626 .set_page = &rs600_gart_set_page, 635 .set_page = &rs600_gart_set_page,
627 }, 636 },
628 .ring = { 637 .ring = {
@@ -689,6 +698,7 @@ static struct radeon_asic rs690_asic = {
689 .mc_wait_for_idle = &rs690_mc_wait_for_idle, 698 .mc_wait_for_idle = &rs690_mc_wait_for_idle,
690 .gart = { 699 .gart = {
691 .tlb_flush = &rs400_gart_tlb_flush, 700 .tlb_flush = &rs400_gart_tlb_flush,
701 .get_page_entry = &rs400_gart_get_page_entry,
692 .set_page = &rs400_gart_set_page, 702 .set_page = &rs400_gart_set_page,
693 }, 703 },
694 .ring = { 704 .ring = {
@@ -755,6 +765,7 @@ static struct radeon_asic rv515_asic = {
755 .mc_wait_for_idle = &rv515_mc_wait_for_idle, 765 .mc_wait_for_idle = &rv515_mc_wait_for_idle,
756 .gart = { 766 .gart = {
757 .tlb_flush = &rv370_pcie_gart_tlb_flush, 767 .tlb_flush = &rv370_pcie_gart_tlb_flush,
768 .get_page_entry = &rv370_pcie_gart_get_page_entry,
758 .set_page = &rv370_pcie_gart_set_page, 769 .set_page = &rv370_pcie_gart_set_page,
759 }, 770 },
760 .ring = { 771 .ring = {
@@ -821,6 +832,7 @@ static struct radeon_asic r520_asic = {
821 .mc_wait_for_idle = &r520_mc_wait_for_idle, 832 .mc_wait_for_idle = &r520_mc_wait_for_idle,
822 .gart = { 833 .gart = {
823 .tlb_flush = &rv370_pcie_gart_tlb_flush, 834 .tlb_flush = &rv370_pcie_gart_tlb_flush,
835 .get_page_entry = &rv370_pcie_gart_get_page_entry,
824 .set_page = &rv370_pcie_gart_set_page, 836 .set_page = &rv370_pcie_gart_set_page,
825 }, 837 },
826 .ring = { 838 .ring = {
@@ -915,6 +927,7 @@ static struct radeon_asic r600_asic = {
915 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 927 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
916 .gart = { 928 .gart = {
917 .tlb_flush = &r600_pcie_gart_tlb_flush, 929 .tlb_flush = &r600_pcie_gart_tlb_flush,
930 .get_page_entry = &rs600_gart_get_page_entry,
918 .set_page = &rs600_gart_set_page, 931 .set_page = &rs600_gart_set_page,
919 }, 932 },
920 .ring = { 933 .ring = {
@@ -998,6 +1011,7 @@ static struct radeon_asic rv6xx_asic = {
998 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1011 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
999 .gart = { 1012 .gart = {
1000 .tlb_flush = &r600_pcie_gart_tlb_flush, 1013 .tlb_flush = &r600_pcie_gart_tlb_flush,
1014 .get_page_entry = &rs600_gart_get_page_entry,
1001 .set_page = &rs600_gart_set_page, 1015 .set_page = &rs600_gart_set_page,
1002 }, 1016 },
1003 .ring = { 1017 .ring = {
@@ -1087,6 +1101,7 @@ static struct radeon_asic rs780_asic = {
1087 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1101 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1088 .gart = { 1102 .gart = {
1089 .tlb_flush = &r600_pcie_gart_tlb_flush, 1103 .tlb_flush = &r600_pcie_gart_tlb_flush,
1104 .get_page_entry = &rs600_gart_get_page_entry,
1090 .set_page = &rs600_gart_set_page, 1105 .set_page = &rs600_gart_set_page,
1091 }, 1106 },
1092 .ring = { 1107 .ring = {
@@ -1189,6 +1204,7 @@ static struct radeon_asic rv770_asic = {
1189 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1204 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1190 .gart = { 1205 .gart = {
1191 .tlb_flush = &r600_pcie_gart_tlb_flush, 1206 .tlb_flush = &r600_pcie_gart_tlb_flush,
1207 .get_page_entry = &rs600_gart_get_page_entry,
1192 .set_page = &rs600_gart_set_page, 1208 .set_page = &rs600_gart_set_page,
1193 }, 1209 },
1194 .ring = { 1210 .ring = {
@@ -1305,6 +1321,7 @@ static struct radeon_asic evergreen_asic = {
1305 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1321 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1306 .gart = { 1322 .gart = {
1307 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1323 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
1324 .get_page_entry = &rs600_gart_get_page_entry,
1308 .set_page = &rs600_gart_set_page, 1325 .set_page = &rs600_gart_set_page,
1309 }, 1326 },
1310 .ring = { 1327 .ring = {
@@ -1395,6 +1412,7 @@ static struct radeon_asic sumo_asic = {
1395 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1412 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1396 .gart = { 1413 .gart = {
1397 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1414 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
1415 .get_page_entry = &rs600_gart_get_page_entry,
1398 .set_page = &rs600_gart_set_page, 1416 .set_page = &rs600_gart_set_page,
1399 }, 1417 },
1400 .ring = { 1418 .ring = {
@@ -1484,6 +1502,7 @@ static struct radeon_asic btc_asic = {
1484 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1502 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1485 .gart = { 1503 .gart = {
1486 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1504 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
1505 .get_page_entry = &rs600_gart_get_page_entry,
1487 .set_page = &rs600_gart_set_page, 1506 .set_page = &rs600_gart_set_page,
1488 }, 1507 },
1489 .ring = { 1508 .ring = {
@@ -1617,6 +1636,7 @@ static struct radeon_asic cayman_asic = {
1617 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1636 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1618 .gart = { 1637 .gart = {
1619 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1638 .tlb_flush = &cayman_pcie_gart_tlb_flush,
1639 .get_page_entry = &rs600_gart_get_page_entry,
1620 .set_page = &rs600_gart_set_page, 1640 .set_page = &rs600_gart_set_page,
1621 }, 1641 },
1622 .vm = { 1642 .vm = {
@@ -1718,6 +1738,7 @@ static struct radeon_asic trinity_asic = {
1718 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1738 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1719 .gart = { 1739 .gart = {
1720 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1740 .tlb_flush = &cayman_pcie_gart_tlb_flush,
1741 .get_page_entry = &rs600_gart_get_page_entry,
1721 .set_page = &rs600_gart_set_page, 1742 .set_page = &rs600_gart_set_page,
1722 }, 1743 },
1723 .vm = { 1744 .vm = {
@@ -1849,6 +1870,7 @@ static struct radeon_asic si_asic = {
1849 .get_gpu_clock_counter = &si_get_gpu_clock_counter, 1870 .get_gpu_clock_counter = &si_get_gpu_clock_counter,
1850 .gart = { 1871 .gart = {
1851 .tlb_flush = &si_pcie_gart_tlb_flush, 1872 .tlb_flush = &si_pcie_gart_tlb_flush,
1873 .get_page_entry = &rs600_gart_get_page_entry,
1852 .set_page = &rs600_gart_set_page, 1874 .set_page = &rs600_gart_set_page,
1853 }, 1875 },
1854 .vm = { 1876 .vm = {
@@ -2012,6 +2034,7 @@ static struct radeon_asic ci_asic = {
2012 .get_gpu_clock_counter = &cik_get_gpu_clock_counter, 2034 .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
2013 .gart = { 2035 .gart = {
2014 .tlb_flush = &cik_pcie_gart_tlb_flush, 2036 .tlb_flush = &cik_pcie_gart_tlb_flush,
2037 .get_page_entry = &rs600_gart_get_page_entry,
2015 .set_page = &rs600_gart_set_page, 2038 .set_page = &rs600_gart_set_page,
2016 }, 2039 },
2017 .vm = { 2040 .vm = {
@@ -2121,6 +2144,7 @@ static struct radeon_asic kv_asic = {
2121 .get_gpu_clock_counter = &cik_get_gpu_clock_counter, 2144 .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
2122 .gart = { 2145 .gart = {
2123 .tlb_flush = &cik_pcie_gart_tlb_flush, 2146 .tlb_flush = &cik_pcie_gart_tlb_flush,
2147 .get_page_entry = &rs600_gart_get_page_entry,
2124 .set_page = &rs600_gart_set_page, 2148 .set_page = &rs600_gart_set_page,
2125 }, 2149 },
2126 .vm = { 2150 .vm = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 4045a320a424..72bdd3bf0d8e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
67int r100_asic_reset(struct radeon_device *rdev); 67int r100_asic_reset(struct radeon_device *rdev);
68u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); 68u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
69void r100_pci_gart_tlb_flush(struct radeon_device *rdev); 69void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
70uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
70void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, 71void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
71 uint64_t addr, uint32_t flags); 72 uint64_t entry);
72void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); 73void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
73int r100_irq_set(struct radeon_device *rdev); 74int r100_irq_set(struct radeon_device *rdev);
74int r100_irq_process(struct radeon_device *rdev); 75int r100_irq_process(struct radeon_device *rdev);
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
172 struct radeon_fence *fence); 173 struct radeon_fence *fence);
173extern int r300_cs_parse(struct radeon_cs_parser *p); 174extern int r300_cs_parse(struct radeon_cs_parser *p);
174extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); 175extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
176extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
175extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, 177extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
176 uint64_t addr, uint32_t flags); 178 uint64_t entry);
177extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); 179extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
178extern int rv370_get_pcie_lanes(struct radeon_device *rdev); 180extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
179extern void r300_set_reg_safe(struct radeon_device *rdev); 181extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
208extern int rs400_suspend(struct radeon_device *rdev); 210extern int rs400_suspend(struct radeon_device *rdev);
209extern int rs400_resume(struct radeon_device *rdev); 211extern int rs400_resume(struct radeon_device *rdev);
210void rs400_gart_tlb_flush(struct radeon_device *rdev); 212void rs400_gart_tlb_flush(struct radeon_device *rdev);
213uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
211void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, 214void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
212 uint64_t addr, uint32_t flags); 215 uint64_t entry);
213uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); 216uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
214void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 217void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
215int rs400_gart_init(struct radeon_device *rdev); 218int rs400_gart_init(struct radeon_device *rdev);
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
232void rs600_irq_disable(struct radeon_device *rdev); 235void rs600_irq_disable(struct radeon_device *rdev);
233u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); 236u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
234void rs600_gart_tlb_flush(struct radeon_device *rdev); 237void rs600_gart_tlb_flush(struct radeon_device *rdev);
238uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
235void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, 239void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
236 uint64_t addr, uint32_t flags); 240 uint64_t entry);
237uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); 241uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
238void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 242void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
239void rs600_bandwidth_update(struct radeon_device *rdev); 243void rs600_bandwidth_update(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0ec65168f331..bd7519fdd3f4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
774 rdev->dummy_page.page = NULL; 774 rdev->dummy_page.page = NULL;
775 return -ENOMEM; 775 return -ENOMEM;
776 } 776 }
777 rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
778 RADEON_GART_PAGE_DUMMY);
777 return 0; 779 return 0;
778} 780}
779 781
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 84146d5901aa..5450fa95a47e 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
165 radeon_bo_unpin(rdev->gart.robj); 165 radeon_bo_unpin(rdev->gart.robj);
166 radeon_bo_unreserve(rdev->gart.robj); 166 radeon_bo_unreserve(rdev->gart.robj);
167 rdev->gart.table_addr = gpu_addr; 167 rdev->gart.table_addr = gpu_addr;
168
169 if (!r) {
170 int i;
171
172 /* We might have dropped some GART table updates while it wasn't
173 * mapped, restore all entries
174 */
175 for (i = 0; i < rdev->gart.num_gpu_pages; i++)
176 radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
177 mb();
178 radeon_gart_tlb_flush(rdev);
179 }
180
168 return r; 181 return r;
169} 182}
170 183
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
228 unsigned t; 241 unsigned t;
229 unsigned p; 242 unsigned p;
230 int i, j; 243 int i, j;
231 u64 page_base;
232 244
233 if (!rdev->gart.ready) { 245 if (!rdev->gart.ready) {
234 WARN(1, "trying to unbind memory from uninitialized GART !\n"); 246 WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
239 for (i = 0; i < pages; i++, p++) { 251 for (i = 0; i < pages; i++, p++) {
240 if (rdev->gart.pages[p]) { 252 if (rdev->gart.pages[p]) {
241 rdev->gart.pages[p] = NULL; 253 rdev->gart.pages[p] = NULL;
242 rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
243 page_base = rdev->gart.pages_addr[p];
244 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 254 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
255 rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
245 if (rdev->gart.ptr) { 256 if (rdev->gart.ptr) {
246 radeon_gart_set_page(rdev, t, page_base, 257 radeon_gart_set_page(rdev, t,
247 RADEON_GART_PAGE_DUMMY); 258 rdev->dummy_page.entry);
248 } 259 }
249 page_base += RADEON_GPU_PAGE_SIZE;
250 } 260 }
251 } 261 }
252 } 262 }
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
274{ 284{
275 unsigned t; 285 unsigned t;
276 unsigned p; 286 unsigned p;
277 uint64_t page_base; 287 uint64_t page_base, page_entry;
278 int i, j; 288 int i, j;
279 289
280 if (!rdev->gart.ready) { 290 if (!rdev->gart.ready) {
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
285 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); 295 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
286 296
287 for (i = 0; i < pages; i++, p++) { 297 for (i = 0; i < pages; i++, p++) {
288 rdev->gart.pages_addr[p] = dma_addr[i];
289 rdev->gart.pages[p] = pagelist[i]; 298 rdev->gart.pages[p] = pagelist[i];
290 if (rdev->gart.ptr) { 299 page_base = dma_addr[i];
291 page_base = rdev->gart.pages_addr[p]; 300 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
292 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 301 page_entry = radeon_gart_get_page_entry(page_base, flags);
293 radeon_gart_set_page(rdev, t, page_base, flags); 302 rdev->gart.pages_entry[t] = page_entry;
294 page_base += RADEON_GPU_PAGE_SIZE; 303 if (rdev->gart.ptr) {
304 radeon_gart_set_page(rdev, t, page_entry);
295 } 305 }
306 page_base += RADEON_GPU_PAGE_SIZE;
296 } 307 }
297 } 308 }
298 mb(); 309 mb();
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev)
334 radeon_gart_fini(rdev); 345 radeon_gart_fini(rdev);
335 return -ENOMEM; 346 return -ENOMEM;
336 } 347 }
337 rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) * 348 rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
338 rdev->gart.num_cpu_pages); 349 rdev->gart.num_gpu_pages);
339 if (rdev->gart.pages_addr == NULL) { 350 if (rdev->gart.pages_entry == NULL) {
340 radeon_gart_fini(rdev); 351 radeon_gart_fini(rdev);
341 return -ENOMEM; 352 return -ENOMEM;
342 } 353 }
343 /* set GART entry to point to the dummy page by default */ 354 /* set GART entry to point to the dummy page by default */
344 for (i = 0; i < rdev->gart.num_cpu_pages; i++) { 355 for (i = 0; i < rdev->gart.num_gpu_pages; i++)
345 rdev->gart.pages_addr[i] = rdev->dummy_page.addr; 356 rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
346 }
347 return 0; 357 return 0;
348} 358}
349 359
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev)
356 */ 366 */
357void radeon_gart_fini(struct radeon_device *rdev) 367void radeon_gart_fini(struct radeon_device *rdev)
358{ 368{
359 if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { 369 if (rdev->gart.ready) {
360 /* unbind pages */ 370 /* unbind pages */
361 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); 371 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
362 } 372 }
363 rdev->gart.ready = false; 373 rdev->gart.ready = false;
364 vfree(rdev->gart.pages); 374 vfree(rdev->gart.pages);
365 vfree(rdev->gart.pages_addr); 375 vfree(rdev->gart.pages_entry);
366 rdev->gart.pages = NULL; 376 rdev->gart.pages = NULL;
367 rdev->gart.pages_addr = NULL; 377 rdev->gart.pages_entry = NULL;
368 378
369 radeon_dummy_page_fini(rdev); 379 radeon_dummy_page_fini(rdev);
370} 380}
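
The radeon refactor splits the old set_page(addr, flags) into two steps: get_page_entry() encodes (address, flags) into a hardware PTE once, the result is cached in gart.pages_entry[], and set_page() only writes an already-encoded entry. Because the cache survives while the table itself is unmapped, radeon_gart_table_vram_pin() can simply replay every entry after pinning. A compact sketch under those assumptions; the PTE bit layout below is illustrative, loosely following the R600 bits shown earlier.

    #include <stdint.h>

    #define PTE_VALID  (1u << 0)        /* illustrative bit layout */
    #define PTE_SYSTEM (1u << 1)
    #define NUM_PAGES  8

    static uint64_t pte_cache[NUM_PAGES];   /* mirrors gart.pages_entry[] */
    static uint64_t hw_table[NUM_PAGES];    /* stands in for the MMIO table */

    /* Encode once: a pure function of (addr, flags), no table access. */
    static uint64_t get_page_entry(uint64_t addr, uint32_t flags)
    {
        return (addr & ~0xFFFull) | PTE_SYSTEM |
               ((flags & 1u) ? PTE_VALID : 0);
    }

    /* Write an already-encoded entry; used for both updates and replay. */
    static void set_page(unsigned int i, uint64_t entry)
    {
        hw_table[i] = entry;
    }

    static void bind_page(unsigned int i, uint64_t addr, uint32_t flags)
    {
        pte_cache[i] = get_page_entry(addr, flags);
        set_page(i, pte_cache[i]);
    }

    /* After the table is (re)pinned, restore every cached entry. */
    static void replay_table(void)
    {
        unsigned int i;

        for (i = 0; i < NUM_PAGES; i++)
            set_page(i, pte_cache[i]);
    }
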
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 7b274205eeaf..061eaa9c19c7 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -392,7 +392,7 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
392static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, 392static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
393 uint32_t hpd_size, uint64_t hpd_gpu_addr) 393 uint32_t hpd_size, uint64_t hpd_gpu_addr)
394{ 394{
395 uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1; 395 uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
396 uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC); 396 uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
397 397
398 lock_srbm(kgd, mec, pipe, 0, 0); 398 lock_srbm(kgd, mec, pipe, 0, 0);
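
Assuming CIK_PIPE_PER_MEC is 4, the old pre-increment mapped pipe_id 0 to mec 1, pipe 1 and pipe_id 3 to mec 2, pipe 0; with the increment removed, pipe_id 0 maps to mec 1, pipe 0 and pipe_id 3 to mec 1, pipe 3, the intended layout.
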
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index cde48c42b30a..06d2246d07f1 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
587 uint64_t result; 587 uint64_t result;
588 588
589 /* page table offset */ 589 /* page table offset */
590 result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; 590 result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
591 591 result &= ~RADEON_GPU_PAGE_MASK;
592 /* in case cpu page size != gpu page size*/
593 result |= addr & (~PAGE_MASK);
594 592
595 return result; 593 return result;
596} 594}
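
Assuming the usual 4 KiB RADEON_GPU_PAGE_SIZE, an address such as 0x12345678 indexes pages_entry[0x12345], and masking with ~RADEON_GPU_PAGE_MASK strips the low 12 flag bits from the cached PTE, leaving a bus address the DMA engines can consume directly; this is why the explicit value &= 0xFFFFFFFFFFFFF000ULL in the cik/ni/si write-pages paths is dropped.
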
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c5799f16aa4b..34e3235f41d2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
212#define RS400_PTE_WRITEABLE (1 << 2) 212#define RS400_PTE_WRITEABLE (1 << 2)
213#define RS400_PTE_READABLE (1 << 3) 213#define RS400_PTE_READABLE (1 << 3)
214 214
215void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, 215uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
216 uint64_t addr, uint32_t flags)
217{ 216{
218 uint32_t entry; 217 uint32_t entry;
219 u32 *gtt = rdev->gart.ptr;
220 218
221 entry = (lower_32_bits(addr) & PAGE_MASK) | 219 entry = (lower_32_bits(addr) & PAGE_MASK) |
222 ((upper_32_bits(addr) & 0xff) << 4); 220 ((upper_32_bits(addr) & 0xff) << 4);
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
226 entry |= RS400_PTE_WRITEABLE; 224 entry |= RS400_PTE_WRITEABLE;
227 if (!(flags & RADEON_GART_PAGE_SNOOP)) 225 if (!(flags & RADEON_GART_PAGE_SNOOP))
228 entry |= RS400_PTE_UNSNOOPED; 226 entry |= RS400_PTE_UNSNOOPED;
229 entry = cpu_to_le32(entry); 227 return entry;
230 gtt[i] = entry; 228}
229
230void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
231 uint64_t entry)
232{
233 u32 *gtt = rdev->gart.ptr;
234 gtt[i] = cpu_to_le32(lower_32_bits(entry));
231} 235}
232 236
233int rs400_mc_wait_for_idle(struct radeon_device *rdev) 237int rs400_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 68f154a451c0..d81182ad53ec 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -626,11 +626,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
626 radeon_gart_table_vram_free(rdev); 626 radeon_gart_table_vram_free(rdev);
627} 627}
628 628
629void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, 629uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
630 uint64_t addr, uint32_t flags)
631{ 630{
632 void __iomem *ptr = (void *)rdev->gart.ptr;
633
634 addr = addr & 0xFFFFFFFFFFFFF000ULL; 631 addr = addr & 0xFFFFFFFFFFFFF000ULL;
635 addr |= R600_PTE_SYSTEM; 632 addr |= R600_PTE_SYSTEM;
636 if (flags & RADEON_GART_PAGE_VALID) 633 if (flags & RADEON_GART_PAGE_VALID)
@@ -641,7 +638,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
641 addr |= R600_PTE_WRITEABLE; 638 addr |= R600_PTE_WRITEABLE;
642 if (flags & RADEON_GART_PAGE_SNOOP) 639 if (flags & RADEON_GART_PAGE_SNOOP)
643 addr |= R600_PTE_SNOOPED; 640 addr |= R600_PTE_SNOOPED;
644 writeq(addr, ptr + (i * 8)); 641 return addr;
642}
643
644void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
645 uint64_t entry)
646{
647 void __iomem *ptr = (void *)rdev->gart.ptr;
648 writeq(entry, ptr + (i * 8));
645} 649}
646 650
647int rs600_irq_set(struct radeon_device *rdev) 651int rs600_irq_set(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index aa7b872b2c43..83207929fc62 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,
123 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 123 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
124 if (flags & R600_PTE_SYSTEM) { 124 if (flags & R600_PTE_SYSTEM) {
125 value = radeon_vm_map_gart(rdev, addr); 125 value = radeon_vm_map_gart(rdev, addr);
126 value &= 0xFFFFFFFFFFFFF000ULL;
127 } else if (flags & R600_PTE_VALID) { 126 } else if (flags & R600_PTE_VALID) {
128 value = addr; 127 value = addr;
129 } else { 128 } else {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 7b5d22110f25..6c6b655defcf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
406 if (unlikely(ret != 0)) 406 if (unlikely(ret != 0))
407 --dev_priv->num_3d_resources; 407 --dev_priv->num_3d_resources;
408 } else if (unhide_svga) { 408 } else if (unhide_svga) {
409 mutex_lock(&dev_priv->hw_mutex);
410 vmw_write(dev_priv, SVGA_REG_ENABLE, 409 vmw_write(dev_priv, SVGA_REG_ENABLE,
411 vmw_read(dev_priv, SVGA_REG_ENABLE) & 410 vmw_read(dev_priv, SVGA_REG_ENABLE) &
412 ~SVGA_REG_ENABLE_HIDE); 411 ~SVGA_REG_ENABLE_HIDE);
413 mutex_unlock(&dev_priv->hw_mutex);
414 } 412 }
415 413
416 mutex_unlock(&dev_priv->release_mutex); 414 mutex_unlock(&dev_priv->release_mutex);
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
433 mutex_lock(&dev_priv->release_mutex); 431 mutex_lock(&dev_priv->release_mutex);
434 if (unlikely(--dev_priv->num_3d_resources == 0)) 432 if (unlikely(--dev_priv->num_3d_resources == 0))
435 vmw_release_device(dev_priv); 433 vmw_release_device(dev_priv);
436 else if (hide_svga) { 434 else if (hide_svga)
437 mutex_lock(&dev_priv->hw_mutex);
438 vmw_write(dev_priv, SVGA_REG_ENABLE, 435 vmw_write(dev_priv, SVGA_REG_ENABLE,
439 vmw_read(dev_priv, SVGA_REG_ENABLE) | 436 vmw_read(dev_priv, SVGA_REG_ENABLE) |
440 SVGA_REG_ENABLE_HIDE); 437 SVGA_REG_ENABLE_HIDE);
441 mutex_unlock(&dev_priv->hw_mutex);
442 }
443 438
444 n3d = (int32_t) dev_priv->num_3d_resources; 439 n3d = (int32_t) dev_priv->num_3d_resources;
445 mutex_unlock(&dev_priv->release_mutex); 440 mutex_unlock(&dev_priv->release_mutex);
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
600 dev_priv->dev = dev; 595 dev_priv->dev = dev;
601 dev_priv->vmw_chipset = chipset; 596 dev_priv->vmw_chipset = chipset;
602 dev_priv->last_read_seqno = (uint32_t) -100; 597 dev_priv->last_read_seqno = (uint32_t) -100;
603 mutex_init(&dev_priv->hw_mutex);
604 mutex_init(&dev_priv->cmdbuf_mutex); 598 mutex_init(&dev_priv->cmdbuf_mutex);
605 mutex_init(&dev_priv->release_mutex); 599 mutex_init(&dev_priv->release_mutex);
606 mutex_init(&dev_priv->binding_mutex); 600 mutex_init(&dev_priv->binding_mutex);
607 rwlock_init(&dev_priv->resource_lock); 601 rwlock_init(&dev_priv->resource_lock);
608 ttm_lock_init(&dev_priv->reservation_sem); 602 ttm_lock_init(&dev_priv->reservation_sem);
603 spin_lock_init(&dev_priv->hw_lock);
604 spin_lock_init(&dev_priv->waiter_lock);
605 spin_lock_init(&dev_priv->cap_lock);
609 606
610 for (i = vmw_res_context; i < vmw_res_max; ++i) { 607 for (i = vmw_res_context; i < vmw_res_max; ++i) {
611 idr_init(&dev_priv->res_idr[i]); 608 idr_init(&dev_priv->res_idr[i]);
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
626 623
627 dev_priv->enable_fb = enable_fbdev; 624 dev_priv->enable_fb = enable_fbdev;
628 625
629 mutex_lock(&dev_priv->hw_mutex);
630
631 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 626 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
632 svga_id = vmw_read(dev_priv, SVGA_REG_ID); 627 svga_id = vmw_read(dev_priv, SVGA_REG_ID);
633 if (svga_id != SVGA_ID_2) { 628 if (svga_id != SVGA_ID_2) {
634 ret = -ENOSYS; 629 ret = -ENOSYS;
635 DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); 630 DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
636 mutex_unlock(&dev_priv->hw_mutex);
637 goto out_err0; 631 goto out_err0;
638 } 632 }
639 633
@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
683 dev_priv->prim_bb_mem = dev_priv->vram_size; 677 dev_priv->prim_bb_mem = dev_priv->vram_size;
684 678
685 ret = vmw_dma_masks(dev_priv); 679 ret = vmw_dma_masks(dev_priv);
686 if (unlikely(ret != 0)) { 680 if (unlikely(ret != 0))
687 mutex_unlock(&dev_priv->hw_mutex);
688 goto out_err0; 681 goto out_err0;
689 }
690 682
691 /* 683 /*
692 * Limit back buffer size to VRAM size. Remove this once 684 * Limit back buffer size to VRAM size. Remove this once
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
695 if (dev_priv->prim_bb_mem > dev_priv->vram_size) 687 if (dev_priv->prim_bb_mem > dev_priv->vram_size)
696 dev_priv->prim_bb_mem = dev_priv->vram_size; 688 dev_priv->prim_bb_mem = dev_priv->vram_size;
697 689
698 mutex_unlock(&dev_priv->hw_mutex);
699
700 vmw_print_capabilities(dev_priv->capabilities); 690 vmw_print_capabilities(dev_priv->capabilities);
701 691
702 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 692 if (dev_priv->capabilities & SVGA_CAP_GMR2) {
@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,
1160 if (unlikely(ret != 0)) 1150 if (unlikely(ret != 0))
1161 return ret; 1151 return ret;
1162 vmw_kms_save_vga(dev_priv); 1152 vmw_kms_save_vga(dev_priv);
1163 mutex_lock(&dev_priv->hw_mutex);
1164 vmw_write(dev_priv, SVGA_REG_TRACES, 0); 1153 vmw_write(dev_priv, SVGA_REG_TRACES, 0);
1165 mutex_unlock(&dev_priv->hw_mutex);
1166 } 1154 }
1167 1155
1168 if (active) { 1156 if (active) {
@@ -1196,9 +1184,7 @@ out_no_active_lock:
1196 if (!dev_priv->enable_fb) { 1184 if (!dev_priv->enable_fb) {
1197 vmw_kms_restore_vga(dev_priv); 1185 vmw_kms_restore_vga(dev_priv);
1198 vmw_3d_resource_dec(dev_priv, true); 1186 vmw_3d_resource_dec(dev_priv, true);
1199 mutex_lock(&dev_priv->hw_mutex);
1200 vmw_write(dev_priv, SVGA_REG_TRACES, 1); 1187 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
1201 mutex_unlock(&dev_priv->hw_mutex);
1202 } 1188 }
1203 return ret; 1189 return ret;
1204} 1190}
@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,
1233 DRM_ERROR("Unable to clean VRAM on master drop.\n"); 1219 DRM_ERROR("Unable to clean VRAM on master drop.\n");
1234 vmw_kms_restore_vga(dev_priv); 1220 vmw_kms_restore_vga(dev_priv);
1235 vmw_3d_resource_dec(dev_priv, true); 1221 vmw_3d_resource_dec(dev_priv, true);
1236 mutex_lock(&dev_priv->hw_mutex);
1237 vmw_write(dev_priv, SVGA_REG_TRACES, 1); 1222 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
1238 mutex_unlock(&dev_priv->hw_mutex);
1239 } 1223 }
1240 1224
1241 dev_priv->active_master = &dev_priv->fbdev_master; 1225 dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)
1367 struct drm_device *dev = pci_get_drvdata(pdev); 1351 struct drm_device *dev = pci_get_drvdata(pdev);
1368 struct vmw_private *dev_priv = vmw_priv(dev); 1352 struct vmw_private *dev_priv = vmw_priv(dev);
1369 1353
1370 mutex_lock(&dev_priv->hw_mutex);
1371 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 1354 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
1372 (void) vmw_read(dev_priv, SVGA_REG_ID); 1355 (void) vmw_read(dev_priv, SVGA_REG_ID);
1373 mutex_unlock(&dev_priv->hw_mutex);
1374 1356
1375 /** 1357 /**
1376 * Reclaim 3d reference held by fbdev and potentially 1358 * Reclaim 3d reference held by fbdev and potentially
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4ee799b43d5d..d26a6daa9719 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -399,7 +399,8 @@ struct vmw_private {
399 uint32_t memory_size; 399 uint32_t memory_size;
400 bool has_gmr; 400 bool has_gmr;
401 bool has_mob; 401 bool has_mob;
402 struct mutex hw_mutex; 402 spinlock_t hw_lock;
403 spinlock_t cap_lock;
403 404
404 /* 405 /*
405 * VGA registers. 406 * VGA registers.
@@ -449,8 +450,9 @@ struct vmw_private {
449 atomic_t marker_seq; 450 atomic_t marker_seq;
450 wait_queue_head_t fence_queue; 451 wait_queue_head_t fence_queue;
451 wait_queue_head_t fifo_queue; 452 wait_queue_head_t fifo_queue;
452 int fence_queue_waiters; /* Protected by hw_mutex */ 453 spinlock_t waiter_lock;
453 int goal_queue_waiters; /* Protected by hw_mutex */ 454 int fence_queue_waiters; /* Protected by waiter_lock */
455 int goal_queue_waiters; /* Protected by waiter_lock */
454 atomic_t fifo_queue_waiters; 456 atomic_t fifo_queue_waiters;
455 uint32_t last_read_seqno; 457 uint32_t last_read_seqno;
456 spinlock_t irq_lock; 458 spinlock_t irq_lock;
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
553 return (struct vmw_master *) master->driver_priv; 555 return (struct vmw_master *) master->driver_priv;
554} 556}
555 557
558/*
559 * The locking here is fine-grained, so that it is performed once
560 * for every read and write operation. This is of course costly, but we
561 * don't perform much register access in the timing-critical paths anyway.
562 * In return we get the extra benefit of being sure that we never forget
563 * the hw lock around register accesses.
564 */
556static inline void vmw_write(struct vmw_private *dev_priv, 565static inline void vmw_write(struct vmw_private *dev_priv,
557 unsigned int offset, uint32_t value) 566 unsigned int offset, uint32_t value)
558{ 567{
568 unsigned long irq_flags;
569
570 spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
559 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); 571 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
560 outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); 572 outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
573 spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
561} 574}
562 575
563static inline uint32_t vmw_read(struct vmw_private *dev_priv, 576static inline uint32_t vmw_read(struct vmw_private *dev_priv,
564 unsigned int offset) 577 unsigned int offset)
565{ 578{
566 uint32_t val; 579 unsigned long irq_flags;
580 u32 val;
567 581
582 spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
568 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); 583 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
569 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); 584 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
585 spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
586
570 return val; 587 return val;
571} 588}
572 589
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b7594cb758af..945f1e0dad92 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
35 struct vmw_private *dev_priv; 35 struct vmw_private *dev_priv;
36 spinlock_t lock; 36 spinlock_t lock;
37 struct list_head fence_list; 37 struct list_head fence_list;
38 struct work_struct work, ping_work; 38 struct work_struct work;
39 u32 user_fence_size; 39 u32 user_fence_size;
40 u32 fence_size; 40 u32 fence_size;
41 u32 event_fence_action_size; 41 u32 event_fence_action_size;
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
134 return "svga"; 134 return "svga";
135} 135}
136 136
137static void vmw_fence_ping_func(struct work_struct *work)
138{
139 struct vmw_fence_manager *fman =
140 container_of(work, struct vmw_fence_manager, ping_work);
141
142 vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
143}
144
145static bool vmw_fence_enable_signaling(struct fence *f) 137static bool vmw_fence_enable_signaling(struct fence *f)
146{ 138{
147 struct vmw_fence_obj *fence = 139 struct vmw_fence_obj *fence =
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
155 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) 147 if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
156 return false; 148 return false;
157 149
158 if (mutex_trylock(&dev_priv->hw_mutex)) { 150 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
159 vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
160 mutex_unlock(&dev_priv->hw_mutex);
161 } else
162 schedule_work(&fman->ping_work);
163 151
164 return true; 152 return true;
165} 153}
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
305 INIT_LIST_HEAD(&fman->fence_list); 293 INIT_LIST_HEAD(&fman->fence_list);
306 INIT_LIST_HEAD(&fman->cleanup_list); 294 INIT_LIST_HEAD(&fman->cleanup_list);
307 INIT_WORK(&fman->work, &vmw_fence_work_func); 295 INIT_WORK(&fman->work, &vmw_fence_work_func);
308 INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
309 fman->fifo_down = true; 296 fman->fifo_down = true;
310 fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); 297 fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
311 fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); 298 fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
323 bool lists_empty; 310 bool lists_empty;
324 311
325 (void) cancel_work_sync(&fman->work); 312 (void) cancel_work_sync(&fman->work);
326 (void) cancel_work_sync(&fman->ping_work);
327 313
328 spin_lock_irqsave(&fman->lock, irq_flags); 314 spin_lock_irqsave(&fman->lock, irq_flags);
329 lists_empty = list_empty(&fman->fence_list) && 315 lists_empty = list_empty(&fman->fence_list) &&
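
The deleted ping_work machinery existed only because vmw_fifo_ping_host() used to take the sleeping hw_mutex, which is forbidden from the atomic context of fence signaling; contention was punted to a workqueue. With the spinlock conversion a direct call is safe. A rough userspace sketch of the old trylock-or-defer shape versus the new direct call (function names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hw_mutex = PTHREAD_MUTEX_INITIALIZER;

static void ping(void)       { printf("host pinged\n"); }
static void defer_ping(void) { printf("ping deferred to a worker\n"); }

int main(void)
{
        /* old shape: a sleeping lock can't be taken in atomic context,
         * so the code tried it and punted to a workqueue on contention */
        if (pthread_mutex_trylock(&hw_mutex) == 0) {
                ping();
                pthread_mutex_unlock(&hw_mutex);
        } else {
                defer_ping();
        }
        /* new shape: ping() locks internally with a spinlock, so the
         * caller just calls it: no trylock, no worker */
        return 0;
}
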
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 09e10aefcd8e..39f2b03888e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
44 if (!dev_priv->has_mob) 44 if (!dev_priv->has_mob)
45 return false; 45 return false;
46 46
47 mutex_lock(&dev_priv->hw_mutex); 47 spin_lock(&dev_priv->cap_lock);
48 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); 48 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
49 result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); 49 result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
50 mutex_unlock(&dev_priv->hw_mutex); 50 spin_unlock(&dev_priv->cap_lock);
51 51
52 return (result != 0); 52 return (result != 0);
53 } 53 }
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
120 DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); 120 DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
121 DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); 121 DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
122 122
123 mutex_lock(&dev_priv->hw_mutex);
124 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 123 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
125 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 124 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
126 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); 125 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
143 mb(); 142 mb();
144 143
145 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); 144 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
146 mutex_unlock(&dev_priv->hw_mutex);
147 145
148 max = ioread32(fifo_mem + SVGA_FIFO_MAX); 146 max = ioread32(fifo_mem + SVGA_FIFO_MAX);
149 min = ioread32(fifo_mem + SVGA_FIFO_MIN); 147 min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
160 return vmw_fifo_send_fence(dev_priv, &dummy); 158 return vmw_fifo_send_fence(dev_priv, &dummy);
161} 159}
162 160
163void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason) 161void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
164{ 162{
165 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 163 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
164 static DEFINE_SPINLOCK(ping_lock);
165 unsigned long irq_flags;
166 166
167 /*
168 * The ping_lock is needed because we don't have an atomic
169 * test-and-set of the SVGA_FIFO_BUSY register.
170 */
171 spin_lock_irqsave(&ping_lock, irq_flags);
167 if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { 172 if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
168 iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); 173 iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
169 vmw_write(dev_priv, SVGA_REG_SYNC, reason); 174 vmw_write(dev_priv, SVGA_REG_SYNC, reason);
170 } 175 }
171} 176 spin_unlock_irqrestore(&ping_lock, irq_flags);
172
173void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
174{
175 mutex_lock(&dev_priv->hw_mutex);
176
177 vmw_fifo_ping_host_locked(dev_priv, reason);
178
179 mutex_unlock(&dev_priv->hw_mutex);
180} 177}
181 178
182void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) 179void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
183{ 180{
184 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 181 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
185 182
186 mutex_lock(&dev_priv->hw_mutex);
187
188 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); 183 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
189 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) 184 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
190 ; 185 ;
@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
198 vmw_write(dev_priv, SVGA_REG_TRACES, 193 vmw_write(dev_priv, SVGA_REG_TRACES,
199 dev_priv->traces_state); 194 dev_priv->traces_state);
200 195
201 mutex_unlock(&dev_priv->hw_mutex);
202 vmw_marker_queue_takedown(&fifo->marker_queue); 196 vmw_marker_queue_takedown(&fifo->marker_queue);
203 197
204 if (likely(fifo->static_buffer != NULL)) { 198 if (likely(fifo->static_buffer != NULL)) {
@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
271 return vmw_fifo_wait_noirq(dev_priv, bytes, 265 return vmw_fifo_wait_noirq(dev_priv, bytes,
272 interruptible, timeout); 266 interruptible, timeout);
273 267
274 mutex_lock(&dev_priv->hw_mutex); 268 spin_lock(&dev_priv->waiter_lock);
275 if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { 269 if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
276 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); 270 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
277 outl(SVGA_IRQFLAG_FIFO_PROGRESS, 271 outl(SVGA_IRQFLAG_FIFO_PROGRESS,
@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
280 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 274 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
281 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); 275 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
282 } 276 }
283 mutex_unlock(&dev_priv->hw_mutex); 277 spin_unlock(&dev_priv->waiter_lock);
284 278
285 if (interruptible) 279 if (interruptible)
286 ret = wait_event_interruptible_timeout 280 ret = wait_event_interruptible_timeout
@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
296 else if (likely(ret > 0)) 290 else if (likely(ret > 0))
297 ret = 0; 291 ret = 0;
298 292
299 mutex_lock(&dev_priv->hw_mutex); 293 spin_lock(&dev_priv->waiter_lock);
300 if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { 294 if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
301 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); 295 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
302 dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; 296 dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
303 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 297 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
304 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); 298 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
305 } 299 }
306 mutex_unlock(&dev_priv->hw_mutex); 300 spin_unlock(&dev_priv->waiter_lock);
307 301
308 return ret; 302 return ret;
309} 303}
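
The new static ping_lock compensates for SVGA_FIFO_BUSY being plain MMIO with no atomic read-modify-write: the read of the flag and the conditional write of SVGA_REG_SYNC must appear atomic so that only one caller pings per idle-to-busy transition. A sketch of the test-and-set semantics the lock is emulating, using a C11 atomic in place of the FIFO register (illustrative, not the device's actual behavior):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint fifo_busy;           /* stand-in for SVGA_FIFO_BUSY */

static void ping_host(const char *reason)
{
        /* atomic exchange: only the caller that flips 0 -> 1 pings */
        if (atomic_exchange(&fifo_busy, 1) == 0)
                printf("SVGA_REG_SYNC written, reason: %s\n", reason);
}

int main(void)
{
        ping_host("generic");   /* pings the host */
        ping_host("generic");   /* skipped: already busy */
        return 0;
}
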
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 37881ecf5d7a..69c8ce23123c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
135 (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); 135 (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
136 compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; 136 compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
137 137
138 mutex_lock(&dev_priv->hw_mutex); 138 spin_lock(&dev_priv->cap_lock);
139 for (i = 0; i < max_size; ++i) { 139 for (i = 0; i < max_size; ++i) {
140 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); 140 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
141 compat_cap->pairs[i][0] = i; 141 compat_cap->pairs[i][0] = i;
142 compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); 142 compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
143 } 143 }
144 mutex_unlock(&dev_priv->hw_mutex); 144 spin_unlock(&dev_priv->cap_lock);
145 145
146 return 0; 146 return 0;
147} 147}
@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
191 if (num > SVGA3D_DEVCAP_MAX) 191 if (num > SVGA3D_DEVCAP_MAX)
192 num = SVGA3D_DEVCAP_MAX; 192 num = SVGA3D_DEVCAP_MAX;
193 193
194 mutex_lock(&dev_priv->hw_mutex); 194 spin_lock(&dev_priv->cap_lock);
195 for (i = 0; i < num; ++i) { 195 for (i = 0; i < num; ++i) {
196 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); 196 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
197 *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); 197 *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
198 } 198 }
199 mutex_unlock(&dev_priv->hw_mutex); 199 spin_unlock(&dev_priv->cap_lock);
200 } else if (gb_objects) { 200 } else if (gb_objects) {
201 ret = vmw_fill_compat_cap(dev_priv, bounce, size); 201 ret = vmw_fill_compat_cap(dev_priv, bounce, size);
202 if (unlikely(ret != 0)) 202 if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 0c423766c441..9fe9827ee499 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
62 62
63static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) 63static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
64{ 64{
65 uint32_t busy;
66 65
67 mutex_lock(&dev_priv->hw_mutex); 66 return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
68 busy = vmw_read(dev_priv, SVGA_REG_BUSY);
69 mutex_unlock(&dev_priv->hw_mutex);
70
71 return (busy == 0);
72} 67}
73 68
74void vmw_update_seqno(struct vmw_private *dev_priv, 69void vmw_update_seqno(struct vmw_private *dev_priv,
@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
184 179
185void vmw_seqno_waiter_add(struct vmw_private *dev_priv) 180void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
186{ 181{
187 mutex_lock(&dev_priv->hw_mutex); 182 spin_lock(&dev_priv->waiter_lock);
188 if (dev_priv->fence_queue_waiters++ == 0) { 183 if (dev_priv->fence_queue_waiters++ == 0) {
189 unsigned long irq_flags; 184 unsigned long irq_flags;
190 185
@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
195 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 190 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
196 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); 191 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
197 } 192 }
198 mutex_unlock(&dev_priv->hw_mutex); 193 spin_unlock(&dev_priv->waiter_lock);
199} 194}
200 195
201void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) 196void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
202{ 197{
203 mutex_lock(&dev_priv->hw_mutex); 198 spin_lock(&dev_priv->waiter_lock);
204 if (--dev_priv->fence_queue_waiters == 0) { 199 if (--dev_priv->fence_queue_waiters == 0) {
205 unsigned long irq_flags; 200 unsigned long irq_flags;
206 201
@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
209 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 204 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
210 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); 205 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
211 } 206 }
212 mutex_unlock(&dev_priv->hw_mutex); 207 spin_unlock(&dev_priv->waiter_lock);
213} 208}
214 209
215 210
216void vmw_goal_waiter_add(struct vmw_private *dev_priv) 211void vmw_goal_waiter_add(struct vmw_private *dev_priv)
217{ 212{
218 mutex_lock(&dev_priv->hw_mutex); 213 spin_lock(&dev_priv->waiter_lock);
219 if (dev_priv->goal_queue_waiters++ == 0) { 214 if (dev_priv->goal_queue_waiters++ == 0) {
220 unsigned long irq_flags; 215 unsigned long irq_flags;
221 216
@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
226 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 221 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
227 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); 222 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
228 } 223 }
229 mutex_unlock(&dev_priv->hw_mutex); 224 spin_unlock(&dev_priv->waiter_lock);
230} 225}
231 226
232void vmw_goal_waiter_remove(struct vmw_private *dev_priv) 227void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
233{ 228{
234 mutex_lock(&dev_priv->hw_mutex); 229 spin_lock(&dev_priv->waiter_lock);
235 if (--dev_priv->goal_queue_waiters == 0) { 230 if (--dev_priv->goal_queue_waiters == 0) {
236 unsigned long irq_flags; 231 unsigned long irq_flags;
237 232
@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
240 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 235 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
241 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); 236 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
242 } 237 }
243 mutex_unlock(&dev_priv->hw_mutex); 238 spin_unlock(&dev_priv->waiter_lock);
244} 239}
245 240
246int vmw_wait_seqno(struct vmw_private *dev_priv, 241int vmw_wait_seqno(struct vmw_private *dev_priv,
@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)
315 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) 310 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
316 return; 311 return;
317 312
318 mutex_lock(&dev_priv->hw_mutex);
319 vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); 313 vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
320 mutex_unlock(&dev_priv->hw_mutex);
321 314
322 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 315 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
323 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 316 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
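
All four waiter helpers converted above follow the same shape under the new waiter_lock: the first waiter arms the interrupt, the last one disarms it, and the counter makes intermediate calls free. A compact userspace model of that reference-counted irq-enable pattern (irq_mask_set() is a made-up stand-in for the SVGA_REG_IRQMASK update):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t waiter_lock = PTHREAD_MUTEX_INITIALIZER;
static int fence_queue_waiters;

static void irq_mask_set(int on)        /* stand-in for the IRQMASK write */
{
        printf("fence irq %s\n", on ? "enabled" : "disabled");
}

static void seqno_waiter_add(void)
{
        pthread_mutex_lock(&waiter_lock);
        if (fence_queue_waiters++ == 0)  /* 0 -> 1: first waiter arms the irq */
                irq_mask_set(1);
        pthread_mutex_unlock(&waiter_lock);
}

static void seqno_waiter_remove(void)
{
        pthread_mutex_lock(&waiter_lock);
        if (--fence_queue_waiters == 0)  /* 1 -> 0: last waiter disarms it */
                irq_mask_set(0);
        pthread_mutex_unlock(&waiter_lock);
}

int main(void)
{
        seqno_waiter_add();
        seqno_waiter_add();     /* no register write: already armed */
        seqno_waiter_remove();
        seqno_waiter_remove();  /* last one out disables the irq */
        return 0;
}
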
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3725b521d931..8725b79e7847 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
1828 struct vmw_private *dev_priv = vmw_priv(dev); 1828 struct vmw_private *dev_priv = vmw_priv(dev);
1829 struct vmw_display_unit *du = vmw_connector_to_du(connector); 1829 struct vmw_display_unit *du = vmw_connector_to_du(connector);
1830 1830
1831 mutex_lock(&dev_priv->hw_mutex);
1832 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); 1831 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
1833 mutex_unlock(&dev_priv->hw_mutex);
1834 1832
1835 return ((vmw_connector_to_du(connector)->unit < num_displays && 1833 return ((vmw_connector_to_du(connector)->unit < num_displays &&
1836 du->pref_active) ? 1834 du->pref_active) ?
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6529c09c46f0..a7de26d1ac80 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -574,6 +574,16 @@ config SENSORS_IIO_HWMON
574 for those channels specified in the map. This map can be provided 574 for those channels specified in the map. This map can be provided
575 either via platform data or the device tree bindings. 575 either via platform data or the device tree bindings.
576 576
577config SENSORS_I5500
578 tristate "Intel 5500/5520/X58 temperature sensor"
579 depends on X86 && PCI
580 help
581 If you say yes here you get support for the temperature
582 sensor inside the Intel 5500, 5520 and X58 chipsets.
583
584 This driver can also be built as a module. If so, the module
585 will be called i5500_temp.
586
577config SENSORS_CORETEMP 587config SENSORS_CORETEMP
578 tristate "Intel Core/Core2/Atom temperature sensor" 588 tristate "Intel Core/Core2/Atom temperature sensor"
579 depends on X86 589 depends on X86
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 67280643bcf0..6c941472e707 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
68obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o 68obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
69obj-$(CONFIG_SENSORS_HTU21) += htu21.o 69obj-$(CONFIG_SENSORS_HTU21) += htu21.o
70obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o 70obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
71obj-$(CONFIG_SENSORS_I5500) += i5500_temp.o
71obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o 72obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
72obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o 73obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
73obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o 74obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
new file mode 100644
index 000000000000..3e3ccbf18b4e
--- /dev/null
+++ b/drivers/hwmon/i5500_temp.c
@@ -0,0 +1,149 @@
1/*
2 * i5500_temp - Driver for Intel 5500/5520/X58 chipset thermal sensor
3 *
4 * Copyright (C) 2012, 2014 Jean Delvare <jdelvare@suse.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/slab.h>
20#include <linux/jiffies.h>
21#include <linux/device.h>
22#include <linux/pci.h>
23#include <linux/hwmon.h>
24#include <linux/hwmon-sysfs.h>
25#include <linux/err.h>
26#include <linux/mutex.h>
27
28/* Register definitions from datasheet */
29#define REG_TSTHRCATA 0xE2
30#define REG_TSCTRL 0xE8
31#define REG_TSTHRRPEX 0xEB
32#define REG_TSTHRLO 0xEC
33#define REG_TSTHRHI 0xEE
34#define REG_CTHINT 0xF0
35#define REG_TSFSC 0xF3
36#define REG_CTSTS 0xF4
37#define REG_TSTHRRQPI 0xF5
38#define REG_CTCTRL 0xF7
39#define REG_TSTIMER 0xF8
40
41/*
42 * Sysfs stuff
43 */
44
45/* Sensor resolution: 0.5 degree C */
46static ssize_t show_temp(struct device *dev,
47 struct device_attribute *devattr, char *buf)
48{
49 struct pci_dev *pdev = to_pci_dev(dev->parent);
50 long temp;
51 u16 tsthrhi;
52 s8 tsfsc;
53
54 pci_read_config_word(pdev, REG_TSTHRHI, &tsthrhi);
55 pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
56 temp = ((long)tsthrhi - tsfsc) * 500;
57
58 return sprintf(buf, "%ld\n", temp);
59}
60
61static ssize_t show_thresh(struct device *dev,
62 struct device_attribute *devattr, char *buf)
63{
64 struct pci_dev *pdev = to_pci_dev(dev->parent);
65 int reg = to_sensor_dev_attr(devattr)->index;
66 long temp;
67 u16 tsthr;
68
69 pci_read_config_word(pdev, reg, &tsthr);
70 temp = tsthr * 500;
71
72 return sprintf(buf, "%ld\n", temp);
73}
74
75static ssize_t show_alarm(struct device *dev,
76 struct device_attribute *devattr, char *buf)
77{
78 struct pci_dev *pdev = to_pci_dev(dev->parent);
79 int nr = to_sensor_dev_attr(devattr)->index;
80 u8 ctsts;
81
82 pci_read_config_byte(pdev, REG_CTSTS, &ctsts);
83 return sprintf(buf, "%u\n", (unsigned int)ctsts & (1 << nr));
84}
85
86static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
87static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_thresh, NULL, 0xE2);
88static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_thresh, NULL, 0xEC);
89static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_thresh, NULL, 0xEE);
90static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
91static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
92
93static struct attribute *i5500_temp_attrs[] = {
94 &dev_attr_temp1_input.attr,
95 &sensor_dev_attr_temp1_crit.dev_attr.attr,
96 &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
97 &sensor_dev_attr_temp1_max.dev_attr.attr,
98 &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
99 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
100 NULL
101};
102
103ATTRIBUTE_GROUPS(i5500_temp);
104
105static const struct pci_device_id i5500_temp_ids[] = {
106 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3438) },
107 { 0 },
108};
109
110MODULE_DEVICE_TABLE(pci, i5500_temp_ids);
111
112static int i5500_temp_probe(struct pci_dev *pdev,
113 const struct pci_device_id *id)
114{
115 int err;
116 struct device *hwmon_dev;
117 u32 tstimer;
118 s8 tsfsc;
119
120 err = pci_enable_device(pdev);
121 if (err) {
122 dev_err(&pdev->dev, "Failed to enable device\n");
123 return err;
124 }
125
126 pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
127 pci_read_config_dword(pdev, REG_TSTIMER, &tstimer);
128 if (tsfsc == 0x7F && tstimer == 0x07D30D40) {
129 dev_notice(&pdev->dev, "Sensor seems to be disabled\n");
130 return -ENODEV;
131 }
132
133 hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
134 "intel5500", NULL,
135 i5500_temp_groups);
136 return PTR_ERR_OR_ZERO(hwmon_dev);
137}
138
139static struct pci_driver i5500_temp_driver = {
140 .name = "i5500_temp",
141 .id_table = i5500_temp_ids,
142 .probe = i5500_temp_probe,
143};
144
145module_pci_driver(i5500_temp_driver);
146
147MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
148MODULE_DESCRIPTION("Intel 5500/5520/X58 chipset thermal sensor driver");
149MODULE_LICENSE("GPL");
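
For reference, the conversion used by show_temp() above: TSTHRHI and TSFSC are combined and scaled by 500 because the sensor's resolution is 0.5 degree C and hwmon reports millidegrees. A small worked example with hypothetical register values (not taken from real hardware):

#include <stdio.h>
#include <stdint.h>

static long i5500_temp_mdeg(uint16_t tsthrhi, int8_t tsfsc)
{
        /* 0.5 degree C per LSB, expressed in millidegrees */
        return ((long)tsthrhi - tsfsc) * 500;
}

int main(void)
{
        /* hypothetical values: (0x68 - 0x10) * 500 = (104 - 16) * 500 */
        printf("%ld mC\n", i5500_temp_mdeg(0x68, 0x10));  /* 44000, i.e. 44 C */
        return 0;
}
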
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index d111ac779c40..63cd031b2c28 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -28,7 +28,7 @@
28#define AT91_AIC_IRQ_MIN_PRIORITY 0 28#define AT91_AIC_IRQ_MIN_PRIORITY 0
29#define AT91_AIC_IRQ_MAX_PRIORITY 7 29#define AT91_AIC_IRQ_MAX_PRIORITY 7
30 30
31#define AT91_AIC_SRCTYPE GENMASK(7, 6) 31#define AT91_AIC_SRCTYPE GENMASK(6, 5)
32#define AT91_AIC_SRCTYPE_LOW (0 << 5) 32#define AT91_AIC_SRCTYPE_LOW (0 << 5)
33#define AT91_AIC_SRCTYPE_FALLING (1 << 5) 33#define AT91_AIC_SRCTYPE_FALLING (1 << 5)
34#define AT91_AIC_SRCTYPE_HIGH (2 << 5) 34#define AT91_AIC_SRCTYPE_HIGH (2 << 5)
@@ -74,7 +74,7 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
74 return -EINVAL; 74 return -EINVAL;
75 } 75 }
76 76
77 *val &= AT91_AIC_SRCTYPE; 77 *val &= ~AT91_AIC_SRCTYPE;
78 *val |= aic_type; 78 *val |= aic_type;
79 79
80 return 0; 80 return 0;
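
Two distinct bugs are fixed in this hunk: the SRCTYPE field mask covered the wrong bits (the source-type values are (x << 5), so the field is bits 6:5, not 7:6), and the clear step used &= mask instead of &= ~mask, wiping every other bit in the register. A self-contained demonstration (the GENMASK definition below is a simplified 32-bit copy of the kernel macro):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))
#define AIC_SRCTYPE     GENMASK(6, 5)   /* 0x60: bits 6:5 */
#define SRCTYPE_RISING  (3u << 5)

int main(void)
{
        uint32_t val = 0xffffffff;

        val &= ~AIC_SRCTYPE;      /* old code did val &= AIC_SRCTYPE, zeroing all other bits */
        val |= SRCTYPE_RISING;
        printf("0x%08x\n", val);  /* 0xffffffff: field rewritten, rest untouched */
        return 0;
}
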
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 86e4684adeb1..d8996bdf0f61 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1053,7 +1053,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1053 * of two entries. No, the architecture doesn't let you 1053 * of two entries. No, the architecture doesn't let you
1054 * express an ITT with a single entry. 1054 * express an ITT with a single entry.
1055 */ 1055 */
1056 nr_ites = max(2, roundup_pow_of_two(nvecs)); 1056 nr_ites = max(2UL, roundup_pow_of_two(nvecs));
1057 sz = nr_ites * its->ite_size; 1057 sz = nr_ites * its->ite_size;
1058 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 1058 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
1059 itt = kmalloc(sz, GFP_KERNEL); 1059 itt = kmalloc(sz, GFP_KERNEL);
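
The 2UL matters because roundup_pow_of_two() returns unsigned long and the kernel's max() refuses operands of different types. A simplified analogue of that type check (the pointer comparison is how the classic kernel macro provoked a warning on mismatched types):

#include <stdio.h>

#define kmax(a, b) ({                           \
        __typeof__(a) _a = (a);                 \
        __typeof__(b) _b = (b);                 \
        (void)(&_a == &_b);  /* warns if a and b differ in type */ \
        _a > _b ? _a : _b; })

int main(void)
{
        unsigned long nvecs = 1;
        /* kmax(2, nvecs) would trip the distinct-pointer-types check */
        printf("%lu\n", kmax(2UL, nvecs));
        return 0;
}
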
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 29b8f21b74d0..6bc2deb73d53 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -381,7 +381,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
381 * It will be refined as each CPU probes its ID. 381 * It will be refined as each CPU probes its ID.
382 */ 382 */
383 for (i = 0; i < NR_HIP04_CPU_IF; i++) 383 for (i = 0; i < NR_HIP04_CPU_IF; i++)
384 hip04_cpu_map[i] = 0xff; 384 hip04_cpu_map[i] = 0xffff;
385 385
386 /* 386 /*
387 * Find out how many interrupts are supported. 387 * Find out how many interrupts are supported.
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index 7e342df6a62f..0b0d2c00a2df 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -137,9 +137,9 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
137 return -ENOMEM; 137 return -ENOMEM;
138 138
139 chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol"); 139 chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol");
140 if (!chip_data->intpol_base) { 140 if (IS_ERR(chip_data->intpol_base)) {
141 pr_err("mtk_sysirq: unable to map sysirq register\n"); 141 pr_err("mtk_sysirq: unable to map sysirq register\n");
142 ret = -ENOMEM; 142 ret = PTR_ERR(chip_data->intpol_base);
143 goto out_free; 143 goto out_free;
144 } 144 }
145 145
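
The underlying contract: of_io_request_and_map() reports failure with an ERR_PTR()-encoded pointer, never NULL, so the old NULL check could not fire. A minimal sketch of the ERR_PTR/IS_ERR encoding (simplified copies of the kernel helpers; the -4095..-1 range is reserved for error codes):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO       4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *base = ERR_PTR(-ENOMEM);  /* what a failed map returns */

        if (!base)
                printf("never reached: the pointer is not NULL\n");
        if (IS_ERR(base))
                printf("mapping failed: %ld\n", PTR_ERR(base));
        return 0;
}
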
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index 28718d3e8281..c03f140acbae 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -263,7 +263,7 @@ static int __init omap_init_irq_of(struct device_node *node)
263 return ret; 263 return ret;
264} 264}
265 265
266static int __init omap_init_irq_legacy(u32 base) 266static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
267{ 267{
268 int j, irq_base; 268 int j, irq_base;
269 269
@@ -277,7 +277,7 @@ static int __init omap_init_irq_legacy(u32 base)
277 irq_base = 0; 277 irq_base = 0;
278 } 278 }
279 279
280 domain = irq_domain_add_legacy(NULL, omap_nr_irqs, irq_base, 0, 280 domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
281 &irq_domain_simple_ops, NULL); 281 &irq_domain_simple_ops, NULL);
282 282
283 omap_irq_soft_reset(); 283 omap_irq_soft_reset();
@@ -301,10 +301,26 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
301{ 301{
302 int ret; 302 int ret;
303 303
304 if (node) 304 /*
305 * FIXME: the legacy OMAP DMA driver under arch/arm/plat-omap/dma.c
306 * is still not ready for linear IRQ domains; because of that we need
307 * to temporarily "blacklist" OMAP2 and OMAP3 devices from using linear
308 * IRQ domains until that driver is finally fixed.
309 */
310 if (of_device_is_compatible(node, "ti,omap2-intc") ||
311 of_device_is_compatible(node, "ti,omap3-intc")) {
312 struct resource res;
313
314 if (of_address_to_resource(node, 0, &res))
315 return -ENOMEM;
316
317 base = res.start;
318 ret = omap_init_irq_legacy(base, node);
319 } else if (node) {
305 ret = omap_init_irq_of(node); 320 ret = omap_init_irq_of(node);
306 else 321 } else {
307 ret = omap_init_irq_legacy(base); 322 ret = omap_init_irq_legacy(base, NULL);
323 }
308 324
309 if (ret == 0) 325 if (ret == 0)
310 omap_irq_enable_protection(); 326 omap_irq_enable_protection();
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 9fc616c2755e..21b156242e42 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -94,6 +94,9 @@ struct cache_disk_superblock {
94} __packed; 94} __packed;
95 95
96struct dm_cache_metadata { 96struct dm_cache_metadata {
97 atomic_t ref_count;
98 struct list_head list;
99
97 struct block_device *bdev; 100 struct block_device *bdev;
98 struct dm_block_manager *bm; 101 struct dm_block_manager *bm;
99 struct dm_space_map *metadata_sm; 102 struct dm_space_map *metadata_sm;
@@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
669 672
670/*----------------------------------------------------------------*/ 673/*----------------------------------------------------------------*/
671 674
672struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, 675static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
673 sector_t data_block_size, 676 sector_t data_block_size,
674 bool may_format_device, 677 bool may_format_device,
675 size_t policy_hint_size) 678 size_t policy_hint_size)
676{ 679{
677 int r; 680 int r;
678 struct dm_cache_metadata *cmd; 681 struct dm_cache_metadata *cmd;
@@ -683,6 +686,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
683 return NULL; 686 return NULL;
684 } 687 }
685 688
689 atomic_set(&cmd->ref_count, 1);
686 init_rwsem(&cmd->root_lock); 690 init_rwsem(&cmd->root_lock);
687 cmd->bdev = bdev; 691 cmd->bdev = bdev;
688 cmd->data_block_size = data_block_size; 692 cmd->data_block_size = data_block_size;
@@ -705,10 +709,95 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
705 return cmd; 709 return cmd;
706} 710}
707 711
712/*
713 * We keep a little list of ref-counted metadata objects to prevent two
714 * different target instances from creating separate bufio instances.
715 * This matters if a table is reloaded before the suspend.
716 */
717static DEFINE_MUTEX(table_lock);
718static LIST_HEAD(table);
719
720static struct dm_cache_metadata *lookup(struct block_device *bdev)
721{
722 struct dm_cache_metadata *cmd;
723
724 list_for_each_entry(cmd, &table, list)
725 if (cmd->bdev == bdev) {
726 atomic_inc(&cmd->ref_count);
727 return cmd;
728 }
729
730 return NULL;
731}
732
733static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
734 sector_t data_block_size,
735 bool may_format_device,
736 size_t policy_hint_size)
737{
738 struct dm_cache_metadata *cmd, *cmd2;
739
740 mutex_lock(&table_lock);
741 cmd = lookup(bdev);
742 mutex_unlock(&table_lock);
743
744 if (cmd)
745 return cmd;
746
747 cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
748 if (cmd) {
749 mutex_lock(&table_lock);
750 cmd2 = lookup(bdev);
751 if (cmd2) {
752 mutex_unlock(&table_lock);
753 __destroy_persistent_data_objects(cmd);
754 kfree(cmd);
755 return cmd2;
756 }
757 list_add(&cmd->list, &table);
758 mutex_unlock(&table_lock);
759 }
760
761 return cmd;
762}
763
764static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
765{
766 if (cmd->data_block_size != data_block_size) {
767 DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
768 (unsigned long long) data_block_size,
769 (unsigned long long) cmd->data_block_size);
770 return false;
771 }
772
773 return true;
774}
775
776struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
777 sector_t data_block_size,
778 bool may_format_device,
779 size_t policy_hint_size)
780{
781 struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
782 may_format_device, policy_hint_size);
783 if (cmd && !same_params(cmd, data_block_size)) {
784 dm_cache_metadata_close(cmd);
785 return NULL;
786 }
787
788 return cmd;
789}
790
708void dm_cache_metadata_close(struct dm_cache_metadata *cmd) 791void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
709{ 792{
710 __destroy_persistent_data_objects(cmd); 793 if (atomic_dec_and_test(&cmd->ref_count)) {
711 kfree(cmd); 794 mutex_lock(&table_lock);
795 list_del(&cmd->list);
796 mutex_unlock(&table_lock);
797
798 __destroy_persistent_data_objects(cmd);
799 kfree(cmd);
800 }
712} 801}
713 802
714/* 803/*
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1e96d7889f51..e1650539cc2f 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -221,7 +221,13 @@ struct cache {
221 struct list_head need_commit_migrations; 221 struct list_head need_commit_migrations;
222 sector_t migration_threshold; 222 sector_t migration_threshold;
223 wait_queue_head_t migration_wait; 223 wait_queue_head_t migration_wait;
224 atomic_t nr_migrations; 224 atomic_t nr_allocated_migrations;
225
226 /*
227 * The number of in flight migrations that are performing
228 * background io. eg, promotion, writeback.
229 */
230 atomic_t nr_io_migrations;
225 231
226 wait_queue_head_t quiescing_wait; 232 wait_queue_head_t quiescing_wait;
227 atomic_t quiescing; 233 atomic_t quiescing;
@@ -258,7 +264,6 @@ struct cache {
258 struct dm_deferred_set *all_io_ds; 264 struct dm_deferred_set *all_io_ds;
259 265
260 mempool_t *migration_pool; 266 mempool_t *migration_pool;
261 struct dm_cache_migration *next_migration;
262 267
263 struct dm_cache_policy *policy; 268 struct dm_cache_policy *policy;
264 unsigned policy_nr_args; 269 unsigned policy_nr_args;
@@ -350,10 +355,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel
350 dm_bio_prison_free_cell(cache->prison, cell); 355 dm_bio_prison_free_cell(cache->prison, cell);
351} 356}
352 357
358static struct dm_cache_migration *alloc_migration(struct cache *cache)
359{
360 struct dm_cache_migration *mg;
361
362 mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
363 if (mg) {
364 mg->cache = cache;
365 atomic_inc(&mg->cache->nr_allocated_migrations);
366 }
367
368 return mg;
369}
370
371static void free_migration(struct dm_cache_migration *mg)
372{
373 if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
374 wake_up(&mg->cache->migration_wait);
375
376 mempool_free(mg, mg->cache->migration_pool);
377}
378
353static int prealloc_data_structs(struct cache *cache, struct prealloc *p) 379static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
354{ 380{
355 if (!p->mg) { 381 if (!p->mg) {
356 p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); 382 p->mg = alloc_migration(cache);
357 if (!p->mg) 383 if (!p->mg)
358 return -ENOMEM; 384 return -ENOMEM;
359 } 385 }
@@ -382,7 +408,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
382 free_prison_cell(cache, p->cell1); 408 free_prison_cell(cache, p->cell1);
383 409
384 if (p->mg) 410 if (p->mg)
385 mempool_free(p->mg, cache->migration_pool); 411 free_migration(p->mg);
386} 412}
387 413
388static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p) 414static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
@@ -854,24 +880,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
854 * Migration covers moving data from the origin device to the cache, or 880 * Migration covers moving data from the origin device to the cache, or
855 * vice versa. 881 * vice versa.
856 *--------------------------------------------------------------*/ 882 *--------------------------------------------------------------*/
857static void free_migration(struct dm_cache_migration *mg) 883static void inc_io_migrations(struct cache *cache)
858{
859 mempool_free(mg, mg->cache->migration_pool);
860}
861
862static void inc_nr_migrations(struct cache *cache)
863{ 884{
864 atomic_inc(&cache->nr_migrations); 885 atomic_inc(&cache->nr_io_migrations);
865} 886}
866 887
867static void dec_nr_migrations(struct cache *cache) 888static void dec_io_migrations(struct cache *cache)
868{ 889{
869 atomic_dec(&cache->nr_migrations); 890 atomic_dec(&cache->nr_io_migrations);
870
871 /*
872 * Wake the worker in case we're suspending the target.
873 */
874 wake_up(&cache->migration_wait);
875} 891}
876 892
877static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, 893static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
@@ -894,11 +910,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
894 wake_worker(cache); 910 wake_worker(cache);
895} 911}
896 912
897static void cleanup_migration(struct dm_cache_migration *mg) 913static void free_io_migration(struct dm_cache_migration *mg)
898{ 914{
899 struct cache *cache = mg->cache; 915 dec_io_migrations(mg->cache);
900 free_migration(mg); 916 free_migration(mg);
901 dec_nr_migrations(cache);
902} 917}
903 918
904static void migration_failure(struct dm_cache_migration *mg) 919static void migration_failure(struct dm_cache_migration *mg)
@@ -923,7 +938,7 @@ static void migration_failure(struct dm_cache_migration *mg)
923 cell_defer(cache, mg->new_ocell, true); 938 cell_defer(cache, mg->new_ocell, true);
924 } 939 }
925 940
926 cleanup_migration(mg); 941 free_io_migration(mg);
927} 942}
928 943
929static void migration_success_pre_commit(struct dm_cache_migration *mg) 944static void migration_success_pre_commit(struct dm_cache_migration *mg)
@@ -934,7 +949,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
934 if (mg->writeback) { 949 if (mg->writeback) {
935 clear_dirty(cache, mg->old_oblock, mg->cblock); 950 clear_dirty(cache, mg->old_oblock, mg->cblock);
936 cell_defer(cache, mg->old_ocell, false); 951 cell_defer(cache, mg->old_ocell, false);
937 cleanup_migration(mg); 952 free_io_migration(mg);
938 return; 953 return;
939 954
940 } else if (mg->demote) { 955 } else if (mg->demote) {
@@ -944,14 +959,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
944 mg->old_oblock); 959 mg->old_oblock);
945 if (mg->promote) 960 if (mg->promote)
946 cell_defer(cache, mg->new_ocell, true); 961 cell_defer(cache, mg->new_ocell, true);
947 cleanup_migration(mg); 962 free_io_migration(mg);
948 return; 963 return;
949 } 964 }
950 } else { 965 } else {
951 if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) { 966 if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
952 DMWARN_LIMIT("promotion failed; couldn't update on disk metadata"); 967 DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
953 policy_remove_mapping(cache->policy, mg->new_oblock); 968 policy_remove_mapping(cache->policy, mg->new_oblock);
954 cleanup_migration(mg); 969 free_io_migration(mg);
955 return; 970 return;
956 } 971 }
957 } 972 }
@@ -984,7 +999,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
984 } else { 999 } else {
985 if (mg->invalidate) 1000 if (mg->invalidate)
986 policy_remove_mapping(cache->policy, mg->old_oblock); 1001 policy_remove_mapping(cache->policy, mg->old_oblock);
987 cleanup_migration(mg); 1002 free_io_migration(mg);
988 } 1003 }
989 1004
990 } else { 1005 } else {
@@ -999,7 +1014,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
999 bio_endio(mg->new_ocell->holder, 0); 1014 bio_endio(mg->new_ocell->holder, 0);
1000 cell_defer(cache, mg->new_ocell, false); 1015 cell_defer(cache, mg->new_ocell, false);
1001 } 1016 }
1002 cleanup_migration(mg); 1017 free_io_migration(mg);
1003 } 1018 }
1004} 1019}
1005 1020
@@ -1251,7 +1266,7 @@ static void promote(struct cache *cache, struct prealloc *structs,
1251 mg->new_ocell = cell; 1266 mg->new_ocell = cell;
1252 mg->start_jiffies = jiffies; 1267 mg->start_jiffies = jiffies;
1253 1268
1254 inc_nr_migrations(cache); 1269 inc_io_migrations(cache);
1255 quiesce_migration(mg); 1270 quiesce_migration(mg);
1256} 1271}
1257 1272
@@ -1275,7 +1290,7 @@ static void writeback(struct cache *cache, struct prealloc *structs,
1275 mg->new_ocell = NULL; 1290 mg->new_ocell = NULL;
1276 mg->start_jiffies = jiffies; 1291 mg->start_jiffies = jiffies;
1277 1292
1278 inc_nr_migrations(cache); 1293 inc_io_migrations(cache);
1279 quiesce_migration(mg); 1294 quiesce_migration(mg);
1280} 1295}
1281 1296
@@ -1302,7 +1317,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
1302 mg->new_ocell = new_ocell; 1317 mg->new_ocell = new_ocell;
1303 mg->start_jiffies = jiffies; 1318 mg->start_jiffies = jiffies;
1304 1319
1305 inc_nr_migrations(cache); 1320 inc_io_migrations(cache);
1306 quiesce_migration(mg); 1321 quiesce_migration(mg);
1307} 1322}
1308 1323
@@ -1330,7 +1345,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs,
1330 mg->new_ocell = NULL; 1345 mg->new_ocell = NULL;
1331 mg->start_jiffies = jiffies; 1346 mg->start_jiffies = jiffies;
1332 1347
1333 inc_nr_migrations(cache); 1348 inc_io_migrations(cache);
1334 quiesce_migration(mg); 1349 quiesce_migration(mg);
1335} 1350}
1336 1351
@@ -1412,7 +1427,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs,
1412 1427
1413static bool spare_migration_bandwidth(struct cache *cache) 1428static bool spare_migration_bandwidth(struct cache *cache)
1414{ 1429{
1415 sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) * 1430 sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
1416 cache->sectors_per_block; 1431 cache->sectors_per_block;
1417 return current_volume < cache->migration_threshold; 1432 return current_volume < cache->migration_threshold;
1418} 1433}
@@ -1764,7 +1779,7 @@ static void stop_quiescing(struct cache *cache)
1764 1779
1765static void wait_for_migrations(struct cache *cache) 1780static void wait_for_migrations(struct cache *cache)
1766{ 1781{
1767 wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations)); 1782 wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
1768} 1783}
1769 1784
1770static void stop_worker(struct cache *cache) 1785static void stop_worker(struct cache *cache)
@@ -1876,9 +1891,6 @@ static void destroy(struct cache *cache)
1876{ 1891{
1877 unsigned i; 1892 unsigned i;
1878 1893
1879 if (cache->next_migration)
1880 mempool_free(cache->next_migration, cache->migration_pool);
1881
1882 if (cache->migration_pool) 1894 if (cache->migration_pool)
1883 mempool_destroy(cache->migration_pool); 1895 mempool_destroy(cache->migration_pool);
1884 1896
@@ -2424,7 +2436,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2424 INIT_LIST_HEAD(&cache->quiesced_migrations); 2436 INIT_LIST_HEAD(&cache->quiesced_migrations);
2425 INIT_LIST_HEAD(&cache->completed_migrations); 2437 INIT_LIST_HEAD(&cache->completed_migrations);
2426 INIT_LIST_HEAD(&cache->need_commit_migrations); 2438 INIT_LIST_HEAD(&cache->need_commit_migrations);
2427 atomic_set(&cache->nr_migrations, 0); 2439 atomic_set(&cache->nr_allocated_migrations, 0);
2440 atomic_set(&cache->nr_io_migrations, 0);
2428 init_waitqueue_head(&cache->migration_wait); 2441 init_waitqueue_head(&cache->migration_wait);
2429 2442
2430 init_waitqueue_head(&cache->quiescing_wait); 2443 init_waitqueue_head(&cache->quiescing_wait);
@@ -2487,8 +2500,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2487 goto bad; 2500 goto bad;
2488 } 2501 }
2489 2502
2490 cache->next_migration = NULL;
2491
2492 cache->need_tick_bio = true; 2503 cache->need_tick_bio = true;
2493 cache->sized = false; 2504 cache->sized = false;
2494 cache->invalidate = false; 2505 cache->invalidate = false;
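
Splitting the old nr_migrations into nr_allocated_migrations (what suspend waits for) and nr_io_migrations (what throttling counts) leaves the bandwidth check arithmetic unchanged. A worked example of the spare_migration_bandwidth() test with hypothetical numbers:

#include <stdio.h>

static int spare_bandwidth(unsigned nr_io, unsigned sectors_per_block,
                           unsigned long long threshold)
{
        /* one more in-flight migration must stay under the threshold */
        unsigned long long current_volume =
                (unsigned long long)(nr_io + 1) * sectors_per_block;
        return current_volume < threshold;
}

int main(void)
{
        /* e.g. 128-sector blocks with a 2048-sector threshold */
        printf("%d\n", spare_bandwidth(14, 128, 2048));  /* 15*128 = 1920 -> 1 */
        printf("%d\n", spare_bandwidth(15, 128, 2048));  /* 16*128 = 2048 -> 0 */
        return 0;
}
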
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b98cd9d84435..2caf5b374649 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -206,6 +206,9 @@ struct mapped_device {
206 /* zero-length flush that will be cloned and submitted to targets */ 206 /* zero-length flush that will be cloned and submitted to targets */
207 struct bio flush_bio; 207 struct bio flush_bio;
208 208
209 /* the number of internal suspends */
210 unsigned internal_suspend_count;
211
209 struct dm_stats stats; 212 struct dm_stats stats;
210}; 213};
211 214
@@ -2928,7 +2931,7 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
2928{ 2931{
2929 struct dm_table *map = NULL; 2932 struct dm_table *map = NULL;
2930 2933
2931 if (dm_suspended_internally_md(md)) 2934 if (md->internal_suspend_count++)
2932 return; /* nested internal suspend */ 2935 return; /* nested internal suspend */
2933 2936
2934 if (dm_suspended_md(md)) { 2937 if (dm_suspended_md(md)) {
@@ -2953,7 +2956,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
2953 2956
2954static void __dm_internal_resume(struct mapped_device *md) 2957static void __dm_internal_resume(struct mapped_device *md)
2955{ 2958{
2956 if (!dm_suspended_internally_md(md)) 2959 BUG_ON(!md->internal_suspend_count);
2960
2961 if (--md->internal_suspend_count)
2957 return; /* resume from nested internal suspend */ 2962 return; /* resume from nested internal suspend */
2958 2963
2959 if (dm_suspended_md(md)) 2964 if (dm_suspended_md(md))
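
The fix replaces the boolean "is internally suspended" test with a counter so nested internal suspends balance like a recursive lock; only the outermost pair touches the device. A minimal model of the counting (names mirror the hunk, behavior reduced to printf):

#include <assert.h>
#include <stdio.h>

static unsigned internal_suspend_count;

static void internal_suspend(void)
{
        if (internal_suspend_count++)
                return;                 /* nested: already suspended */
        printf("actually suspending\n");
}

static void internal_resume(void)
{
        assert(internal_suspend_count); /* the BUG_ON() in the hunk */
        if (--internal_suspend_count)
                return;                 /* still nested: stay suspended */
        printf("actually resuming\n");
}

int main(void)
{
        internal_suspend();
        internal_suspend();     /* no-op */
        internal_resume();      /* no-op: outer suspend still holds */
        internal_resume();      /* now the device really resumes */
        return 0;
}
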
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index db99ca2613ba..06931f6fa26c 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -614,7 +614,7 @@ struct cx23885_board cx23885_boards[] = {
614 .portb = CX23885_MPEG_DVB, 614 .portb = CX23885_MPEG_DVB,
615 }, 615 },
616 [CX23885_BOARD_HAUPPAUGE_HVR4400] = { 616 [CX23885_BOARD_HAUPPAUGE_HVR4400] = {
617 .name = "Hauppauge WinTV-HVR4400", 617 .name = "Hauppauge WinTV-HVR4400/HVR5500",
618 .porta = CX23885_ANALOG_VIDEO, 618 .porta = CX23885_ANALOG_VIDEO,
619 .portb = CX23885_MPEG_DVB, 619 .portb = CX23885_MPEG_DVB,
620 .portc = CX23885_MPEG_DVB, 620 .portc = CX23885_MPEG_DVB,
@@ -622,6 +622,10 @@ struct cx23885_board cx23885_boards[] = {
622 .tuner_addr = 0x60, /* 0xc0 >> 1 */ 622 .tuner_addr = 0x60, /* 0xc0 >> 1 */
623 .tuner_bus = 1, 623 .tuner_bus = 1,
624 }, 624 },
625 [CX23885_BOARD_HAUPPAUGE_STARBURST] = {
626 .name = "Hauppauge WinTV Starburst",
627 .portb = CX23885_MPEG_DVB,
628 },
625 [CX23885_BOARD_AVERMEDIA_HC81R] = { 629 [CX23885_BOARD_AVERMEDIA_HC81R] = {
626 .name = "AVerTV Hybrid Express Slim HC81R", 630 .name = "AVerTV Hybrid Express Slim HC81R",
627 .tuner_type = TUNER_XC2028, 631 .tuner_type = TUNER_XC2028,
@@ -936,19 +940,19 @@ struct cx23885_subid cx23885_subids[] = {
936 }, { 940 }, {
937 .subvendor = 0x0070, 941 .subvendor = 0x0070,
938 .subdevice = 0xc108, 942 .subdevice = 0xc108,
939 .card = CX23885_BOARD_HAUPPAUGE_HVR4400, 943 .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-4400 (Model 121xxx, Hybrid DVB-T/S2, IR) */
940 }, { 944 }, {
941 .subvendor = 0x0070, 945 .subvendor = 0x0070,
942 .subdevice = 0xc138, 946 .subdevice = 0xc138,
943 .card = CX23885_BOARD_HAUPPAUGE_HVR4400, 947 .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
944 }, { 948 }, {
945 .subvendor = 0x0070, 949 .subvendor = 0x0070,
946 .subdevice = 0xc12a, 950 .subdevice = 0xc12a,
947 .card = CX23885_BOARD_HAUPPAUGE_HVR4400, 951 .card = CX23885_BOARD_HAUPPAUGE_STARBURST, /* Hauppauge WinTV Starburst (Model 121x00, DVB-S2, IR) */
948 }, { 952 }, {
949 .subvendor = 0x0070, 953 .subvendor = 0x0070,
950 .subdevice = 0xc1f8, 954 .subdevice = 0xc1f8,
951 .card = CX23885_BOARD_HAUPPAUGE_HVR4400, 955 .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
952 }, { 956 }, {
953 .subvendor = 0x1461, 957 .subvendor = 0x1461,
954 .subdevice = 0xd939, 958 .subdevice = 0xd939,
@@ -1545,8 +1549,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
1545 cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/ 1549 cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
1546 break; 1550 break;
1547 case CX23885_BOARD_HAUPPAUGE_HVR4400: 1551 case CX23885_BOARD_HAUPPAUGE_HVR4400:
1552 case CX23885_BOARD_HAUPPAUGE_STARBURST:
1548 /* GPIO-8 tda10071 demod reset */ 1553 /* GPIO-8 tda10071 demod reset */
1549 /* GPIO-9 si2165 demod reset */ 1554 /* GPIO-9 si2165 demod reset (only HVR4400/HVR5500) */
1550 1555
1551 /* Put the parts into reset and back */ 1556 /* Put the parts into reset and back */
1552 cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1); 1557 cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1);
@@ -1872,6 +1877,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
1872 case CX23885_BOARD_HAUPPAUGE_HVR1850: 1877 case CX23885_BOARD_HAUPPAUGE_HVR1850:
1873 case CX23885_BOARD_HAUPPAUGE_HVR1290: 1878 case CX23885_BOARD_HAUPPAUGE_HVR1290:
1874 case CX23885_BOARD_HAUPPAUGE_HVR4400: 1879 case CX23885_BOARD_HAUPPAUGE_HVR4400:
1880 case CX23885_BOARD_HAUPPAUGE_STARBURST:
1875 case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE: 1881 case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE:
1876 if (dev->i2c_bus[0].i2c_rc == 0) 1882 if (dev->i2c_bus[0].i2c_rc == 0)
1877 hauppauge_eeprom(dev, eeprom+0xc0); 1883 hauppauge_eeprom(dev, eeprom+0xc0);
@@ -1980,6 +1986,11 @@ void cx23885_card_setup(struct cx23885_dev *dev)
 		ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
 		ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
 		break;
+	case CX23885_BOARD_HAUPPAUGE_STARBURST:
+		ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
+		ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+		ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+		break;
 	case CX23885_BOARD_DVBSKY_T9580:
 	case CX23885_BOARD_DVBSKY_T982:
 		ts1->gen_ctrl_val = 0x5; /* Parallel */
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 1d9d0f86ca8c..1ad49946d7fa 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2049,11 +2049,11 @@ static void cx23885_finidev(struct pci_dev *pci_dev)
 
 	cx23885_shutdown(dev);
 
-	pci_disable_device(pci_dev);
-
 	/* unregister stuff */
 	free_irq(pci_dev->irq, dev);
 
+	pci_disable_device(pci_dev);
+
 	cx23885_dev_unregister(dev);
 	vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
 	v4l2_ctrl_handler_free(&dev->ctrl_handler);
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index c47d18270cfc..a9c450d4b54e 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -1710,6 +1710,17 @@ static int dvb_register(struct cx23885_tsport *port)
 			break;
 		}
 		break;
+	case CX23885_BOARD_HAUPPAUGE_STARBURST:
+		i2c_bus = &dev->i2c_bus[0];
+		fe0->dvb.frontend = dvb_attach(tda10071_attach,
+						&hauppauge_tda10071_config,
+						&i2c_bus->i2c_adap);
+		if (fe0->dvb.frontend != NULL) {
+			dvb_attach(a8293_attach, fe0->dvb.frontend,
+				   &i2c_bus->i2c_adap,
+				   &hauppauge_a8293_config);
+		}
+		break;
 	case CX23885_BOARD_DVBSKY_T9580:
 	case CX23885_BOARD_DVBSKY_S950:
 		i2c_bus = &dev->i2c_bus[0];
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index f55cd12da0fd..36f2f96c40e4 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -99,6 +99,7 @@
 #define CX23885_BOARD_DVBSKY_S950              49
 #define CX23885_BOARD_DVBSKY_S952              50
 #define CX23885_BOARD_DVBSKY_T982              51
+#define CX23885_BOARD_HAUPPAUGE_STARBURST      52
 
 #define GPIO_0 0x00000001
 #define GPIO_1 0x00000002
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index b463fe172d16..3fe9047ef466 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -602,10 +602,13 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
 	strlcpy(cap->card, video->video.name, sizeof(cap->card));
 	strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
 
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+		| V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
+
 	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
-		cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+		cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
 	else
-		cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+		cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
 
 	return 0;
 }
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 8efe40337608..6d885239b16a 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -760,8 +760,9 @@ static int isi_camera_querycap(struct soc_camera_host *ici,
 {
 	strcpy(cap->driver, "atmel-isi");
 	strcpy(cap->card, "Atmel Image Sensor Interface");
-	cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE |
-			     V4L2_CAP_STREAMING);
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
 	return 0;
 }
 
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index ce72bd26a6ac..192377f55840 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -1256,7 +1256,8 @@ static int mx2_camera_querycap(struct soc_camera_host *ici,
 {
 	/* cap->name is set by the friendly caller:-> */
 	strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card));
-	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 
 	return 0;
 }
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index a60c3bb0e4cc..0b3299dee05d 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -967,7 +967,8 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
 {
 	/* cap->name is set by the firendly caller:-> */
 	strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card));
-	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 
 	return 0;
 }
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index e6b93281f246..16f65ecb70a3 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -1427,7 +1427,8 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
 {
 	/* cap->name is set by the friendly caller:-> */
 	strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card));
-	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 
 	return 0;
 }
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
index 951226af0eba..8d6e343fec0f 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -1576,7 +1576,8 @@ static int pxa_camera_querycap(struct soc_camera_host *ici,
 {
 	/* cap->name is set by the firendly caller:-> */
 	strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
-	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 
 	return 0;
 }
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 0c1f55648106..9f1473c0a0cf 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -1799,7 +1799,9 @@ static int rcar_vin_querycap(struct soc_camera_host *ici,
 			       struct v4l2_capability *cap)
 {
 	strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));
-	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
 	return 0;
 }
 
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 8b27b3eb2b25..71787702d4a2 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1652,7 +1652,9 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
 				      struct v4l2_capability *cap)
 {
 	strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
-	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
 	return 0;
 }
 
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 0f345b1f9014..f327c49d7e09 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -2232,7 +2232,7 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = {
 		{
 			"Mygica T230 DVB-T/T2/C",
 			{ NULL },
-			{ &cxusb_table[22], NULL },
+			{ &cxusb_table[20], NULL },
 		},
 	}
 };
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 1b158f1167ed..536210b39428 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -89,16 +89,6 @@ static int vbi_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1};
 module_param_array(vbi_nr, int, NULL, 0444);
 MODULE_PARM_DESC(vbi_nr, "Offset for device's vbi dev minor");
 
-static struct v4l2_capability pvr_capability ={
-	.driver         = "pvrusb2",
-	.card           = "Hauppauge WinTV pvr-usb2",
-	.bus_info       = "usb",
-	.version        = LINUX_VERSION_CODE,
-	.capabilities   = (V4L2_CAP_VIDEO_CAPTURE |
-			   V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
-			   V4L2_CAP_READWRITE),
-};
-
 static struct v4l2_fmtdesc pvr_fmtdesc [] = {
 	{
 		.index = 0,
@@ -160,10 +150,22 @@ static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *
 	struct pvr2_v4l2_fh *fh = file->private_data;
 	struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
 
-	memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
+	strlcpy(cap->driver, "pvrusb2", sizeof(cap->driver));
 	strlcpy(cap->bus_info, pvr2_hdw_get_bus_info(hdw),
 		sizeof(cap->bus_info));
 	strlcpy(cap->card, pvr2_hdw_get_desc(hdw), sizeof(cap->card));
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
+			    V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
+			    V4L2_CAP_READWRITE | V4L2_CAP_DEVICE_CAPS;
+	switch (fh->pdi->devbase.vfl_type) {
+	case VFL_TYPE_GRABBER:
+		cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
+		break;
+	case VFL_TYPE_RADIO:
+		cap->device_caps = V4L2_CAP_RADIO;
+		break;
+	}
+	cap->device_caps |= V4L2_CAP_TUNER | V4L2_CAP_READWRITE;
 	return 0;
 }
 
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index d09a8916e940..bc08a829bc13 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -3146,27 +3146,26 @@ static int vb2_thread(void *data)
 			prequeue--;
 		} else {
 			call_void_qop(q, wait_finish, q);
-			ret = vb2_internal_dqbuf(q, &fileio->b, 0);
+			if (!threadio->stop)
+				ret = vb2_internal_dqbuf(q, &fileio->b, 0);
 			call_void_qop(q, wait_prepare, q);
 			dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
 		}
-		if (threadio->stop)
-			break;
-		if (ret)
+		if (ret || threadio->stop)
 			break;
 		try_to_freeze();
 
 		vb = q->bufs[fileio->b.index];
 		if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
-			ret = threadio->fnc(vb, threadio->priv);
-		if (ret)
-			break;
+			if (threadio->fnc(vb, threadio->priv))
+				break;
 		call_void_qop(q, wait_finish, q);
 		if (set_timestamp)
 			v4l2_get_timestamp(&fileio->b.timestamp);
-		ret = vb2_internal_qbuf(q, &fileio->b);
+		if (!threadio->stop)
+			ret = vb2_internal_qbuf(q, &fileio->b);
 		call_void_qop(q, wait_prepare, q);
-		if (ret)
+		if (ret || threadio->stop)
 			break;
 	}
 
@@ -3235,11 +3234,11 @@ int vb2_thread_stop(struct vb2_queue *q)
 	threadio->stop = true;
 	vb2_internal_streamoff(q, q->type);
 	call_void_qop(q, wait_prepare, q);
+	err = kthread_stop(threadio->thread);
 	q->fileio = NULL;
 	fileio->req.count = 0;
 	vb2_reqbufs(q, &fileio->req);
 	kfree(fileio);
-	err = kthread_stop(threadio->thread);
 	threadio->thread = NULL;
 	kfree(threadio);
 	q->fileio = NULL;
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index f94a9fa60488..c672c4dcffac 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev)
 
 	c_can_irq_control(priv, false);
 
+	/* put ctrl to init on stop to end ongoing transmission */
+	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);
+
 	/* deactivate pins */
 	pinctrl_pm_select_sleep_state(dev->dev.parent);
 	priv->can.state = CAN_STATE_STOPPED;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index c32cd61073bc..7af379ca861b 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
 			  usb_sndbulkpipe(dev->udev,
 					  dev->bulk_out->bEndpointAddress),
 			  buf, msg->len,
-			  kvaser_usb_simple_msg_callback, priv);
+			  kvaser_usb_simple_msg_callback, netdev);
 	usb_anchor_urb(urb, &priv->tx_submitted);
 
 	err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 	priv = dev->nets[channel];
 	stats = &priv->netdev->stats;
 
-	if (status & M16C_STATE_BUS_RESET) {
-		kvaser_usb_unlink_tx_urbs(priv);
-		return;
-	}
-
 	skb = alloc_can_err_skb(priv->netdev, &cf);
 	if (!skb) {
 		stats->rx_dropped++;
@@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
 	netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
 
-	if (status & M16C_STATE_BUS_OFF) {
+	if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
 		cf->can_id |= CAN_ERR_BUSOFF;
 
 		priv->can.can_stats.bus_off++;
@@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 		}
 
 		new_state = CAN_STATE_ERROR_PASSIVE;
-	}
-
-	if (status == M16C_STATE_BUS_ERROR) {
+	} else if (status & M16C_STATE_BUS_ERROR) {
 		if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
 		    ((txerr >= 96) || (rxerr >= 96))) {
 			cf->can_id |= CAN_ERR_CRTL;
@@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
 			priv->can.can_stats.error_warning++;
 			new_state = CAN_STATE_ERROR_WARNING;
-		} else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+		} else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
+			   ((txerr < 96) && (rxerr < 96))) {
 			cf->can_id |= CAN_ERR_PROT;
 			cf->data[2] = CAN_ERR_PROT_ACTIVE;
 
@@ -1590,7 +1584,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 {
 	struct kvaser_usb *dev;
 	int err = -ENOMEM;
-	int i;
+	int i, retry = 3;
 
 	dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
 	if (!dev)
@@ -1608,7 +1602,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 
 	usb_set_intfdata(intf, dev);
 
-	err = kvaser_usb_get_software_info(dev);
+	/* On some x86 laptops, plugging a Kvaser device again after
+	 * an unplug makes the firmware always ignore the very first
+	 * command. For such a case, provide some room for retries
+	 * instead of completely exiting the driver.
+	 */
+	do {
+		err = kvaser_usb_get_software_info(dev);
+	} while (--retry && err == -ETIMEDOUT);
+
 	if (err) {
 		dev_err(&intf->dev,
 			"Cannot get software infos, error %d\n", err);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 75b08c63d39f..29a09271b64a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -767,16 +767,17 @@
 #define MTL_Q_RQOMR			0x40
 #define MTL_Q_RQMPOCR			0x44
 #define MTL_Q_RQDR			0x4c
+#define MTL_Q_RQFCR			0x50
 #define MTL_Q_IER			0x70
 #define MTL_Q_ISR			0x74
 
 /* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQFCR_RFA_INDEX		1
+#define MTL_Q_RQFCR_RFA_WIDTH		6
+#define MTL_Q_RQFCR_RFD_INDEX		17
+#define MTL_Q_RQFCR_RFD_WIDTH		6
 #define MTL_Q_RQOMR_EHFC_INDEX		7
 #define MTL_Q_RQOMR_EHFC_WIDTH		1
-#define MTL_Q_RQOMR_RFA_INDEX		8
-#define MTL_Q_RQOMR_RFA_WIDTH		3
-#define MTL_Q_RQOMR_RFD_INDEX		13
-#define MTL_Q_RQOMR_RFD_WIDTH		3
 #define MTL_Q_RQOMR_RQS_INDEX		16
 #define MTL_Q_RQOMR_RQS_WIDTH		9
 #define MTL_Q_RQOMR_RSF_INDEX		5
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 53f5f66ec2ee..4c66cd1d1e60 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
 
 	for (i = 0; i < pdata->rx_q_count; i++) {
 		/* Activate flow control when less than 4k left in fifo */
-		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
+		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
 
 		/* De-activate flow control when more than 6k left in fifo */
-		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
+		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
 	}
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 1d1147c93d59..e468ed3f210f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 	}
 #endif
 	if (!bnx2x_fp_lock_napi(fp))
-		return work_done;
+		return budget;
 
 	for_each_cos_in_tx_queue(fp, cos)
 		if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b29e027c476e..e356afa44e7d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1335,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
 	int err;
 
 	if (!enic_poll_lock_napi(&enic->rq[rq]))
-		return work_done;
+		return budget;
 	/* Service RQ
 	 */
 
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a62fc38f045e..1c75829eb166 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define IS_TSO_HEADER(txq, addr) \
 	((addr >= txq->tso_hdrs_dma) && \
 	 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
 /*
  * RX/TX descriptors.
  */
@@ -362,6 +366,7 @@ struct tx_queue {
 	dma_addr_t tso_hdrs_dma;
 
 	struct tx_desc *tx_desc_area;
+	char *tx_desc_mapping; /* array to track the type of the dma mapping */
 	dma_addr_t tx_desc_dma;
 	int tx_desc_area_size;
 
@@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
 	if (txq->tx_curr_desc == txq->tx_ring_size)
 		txq->tx_curr_desc = 0;
 	desc = &txq->tx_desc_area[tx_index];
+	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 
 	desc->l4i_chk = 0;
 	desc->byte_cnt = length;
@@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 		skb_frag_t *this_frag;
 		int tx_index;
 		struct tx_desc *desc;
-		void *addr;
 
 		this_frag = &skb_shinfo(skb)->frags[frag];
-		addr = page_address(this_frag->page.p) + this_frag->page_offset;
 		tx_index = txq->tx_curr_desc++;
 		if (txq->tx_curr_desc == txq->tx_ring_size)
 			txq->tx_curr_desc = 0;
 		desc = &txq->tx_desc_area[tx_index];
+		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
 
 		/*
 		 * The last fragment will generate an interrupt
@@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 
 		desc->l4i_chk = 0;
 		desc->byte_cnt = skb_frag_size(this_frag);
-		desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
-					       desc->byte_cnt, DMA_TO_DEVICE);
+		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+						 this_frag, 0, desc->byte_cnt,
+						 DMA_TO_DEVICE);
 	}
 }
 
@@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
 	if (txq->tx_curr_desc == txq->tx_ring_size)
 		txq->tx_curr_desc = 0;
 	desc = &txq->tx_desc_area[tx_index];
+	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 
 	if (nr_frags) {
 		txq_submit_frag_skb(txq, skb);
@@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		int tx_index;
 		struct tx_desc *desc;
 		u32 cmd_sts;
+		char desc_dma_map;
 
 		tx_index = txq->tx_used_desc;
 		desc = &txq->tx_desc_area[tx_index];
+		desc_dma_map = txq->tx_desc_mapping[tx_index];
+
 		cmd_sts = desc->cmd_sts;
 
 		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		reclaimed++;
 		txq->tx_desc_count--;
 
-		if (!IS_TSO_HEADER(txq, desc->buf_ptr))
-			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
-					 desc->byte_cnt, DMA_TO_DEVICE);
+		if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
+
+			if (desc_dma_map == DESC_DMA_MAP_PAGE)
+				dma_unmap_page(mp->dev->dev.parent,
+					       desc->buf_ptr,
+					       desc->byte_cnt,
+					       DMA_TO_DEVICE);
+			else
+				dma_unmap_single(mp->dev->dev.parent,
+						 desc->buf_ptr,
+						 desc->byte_cnt,
+						 DMA_TO_DEVICE);
+		}
 
 		if (cmd_sts & TX_ENABLE_INTERRUPT) {
 			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
@@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 	struct tx_queue *txq = mp->txq + index;
 	struct tx_desc *tx_desc;
 	int size;
+	int ret;
 	int i;
 
 	txq->index = index;
@@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 					nexti * sizeof(struct tx_desc);
 	}
 
+	txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
+				       GFP_KERNEL);
+	if (!txq->tx_desc_mapping) {
+		ret = -ENOMEM;
+		goto err_free_desc_area;
+	}
+
 	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
 	txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
 					   txq->tx_ring_size * TSO_HEADER_SIZE,
 					   &txq->tso_hdrs_dma, GFP_KERNEL);
 	if (txq->tso_hdrs == NULL) {
-		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
-				  txq->tx_desc_area, txq->tx_desc_dma);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_free_desc_mapping;
 	}
 	skb_queue_head_init(&txq->tx_skb);
 
 	return 0;
+
+err_free_desc_mapping:
+	kfree(txq->tx_desc_mapping);
+err_free_desc_area:
+	if (index == 0 && size <= mp->tx_desc_sram_size)
+		iounmap(txq->tx_desc_area);
+	else
+		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+				  txq->tx_desc_area, txq->tx_desc_dma);
+	return ret;
 }
 
 static void txq_deinit(struct tx_queue *txq)
@@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq)
 	else
 		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
 				  txq->tx_desc_area, txq->tx_desc_dma);
+	kfree(txq->tx_desc_mapping);
+
 	if (txq->tso_hdrs)
 		dma_free_coherent(mp->dev->dev.parent,
 				  txq->tx_ring_size * TSO_HEADER_SIZE,
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 613037584d08..c531c8ae1be4 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
 
 	work_done = netxen_process_rcv_ring(sds_ring, budget);
 
-	if ((work_done < budget) && tx_complete) {
+	if (!tx_complete)
+		work_done = budget;
+
+	if (work_done < budget) {
 		napi_complete(&sds_ring->napi);
 		if (test_bit(__NX_DEV_UP, &adapter->state))
 			netxen_nic_enable_int(sds_ring);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6576243222af..04283fe0e6a7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
 	[TSU_ADRL31]	= 0x01fc,
 };
 
+static void sh_eth_rcv_snd_disable(struct net_device *ndev);
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
 	return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	dma_addr_t dma_addr;
 
 	mdp->cur_rx = 0;
 	mdp->cur_tx = 0;
@@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		/* skb */
 		mdp->rx_skbuff[i] = NULL;
 		skb = netdev_alloc_skb(ndev, skbuff_size);
-		mdp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
 		sh_eth_set_receive_align(skb);
@@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		rxdesc = &mdp->rx_ring[i];
 		/* The size of the buffer is a multiple of 16 bytes. */
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
-		dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
-			       DMA_FROM_DEVICE);
-		rxdesc->addr = virt_to_phys(skb->data);
+		dma_addr = dma_map_single(&ndev->dev, skb->data,
+					  rxdesc->buffer_length,
+					  DMA_FROM_DEVICE);
+		if (dma_mapping_error(&ndev->dev, dma_addr)) {
+			kfree_skb(skb);
+			break;
+		}
+		mdp->rx_skbuff[i] = skb;
+		rxdesc->addr = dma_addr;
 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
 		/* Rx descriptor address set */
@@ -1316,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
 			  RFLR);
 
 	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
-	if (start)
+	if (start) {
+		mdp->irq_enabled = true;
 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+	}
 
 	/* PAUSE Prohibition */
 	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1356,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
 	return ret;
 }
 
+static void sh_eth_dev_exit(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int i;
+
+	/* Deactivate all TX descriptors, so DMA should stop at next
+	 * packet boundary if it's currently running
+	 */
+	for (i = 0; i < mdp->num_tx_ring; i++)
+		mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
+
+	/* Disable TX FIFO egress to MAC */
+	sh_eth_rcv_snd_disable(ndev);
+
+	/* Stop RX DMA at next packet boundary */
+	sh_eth_write(ndev, 0, EDRRR);
+
+	/* Aside from TX DMA, we can't tell when the hardware is
+	 * really stopped, so we need to reset to make sure.
+	 * Before doing that, wait for long enough to *probably*
+	 * finish transmitting the last packet and poll stats.
+	 */
+	msleep(2); /* max frame time at 10 Mbps < 1250 us */
+	sh_eth_get_stats(ndev);
+	sh_eth_reset(ndev);
+}
+
 /* free Tx skb function */
 static int sh_eth_txfree(struct net_device *ndev)
 {
@@ -1400,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	u16 pkt_len = 0;
 	u32 desc_status;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	dma_addr_t dma_addr;
 
 	boguscnt = min(boguscnt, *quota);
 	limit = boguscnt;
@@ -1447,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 			mdp->rx_skbuff[entry] = NULL;
 			if (mdp->cd->rpadir)
 				skb_reserve(skb, NET_IP_ALIGN);
-			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-						ALIGN(mdp->rx_buf_sz, 16),
-						DMA_FROM_DEVICE);
+			dma_unmap_single(&ndev->dev, rxdesc->addr,
+					 ALIGN(mdp->rx_buf_sz, 16),
+					 DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, ndev);
 			netif_receive_skb(skb);
@@ -1469,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 
 		if (mdp->rx_skbuff[entry] == NULL) {
 			skb = netdev_alloc_skb(ndev, skbuff_size);
-			mdp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			sh_eth_set_receive_align(skb);
-			dma_map_single(&ndev->dev, skb->data,
-				       rxdesc->buffer_length, DMA_FROM_DEVICE);
+			dma_addr = dma_map_single(&ndev->dev, skb->data,
+						  rxdesc->buffer_length,
+						  DMA_FROM_DEVICE);
+			if (dma_mapping_error(&ndev->dev, dma_addr)) {
+				kfree_skb(skb);
+				break;
+			}
+			mdp->rx_skbuff[entry] = skb;
 
 			skb_checksum_none_assert(skb);
-			rxdesc->addr = virt_to_phys(skb->data);
+			rxdesc->addr = dma_addr;
 		}
 		if (entry >= mdp->num_rx_ring - 1)
 			rxdesc->status |=
@@ -1573,7 +1617,6 @@ ignore_link:
 		if (intr_status & EESR_RFRMER) {
 			/* Receive Frame Overflow int */
 			ndev->stats.rx_frame_errors++;
-			netif_err(mdp, rx_err, ndev, "Receive Abort\n");
 		}
 	}
 
@@ -1592,13 +1635,11 @@ ignore_link:
 	if (intr_status & EESR_RDE) {
 		/* Receive Descriptor Empty int */
 		ndev->stats.rx_over_errors++;
-		netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
 	}
 
 	if (intr_status & EESR_RFE) {
 		/* Receive FIFO Overflow int */
 		ndev->stats.rx_fifo_errors++;
-		netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
 	}
 
 	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
@@ -1653,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
 		ret = IRQ_HANDLED;
 	else
-		goto other_irq;
+		goto out;
+
+	if (!likely(mdp->irq_enabled)) {
+		sh_eth_write(ndev, 0, EESIPR);
+		goto out;
+	}
 
 	if (intr_status & EESR_RX_CHECK) {
 		if (napi_schedule_prep(&mdp->napi)) {
@@ -1684,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 		sh_eth_error(ndev, intr_status);
 	}
 
-other_irq:
+out:
 	spin_unlock(&mdp->lock);
 
 	return ret;
@@ -1712,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
 	napi_complete(napi);
 
 	/* Reenable Rx interrupts */
-	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+	if (mdp->irq_enabled)
+		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 out:
 	return budget - quota;
 }
@@ -1968,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
 		return -EINVAL;
 
 	if (netif_running(ndev)) {
+		netif_device_detach(ndev);
 		netif_tx_disable(ndev);
-		/* Disable interrupts by clearing the interrupt mask. */
-		sh_eth_write(ndev, 0x0000, EESIPR);
-		/* Stop the chip's Tx and Rx processes. */
-		sh_eth_write(ndev, 0, EDTRR);
-		sh_eth_write(ndev, 0, EDRRR);
+
+		/* Serialise with the interrupt handler and NAPI, then
+		 * disable interrupts. We have to clear the
+		 * irq_enabled flag first to ensure that interrupts
+		 * won't be re-enabled.
+		 */
+		mdp->irq_enabled = false;
 		synchronize_irq(ndev->irq);
-	}
+		napi_synchronize(&mdp->napi);
+		sh_eth_write(ndev, 0x0000, EESIPR);
 
-	/* Free all the skbuffs in the Rx queue. */
-	sh_eth_ring_free(ndev);
-	/* Free DMA buffer */
-	sh_eth_free_dma_buffer(mdp);
+		sh_eth_dev_exit(ndev);
+
+		/* Free all the skbuffs in the Rx queue. */
+		sh_eth_ring_free(ndev);
+		/* Free DMA buffer */
+		sh_eth_free_dma_buffer(mdp);
+	}
 
 	/* Set new parameters */
 	mdp->num_rx_ring = ring->rx_pending;
 	mdp->num_tx_ring = ring->tx_pending;
 
-	ret = sh_eth_ring_init(ndev);
-	if (ret < 0) {
-		netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
-		return ret;
-	}
-	ret = sh_eth_dev_init(ndev, false);
-	if (ret < 0) {
-		netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
-		return ret;
-	}
-
 	if (netif_running(ndev)) {
+		ret = sh_eth_ring_init(ndev);
+		if (ret < 0) {
+			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
+				   __func__);
+			return ret;
+		}
+		ret = sh_eth_dev_init(ndev, false);
+		if (ret < 0) {
+			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
+				   __func__);
+			return ret;
+		}
+
+		mdp->irq_enabled = true;
 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 		/* Setting the Rx mode will start the Rx process. */
 		sh_eth_write(ndev, EDRRR_R, EDRRR);
-		netif_wake_queue(ndev);
+		netif_device_attach(ndev);
 	}
 
 	return 0;
@@ -2117,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 	spin_unlock_irqrestore(&mdp->lock, flags);
 
+	if (skb_padto(skb, ETH_ZLEN))
+		return NETDEV_TX_OK;
+
 	entry = mdp->cur_tx % mdp->num_tx_ring;
 	mdp->tx_skbuff[entry] = skb;
 	txdesc = &mdp->tx_ring[entry];
@@ -2126,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 					 skb->len + 2);
 	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
 				      DMA_TO_DEVICE);
-	if (skb->len < ETH_ZLEN)
-		txdesc->buffer_length = ETH_ZLEN;
-	else
-		txdesc->buffer_length = skb->len;
+	if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	txdesc->buffer_length = skb->len;
 
 	if (entry >= mdp->num_tx_ring - 1)
 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
@@ -2181,14 +2242,17 @@ static int sh_eth_close(struct net_device *ndev)
 
 	netif_stop_queue(ndev);
 
-	/* Disable interrupts by clearing the interrupt mask. */
+	/* Serialise with the interrupt handler and NAPI, then disable
+	 * interrupts. We have to clear the irq_enabled flag first to
+	 * ensure that interrupts won't be re-enabled.
+	 */
+	mdp->irq_enabled = false;
+	synchronize_irq(ndev->irq);
+	napi_disable(&mdp->napi);
 	sh_eth_write(ndev, 0x0000, EESIPR);
 
-	/* Stop the chip's Tx and Rx processes. */
-	sh_eth_write(ndev, 0, EDTRR);
-	sh_eth_write(ndev, 0, EDRRR);
+	sh_eth_dev_exit(ndev);
 
-	sh_eth_get_stats(ndev);
 	/* PHY Disconnect */
 	if (mdp->phydev) {
 		phy_stop(mdp->phydev);
@@ -2198,8 +2262,6 @@ static int sh_eth_close(struct net_device *ndev)
 
 	free_irq(ndev->irq, ndev);
 
-	napi_disable(&mdp->napi);
-
 	/* Free all the skbuffs in the Rx queue. */
 	sh_eth_ring_free(ndev);
 
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 71f5de1171bd..332d3c16d483 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -513,6 +513,7 @@ struct sh_eth_private {
 	u32 rx_buf_sz;			/* Based on MTU+slack. */
 	int edmac_endian;
 	struct napi_struct napi;
+	bool irq_enabled;
 	/* MII transceiver section. */
 	u32 phy_id;			/* PHY ID */
 	struct mii_bus *mii_bus;	/* MDIO bus control */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8c6b7c1651e5..cf62ff4c8c56 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2778,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2778 * @addr: iobase memory address 2778 * @addr: iobase memory address
2779 * Description: this is the main probe function used to 2779 * Description: this is the main probe function used to
2780 * call the alloc_etherdev, allocate the priv structure. 2780 * call the alloc_etherdev, allocate the priv structure.
2781 * Return:
2782 * on success the new private structure is returned, otherwise the error
2783 * pointer.
2781 */ 2784 */
2782struct stmmac_priv *stmmac_dvr_probe(struct device *device, 2785struct stmmac_priv *stmmac_dvr_probe(struct device *device,
2783 struct plat_stmmacenet_data *plat_dat, 2786 struct plat_stmmacenet_data *plat_dat,
@@ -2789,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
2789 2792
2790 ndev = alloc_etherdev(sizeof(struct stmmac_priv)); 2793 ndev = alloc_etherdev(sizeof(struct stmmac_priv));
2791 if (!ndev) 2794 if (!ndev)
2792 return NULL; 2795 return ERR_PTR(-ENOMEM);
2793 2796
2794 SET_NETDEV_DEV(ndev, device); 2797 SET_NETDEV_DEV(ndev, device);
2795 2798
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index e068d48b0f21..a39131f494ec 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1683,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
1683 if (vid == priv->data.default_vlan) 1683 if (vid == priv->data.default_vlan)
1684 return 0; 1684 return 0;
1685 1685
1686 if (priv->data.dual_emac) {
1687 /* In dual EMAC, reserved VLAN id should not be used for
1688 * creating VLAN interfaces as this can break the dual
1689 * EMAC port separation
1690 */
1691 int i;
1692
1693 for (i = 0; i < priv->data.slaves; i++) {
1694 if (vid == priv->slaves[i].port_vlan)
1695 return -EINVAL;
1696 }
1697 }
1698
1686 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); 1699 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
1687 return cpsw_add_vlan_ale_entry(priv, vid); 1700 return cpsw_add_vlan_ale_entry(priv, vid);
1688} 1701}
@@ -1696,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 	if (vid == priv->data.default_vlan)
 		return 0;
 
+	if (priv->data.dual_emac) {
+		int i;
+
+		for (i = 0; i < priv->data.slaves; i++) {
+			if (vid == priv->slaves[i].port_vlan)
+				return -EINVAL;
+		}
+	}
+
 	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
 	ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
 	if (ret != 0)
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index a14d87783245..2e195289ddf4 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
 	};
 
 	dst = ip6_route_output(dev_net(dev), NULL, &fl6);
-	if (IS_ERR(dst))
+	if (dst->error) {
+		ret = dst->error;
+		dst_release(dst);
 		goto err;
-
+	}
 	skb_dst_drop(skb);
 	skb_dst_set(skb, dst);
 	err = ip6_local_out(skb);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9a72640237cb..62b0bf4fdf6b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
 
 	__ath_cancel_work(sc);
 
+	disable_irq(sc->irq);
 	tasklet_disable(&sc->intr_tq);
 	tasklet_disable(&sc->bcon_tasklet);
 	spin_lock_bh(&sc->sc_pcu_lock);
@@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
 		r = -EIO;
 
 out:
+	enable_irq(sc->irq);
 	spin_unlock_bh(&sc->sc_pcu_lock);
 	tasklet_enable(&sc->bcon_tasklet);
 	tasklet_enable(&sc->intr_tq);
@@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev)
 	if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
 		return IRQ_NONE;
 
-	if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
-		return IRQ_NONE;
-
 	/* shared irq, not for us */
 	if (!ath9k_hw_intrpend(ah))
 		return IRQ_NONE;
@@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev)
 	ath9k_debug_sync_cause(sc, sync_cause);
 	status &= ah->imask;	/* discard unasked-for bits */
 
-	if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
+	if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
 		return IRQ_HANDLED;
 
 	/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 1bbe4fc47b97..660ddb1b7d8a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -246,6 +246,7 @@ enum iwl_ucode_tlv_flag {
  * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
  *	regardless of the band or the number of the probes. FW will calculate
  *	the actual dwell time.
+ * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
  */
 enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID	= BIT(0),
@@ -257,6 +258,7 @@ enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF	= BIT(7),
 	IWL_UCODE_TLV_API_FRAGMENTED_SCAN	= BIT(8),
 	IWL_UCODE_TLV_API_BASIC_DWELL		= BIT(13),
+	IWL_UCODE_TLV_API_SINGLE_SCAN_EBS	= BIT(16),
 };
 
 /**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 201846de94e7..cfc0e65b34a5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -653,8 +653,11 @@ enum iwl_scan_channel_flags {
 };
 
 /* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
- * @flags: enum iwl_scan_channel_flgs
- * @non_ebs_ratio: how many regular scan iteration before EBS
+ * @flags: enum iwl_scan_channel_flags
+ * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
+ *	involved.
+ *	1 - EBS is disabled.
+ *	2 - every second scan will be full scan(and so on).
  */
 struct iwl_scan_channel_opt {
 	__le16 flags;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index e880f9d4717b..20915587c820 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -3343,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3343 msk |= mvmsta->tfd_queue_msk; 3343 msk |= mvmsta->tfd_queue_msk;
3344 } 3344 }
3345 3345
3346 if (drop) { 3346 msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
3347 if (iwl_mvm_flush_tx_path(mvm, msk, true))
3348 IWL_ERR(mvm, "flush request fail\n");
3349 mutex_unlock(&mvm->mutex);
3350 } else {
3351 mutex_unlock(&mvm->mutex);
3352 3347
3353 /* this can take a while, and we may need/want other operations 3348 if (iwl_mvm_flush_tx_path(mvm, msk, true))
3354 * to succeed while doing this, so do it without the mutex held 3349 IWL_ERR(mvm, "flush request fail\n");
3355 */ 3350 mutex_unlock(&mvm->mutex);
3356 iwl_trans_wait_tx_queue_empty(mvm->trans, msk); 3351
3357 } 3352 /* this can take a while, and we may need/want other operations
3353 * to succeed while doing this, so do it without the mutex held
3354 */
3355 iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
3358} 3356}
3359 3357
3360const struct ieee80211_ops iwl_mvm_hw_ops = { 3358const struct ieee80211_ops iwl_mvm_hw_ops = {
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index ec9a8e7bae1d..844bf7c4c8de 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -72,6 +72,8 @@
 
 #define IWL_PLCP_QUIET_THRESH		1
 #define IWL_ACTIVE_QUIET_TIME		10
+#define IWL_DENSE_EBS_SCAN_RATIO	5
+#define IWL_SPARSE_EBS_SCAN_RATIO	1
 
 struct iwl_mvm_scan_params {
 	u32 max_out_time;
@@ -1105,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
 		return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
 					  notify);
 
+	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+		return 0;
+
+	if (iwl_mvm_is_radio_killed(mvm))
+		goto out;
+
 	if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
 	    (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
 	     mvm->scan_status != IWL_MVM_SCAN_OS)) {
@@ -1141,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
1141 if (mvm->scan_status == IWL_MVM_SCAN_OS) 1149 if (mvm->scan_status == IWL_MVM_SCAN_OS)
1142 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); 1150 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1143 1151
1152out:
1144 mvm->scan_status = IWL_MVM_SCAN_NONE; 1153 mvm->scan_status = IWL_MVM_SCAN_NONE;
1145 1154
1146 if (notify) { 1155 if (notify) {
@@ -1297,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
1297 cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); 1306 cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
1298 cmd->iter_num = cpu_to_le32(1); 1307 cmd->iter_num = cpu_to_le32(1);
1299 1308
1300 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
1301 mvm->last_ebs_successful) {
1302 cmd->channel_opt[0].flags =
1303 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
1304 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1305 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
1306 cmd->channel_opt[1].flags =
1307 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
1308 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1309 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
1310 }
1311
1312 if (iwl_mvm_rrm_scan_needed(mvm)) 1309 if (iwl_mvm_rrm_scan_needed(mvm))
1313 cmd->scan_flags |= 1310 cmd->scan_flags |=
1314 cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED); 1311 cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
@@ -1383,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
1383 cmd->schedule[1].iterations = 0; 1380 cmd->schedule[1].iterations = 0;
1384 cmd->schedule[1].full_scan_mul = 0; 1381 cmd->schedule[1].full_scan_mul = 0;
1385 1382
1383 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
1384 mvm->last_ebs_successful) {
1385 cmd->channel_opt[0].flags =
1386 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
1387 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1388 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
1389 cmd->channel_opt[0].non_ebs_ratio =
1390 cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
1391 cmd->channel_opt[1].flags =
1392 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
1393 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1394 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
1395 cmd->channel_opt[1].non_ebs_ratio =
1396 cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
1397 }
1398
1386 for (i = 1; i <= req->req.n_ssids; i++) 1399 for (i = 1; i <= req->req.n_ssids; i++)
1387 ssid_bitmap |= BIT(i); 1400 ssid_bitmap |= BIT(i);
1388 1401
@@ -1483,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
1483 cmd->schedule[1].iterations = 0xff; 1496 cmd->schedule[1].iterations = 0xff;
1484 cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER; 1497 cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
1485 1498
1499 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
1500 mvm->last_ebs_successful) {
1501 cmd->channel_opt[0].flags =
1502 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
1503 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1504 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
1505 cmd->channel_opt[0].non_ebs_ratio =
1506 cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
1507 cmd->channel_opt[1].flags =
1508 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
1509 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1510 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
1511 cmd->channel_opt[1].non_ebs_ratio =
1512 cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
1513 }
1514
1486 iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels, 1515 iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
1487 ssid_bitmap, cmd); 1516 ssid_bitmap, cmd);
1488 1517
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 4333306ccdee..c59d07567d90 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
90 90
91 if (ieee80211_is_probe_resp(fc)) 91 if (ieee80211_is_probe_resp(fc))
92 tx_flags |= TX_CMD_FLG_TSF; 92 tx_flags |= TX_CMD_FLG_TSF;
93 else if (ieee80211_is_back_req(fc))
94 tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
95 93
96 if (ieee80211_has_morefrags(fc)) 94 if (ieee80211_has_morefrags(fc))
97 tx_flags |= TX_CMD_FLG_MORE_FRAG; 95 tx_flags |= TX_CMD_FLG_MORE_FRAG;
@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
100 u8 *qc = ieee80211_get_qos_ctl(hdr); 98 u8 *qc = ieee80211_get_qos_ctl(hdr);
101 tx_cmd->tid_tspec = qc[0] & 0xf; 99 tx_cmd->tid_tspec = qc[0] & 0xf;
102 tx_flags &= ~TX_CMD_FLG_SEQ_CTL; 100 tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
101 } else if (ieee80211_is_back_req(fc)) {
102 struct ieee80211_bar *bar = (void *)skb->data;
103 u16 control = le16_to_cpu(bar->control);
104
105 tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
106 tx_cmd->tid_tspec = (control &
107 IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
108 IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
109 WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
103 } else { 110 } else {
104 tx_cmd->tid_tspec = IWL_TID_NON_QOS; 111 tx_cmd->tid_tspec = IWL_TID_NON_QOS;
105 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) 112 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
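
The tx.c hunk above now derives tid_tspec from the BAR control field instead of leaving BlockAckReq frames on the non-QoS TID. Assuming the mainline ieee80211.h values (mask 0xf000, shift 12), the extraction looks like this in isolation:

#include <stdint.h>
#include <stdio.h>

/* Values as defined in include/linux/ieee80211.h (assumed here). */
#define IEEE80211_BAR_CTRL_TID_INFO_MASK	0xf000
#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT	12

/* Pull the TID out of a (CPU-endian) BAR control field. */
static uint8_t bar_control_to_tid(uint16_t control)
{
	return (control & IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
	       IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
}

int main(void)
{
	/* TID 5 encoded in bits 15:12; other bits arbitrary. */
	uint16_t control = (5 << IEEE80211_BAR_CTRL_TID_INFO_SHIFT) | 0x0004;

	printf("TID = %u\n", bar_control_to_tid(control));	/* prints 5 */
	return 0;
}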
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index ea63fbd228ed..352b4f28f82c 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -114,17 +114,6 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov,
114 ret = of_overlay_apply_one(ov, tchild, child); 114 ret = of_overlay_apply_one(ov, tchild, child);
115 if (ret) 115 if (ret)
116 return ret; 116 return ret;
117
118 /* The properties are already copied, now do the child nodes */
119 for_each_child_of_node(child, grandchild) {
120 ret = of_overlay_apply_single_device_node(ov, tchild, grandchild);
121 if (ret) {
122 pr_err("%s: Failed to apply single node @%s/%s\n",
123 __func__, tchild->full_name,
124 grandchild->name);
125 return ret;
126 }
127 }
128 } 117 }
129 118
130 return ret; 119 return ret;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 5b33c6a21807..b0d50d70a8a1 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -188,7 +188,7 @@ static void of_dma_configure(struct device *dev)
188 size = dev->coherent_dma_mask; 188 size = dev->coherent_dma_mask;
189 } else { 189 } else {
190 offset = PFN_DOWN(paddr - dma_addr); 190 offset = PFN_DOWN(paddr - dma_addr);
191 dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset); 191 dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset);
192 } 192 }
193 dev->dma_pfn_offset = offset; 193 dev->dma_pfn_offset = offset;
194 194
@@ -566,6 +566,10 @@ static int of_platform_notify(struct notifier_block *nb,
566 if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS)) 566 if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS))
567 return NOTIFY_OK; /* not for us */ 567 return NOTIFY_OK; /* not for us */
568 568
 569 /* already populated? (driver used of_platform_populate() manually) */
570 if (of_node_check_flag(rd->dn, OF_POPULATED))
571 return NOTIFY_OK;
572
569 /* pdev_parent may be NULL when no bus platform device */ 573 /* pdev_parent may be NULL when no bus platform device */
570 pdev_parent = of_find_device_by_node(rd->dn->parent); 574 pdev_parent = of_find_device_by_node(rd->dn->parent);
571 pdev = of_platform_device_create(rd->dn, NULL, 575 pdev = of_platform_device_create(rd->dn, NULL,
@@ -581,6 +585,11 @@ static int of_platform_notify(struct notifier_block *nb,
581 break; 585 break;
582 586
583 case OF_RECONFIG_CHANGE_REMOVE: 587 case OF_RECONFIG_CHANGE_REMOVE:
588
589 /* already depopulated? */
590 if (!of_node_check_flag(rd->dn, OF_POPULATED))
591 return NOTIFY_OK;
592
584 /* find our device by node */ 593 /* find our device by node */
585 pdev = of_find_device_by_node(rd->dn); 594 pdev = of_find_device_by_node(rd->dn);
586 if (pdev == NULL) 595 if (pdev == NULL)
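
The two OF_POPULATED guards above exist for drivers that populate their own child nodes; the unittest change further down does exactly that from its probe routine. A minimal in-tree sketch of the pattern, with hypothetical driver names (mybus_*, "vendor,mybus"):

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

/* Hypothetical bus driver that creates platform devices for its own
 * child nodes.  of_platform_populate() flags each child OF_POPULATED,
 * so the notifier above skips them instead of creating duplicates.
 */
static int mybus_probe(struct platform_device *pdev)
{
	return of_platform_populate(pdev->dev.of_node, NULL, NULL,
				    &pdev->dev);
}

static int mybus_remove(struct platform_device *pdev)
{
	/* Clears OF_POPULATED again, matching the depopulate guard. */
	of_platform_depopulate(&pdev->dev);
	return 0;
}

static const struct of_device_id mybus_of_match[] = {
	{ .compatible = "vendor,mybus" },	/* hypothetical */
	{ }
};

static struct platform_driver mybus_driver = {
	.probe = mybus_probe,
	.remove = mybus_remove,
	.driver = {
		.name = "mybus",
		.of_match_table = mybus_of_match,
	},
};
module_platform_driver(mybus_driver);
MODULE_LICENSE("GPL");

Children created this way carry OF_POPULATED, so the notifier neither duplicates them on overlay apply nor tries to remove devices it never created.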
diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi
index 75976da22b2e..a2b687d5f324 100644
--- a/drivers/of/unittest-data/tests-overlay.dtsi
+++ b/drivers/of/unittest-data/tests-overlay.dtsi
@@ -176,5 +176,60 @@
176 }; 176 };
177 }; 177 };
178 178
179 overlay10 {
180 fragment@0 {
181 target-path = "/testcase-data/overlay-node/test-bus";
182 __overlay__ {
183
184 /* suppress DTC warning */
185 #address-cells = <1>;
186 #size-cells = <0>;
187
188 test-selftest10 {
189 compatible = "selftest";
190 status = "okay";
191 reg = <10>;
192
193 #address-cells = <1>;
194 #size-cells = <0>;
195
196 test-selftest101 {
197 compatible = "selftest";
198 status = "okay";
199 reg = <1>;
200 };
201
202 };
203 };
204 };
205 };
206
207 overlay11 {
208 fragment@0 {
209 target-path = "/testcase-data/overlay-node/test-bus";
210 __overlay__ {
211
212 /* suppress DTC warning */
213 #address-cells = <1>;
214 #size-cells = <0>;
215
216 test-selftest11 {
217 compatible = "selftest";
218 status = "okay";
219 reg = <11>;
220
221 #address-cells = <1>;
222 #size-cells = <0>;
223
224 test-selftest111 {
225 compatible = "selftest";
226 status = "okay";
227 reg = <1>;
228 };
229
230 };
231 };
232 };
233 };
179 }; 234 };
180}; 235};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 844838e11ef1..41a4a138f53b 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -978,6 +978,9 @@ static int selftest_probe(struct platform_device *pdev)
978 } 978 }
979 979
980 dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name); 980 dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
981
982 of_platform_populate(np, NULL, NULL, &pdev->dev);
983
981 return 0; 984 return 0;
982} 985}
983 986
@@ -1385,6 +1388,39 @@ static void of_selftest_overlay_8(void)
1385 selftest(1, "overlay test %d passed\n", 8); 1388 selftest(1, "overlay test %d passed\n", 8);
1386} 1389}
1387 1390
1391/* test insertion of a bus with parent devices */
1392static void of_selftest_overlay_10(void)
1393{
1394 int ret;
1395 char *child_path;
1396
 1397 /* device should enable */
1398 ret = of_selftest_apply_overlay_check(10, 10, 0, 1);
1399 if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 10))
1400 return;
1401
1402 child_path = kasprintf(GFP_KERNEL, "%s/test-selftest101",
1403 selftest_path(10));
1404 if (selftest(child_path, "overlay test %d failed; kasprintf\n", 10))
1405 return;
1406
1407 ret = of_path_platform_device_exists(child_path);
1408 kfree(child_path);
1409 if (selftest(ret, "overlay test %d failed; no child device\n", 10))
1410 return;
1411}
1412
1413/* test insertion of a bus with parent devices (and revert) */
1414static void of_selftest_overlay_11(void)
1415{
1416 int ret;
1417
 1418 /* device should enable */
1419 ret = of_selftest_apply_revert_overlay_check(11, 11, 0, 1);
1420 if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 11))
1421 return;
1422}
1423
1388static void __init of_selftest_overlay(void) 1424static void __init of_selftest_overlay(void)
1389{ 1425{
1390 struct device_node *bus_np = NULL; 1426 struct device_node *bus_np = NULL;
@@ -1433,6 +1469,9 @@ static void __init of_selftest_overlay(void)
1433 of_selftest_overlay_6(); 1469 of_selftest_overlay_6();
1434 of_selftest_overlay_8(); 1470 of_selftest_overlay_8();
1435 1471
1472 of_selftest_overlay_10();
1473 of_selftest_overlay_11();
1474
1436out: 1475out:
1437 of_node_put(bus_np); 1476 of_node_put(bus_np);
1438} 1477}
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 37e71ff6408d..dceb9ddfd99a 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -694,9 +694,8 @@ lba_fixup_bus(struct pci_bus *bus)
694 int i; 694 int i;
695 /* PCI-PCI Bridge */ 695 /* PCI-PCI Bridge */
696 pci_read_bridge_bases(bus); 696 pci_read_bridge_bases(bus);
697 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { 697 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
698 pci_claim_resource(bus->self, i); 698 pci_claim_bridge_resource(bus->self, i);
699 }
700 } else { 699 } else {
701 /* Host-PCI Bridge */ 700 /* Host-PCI Bridge */
702 int err; 701 int err;
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 73aef51a28f0..8fb16188cd82 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -228,6 +228,49 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
228} 228}
229EXPORT_SYMBOL(pci_bus_alloc_resource); 229EXPORT_SYMBOL(pci_bus_alloc_resource);
230 230
231/*
232 * The @idx resource of @dev should be a PCI-PCI bridge window. If this
233 * resource fits inside a window of an upstream bridge, do nothing. If it
234 * overlaps an upstream window but extends outside it, clip the resource so
235 * it fits completely inside.
236 */
237bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
238{
239 struct pci_bus *bus = dev->bus;
240 struct resource *res = &dev->resource[idx];
241 struct resource orig_res = *res;
242 struct resource *r;
243 int i;
244
245 pci_bus_for_each_resource(bus, r, i) {
246 resource_size_t start, end;
247
248 if (!r)
249 continue;
250
251 if (resource_type(res) != resource_type(r))
252 continue;
253
254 start = max(r->start, res->start);
255 end = min(r->end, res->end);
256
257 if (start > end)
258 continue; /* no overlap */
259
260 if (res->start == start && res->end == end)
261 return false; /* no change */
262
263 res->start = start;
264 res->end = end;
265 dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
266 &orig_res, res);
267
268 return true;
269 }
270
271 return false;
272}
273
231void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { } 274void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
232 275
233/** 276/**
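
To make the clipping rule concrete: the function keeps the intersection of the bridge window and the first overlapping upstream window of the same type. The standalone program below applies the same max/min interval arithmetic to plain integers; it mirrors pci_bus_clip_resource() only in shape and uses no kernel types.

#include <stdio.h>

struct window { unsigned long start, end; };

/* Clip *res so it fits inside *up; return 1 if it changed (mirrors the
 * per-window semantics of pci_bus_clip_resource() above). */
static int clip_window(struct window *res, const struct window *up)
{
	unsigned long start = res->start > up->start ? res->start : up->start;
	unsigned long end   = res->end   < up->end   ? res->end   : up->end;

	if (start > end)
		return 0;		/* no overlap: nothing to do */
	if (start == res->start && end == res->end)
		return 0;		/* already fully contained */
	res->start = start;
	res->end = end;
	return 1;
}

int main(void)
{
	struct window bridge   = { 0x1000, 0x3fff };	/* bridge window */
	struct window upstream = { 0x2000, 0x2fff };	/* upstream window */

	if (clip_window(&bridge, &upstream))
		printf("clipped to [%#lx-%#lx]\n", bridge.start, bridge.end);
	return 0;
}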
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index cab05f31223f..e9d4fd861ba1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3271,7 +3271,8 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3271{ 3271{
3272 struct pci_dev *pdev; 3272 struct pci_dev *pdev;
3273 3273
3274 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self) 3274 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
3275 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
3275 return -ENOTTY; 3276 return -ENOTTY;
3276 3277
3277 list_for_each_entry(pdev, &dev->bus->devices, bus_list) 3278 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
@@ -3305,7 +3306,8 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
3305{ 3306{
3306 struct pci_dev *pdev; 3307 struct pci_dev *pdev;
3307 3308
3308 if (dev->subordinate || !dev->slot) 3309 if (dev->subordinate || !dev->slot ||
3310 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
3309 return -ENOTTY; 3311 return -ENOTTY;
3310 3312
3311 list_for_each_entry(pdev, &dev->bus->devices, bus_list) 3313 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
@@ -3557,6 +3559,20 @@ int pci_try_reset_function(struct pci_dev *dev)
3557} 3559}
3558EXPORT_SYMBOL_GPL(pci_try_reset_function); 3560EXPORT_SYMBOL_GPL(pci_try_reset_function);
3559 3561
3562/* Do any devices on or below this bus prevent a bus reset? */
3563static bool pci_bus_resetable(struct pci_bus *bus)
3564{
3565 struct pci_dev *dev;
3566
3567 list_for_each_entry(dev, &bus->devices, bus_list) {
3568 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
3569 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
3570 return false;
3571 }
3572
3573 return true;
3574}
3575
3560/* Lock devices from the top of the tree down */ 3576/* Lock devices from the top of the tree down */
3561static void pci_bus_lock(struct pci_bus *bus) 3577static void pci_bus_lock(struct pci_bus *bus)
3562{ 3578{
@@ -3607,6 +3623,22 @@ unlock:
3607 return 0; 3623 return 0;
3608} 3624}
3609 3625
3626/* Do any devices on or below this slot prevent a bus reset? */
3627static bool pci_slot_resetable(struct pci_slot *slot)
3628{
3629 struct pci_dev *dev;
3630
3631 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3632 if (!dev->slot || dev->slot != slot)
3633 continue;
3634 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
3635 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
3636 return false;
3637 }
3638
3639 return true;
3640}
3641
3610/* Lock devices from the top of the tree down */ 3642/* Lock devices from the top of the tree down */
3611static void pci_slot_lock(struct pci_slot *slot) 3643static void pci_slot_lock(struct pci_slot *slot)
3612{ 3644{
@@ -3728,7 +3760,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe)
3728{ 3760{
3729 int rc; 3761 int rc;
3730 3762
3731 if (!slot) 3763 if (!slot || !pci_slot_resetable(slot))
3732 return -ENOTTY; 3764 return -ENOTTY;
3733 3765
3734 if (!probe) 3766 if (!probe)
@@ -3820,7 +3852,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot);
3820 3852
3821static int pci_bus_reset(struct pci_bus *bus, int probe) 3853static int pci_bus_reset(struct pci_bus *bus, int probe)
3822{ 3854{
3823 if (!bus->self) 3855 if (!bus->self || !pci_bus_resetable(bus))
3824 return -ENOTTY; 3856 return -ENOTTY;
3825 3857
3826 if (probe) 3858 if (probe)
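
pci_bus_resetable() and pci_slot_resetable() above are a depth-first veto walk: one PCI_DEV_FLAGS_NO_BUS_RESET device anywhere in the subtree blocks the reset. The same shape in miniature, over a toy tree (illustrative types only, not kernel structures):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for a PCI device with an optional subordinate bus. */
struct dev {
	bool no_bus_reset;	/* like PCI_DEV_FLAGS_NO_BUS_RESET */
	struct dev *children;	/* devices on the subordinate bus */
	size_t nr_children;
};

/* Mirrors pci_bus_resetable(): false if any descendant vetoes reset. */
static bool subtree_resetable(const struct dev *devs, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if (devs[i].no_bus_reset ||
		    (devs[i].children &&
		     !subtree_resetable(devs[i].children,
					devs[i].nr_children)))
			return false;
	}
	return true;
}

int main(void)
{
	struct dev leaf = { .no_bus_reset = true };
	struct dev root = { .children = &leaf, .nr_children = 1 };

	printf("%s\n", subtree_resetable(&root, 1) ? "resetable" : "vetoed");
	return 0;
}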
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 8aff29a804ff..d54632a1db43 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -208,6 +208,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus,
208void __pci_bus_assign_resources(const struct pci_bus *bus, 208void __pci_bus_assign_resources(const struct pci_bus *bus,
209 struct list_head *realloc_head, 209 struct list_head *realloc_head,
210 struct list_head *fail_head); 210 struct list_head *fail_head);
211bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
211 212
212/** 213/**
213 * pci_ari_enabled - query ARI forwarding status 214 * pci_ari_enabled - query ARI forwarding status
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ed6f89b6efe5..e52356aa09b8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3028,6 +3028,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
3028DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID, 3028DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
3029 quirk_broken_intx_masking); 3029 quirk_broken_intx_masking);
3030 3030
3031static void quirk_no_bus_reset(struct pci_dev *dev)
3032{
3033 dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
3034}
3035
3036/*
3037 * Atheros AR93xx chips do not behave correctly after a bus reset. The
3038 * device throws a Link Down error on AER-capable systems and, regardless
3039 * of AER, its config space is never accessible again; attempting to
3040 * access it typically hangs or resets the system.
3041 * http://www.spinics.net/lists/linux-pci/msg34797.html
3042 */
3043DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
3044
3031#ifdef CONFIG_ACPI 3045#ifdef CONFIG_ACPI
3032/* 3046/*
3033 * Apple: Shutdown Cactus Ridge Thunderbolt controller. 3047 * Apple: Shutdown Cactus Ridge Thunderbolt controller.
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 0482235eee92..e3e17f3c0f0f 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -530,9 +530,8 @@ EXPORT_SYMBOL(pci_setup_cardbus);
530 config space writes, so it's quite possible that an I/O window of 530 config space writes, so it's quite possible that an I/O window of
531 the bridge will have some undesirable address (e.g. 0) after the 531 the bridge will have some undesirable address (e.g. 0) after the
532 first write. Ditto 64-bit prefetchable MMIO. */ 532 first write. Ditto 64-bit prefetchable MMIO. */
533static void pci_setup_bridge_io(struct pci_bus *bus) 533static void pci_setup_bridge_io(struct pci_dev *bridge)
534{ 534{
535 struct pci_dev *bridge = bus->self;
536 struct resource *res; 535 struct resource *res;
537 struct pci_bus_region region; 536 struct pci_bus_region region;
538 unsigned long io_mask; 537 unsigned long io_mask;
@@ -545,7 +544,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
545 io_mask = PCI_IO_1K_RANGE_MASK; 544 io_mask = PCI_IO_1K_RANGE_MASK;
546 545
547 /* Set up the top and bottom of the PCI I/O segment for this bus. */ 546 /* Set up the top and bottom of the PCI I/O segment for this bus. */
548 res = bus->resource[0]; 547 res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
549 pcibios_resource_to_bus(bridge->bus, &region, res); 548 pcibios_resource_to_bus(bridge->bus, &region, res);
550 if (res->flags & IORESOURCE_IO) { 549 if (res->flags & IORESOURCE_IO) {
551 pci_read_config_word(bridge, PCI_IO_BASE, &l); 550 pci_read_config_word(bridge, PCI_IO_BASE, &l);
@@ -568,15 +567,14 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
568 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); 567 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
569} 568}
570 569
571static void pci_setup_bridge_mmio(struct pci_bus *bus) 570static void pci_setup_bridge_mmio(struct pci_dev *bridge)
572{ 571{
573 struct pci_dev *bridge = bus->self;
574 struct resource *res; 572 struct resource *res;
575 struct pci_bus_region region; 573 struct pci_bus_region region;
576 u32 l; 574 u32 l;
577 575
578 /* Set up the top and bottom of the PCI Memory segment for this bus. */ 576 /* Set up the top and bottom of the PCI Memory segment for this bus. */
579 res = bus->resource[1]; 577 res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
580 pcibios_resource_to_bus(bridge->bus, &region, res); 578 pcibios_resource_to_bus(bridge->bus, &region, res);
581 if (res->flags & IORESOURCE_MEM) { 579 if (res->flags & IORESOURCE_MEM) {
582 l = (region.start >> 16) & 0xfff0; 580 l = (region.start >> 16) & 0xfff0;
@@ -588,9 +586,8 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus)
588 pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); 586 pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
589} 587}
590 588
591static void pci_setup_bridge_mmio_pref(struct pci_bus *bus) 589static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
592{ 590{
593 struct pci_dev *bridge = bus->self;
594 struct resource *res; 591 struct resource *res;
595 struct pci_bus_region region; 592 struct pci_bus_region region;
596 u32 l, bu, lu; 593 u32 l, bu, lu;
@@ -602,7 +599,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
602 599
603 /* Set up PREF base/limit. */ 600 /* Set up PREF base/limit. */
604 bu = lu = 0; 601 bu = lu = 0;
605 res = bus->resource[2]; 602 res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
606 pcibios_resource_to_bus(bridge->bus, &region, res); 603 pcibios_resource_to_bus(bridge->bus, &region, res);
607 if (res->flags & IORESOURCE_PREFETCH) { 604 if (res->flags & IORESOURCE_PREFETCH) {
608 l = (region.start >> 16) & 0xfff0; 605 l = (region.start >> 16) & 0xfff0;
@@ -630,13 +627,13 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
630 &bus->busn_res); 627 &bus->busn_res);
631 628
632 if (type & IORESOURCE_IO) 629 if (type & IORESOURCE_IO)
633 pci_setup_bridge_io(bus); 630 pci_setup_bridge_io(bridge);
634 631
635 if (type & IORESOURCE_MEM) 632 if (type & IORESOURCE_MEM)
636 pci_setup_bridge_mmio(bus); 633 pci_setup_bridge_mmio(bridge);
637 634
638 if (type & IORESOURCE_PREFETCH) 635 if (type & IORESOURCE_PREFETCH)
639 pci_setup_bridge_mmio_pref(bus); 636 pci_setup_bridge_mmio_pref(bridge);
640 637
641 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); 638 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
642} 639}
@@ -649,6 +646,41 @@ void pci_setup_bridge(struct pci_bus *bus)
649 __pci_setup_bridge(bus, type); 646 __pci_setup_bridge(bus, type);
650} 647}
651 648
649
650int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
651{
652 if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
653 return 0;
654
655 if (pci_claim_resource(bridge, i) == 0)
656 return 0; /* claimed the window */
657
658 if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
659 return 0;
660
661 if (!pci_bus_clip_resource(bridge, i))
662 return -EINVAL; /* clipping didn't change anything */
663
664 switch (i - PCI_BRIDGE_RESOURCES) {
665 case 0:
666 pci_setup_bridge_io(bridge);
667 break;
668 case 1:
669 pci_setup_bridge_mmio(bridge);
670 break;
671 case 2:
672 pci_setup_bridge_mmio_pref(bridge);
673 break;
674 default:
675 return -EINVAL;
676 }
677
678 if (pci_claim_resource(bridge, i) == 0)
679 return 0; /* claimed a smaller window */
680
681 return -EINVAL;
682}
683
652/* Check whether the bridge supports optional I/O and 684/* Check whether the bridge supports optional I/O and
653 prefetchable memory ranges. If not, the respective 685 prefetchable memory ranges. If not, the respective
654 base/limit registers must be read-only and read as 0. */ 686 base/limit registers must be read-only and read as 0. */
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 9411eae39a4e..3d21efe11d7b 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2,11 +2,9 @@
2 * Driver for Dell laptop extras 2 * Driver for Dell laptop extras
3 * 3 *
4 * Copyright (c) Red Hat <mjg@redhat.com> 4 * Copyright (c) Red Hat <mjg@redhat.com>
5 * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
6 * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
7 * 5 *
8 * Based on documentation in the libsmbios package: 6 * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell
9 * Copyright (C) 2005-2014 Dell Inc. 7 * Inc.
10 * 8 *
11 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
@@ -34,13 +32,6 @@
34#include "../../firmware/dcdbas.h" 32#include "../../firmware/dcdbas.h"
35 33
36#define BRIGHTNESS_TOKEN 0x7d 34#define BRIGHTNESS_TOKEN 0x7d
37#define KBD_LED_OFF_TOKEN 0x01E1
38#define KBD_LED_ON_TOKEN 0x01E2
39#define KBD_LED_AUTO_TOKEN 0x01E3
40#define KBD_LED_AUTO_25_TOKEN 0x02EA
41#define KBD_LED_AUTO_50_TOKEN 0x02EB
42#define KBD_LED_AUTO_75_TOKEN 0x02EC
43#define KBD_LED_AUTO_100_TOKEN 0x02F6
44 35
45/* This structure will be modified by the firmware when we enter 36/* This structure will be modified by the firmware when we enter
46 * system management mode, hence the volatiles */ 37 * system management mode, hence the volatiles */
@@ -71,13 +62,6 @@ struct calling_interface_structure {
71 62
72struct quirk_entry { 63struct quirk_entry {
73 u8 touchpad_led; 64 u8 touchpad_led;
74
75 int needs_kbd_timeouts;
76 /*
77 * Ordered list of timeouts expressed in seconds.
78 * The list must end with -1
79 */
80 int kbd_timeouts[];
81}; 65};
82 66
83static struct quirk_entry *quirks; 67static struct quirk_entry *quirks;
@@ -92,15 +76,6 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
92 return 1; 76 return 1;
93} 77}
94 78
95/*
96 * These values come from Windows utility provided by Dell. If any other value
97 * is used then BIOS silently set timeout to 0 without any error message.
98 */
99static struct quirk_entry quirk_dell_xps13_9333 = {
100 .needs_kbd_timeouts = 1,
101 .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
102};
103
104static int da_command_address; 79static int da_command_address;
105static int da_command_code; 80static int da_command_code;
106static int da_num_tokens; 81static int da_num_tokens;
@@ -292,15 +267,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
292 }, 267 },
293 .driver_data = &quirk_dell_vostro_v130, 268 .driver_data = &quirk_dell_vostro_v130,
294 }, 269 },
295 {
296 .callback = dmi_matched,
297 .ident = "Dell XPS13 9333",
298 .matches = {
299 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
300 DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
301 },
302 .driver_data = &quirk_dell_xps13_9333,
303 },
304 { } 270 { }
305}; 271};
306 272
@@ -365,29 +331,17 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
365 } 331 }
366} 332}
367 333
368static int find_token_id(int tokenid) 334static int find_token_location(int tokenid)
369{ 335{
370 int i; 336 int i;
371
372 for (i = 0; i < da_num_tokens; i++) { 337 for (i = 0; i < da_num_tokens; i++) {
373 if (da_tokens[i].tokenID == tokenid) 338 if (da_tokens[i].tokenID == tokenid)
374 return i; 339 return da_tokens[i].location;
375 } 340 }
376 341
377 return -1; 342 return -1;
378} 343}
379 344
380static int find_token_location(int tokenid)
381{
382 int id;
383
384 id = find_token_id(tokenid);
385 if (id == -1)
386 return -1;
387
388 return da_tokens[id].location;
389}
390
391static struct calling_interface_buffer * 345static struct calling_interface_buffer *
392dell_send_request(struct calling_interface_buffer *buffer, int class, 346dell_send_request(struct calling_interface_buffer *buffer, int class,
393 int select) 347 int select)
@@ -408,20 +362,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
408 return buffer; 362 return buffer;
409} 363}
410 364
411static inline int dell_smi_error(int value)
412{
413 switch (value) {
414 case 0: /* Completed successfully */
415 return 0;
416 case -1: /* Completed with error */
417 return -EIO;
418 case -2: /* Function not supported */
419 return -ENXIO;
420 default: /* Unknown error */
421 return -EINVAL;
422 }
423}
424
425/* Derived from information in DellWirelessCtl.cpp: 365/* Derived from information in DellWirelessCtl.cpp:
426 Class 17, select 11 is radio control. It returns an array of 32-bit values. 366 Class 17, select 11 is radio control. It returns an array of 32-bit values.
427 367
@@ -776,7 +716,7 @@ static int dell_send_intensity(struct backlight_device *bd)
776 else 716 else
777 dell_send_request(buffer, 1, 1); 717 dell_send_request(buffer, 1, 1);
778 718
779 out: 719out:
780 release_buffer(); 720 release_buffer();
781 return ret; 721 return ret;
782} 722}
@@ -800,7 +740,7 @@ static int dell_get_intensity(struct backlight_device *bd)
800 740
801 ret = buffer->output[1]; 741 ret = buffer->output[1];
802 742
803 out: 743out:
804 release_buffer(); 744 release_buffer();
805 return ret; 745 return ret;
806} 746}
@@ -849,984 +789,6 @@ static void touchpad_led_exit(void)
849 led_classdev_unregister(&touchpad_led); 789 led_classdev_unregister(&touchpad_led);
850} 790}
851 791
852/*
853 * Derived from information in smbios-keyboard-ctl:
854 *
855 * cbClass 4
856 * cbSelect 11
857 * Keyboard illumination
858 * cbArg1 determines the function to be performed
859 *
860 * cbArg1 0x0 = Get Feature Information
861 * cbRES1 Standard return codes (0, -1, -2)
862 * cbRES2, word0 Bitmap of user-selectable modes
863 * bit 0 Always off (All systems)
864 * bit 1 Always on (Travis ATG, Siberia)
865 * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
866 * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
867 * bit 4 Auto: Input-activity-based On; input-activity based Off
868 * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
869 * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
870 * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
871 * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
872 * bits 9-15 Reserved for future use
873 * cbRES2, byte2 Reserved for future use
874 * cbRES2, byte3 Keyboard illumination type
875 * 0 Reserved
876 * 1 Tasklight
877 * 2 Backlight
878 * 3-255 Reserved for future use
879 * cbRES3, byte0 Supported auto keyboard illumination trigger bitmap.
880 * bit 0 Any keystroke
881 * bit 1 Touchpad activity
882 * bit 2 Pointing stick
883 * bit 3 Any mouse
884 * bits 4-7 Reserved for future use
885 * cbRES3, byte1 Supported timeout unit bitmap
886 * bit 0 Seconds
887 * bit 1 Minutes
888 * bit 2 Hours
889 * bit 3 Days
890 * bits 4-7 Reserved for future use
891 * cbRES3, byte2 Number of keyboard light brightness levels
892 * cbRES4, byte0 Maximum acceptable seconds value (0 if seconds not supported).
893 * cbRES4, byte1 Maximum acceptable minutes value (0 if minutes not supported).
894 * cbRES4, byte2 Maximum acceptable hours value (0 if hours not supported).
895 * cbRES4, byte3 Maximum acceptable days value (0 if days not supported)
896 *
897 * cbArg1 0x1 = Get Current State
898 * cbRES1 Standard return codes (0, -1, -2)
899 * cbRES2, word0 Bitmap of current mode state
900 * bit 0 Always off (All systems)
901 * bit 1 Always on (Travis ATG, Siberia)
902 * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
903 * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
904 * bit 4 Auto: Input-activity-based On; input-activity based Off
905 * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
906 * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
907 * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
908 * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
909 * bits 9-15 Reserved for future use
910 * Note: Only One bit can be set
911 * cbRES2, byte2 Currently active auto keyboard illumination triggers.
912 * bit 0 Any keystroke
913 * bit 1 Touchpad activity
914 * bit 2 Pointing stick
915 * bit 3 Any mouse
916 * bits 4-7 Reserved for future use
917 * cbRES2, byte3 Current Timeout
918 * bits 7:6 Timeout units indicator:
919 * 00b Seconds
920 * 01b Minutes
921 * 10b Hours
922 * 11b Days
923 * bits 5:0 Timeout value (0-63) in sec/min/hr/day
924 * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte
925 * are set upon return from the [Get feature information] call.
926 * cbRES3, byte0 Current setting of ALS value that turns the light on or off.
927 * cbRES3, byte1 Current ALS reading
928 * cbRES3, byte2 Current keyboard light level.
929 *
930 * cbArg1 0x2 = Set New State
931 * cbRES1 Standard return codes (0, -1, -2)
932 * cbArg2, word0 Bitmap of current mode state
933 * bit 0 Always off (All systems)
934 * bit 1 Always on (Travis ATG, Siberia)
935 * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
936 * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
937 * bit 4 Auto: Input-activity-based On; input-activity based Off
938 * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
939 * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
940 * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
941 * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
942 * bits 9-15 Reserved for future use
943 * Note: Only One bit can be set
944 * cbArg2, byte2 Desired auto keyboard illumination triggers. Must remain inactive to allow
945 * keyboard to turn off automatically.
946 * bit 0 Any keystroke
947 * bit 1 Touchpad activity
948 * bit 2 Pointing stick
949 * bit 3 Any mouse
950 * bits 4-7 Reserved for future use
951 * cbArg2, byte3 Desired Timeout
952 * bits 7:6 Timeout units indicator:
953 * 00b Seconds
954 * 01b Minutes
955 * 10b Hours
956 * 11b Days
957 * bits 5:0 Timeout value (0-63) in sec/min/hr/day
958 * cbArg3, byte0 Desired setting of ALS value that turns the light on or off.
959 * cbArg3, byte2 Desired keyboard light level.
960 */
961
962
963enum kbd_timeout_unit {
964 KBD_TIMEOUT_SECONDS = 0,
965 KBD_TIMEOUT_MINUTES,
966 KBD_TIMEOUT_HOURS,
967 KBD_TIMEOUT_DAYS,
968};
969
970enum kbd_mode_bit {
971 KBD_MODE_BIT_OFF = 0,
972 KBD_MODE_BIT_ON,
973 KBD_MODE_BIT_ALS,
974 KBD_MODE_BIT_TRIGGER_ALS,
975 KBD_MODE_BIT_TRIGGER,
976 KBD_MODE_BIT_TRIGGER_25,
977 KBD_MODE_BIT_TRIGGER_50,
978 KBD_MODE_BIT_TRIGGER_75,
979 KBD_MODE_BIT_TRIGGER_100,
980};
981
982#define kbd_is_als_mode_bit(bit) \
983 ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS)
984#define kbd_is_trigger_mode_bit(bit) \
985 ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100)
986#define kbd_is_level_mode_bit(bit) \
987 ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100)
988
989struct kbd_info {
990 u16 modes;
991 u8 type;
992 u8 triggers;
993 u8 levels;
994 u8 seconds;
995 u8 minutes;
996 u8 hours;
997 u8 days;
998};
999
1000struct kbd_state {
1001 u8 mode_bit;
1002 u8 triggers;
1003 u8 timeout_value;
1004 u8 timeout_unit;
1005 u8 als_setting;
1006 u8 als_value;
1007 u8 level;
1008};
1009
1010static const int kbd_tokens[] = {
1011 KBD_LED_OFF_TOKEN,
1012 KBD_LED_AUTO_25_TOKEN,
1013 KBD_LED_AUTO_50_TOKEN,
1014 KBD_LED_AUTO_75_TOKEN,
1015 KBD_LED_AUTO_100_TOKEN,
1016 KBD_LED_ON_TOKEN,
1017};
1018
1019static u16 kbd_token_bits;
1020
1021static struct kbd_info kbd_info;
1022static bool kbd_als_supported;
1023static bool kbd_triggers_supported;
1024
1025static u8 kbd_mode_levels[16];
1026static int kbd_mode_levels_count;
1027
1028static u8 kbd_previous_level;
1029static u8 kbd_previous_mode_bit;
1030
1031static bool kbd_led_present;
1032
1033/*
1034 * NOTE: there are three ways to set the keyboard backlight level.
1035 * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value).
1036 * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels).
1037 * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens)
1038 *
1039 * There are laptops which support only one of these methods. If we want to
1040 * support as many machines as possible we need to implement all three methods.
1041 * The first two methods use the kbd_state structure. The third uses SMBIOS
1042 * tokens. If kbd_info.levels == 0, the machine does not support setting the
1043 * keyboard backlight level via kbd_state.level.
1044 */
1045
1046static int kbd_get_info(struct kbd_info *info)
1047{
1048 u8 units;
1049 int ret;
1050
1051 get_buffer();
1052
1053 buffer->input[0] = 0x0;
1054 dell_send_request(buffer, 4, 11);
1055 ret = buffer->output[0];
1056
1057 if (ret) {
1058 ret = dell_smi_error(ret);
1059 goto out;
1060 }
1061
1062 info->modes = buffer->output[1] & 0xFFFF;
1063 info->type = (buffer->output[1] >> 24) & 0xFF;
1064 info->triggers = buffer->output[2] & 0xFF;
1065 units = (buffer->output[2] >> 8) & 0xFF;
1066 info->levels = (buffer->output[2] >> 16) & 0xFF;
1067
1068 if (units & BIT(0))
1069 info->seconds = (buffer->output[3] >> 0) & 0xFF;
1070 if (units & BIT(1))
1071 info->minutes = (buffer->output[3] >> 8) & 0xFF;
1072 if (units & BIT(2))
1073 info->hours = (buffer->output[3] >> 16) & 0xFF;
1074 if (units & BIT(3))
1075 info->days = (buffer->output[3] >> 24) & 0xFF;
1076
1077 out:
1078 release_buffer();
1079 return ret;
1080}
1081
1082static unsigned int kbd_get_max_level(void)
1083{
1084 if (kbd_info.levels != 0)
1085 return kbd_info.levels;
1086 if (kbd_mode_levels_count > 0)
1087 return kbd_mode_levels_count - 1;
1088 return 0;
1089}
1090
1091static int kbd_get_level(struct kbd_state *state)
1092{
1093 int i;
1094
1095 if (kbd_info.levels != 0)
1096 return state->level;
1097
1098 if (kbd_mode_levels_count > 0) {
1099 for (i = 0; i < kbd_mode_levels_count; ++i)
1100 if (kbd_mode_levels[i] == state->mode_bit)
1101 return i;
1102 return 0;
1103 }
1104
1105 return -EINVAL;
1106}
1107
1108static int kbd_set_level(struct kbd_state *state, u8 level)
1109{
1110 if (kbd_info.levels != 0) {
1111 if (level != 0)
1112 kbd_previous_level = level;
1113 if (state->level == level)
1114 return 0;
1115 state->level = level;
1116 if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF)
1117 state->mode_bit = kbd_previous_mode_bit;
1118 else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) {
1119 kbd_previous_mode_bit = state->mode_bit;
1120 state->mode_bit = KBD_MODE_BIT_OFF;
1121 }
1122 return 0;
1123 }
1124
1125 if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) {
1126 if (level != 0)
1127 kbd_previous_level = level;
1128 state->mode_bit = kbd_mode_levels[level];
1129 return 0;
1130 }
1131
1132 return -EINVAL;
1133}
1134
1135static int kbd_get_state(struct kbd_state *state)
1136{
1137 int ret;
1138
1139 get_buffer();
1140
1141 buffer->input[0] = 0x1;
1142 dell_send_request(buffer, 4, 11);
1143 ret = buffer->output[0];
1144
1145 if (ret) {
1146 ret = dell_smi_error(ret);
1147 goto out;
1148 }
1149
1150 state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
1151 if (state->mode_bit != 0)
1152 state->mode_bit--;
1153
1154 state->triggers = (buffer->output[1] >> 16) & 0xFF;
1155 state->timeout_value = (buffer->output[1] >> 24) & 0x3F;
1156 state->timeout_unit = (buffer->output[1] >> 30) & 0x3;
1157 state->als_setting = buffer->output[2] & 0xFF;
1158 state->als_value = (buffer->output[2] >> 8) & 0xFF;
1159 state->level = (buffer->output[2] >> 16) & 0xFF;
1160
1161 out:
1162 release_buffer();
1163 return ret;
1164}
1165
1166static int kbd_set_state(struct kbd_state *state)
1167{
1168 int ret;
1169
1170 get_buffer();
1171 buffer->input[0] = 0x2;
1172 buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
1173 buffer->input[1] |= (state->triggers & 0xFF) << 16;
1174 buffer->input[1] |= (state->timeout_value & 0x3F) << 24;
1175 buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
1176 buffer->input[2] = state->als_setting & 0xFF;
1177 buffer->input[2] |= (state->level & 0xFF) << 16;
1178 dell_send_request(buffer, 4, 11);
1179 ret = buffer->output[0];
1180 release_buffer();
1181
1182 return dell_smi_error(ret);
1183}
1184
1185static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
1186{
1187 int ret;
1188
1189 ret = kbd_set_state(state);
1190 if (ret == 0)
1191 return 0;
1192
1193 /*
 1194 * When setting the new state fails, try to restore the previous one.
1195 * This is needed on some machines where BIOS sets a default state when
1196 * setting a new state fails. This default state could be all off.
1197 */
1198
1199 if (kbd_set_state(old))
1200 pr_err("Setting old previous keyboard state failed\n");
1201
1202 return ret;
1203}
1204
1205static int kbd_set_token_bit(u8 bit)
1206{
1207 int id;
1208 int ret;
1209
1210 if (bit >= ARRAY_SIZE(kbd_tokens))
1211 return -EINVAL;
1212
1213 id = find_token_id(kbd_tokens[bit]);
1214 if (id == -1)
1215 return -EINVAL;
1216
1217 get_buffer();
1218 buffer->input[0] = da_tokens[id].location;
1219 buffer->input[1] = da_tokens[id].value;
1220 dell_send_request(buffer, 1, 0);
1221 ret = buffer->output[0];
1222 release_buffer();
1223
1224 return dell_smi_error(ret);
1225}
1226
1227static int kbd_get_token_bit(u8 bit)
1228{
1229 int id;
1230 int ret;
1231 int val;
1232
1233 if (bit >= ARRAY_SIZE(kbd_tokens))
1234 return -EINVAL;
1235
1236 id = find_token_id(kbd_tokens[bit]);
1237 if (id == -1)
1238 return -EINVAL;
1239
1240 get_buffer();
1241 buffer->input[0] = da_tokens[id].location;
1242 dell_send_request(buffer, 0, 0);
1243 ret = buffer->output[0];
1244 val = buffer->output[1];
1245 release_buffer();
1246
1247 if (ret)
1248 return dell_smi_error(ret);
1249
1250 return (val == da_tokens[id].value);
1251}
1252
1253static int kbd_get_first_active_token_bit(void)
1254{
1255 int i;
1256 int ret;
1257
1258 for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) {
1259 ret = kbd_get_token_bit(i);
1260 if (ret == 1)
1261 return i;
1262 }
1263
1264 return ret;
1265}
1266
1267static int kbd_get_valid_token_counts(void)
1268{
1269 return hweight16(kbd_token_bits);
1270}
1271
1272static inline int kbd_init_info(void)
1273{
1274 struct kbd_state state;
1275 int ret;
1276 int i;
1277
1278 ret = kbd_get_info(&kbd_info);
1279 if (ret)
1280 return ret;
1281
1282 kbd_get_state(&state);
1283
1284 /* NOTE: timeout value is stored in 6 bits so max value is 63 */
1285 if (kbd_info.seconds > 63)
1286 kbd_info.seconds = 63;
1287 if (kbd_info.minutes > 63)
1288 kbd_info.minutes = 63;
1289 if (kbd_info.hours > 63)
1290 kbd_info.hours = 63;
1291 if (kbd_info.days > 63)
1292 kbd_info.days = 63;
1293
1294 /* NOTE: On tested machines ON mode did not work and caused
1295 * problems (turned backlight off) so do not use it
1296 */
1297 kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON);
1298
1299 kbd_previous_level = kbd_get_level(&state);
1300 kbd_previous_mode_bit = state.mode_bit;
1301
1302 if (kbd_previous_level == 0 && kbd_get_max_level() != 0)
1303 kbd_previous_level = 1;
1304
1305 if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) {
1306 kbd_previous_mode_bit =
1307 ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF));
1308 if (kbd_previous_mode_bit != 0)
1309 kbd_previous_mode_bit--;
1310 }
1311
1312 if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) |
1313 BIT(KBD_MODE_BIT_TRIGGER_ALS)))
1314 kbd_als_supported = true;
1315
1316 if (kbd_info.modes & (
1317 BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) |
1318 BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) |
1319 BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100)
1320 ))
1321 kbd_triggers_supported = true;
1322
1323 /* kbd_mode_levels[0] is reserved, see below */
1324 for (i = 0; i < 16; ++i)
1325 if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes))
1326 kbd_mode_levels[1 + kbd_mode_levels_count++] = i;
1327
1328 /*
1329 * Find the first supported mode and assign to kbd_mode_levels[0].
1330 * This should be 0 (off), but we cannot depend on the BIOS to
1331 * support 0.
1332 */
1333 if (kbd_mode_levels_count > 0) {
1334 for (i = 0; i < 16; ++i) {
1335 if (BIT(i) & kbd_info.modes) {
1336 kbd_mode_levels[0] = i;
1337 break;
1338 }
1339 }
1340 kbd_mode_levels_count++;
1341 }
1342
1343 return 0;
1344
1345}
1346
1347static inline void kbd_init_tokens(void)
1348{
1349 int i;
1350
1351 for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i)
1352 if (find_token_id(kbd_tokens[i]) != -1)
1353 kbd_token_bits |= BIT(i);
1354}
1355
1356static void kbd_init(void)
1357{
1358 int ret;
1359
1360 ret = kbd_init_info();
1361 kbd_init_tokens();
1362
1363 if (kbd_token_bits != 0 || ret == 0)
1364 kbd_led_present = true;
1365}
1366
1367static ssize_t kbd_led_timeout_store(struct device *dev,
1368 struct device_attribute *attr,
1369 const char *buf, size_t count)
1370{
1371 struct kbd_state new_state;
1372 struct kbd_state state;
1373 bool convert;
1374 int value;
1375 int ret;
1376 char ch;
1377 u8 unit;
1378 int i;
1379
1380 ret = sscanf(buf, "%d %c", &value, &ch);
1381 if (ret < 1)
1382 return -EINVAL;
1383 else if (ret == 1)
1384 ch = 's';
1385
1386 if (value < 0)
1387 return -EINVAL;
1388
1389 convert = false;
1390
1391 switch (ch) {
1392 case 's':
1393 if (value > kbd_info.seconds)
1394 convert = true;
1395 unit = KBD_TIMEOUT_SECONDS;
1396 break;
1397 case 'm':
1398 if (value > kbd_info.minutes)
1399 convert = true;
1400 unit = KBD_TIMEOUT_MINUTES;
1401 break;
1402 case 'h':
1403 if (value > kbd_info.hours)
1404 convert = true;
1405 unit = KBD_TIMEOUT_HOURS;
1406 break;
1407 case 'd':
1408 if (value > kbd_info.days)
1409 convert = true;
1410 unit = KBD_TIMEOUT_DAYS;
1411 break;
1412 default:
1413 return -EINVAL;
1414 }
1415
1416 if (quirks && quirks->needs_kbd_timeouts)
1417 convert = true;
1418
1419 if (convert) {
1420 /* Convert value from current units to seconds */
1421 switch (unit) {
1422 case KBD_TIMEOUT_DAYS:
1423 value *= 24;
1424 case KBD_TIMEOUT_HOURS:
1425 value *= 60;
1426 case KBD_TIMEOUT_MINUTES:
1427 value *= 60;
1428 unit = KBD_TIMEOUT_SECONDS;
1429 }
1430
1431 if (quirks && quirks->needs_kbd_timeouts) {
1432 for (i = 0; quirks->kbd_timeouts[i] != -1; i++) {
1433 if (value <= quirks->kbd_timeouts[i]) {
1434 value = quirks->kbd_timeouts[i];
1435 break;
1436 }
1437 }
1438 }
1439
1440 if (value <= kbd_info.seconds && kbd_info.seconds) {
1441 unit = KBD_TIMEOUT_SECONDS;
1442 } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) {
1443 value /= 60;
1444 unit = KBD_TIMEOUT_MINUTES;
1445 } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) {
1446 value /= (60 * 60);
1447 unit = KBD_TIMEOUT_HOURS;
1448 } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) {
1449 value /= (60 * 60 * 24);
1450 unit = KBD_TIMEOUT_DAYS;
1451 } else {
1452 return -EINVAL;
1453 }
1454 }
1455
1456 ret = kbd_get_state(&state);
1457 if (ret)
1458 return ret;
1459
1460 new_state = state;
1461 new_state.timeout_value = value;
1462 new_state.timeout_unit = unit;
1463
1464 ret = kbd_set_state_safe(&new_state, &state);
1465 if (ret)
1466 return ret;
1467
1468 return count;
1469}
1470
1471static ssize_t kbd_led_timeout_show(struct device *dev,
1472 struct device_attribute *attr, char *buf)
1473{
1474 struct kbd_state state;
1475 int ret;
1476 int len;
1477
1478 ret = kbd_get_state(&state);
1479 if (ret)
1480 return ret;
1481
1482 len = sprintf(buf, "%d", state.timeout_value);
1483
1484 switch (state.timeout_unit) {
1485 case KBD_TIMEOUT_SECONDS:
1486 return len + sprintf(buf+len, "s\n");
1487 case KBD_TIMEOUT_MINUTES:
1488 return len + sprintf(buf+len, "m\n");
1489 case KBD_TIMEOUT_HOURS:
1490 return len + sprintf(buf+len, "h\n");
1491 case KBD_TIMEOUT_DAYS:
1492 return len + sprintf(buf+len, "d\n");
1493 default:
1494 return -EINVAL;
1495 }
1496
1497 return len;
1498}
1499
1500static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR,
1501 kbd_led_timeout_show, kbd_led_timeout_store);
1502
1503static const char * const kbd_led_triggers[] = {
1504 "keyboard",
1505 "touchpad",
 1506 /*"trackstick"*/ NULL, /* NOTE: trackstick is just an alias for touchpad */
1507 "mouse",
1508};
1509
1510static ssize_t kbd_led_triggers_store(struct device *dev,
1511 struct device_attribute *attr,
1512 const char *buf, size_t count)
1513{
1514 struct kbd_state new_state;
1515 struct kbd_state state;
1516 bool triggers_enabled = false;
1517 bool als_enabled = false;
1518 bool disable_als = false;
1519 bool enable_als = false;
1520 int trigger_bit = -1;
1521 char trigger[21];
1522 int i, ret;
1523
1524 ret = sscanf(buf, "%20s", trigger);
1525 if (ret != 1)
1526 return -EINVAL;
1527
1528 if (trigger[0] != '+' && trigger[0] != '-')
1529 return -EINVAL;
1530
1531 ret = kbd_get_state(&state);
1532 if (ret)
1533 return ret;
1534
1535 if (kbd_als_supported)
1536 als_enabled = kbd_is_als_mode_bit(state.mode_bit);
1537
1538 if (kbd_triggers_supported)
1539 triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
1540
1541 if (kbd_als_supported) {
1542 if (strcmp(trigger, "+als") == 0) {
1543 if (als_enabled)
1544 return count;
1545 enable_als = true;
1546 } else if (strcmp(trigger, "-als") == 0) {
1547 if (!als_enabled)
1548 return count;
1549 disable_als = true;
1550 }
1551 }
1552
1553 if (enable_als || disable_als) {
1554 new_state = state;
1555 if (enable_als) {
1556 if (triggers_enabled)
1557 new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
1558 else
1559 new_state.mode_bit = KBD_MODE_BIT_ALS;
1560 } else {
1561 if (triggers_enabled) {
1562 new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
1563 kbd_set_level(&new_state, kbd_previous_level);
1564 } else {
1565 new_state.mode_bit = KBD_MODE_BIT_ON;
1566 }
1567 }
1568 if (!(kbd_info.modes & BIT(new_state.mode_bit)))
1569 return -EINVAL;
1570 ret = kbd_set_state_safe(&new_state, &state);
1571 if (ret)
1572 return ret;
1573 kbd_previous_mode_bit = new_state.mode_bit;
1574 return count;
1575 }
1576
1577 if (kbd_triggers_supported) {
1578 for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
1579 if (!(kbd_info.triggers & BIT(i)))
1580 continue;
1581 if (!kbd_led_triggers[i])
1582 continue;
1583 if (strcmp(trigger+1, kbd_led_triggers[i]) != 0)
1584 continue;
1585 if (trigger[0] == '+' &&
1586 triggers_enabled && (state.triggers & BIT(i)))
1587 return count;
1588 if (trigger[0] == '-' &&
1589 (!triggers_enabled || !(state.triggers & BIT(i))))
1590 return count;
1591 trigger_bit = i;
1592 break;
1593 }
1594 }
1595
1596 if (trigger_bit != -1) {
1597 new_state = state;
1598 if (trigger[0] == '+')
1599 new_state.triggers |= BIT(trigger_bit);
1600 else {
1601 new_state.triggers &= ~BIT(trigger_bit);
1602 /* NOTE: trackstick bit (2) must be disabled when
1603 * disabling touchpad bit (1), otherwise touchpad
1604 * bit (1) will not be disabled */
1605 if (trigger_bit == 1)
1606 new_state.triggers &= ~BIT(2);
1607 }
1608 if ((kbd_info.triggers & new_state.triggers) !=
1609 new_state.triggers)
1610 return -EINVAL;
1611 if (new_state.triggers && !triggers_enabled) {
1612 if (als_enabled)
1613 new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
1614 else {
1615 new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
1616 kbd_set_level(&new_state, kbd_previous_level);
1617 }
1618 } else if (new_state.triggers == 0) {
1619 if (als_enabled)
1620 new_state.mode_bit = KBD_MODE_BIT_ALS;
1621 else
1622 kbd_set_level(&new_state, 0);
1623 }
1624 if (!(kbd_info.modes & BIT(new_state.mode_bit)))
1625 return -EINVAL;
1626 ret = kbd_set_state_safe(&new_state, &state);
1627 if (ret)
1628 return ret;
1629 if (new_state.mode_bit != KBD_MODE_BIT_OFF)
1630 kbd_previous_mode_bit = new_state.mode_bit;
1631 return count;
1632 }
1633
1634 return -EINVAL;
1635}
1636
1637static ssize_t kbd_led_triggers_show(struct device *dev,
1638 struct device_attribute *attr, char *buf)
1639{
1640 struct kbd_state state;
1641 bool triggers_enabled;
1642 int level, i, ret;
1643 int len = 0;
1644
1645 ret = kbd_get_state(&state);
1646 if (ret)
1647 return ret;
1648
1649 len = 0;
1650
1651 if (kbd_triggers_supported) {
1652 triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
1653 level = kbd_get_level(&state);
1654 for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
1655 if (!(kbd_info.triggers & BIT(i)))
1656 continue;
1657 if (!kbd_led_triggers[i])
1658 continue;
1659 if ((triggers_enabled || level <= 0) &&
1660 (state.triggers & BIT(i)))
1661 buf[len++] = '+';
1662 else
1663 buf[len++] = '-';
1664 len += sprintf(buf+len, "%s ", kbd_led_triggers[i]);
1665 }
1666 }
1667
1668 if (kbd_als_supported) {
1669 if (kbd_is_als_mode_bit(state.mode_bit))
1670 len += sprintf(buf+len, "+als ");
1671 else
1672 len += sprintf(buf+len, "-als ");
1673 }
1674
1675 if (len)
1676 buf[len - 1] = '\n';
1677
1678 return len;
1679}
1680
1681static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR,
1682 kbd_led_triggers_show, kbd_led_triggers_store);
1683
1684static ssize_t kbd_led_als_store(struct device *dev,
1685 struct device_attribute *attr,
1686 const char *buf, size_t count)
1687{
1688 struct kbd_state state;
1689 struct kbd_state new_state;
1690 u8 setting;
1691 int ret;
1692
1693 ret = kstrtou8(buf, 10, &setting);
1694 if (ret)
1695 return ret;
1696
1697 ret = kbd_get_state(&state);
1698 if (ret)
1699 return ret;
1700
1701 new_state = state;
1702 new_state.als_setting = setting;
1703
1704 ret = kbd_set_state_safe(&new_state, &state);
1705 if (ret)
1706 return ret;
1707
1708 return count;
1709}
1710
1711static ssize_t kbd_led_als_show(struct device *dev,
1712 struct device_attribute *attr, char *buf)
1713{
1714 struct kbd_state state;
1715 int ret;
1716
1717 ret = kbd_get_state(&state);
1718 if (ret)
1719 return ret;
1720
1721 return sprintf(buf, "%d\n", state.als_setting);
1722}
1723
1724static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR,
1725 kbd_led_als_show, kbd_led_als_store);
1726
1727static struct attribute *kbd_led_attrs[] = {
1728 &dev_attr_stop_timeout.attr,
1729 &dev_attr_start_triggers.attr,
1730 &dev_attr_als_setting.attr,
1731 NULL,
1732};
1733ATTRIBUTE_GROUPS(kbd_led);
1734
1735static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
1736{
1737 int ret;
1738 u16 num;
1739 struct kbd_state state;
1740
1741 if (kbd_get_max_level()) {
1742 ret = kbd_get_state(&state);
1743 if (ret)
1744 return 0;
1745 ret = kbd_get_level(&state);
1746 if (ret < 0)
1747 return 0;
1748 return ret;
1749 }
1750
1751 if (kbd_get_valid_token_counts()) {
1752 ret = kbd_get_first_active_token_bit();
1753 if (ret < 0)
1754 return 0;
1755 for (num = kbd_token_bits; num != 0 && ret > 0; --ret)
1756 num &= num - 1; /* clear the first bit set */
1757 if (num == 0)
1758 return 0;
1759 return ffs(num) - 1;
1760 }
1761
1762 pr_warn("Keyboard brightness level control not supported\n");
1763 return 0;
1764}
1765
1766static void kbd_led_level_set(struct led_classdev *led_cdev,
1767 enum led_brightness value)
1768{
1769 struct kbd_state state;
1770 struct kbd_state new_state;
1771 u16 num;
1772
1773 if (kbd_get_max_level()) {
1774 if (kbd_get_state(&state))
1775 return;
1776 new_state = state;
1777 if (kbd_set_level(&new_state, value))
1778 return;
1779 kbd_set_state_safe(&new_state, &state);
1780 return;
1781 }
1782
1783 if (kbd_get_valid_token_counts()) {
1784 for (num = kbd_token_bits; num != 0 && value > 0; --value)
1785 num &= num - 1; /* clear the first bit set */
1786 if (num == 0)
1787 return;
1788 kbd_set_token_bit(ffs(num) - 1);
1789 return;
1790 }
1791
1792 pr_warn("Keyboard brightness level control not supported\n");
1793}
1794
1795static struct led_classdev kbd_led = {
1796 .name = "dell::kbd_backlight",
1797 .brightness_set = kbd_led_level_set,
1798 .brightness_get = kbd_led_level_get,
1799 .groups = kbd_led_groups,
1800};
1801
1802static int __init kbd_led_init(struct device *dev)
1803{
1804 kbd_init();
1805 if (!kbd_led_present)
1806 return -ENODEV;
1807 kbd_led.max_brightness = kbd_get_max_level();
1808 if (!kbd_led.max_brightness) {
1809 kbd_led.max_brightness = kbd_get_valid_token_counts();
1810 if (kbd_led.max_brightness)
1811 kbd_led.max_brightness--;
1812 }
1813 return led_classdev_register(dev, &kbd_led);
1814}
1815
1816static void brightness_set_exit(struct led_classdev *led_cdev,
1817 enum led_brightness value)
1818{
1819 /* Don't change backlight level on exit */
1820}
1821
1822static void kbd_led_exit(void)
1823{
1824 if (!kbd_led_present)
1825 return;
1826 kbd_led.brightness_set = brightness_set_exit;
1827 led_classdev_unregister(&kbd_led);
1828}
1829
1830static int __init dell_init(void) 792static int __init dell_init(void)
1831{ 793{
1832 int max_intensity = 0; 794 int max_intensity = 0;
@@ -1879,8 +841,6 @@ static int __init dell_init(void)
1879 if (quirks && quirks->touchpad_led) 841 if (quirks && quirks->touchpad_led)
1880 touchpad_led_init(&platform_device->dev); 842 touchpad_led_init(&platform_device->dev);
1881 843
1882 kbd_led_init(&platform_device->dev);
1883
1884 dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL); 844 dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
1885 if (dell_laptop_dir != NULL) 845 if (dell_laptop_dir != NULL)
1886 debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL, 846 debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
@@ -1948,7 +908,6 @@ static void __exit dell_exit(void)
1948 debugfs_remove_recursive(dell_laptop_dir); 908 debugfs_remove_recursive(dell_laptop_dir);
1949 if (quirks && quirks->touchpad_led) 909 if (quirks && quirks->touchpad_led)
1950 touchpad_led_exit(); 910 touchpad_led_exit();
1951 kbd_led_exit();
1952 i8042_remove_filter(dell_laptop_i8042_filter); 911 i8042_remove_filter(dell_laptop_i8042_filter);
1953 cancel_delayed_work_sync(&dell_rfkill_work); 912 cancel_delayed_work_sync(&dell_rfkill_work);
1954 backlight_device_unregister(dell_backlight_device); 913 backlight_device_unregister(dell_backlight_device);
@@ -1965,7 +924,5 @@ module_init(dell_init);
1965module_exit(dell_exit); 924module_exit(dell_exit);
1966 925
1967MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); 926MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
1968MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
1969MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
1970MODULE_DESCRIPTION("Dell laptop driver"); 927MODULE_DESCRIPTION("Dell laptop driver");
1971MODULE_LICENSE("GPL"); 928MODULE_LICENSE("GPL");
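
One detail worth noting in the keyboard-backlight teardown above: kbd_led_exit() swaps a no-op handler into kbd_led.brightness_set before unregistering, so that the LED core's shutdown path cannot switch the backlight off under the user. A minimal sketch of the same pattern (the my_led_* names are hypothetical):

#include <linux/leds.h>

/* Sketch: preserve the hardware LED state across unregister by muting
 * the set hook first; unregistering can otherwise write brightness 0. */
static void my_led_set_noop(struct led_classdev *cdev,
			    enum led_brightness value)
{
	/* intentionally empty: leave the hardware as it is */
}

static void my_led_teardown(struct led_classdev *cdev)
{
	cdev->brightness_set = my_led_set_noop;
	led_classdev_unregister(cdev);
}
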
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e225711bb8bc..9c48fb32f660 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1488,7 +1488,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id)
1488} 1488}
1489EXPORT_SYMBOL_GPL(regulator_get_optional); 1489EXPORT_SYMBOL_GPL(regulator_get_optional);
1490 1490
1491/* Locks held by regulator_put() */ 1491/* regulator_list_mutex lock held by regulator_put() */
1492static void _regulator_put(struct regulator *regulator) 1492static void _regulator_put(struct regulator *regulator)
1493{ 1493{
1494 struct regulator_dev *rdev; 1494 struct regulator_dev *rdev;
@@ -1503,12 +1503,14 @@ static void _regulator_put(struct regulator *regulator)
1503 /* remove any sysfs entries */ 1503 /* remove any sysfs entries */
1504 if (regulator->dev) 1504 if (regulator->dev)
1505 sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); 1505 sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
1506 mutex_lock(&rdev->mutex);
1506 kfree(regulator->supply_name); 1507 kfree(regulator->supply_name);
1507 list_del(&regulator->list); 1508 list_del(&regulator->list);
1508 kfree(regulator); 1509 kfree(regulator);
1509 1510
1510 rdev->open_count--; 1511 rdev->open_count--;
1511 rdev->exclusive = 0; 1512 rdev->exclusive = 0;
1513 mutex_unlock(&rdev->mutex);
1512 1514
1513 module_put(rdev->owner); 1515 module_put(rdev->owner);
1514} 1516}
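
The regulator hunk above is a race fix: the consumer list and the open_count/exclusive bookkeeping live on the rdev and are also touched by paths that hold only rdev->mutex, so updating them under the global regulator_list_mutex alone left a window. The fix nests the per-device mutex inside the list mutex. The locking skeleton, stated generically (identifiers are placeholders, not the driver's):

/* Sketch: the global lock serializes object lifetime, the per-object
 * mutex guards that object's bookkeeping. Lock order: global, object. */
mutex_lock(&global_list_mutex);		/* held by the caller here */

mutex_lock(&obj->mutex);
list_del(&consumer->list);		/* consumer list lives on obj */
obj->open_count--;
obj->exclusive = 0;
mutex_unlock(&obj->mutex);

mutex_unlock(&global_list_mutex);
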
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 2809ae0d6bcd..ff828117798f 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -405,6 +405,40 @@ static struct regulator_ops s2mps14_reg_ops;
405 .enable_mask = S2MPS14_ENABLE_MASK \ 405 .enable_mask = S2MPS14_ENABLE_MASK \
406} 406}
407 407
408#define regulator_desc_s2mps13_buck7(num, min, step, min_sel) { \
409 .name = "BUCK"#num, \
410 .id = S2MPS13_BUCK##num, \
411 .ops = &s2mps14_reg_ops, \
412 .type = REGULATOR_VOLTAGE, \
413 .owner = THIS_MODULE, \
414 .min_uV = min, \
415 .uV_step = step, \
416 .linear_min_sel = min_sel, \
417 .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
418 .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \
419 .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \
420 .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
421 .enable_reg = S2MPS13_REG_B1CTRL + (num - 1) * 2, \
422 .enable_mask = S2MPS14_ENABLE_MASK \
423}
424
425#define regulator_desc_s2mps13_buck8_10(num, min, step, min_sel) { \
426 .name = "BUCK"#num, \
427 .id = S2MPS13_BUCK##num, \
428 .ops = &s2mps14_reg_ops, \
429 .type = REGULATOR_VOLTAGE, \
430 .owner = THIS_MODULE, \
431 .min_uV = min, \
432 .uV_step = step, \
433 .linear_min_sel = min_sel, \
434 .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
435 .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \
436 .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \
437 .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
438 .enable_reg = S2MPS13_REG_B1CTRL + (num) * 2 - 1, \
439 .enable_mask = S2MPS14_ENABLE_MASK \
440}
441
408static const struct regulator_desc s2mps13_regulators[] = { 442static const struct regulator_desc s2mps13_regulators[] = {
409 regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00), 443 regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00),
410 regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C), 444 regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C),
@@ -452,10 +486,10 @@ static const struct regulator_desc s2mps13_regulators[] = {
452 regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10), 486 regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10),
453 regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10), 487 regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10),
454 regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10), 488 regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10),
455 regulator_desc_s2mps13_buck(7, MIN_500_MV, STEP_6_25_MV, 0x10), 489 regulator_desc_s2mps13_buck7(7, MIN_500_MV, STEP_6_25_MV, 0x10),
456 regulator_desc_s2mps13_buck(8, MIN_1000_MV, STEP_12_5_MV, 0x20), 490 regulator_desc_s2mps13_buck8_10(8, MIN_1000_MV, STEP_12_5_MV, 0x20),
457 regulator_desc_s2mps13_buck(9, MIN_1000_MV, STEP_12_5_MV, 0x20), 491 regulator_desc_s2mps13_buck8_10(9, MIN_1000_MV, STEP_12_5_MV, 0x20),
458 regulator_desc_s2mps13_buck(10, MIN_500_MV, STEP_6_25_MV, 0x10), 492 regulator_desc_s2mps13_buck8_10(10, MIN_500_MV, STEP_6_25_MV, 0x10),
459}; 493};
460 494
461static int s2mps14_regulator_enable(struct regulator_dev *rdev) 495static int s2mps14_regulator_enable(struct regulator_dev *rdev)
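
The reason bucks 7 and 8-10 need dedicated macros is the register arithmetic: both compute vsel_reg identically with a `* 2 - 1` stride from B1OUT, but buck 7's enable register sits at a `(num - 1) * 2` offset from B1CTRL while bucks 8-10 use `* 2 - 1` there as well. Working the expansions (a consistency check, assuming the linear register map the macros imply):

/* regulator_desc_s2mps13_buck7(7, ...):
 *   vsel_reg   = S2MPS13_REG_B1OUT  + 7 * 2 - 1    = B1OUT  + 13
 *   enable_reg = S2MPS13_REG_B1CTRL + (7 - 1) * 2  = B1CTRL + 12
 *
 * regulator_desc_s2mps13_buck8_10(8, ...):
 *   vsel_reg   = S2MPS13_REG_B1OUT  + 8 * 2 - 1    = B1OUT  + 15
 *   enable_reg = S2MPS13_REG_B1CTRL + 8 * 2 - 1    = B1CTRL + 15
 */
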
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index b5e7c4670205..89ac1d5083c6 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
832static const struct platform_device_id s5m_rtc_id[] = { 832static const struct platform_device_id s5m_rtc_id[] = {
833 { "s5m-rtc", S5M8767X }, 833 { "s5m-rtc", S5M8767X },
834 { "s2mps14-rtc", S2MPS14X }, 834 { "s2mps14-rtc", S2MPS14X },
835 { },
835}; 836};
836 837
837static struct platform_driver s5m_rtc_driver = { 838static struct platform_driver s5m_rtc_driver = {
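
The rtc-s5m one-liner adds the terminator that a platform_device_id table requires: the platform bus match loop advances through the table until it reaches an entry with an empty name, so a table without the zeroed sentinel walks off the end. The required shape in general (the entries here are hypothetical):

static const struct platform_device_id my_rtc_ids[] = {
	{ "my-rtc-a", TYPE_A },
	{ "my-rtc-b", TYPE_B },
	{ }	/* sentinel: empty name stops the bus-match walk */
};
MODULE_DEVICE_TABLE(platform, my_rtc_ids);
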
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f407e3763432..642c77c76b84 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
1784 QETH_DBF_TEXT(SETUP, 2, "idxanswr"); 1784 QETH_DBF_TEXT(SETUP, 2, "idxanswr");
1785 card = CARD_FROM_CDEV(channel->ccwdev); 1785 card = CARD_FROM_CDEV(channel->ccwdev);
1786 iob = qeth_get_buffer(channel); 1786 iob = qeth_get_buffer(channel);
1787 if (!iob)
1788 return -ENOMEM;
1787 iob->callback = idx_reply_cb; 1789 iob->callback = idx_reply_cb;
1788 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); 1790 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1789 channel->ccw.count = QETH_BUFSIZE; 1791 channel->ccw.count = QETH_BUFSIZE;
@@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
1834 QETH_DBF_TEXT(SETUP, 2, "idxactch"); 1836 QETH_DBF_TEXT(SETUP, 2, "idxactch");
1835 1837
1836 iob = qeth_get_buffer(channel); 1838 iob = qeth_get_buffer(channel);
1839 if (!iob)
1840 return -ENOMEM;
1837 iob->callback = idx_reply_cb; 1841 iob->callback = idx_reply_cb;
1838 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); 1842 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1839 channel->ccw.count = IDX_ACTIVATE_SIZE; 1843 channel->ccw.count = IDX_ACTIVATE_SIZE;
@@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len,
2021} 2025}
2022EXPORT_SYMBOL_GPL(qeth_prepare_control_data); 2026EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
2023 2027
2028/**
2029 * qeth_send_control_data() - send control command to the card
2030 * @card: qeth_card structure pointer
2031 * @len: size of the command buffer
2032 * @iob: qeth_cmd_buffer pointer
2033 * @reply_cb: callback function pointer
2034 * @cb_card: pointer to the qeth_card structure
2035 * @cb_reply: pointer to the qeth_reply structure
2036 * @cb_cmd: pointer to the original iob for non-IPA
2037 * commands, or to the qeth_ipa_cmd structure
2038 * for the IPA commands.
2039 * @reply_param: private pointer passed to the callback
2040 *
2041 * Returns the value of the `return_code' field of the response
2042 * block returned by the hardware, or another error indication.
2043 * A value of zero indicates successful execution of the command.
2044 *
2045 * The callback function is called one or more times, with cb_cmd
2046 * pointing to the response returned by the hardware. The callback
2047 * must return non-zero if more reply blocks are expected, and zero
2048 * once the last or only reply block has been received. The callback
2049 * can retrieve the reply_param pointer from the 'param' field of
2050 * the qeth_reply structure.
2051 */
2052
2024int qeth_send_control_data(struct qeth_card *card, int len, 2053int qeth_send_control_data(struct qeth_card *card, int len,
2025 struct qeth_cmd_buffer *iob, 2054 struct qeth_cmd_buffer *iob,
2026 int (*reply_cb)(struct qeth_card *, struct qeth_reply *, 2055 int (*reply_cb)(struct qeth_card *cb_card,
2027 unsigned long), 2056 struct qeth_reply *cb_reply,
2057 unsigned long cb_cmd),
2028 void *reply_param) 2058 void *reply_param)
2029{ 2059{
2030 int rc; 2060 int rc;
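
To make the callback contract documented above concrete, a hypothetical reply callback could look like the sketch below; struct my_result and my_more_blocks_expected() are stand-ins for whatever the specific command's reply format defines.

static int my_reply_cb(struct qeth_card *card, struct qeth_reply *reply,
		       unsigned long cb_cmd)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)cb_cmd;
	struct my_result *res = reply->param;	/* the reply_param pointer */

	if (cmd->hdr.return_code) {
		res->rc = cmd->hdr.return_code;
		return 0;	/* error: no further reply blocks wanted */
	}
	/* ... copy fields from cmd into res ... */

	/* non-zero while more reply blocks are expected, zero on the
	 * last (or only) block, per the contract above */
	return my_more_blocks_expected(cmd);
}
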
@@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
2914 struct qeth_cmd_buffer *iob; 2944 struct qeth_cmd_buffer *iob;
2915 struct qeth_ipa_cmd *cmd; 2945 struct qeth_ipa_cmd *cmd;
2916 2946
2917 iob = qeth_wait_for_buffer(&card->write); 2947 iob = qeth_get_buffer(&card->write);
2918 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 2948 if (iob) {
2919 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); 2949 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2950 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
2951 } else {
2952 dev_warn(&card->gdev->dev,
2953 "The qeth driver ran out of channel command buffers\n");
2954 QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
2955 dev_name(&card->gdev->dev));
2956 }
2920 2957
2921 return iob; 2958 return iob;
2922} 2959}
@@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2932} 2969}
2933EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); 2970EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2934 2971
2972/**
2973 * qeth_send_ipa_cmd() - send an IPA command
2974 *
2975 * See qeth_send_control_data() for an explanation of the arguments.
2976 */
2977
2935int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 2978int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2936 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, 2979 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2937 unsigned long), 2980 unsigned long),
@@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card)
2968 QETH_DBF_TEXT(SETUP, 2, "strtlan"); 3011 QETH_DBF_TEXT(SETUP, 2, "strtlan");
2969 3012
2970 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); 3013 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
3014 if (!iob)
3015 return -ENOMEM;
2971 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 3016 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
2972 return rc; 3017 return rc;
2973} 3018}
@@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3013 3058
3014 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, 3059 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
3015 QETH_PROT_IPV4); 3060 QETH_PROT_IPV4);
3016 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 3061 if (iob) {
3017 cmd->data.setadapterparms.hdr.cmdlength = cmdlen; 3062 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3018 cmd->data.setadapterparms.hdr.command_code = command; 3063 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
3019 cmd->data.setadapterparms.hdr.used_total = 1; 3064 cmd->data.setadapterparms.hdr.command_code = command;
3020 cmd->data.setadapterparms.hdr.seq_no = 1; 3065 cmd->data.setadapterparms.hdr.used_total = 1;
3066 cmd->data.setadapterparms.hdr.seq_no = 1;
3067 }
3021 3068
3022 return iob; 3069 return iob;
3023} 3070}
@@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
3030 QETH_CARD_TEXT(card, 3, "queryadp"); 3077 QETH_CARD_TEXT(card, 3, "queryadp");
3031 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, 3078 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3032 sizeof(struct qeth_ipacmd_setadpparms)); 3079 sizeof(struct qeth_ipacmd_setadpparms));
3080 if (!iob)
3081 return -ENOMEM;
3033 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); 3082 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
3034 return rc; 3083 return rc;
3035} 3084}
@@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
3080 3129
3081 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); 3130 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
3082 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); 3131 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
3132 if (!iob)
3133 return -ENOMEM;
3083 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); 3134 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3084 return rc; 3135 return rc;
3085} 3136}
@@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card,
3119 return -ENOMEDIUM; 3170 return -ENOMEDIUM;
3120 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 3171 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
3121 sizeof(struct qeth_ipacmd_setadpparms_hdr)); 3172 sizeof(struct qeth_ipacmd_setadpparms_hdr));
3173 if (!iob)
3174 return -ENOMEM;
3122 return qeth_send_ipa_cmd(card, iob, 3175 return qeth_send_ipa_cmd(card, iob,
3123 qeth_query_switch_attributes_cb, sw_info); 3176 qeth_query_switch_attributes_cb, sw_info);
3124} 3177}
@@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card)
3146 3199
3147 QETH_DBF_TEXT(SETUP, 2, "qdiagass"); 3200 QETH_DBF_TEXT(SETUP, 2, "qdiagass");
3148 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); 3201 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
3202 if (!iob)
3203 return -ENOMEM;
3149 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 3204 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3150 cmd->data.diagass.subcmd_len = 16; 3205 cmd->data.diagass.subcmd_len = 16;
3151 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; 3206 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
@@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3197 3252
3198 QETH_DBF_TEXT(SETUP, 2, "diagtrap"); 3253 QETH_DBF_TEXT(SETUP, 2, "diagtrap");
3199 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); 3254 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
3255 if (!iob)
3256 return -ENOMEM;
3200 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 3257 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3201 cmd->data.diagass.subcmd_len = 80; 3258 cmd->data.diagass.subcmd_len = 80;
3202 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; 3259 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
@@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
4162 4219
4163 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, 4220 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4164 sizeof(struct qeth_ipacmd_setadpparms)); 4221 sizeof(struct qeth_ipacmd_setadpparms));
4222 if (!iob)
4223 return;
4165 cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); 4224 cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
4166 cmd->data.setadapterparms.data.mode = mode; 4225 cmd->data.setadapterparms.data.mode = mode;
4167 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); 4226 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
@@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4232 4291
4233 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, 4292 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4234 sizeof(struct qeth_ipacmd_setadpparms)); 4293 sizeof(struct qeth_ipacmd_setadpparms));
4294 if (!iob)
4295 return -ENOMEM;
4235 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 4296 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4236 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; 4297 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4237 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN; 4298 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
@@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4345 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, 4406 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4346 sizeof(struct qeth_ipacmd_setadpparms_hdr) + 4407 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4347 sizeof(struct qeth_set_access_ctrl)); 4408 sizeof(struct qeth_set_access_ctrl));
4409 if (!iob)
4410 return -ENOMEM;
4348 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 4411 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4349 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4412 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4350 access_ctrl_req->subcmd_code = isolation; 4413 access_ctrl_req->subcmd_code = isolation;
@@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4588 4651
4589 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, 4652 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
4590 QETH_SNMP_SETADP_CMDLENGTH + req_len); 4653 QETH_SNMP_SETADP_CMDLENGTH + req_len);
4654 if (!iob) {
4655 rc = -ENOMEM;
4656 goto out;
4657 }
4591 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 4658 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4592 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); 4659 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
4593 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, 4660 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
@@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4599 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 4666 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4600 rc = -EFAULT; 4667 rc = -EFAULT;
4601 } 4668 }
4602 4669out:
4603 kfree(ureq); 4670 kfree(ureq);
4604 kfree(qinfo.udata); 4671 kfree(qinfo.udata);
4605 return rc; 4672 return rc;
@@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4670 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4737 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4671 sizeof(struct qeth_ipacmd_setadpparms_hdr) + 4738 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4672 sizeof(struct qeth_query_oat)); 4739 sizeof(struct qeth_query_oat));
4740 if (!iob) {
4741 rc = -ENOMEM;
4742 goto out_free;
4743 }
4673 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 4744 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4674 oat_req = &cmd->data.setadapterparms.data.query_oat; 4745 oat_req = &cmd->data.setadapterparms.data.query_oat;
4675 oat_req->subcmd_code = oat_data.command; 4746 oat_req->subcmd_code = oat_data.command;
@@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card,
4735 return -EOPNOTSUPP; 4806 return -EOPNOTSUPP;
4736 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 4807 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
4737 sizeof(struct qeth_ipacmd_setadpparms_hdr)); 4808 sizeof(struct qeth_ipacmd_setadpparms_hdr));
4809 if (!iob)
4810 return -ENOMEM;
4738 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, 4811 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4739 (void *)carrier_info); 4812 (void *)carrier_info);
4740} 4813}
@@ -5060,11 +5133,23 @@ retriable:
5060 card->options.adp.supported_funcs = 0; 5133 card->options.adp.supported_funcs = 0;
5061 card->options.sbp.supported_funcs = 0; 5134 card->options.sbp.supported_funcs = 0;
5062 card->info.diagass_support = 0; 5135 card->info.diagass_support = 0;
5063 qeth_query_ipassists(card, QETH_PROT_IPV4); 5136 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5064 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) 5137 if (rc == -ENOMEM)
5065 qeth_query_setadapterparms(card); 5138 goto out;
5066 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) 5139 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5067 qeth_query_setdiagass(card); 5140 rc = qeth_query_setadapterparms(card);
5141 if (rc < 0) {
5142 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
5143 goto out;
5144 }
5145 }
5146 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5147 rc = qeth_query_setdiagass(card);
5148 if (rc < 0) {
5149 QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
5150 goto out;
5151 }
5152 }
5068 return 0; 5153 return 0;
5069out: 5154out:
5070 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 5155 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index d02cd1a67943..ce87ae72edbd 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *);
27static int qeth_l2_stop(struct net_device *); 27static int qeth_l2_stop(struct net_device *);
28static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); 28static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
29static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, 29static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
30 enum qeth_ipa_cmds, 30 enum qeth_ipa_cmds);
31 int (*reply_cb) (struct qeth_card *,
32 struct qeth_reply*,
33 unsigned long));
34static void qeth_l2_set_multicast_list(struct net_device *); 31static void qeth_l2_set_multicast_list(struct net_device *);
35static int qeth_l2_recover(void *); 32static int qeth_l2_recover(void *);
36static void qeth_bridgeport_query_support(struct qeth_card *card); 33static void qeth_bridgeport_query_support(struct qeth_card *card);
@@ -130,56 +127,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
130 return ndev; 127 return ndev;
131} 128}
132 129
133static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, 130static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
134 struct qeth_reply *reply,
135 unsigned long data)
136{ 131{
137 struct qeth_ipa_cmd *cmd; 132 int rc;
138 __u8 *mac;
139 133
140 QETH_CARD_TEXT(card, 2, "L2Sgmacb"); 134 if (retcode)
141 cmd = (struct qeth_ipa_cmd *) data; 135 QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
142 mac = &cmd->data.setdelmac.mac[0]; 136 switch (retcode) {
143 /* MAC already registered, needed in couple/uncouple case */ 137 case IPA_RC_SUCCESS:
144 if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) { 138 rc = 0;
145 QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n", 139 break;
146 mac, QETH_CARD_IFNAME(card)); 140 case IPA_RC_L2_UNSUPPORTED_CMD:
147 cmd->hdr.return_code = 0; 141 rc = -ENOSYS;
142 break;
143 case IPA_RC_L2_ADDR_TABLE_FULL:
144 rc = -ENOSPC;
145 break;
146 case IPA_RC_L2_DUP_MAC:
147 case IPA_RC_L2_DUP_LAYER3_MAC:
148 rc = -EEXIST;
149 break;
150 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
151 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
152 rc = -EPERM;
153 break;
154 case IPA_RC_L2_MAC_NOT_FOUND:
155 rc = -ENOENT;
156 break;
157 case -ENOMEM:
158 rc = -ENOMEM;
159 break;
160 default:
161 rc = -EIO;
162 break;
148 } 163 }
149 if (cmd->hdr.return_code) 164 return rc;
150 QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
151 mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
152 return 0;
153} 165}
154 166
155static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) 167static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
156{ 168{
157 QETH_CARD_TEXT(card, 2, "L2Sgmac"); 169 int rc;
158 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
159 qeth_l2_send_setgroupmac_cb);
160}
161
162static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
163 struct qeth_reply *reply,
164 unsigned long data)
165{
166 struct qeth_ipa_cmd *cmd;
167 __u8 *mac;
168 170
169 QETH_CARD_TEXT(card, 2, "L2Dgmacb"); 171 QETH_CARD_TEXT(card, 2, "L2Sgmac");
170 cmd = (struct qeth_ipa_cmd *) data; 172 rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
171 mac = &cmd->data.setdelmac.mac[0]; 173 IPA_CMD_SETGMAC));
172 if (cmd->hdr.return_code) 174 if (rc == -EEXIST)
173 QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n", 175 QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
174 mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code); 176 mac, QETH_CARD_IFNAME(card));
175 return 0; 177 else if (rc)
178 QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
179 mac, QETH_CARD_IFNAME(card), rc);
180 return rc;
176} 181}
177 182
178static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) 183static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
179{ 184{
185 int rc;
186
180 QETH_CARD_TEXT(card, 2, "L2Dgmac"); 187 QETH_CARD_TEXT(card, 2, "L2Dgmac");
181 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, 188 rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
182 qeth_l2_send_delgroupmac_cb); 189 IPA_CMD_DELGMAC));
190 if (rc)
191 QETH_DBF_MESSAGE(2,
192 "Could not delete group MAC %pM on %s: %d\n",
193 mac, QETH_CARD_IFNAME(card), rc);
194 return rc;
183} 195}
184 196
185static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) 197static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
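
qeth_setdel_makerc() above is a translation shim: adapter-specific IPA return codes become ordinary errno values at the driver boundary, which is what lets the later hunks test conditions such as duplicate registration with plain comparisons. Caller-side, the pattern looks roughly like this (names are hypothetical):

rc = qeth_setdel_makerc(card, my_send_setdelmac(card, mac, ipacmd));
if (rc == -EEXIST)
	rc = 0;		/* already registered: treat as success */
else if (rc)
	pr_debug("setdelmac failed: %d\n", rc);
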
@@ -197,10 +209,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
197 mc->is_vmac = vmac; 209 mc->is_vmac = vmac;
198 210
199 if (vmac) { 211 if (vmac) {
200 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, 212 rc = qeth_setdel_makerc(card,
201 NULL); 213 qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC));
202 } else { 214 } else {
203 rc = qeth_l2_send_setgroupmac(card, mac); 215 rc = qeth_setdel_makerc(card,
216 qeth_l2_send_setgroupmac(card, mac));
204 } 217 }
205 218
206 if (!rc) 219 if (!rc)
@@ -218,7 +231,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
218 if (del) { 231 if (del) {
219 if (mc->is_vmac) 232 if (mc->is_vmac)
220 qeth_l2_send_setdelmac(card, mc->mc_addr, 233 qeth_l2_send_setdelmac(card, mc->mc_addr,
221 IPA_CMD_DELVMAC, NULL); 234 IPA_CMD_DELVMAC);
222 else 235 else
223 qeth_l2_send_delgroupmac(card, mc->mc_addr); 236 qeth_l2_send_delgroupmac(card, mc->mc_addr);
224 } 237 }
@@ -291,6 +304,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
291 304
292 QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); 305 QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
293 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); 306 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
307 if (!iob)
308 return -ENOMEM;
294 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 309 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
295 cmd->data.setdelvlan.vlan_id = i; 310 cmd->data.setdelvlan.vlan_id = i;
296 return qeth_send_ipa_cmd(card, iob, 311 return qeth_send_ipa_cmd(card, iob,
@@ -313,6 +328,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
313{ 328{
314 struct qeth_card *card = dev->ml_priv; 329 struct qeth_card *card = dev->ml_priv;
315 struct qeth_vlan_vid *id; 330 struct qeth_vlan_vid *id;
331 int rc;
316 332
317 QETH_CARD_TEXT_(card, 4, "aid:%d", vid); 333 QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
318 if (!vid) 334 if (!vid)
@@ -328,7 +344,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
328 id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); 344 id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
329 if (id) { 345 if (id) {
330 id->vid = vid; 346 id->vid = vid;
331 qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); 347 rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
348 if (rc) {
349 kfree(id);
350 return rc;
351 }
332 spin_lock_bh(&card->vlanlock); 352 spin_lock_bh(&card->vlanlock);
333 list_add_tail(&id->list, &card->vid_list); 353 list_add_tail(&id->list, &card->vid_list);
334 spin_unlock_bh(&card->vlanlock); 354 spin_unlock_bh(&card->vlanlock);
@@ -343,6 +363,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
343{ 363{
344 struct qeth_vlan_vid *id, *tmpid = NULL; 364 struct qeth_vlan_vid *id, *tmpid = NULL;
345 struct qeth_card *card = dev->ml_priv; 365 struct qeth_card *card = dev->ml_priv;
366 int rc = 0;
346 367
347 QETH_CARD_TEXT_(card, 4, "kid:%d", vid); 368 QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
348 if (card->info.type == QETH_CARD_TYPE_OSM) { 369 if (card->info.type == QETH_CARD_TYPE_OSM) {
@@ -363,11 +384,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
363 } 384 }
364 spin_unlock_bh(&card->vlanlock); 385 spin_unlock_bh(&card->vlanlock);
365 if (tmpid) { 386 if (tmpid) {
366 qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); 387 rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
367 kfree(tmpid); 388 kfree(tmpid);
368 } 389 }
369 qeth_l2_set_multicast_list(card->dev); 390 qeth_l2_set_multicast_list(card->dev);
370 return 0; 391 return rc;
371} 392}
372 393
373static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) 394static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -539,91 +560,62 @@ out:
539} 560}
540 561
541static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, 562static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
542 enum qeth_ipa_cmds ipacmd, 563 enum qeth_ipa_cmds ipacmd)
543 int (*reply_cb) (struct qeth_card *,
544 struct qeth_reply*,
545 unsigned long))
546{ 564{
547 struct qeth_ipa_cmd *cmd; 565 struct qeth_ipa_cmd *cmd;
548 struct qeth_cmd_buffer *iob; 566 struct qeth_cmd_buffer *iob;
549 567
550 QETH_CARD_TEXT(card, 2, "L2sdmac"); 568 QETH_CARD_TEXT(card, 2, "L2sdmac");
551 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); 569 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
570 if (!iob)
571 return -ENOMEM;
552 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 572 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
553 cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; 573 cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
554 memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); 574 memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
555 return qeth_send_ipa_cmd(card, iob, reply_cb, NULL); 575 return qeth_send_ipa_cmd(card, iob, NULL, NULL);
556} 576}
557 577
558static int qeth_l2_send_setmac_cb(struct qeth_card *card, 578static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
559 struct qeth_reply *reply,
560 unsigned long data)
561{ 579{
562 struct qeth_ipa_cmd *cmd; 580 int rc;
563 581
564 QETH_CARD_TEXT(card, 2, "L2Smaccb"); 582 QETH_CARD_TEXT(card, 2, "L2Setmac");
565 cmd = (struct qeth_ipa_cmd *) data; 583 rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
566 if (cmd->hdr.return_code) { 584 IPA_CMD_SETVMAC));
567 QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code); 585 if (rc == 0) {
586 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
587 memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
588 dev_info(&card->gdev->dev,
589 "MAC address %pM successfully registered on device %s\n",
590 card->dev->dev_addr, card->dev->name);
591 } else {
568 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; 592 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
569 switch (cmd->hdr.return_code) { 593 switch (rc) {
570 case IPA_RC_L2_DUP_MAC: 594 case -EEXIST:
571 case IPA_RC_L2_DUP_LAYER3_MAC:
572 dev_warn(&card->gdev->dev, 595 dev_warn(&card->gdev->dev,
573 "MAC address %pM already exists\n", 596 "MAC address %pM already exists\n", mac);
574 cmd->data.setdelmac.mac);
575 break; 597 break;
576 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: 598 case -EPERM:
577 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
578 dev_warn(&card->gdev->dev, 599 dev_warn(&card->gdev->dev,
579 "MAC address %pM is not authorized\n", 600 "MAC address %pM is not authorized\n", mac);
580 cmd->data.setdelmac.mac);
581 break;
582 default:
583 break; 601 break;
584 } 602 }
585 } else {
586 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
587 memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
588 OSA_ADDR_LEN);
589 dev_info(&card->gdev->dev,
590 "MAC address %pM successfully registered on device %s\n",
591 card->dev->dev_addr, card->dev->name);
592 }
593 return 0;
594}
595
596static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
597{
598 QETH_CARD_TEXT(card, 2, "L2Setmac");
599 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
600 qeth_l2_send_setmac_cb);
601}
602
603static int qeth_l2_send_delmac_cb(struct qeth_card *card,
604 struct qeth_reply *reply,
605 unsigned long data)
606{
607 struct qeth_ipa_cmd *cmd;
608
609 QETH_CARD_TEXT(card, 2, "L2Dmaccb");
610 cmd = (struct qeth_ipa_cmd *) data;
611 if (cmd->hdr.return_code) {
612 QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
613 return 0;
614 } 603 }
615 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; 604 return rc;
616
617 return 0;
618} 605}
619 606
620static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) 607static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
621{ 608{
609 int rc;
610
622 QETH_CARD_TEXT(card, 2, "L2Delmac"); 611 QETH_CARD_TEXT(card, 2, "L2Delmac");
623 if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) 612 if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
624 return 0; 613 return 0;
625 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC, 614 rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
626 qeth_l2_send_delmac_cb); 615 IPA_CMD_DELVMAC));
616 if (rc == 0)
617 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
618 return rc;
627} 619}
628 620
629static int qeth_l2_request_initial_mac(struct qeth_card *card) 621static int qeth_l2_request_initial_mac(struct qeth_card *card)
@@ -651,7 +643,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
651 if (rc) { 643 if (rc) {
652 QETH_DBF_MESSAGE(2, "couldn't get MAC address on " 644 QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
653 "device %s: x%x\n", CARD_BUS_ID(card), rc); 645 "device %s: x%x\n", CARD_BUS_ID(card), rc);
654 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 646 QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
655 return rc; 647 return rc;
656 } 648 }
657 QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN); 649 QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
@@ -687,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
687 return -ERESTARTSYS; 679 return -ERESTARTSYS;
688 } 680 }
689 rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); 681 rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
690 if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND)) 682 if (!rc || (rc == -ENOENT))
691 rc = qeth_l2_send_setmac(card, addr->sa_data); 683 rc = qeth_l2_send_setmac(card, addr->sa_data);
692 return rc ? -EINVAL : 0; 684 return rc ? -EINVAL : 0;
693} 685}
@@ -996,7 +988,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
996 recover_flag = card->state; 988 recover_flag = card->state;
997 rc = qeth_core_hardsetup_card(card); 989 rc = qeth_core_hardsetup_card(card);
998 if (rc) { 990 if (rc) {
999 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 991 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
1000 rc = -ENODEV; 992 rc = -ENODEV;
1001 goto out_remove; 993 goto out_remove;
1002 } 994 }
@@ -1730,6 +1722,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
1730 1722
1731 QETH_CARD_TEXT(card, 2, "brqsuppo"); 1723 QETH_CARD_TEXT(card, 2, "brqsuppo");
1732 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); 1724 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
1725 if (!iob)
1726 return;
1733 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1727 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1734 cmd->data.sbp.hdr.cmdlength = 1728 cmd->data.sbp.hdr.cmdlength =
1735 sizeof(struct qeth_ipacmd_sbp_hdr) + 1729 sizeof(struct qeth_ipacmd_sbp_hdr) +
@@ -1805,6 +1799,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
1805 if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) 1799 if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
1806 return -EOPNOTSUPP; 1800 return -EOPNOTSUPP;
1807 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); 1801 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
1802 if (!iob)
1803 return -ENOMEM;
1808 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1804 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1809 cmd->data.sbp.hdr.cmdlength = 1805 cmd->data.sbp.hdr.cmdlength =
1810 sizeof(struct qeth_ipacmd_sbp_hdr); 1806 sizeof(struct qeth_ipacmd_sbp_hdr);
@@ -1817,9 +1813,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
1817 if (rc) 1813 if (rc)
1818 return rc; 1814 return rc;
1819 rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); 1815 rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
1820 if (rc) 1816 return rc;
1821 return rc;
1822 return 0;
1823} 1817}
1824EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); 1818EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
1825 1819
@@ -1873,6 +1867,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
1873 if (!(card->options.sbp.supported_funcs & setcmd)) 1867 if (!(card->options.sbp.supported_funcs & setcmd))
1874 return -EOPNOTSUPP; 1868 return -EOPNOTSUPP;
1875 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); 1869 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
1870 if (!iob)
1871 return -ENOMEM;
1876 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1872 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1877 cmd->data.sbp.hdr.cmdlength = cmdlength; 1873 cmd->data.sbp.hdr.cmdlength = cmdlength;
1878 cmd->data.sbp.hdr.command_code = setcmd; 1874 cmd->data.sbp.hdr.command_code = setcmd;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 625227ad16ee..e2a0ee845399 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -549,6 +549,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
549 QETH_CARD_TEXT(card, 4, "setdelmc"); 549 QETH_CARD_TEXT(card, 4, "setdelmc");
550 550
551 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); 551 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
552 if (!iob)
553 return -ENOMEM;
552 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 554 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
553 memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN); 555 memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
554 if (addr->proto == QETH_PROT_IPV6) 556 if (addr->proto == QETH_PROT_IPV6)
@@ -588,6 +590,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
588 QETH_CARD_TEXT_(card, 4, "flags%02X", flags); 590 QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
589 591
590 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); 592 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
593 if (!iob)
594 return -ENOMEM;
591 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 595 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
592 if (addr->proto == QETH_PROT_IPV6) { 596 if (addr->proto == QETH_PROT_IPV6) {
593 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, 597 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
@@ -616,6 +620,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
616 620
617 QETH_CARD_TEXT(card, 4, "setroutg"); 621 QETH_CARD_TEXT(card, 4, "setroutg");
618 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); 622 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
623 if (!iob)
624 return -ENOMEM;
619 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 625 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
620 cmd->data.setrtg.type = (type); 626 cmd->data.setrtg.type = (type);
621 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 627 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
@@ -1049,12 +1055,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
1049 QETH_CARD_TEXT(card, 4, "getasscm"); 1055 QETH_CARD_TEXT(card, 4, "getasscm");
1050 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); 1056 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
1051 1057
1052 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1058 if (iob) {
1053 cmd->data.setassparms.hdr.assist_no = ipa_func; 1059 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1054 cmd->data.setassparms.hdr.length = 8 + len; 1060 cmd->data.setassparms.hdr.assist_no = ipa_func;
1055 cmd->data.setassparms.hdr.command_code = cmd_code; 1061 cmd->data.setassparms.hdr.length = 8 + len;
1056 cmd->data.setassparms.hdr.return_code = 0; 1062 cmd->data.setassparms.hdr.command_code = cmd_code;
1057 cmd->data.setassparms.hdr.seq_no = 0; 1063 cmd->data.setassparms.hdr.return_code = 0;
1064 cmd->data.setassparms.hdr.seq_no = 0;
1065 }
1058 1066
1059 return iob; 1067 return iob;
1060} 1068}
@@ -1090,6 +1098,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
1090 QETH_CARD_TEXT(card, 4, "simassp6"); 1098 QETH_CARD_TEXT(card, 4, "simassp6");
1091 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, 1099 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
1092 0, QETH_PROT_IPV6); 1100 0, QETH_PROT_IPV6);
1101 if (!iob)
1102 return -ENOMEM;
1093 rc = qeth_l3_send_setassparms(card, iob, 0, 0, 1103 rc = qeth_l3_send_setassparms(card, iob, 0, 0,
1094 qeth_l3_default_setassparms_cb, NULL); 1104 qeth_l3_default_setassparms_cb, NULL);
1095 return rc; 1105 return rc;
@@ -1108,6 +1118,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
1108 length = sizeof(__u32); 1118 length = sizeof(__u32);
1109 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, 1119 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
1110 length, QETH_PROT_IPV4); 1120 length, QETH_PROT_IPV4);
1121 if (!iob)
1122 return -ENOMEM;
1111 rc = qeth_l3_send_setassparms(card, iob, length, data, 1123 rc = qeth_l3_send_setassparms(card, iob, length, data,
1112 qeth_l3_default_setassparms_cb, NULL); 1124 qeth_l3_default_setassparms_cb, NULL);
1113 return rc; 1125 return rc;
@@ -1494,6 +1506,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
1494 1506
1495 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, 1507 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
1496 QETH_PROT_IPV6); 1508 QETH_PROT_IPV6);
1509 if (!iob)
1510 return -ENOMEM;
1497 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1511 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1498 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 1512 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1499 card->info.unique_id; 1513 card->info.unique_id;
@@ -1537,6 +1551,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
1537 1551
1538 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, 1552 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
1539 QETH_PROT_IPV6); 1553 QETH_PROT_IPV6);
1554 if (!iob)
1555 return -ENOMEM;
1540 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1556 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1541 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 1557 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1542 card->info.unique_id; 1558 card->info.unique_id;
@@ -1611,6 +1627,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
1611 QETH_DBF_TEXT(SETUP, 2, "diagtrac"); 1627 QETH_DBF_TEXT(SETUP, 2, "diagtrac");
1612 1628
1613 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); 1629 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
1630 if (!iob)
1631 return -ENOMEM;
1614 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1632 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1615 cmd->data.diagass.subcmd_len = 16; 1633 cmd->data.diagass.subcmd_len = 16;
1616 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE; 1634 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
@@ -2442,6 +2460,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
2442 IPA_CMD_ASS_ARP_QUERY_INFO, 2460 IPA_CMD_ASS_ARP_QUERY_INFO,
2443 sizeof(struct qeth_arp_query_data) - sizeof(char), 2461 sizeof(struct qeth_arp_query_data) - sizeof(char),
2444 prot); 2462 prot);
2463 if (!iob)
2464 return -ENOMEM;
2445 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 2465 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2446 cmd->data.setassparms.data.query_arp.request_bits = 0x000F; 2466 cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
2447 cmd->data.setassparms.data.query_arp.reply_bits = 0; 2467 cmd->data.setassparms.data.query_arp.reply_bits = 0;
@@ -2535,6 +2555,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
2535 IPA_CMD_ASS_ARP_ADD_ENTRY, 2555 IPA_CMD_ASS_ARP_ADD_ENTRY,
2536 sizeof(struct qeth_arp_cache_entry), 2556 sizeof(struct qeth_arp_cache_entry),
2537 QETH_PROT_IPV4); 2557 QETH_PROT_IPV4);
2558 if (!iob)
2559 return -ENOMEM;
2538 rc = qeth_l3_send_setassparms(card, iob, 2560 rc = qeth_l3_send_setassparms(card, iob,
2539 sizeof(struct qeth_arp_cache_entry), 2561 sizeof(struct qeth_arp_cache_entry),
2540 (unsigned long) entry, 2562 (unsigned long) entry,
@@ -2574,6 +2596,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
2574 IPA_CMD_ASS_ARP_REMOVE_ENTRY, 2596 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
2575 12, 2597 12,
2576 QETH_PROT_IPV4); 2598 QETH_PROT_IPV4);
2599 if (!iob)
2600 return -ENOMEM;
2577 rc = qeth_l3_send_setassparms(card, iob, 2601 rc = qeth_l3_send_setassparms(card, iob,
2578 12, (unsigned long)buf, 2602 12, (unsigned long)buf,
2579 qeth_l3_default_setassparms_cb, NULL); 2603 qeth_l3_default_setassparms_cb, NULL);
@@ -3262,6 +3286,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
3262 3286
3263static int qeth_l3_setup_netdev(struct qeth_card *card) 3287static int qeth_l3_setup_netdev(struct qeth_card *card)
3264{ 3288{
3289 int rc;
3290
3265 if (card->info.type == QETH_CARD_TYPE_OSD || 3291 if (card->info.type == QETH_CARD_TYPE_OSD ||
3266 card->info.type == QETH_CARD_TYPE_OSX) { 3292 card->info.type == QETH_CARD_TYPE_OSX) {
3267 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || 3293 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -3293,7 +3319,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3293 return -ENODEV; 3319 return -ENODEV;
3294 card->dev->flags |= IFF_NOARP; 3320 card->dev->flags |= IFF_NOARP;
3295 card->dev->netdev_ops = &qeth_l3_netdev_ops; 3321 card->dev->netdev_ops = &qeth_l3_netdev_ops;
3296 qeth_l3_iqd_read_initial_mac(card); 3322 rc = qeth_l3_iqd_read_initial_mac(card);
3323 if (rc)
3324 return rc;
3297 if (card->options.hsuid[0]) 3325 if (card->options.hsuid[0])
3298 memcpy(card->dev->perm_addr, card->options.hsuid, 9); 3326 memcpy(card->dev->perm_addr, card->options.hsuid, 9);
3299 } else 3327 } else
@@ -3360,7 +3388,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3360 recover_flag = card->state; 3388 recover_flag = card->state;
3361 rc = qeth_core_hardsetup_card(card); 3389 rc = qeth_core_hardsetup_card(card);
3362 if (rc) { 3390 if (rc) {
3363 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3391 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
3364 rc = -ENODEV; 3392 rc = -ENODEV;
3365 goto out_remove; 3393 goto out_remove;
3366 } 3394 }
@@ -3401,7 +3429,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3401contin: 3429contin:
3402 rc = qeth_l3_setadapter_parms(card); 3430 rc = qeth_l3_setadapter_parms(card);
3403 if (rc) 3431 if (rc)
3404 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3432 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
3405 if (!card->options.sniffer) { 3433 if (!card->options.sniffer) {
3406 rc = qeth_l3_start_ipassists(card); 3434 rc = qeth_l3_start_ipassists(card);
3407 if (rc) { 3435 if (rc) {
@@ -3410,10 +3438,10 @@ contin:
3410 } 3438 }
3411 rc = qeth_l3_setrouting_v4(card); 3439 rc = qeth_l3_setrouting_v4(card);
3412 if (rc) 3440 if (rc)
3413 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); 3441 QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
3414 rc = qeth_l3_setrouting_v6(card); 3442 rc = qeth_l3_setrouting_v6(card);
3415 if (rc) 3443 if (rc)
3416 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 3444 QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
3417 } 3445 }
3418 netif_tx_disable(card->dev); 3446 netif_tx_disable(card->dev);
3419 3447
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index df4e27cd996a..9219953ee949 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
683 ipr_reinit_ipr_cmnd(ipr_cmd); 683 ipr_reinit_ipr_cmnd(ipr_cmd);
684 ipr_cmd->u.scratch = 0; 684 ipr_cmd->u.scratch = 0;
685 ipr_cmd->sibling = NULL; 685 ipr_cmd->sibling = NULL;
686 ipr_cmd->eh_comp = NULL;
686 ipr_cmd->fast_done = fast_done; 687 ipr_cmd->fast_done = fast_done;
687 init_timer(&ipr_cmd->timer); 688 init_timer(&ipr_cmd->timer);
688} 689}
@@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
848 849
849 scsi_dma_unmap(ipr_cmd->scsi_cmd); 850 scsi_dma_unmap(ipr_cmd->scsi_cmd);
850 scsi_cmd->scsi_done(scsi_cmd); 851 scsi_cmd->scsi_done(scsi_cmd);
852 if (ipr_cmd->eh_comp)
853 complete(ipr_cmd->eh_comp);
851 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 854 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
852} 855}
853 856
@@ -4811,6 +4814,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
4811 return rc; 4814 return rc;
4812} 4815}
4813 4816
4817/**
4818 * ipr_match_lun - Match function for specified LUN
4819 * @ipr_cmd: ipr command struct
4820 * @device: device to match (sdev)
4821 *
4822 * Returns:
4823 * 1 if command matches sdev / 0 if command does not match sdev
4824 **/
4825static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4826{
4827 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4828 return 1;
4829 return 0;
4830}
4831
4832/**
4833 * ipr_wait_for_ops - Wait for matching commands to complete
4834 * @ioa_cfg: ioa config struct
4835 * @device: device to match (sdev)
4836 * @match: match function to use
4837 *
4838 * Returns:
4839 * SUCCESS / FAILED
4840 **/
4841static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4842 int (*match)(struct ipr_cmnd *, void *))
4843{
4844 struct ipr_cmnd *ipr_cmd;
4845 int wait;
4846 unsigned long flags;
4847 struct ipr_hrr_queue *hrrq;
4848 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4849 DECLARE_COMPLETION_ONSTACK(comp);
4850
4851 ENTER;
4852 do {
4853 wait = 0;
4854
4855 for_each_hrrq(hrrq, ioa_cfg) {
4856 spin_lock_irqsave(hrrq->lock, flags);
4857 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4858 if (match(ipr_cmd, device)) {
4859 ipr_cmd->eh_comp = &comp;
4860 wait++;
4861 }
4862 }
4863 spin_unlock_irqrestore(hrrq->lock, flags);
4864 }
4865
4866 if (wait) {
4867 timeout = wait_for_completion_timeout(&comp, timeout);
4868
4869 if (!timeout) {
4870 wait = 0;
4871
4872 for_each_hrrq(hrrq, ioa_cfg) {
4873 spin_lock_irqsave(hrrq->lock, flags);
4874 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4875 if (match(ipr_cmd, device)) {
4876 ipr_cmd->eh_comp = NULL;
4877 wait++;
4878 }
4879 }
4880 spin_unlock_irqrestore(hrrq->lock, flags);
4881 }
4882
4883 if (wait)
4884 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4885 LEAVE;
4886 return wait ? FAILED : SUCCESS;
4887 }
4888 }
4889 } while (wait);
4890
4891 LEAVE;
4892 return SUCCESS;
4893}
4894
4814static int ipr_eh_host_reset(struct scsi_cmnd *cmd) 4895static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4815{ 4896{
4816 struct ipr_ioa_cfg *ioa_cfg; 4897 struct ipr_ioa_cfg *ioa_cfg;
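
ipr_wait_for_ops() above is a rendezvous between the error-handling thread and the normal completion path: under each hrrq lock it publishes an on-stack completion in every matching pending command, then sleeps until ipr_scsi_eh_done() (which now calls complete() on eh_comp) has drained them, looping because new matching commands may arrive meanwhile. Note how the timeout branch clears the eh_comp pointers again before the stack frame goes away. The pattern, reduced to a sketch (the tag/untag helpers and MY_EH_TIMEOUT are hypothetical):

DECLARE_COMPLETION_ONSTACK(comp);
signed long timeout = MY_EH_TIMEOUT;	/* assumed bound, in jiffies */

do {
	int tagged = tag_matching_cmds(&comp);	/* under the queue locks */
	if (!tagged)
		break;			/* nothing left in flight */
	timeout = wait_for_completion_timeout(&comp, timeout);
	if (!timeout) {
		untag_matching_cmds();	/* never leak a stack pointer */
		return FAILED;
	}
} while (1);
return SUCCESS;
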
@@ -5030,11 +5111,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5030static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) 5111static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5031{ 5112{
5032 int rc; 5113 int rc;
5114 struct ipr_ioa_cfg *ioa_cfg;
5115
5116 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5033 5117
5034 spin_lock_irq(cmd->device->host->host_lock); 5118 spin_lock_irq(cmd->device->host->host_lock);
5035 rc = __ipr_eh_dev_reset(cmd); 5119 rc = __ipr_eh_dev_reset(cmd);
5036 spin_unlock_irq(cmd->device->host->host_lock); 5120 spin_unlock_irq(cmd->device->host->host_lock);
5037 5121
5122 if (rc == SUCCESS)
5123 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5124
5038 return rc; 5125 return rc;
5039} 5126}
5040 5127
@@ -5234,13 +5321,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5234{ 5321{
5235 unsigned long flags; 5322 unsigned long flags;
5236 int rc; 5323 int rc;
5324 struct ipr_ioa_cfg *ioa_cfg;
5237 5325
5238 ENTER; 5326 ENTER;
5239 5327
5328 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5329
5240 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); 5330 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5241 rc = ipr_cancel_op(scsi_cmd); 5331 rc = ipr_cancel_op(scsi_cmd);
5242 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); 5332 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5243 5333
5334 if (rc == SUCCESS)
5335 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5244 LEAVE; 5336 LEAVE;
5245 return rc; 5337 return rc;
5246} 5338}
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b4f3eec51bc9..ec03b42fa2b9 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1606,6 +1606,7 @@ struct ipr_cmnd {
1606 struct scsi_device *sdev; 1606 struct scsi_device *sdev;
1607 } u; 1607 } u;
1608 1608
1609 struct completion *eh_comp;
1609 struct ipr_hrr_queue *hrrq; 1610 struct ipr_hrr_queue *hrrq;
1610 struct ipr_ioa_cfg *ioa_cfg; 1611 struct ipr_ioa_cfg *ioa_cfg;
1611}; 1612};
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index e02885451425..9b3829931f40 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -986,9 +986,9 @@ int scsi_device_get(struct scsi_device *sdev)
986 return -ENXIO; 986 return -ENXIO;
987 if (!get_device(&sdev->sdev_gendev)) 987 if (!get_device(&sdev->sdev_gendev))
988 return -ENXIO; 988 return -ENXIO;
989 /* We can fail this if we're doing SCSI operations 989 /* We can fail try_module_get if we're doing SCSI operations
990 * from module exit (like cache flush) */ 990 * from module exit (like cache flush) */
991 try_module_get(sdev->host->hostt->module); 991 __module_get(sdev->host->hostt->module);
992 992
993 return 0; 993 return 0;
994} 994}
@@ -1004,14 +1004,7 @@ EXPORT_SYMBOL(scsi_device_get);
1004 */ 1004 */
1005void scsi_device_put(struct scsi_device *sdev) 1005void scsi_device_put(struct scsi_device *sdev)
1006{ 1006{
1007#ifdef CONFIG_MODULE_UNLOAD 1007 module_put(sdev->host->hostt->module);
1008 struct module *module = sdev->host->hostt->module;
1009
1010 /* The module refcount will be zero if scsi_device_get()
1011 * was called from a module removal routine */
1012 if (module && module_refcount(module) != 0)
1013 module_put(module);
1014#endif
1015 put_device(&sdev->sdev_gendev); 1008 put_device(&sdev->sdev_gendev);
1016} 1009}
1017EXPORT_SYMBOL(scsi_device_put); 1010EXPORT_SYMBOL(scsi_device_put);
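
The scsi_device_get()/scsi_device_put() rework replaces guesswork with strict pairing: __module_get() takes an unconditional reference, so every get is matched by exactly one module_put(). The old put peeked at module_refcount() before releasing, which is fragile under concurrent gets and puts. A small C11 sketch of why check-then-put is unsafe (illustrative only, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcnt = 1;   /* 1 == "module loaded", like MODULE_REF_BASE */

static void get(void) { atomic_fetch_add(&refcnt, 1); }
static void put(void) { atomic_fetch_sub(&refcnt, 1); }

/* Racy variant resembling the old scsi_device_put(): another thread can
 * change the count between the load and the put, so this either leaks a
 * reference or drops one it never took -- a classic check-then-act race. */
static void racy_put(void)
{
    if (atomic_load(&refcnt) != 1)
        put();
}

int main(void)
{
    get();          /* balanced pair: always safe */
    put();
    racy_put();     /* correctness depends on what ran in between */
    printf("refcnt=%d\n", atomic_load(&refcnt));
    return 0;
}
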
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b8b51bc29b4..4aca1b0378c2 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1623,7 +1623,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1623 req_opcode = cmd[3]; 1623 req_opcode = cmd[3];
1624 req_sa = get_unaligned_be16(cmd + 4); 1624 req_sa = get_unaligned_be16(cmd + 4);
1625 alloc_len = get_unaligned_be32(cmd + 6); 1625 alloc_len = get_unaligned_be32(cmd + 6);
1626 if (alloc_len < 4 && alloc_len > 0xffff) { 1626 if (alloc_len < 4 || alloc_len > 0xffff) {
1627 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); 1627 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1628 return check_condition_result; 1628 return check_condition_result;
1629 } 1629 }
@@ -1631,7 +1631,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1631 a_len = 8192; 1631 a_len = 8192;
1632 else 1632 else
1633 a_len = alloc_len; 1633 a_len = alloc_len;
1634 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL); 1634 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1635 if (NULL == arr) { 1635 if (NULL == arr) {
1636 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, 1636 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1637 INSUFF_RES_ASCQ); 1637 INSUFF_RES_ASCQ);
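
The first scsi_debug hunk repairs a predicate that could never be true: alloc_len cannot be both below 4 and above 0xffff, so the out-of-range test needs || rather than && (the second hunk separately switches the allocation to GFP_ATOMIC for atomic-context callers). A tiny demonstration of the difference:

#include <stdio.h>

static int bad_check(unsigned int len)  { return len < 4 && len > 0xffff; }
static int good_check(unsigned int len) { return len < 4 || len > 0xffff; }

int main(void)
{
    unsigned int samples[] = { 0, 3, 4, 0xffff, 0x10000 };

    for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("len=%#x bad=%d good=%d\n",
               samples[i], bad_check(samples[i]), good_check(samples[i]));
    return 0;   /* bad_check() is 0 for every input */
}
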
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 6d5c0b8cb0bb..17bb541f7cc2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1143,7 +1143,17 @@ int scsi_init_io(struct scsi_cmnd *cmd)
1143 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; 1143 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
1144 int ivecs, count; 1144 int ivecs, count;
1145 1145
1146 BUG_ON(prot_sdb == NULL); 1146 if (prot_sdb == NULL) {
1147 /*
1148 * This can happen if someone (e.g. multipath)
1149 * queues a command to a device on an adapter
1150 * that does not support DIX.
1151 */
1152 WARN_ON_ONCE(1);
1153 error = BLKPREP_KILL;
1154 goto err_exit;
1155 }
1156
1147 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); 1157 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
1148 1158
1149 if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) { 1159 if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
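
The scsi_init_io() hunk downgrades a BUG_ON() to a warn-once plus an error return: a missing prot_sdb is a recoverable caller bug (e.g. multipath queuing to an adapter without DIX), so killing the one request beats panicking the machine. The shape in plain C (a sketch; the macro name is made up):

#include <stdio.h>

#define WARN_ON_ONCE_ISH(cond) do {                         \
        static int warned;                                  \
        if ((cond) && !warned) {                            \
            warned = 1;                                     \
            fprintf(stderr, "warning: %s\n", #cond);        \
        }                                                   \
    } while (0)

static int init_io(void *prot_sdb)
{
    if (prot_sdb == NULL) {
        WARN_ON_ONCE_ISH(1);   /* complain loudly, once */
        return -1;             /* fail this request, keep running */
    }
    /* ... set up the integrity scatterlist here ... */
    return 0;
}

int main(void)
{
    printf("rc=%d\n", init_io(NULL));       /* warns, returns -1 */
    printf("rc=%d\n", init_io(&(int){0}));  /* succeeds */
    return 0;
}
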
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 7281316a5ecb..a67d37c7e3c0 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -271,7 +271,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
271 iounmap(clk_reg); 271 iounmap(clk_reg);
272 272
273 dws->num_cs = 16; 273 dws->num_cs = 16;
274 dws->fifo_len = 40; /* FIFO has 40 words buffer */
275 274
276#ifdef CONFIG_SPI_DW_MID_DMA 275#ifdef CONFIG_SPI_DW_MID_DMA
277 dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); 276 dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index d0d5542efc06..8edcd1b84562 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -621,13 +621,13 @@ static void spi_hw_init(struct dw_spi *dws)
621 if (!dws->fifo_len) { 621 if (!dws->fifo_len) {
622 u32 fifo; 622 u32 fifo;
623 623
624 for (fifo = 2; fifo <= 257; fifo++) { 624 for (fifo = 2; fifo <= 256; fifo++) {
625 dw_writew(dws, DW_SPI_TXFLTR, fifo); 625 dw_writew(dws, DW_SPI_TXFLTR, fifo);
626 if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) 626 if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
627 break; 627 break;
628 } 628 }
629 629
630 dws->fifo_len = (fifo == 257) ? 0 : fifo; 630 dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
631 dw_writew(dws, DW_SPI_TXFLTR, 0); 631 dw_writew(dws, DW_SPI_TXFLTR, 0);
632 } 632 }
633} 633}
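
The spi_hw_init() fix corrects the FIFO-depth probe: thresholds are written to TXFLTR until the register refuses one, so the largest accepted value is fifo - 1, and scanning up to 256 with a fifo == 2 sentinel correctly reports "no FIFO". A userspace simulation of the probe against a register model that drops out-of-range writes (the 64-entry depth and the masking behaviour are assumptions for the demo, not hardware facts):

#include <stdio.h>

#define HW_FIFO_DEPTH 64U   /* pretend hardware, for the simulation only */

static unsigned int txfltr;

static void write_txfltr(unsigned int v)
{
    txfltr = (v < HW_FIFO_DEPTH) ? v : 0;  /* out-of-range writes don't stick */
}

int main(void)
{
    unsigned int fifo, fifo_len;

    for (fifo = 2; fifo <= 256; fifo++) {
        write_txfltr(fifo);
        if (fifo != txfltr)
            break;                         /* first rejected threshold */
    }
    fifo_len = (fifo == 2) ? 0 : fifo - 1; /* fifo == 2: nothing stuck at all */
    write_txfltr(0);
    printf("detected fifo_len=%u\n", fifo_len);  /* 63 for this model */
    return 0;
}
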
@@ -673,7 +673,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
673 if (dws->dma_ops && dws->dma_ops->dma_init) { 673 if (dws->dma_ops && dws->dma_ops->dma_init) {
674 ret = dws->dma_ops->dma_init(dws); 674 ret = dws->dma_ops->dma_init(dws);
675 if (ret) { 675 if (ret) {
676 dev_warn(&master->dev, "DMA init failed\n"); 676 dev_warn(dev, "DMA init failed\n");
677 dws->dma_inited = 0; 677 dws->dma_inited = 0;
678 } 678 }
679 } 679 }
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 05c623cfb078..23822e7df6c1 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -546,8 +546,8 @@ static void giveback(struct driver_data *drv_data)
546 cs_deassert(drv_data); 546 cs_deassert(drv_data);
547 } 547 }
548 548
549 spi_finalize_current_message(drv_data->master);
550 drv_data->cur_chip = NULL; 549 drv_data->cur_chip = NULL;
550 spi_finalize_current_message(drv_data->master);
551} 551}
552 552
553static void reset_sccr1(struct driver_data *drv_data) 553static void reset_sccr1(struct driver_data *drv_data)
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 96a5fc0878d8..3ab7a21445fc 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -82,7 +82,7 @@ struct sh_msiof_spi_priv {
82#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */ 82#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
83#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ 83#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
84#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ 84#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
85#define MDR1_FLD_MASK 0x000000c0 /* Frame Sync Signal Interval (0-3) */ 85#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */
86#define MDR1_FLD_SHIFT 2 86#define MDR1_FLD_SHIFT 2
87#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */ 87#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
88/* TMDR1 */ 88/* TMDR1 */
diff --git a/drivers/staging/media/tlg2300/Kconfig b/drivers/staging/media/tlg2300/Kconfig
index 81784c6f7b88..77d8753f6ba4 100644
--- a/drivers/staging/media/tlg2300/Kconfig
+++ b/drivers/staging/media/tlg2300/Kconfig
@@ -1,6 +1,7 @@
1config VIDEO_TLG2300 1config VIDEO_TLG2300
2 tristate "Telegent TLG2300 USB video capture support (Deprecated)" 2 tristate "Telegent TLG2300 USB video capture support (Deprecated)"
3 depends on VIDEO_DEV && I2C && SND && DVB_CORE 3 depends on VIDEO_DEV && I2C && SND && DVB_CORE
4 depends on MEDIA_USB_SUPPORT
4 select VIDEO_TUNER 5 select VIDEO_TUNER
5 select VIDEO_TVEEPROM 6 select VIDEO_TVEEPROM
6 depends on RC_CORE 7 depends on RC_CORE
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index 5927c0a98a74..bcfd2a22208f 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -503,7 +503,6 @@ static struct platform_driver cdns_wdt_driver = {
503 .shutdown = cdns_wdt_shutdown, 503 .shutdown = cdns_wdt_shutdown,
504 .driver = { 504 .driver = {
505 .name = "cdns-wdt", 505 .name = "cdns-wdt",
506 .owner = THIS_MODULE,
507 .of_match_table = cdns_wdt_of_match, 506 .of_match_table = cdns_wdt_of_match,
508 .pm = &cdns_wdt_pm_ops, 507 .pm = &cdns_wdt_pm_ops,
509 }, 508 },
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index d6add516a7a7..5142bbabe027 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -52,6 +52,8 @@
52#define IMX2_WDT_WRSR 0x04 /* Reset Status Register */ 52#define IMX2_WDT_WRSR 0x04 /* Reset Status Register */
53#define IMX2_WDT_WRSR_TOUT (1 << 1) /* -> Reset due to Timeout */ 53#define IMX2_WDT_WRSR_TOUT (1 << 1) /* -> Reset due to Timeout */
54 54
55#define IMX2_WDT_WMCR 0x08 /* Misc Register */
56
55#define IMX2_WDT_MAX_TIME 128 57#define IMX2_WDT_MAX_TIME 128
56#define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */ 58#define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */
57 59
@@ -274,6 +276,13 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
274 276
275 imx2_wdt_ping_if_active(wdog); 277 imx2_wdt_ping_if_active(wdog);
276 278
279 /*
280 * Disable the watchdog power down counter at boot. Otherwise the power
281 * down counter will pull down the #WDOG interrupt line for one clock
282 * cycle.
283 */
284 regmap_write(wdev->regmap, IMX2_WDT_WMCR, 0);
285
277 ret = watchdog_register_device(wdog); 286 ret = watchdog_register_device(wdog);
278 if (ret) { 287 if (ret) {
279 dev_err(&pdev->dev, "cannot register watchdog device\n"); 288 dev_err(&pdev->dev, "cannot register watchdog device\n");
@@ -327,18 +336,21 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
327} 336}
328 337
329#ifdef CONFIG_PM_SLEEP 338#ifdef CONFIG_PM_SLEEP
330/* Disable watchdog if it is active during suspend */ 339/* Disable watchdog if it is active or non-active but still running */
331static int imx2_wdt_suspend(struct device *dev) 340static int imx2_wdt_suspend(struct device *dev)
332{ 341{
333 struct watchdog_device *wdog = dev_get_drvdata(dev); 342 struct watchdog_device *wdog = dev_get_drvdata(dev);
334 struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); 343 struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
335 344
336 imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); 345 /* The watchdog IP block is running */
337 imx2_wdt_ping(wdog); 346 if (imx2_wdt_is_running(wdev)) {
347 imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
348 imx2_wdt_ping(wdog);
338 349
339 /* Watchdog has been stopped but IP block is still running */ 350 /* The watchdog is not active */
340 if (!watchdog_active(wdog) && imx2_wdt_is_running(wdev)) 351 if (!watchdog_active(wdog))
341 del_timer_sync(&wdev->timer); 352 del_timer_sync(&wdev->timer);
353 }
342 354
343 clk_disable_unprepare(wdev->clk); 355 clk_disable_unprepare(wdev->clk);
344 356
@@ -354,15 +366,25 @@ static int imx2_wdt_resume(struct device *dev)
354 clk_prepare_enable(wdev->clk); 366 clk_prepare_enable(wdev->clk);
355 367
356 if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) { 368 if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) {
357 /* Resumes from deep sleep we need restart 369 /*
358 * the watchdog again. 370 * If the watchdog is still active and resumes
371 * from deep sleep state, need to restart the
372 * watchdog again.
359 */ 373 */
360 imx2_wdt_setup(wdog); 374 imx2_wdt_setup(wdog);
361 imx2_wdt_set_timeout(wdog, wdog->timeout); 375 imx2_wdt_set_timeout(wdog, wdog->timeout);
362 imx2_wdt_ping(wdog); 376 imx2_wdt_ping(wdog);
363 } else if (imx2_wdt_is_running(wdev)) { 377 } else if (imx2_wdt_is_running(wdev)) {
378 /* Resuming from non-deep sleep state. */
379 imx2_wdt_set_timeout(wdog, wdog->timeout);
364 imx2_wdt_ping(wdog); 380 imx2_wdt_ping(wdog);
365 mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2); 381 /*
382 * But the watchdog is not active, then start
383 * the timer again.
384 */
385 if (!watchdog_active(wdog))
386 mod_timer(&wdev->timer,
387 jiffies + wdog->timeout * HZ / 2);
366 } 388 }
367 389
368 return 0; 390 return 0;
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
index ef6a298e8c45..1f4155ee3404 100644
--- a/drivers/watchdog/meson_wdt.c
+++ b/drivers/watchdog/meson_wdt.c
@@ -215,7 +215,6 @@ static struct platform_driver meson_wdt_driver = {
215 .remove = meson_wdt_remove, 215 .remove = meson_wdt_remove,
216 .shutdown = meson_wdt_shutdown, 216 .shutdown = meson_wdt_shutdown,
217 .driver = { 217 .driver = {
218 .owner = THIS_MODULE,
219 .name = DRV_NAME, 218 .name = DRV_NAME,
220 .of_match_table = meson_wdt_dt_ids, 219 .of_match_table = meson_wdt_dt_ids,
221 }, 220 },
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7e607416755a..0b180708bf79 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1171,6 +1171,7 @@ struct btrfs_space_info {
1171 struct percpu_counter total_bytes_pinned; 1171 struct percpu_counter total_bytes_pinned;
1172 1172
1173 struct list_head list; 1173 struct list_head list;
1174 /* Protected by the spinlock 'lock'. */
1174 struct list_head ro_bgs; 1175 struct list_head ro_bgs;
1175 1176
1176 struct rw_semaphore groups_sem; 1177 struct rw_semaphore groups_sem;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 15116585e714..a684086c3c81 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -9422,7 +9422,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9422 * are still on the list after taking the semaphore 9422 * are still on the list after taking the semaphore
9423 */ 9423 */
9424 list_del_init(&block_group->list); 9424 list_del_init(&block_group->list);
9425 list_del_init(&block_group->ro_list);
9426 if (list_empty(&block_group->space_info->block_groups[index])) { 9425 if (list_empty(&block_group->space_info->block_groups[index])) {
9427 kobj = block_group->space_info->block_group_kobjs[index]; 9426 kobj = block_group->space_info->block_group_kobjs[index];
9428 block_group->space_info->block_group_kobjs[index] = NULL; 9427 block_group->space_info->block_group_kobjs[index] = NULL;
@@ -9464,6 +9463,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9464 btrfs_remove_free_space_cache(block_group); 9463 btrfs_remove_free_space_cache(block_group);
9465 9464
9466 spin_lock(&block_group->space_info->lock); 9465 spin_lock(&block_group->space_info->lock);
9466 list_del_init(&block_group->ro_list);
9467 block_group->space_info->total_bytes -= block_group->key.offset; 9467 block_group->space_info->total_bytes -= block_group->key.offset;
9468 block_group->space_info->bytes_readonly -= block_group->key.offset; 9468 block_group->space_info->bytes_readonly -= block_group->key.offset;
9469 block_group->space_info->disk_total -= block_group->key.offset * factor; 9469 block_group->space_info->disk_total -= block_group->key.offset * factor;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4ebabd237153..790dbae3343c 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2190,7 +2190,7 @@ void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
2190 2190
2191 next = next_state(state); 2191 next = next_state(state);
2192 2192
2193 failrec = (struct io_failure_record *)state->private; 2193 failrec = (struct io_failure_record *)(unsigned long)state->private;
2194 free_extent_state(state); 2194 free_extent_state(state);
2195 kfree(failrec); 2195 kfree(failrec);
2196 2196
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 9e1569ffbf6e..2f0fbc374e87 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3053,7 +3053,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3053 3053
3054 ppath = btrfs_alloc_path(); 3054 ppath = btrfs_alloc_path();
3055 if (!ppath) { 3055 if (!ppath) {
3056 btrfs_free_path(ppath); 3056 btrfs_free_path(path);
3057 return -ENOMEM; 3057 return -ENOMEM;
3058 } 3058 }
3059 3059
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 60f7cbe815e9..6f49b2872a64 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1000,10 +1000,20 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
1000 */ 1000 */
1001 if (fs_info->pending_changes == 0) 1001 if (fs_info->pending_changes == 0)
1002 return 0; 1002 return 0;
1003 /*
1004 * A non-blocking test if the fs is frozen. We must not
1005 * start a new transaction here otherwise a deadlock
1006 * happens. The pending operations are delayed to the
1007 * next commit after thawing.
1008 */
1009 if (__sb_start_write(sb, SB_FREEZE_WRITE, false))
1010 __sb_end_write(sb, SB_FREEZE_WRITE);
1011 else
1012 return 0;
1003 trans = btrfs_start_transaction(root, 0); 1013 trans = btrfs_start_transaction(root, 0);
1004 } else {
1005 return PTR_ERR(trans);
1006 } 1014 }
1015 if (IS_ERR(trans))
1016 return PTR_ERR(trans);
1007 } 1017 }
1008 return btrfs_commit_transaction(trans, root); 1018 return btrfs_commit_transaction(trans, root);
1009} 1019}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index a605d4e2f2bc..e88b59d13439 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2118,7 +2118,7 @@ void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
2118 unsigned long prev; 2118 unsigned long prev;
2119 unsigned long bit; 2119 unsigned long bit;
2120 2120
2121 prev = cmpxchg(&fs_info->pending_changes, 0, 0); 2121 prev = xchg(&fs_info->pending_changes, 0);
2122 if (!prev) 2122 if (!prev)
2123 return; 2123 return;
2124 2124
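
The one-liner in btrfs_apply_pending_changes() matters because cmpxchg(&x, 0, 0) only stores when the old value is already 0 -- it reads pending_changes but never clears a nonzero value, so the same pending bits could be consumed twice. xchg() fetches and clears atomically in one step. The same distinction in C11 atomics (illustrative):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
    atomic_ulong pending = 0x5;
    unsigned long expected = 0;

    /* cmpxchg(&pending, 0, 0): fails because pending != 0, and crucially
     * leaves the 0x5 in place for the next caller to pick up again. */
    atomic_compare_exchange_strong(&pending, &expected, 0);
    printf("after cmpxchg: %#lx\n", (unsigned long)atomic_load(&pending));

    /* xchg(&pending, 0): returns the old bits and clears them atomically,
     * so exactly one caller owns the pending flags. */
    unsigned long prev = atomic_exchange(&pending, 0);
    printf("xchg returned %#lx, now %#lx\n",
           prev, (unsigned long)atomic_load(&pending));
    return 0;
}
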
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 45cb59bcc791..8b7898b7670f 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -86,21 +86,16 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
86 } 86 }
87 87
88 src_inode = file_inode(src_file.file); 88 src_inode = file_inode(src_file.file);
89 rc = -EINVAL;
90 if (S_ISDIR(src_inode->i_mode))
91 goto out_fput;
89 92
90 /* 93 /*
91 * Note: cifs case is easier than btrfs since server responsible for 94 * Note: cifs case is easier than btrfs since server responsible for
92 * checks for proper open modes and file type and if it wants 95 * checks for proper open modes and file type and if it wants
93 * server could even support copy of range where source = target 96 * server could even support copy of range where source = target
94 */ 97 */
95 98 lock_two_nondirectories(target_inode, src_inode);
96 /* so we do not deadlock racing two ioctls on same files */
97 if (target_inode < src_inode) {
98 mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_PARENT);
99 mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
100 } else {
101 mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_PARENT);
102 mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_CHILD);
103 }
104 99
105 /* determine range to clone */ 100 /* determine range to clone */
106 rc = -EINVAL; 101 rc = -EINVAL;
@@ -124,13 +119,7 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
124out_unlock: 119out_unlock:
125 /* although unlocking in the reverse order from locking is not 120 /* although unlocking in the reverse order from locking is not
126 strictly necessary here it is a little cleaner to be consistent */ 121 strictly necessary here it is a little cleaner to be consistent */
127 if (target_inode < src_inode) { 122 unlock_two_nondirectories(src_inode, target_inode);
128 mutex_unlock(&src_inode->i_mutex);
129 mutex_unlock(&target_inode->i_mutex);
130 } else {
131 mutex_unlock(&target_inode->i_mutex);
132 mutex_unlock(&src_inode->i_mutex);
133 }
134out_fput: 123out_fput:
135 fdput(src_file); 124 fdput(src_file);
136out_drop_write: 125out_drop_write:
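
The cifs clone ioctl now delegates to lock_two_nondirectories(), which encapsulates the deadlock-avoidance idiom the open-coded version spelled out: take the two inode locks in a fixed address order, so two racing ioctls on the same pair can never each hold one lock while waiting for the other. The idiom with pthreads (a sketch; comparing object addresses mirrors how the kernel orders the inode pointers):

#include <pthread.h>

/* Lock two mutexes in a globally consistent order (lowest address first). */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
    if (a == b) {              /* same object: one lock is enough */
        pthread_mutex_lock(a);
        return;
    }
    if (a > b) { pthread_mutex_t *t = a; a = b; b = t; }
    pthread_mutex_lock(a);
    pthread_mutex_lock(b);
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
    pthread_mutex_unlock(a);
    if (a != b)
        pthread_mutex_unlock(b);
}

int main(void)
{
    pthread_mutex_t src = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_t dst = PTHREAD_MUTEX_INITIALIZER;

    lock_pair(&dst, &src);     /* either argument order is deadlock-safe */
    unlock_pair(&dst, &src);
    return 0;
}
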
diff --git a/include/dt-bindings/interrupt-controller/arm-gic.h b/include/dt-bindings/interrupt-controller/arm-gic.h
index 1ea1b702fec2..d4110d5caa3e 100644
--- a/include/dt-bindings/interrupt-controller/arm-gic.h
+++ b/include/dt-bindings/interrupt-controller/arm-gic.h
@@ -7,14 +7,14 @@
7 7
8#include <dt-bindings/interrupt-controller/irq.h> 8#include <dt-bindings/interrupt-controller/irq.h>
9 9
10/* interrupt specific cell 0 */ 10/* interrupt specifier cell 0 */
11 11
12#define GIC_SPI 0 12#define GIC_SPI 0
13#define GIC_PPI 1 13#define GIC_PPI 1
14 14
15/* 15/*
16 * Interrupt specifier cell 2. 16 * Interrupt specifier cell 2.
17 * The flaggs in irq.h are valid, plus those below. 17 * The flags in irq.h are valid, plus those below.
18 */ 18 */
19#define GIC_CPU_MASK_RAW(x) ((x) << 8) 19#define GIC_CPU_MASK_RAW(x) ((x) << 8)
20#define GIC_CPU_MASK_SIMPLE(num) GIC_CPU_MASK_RAW((1 << (num)) - 1) 20#define GIC_CPU_MASK_SIMPLE(num) GIC_CPU_MASK_RAW((1 << (num)) - 1)
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h
index ce5dda8958fe..b1fd675fa36f 100644
--- a/include/linux/mfd/samsung/s2mps13.h
+++ b/include/linux/mfd/samsung/s2mps13.h
@@ -59,6 +59,7 @@ enum s2mps13_reg {
59 S2MPS13_REG_B6CTRL, 59 S2MPS13_REG_B6CTRL,
60 S2MPS13_REG_B6OUT, 60 S2MPS13_REG_B6OUT,
61 S2MPS13_REG_B7CTRL, 61 S2MPS13_REG_B7CTRL,
62 S2MPS13_REG_B7SW,
62 S2MPS13_REG_B7OUT, 63 S2MPS13_REG_B7OUT,
63 S2MPS13_REG_B8CTRL, 64 S2MPS13_REG_B8CTRL,
64 S2MPS13_REG_B8OUT, 65 S2MPS13_REG_B8OUT,
@@ -102,6 +103,7 @@ enum s2mps13_reg {
102 S2MPS13_REG_L26CTRL, 103 S2MPS13_REG_L26CTRL,
103 S2MPS13_REG_L27CTRL, 104 S2MPS13_REG_L27CTRL,
104 S2MPS13_REG_L28CTRL, 105 S2MPS13_REG_L28CTRL,
106 S2MPS13_REG_L29CTRL,
105 S2MPS13_REG_L30CTRL, 107 S2MPS13_REG_L30CTRL,
106 S2MPS13_REG_L31CTRL, 108 S2MPS13_REG_L31CTRL,
107 S2MPS13_REG_L32CTRL, 109 S2MPS13_REG_L32CTRL,
diff --git a/include/linux/module.h b/include/linux/module.h
index ebfb0e153c6a..b653d7c0a05a 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -444,7 +444,7 @@ extern void __module_put_and_exit(struct module *mod, long code)
444#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code) 444#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
445 445
446#ifdef CONFIG_MODULE_UNLOAD 446#ifdef CONFIG_MODULE_UNLOAD
447unsigned long module_refcount(struct module *mod); 447int module_refcount(struct module *mod);
448void __symbol_put(const char *symbol); 448void __symbol_put(const char *symbol);
449#define symbol_put(x) __symbol_put(VMLINUX_SYMBOL_STR(x)) 449#define symbol_put(x) __symbol_put(VMLINUX_SYMBOL_STR(x))
450void symbol_put_addr(void *addr); 450void symbol_put_addr(void *addr);
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 7eeb9bbfb816..f7556261fe3c 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -26,7 +26,7 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
26void *module_alloc(unsigned long size); 26void *module_alloc(unsigned long size);
27 27
28/* Free memory returned from module_alloc. */ 28/* Free memory returned from module_alloc. */
29void module_free(struct module *mod, void *module_region); 29void module_memfree(void *module_region);
30 30
31/* 31/*
32 * Apply the given relocation to the (simplified) ELF. Return -error 32 * Apply the given relocation to the (simplified) ELF. Return -error
@@ -82,4 +82,6 @@ int module_finalize(const Elf_Ehdr *hdr,
82/* Any cleanup needed when module leaves. */ 82/* Any cleanup needed when module leaves. */
83void module_arch_cleanup(struct module *mod); 83void module_arch_cleanup(struct module *mod);
84 84
85/* Any cleanup before freeing mod->module_init */
86void module_arch_freeing_init(struct module *mod);
85#endif 87#endif
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 853698c721f7..76200984d1e2 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -85,11 +85,6 @@ static inline void oom_killer_enable(void)
85 oom_killer_disabled = false; 85 oom_killer_disabled = false;
86} 86}
87 87
88static inline bool oom_gfp_allowed(gfp_t gfp_mask)
89{
90 return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
91}
92
93extern struct task_struct *find_lock_task_mm(struct task_struct *p); 88extern struct task_struct *find_lock_task_mm(struct task_struct *p);
94 89
95static inline bool task_will_free_mem(struct task_struct *task) 90static inline bool task_will_free_mem(struct task_struct *task)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 360a966a97a5..9603094ed59b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -175,6 +175,8 @@ enum pci_dev_flags {
175 PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4), 175 PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4),
176 /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */ 176 /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
177 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5), 177 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
178 /* Do not use bus resets for device */
179 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
178}; 180};
179 181
180enum pci_irq_reroute_variant { 182enum pci_irq_reroute_variant {
@@ -1065,6 +1067,7 @@ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1065void pci_bus_assign_resources(const struct pci_bus *bus); 1067void pci_bus_assign_resources(const struct pci_bus *bus);
1066void pci_bus_size_bridges(struct pci_bus *bus); 1068void pci_bus_size_bridges(struct pci_bus *bus);
1067int pci_claim_resource(struct pci_dev *, int); 1069int pci_claim_resource(struct pci_dev *, int);
1070int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1068void pci_assign_unassigned_resources(void); 1071void pci_assign_unassigned_resources(void);
1069void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge); 1072void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1070void pci_assign_unassigned_bus_resources(struct pci_bus *bus); 1073void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
diff --git a/include/linux/printk.h b/include/linux/printk.h
index c8f170324e64..4d5bf5726578 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -10,9 +10,6 @@
10extern const char linux_banner[]; 10extern const char linux_banner[];
11extern const char linux_proc_banner[]; 11extern const char linux_proc_banner[];
12 12
13extern char *log_buf_addr_get(void);
14extern u32 log_buf_len_get(void);
15
16static inline int printk_get_level(const char *buffer) 13static inline int printk_get_level(const char *buffer)
17{ 14{
18 if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { 15 if (buffer[0] == KERN_SOH_ASCII && buffer[1]) {
@@ -163,6 +160,8 @@ extern int kptr_restrict;
163 160
164extern void wake_up_klogd(void); 161extern void wake_up_klogd(void);
165 162
163char *log_buf_addr_get(void);
164u32 log_buf_len_get(void);
166void log_buf_kexec_setup(void); 165void log_buf_kexec_setup(void);
167void __init setup_log_buf(int early); 166void __init setup_log_buf(int early);
168void dump_stack_set_arch_desc(const char *fmt, ...); 167void dump_stack_set_arch_desc(const char *fmt, ...);
@@ -198,6 +197,16 @@ static inline void wake_up_klogd(void)
198{ 197{
199} 198}
200 199
200static inline char *log_buf_addr_get(void)
201{
202 return NULL;
203}
204
205static inline u32 log_buf_len_get(void)
206{
207 return 0;
208}
209
201static inline void log_buf_kexec_setup(void) 210static inline void log_buf_kexec_setup(void)
202{ 211{
203} 212}
diff --git a/include/linux/time.h b/include/linux/time.h
index 203c2ad40d71..beebe3a02d43 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -110,6 +110,19 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
110 return true; 110 return true;
111} 111}
112 112
113static inline bool timeval_valid(const struct timeval *tv)
114{
115 /* Dates before 1970 are bogus */
116 if (tv->tv_sec < 0)
117 return false;
118
 119 /* Can't have more microseconds than a second */
120 if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
121 return false;
122
123 return true;
124}
125
113extern struct timespec timespec_trunc(struct timespec t, unsigned gran); 126extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
114 127
115#define CURRENT_TIME (current_kernel_time()) 128#define CURRENT_TIME (current_kernel_time())
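
timeval_valid() rejects negative seconds and microseconds outside [0, USEC_PER_SEC); the settimeofday() hunk further down uses it before converting, since a bogus tv_usec multiplied by NSEC_PER_USEC would yield an out-of-range tv_nsec. The same check, standalone (USEC_PER_SEC inlined for the demo):

#include <stdbool.h>
#include <stdio.h>
#include <sys/time.h>

#define USEC_PER_SEC 1000000L

static bool timeval_valid(const struct timeval *tv)
{
    if (tv->tv_sec < 0)                                /* pre-1970: bogus */
        return false;
    if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
        return false;
    return true;
}

int main(void)
{
    struct timeval ok  = { .tv_sec = 1, .tv_usec = 999999 };
    struct timeval bad = { .tv_sec = 1, .tv_usec = USEC_PER_SEC };

    printf("%d %d\n", timeval_valid(&ok), timeval_valid(&bad)); /* 1 0 */
    return 0;
}
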
diff --git a/include/net/ip.h b/include/net/ip.h
index 0bb620702929..f7cbd703d15d 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -39,11 +39,12 @@ struct inet_skb_parm {
39 struct ip_options opt; /* Compiled IP options */ 39 struct ip_options opt; /* Compiled IP options */
40 unsigned char flags; 40 unsigned char flags;
41 41
42#define IPSKB_FORWARDED 1 42#define IPSKB_FORWARDED BIT(0)
43#define IPSKB_XFRM_TUNNEL_SIZE 2 43#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
44#define IPSKB_XFRM_TRANSFORMED 4 44#define IPSKB_XFRM_TRANSFORMED BIT(2)
45#define IPSKB_FRAG_COMPLETE 8 45#define IPSKB_FRAG_COMPLETE BIT(3)
46#define IPSKB_REROUTED 16 46#define IPSKB_REROUTED BIT(4)
47#define IPSKB_DOREDIRECT BIT(5)
47 48
48 u16 frag_max_size; 49 u16 frag_max_size;
49}; 50};
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 6edf1f2028cd..86b399c66c3d 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -146,6 +146,14 @@ TRACE_EVENT(kvm_msi_set_irq,
146 146
147#if defined(CONFIG_HAVE_KVM_IRQFD) 147#if defined(CONFIG_HAVE_KVM_IRQFD)
148 148
149#ifdef kvm_irqchips
150#define kvm_ack_irq_string "irqchip %s pin %u"
151#define kvm_ack_irq_parm __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
152#else
153#define kvm_ack_irq_string "irqchip %d pin %u"
154#define kvm_ack_irq_parm __entry->irqchip, __entry->pin
155#endif
156
149TRACE_EVENT(kvm_ack_irq, 157TRACE_EVENT(kvm_ack_irq,
150 TP_PROTO(unsigned int irqchip, unsigned int pin), 158 TP_PROTO(unsigned int irqchip, unsigned int pin),
151 TP_ARGS(irqchip, pin), 159 TP_ARGS(irqchip, pin),
@@ -160,13 +168,7 @@ TRACE_EVENT(kvm_ack_irq,
160 __entry->pin = pin; 168 __entry->pin = pin;
161 ), 169 ),
162 170
163#ifdef kvm_irqchips 171 TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
164 TP_printk("irqchip %s pin %u",
165 __print_symbolic(__entry->irqchip, kvm_irqchips),
166 __entry->pin)
167#else
168 TP_printk("irqchip %d pin %u", __entry->irqchip, __entry->pin)
169#endif
170); 172);
171 173
172#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */ 174#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
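
The kvm_ack_irq rework is a preprocessor fix: preprocessor directives inside a macro invocation's argument list (here, inside TP_printk(...)) are undefined behaviour, so the #ifdef is hoisted into helper macros selected once, and the invocation itself stays unconditional. The general shape (names invented for the demo):

#include <stdio.h>

/* Wrong: directives inside a macro's argument list are undefined behaviour.
 *
 *   PRINT(
 *   #ifdef VERBOSE
 *          "irqchip %s pin %u", name, pin
 *   #else
 *          "irqchip %d pin %u", chip, pin
 *   #endif
 *   );
 *
 * Right: select the arguments up front, then expand unconditionally.
 */
#ifdef VERBOSE
#define ACK_FMT  "irqchip %s pin %u"
#define ACK_ARGS "GIC", 7u
#else
#define ACK_FMT  "irqchip %d pin %u"
#define ACK_ARGS 0, 7u
#endif

int main(void)
{
    printf(ACK_FMT "\n", ACK_ARGS);
    return 0;
}
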
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index d6594e457a25..a64e7a207d2b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -163,7 +163,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
163 163
164void bpf_jit_binary_free(struct bpf_binary_header *hdr) 164void bpf_jit_binary_free(struct bpf_binary_header *hdr)
165{ 165{
166 module_free(NULL, hdr); 166 module_memfree(hdr);
167} 167}
168#endif /* CONFIG_BPF_JIT */ 168#endif /* CONFIG_BPF_JIT */
169 169
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 088ac0b1b106..536edc2be307 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -150,7 +150,7 @@ static int map_lookup_elem(union bpf_attr *attr)
150 int ufd = attr->map_fd; 150 int ufd = attr->map_fd;
151 struct fd f = fdget(ufd); 151 struct fd f = fdget(ufd);
152 struct bpf_map *map; 152 struct bpf_map *map;
153 void *key, *value; 153 void *key, *value, *ptr;
154 int err; 154 int err;
155 155
156 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) 156 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
@@ -169,20 +169,29 @@ static int map_lookup_elem(union bpf_attr *attr)
169 if (copy_from_user(key, ukey, map->key_size) != 0) 169 if (copy_from_user(key, ukey, map->key_size) != 0)
170 goto free_key; 170 goto free_key;
171 171
172 err = -ENOENT; 172 err = -ENOMEM;
173 rcu_read_lock(); 173 value = kmalloc(map->value_size, GFP_USER);
174 value = map->ops->map_lookup_elem(map, key);
175 if (!value) 174 if (!value)
176 goto err_unlock; 175 goto free_key;
176
177 rcu_read_lock();
178 ptr = map->ops->map_lookup_elem(map, key);
179 if (ptr)
180 memcpy(value, ptr, map->value_size);
181 rcu_read_unlock();
182
183 err = -ENOENT;
184 if (!ptr)
185 goto free_value;
177 186
178 err = -EFAULT; 187 err = -EFAULT;
179 if (copy_to_user(uvalue, value, map->value_size) != 0) 188 if (copy_to_user(uvalue, value, map->value_size) != 0)
180 goto err_unlock; 189 goto free_value;
181 190
182 err = 0; 191 err = 0;
183 192
184err_unlock: 193free_value:
185 rcu_read_unlock(); 194 kfree(value);
186free_key: 195free_key:
187 kfree(key); 196 kfree(key);
188err_put: 197err_put:
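
map_lookup_elem() is restructured because copy_to_user() can fault and sleep, which is forbidden inside rcu_read_lock(); the fix snapshots the value into a kmalloc'd buffer inside the read-side critical section and does the user copy afterwards. The same copy-then-publish shape, with a reader-writer lock standing in for RCU (an analogy for the demo, not the kernel API):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static pthread_rwlock_t rl = PTHREAD_RWLOCK_INITIALIZER;
static char map_value[64] = "value\n";  /* the protected object */

static int lookup_copy(int fd)
{
    char *snap = malloc(sizeof(map_value));

    if (!snap)
        return -1;

    pthread_rwlock_rdlock(&rl);         /* "rcu_read_lock()" */
    memcpy(snap, map_value, sizeof(map_value));
    pthread_rwlock_unlock(&rl);         /* keep the critical section short */

    (void)write(fd, snap, strlen(snap)); /* the blocking copy happens here */
    free(snap);
    return 0;
}

int main(void)
{
    return lookup_copy(1);
}
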
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index bb263d0caab3..04cfe8ace520 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1909,7 +1909,7 @@ static void cgroup_kill_sb(struct super_block *sb)
1909 * 1909 *
1910 * And don't kill the default root. 1910 * And don't kill the default root.
1911 */ 1911 */
1912 if (css_has_online_children(&root->cgrp.self) || 1912 if (!list_empty(&root->cgrp.self.children) ||
1913 root == &cgrp_dfl_root) 1913 root == &cgrp_dfl_root)
1914 cgroup_put(&root->cgrp); 1914 cgroup_put(&root->cgrp);
1915 else 1915 else
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index f191bddf64b8..7b40c5f07dce 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2023,7 +2023,7 @@ static int kdb_lsmod(int argc, const char **argv)
2023 kdb_printf("%-20s%8u 0x%p ", mod->name, 2023 kdb_printf("%-20s%8u 0x%p ", mod->name,
2024 mod->core_size, (void *)mod); 2024 mod->core_size, (void *)mod);
2025#ifdef CONFIG_MODULE_UNLOAD 2025#ifdef CONFIG_MODULE_UNLOAD
2026 kdb_printf("%4ld ", module_refcount(mod)); 2026 kdb_printf("%4d ", module_refcount(mod));
2027#endif 2027#endif
2028 if (mod->state == MODULE_STATE_GOING) 2028 if (mod->state == MODULE_STATE_GOING)
2029 kdb_printf(" (Unloading)"); 2029 kdb_printf(" (Unloading)");
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 06f58309fed2..ee619929cf90 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -127,7 +127,7 @@ static void *alloc_insn_page(void)
127 127
128static void free_insn_page(void *page) 128static void free_insn_page(void *page)
129{ 129{
130 module_free(NULL, page); 130 module_memfree(page);
131} 131}
132 132
133struct kprobe_insn_cache kprobe_insn_slots = { 133struct kprobe_insn_cache kprobe_insn_slots = {
diff --git a/kernel/module.c b/kernel/module.c
index 3965511ae133..d856e96a3cce 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -772,9 +772,18 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
772 return 0; 772 return 0;
773} 773}
774 774
775unsigned long module_refcount(struct module *mod) 775/**
776 * module_refcount - return the refcount or -1 if unloading
777 *
778 * @mod: the module we're checking
779 *
780 * Returns:
781 * -1 if the module is in the process of unloading
782 * otherwise the number of references in the kernel to the module
783 */
784int module_refcount(struct module *mod)
776{ 785{
777 return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE; 786 return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
778} 787}
779EXPORT_SYMBOL(module_refcount); 788EXPORT_SYMBOL(module_refcount);
780 789
@@ -856,7 +865,7 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
856 struct module_use *use; 865 struct module_use *use;
857 int printed_something = 0; 866 int printed_something = 0;
858 867
859 seq_printf(m, " %lu ", module_refcount(mod)); 868 seq_printf(m, " %i ", module_refcount(mod));
860 869
861 /* 870 /*
862 * Always include a trailing , so userspace can differentiate 871 * Always include a trailing , so userspace can differentiate
@@ -908,7 +917,7 @@ EXPORT_SYMBOL_GPL(symbol_put_addr);
908static ssize_t show_refcnt(struct module_attribute *mattr, 917static ssize_t show_refcnt(struct module_attribute *mattr,
909 struct module_kobject *mk, char *buffer) 918 struct module_kobject *mk, char *buffer)
910{ 919{
911 return sprintf(buffer, "%lu\n", module_refcount(mk->mod)); 920 return sprintf(buffer, "%i\n", module_refcount(mk->mod));
912} 921}
913 922
914static struct module_attribute modinfo_refcnt = 923static struct module_attribute modinfo_refcnt =
@@ -1795,7 +1804,7 @@ static void unset_module_core_ro_nx(struct module *mod) { }
1795static void unset_module_init_ro_nx(struct module *mod) { } 1804static void unset_module_init_ro_nx(struct module *mod) { }
1796#endif 1805#endif
1797 1806
1798void __weak module_free(struct module *mod, void *module_region) 1807void __weak module_memfree(void *module_region)
1799{ 1808{
1800 vfree(module_region); 1809 vfree(module_region);
1801} 1810}
@@ -1804,6 +1813,10 @@ void __weak module_arch_cleanup(struct module *mod)
1804{ 1813{
1805} 1814}
1806 1815
1816void __weak module_arch_freeing_init(struct module *mod)
1817{
1818}
1819
1807/* Free a module, remove from lists, etc. */ 1820/* Free a module, remove from lists, etc. */
1808static void free_module(struct module *mod) 1821static void free_module(struct module *mod)
1809{ 1822{
@@ -1841,7 +1854,8 @@ static void free_module(struct module *mod)
1841 1854
1842 /* This may be NULL, but that's OK */ 1855 /* This may be NULL, but that's OK */
1843 unset_module_init_ro_nx(mod); 1856 unset_module_init_ro_nx(mod);
1844 module_free(mod, mod->module_init); 1857 module_arch_freeing_init(mod);
1858 module_memfree(mod->module_init);
1845 kfree(mod->args); 1859 kfree(mod->args);
1846 percpu_modfree(mod); 1860 percpu_modfree(mod);
1847 1861
@@ -1850,7 +1864,7 @@ static void free_module(struct module *mod)
1850 1864
1851 /* Finally, free the core (containing the module structure) */ 1865 /* Finally, free the core (containing the module structure) */
1852 unset_module_core_ro_nx(mod); 1866 unset_module_core_ro_nx(mod);
1853 module_free(mod, mod->module_core); 1867 module_memfree(mod->module_core);
1854 1868
1855#ifdef CONFIG_MPU 1869#ifdef CONFIG_MPU
1856 update_protections(current->mm); 1870 update_protections(current->mm);
@@ -2785,7 +2799,7 @@ static int move_module(struct module *mod, struct load_info *info)
2785 */ 2799 */
2786 kmemleak_ignore(ptr); 2800 kmemleak_ignore(ptr);
2787 if (!ptr) { 2801 if (!ptr) {
2788 module_free(mod, mod->module_core); 2802 module_memfree(mod->module_core);
2789 return -ENOMEM; 2803 return -ENOMEM;
2790 } 2804 }
2791 memset(ptr, 0, mod->init_size); 2805 memset(ptr, 0, mod->init_size);
@@ -2930,8 +2944,9 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
2930static void module_deallocate(struct module *mod, struct load_info *info) 2944static void module_deallocate(struct module *mod, struct load_info *info)
2931{ 2945{
2932 percpu_modfree(mod); 2946 percpu_modfree(mod);
2933 module_free(mod, mod->module_init); 2947 module_arch_freeing_init(mod);
2934 module_free(mod, mod->module_core); 2948 module_memfree(mod->module_init);
2949 module_memfree(mod->module_core);
2935} 2950}
2936 2951
2937int __weak module_finalize(const Elf_Ehdr *hdr, 2952int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -2983,10 +2998,31 @@ static void do_mod_ctors(struct module *mod)
2983#endif 2998#endif
2984} 2999}
2985 3000
3001/* For freeing module_init on success, in case kallsyms traversing */
3002struct mod_initfree {
3003 struct rcu_head rcu;
3004 void *module_init;
3005};
3006
3007static void do_free_init(struct rcu_head *head)
3008{
3009 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
3010 module_memfree(m->module_init);
3011 kfree(m);
3012}
3013
2986/* This is where the real work happens */ 3014/* This is where the real work happens */
2987static int do_init_module(struct module *mod) 3015static int do_init_module(struct module *mod)
2988{ 3016{
2989 int ret = 0; 3017 int ret = 0;
3018 struct mod_initfree *freeinit;
3019
3020 freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
3021 if (!freeinit) {
3022 ret = -ENOMEM;
3023 goto fail;
3024 }
3025 freeinit->module_init = mod->module_init;
2990 3026
2991 /* 3027 /*
2992 * We want to find out whether @mod uses async during init. Clear 3028 * We want to find out whether @mod uses async during init. Clear
@@ -2999,18 +3035,7 @@ static int do_init_module(struct module *mod)
2999 if (mod->init != NULL) 3035 if (mod->init != NULL)
3000 ret = do_one_initcall(mod->init); 3036 ret = do_one_initcall(mod->init);
3001 if (ret < 0) { 3037 if (ret < 0) {
3002 /* 3038 goto fail_free_freeinit;
3003 * Init routine failed: abort. Try to protect us from
3004 * buggy refcounters.
3005 */
3006 mod->state = MODULE_STATE_GOING;
3007 synchronize_sched();
3008 module_put(mod);
3009 blocking_notifier_call_chain(&module_notify_list,
3010 MODULE_STATE_GOING, mod);
3011 free_module(mod);
3012 wake_up_all(&module_wq);
3013 return ret;
3014 } 3039 }
3015 if (ret > 0) { 3040 if (ret > 0) {
3016 pr_warn("%s: '%s'->init suspiciously returned %d, it should " 3041 pr_warn("%s: '%s'->init suspiciously returned %d, it should "
@@ -3055,15 +3080,35 @@ static int do_init_module(struct module *mod)
3055 mod->strtab = mod->core_strtab; 3080 mod->strtab = mod->core_strtab;
3056#endif 3081#endif
3057 unset_module_init_ro_nx(mod); 3082 unset_module_init_ro_nx(mod);
3058 module_free(mod, mod->module_init); 3083 module_arch_freeing_init(mod);
3059 mod->module_init = NULL; 3084 mod->module_init = NULL;
3060 mod->init_size = 0; 3085 mod->init_size = 0;
3061 mod->init_ro_size = 0; 3086 mod->init_ro_size = 0;
3062 mod->init_text_size = 0; 3087 mod->init_text_size = 0;
3088 /*
3089 * We want to free module_init, but be aware that kallsyms may be
3090 * walking this with preempt disabled. In all the failure paths,
3091 * we call synchronize_rcu/synchronize_sched, but we don't want
3092 * to slow down the success path, so use actual RCU here.
3093 */
3094 call_rcu(&freeinit->rcu, do_free_init);
3063 mutex_unlock(&module_mutex); 3095 mutex_unlock(&module_mutex);
3064 wake_up_all(&module_wq); 3096 wake_up_all(&module_wq);
3065 3097
3066 return 0; 3098 return 0;
3099
3100fail_free_freeinit:
3101 kfree(freeinit);
3102fail:
3103 /* Try to protect us from buggy refcounters. */
3104 mod->state = MODULE_STATE_GOING;
3105 synchronize_sched();
3106 module_put(mod);
3107 blocking_notifier_call_chain(&module_notify_list,
3108 MODULE_STATE_GOING, mod);
3109 free_module(mod);
3110 wake_up_all(&module_wq);
3111 return ret;
3067} 3112}
3068 3113
3069static int may_init_module(void) 3114static int may_init_module(void)
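
do_init_module() now frees the init section through call_rcu(): struct mod_initfree embeds an rcu_head, and the callback recovers the outer struct with container_of() once any kallsyms walkers are done. The embedding trick simulated in plain C -- call_rcu() is faked by invoking the callback immediately; only the container_of() recovery is the point:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void (*func)(struct rcu_head *); };

struct mod_initfree {
    struct rcu_head rcu;    /* handed to call_rcu() */
    void *module_init;      /* payload to free after the grace period */
};

static void do_free_init(struct rcu_head *head)
{
    struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);

    free(m->module_init);
    free(m);
}

/* Stand-in: real call_rcu() defers this until a grace period elapses. */
static void fake_call_rcu(struct rcu_head *head,
                          void (*func)(struct rcu_head *))
{
    func(head);
}

int main(void)
{
    struct mod_initfree *f = malloc(sizeof(*f));

    if (!f)
        return 1;
    f->module_init = malloc(128);
    fake_call_rcu(&f->rcu, do_free_init);
    puts("init section freed after grace period");
    return 0;
}
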
diff --git a/kernel/params.c b/kernel/params.c
index 0af9b2c4e56c..728e05b167de 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -642,12 +642,15 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
642 mk->mp->grp.attrs = new_attrs; 642 mk->mp->grp.attrs = new_attrs;
643 643
644 /* Tack new one on the end. */ 644 /* Tack new one on the end. */
645 memset(&mk->mp->attrs[mk->mp->num], 0, sizeof(mk->mp->attrs[0]));
645 sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr); 646 sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
646 mk->mp->attrs[mk->mp->num].param = kp; 647 mk->mp->attrs[mk->mp->num].param = kp;
647 mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show; 648 mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
648 /* Do not allow runtime DAC changes to make param writable. */ 649 /* Do not allow runtime DAC changes to make param writable. */
649 if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0) 650 if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
650 mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store; 651 mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
652 else
653 mk->mp->attrs[mk->mp->num].mattr.store = NULL;
651 mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name; 654 mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
652 mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm; 655 mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
653 mk->mp->num++; 656 mk->mp->num++;
diff --git a/kernel/sys.c b/kernel/sys.c
index a8c9f5a7dda6..ea9c88109894 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2210,9 +2210,13 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2210 up_write(&me->mm->mmap_sem); 2210 up_write(&me->mm->mmap_sem);
2211 break; 2211 break;
2212 case PR_MPX_ENABLE_MANAGEMENT: 2212 case PR_MPX_ENABLE_MANAGEMENT:
2213 if (arg2 || arg3 || arg4 || arg5)
2214 return -EINVAL;
2213 error = MPX_ENABLE_MANAGEMENT(me); 2215 error = MPX_ENABLE_MANAGEMENT(me);
2214 break; 2216 break;
2215 case PR_MPX_DISABLE_MANAGEMENT: 2217 case PR_MPX_DISABLE_MANAGEMENT:
2218 if (arg2 || arg3 || arg4 || arg5)
2219 return -EINVAL;
2216 error = MPX_DISABLE_MANAGEMENT(me); 2220 error = MPX_DISABLE_MANAGEMENT(me);
2217 break; 2221 break;
2218 default: 2222 default:
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 87a346fd6d61..28bf91c60a0b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -633,6 +633,13 @@ int ntp_validate_timex(struct timex *txc)
633 if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME))) 633 if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
634 return -EPERM; 634 return -EPERM;
635 635
636 if (txc->modes & ADJ_FREQUENCY) {
637 if (LONG_MIN / PPM_SCALE > txc->freq)
638 return -EINVAL;
639 if (LONG_MAX / PPM_SCALE < txc->freq)
640 return -EINVAL;
641 }
642
636 return 0; 643 return 0;
637} 644}
638 645
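
The ntp_validate_timex() addition rejects ADJ_FREQUENCY values whose later multiplication by PPM_SCALE would overflow a long; dividing the limit by the scale first keeps the guard itself overflow-free. The same pre-multiplication check in isolation (the PPM_SCALE value below is a placeholder, not the kernel's):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define PPM_SCALE 65536L   /* placeholder scale, for illustration only */

static bool freq_in_range(long freq)
{
    if (LONG_MIN / PPM_SCALE > freq)   /* freq * PPM_SCALE would underflow */
        return false;
    if (LONG_MAX / PPM_SCALE < freq)   /* freq * PPM_SCALE would overflow */
        return false;
    return true;
}

int main(void)
{
    printf("%d %d\n", freq_in_range(1000),
           freq_in_range(LONG_MAX / PPM_SCALE + 1));  /* prints: 1 0 */
    return 0;
}
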
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 6390517e77d4..2c85b7724af4 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -196,6 +196,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
196 if (tv) { 196 if (tv) {
197 if (copy_from_user(&user_tv, tv, sizeof(*tv))) 197 if (copy_from_user(&user_tv, tv, sizeof(*tv)))
198 return -EFAULT; 198 return -EFAULT;
199
200 if (!timeval_valid(&user_tv))
201 return -EINVAL;
202
199 new_ts.tv_sec = user_tv.tv_sec; 203 new_ts.tv_sec = user_tv.tv_sec;
200 new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC; 204 new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
201 } 205 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 851924fa5170..683b4782019b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1477,9 +1477,9 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1477 1477
1478 pr_info("Task in "); 1478 pr_info("Task in ");
1479 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1479 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1480 pr_info(" killed as a result of limit of "); 1480 pr_cont(" killed as a result of limit of ");
1481 pr_cont_cgroup_path(memcg->css.cgroup); 1481 pr_cont_cgroup_path(memcg->css.cgroup);
1482 pr_info("\n"); 1482 pr_cont("\n");
1483 1483
1484 rcu_read_unlock(); 1484 rcu_read_unlock();
1485 1485
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7633c503a116..8e20f9c2fa5a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2332,12 +2332,21 @@ static inline struct page *
2332__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 2332__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2333 struct zonelist *zonelist, enum zone_type high_zoneidx, 2333 struct zonelist *zonelist, enum zone_type high_zoneidx,
2334 nodemask_t *nodemask, struct zone *preferred_zone, 2334 nodemask_t *nodemask, struct zone *preferred_zone,
2335 int classzone_idx, int migratetype) 2335 int classzone_idx, int migratetype, unsigned long *did_some_progress)
2336{ 2336{
2337 struct page *page; 2337 struct page *page;
2338 2338
2339 /* Acquire the per-zone oom lock for each zone */ 2339 *did_some_progress = 0;
2340
2341 if (oom_killer_disabled)
2342 return NULL;
2343
2344 /*
2345 * Acquire the per-zone oom lock for each zone. If that
2346 * fails, somebody else is making progress for us.
2347 */
2340 if (!oom_zonelist_trylock(zonelist, gfp_mask)) { 2348 if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
2349 *did_some_progress = 1;
2341 schedule_timeout_uninterruptible(1); 2350 schedule_timeout_uninterruptible(1);
2342 return NULL; 2351 return NULL;
2343 } 2352 }
@@ -2363,12 +2372,18 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2363 goto out; 2372 goto out;
2364 2373
2365 if (!(gfp_mask & __GFP_NOFAIL)) { 2374 if (!(gfp_mask & __GFP_NOFAIL)) {
2375 /* Coredumps can quickly deplete all memory reserves */
2376 if (current->flags & PF_DUMPCORE)
2377 goto out;
2366 /* The OOM killer will not help higher order allocs */ 2378 /* The OOM killer will not help higher order allocs */
2367 if (order > PAGE_ALLOC_COSTLY_ORDER) 2379 if (order > PAGE_ALLOC_COSTLY_ORDER)
2368 goto out; 2380 goto out;
2369 /* The OOM killer does not needlessly kill tasks for lowmem */ 2381 /* The OOM killer does not needlessly kill tasks for lowmem */
2370 if (high_zoneidx < ZONE_NORMAL) 2382 if (high_zoneidx < ZONE_NORMAL)
2371 goto out; 2383 goto out;
2384 /* The OOM killer does not compensate for light reclaim */
2385 if (!(gfp_mask & __GFP_FS))
2386 goto out;
2372 /* 2387 /*
2373 * GFP_THISNODE contains __GFP_NORETRY and we never hit this. 2388 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
2374 * Sanity check for bare calls of __GFP_THISNODE, not real OOM. 2389 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
@@ -2381,7 +2396,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2381 } 2396 }
2382 /* Exhausted what can be done so it's blamo time */ 2397 /* Exhausted what can be done so it's blamo time */
2383 out_of_memory(zonelist, gfp_mask, order, nodemask, false); 2398 out_of_memory(zonelist, gfp_mask, order, nodemask, false);
2384 2399 *did_some_progress = 1;
2385out: 2400out:
2386 oom_zonelist_unlock(zonelist, gfp_mask); 2401 oom_zonelist_unlock(zonelist, gfp_mask);
2387 return page; 2402 return page;
@@ -2658,7 +2673,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2658 (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 2673 (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2659 goto nopage; 2674 goto nopage;
2660 2675
2661restart: 2676retry:
2662 if (!(gfp_mask & __GFP_NO_KSWAPD)) 2677 if (!(gfp_mask & __GFP_NO_KSWAPD))
2663 wake_all_kswapds(order, zonelist, high_zoneidx, 2678 wake_all_kswapds(order, zonelist, high_zoneidx,
2664 preferred_zone, nodemask); 2679 preferred_zone, nodemask);
@@ -2681,7 +2696,6 @@ restart:
2681 classzone_idx = zonelist_zone_idx(preferred_zoneref); 2696 classzone_idx = zonelist_zone_idx(preferred_zoneref);
2682 } 2697 }
2683 2698
2684rebalance:
2685 /* This is the last chance, in general, before the goto nopage. */ 2699 /* This is the last chance, in general, before the goto nopage. */
2686 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, 2700 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2687 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, 2701 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -2788,54 +2802,28 @@ rebalance:
2788 if (page) 2802 if (page)
2789 goto got_pg; 2803 goto got_pg;
2790 2804
2791 /*
2792 * If we failed to make any progress reclaiming, then we are
2793 * running out of options and have to consider going OOM
2794 */
2795 if (!did_some_progress) {
2796 if (oom_gfp_allowed(gfp_mask)) {
2797 if (oom_killer_disabled)
2798 goto nopage;
2799 /* Coredumps can quickly deplete all memory reserves */
2800 if ((current->flags & PF_DUMPCORE) &&
2801 !(gfp_mask & __GFP_NOFAIL))
2802 goto nopage;
2803 page = __alloc_pages_may_oom(gfp_mask, order,
2804 zonelist, high_zoneidx,
2805 nodemask, preferred_zone,
2806 classzone_idx, migratetype);
2807 if (page)
2808 goto got_pg;
2809
2810 if (!(gfp_mask & __GFP_NOFAIL)) {
2811 /*
2812 * The oom killer is not called for high-order
2813 * allocations that may fail, so if no progress
2814 * is being made, there are no other options and
2815 * retrying is unlikely to help.
2816 */
2817 if (order > PAGE_ALLOC_COSTLY_ORDER)
2818 goto nopage;
2819 /*
2820 * The oom killer is not called for lowmem
2821 * allocations to prevent needlessly killing
2822 * innocent tasks.
2823 */
2824 if (high_zoneidx < ZONE_NORMAL)
2825 goto nopage;
2826 }
2827
2828 goto restart;
2829 }
2830 }
2831
2832 /* Check if we should retry the allocation */ 2805 /* Check if we should retry the allocation */
2833 pages_reclaimed += did_some_progress; 2806 pages_reclaimed += did_some_progress;
2834 if (should_alloc_retry(gfp_mask, order, did_some_progress, 2807 if (should_alloc_retry(gfp_mask, order, did_some_progress,
2835 pages_reclaimed)) { 2808 pages_reclaimed)) {
2809 /*
2810 * If we fail to make progress by freeing individual
2811 * pages, but the allocation wants us to keep going,
2812 * start OOM killing tasks.
2813 */
2814 if (!did_some_progress) {
2815 page = __alloc_pages_may_oom(gfp_mask, order, zonelist,
2816 high_zoneidx, nodemask,
2817 preferred_zone, classzone_idx,
 2818 migratetype, &did_some_progress);
2819 if (page)
2820 goto got_pg;
2821 if (!did_some_progress)
2822 goto nopage;
2823 }
2836 /* Wait for some write requests to complete then retry */ 2824 /* Wait for some write requests to complete then retry */
2837 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); 2825 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2838 goto rebalance; 2826 goto retry;
2839 } else { 2827 } else {
2840 /* 2828 /*
2841 * High-order allocations do not necessarily loop after 2829 * High-order allocations do not necessarily loop after
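
The page_alloc refactor folds OOM killing into the normal retry loop: __alloc_pages_may_oom() itself now reports progress via *did_some_progress, and the slowpath invokes it only when reclaim made no headway but the allocation must keep trying. The control flow in miniature (everything here is a toy model of the loop, not mm code):

#include <stdbool.h>
#include <stdio.h>

static bool try_alloc(void)        { return false; }  /* freelist empty */
static unsigned long reclaim(void) { return 0; }      /* reclaim stuck */

/* Toy OOM path: reports whether it made progress for the caller. */
static bool may_oom(unsigned long *progress)
{
    *progress = 1;   /* killed something, worth retrying */
    return false;    /* but produced no page directly */
}

int main(void)
{
    unsigned long progress;

    for (int attempt = 0; attempt < 3; attempt++) {   /* the "retry:" label */
        if (try_alloc())
            return 0;                                 /* got_pg */
        progress = reclaim();
        if (!progress) {
            if (may_oom(&progress))
                return 0;                             /* got_pg via OOM */
            if (!progress)
                break;                                /* truly stuck */
        }
        /* wait_iff_congested(...) would sleep here before retrying */
    }
    puts("nopage");
    return 1;
}
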
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ab2505c3ef54..dcd90c891d8e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2656,7 +2656,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
2656 * should make reasonable progress. 2656 * should make reasonable progress.
2657 */ 2657 */
2658 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2658 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2659 gfp_mask, nodemask) { 2659 gfp_zone(gfp_mask), nodemask) {
2660 if (zone_idx(zone) > ZONE_NORMAL) 2660 if (zone_idx(zone) > ZONE_NORMAL)
2661 continue; 2661 continue;
2662 2662
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 515569ffde8a..589aafd01fc5 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -46,6 +46,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
46 snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x", 46 snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",
47 ds->index, ds->pd->sw_addr); 47 ds->index, ds->pd->sw_addr);
48 ds->slave_mii_bus->parent = ds->master_dev; 48 ds->slave_mii_bus->parent = ds->master_dev;
49 ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
49} 50}
50 51
51 52
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 3a83ce5efa80..787b3c294ce6 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -129,7 +129,8 @@ int ip_forward(struct sk_buff *skb)
129 * We now generate an ICMP HOST REDIRECT giving the route 129 * We now generate an ICMP HOST REDIRECT giving the route
130 * we calculated. 130 * we calculated.
131 */ 131 */
132 if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb)) 132 if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
133 !skb_sec_path(skb))
133 ip_rt_send_redirect(skb); 134 ip_rt_send_redirect(skb);
134 135
135 skb->priority = rt_tos2priority(iph->tos); 136 skb->priority = rt_tos2priority(iph->tos);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index c0d82f78d364..2a3720fb5a5f 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -966,8 +966,11 @@ bool ping_rcv(struct sk_buff *skb)
966 966
967 sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); 967 sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
968 if (sk != NULL) { 968 if (sk != NULL) {
969 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
970
969 pr_debug("rcv on socket %p\n", sk); 971 pr_debug("rcv on socket %p\n", sk);
970 ping_queue_rcv_skb(sk, skb_get(skb)); 972 if (skb2)
973 ping_queue_rcv_skb(sk, skb2);
971 sock_put(sk); 974 sock_put(sk);
972 return true; 975 return true;
973 } 976 }
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6a2155b02602..d58dd0ec3e53 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1554,11 +1554,10 @@ static int __mkroute_input(struct sk_buff *skb,
1554 1554
1555 do_cache = res->fi && !itag; 1555 do_cache = res->fi && !itag;
1556 if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) && 1556 if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1557 skb->protocol == htons(ETH_P_IP) &&
1557 (IN_DEV_SHARED_MEDIA(out_dev) || 1558 (IN_DEV_SHARED_MEDIA(out_dev) ||
1558 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) { 1559 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1559 flags |= RTCF_DOREDIRECT; 1560 IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1560 do_cache = false;
1561 }
1562 1561
1563 if (skb->protocol != htons(ETH_P_IP)) { 1562 if (skb->protocol != htons(ETH_P_IP)) {
1564 /* Not IP (i.e. ARP). Do not create route, if it is 1563 /* Not IP (i.e. ARP). Do not create route, if it is
@@ -2303,6 +2302,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2303 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; 2302 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2304 if (rt->rt_flags & RTCF_NOTIFY) 2303 if (rt->rt_flags & RTCF_NOTIFY)
2305 r->rtm_flags |= RTM_F_NOTIFY; 2304 r->rtm_flags |= RTM_F_NOTIFY;
2305 if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2306 r->rtm_flags |= RTCF_DOREDIRECT;
2306 2307
2307 if (nla_put_be32(skb, RTA_DST, dst)) 2308 if (nla_put_be32(skb, RTA_DST, dst))
2308 goto nla_put_failure; 2309 goto nla_put_failure;
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 7927db0a9279..4a000f1dd757 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
 	s_slot = cb->args[0];
 	num = s_num = cb->args[1];
 
-	for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+	for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
 		struct sock *sk;
 		struct hlist_nulls_node *node;
 		struct udp_hslot *hslot = &table->hash[slot];
 
+		num = 0;
+
 		if (hlist_nulls_empty(&hslot->head))
 			continue;
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index b2d1838897c9..f1c6d5e98322 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -659,6 +659,29 @@ static int fib6_commit_metrics(struct dst_entry *dst,
 	return 0;
 }
 
+static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
+			  struct net *net)
+{
+	if (atomic_read(&rt->rt6i_ref) != 1) {
+		/* This route is used as dummy address holder in some split
+		 * nodes. It is not leaked, but it still holds other resources,
+		 * which must be released in time. So, scan ascendant nodes
+		 * and replace dummy references to this route with references
+		 * to still alive ones.
+		 */
+		while (fn) {
+			if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
+				fn->leaf = fib6_find_prefix(net, fn);
+				atomic_inc(&fn->leaf->rt6i_ref);
+				rt6_release(rt);
+			}
+			fn = fn->parent;
+		}
+		/* No more references are possible at this point. */
+		BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
+	}
+}
+
 /*
  *	Insert routing information in a node.
  */
@@ -807,11 +830,12 @@ add:
 		rt->dst.rt6_next = iter->dst.rt6_next;
 		atomic_inc(&rt->rt6i_ref);
 		inet6_rt_notify(RTM_NEWROUTE, rt, info);
-		rt6_release(iter);
 		if (!(fn->fn_flags & RTN_RTINFO)) {
 			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
 			fn->fn_flags |= RTN_RTINFO;
 		}
+		fib6_purge_rt(iter, fn, info->nl_net);
+		rt6_release(iter);
 	}
 
 	return 0;
@@ -1322,24 +1346,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
 		fn = fib6_repair_tree(net, fn);
 	}
 
-	if (atomic_read(&rt->rt6i_ref) != 1) {
-		/* This route is used as dummy address holder in some split
-		 * nodes. It is not leaked, but it still holds other resources,
-		 * which must be released in time. So, scan ascendant nodes
-		 * and replace dummy references to this route with references
-		 * to still alive ones.
-		 */
-		while (fn) {
-			if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
-				fn->leaf = fib6_find_prefix(net, fn);
-				atomic_inc(&fn->leaf->rt6i_ref);
-				rt6_release(rt);
-			}
-			fn = fn->parent;
-		}
-		/* No more references are possible at this point. */
-		BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
-	}
+	fib6_purge_rt(rt, fn, net);
 
 	inet6_rt_notify(RTM_DELROUTE, rt, info);
 	rt6_release(rt);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 166e33bed222..495965358d22 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1242,12 +1242,16 @@ restart:
 		rt = net->ipv6.ip6_null_entry;
 	else if (rt->dst.error) {
 		rt = net->ipv6.ip6_null_entry;
-	} else if (rt == net->ipv6.ip6_null_entry) {
+		goto out;
+	}
+
+	if (rt == net->ipv6.ip6_null_entry) {
 		fn = fib6_backtrack(fn, &fl6->saddr);
 		if (fn)
 			goto restart;
 	}
 
+out:
 	dst_hold(&rt->dst);
 
 	read_unlock_bh(&table->tb6_lock);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 5f983644373a..48bf5a06847b 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -130,12 +130,18 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 {
 	struct flowi6 *fl6 = &fl->u.ip6;
 	int onlyproto = 0;
-	u16 offset = skb_network_header_len(skb);
 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
+	u16 offset = sizeof(*hdr);
 	struct ipv6_opt_hdr *exthdr;
 	const unsigned char *nh = skb_network_header(skb);
-	u8 nexthdr = nh[IP6CB(skb)->nhoff];
+	u16 nhoff = IP6CB(skb)->nhoff;
 	int oif = 0;
+	u8 nexthdr;
+
+	if (!nhoff)
+		nhoff = offsetof(struct ipv6hdr, nexthdr);
+
+	nexthdr = nh[nhoff];
 
 	if (skb_dst(skb))
 		oif = skb_dst(skb)->dev->ifindex;
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
index 612a5ddaf93b..799bafc2af39 100644
--- a/net/llc/sysctl_net_llc.c
+++ b/net/llc/sysctl_net_llc.c
@@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = {
 	{
 		.procname	= "ack",
 		.data		= &sysctl_llc2_ack_timeout,
-		.maxlen		= sizeof(long),
+		.maxlen		= sizeof(sysctl_llc2_ack_timeout),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
 	{
 		.procname	= "busy",
 		.data		= &sysctl_llc2_busy_timeout,
-		.maxlen		= sizeof(long),
+		.maxlen		= sizeof(sysctl_llc2_busy_timeout),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
 	{
 		.procname	= "p",
 		.data		= &sysctl_llc2_p_timeout,
-		.maxlen		= sizeof(long),
+		.maxlen		= sizeof(sysctl_llc2_p_timeout),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
 	{
 		.procname	= "rej",
 		.data		= &sysctl_llc2_rej_timeout,
-		.maxlen		= sizeof(long),
+		.maxlen		= sizeof(sysctl_llc2_rej_timeout),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 4c5192e0d66c..4a95fe3cffbc 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -86,20 +86,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 		}
 	}
 
-	/* tear down aggregation sessions and remove STAs */
-	mutex_lock(&local->sta_mtx);
-	list_for_each_entry(sta, &local->sta_list, list) {
-		if (sta->uploaded) {
-			enum ieee80211_sta_state state;
-
-			state = sta->sta_state;
-			for (; state > IEEE80211_STA_NOTEXIST; state--)
-				WARN_ON(drv_sta_state(local, sta->sdata, sta,
-						      state, state - 1));
-		}
-	}
-	mutex_unlock(&local->sta_mtx);
-
 	/* remove all interfaces that were created in the driver */
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (!ieee80211_sdata_running(sdata))
@@ -111,6 +97,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 		case NL80211_IFTYPE_STATION:
 			ieee80211_mgd_quiesce(sdata);
 			break;
+		case NL80211_IFTYPE_WDS:
+			/* tear down aggregation sessions and remove STAs */
+			mutex_lock(&local->sta_mtx);
+			sta = sdata->u.wds.sta;
+			if (sta && sta->uploaded) {
+				enum ieee80211_sta_state state;
+
+				state = sta->sta_state;
+				for (; state > IEEE80211_STA_NOTEXIST; state--)
+					WARN_ON(drv_sta_state(local, sta->sdata,
+							      sta, state,
+							      state - 1));
+			}
+			mutex_unlock(&local->sta_mtx);
+			break;
 		default:
 			break;
 		}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 683b10f46505..d69ca513848e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -272,7 +272,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
 		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
 	else if (rate)
-		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
+		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
 	else
 		channel_flags |= IEEE80211_CHAN_2GHZ;
 	put_unaligned_le16(channel_flags, pos);
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 84c8219c3e1c..f59adf8a4cd7 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -180,6 +180,11 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	}
 
 	bpf_size = bpf_len * sizeof(*bpf_ops);
+	if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
+		ret = -EINVAL;
+		goto errout;
+	}
+
 	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
 	if (bpf_ops == NULL) {
 		ret = -ENOMEM;
@@ -215,15 +220,21 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
 				   struct cls_bpf_head *head)
 {
 	unsigned int i = 0x80000000;
+	u32 handle;
 
 	do {
 		if (++head->hgen == 0x7FFFFFFF)
 			head->hgen = 1;
 	} while (--i > 0 && cls_bpf_get(tp, head->hgen));
-	if (i == 0)
+
+	if (unlikely(i == 0)) {
 		pr_err("Insufficient number of handles\n");
+		handle = 0;
+	} else {
+		handle = head->hgen;
+	}
 
-	return i;
+	return handle;
 }
 
 static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f791edd64d6c..26d06dbcc1c8 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1182,7 +1182,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
 	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
 	new->peer.peer_hmacs = NULL;
 
-	sctp_auth_key_put(asoc->asoc_shared_key);
 	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
 }
 
diff --git a/net/socket.c b/net/socket.c
index a2c33a4dc7ba..418795caa897 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -869,9 +869,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
 static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
 					 struct sock_iocb *siocb)
 {
-	if (!is_sync_kiocb(iocb))
-		BUG();
-
 	siocb->kiocb = iocb;
 	iocb->private = siocb;
 	return siocb;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7ca4b5133123..8887c6e5fca8 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2854,6 +2854,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
 	if (!rdev->ops->get_key)
 		return -EOPNOTSUPP;
 
+	if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+		return -ENOENT;
+
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg)
 		return -ENOMEM;
@@ -2873,10 +2876,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
 	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
 		goto nla_put_failure;
 
-	if (pairwise && mac_addr &&
-	    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
-		return -ENOENT;
-
 	err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
 			   get_key_callback);
 
@@ -3047,7 +3046,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
 	wdev_lock(dev->ieee80211_ptr);
 	err = nl80211_key_allowed(dev->ieee80211_ptr);
 
-	if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
+	if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
 	    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
 		err = -ENOENT;
 
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d0ac795445b7..5488c3662f7d 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -308,6 +308,12 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
 		goto out;
 	}
 
+	if (ieee80211_is_mgmt(fc)) {
+		if (ieee80211_has_order(fc))
+			hdrlen += IEEE80211_HT_CTL_LEN;
+		goto out;
+	}
+
 	if (ieee80211_is_ctl(fc)) {
 		/*
 		 * ACK and CTS are 10 bytes, all others 16. To see how
diff --git a/samples/bpf/test_maps.c b/samples/bpf/test_maps.c
index e286b42307f3..6299ee95cd11 100644
--- a/samples/bpf/test_maps.c
+++ b/samples/bpf/test_maps.c
@@ -69,9 +69,9 @@ static void test_hashmap_sanity(int i, void *data)
 
 	/* iterate over two elements */
 	assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 &&
-	       next_key == 2);
+	       (next_key == 1 || next_key == 2));
 	assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 &&
-	       next_key == 1);
+	       (next_key == 1 || next_key == 2));
 	assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&
 	       errno == ENOENT);
 