-rw-r--r-- Documentation/device-mapper/cache.txt | 11
-rw-r--r-- Documentation/device-mapper/thin-provisioning.txt | 34
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt (renamed from Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt) | 8
-rw-r--r-- MAINTAINERS | 9
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/arc/mm/cache_arc700.c | 4
-rw-r--r-- arch/arm/Kconfig | 3
-rw-r--r-- arch/arm/boot/compressed/.gitignore | 1
-rw-r--r-- arch/arm/boot/dts/bcm11351.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/omap3-gta04.dts | 2
-rw-r--r-- arch/arm/boot/dts/omap3-igep0020.dts | 2
-rw-r--r-- arch/arm/boot/dts/omap3-igep0030.dts | 2
-rw-r--r-- arch/arm/boot/dts/sun4i-a10.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sun5i-a10s.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sun5i-a13.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sun7i-a20.dtsi | 12
-rw-r--r-- arch/arm/configs/tegra_defconfig | 3
-rw-r--r-- arch/arm/include/asm/memory.h | 9
-rw-r--r-- arch/arm/kernel/head-common.S | 12
-rw-r--r-- arch/arm/kernel/head.S | 2
-rw-r--r-- arch/arm/mach-omap2/cclock3xxx_data.c | 2
-rw-r--r-- arch/arm/mach-omap2/cpuidle44xx.c | 8
-rw-r--r-- arch/arm/mach-omap2/dpll3xxx.c | 92
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.c | 20
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 9
-rw-r--r-- arch/arm/mach-omap2/pdata-quirks.c | 21
-rw-r--r-- arch/arm/mach-omap2/prminst44xx.c | 4
-rw-r--r-- arch/arm/mach-sa1100/include/mach/collie.h | 2
-rw-r--r-- arch/arm/mm/dump.c | 3
-rw-r--r-- arch/c6x/include/asm/cache.h | 1
-rw-r--r-- arch/cris/include/asm/bitops.h | 2
-rw-r--r-- arch/ia64/kernel/uncached.c | 2
-rw-r--r-- arch/powerpc/kernel/process.c | 9
-rw-r--r-- arch/powerpc/kernel/reloc_64.S | 1
-rw-r--r-- arch/powerpc/platforms/cell/ras.c | 3
-rw-r--r-- arch/x86/Kconfig.cpu | 4
-rw-r--r-- arch/x86/include/asm/barrier.h | 8
-rw-r--r-- arch/x86/include/asm/efi.h | 1
-rw-r--r-- arch/x86/include/asm/io.h | 2
-rw-r--r-- arch/x86/include/asm/spinlock.h | 5
-rw-r--r-- arch/x86/kernel/cpu/centaur.c | 272
-rw-r--r-- arch/x86/kernel/head_32.S | 7
-rw-r--r-- arch/x86/kernel/head_64.S | 6
-rw-r--r-- arch/x86/kernel/setup.c | 10
-rw-r--r-- arch/x86/kvm/svm.c | 6
-rw-r--r-- arch/x86/mm/fault.c | 47
-rw-r--r-- arch/x86/platform/efi/efi.c | 20
-rw-r--r-- arch/x86/um/asm/barrier.h | 4
-rw-r--r-- block/blk-exec.c | 2
-rw-r--r-- block/blk-flush.c | 4
-rw-r--r-- block/blk-mq-cpu.c | 14
-rw-r--r-- block/blk-mq.c | 102
-rw-r--r-- block/blk-mq.h | 1
-rw-r--r-- drivers/acpi/ec.c | 64
-rw-r--r-- drivers/acpi/resource.c | 10
-rw-r--r-- drivers/ata/libata-core.c | 3
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.h | 2
-rw-r--r-- drivers/clk/shmobile/clk-rcar-gen2.c | 36
-rw-r--r-- drivers/cpufreq/cpufreq.c | 51
-rw-r--r-- drivers/firewire/core-device.c | 22
-rw-r--r-- drivers/firewire/net.c | 6
-rw-r--r-- drivers/firewire/ohci.c | 15
-rw-r--r-- drivers/firewire/sbp2.c | 17
-rw-r--r-- drivers/gpu/drm/armada/armada_drv.c | 10
-rw-r--r-- drivers/gpu/drm/bochs/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 23
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 19
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/atombios_encoders.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_smc.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/ni.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r420.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r520.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/rs400.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rs690.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rv515.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rv770.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 3
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.c | 180
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.h | 7
-rw-r--r-- drivers/md/Kconfig | 10
-rw-r--r-- drivers/md/dm-cache-policy-mq.c | 4
-rw-r--r-- drivers/md/dm-snap-persistent.c | 3
-rw-r--r-- drivers/md/dm-thin-metadata.c | 37
-rw-r--r-- drivers/md/dm-thin-metadata.h | 11
-rw-r--r-- drivers/md/dm-thin.c | 304
-rw-r--r-- drivers/md/persistent-data/Kconfig | 10
-rw-r--r-- drivers/md/persistent-data/dm-space-map-metadata.c | 113
-rw-r--r-- drivers/misc/sgi-xp/xpc_uv.c | 2
-rw-r--r-- drivers/net/usb/ax88179_178a.c | 8
-rw-r--r-- drivers/pci/bus.c | 2
-rw-r--r-- drivers/pci/pci.c | 3
-rw-r--r-- drivers/pinctrl/Kconfig | 2
-rw-r--r-- drivers/pinctrl/pinctrl-capri.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-sunxi.c | 6
-rw-r--r-- drivers/pinctrl/pinctrl-sunxi.h | 6
-rw-r--r-- drivers/pinctrl/sh-pfc/pfc-r8a7791.c | 6
-rw-r--r-- drivers/pinctrl/sirf/pinctrl-sirf.c | 4
-rw-r--r-- drivers/spi/spi-ath79.c | 4
-rw-r--r-- drivers/spi/spi-atmel.c | 17
-rw-r--r-- drivers/spi/spi-coldfire-qspi.c | 6
-rw-r--r-- drivers/spi/spi-fsl-dspi.c | 6
-rw-r--r-- drivers/spi/spi-imx.c | 4
-rw-r--r-- drivers/spi/spi-topcliff-pch.c | 15
-rw-r--r-- drivers/staging/cxt1e1/linux.c | 2
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 10
-rw-r--r-- drivers/target/iscsi/iscsi_target_erl2.c | 16
-rw-r--r-- drivers/target/iscsi/iscsi_target_tpg.c | 2
-rw-r--r-- drivers/target/target_core_sbc.c | 38
-rw-r--r-- drivers/thermal/Kconfig | 13
-rw-r--r-- drivers/thermal/thermal_core.c | 27
-rw-r--r-- drivers/thermal/x86_pkg_temp_thermal.c | 11
-rw-r--r-- drivers/usb/core/config.c | 4
-rw-r--r-- drivers/usb/core/quirks.c | 4
-rw-r--r-- drivers/usb/host/xhci.c | 14
-rw-r--r-- fs/bio-integrity.c | 5
-rw-r--r-- fs/cifs/cifsglob.h | 2
-rw-r--r-- fs/cifs/file.c | 24
-rw-r--r-- fs/cifs/transport.c | 29
-rw-r--r-- fs/file.c | 56
-rw-r--r-- fs/file_table.c | 1
-rw-r--r-- fs/hfsplus/catalog.c | 41
-rw-r--r-- fs/hfsplus/hfsplus_fs.h | 1
-rw-r--r-- fs/hfsplus/hfsplus_raw.h | 6
-rw-r--r-- fs/hfsplus/inode.c | 9
-rw-r--r-- fs/namei.c | 2
-rw-r--r-- fs/nfs/delegation.c | 11
-rw-r--r-- fs/nfs/nfs4filelayout.c | 10
-rw-r--r-- fs/nfs/nfs4proc.c | 24
-rw-r--r-- fs/nfs/nfs4state.c | 14
-rw-r--r-- fs/ocfs2/file.c | 8
-rw-r--r-- fs/open.c | 4
-rw-r--r-- fs/proc/base.c | 1
-rw-r--r-- fs/read_write.c | 40
-rw-r--r-- include/kvm/arm_vgic.h | 5
-rw-r--r-- include/linux/audit.h | 3
-rw-r--r-- include/linux/blk-mq.h | 11
-rw-r--r-- include/linux/clk/ti.h | 4
-rw-r--r-- include/linux/file.h | 27
-rw-r--r-- include/linux/firewire.h | 1
-rw-r--r-- include/linux/fs.h | 8
-rw-r--r-- include/linux/gfp.h | 4
-rw-r--r-- include/linux/mmzone.h | 4
-rw-r--r-- include/linux/nfs_xdr.h | 5
-rw-r--r-- include/linux/slab.h | 2
-rw-r--r-- include/linux/tracepoint.h | 6
-rw-r--r-- include/target/iscsi/iscsi_transport.h | 1
-rw-r--r-- include/trace/events/sunrpc.h | 4
-rw-r--r-- kernel/audit.c | 31
-rw-r--r-- kernel/audit.h | 2
-rw-r--r-- kernel/auditfilter.c | 10
-rw-r--r-- kernel/cpuset.c | 10
-rw-r--r-- kernel/irq/irqdomain.c | 1
-rw-r--r-- kernel/irq/manage.c | 3
-rw-r--r-- kernel/profile.c | 4
-rw-r--r-- kernel/trace/trace_events.c | 10
-rw-r--r-- kernel/tracepoint.c | 7
-rw-r--r-- mm/Kconfig | 4
-rw-r--r-- mm/compaction.c | 20
-rw-r--r-- mm/migrate.c | 11
-rw-r--r-- net/socket.c | 13
-rw-r--r-- scripts/kallsyms.c | 3
-rw-r--r-- scripts/mod/modpost.c | 13
-rw-r--r-- security/keys/keyring.c | 6
-rw-r--r-- sound/pci/hda/patch_analog.c | 4
-rw-r--r-- sound/pci/hda/patch_realtek.c | 22
-rw-r--r-- sound/soc/codecs/88pm860x-codec.c | 3
-rw-r--r-- sound/soc/codecs/si476x.c | 2
-rw-r--r-- sound/soc/omap/n810.c | 4
-rw-r--r-- sound/soc/soc-pcm.c | 3
-rw-r--r-- sound/usb/mixer.c | 1
-rw-r--r-- tools/testing/selftests/ipc/msgque.c | 1
183 files changed, 1601 insertions, 1044 deletions
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index e6b72d355151..68c0f517c60e 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -124,12 +124,11 @@ the default being 204800 sectors (or 100MB).
 Updating on-disk metadata
 -------------------------
 
-On-disk metadata is committed every time a REQ_SYNC or REQ_FUA bio is
-written. If no such requests are made then commits will occur every
-second. This means the cache behaves like a physical disk that has a
-write cache (the same is true of the thin-provisioning target). If
-power is lost you may lose some recent writes. The metadata should
-always be consistent in spite of any crash.
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second. This
+means the cache behaves like a physical disk that has a volatile write
+cache. If power is lost you may lose some recent writes. The metadata
+should always be consistent in spite of any crash.
 
 The 'dirty' state for a cache block changes far too frequently for us
 to keep updating it on the fly. So we treat it as a hint. In normal
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 8a7a3d46e0da..05a27e9442bd 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -116,6 +116,35 @@ Resuming a device with a new table itself triggers an event so the
 userspace daemon can use this to detect a situation where a new table
 already exceeds the threshold.
 
+A low water mark for the metadata device is maintained in the kernel and
+will trigger a dm event if free space on the metadata device drops below
+it.
+
+Updating on-disk metadata
+-------------------------
+
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second. This
+means the thin-provisioning target behaves like a physical disk that has
+a volatile write cache. If power is lost you may lose some recent
+writes. The metadata should always be consistent in spite of any crash.
+
+If data space is exhausted the pool will either error or queue IO
+according to the configuration (see: error_if_no_space). If metadata
+space is exhausted or a metadata operation fails: the pool will error IO
+until the pool is taken offline and repair is performed to 1) fix any
+potential inconsistencies and 2) clear the flag that imposes repair.
+Once the pool's metadata device is repaired it may be resized, which
+will allow the pool to return to normal operation. Note that if a pool
+is flagged as needing repair, the pool's data and metadata devices
+cannot be resized until repair is performed. It should also be noted
+that when the pool's metadata space is exhausted the current metadata
+transaction is aborted. Given that the pool will cache IO whose
+completion may have already been acknowledged to upper IO layers
+(e.g. filesystem) it is strongly suggested that consistency checks
+(e.g. fsck) be performed on those layers when repair of the pool is
+required.
+
 Thin provisioning
 -----------------
 
@@ -258,10 +287,9 @@ ii) Status
     should register for the event and then check the target's status.
 
     held metadata root:
-	The location, in sectors, of the metadata root that has been
+	The location, in blocks, of the metadata root that has been
 	'held' for userspace read access. '-' indicates there is no
-	held root. This feature is not yet implemented so '-' is
-	always returned.
+	held root.
 
     discard_passdown|no_discard_passdown
 	Whether or not discards are actually being passed down to the
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
index 9e9e9ef9f852..c119debe6bab 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
@@ -1,4 +1,4 @@
-Broadcom Capri Pin Controller
+Broadcom BCM281xx Pin Controller
 
 This is a pin controller for the Broadcom BCM281xx SoC family, which includes
 BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs.
@@ -7,14 +7,14 @@ BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs.
 
 Required Properties:
 
-- compatible: Must be "brcm,capri-pinctrl".
+- compatible: Must be "brcm,bcm11351-pinctrl"
 - reg: Base address of the PAD Controller register block and the size
   of the block.
 
 For example, the following is the bare minimum node:
 
 	pinctrl@35004800 {
-		compatible = "brcm,capri-pinctrl";
+		compatible = "brcm,bcm11351-pinctrl";
 		reg = <0x35004800 0x430>;
 	};
 
@@ -119,7 +119,7 @@ Optional Properties (for HDMI pins):
 Example:
 // pin controller node
 pinctrl@35004800 {
-	compatible = "brcm,capri-pinctrl";
+	compatible = "brcm,bcm11351-pinctrl";
 	reg = <0x35004800 0x430>;
 
 	// pin configuration node
diff --git a/MAINTAINERS b/MAINTAINERS
index b359eb056e5c..b3fdb0f004ba 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -474,7 +474,7 @@ F: net/rxrpc/af_rxrpc.c
 
 AGPGART DRIVER
 M:	David Airlie <airlied@linux.ie>
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
+T:	git git://people.freedesktop.org/~airlied/linux (part of drm maint)
 S:	Maintained
 F:	drivers/char/agp/
 F:	include/linux/agp*
@@ -1738,6 +1738,7 @@ F: include/uapi/linux/bfs_fs.h
 BLACKFIN ARCHITECTURE
 M:	Steven Miao <realmz6@gmail.com>
 L:	adi-buildroot-devel@lists.sourceforge.net
+T:	git git://git.code.sf.net/p/adi-linux/code
 W:	http://blackfin.uclinux.org
 S:	Supported
 F:	arch/blackfin/
@@ -6177,6 +6178,12 @@ S: Supported
 F:	drivers/block/nvme*
 F:	include/linux/nvme.h
 
+NXP TDA998X DRM DRIVER
+M:	Russell King <rmk+kernel@arm.linux.org.uk>
+S:	Supported
+F:	drivers/gpu/drm/i2c/tda998x_drv.c
+F:	include/drm/i2c/tda998x.h
+
 OMAP SUPPORT
 M:	Tony Lindgren <tony@atomide.com>
 L:	linux-omap@vger.kernel.org
diff --git a/Makefile b/Makefile
index 78209ee1f981..1a2628ee5d91 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 6b58c1de7577..400c663b21c2 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -282,7 +282,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
 #else
 	/* if V-P const for loop, PTAG can be written once outside loop */
 	if (full_page_op)
-		write_aux_reg(ARC_REG_DC_PTAG, paddr);
+		write_aux_reg(aux_tag, paddr);
 #endif
 
 	while (num_lines-- > 0) {
@@ -296,7 +296,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
 		write_aux_reg(aux_cmd, vaddr);
 		vaddr += L1_CACHE_BYTES;
 #else
-		write_aux_reg(aux, paddr);
+		write_aux_reg(aux_cmd, paddr);
 		paddr += L1_CACHE_BYTES;
 #endif
 	}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e25419817791..15949459611f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1578,6 +1578,7 @@ config BL_SWITCHER_DUMMY_IF
 
 choice
 	prompt "Memory split"
+	depends on MMU
 	default VMSPLIT_3G
 	help
 	  Select the desired split between kernel and user memory.
@@ -1595,6 +1596,7 @@ endchoice
 
 config PAGE_OFFSET
 	hex
+	default PHYS_OFFSET if !MMU
 	default 0x40000000 if VMSPLIT_1G
 	default 0x80000000 if VMSPLIT_2G
 	default 0xC0000000
@@ -1903,6 +1905,7 @@ config XEN
 	depends on ARM && AEABI && OF
 	depends on CPU_V7 && !CPU_V6
 	depends on !GENERIC_ATOMIC64
+	depends on MMU
 	select ARM_PSCI
 	select SWIOTLB_XEN
 	select ARCH_DMA_ADDR_T_64BIT
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index 47279aa96a6a..0714e0334e33 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,4 +1,5 @@
 ashldi3.S
+bswapsdi2.S
 font.c
 lib1funcs.S
 hyp-stub.S
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi
index e491b82f8d67..792fde1b7f75 100644
--- a/arch/arm/boot/dts/bcm11351.dtsi
+++ b/arch/arm/boot/dts/bcm11351.dtsi
@@ -147,7 +147,7 @@
 	};
 
 	pinctrl@35004800 {
-		compatible = "brcm,capri-pinctrl";
+		compatible = "brcm,bcm11351-pinctrl";
 		reg = <0x35004800 0x430>;
 	};
 
diff --git a/arch/arm/boot/dts/omap3-gta04.dts b/arch/arm/boot/dts/omap3-gta04.dts
index c551e4af4d83..d3b253bbc885 100644
--- a/arch/arm/boot/dts/omap3-gta04.dts
+++ b/arch/arm/boot/dts/omap3-gta04.dts
@@ -13,7 +13,7 @@
 
 / {
 	model = "OMAP3 GTA04";
-	compatible = "ti,omap3-gta04", "ti,omap3";
+	compatible = "ti,omap3-gta04", "ti,omap36xx", "ti,omap3";
 
 	cpus {
 		cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts
index 25a2b5f652fd..f2779ac75872 100644
--- a/arch/arm/boot/dts/omap3-igep0020.dts
+++ b/arch/arm/boot/dts/omap3-igep0020.dts
@@ -14,7 +14,7 @@
 
 / {
 	model = "IGEPv2 (TI OMAP AM/DM37x)";
-	compatible = "isee,omap3-igep0020", "ti,omap3";
+	compatible = "isee,omap3-igep0020", "ti,omap36xx", "ti,omap3";
 
 	leds {
 		pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts
index 145c58cfc8ac..2793749eb1ba 100644
--- a/arch/arm/boot/dts/omap3-igep0030.dts
+++ b/arch/arm/boot/dts/omap3-igep0030.dts
@@ -13,7 +13,7 @@
 
 / {
 	model = "IGEP COM MODULE (TI OMAP AM/DM37x)";
-	compatible = "isee,omap3-igep0030", "ti,omap3";
+	compatible = "isee,omap3-igep0030", "ti,omap36xx", "ti,omap3";
 
 	leds {
 		pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 10666ca8aee1..d4d2763f4794 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -426,7 +426,7 @@
 	};
 
 	rtp: rtp@01c25000 {
-		compatible = "allwinner,sun4i-ts";
+		compatible = "allwinner,sun4i-a10-ts";
 		reg = <0x01c25000 0x100>;
 		interrupts = <29>;
 	};
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 64961595e8d6..79fd412005b0 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -383,7 +383,7 @@
 	};
 
 	rtp: rtp@01c25000 {
-		compatible = "allwinner,sun4i-ts";
+		compatible = "allwinner,sun4i-a10-ts";
 		reg = <0x01c25000 0x100>;
 		interrupts = <29>;
 	};
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 320335abfccd..c463fd730c91 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -346,7 +346,7 @@
 	};
 
 	rtp: rtp@01c25000 {
-		compatible = "allwinner,sun4i-ts";
+		compatible = "allwinner,sun4i-a10-ts";
 		reg = <0x01c25000 0x100>;
 		interrupts = <29>;
 	};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 9ff09484847b..6f25cf559ad0 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -454,7 +454,7 @@
 		rtc: rtc@01c20d00 {
 			compatible = "allwinner,sun7i-a20-rtc";
 			reg = <0x01c20d00 0x20>;
-			interrupts = <0 24 1>;
+			interrupts = <0 24 4>;
 		};
 
 		sid: eeprom@01c23800 {
@@ -463,7 +463,7 @@
 		};
 
 		rtp: rtp@01c25000 {
-			compatible = "allwinner,sun4i-ts";
+			compatible = "allwinner,sun4i-a10-ts";
 			reg = <0x01c25000 0x100>;
 			interrupts = <0 29 4>;
 		};
@@ -596,10 +596,10 @@
 		hstimer@01c60000 {
 			compatible = "allwinner,sun7i-a20-hstimer";
 			reg = <0x01c60000 0x1000>;
-			interrupts = <0 81 1>,
-				     <0 82 1>,
-				     <0 83 1>,
-				     <0 84 1>;
+			interrupts = <0 81 4>,
+				     <0 82 4>,
+				     <0 83 4>,
+				     <0 84 4>;
 			clocks = <&ahb_gates 28>;
 		};
 
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 00fe9e9710fd..27d69b558c5d 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -204,7 +204,10 @@ CONFIG_MMC_BLOCK_MINORS=16
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_LEDS_TRIGGER_ONESHOT=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 8756e4bcdba0..4afb376d9c7c 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -30,14 +30,15 @@
  */
 #define UL(x) _AC(x, UL)
 
+/* PAGE_OFFSET - the virtual address of the start of the kernel image */
+#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
+
 #ifdef CONFIG_MMU
 
 /*
- * PAGE_OFFSET - the virtual address of the start of the kernel image
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
  */
-#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
 #define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
 #define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
 
@@ -104,10 +105,6 @@
 #define END_MEM			(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif
 
-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET		PLAT_PHYS_OFFSET
-#endif
-
 /*
  * The module can be at any place in ram in nommu mode.
  */
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 47cd974e57ea..c96ecacb2021 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -177,6 +177,18 @@ __lookup_processor_type_data:
 	.long	__proc_info_end
 	.size	__lookup_processor_type_data, . - __lookup_processor_type_data
 
+__error_lpae:
+#ifdef CONFIG_DEBUG_LL
+	adr	r0, str_lpae
+	bl	printascii
+	b	__error
+str_lpae: .asciz "\nError: Kernel with LPAE support, but CPU does not support LPAE.\n"
+#else
+	b	__error
+#endif
+	.align
+ENDPROC(__error_lpae)
+
 __error_p:
 #ifdef CONFIG_DEBUG_LL
 	adr	r0, str_p1
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 914616e0bdcd..f5f381d91556 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -102,7 +102,7 @@ ENTRY(stext)
 	and	r3, r3, #0xf		@ extract VMSA support
 	cmp	r3, #5			@ long-descriptor translation table format?
 THUMB(	it	lo )			@ force fixup-able long branch encoding
-	blo	__error_p		@ only classic page table format
+	blo	__error_lpae		@ only classic page table format
 #endif
 
 #ifndef CONFIG_XIP_KERNEL
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
index 3b05aea56d1f..11ed9152e665 100644
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -433,7 +433,9 @@ static const struct clk_ops dpll4_m5x2_ck_ops = {
 	.enable		= &omap2_dflt_clk_enable,
 	.disable	= &omap2_dflt_clk_disable,
 	.is_enabled	= &omap2_dflt_clk_is_enabled,
+	.set_rate	= &omap3_clkoutx2_set_rate,
 	.recalc_rate	= &omap3_clkoutx2_recalc,
+	.round_rate	= &omap3_clkoutx2_round_rate,
 };
 
 static const struct clk_ops dpll4_m5x2_ck_3630_ops = {
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 4c158c838d40..01fc710c8181 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -23,6 +23,8 @@
23#include "prm.h" 23#include "prm.h"
24#include "clockdomain.h" 24#include "clockdomain.h"
25 25
26#define MAX_CPUS 2
27
26/* Machine specific information */ 28/* Machine specific information */
27struct idle_statedata { 29struct idle_statedata {
28 u32 cpu_state; 30 u32 cpu_state;
@@ -48,11 +50,11 @@ static struct idle_statedata omap4_idle_data[] = {
 	},
 };
 
-static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
-static struct clockdomain *cpu_clkdm[NR_CPUS];
+static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
+static struct clockdomain *cpu_clkdm[MAX_CPUS];
 
 static atomic_t abort_barrier;
-static bool cpu_done[NR_CPUS];
+static bool cpu_done[MAX_CPUS];
 static struct idle_statedata *state_ptr = &omap4_idle_data[0];
 
 /* Private functions */
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index 3185ced807c9..3c418ea54bbe 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -623,6 +623,32 @@ void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
 
 /* Clock control for DPLL outputs */
 
+/* Find the parent DPLL for the given clkoutx2 clock */
+static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
+{
+	struct clk_hw_omap *pclk = NULL;
+	struct clk *parent;
+
+	/* Walk up the parents of clk, looking for a DPLL */
+	do {
+		do {
+			parent = __clk_get_parent(hw->clk);
+			hw = __clk_get_hw(parent);
+		} while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
+		if (!hw)
+			break;
+		pclk = to_clk_hw_omap(hw);
+	} while (pclk && !pclk->dpll_data);
+
+	/* clk does not have a DPLL as a parent? error in the clock data */
+	if (!pclk) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	return pclk;
+}
+
 /**
  * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
  * @clk: DPLL output struct clk
@@ -637,27 +663,14 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
 	unsigned long rate;
 	u32 v;
 	struct clk_hw_omap *pclk = NULL;
-	struct clk *parent;
 
 	if (!parent_rate)
 		return 0;
 
-	/* Walk up the parents of clk, looking for a DPLL */
-	do {
-		do {
-			parent = __clk_get_parent(hw->clk);
-			hw = __clk_get_hw(parent);
-		} while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
-		if (!hw)
-			break;
-		pclk = to_clk_hw_omap(hw);
-	} while (pclk && !pclk->dpll_data);
+	pclk = omap3_find_clkoutx2_dpll(hw);
 
-	/* clk does not have a DPLL as a parent? error in the clock data */
-	if (!pclk) {
-		WARN_ON(1);
+	if (!pclk)
 		return 0;
-	}
 
 	dd = pclk->dpll_data;
 
@@ -672,6 +685,55 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
 	return rate;
 }
 
+int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long parent_rate)
+{
+	return 0;
+}
+
+long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long *prate)
+{
+	const struct dpll_data *dd;
+	u32 v;
+	struct clk_hw_omap *pclk = NULL;
+
+	if (!*prate)
+		return 0;
+
+	pclk = omap3_find_clkoutx2_dpll(hw);
+
+	if (!pclk)
+		return 0;
+
+	dd = pclk->dpll_data;
+
+	/* TYPE J does not have a clkoutx2 */
+	if (dd->flags & DPLL_J_TYPE) {
+		*prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate);
+		return *prate;
+	}
+
+	WARN_ON(!dd->enable_mask);
+
+	v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask;
+	v >>= __ffs(dd->enable_mask);
+
+	/* If in bypass, the rate is fixed to the bypass rate*/
+	if (v != OMAP3XXX_EN_DPLL_LOCKED)
+		return *prate;
+
+	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+		unsigned long best_parent;
+
+		best_parent = (rate / 2);
+		*prate = __clk_round_rate(__clk_get_parent(hw->clk),
+				best_parent);
+	}
+
+	return *prate * 2;
+}
+
 /* OMAP3/4 non-CORE DPLL clkops */
 const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
 	.allow_idle	= omap3_dpll_allow_idle,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 42d81885c700..1f33f5db10d5 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1947,29 +1947,31 @@ static int _ocp_softreset(struct omap_hwmod *oh)
 		goto dis_opt_clks;
 
 	_write_sysconfig(v, oh);
-	ret = _clear_softreset(oh, &v);
-	if (ret)
-		goto dis_opt_clks;
-
-	_write_sysconfig(v, oh);
 
 	if (oh->class->sysc->srst_udelay)
 		udelay(oh->class->sysc->srst_udelay);
 
 	c = _wait_softreset_complete(oh);
-	if (c == MAX_MODULE_SOFTRESET_WAIT)
+	if (c == MAX_MODULE_SOFTRESET_WAIT) {
 		pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
 			   oh->name, MAX_MODULE_SOFTRESET_WAIT);
-	else
+		ret = -ETIMEDOUT;
+		goto dis_opt_clks;
+	} else {
 		pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c);
+	}
+
+	ret = _clear_softreset(oh, &v);
+	if (ret)
+		goto dis_opt_clks;
+
+	_write_sysconfig(v, oh);
 
 	/*
 	 * XXX add _HWMOD_STATE_WEDGED for modules that don't come back from
 	 * _wait_target_ready() or _reset()
 	 */
 
-	ret = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
-
 dis_opt_clks:
 	if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
 		_disable_optional_clocks(oh);
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 18f333c440db..810c205d668b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1365,11 +1365,10 @@ static struct omap_hwmod_class_sysconfig dra7xx_spinlock_sysc = {
 	.rev_offs	= 0x0000,
 	.sysc_offs	= 0x0010,
 	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
-			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
-			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			   SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
 	.sysc_fields	= &omap_hwmod_sysc_type1,
 };
 
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 3d5b24dcd9a4..c33e07e2f0d4 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -22,6 +22,8 @@
22#include "common-board-devices.h" 22#include "common-board-devices.h"
23#include "dss-common.h" 23#include "dss-common.h"
24#include "control.h" 24#include "control.h"
25#include "omap-secure.h"
26#include "soc.h"
25 27
26struct pdata_init { 28struct pdata_init {
27 const char *compatible; 29 const char *compatible;
@@ -169,6 +171,22 @@ static void __init am3517_evm_legacy_init(void)
 	omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET);
 	omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */
 }
+
+static void __init nokia_n900_legacy_init(void)
+{
+	hsmmc2_internal_input_clk();
+
+	if (omap_type() == OMAP2_DEVICE_TYPE_SEC) {
+		if (IS_ENABLED(CONFIG_ARM_ERRATA_430973)) {
+			pr_info("RX-51: Enabling ARM errata 430973 workaround\n");
+			/* set IBE to 1 */
+			rx51_secure_update_aux_cr(BIT(6), 0);
+		} else {
+			pr_warning("RX-51: Not enabling ARM errata 430973 workaround\n");
+			pr_warning("Thumb binaries may crash randomly without this workaround\n");
+		}
+	}
+}
 #endif /* CONFIG_ARCH_OMAP3 */
 
 #ifdef CONFIG_ARCH_OMAP4
@@ -239,6 +257,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
 #endif
 #ifdef CONFIG_ARCH_OMAP3
 	OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002030, "48002030.pinmux", &pcs_pdata),
+	OF_DEV_AUXDATA("ti,omap3-padconf", 0x480025a0, "480025a0.pinmux", &pcs_pdata),
 	OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002a00, "48002a00.pinmux", &pcs_pdata),
 	/* Only on am3517 */
 	OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL),
@@ -259,7 +278,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
 static struct pdata_init pdata_quirks[] __initdata = {
 #ifdef CONFIG_ARCH_OMAP3
 	{ "compulab,omap3-sbc-t3730", omap3_sbc_t3730_legacy_init, },
-	{ "nokia,omap3-n900", hsmmc2_internal_input_clk, },
+	{ "nokia,omap3-n900", nokia_n900_legacy_init, },
 	{ "nokia,omap3-n9", hsmmc2_internal_input_clk, },
 	{ "nokia,omap3-n950", hsmmc2_internal_input_clk, },
 	{ "isee,omap3-igep0020", omap3_igep0020_legacy_init, },
diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c
index 6334b96b4097..280f3c58abe5 100644
--- a/arch/arm/mach-omap2/prminst44xx.c
+++ b/arch/arm/mach-omap2/prminst44xx.c
@@ -183,11 +183,11 @@ void omap4_prminst_global_warm_sw_reset(void)
 					    OMAP4_PRM_RSTCTRL_OFFSET);
 	v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK;
 	omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION,
-				     OMAP4430_PRM_DEVICE_INST,
+				     dev_inst,
 				     OMAP4_PRM_RSTCTRL_OFFSET);
 
 	/* OCP barrier */
 	v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
-					OMAP4430_PRM_DEVICE_INST,
+					dev_inst,
 					OMAP4_PRM_RSTCTRL_OFFSET);
 }
diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h
index f33679d2d3ee..50e1d850ee2e 100644
--- a/arch/arm/mach-sa1100/include/mach/collie.h
+++ b/arch/arm/mach-sa1100/include/mach/collie.h
@@ -13,6 +13,8 @@
 #ifndef __ASM_ARCH_COLLIE_H
 #define __ASM_ARCH_COLLIE_H
 
+#include "hardware.h" /* Gives GPIO_MAX */
+
 extern void locomolcd_power(int on);
 
 #define COLLIE_SCOOP_GPIO_BASE	(GPIO_MAX + 1)
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 2b3a56414271..ef69152f9b52 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -264,6 +264,9 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 			note_page(st, addr, 3, pmd_val(*pmd));
 		else
 			walk_pte(st, pmd, addr);
+
+		if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1]))
+			note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1]));
 	}
 }
 
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h
index 09c5a0f5f4d1..86648c083bb4 100644
--- a/arch/c6x/include/asm/cache.h
+++ b/arch/c6x/include/asm/cache.h
@@ -12,6 +12,7 @@
 #define _ASM_C6X_CACHE_H
 
 #include <linux/irqflags.h>
+#include <linux/init.h>
 
 /*
  * Cache line size
diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h
index 184066ceb1f6..053c17b36559 100644
--- a/arch/cris/include/asm/bitops.h
+++ b/arch/cris/include/asm/bitops.h
@@ -144,7 +144,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
  * definition, which doesn't have the same semantics. We don't want to
  * use -fno-builtin, so just hide the name ffs.
  */
-#define ffs kernel_ffs
+#define ffs(x) kernel_ffs(x)
 
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index a96bcf83a735..20e8a9b21d75 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -98,7 +98,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	/* attempt to allocate a granule's worth of cached memory pages */
 
 	page = alloc_pages_exact_node(nid,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
 	if (!page) {
 		mutex_unlock(&uc_pool->add_chunk_mutex);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8d4c247f1738..af064d28b365 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1048,6 +1048,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	flush_altivec_to_thread(src);
 	flush_vsx_to_thread(src);
 	flush_spe_to_thread(src);
+	/*
+	 * Flush TM state out so we can copy it. __switch_to_tm() does this
+	 * flush but it removes the checkpointed state from the current CPU and
+	 * transitions the CPU out of TM mode. Hence we need to call
+	 * tm_recheckpoint_new_task() (on the same task) to restore the
+	 * checkpointed state back and the TM mode.
+	 */
+	__switch_to_tm(src);
+	tm_recheckpoint_new_task(src);
 
 	*dst = *src;
 
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index 1482327cfeba..d88736fbece6 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -81,6 +81,7 @@ _GLOBAL(relocate)
 
 6:	blr
 
+.balign 8
 p_dyn:	.llong	__dynamic_start - 0b
 p_rela:	.llong	__rela_dyn_start - 0b
 p_st:	.llong	_stext - 0b
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 5ec1e47a0d77..e865d748179b 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -123,7 +123,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
 
 	area->nid = nid;
 	area->order = order;
-	area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE,
+	area->pages = alloc_pages_exact_node(area->nid,
+					     GFP_KERNEL|__GFP_THISNODE,
 					     area->order);
 
 	if (!area->pages) {
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index c026cca5602c..f3aaf231b4e5 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -341,10 +341,6 @@ config X86_USE_3DNOW
 	def_bool y
 	depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
 
-config X86_OOSTORE
-	def_bool y
-	depends on (MWINCHIP3D || MWINCHIPC6) && MTRR
-
 #
 # P6_NOPs are a relatively minor optimization that require a family >=
 # 6 processor, except that it is broken on certain VIA chips.
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 04a48903b2eb..69bbb4845020 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -85,11 +85,7 @@
 #else
 # define smp_rmb()	barrier()
 #endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb()	wmb()
-#else
-# define smp_wmb()	barrier()
-#endif
+#define smp_wmb()	barrier()
 #define smp_read_barrier_depends()	read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else /* !SMP */
@@ -100,7 +96,7 @@
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif /* SMP */
 
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+#if defined(CONFIG_X86_PPRO_FENCE)
 
 /*
  * For either of these options x86 doesn't have a strong TSO memory
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 3d6b9f81cc68..acd86c850414 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -134,6 +134,7 @@ extern void efi_setup_page_tables(void);
 extern void __init old_map_region(efi_memory_desc_t *md);
 extern void __init runtime_code_page_mkexec(void);
 extern void __init efi_runtime_mkexec(void);
+extern void __init efi_apply_memmap_quirks(void);
 
 struct efi_setup_data {
 	u64 fw_vendor;
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 34f69cb9350a..91d9c69a629e 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -237,7 +237,7 @@ memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
 
 static inline void flush_write_buffers(void)
 {
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+#if defined(CONFIG_X86_PPRO_FENCE)
 	asm volatile("lock; addl $0,0(%%esp)": : :"memory");
 #endif
 }
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index bf156ded74b5..0f62f5482d91 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -26,10 +26,9 @@
 # define LOCK_PTR_REG "D"
 #endif
 
-#if defined(CONFIG_X86_32) && \
-	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
+#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE))
 /*
- * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
+ * On PPro SMP, we use a locked operation to unlock
  * (PPro errata 66, 92)
  */
 # define UNLOCK_LOCK_PREFIX LOCK_PREFIX
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 8779edab684e..d8fba5c15fbd 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -8,236 +8,6 @@
 
 #include "cpu.h"
 
-#ifdef CONFIG_X86_OOSTORE
-
-static u32 power2(u32 x)
-{
-	u32 s = 1;
-
-	while (s <= x)
-		s <<= 1;
-
-	return s >>= 1;
-}
-
-
-/*
- * Set up an actual MCR
- */
-static void centaur_mcr_insert(int reg, u32 base, u32 size, int key)
-{
-	u32 lo, hi;
-
-	hi = base & ~0xFFF;
-	lo = ~(size-1);		/* Size is a power of 2 so this makes a mask */
-	lo &= ~0xFFF;		/* Remove the ctrl value bits */
-	lo |= key;		/* Attribute we wish to set */
-	wrmsr(reg+MSR_IDT_MCR0, lo, hi);
-	mtrr_centaur_report_mcr(reg, lo, hi);	/* Tell the mtrr driver */
-}
-
-/*
- * Figure what we can cover with MCR's
- *
- * Shortcut: We know you can't put 4Gig of RAM on a winchip
- */
-static u32 ramtop(void)
-{
-	u32 clip = 0xFFFFFFFFUL;
-	u32 top = 0;
-	int i;
-
-	for (i = 0; i < e820.nr_map; i++) {
-		unsigned long start, end;
-
-		if (e820.map[i].addr > 0xFFFFFFFFUL)
-			continue;
-		/*
-		 * Don't MCR over reserved space. Ignore the ISA hole
-		 * we frob around that catastrophe already
-		 */
-		if (e820.map[i].type == E820_RESERVED) {
-			if (e820.map[i].addr >= 0x100000UL &&
-			    e820.map[i].addr < clip)
-				clip = e820.map[i].addr;
-			continue;
-		}
-		start = e820.map[i].addr;
-		end = e820.map[i].addr + e820.map[i].size;
-		if (start >= end)
-			continue;
-		if (end > top)
-			top = end;
-	}
-	/*
-	 * Everything below 'top' should be RAM except for the ISA hole.
-	 * Because of the limited MCR's we want to map NV/ACPI into our
-	 * MCR range for gunk in RAM
-	 *
-	 * Clip might cause us to MCR insufficient RAM but that is an
-	 * acceptable failure mode and should only bite obscure boxes with
-	 * a VESA hole at 15Mb
-	 *
-	 * The second case Clip sometimes kicks in is when the EBDA is marked
-	 * as reserved. Again we fail safe with reasonable results
-	 */
-	if (top > clip)
-		top = clip;
-
-	return top;
-}
-
-/*
- * Compute a set of MCR's to give maximum coverage
- */
-static int centaur_mcr_compute(int nr, int key)
-{
-	u32 mem = ramtop();
-	u32 root = power2(mem);
-	u32 base = root;
-	u32 top = root;
-	u32 floor = 0;
-	int ct = 0;
-
-	while (ct < nr) {
-		u32 fspace = 0;
-		u32 high;
-		u32 low;
-
-		/*
-		 * Find the largest block we will fill going upwards
-		 */
-		high = power2(mem-top);
-
-		/*
-		 * Find the largest block we will fill going downwards
-		 */
-		low = base/2;
-
-		/*
-		 * Don't fill below 1Mb going downwards as there
-		 * is an ISA hole in the way.
-		 */
-		if (base <= 1024*1024)
-			low = 0;
-
-		/*
-		 * See how much space we could cover by filling below
-		 * the ISA hole
-		 */
-
-		if (floor == 0)
-			fspace = 512*1024;
-		else if (floor == 512*1024)
-			fspace = 128*1024;
-
-		/* And forget ROM space */
-
-		/*
-		 * Now install the largest coverage we get
-		 */
-		if (fspace > high && fspace > low) {
-			centaur_mcr_insert(ct, floor, fspace, key);
-			floor += fspace;
-		} else if (high > low) {
-			centaur_mcr_insert(ct, top, high, key);
-			top += high;
-		} else if (low > 0) {
-			base -= low;
-			centaur_mcr_insert(ct, base, low, key);
-		} else
-			break;
-		ct++;
-	}
-	/*
-	 * We loaded ct values. We now need to set the mask. The caller
-	 * must do this bit.
-	 */
-	return ct;
-}
-
-static void centaur_create_optimal_mcr(void)
-{
-	int used;
-	int i;
-
-	/*
-	 * Allocate up to 6 mcrs to mark as much of ram as possible
-	 * as write combining and weak write ordered.
-	 *
-	 * To experiment with: Linux never uses stack operations for
-	 * mmio spaces so we could globally enable stack operation wc
-	 *
-	 * Load the registers with type 31 - full write combining, all
-	 * writes weakly ordered.
-	 */
-	used = centaur_mcr_compute(6, 31);
-
-	/*
-	 * Wipe unused MCRs
-	 */
-	for (i = used; i < 8; i++)
-		wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-static void winchip2_create_optimal_mcr(void)
-{
-	u32 lo, hi;
-	int used;
-	int i;
-
-	/*
-	 * Allocate up to 6 mcrs to mark as much of ram as possible
-	 * as write combining, weak store ordered.
-	 *
-	 * Load the registers with type 25
-	 *	8	-	weak write ordering
-	 *	16	-	weak read ordering
-	 *	1	-	write combining
-	 */
-	used = centaur_mcr_compute(6, 25);
-
-	/*
-	 * Mark the registers we are using.
-	 */
-	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	for (i = 0; i < used; i++)
-		lo |= 1<<(9+i);
-	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-
-	/*
-	 * Wipe unused MCRs
-	 */
-
-	for (i = used; i < 8; i++)
-		wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-/*
- * Handle the MCR key on the Winchip 2.
- */
-static void winchip2_unprotect_mcr(void)
-{
-	u32 lo, hi;
-	u32 key;
-
-	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	lo &= ~0x1C0;	/* blank bits 8-6 */
-	key = (lo>>17) & 7;
-	lo |= key<<6;	/* replace with unlock key */
-	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-
-static void winchip2_protect_mcr(void)
-{
-	u32 lo, hi;
-
-	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	lo &= ~0x1C0;	/* blank bits 8-6 */
-	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-#endif /* CONFIG_X86_OOSTORE */
-
 #define ACE_PRESENT	(1 << 6)
 #define ACE_ENABLED	(1 << 7)
 #define ACE_FCR		(1 << 28)	/* MSR_VIA_FCR */
@@ -362,20 +132,6 @@ static void init_centaur(struct cpuinfo_x86 *c)
 		fcr_clr = DPDC;
 		printk(KERN_NOTICE "Disabling bugged TSC.\n");
 		clear_cpu_cap(c, X86_FEATURE_TSC);
-#ifdef CONFIG_X86_OOSTORE
-		centaur_create_optimal_mcr();
-		/*
-		 * Enable:
-		 *	write combining on non-stack, non-string
-		 *	write combining on string, all types
-		 *	weak write ordering
-		 *
-		 * The C6 original lacks weak read order
-		 *
-		 * Note 0x120 is write only on Winchip 1
-		 */
-		wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
-#endif
 		break;
 	case 8:
 		switch (c->x86_mask) {
@@ -392,40 +148,12 @@ static void init_centaur(struct cpuinfo_x86 *c)
 		fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
 			  E2MMX|EAMD3D;
 		fcr_clr = DPDC;
-#ifdef CONFIG_X86_OOSTORE
-		winchip2_unprotect_mcr();
-		winchip2_create_optimal_mcr();
-		rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-		/*
-		 * Enable:
-		 *	write combining on non-stack, non-string
-		 *	write combining on string, all types
-		 *	weak write ordering
-		 */
-		lo |= 31;
-		wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-		winchip2_protect_mcr();
-#endif
 		break;
 	case 9:
 		name = "3";
 		fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
 			  E2MMX|EAMD3D;
 		fcr_clr = DPDC;
-#ifdef CONFIG_X86_OOSTORE
-		winchip2_unprotect_mcr();
-		winchip2_create_optimal_mcr();
-		rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-		/*
-		 * Enable:
-		 *	write combining on non-stack, non-string
-		 *	write combining on string, all types
-		 *	weak write ordering
-		 */
-		lo |= 31;
-		wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-		winchip2_protect_mcr();
-#endif
 		break;
 	default:
 		name = "??";
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 81ba27679f18..f36bd42d6f0c 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -544,6 +544,10 @@ ENDPROC(early_idt_handlers)
 /* This is global to keep gas from relaxing the jumps */
 ENTRY(early_idt_handler)
 	cld
+
+	cmpl $2,(%esp)		# X86_TRAP_NMI
+	je is_nmi		# Ignore NMI
+
 	cmpl $2,%ss:early_recursion_flag
 	je hlt_loop
 	incl %ss:early_recursion_flag
@@ -594,8 +598,9 @@ ex_entry:
 	pop %edx
 	pop %ecx
 	pop %eax
-	addl $8,%esp		/* drop vector number and error code */
 	decl %ss:early_recursion_flag
+is_nmi:
+	addl $8,%esp		/* drop vector number and error code */
 	iret
 ENDPROC(early_idt_handler)
 
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index e1aabdb314c8..a468c0a65c42 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -343,6 +343,9 @@ early_idt_handlers:
ENTRY(early_idt_handler)
 	cld
 
+	cmpl $2,(%rsp)		# X86_TRAP_NMI
+	je is_nmi		# Ignore NMI
+
 	cmpl $2,early_recursion_flag(%rip)
 	jz 1f
 	incl early_recursion_flag(%rip)
@@ -405,8 +408,9 @@ ENTRY(early_idt_handler)
 	popq %rdx
 	popq %rcx
 	popq %rax
-	addq $16,%rsp		# drop vector number and error code
 	decl early_recursion_flag(%rip)
+is_nmi:
+	addq $16,%rsp		# drop vector number and error code
 	INTERRUPT_RETURN
ENDPROC(early_idt_handler)
 
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 06853e670354..ce72964b2f46 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1239,14 +1239,8 @@ void __init setup_arch(char **cmdline_p)
 	register_refined_jiffies(CLOCK_TICK_RATE);
 
 #ifdef CONFIG_EFI
-	/* Once setup is done above, unmap the EFI memory map on
-	 * mismatched firmware/kernel archtectures since there is no
-	 * support for runtime services.
-	 */
-	if (efi_enabled(EFI_BOOT) && !efi_is_native()) {
-		pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
-		efi_unmap_memmap();
-	}
+	if (efi_enabled(EFI_BOOT))
+		efi_apply_memmap_quirks();
 #endif
 }
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e81df8fce027..2de1bc09a8d4 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3002,10 +3002,8 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
 	/* instruction emulation calls kvm_set_cr8() */
 	r = cr_interception(svm);
-	if (irqchip_in_kernel(svm->vcpu.kvm)) {
-		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+	if (irqchip_in_kernel(svm->vcpu.kvm))
 		return r;
-	}
 	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
 		return r;
 	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
@@ -3567,6 +3565,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
 		return;
 
+	clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+
 	if (irr == -1)
 		return;
 
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 6dea040cc3a1..a10c8c792161 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1020,13 +1020,17 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
+ *
+ * This function must have noinline because both callers
+ * {,trace_}do_page_fault() have notrace on. Having this an actual function
+ * guarantees there's a function trace entry.
  */
-static void __kprobes
-__do_page_fault(struct pt_regs *regs, unsigned long error_code)
+static void __kprobes noinline
+__do_page_fault(struct pt_regs *regs, unsigned long error_code,
+		unsigned long address)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
-	unsigned long address;
 	struct mm_struct *mm;
 	int fault;
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -1034,9 +1038,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	tsk = current;
 	mm = tsk->mm;
 
-	/* Get the faulting address: */
-	address = read_cr2();
-
 	/*
 	 * Detect and handle instructions that would cause a page fault for
 	 * both a tracked kernel page and a userspace page.
@@ -1248,32 +1249,50 @@ good_area:
 	up_read(&mm->mmap_sem);
 }
 
-dotraplinkage void __kprobes
+dotraplinkage void __kprobes notrace
 do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
+	unsigned long address = read_cr2(); /* Get the faulting address */
 	enum ctx_state prev_state;
 
+	/*
+	 * We must have this function tagged with __kprobes, notrace and call
+	 * read_cr2() before calling anything else. To avoid calling any kind
+	 * of tracing machinery before we've observed the CR2 value.
+	 *
+	 * exception_{enter,exit}() contain all sorts of tracepoints.
+	 */
+
 	prev_state = exception_enter();
-	__do_page_fault(regs, error_code);
+	__do_page_fault(regs, error_code, address);
 	exception_exit(prev_state);
 }
 
-static void trace_page_fault_entries(struct pt_regs *regs,
+#ifdef CONFIG_TRACING
+static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
 				     unsigned long error_code)
 {
 	if (user_mode(regs))
-		trace_page_fault_user(read_cr2(), regs, error_code);
+		trace_page_fault_user(address, regs, error_code);
 	else
-		trace_page_fault_kernel(read_cr2(), regs, error_code);
+		trace_page_fault_kernel(address, regs, error_code);
 }
 
-dotraplinkage void __kprobes
+dotraplinkage void __kprobes notrace
 trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
+	/*
+	 * The exception_enter and tracepoint processing could
+	 * trigger another page faults (user space callchain
+	 * reading) and destroy the original cr2 value, so read
+	 * the faulting address now.
	 */
+	unsigned long address = read_cr2();
 	enum ctx_state prev_state;
 
 	prev_state = exception_enter();
-	trace_page_fault_entries(regs, error_code);
-	__do_page_fault(regs, error_code);
+	trace_page_fault_entries(address, regs, error_code);
+	__do_page_fault(regs, error_code, address);
 	exception_exit(prev_state);
 }
+#endif /* CONFIG_TRACING */
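
The ordering rule this hunk enforces is general: any per-event datum that a
nested event can clobber (here CR2) must be captured before entering any
instrumentation that may itself trigger such an event. A minimal user-space
sketch of the idea, with hypothetical stand-ins for read_cr2() and the tracing
machinery (nothing below is kernel code):

	#include <stdio.h>

	static unsigned long fake_cr2;	/* stand-in for the CR2 register */

	static unsigned long read_fault_address(void)
	{
		return fake_cr2;	/* must be sampled first */
	}

	static void enter_tracing_context(void)
	{
		/* a nested fault in the tracing path would clobber CR2 */
		fake_cr2 = 0xdeadbeef;
	}

	static void handle_fault(unsigned long error_code)
	{
		/* capture the fragile state before anything that can fault */
		unsigned long address = read_fault_address();

		enter_tracing_context();
		printf("fault at %#lx, error %#lx\n", address, error_code);
	}

	int main(void)
	{
		fake_cr2 = 0x1000;
		handle_fault(0x2);
		return 0;
	}
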
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 1a201ac7cef8..b97acecf3fd9 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -52,6 +52,7 @@
 #include <asm/tlbflush.h>
 #include <asm/x86_init.h>
 #include <asm/rtc.h>
+#include <asm/uv/uv.h>
 
 #define EFI_DEBUG
 
@@ -1210,3 +1211,22 @@ static int __init parse_efi_cmdline(char *str)
 	return 0;
 }
 early_param("efi", parse_efi_cmdline);
+
+void __init efi_apply_memmap_quirks(void)
+{
+	/*
+	 * Once setup is done earlier, unmap the EFI memory map on mismatched
+	 * firmware/kernel architectures since there is no support for runtime
+	 * services.
+	 */
+	if (!efi_is_native()) {
+		pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+		efi_unmap_memmap();
+	}
+
+	/*
+	 * UV doesn't support the new EFI pagetable mapping yet.
+	 */
+	if (is_uv_system())
+		set_bit(EFI_OLD_MEMMAP, &x86_efi_facility);
+}
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 7d01b8c56c00..cc04e67bfd05 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -40,11 +40,7 @@
 #define smp_rmb()	barrier()
 #endif /* CONFIG_X86_PPRO_FENCE */
 
-#ifdef CONFIG_X86_OOSTORE
-#define smp_wmb()	wmb()
-#else /* CONFIG_X86_OOSTORE */
 #define smp_wmb()	barrier()
-#endif /* CONFIG_X86_OOSTORE */
 
 #define smp_read_barrier_depends()	read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
diff --git a/block/blk-exec.c b/block/blk-exec.c
index c68613bb4c79..dbf4502b1d67 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	 * be resued after dying flag is set
 	 */
 	if (q->mq_ops) {
-		blk_mq_insert_request(q, rq, at_head, true);
+		blk_mq_insert_request(rq, at_head, true, false);
 		return;
 	}
 
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 66e2b697f5db..f598f794c3c6 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -137,7 +137,7 @@ static void mq_flush_run(struct work_struct *work)
 	rq = container_of(work, struct request, mq_flush_work);
 
 	memset(&rq->csd, 0, sizeof(rq->csd));
-	blk_mq_run_request(rq, true, false);
+	blk_mq_insert_request(rq, false, true, false);
 }
 
 static bool blk_flush_queue_rq(struct request *rq)
@@ -411,7 +411,7 @@ void blk_insert_flush(struct request *rq)
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
 		if (q->mq_ops) {
-			blk_mq_run_request(rq, false, true);
+			blk_mq_insert_request(rq, false, false, true);
 		} else
 			list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 3146befb56aa..136ef8643bba 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -11,7 +11,7 @@
11#include "blk-mq.h" 11#include "blk-mq.h"
12 12
13static LIST_HEAD(blk_mq_cpu_notify_list); 13static LIST_HEAD(blk_mq_cpu_notify_list);
14static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock); 14static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
15 15
16static int blk_mq_main_cpu_notify(struct notifier_block *self, 16static int blk_mq_main_cpu_notify(struct notifier_block *self,
17 unsigned long action, void *hcpu) 17 unsigned long action, void *hcpu)
@@ -19,12 +19,12 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
19 unsigned int cpu = (unsigned long) hcpu; 19 unsigned int cpu = (unsigned long) hcpu;
20 struct blk_mq_cpu_notifier *notify; 20 struct blk_mq_cpu_notifier *notify;
21 21
22 spin_lock(&blk_mq_cpu_notify_lock); 22 raw_spin_lock(&blk_mq_cpu_notify_lock);
23 23
24 list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) 24 list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
25 notify->notify(notify->data, action, cpu); 25 notify->notify(notify->data, action, cpu);
26 26
27 spin_unlock(&blk_mq_cpu_notify_lock); 27 raw_spin_unlock(&blk_mq_cpu_notify_lock);
28 return NOTIFY_OK; 28 return NOTIFY_OK;
29} 29}
30 30
@@ -32,16 +32,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
32{ 32{
33 BUG_ON(!notifier->notify); 33 BUG_ON(!notifier->notify);
34 34
35 spin_lock(&blk_mq_cpu_notify_lock); 35 raw_spin_lock(&blk_mq_cpu_notify_lock);
36 list_add_tail(&notifier->list, &blk_mq_cpu_notify_list); 36 list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
37 spin_unlock(&blk_mq_cpu_notify_lock); 37 raw_spin_unlock(&blk_mq_cpu_notify_lock);
38} 38}
39 39
40void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) 40void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
41{ 41{
42 spin_lock(&blk_mq_cpu_notify_lock); 42 raw_spin_lock(&blk_mq_cpu_notify_lock);
43 list_del(&notifier->list); 43 list_del(&notifier->list);
44 spin_unlock(&blk_mq_cpu_notify_lock); 44 raw_spin_unlock(&blk_mq_cpu_notify_lock);
45} 45}
46 46
47void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, 47void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
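
The raw_ conversion matters mainly for PREEMPT_RT, where a plain spinlock_t
becomes a sleeping lock; a raw_spinlock_t always spins, so it remains safe in
notifier paths that must not sleep. The registration side of the pattern in
isolation (a sketch with hypothetical example_ names, kernel context assumed):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static LIST_HEAD(example_notify_list);
	static DEFINE_RAW_SPINLOCK(example_notify_lock);	/* spins even on RT */

	struct example_notifier {
		struct list_head list;
		void (*notify)(void *data, unsigned long action, unsigned int cpu);
		void *data;
	};

	static void example_register(struct example_notifier *n)
	{
		raw_spin_lock(&example_notify_lock);
		list_add_tail(&n->list, &example_notify_list);
		raw_spin_unlock(&example_notify_lock);
	}
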
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1fa9dd153fde..883f72089015 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -73,8 +73,8 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
 	set_bit(ctx->index_hw, hctx->ctx_map);
 }
 
-static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
-					bool reserved)
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+					      gfp_t gfp, bool reserved)
 {
 	struct request *rq;
 	unsigned int tag;
@@ -193,12 +193,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-					      gfp_t gfp, bool reserved)
-{
-	return blk_mq_alloc_rq(hctx, gfp, reserved);
-}
-
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 						   int rw, gfp_t gfp,
 						   bool reserved)
@@ -289,38 +283,10 @@ void blk_mq_free_request(struct request *rq)
 	__blk_mq_free_request(hctx, ctx, rq);
 }
 
-static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
-{
-	if (error)
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		error = -EIO;
-
-	if (unlikely(rq->cmd_flags & REQ_QUIET))
-		set_bit(BIO_QUIET, &bio->bi_flags);
-
-	/* don't actually finish bio if it's part of flush sequence */
-	if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
-		bio_endio(bio, error);
-}
-
-void blk_mq_end_io(struct request *rq, int error)
+bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
 {
-	struct bio *bio = rq->bio;
-	unsigned int bytes = 0;
-
-	trace_block_rq_complete(rq->q, rq);
-
-	while (bio) {
-		struct bio *next = bio->bi_next;
-
-		bio->bi_next = NULL;
-		bytes += bio->bi_iter.bi_size;
-		blk_mq_bio_endio(rq, bio, error);
-		bio = next;
-	}
-
-	blk_account_io_completion(rq, bytes);
+	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
+		return true;
 
 	blk_account_io_done(rq);
 
@@ -328,8 +294,9 @@ void blk_mq_end_io(struct request *rq, int error)
 		rq->end_io(rq, error);
 	else
 		blk_mq_free_request(rq);
+	return false;
 }
-EXPORT_SYMBOL(blk_mq_end_io);
+EXPORT_SYMBOL(blk_mq_end_io_partial);
 
 static void __blk_mq_complete_request_remote(void *data)
 {
@@ -730,60 +697,27 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 	blk_mq_add_timer(rq);
 }
 
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
-			   bool at_head, bool run_queue)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+			   bool async)
 {
+	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
+	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
+
+	current_ctx = blk_mq_get_ctx(q);
+	if (!cpu_online(ctx->cpu))
+		rq->mq_ctx = ctx = current_ctx;
 
-	ctx = rq->mq_ctx;
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
 		blk_insert_flush(rq);
 	} else {
-		current_ctx = blk_mq_get_ctx(q);
-
-		if (!cpu_online(ctx->cpu)) {
-			ctx = current_ctx;
-			hctx = q->mq_ops->map_queue(q, ctx->cpu);
-			rq->mq_ctx = ctx;
-		}
 		spin_lock(&ctx->lock);
 		__blk_mq_insert_request(hctx, rq, at_head);
 		spin_unlock(&ctx->lock);
-
-		blk_mq_put_ctx(current_ctx);
-	}
-
-	if (run_queue)
-		__blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
-{
-	struct request_queue *q = rq->q;
-	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
-
-	current_ctx = blk_mq_get_ctx(q);
-
-	ctx = rq->mq_ctx;
-	if (!cpu_online(ctx->cpu)) {
-		ctx = current_ctx;
-		rq->mq_ctx = ctx;
 	}
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
-	/* ctx->cpu might be offline */
-	spin_lock(&ctx->lock);
-	__blk_mq_insert_request(hctx, rq, false);
-	spin_unlock(&ctx->lock);
 
 	blk_mq_put_ctx(current_ctx);
 
@@ -926,6 +860,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
+	if (is_sync)
+		rw |= REQ_SYNC;
 	trace_block_getrq(q, bio, rw);
 	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
 	if (likely(rq))
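
With partial completion factored out this way, the old full-completion entry
point presumably survives as a thin wrapper in the blk-mq header, along these
lines (a sketch of the expected wrapper, not the verbatim header):

	/* completing all of blk_rq_bytes() must retire the request */
	static inline void blk_mq_end_io(struct request *rq, int error)
	{
		bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));

		BUG_ON(!done);
	}
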
diff --git a/block/blk-mq.h b/block/blk-mq.h
index ed0035cd458e..72beba1f9d55 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -23,7 +23,6 @@ struct blk_mq_ctx {
 };
 
 void __blk_mq_complete_request(struct request *rq);
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
 void blk_mq_drain_queue(struct request_queue *q);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 959d41acc108..d7d32c28829b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -67,6 +67,8 @@ enum ec_command {
 #define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
 #define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
 #define ACPI_EC_MSI_UDELAY	550	/* Wait 550us for MSI EC */
+#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
+					 * when trying to clear the EC */
 
 enum {
 	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
@@ -116,6 +118,7 @@ EXPORT_SYMBOL(first_ec);
 static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
 static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
 static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
 
 /* --------------------------------------------------------------------------
                              Transaction Management
@@ -440,6 +443,29 @@ acpi_handle ec_get_handle(void)
 
 EXPORT_SYMBOL(ec_get_handle);
 
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
+
+/*
+ * Clears stale _Q events that might have accumulated in the EC.
+ * Run with locked ec mutex.
+ */
+static void acpi_ec_clear(struct acpi_ec *ec)
+{
+	int i, status;
+	u8 value = 0;
+
+	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+		status = acpi_ec_query_unlocked(ec, &value);
+		if (status || !value)
+			break;
+	}
+
+	if (unlikely(i == ACPI_EC_CLEAR_MAX))
+		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
+	else
+		pr_info("%d stale EC events cleared\n", i);
+}
+
 void acpi_ec_block_transactions(void)
 {
 	struct acpi_ec *ec = first_ec;
@@ -463,6 +489,10 @@ void acpi_ec_unblock_transactions(void)
 	mutex_lock(&ec->mutex);
 	/* Allow transactions to be carried out again */
 	clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
+
+	if (EC_FLAGS_CLEAR_ON_RESUME)
+		acpi_ec_clear(ec);
+
 	mutex_unlock(&ec->mutex);
 }
 
@@ -821,6 +851,13 @@ static int acpi_ec_add(struct acpi_device *device)
 
 	/* EC is fully operational, allow queries */
 	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+
+	/* Clear stale _Q events if hardware might require that */
+	if (EC_FLAGS_CLEAR_ON_RESUME) {
+		mutex_lock(&ec->mutex);
+		acpi_ec_clear(ec);
+		mutex_unlock(&ec->mutex);
+	}
 	return ret;
 }
 
@@ -922,6 +959,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
 	return 0;
 }
 
+/*
+ * On some hardware it is necessary to clear events accumulated by the EC during
+ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
+ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ *
+ * Ideally, the EC should also be instructed NOT to accumulate events during
+ * sleep (which Windows seems to do somehow), but the interface to control this
+ * behaviour is not known at this time.
+ *
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
+ * however it is very likely that other Samsung models are affected.
+ *
+ * On systems which don't accumulate _Q events during sleep, this extra check
+ * should be harmless.
+ */
+static int ec_clear_on_resume(const struct dmi_system_id *id)
+{
+	pr_debug("Detected system needing EC poll on resume.\n");
+	EC_FLAGS_CLEAR_ON_RESUME = 1;
+	return 0;
+}
+
 static struct dmi_system_id ec_dmi_table[] __initdata = {
 	{
 	ec_skip_dsdt_scan, "Compal JFL92", {
@@ -965,6 +1026,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
 	ec_validate_ecdt, "ASUS hardware", {
 	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
 	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+	{
+	ec_clear_on_resume, "Samsung hardware", {
+	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
 	{},
 };
 
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b7201fc6f1e1..0bdacc5e26a3 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
 	switch (ares->type) {
 	case ACPI_RESOURCE_TYPE_MEMORY24:
 		memory24 = &ares->data.memory24;
+		if (!memory24->address_length)
+			return false;
 		acpi_dev_get_memresource(res, memory24->minimum,
 					 memory24->address_length,
 					 memory24->write_protect);
 		break;
 	case ACPI_RESOURCE_TYPE_MEMORY32:
 		memory32 = &ares->data.memory32;
+		if (!memory32->address_length)
+			return false;
 		acpi_dev_get_memresource(res, memory32->minimum,
 					 memory32->address_length,
 					 memory32->write_protect);
 		break;
 	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
 		fixed_memory32 = &ares->data.fixed_memory32;
+		if (!fixed_memory32->address_length)
+			return false;
 		acpi_dev_get_memresource(res, fixed_memory32->address,
 					 fixed_memory32->address_length,
 					 fixed_memory32->write_protect);
@@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
 	switch (ares->type) {
 	case ACPI_RESOURCE_TYPE_IO:
 		io = &ares->data.io;
+		if (!io->address_length)
+			return false;
 		acpi_dev_get_ioresource(res, io->minimum,
 					io->address_length,
 					io->io_decode);
 		break;
 	case ACPI_RESOURCE_TYPE_FIXED_IO:
 		fixed_io = &ares->data.fixed_io;
+		if (!fixed_io->address_length)
+			return false;
 		acpi_dev_get_ioresource(res, fixed_io->address,
 					fixed_io->address_length,
 					ACPI_DECODE_10);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1a3dbd1b196e..8cb2522d592a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4175,6 +4175,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 
 	/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
 	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
+	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
 
 	/* Blacklist entries taken from Silicon Image 3124/3132
 	   Windows driver .inf file - also several Linux problem reports */
@@ -4224,7 +4225,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 
 	/* devices that don't properly handle queued TRIM commands */
 	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
-	{ "Crucial_CT???M500SSD1",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+	{ "Crucial_CT???M500SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
 
 	/*
 	 * Some WD SATA-I drives spin up and down erratically when the link
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index b52e9a6d6aad..54174cb32feb 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -53,7 +53,7 @@
 #define MTIP_FTL_REBUILD_TIMEOUT_MS	2400000
 
 /* unaligned IO handling */
-#define MTIP_MAX_UNALIGNED_SLOTS	8
+#define MTIP_MAX_UNALIGNED_SLOTS	2
 
 /* Macro to extract the tag bit number from a tag value. */
 #define MTIP_TAG_BIT(tag)	(tag & 0x1F)
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
index dd272a0d1446..99c27b1c625b 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -26,6 +26,8 @@ struct rcar_gen2_cpg {
 	void __iomem *reg;
 };
 
+#define CPG_FRQCRB			0x00000004
+#define CPG_FRQCRB_KICK			BIT(31)
 #define CPG_SDCKCR			0x00000074
 #define CPG_PLL0CR			0x000000d8
 #define CPG_FRQCRC			0x000000e0
@@ -45,6 +47,7 @@ struct rcar_gen2_cpg {
 struct cpg_z_clk {
 	struct clk_hw hw;
 	void __iomem *reg;
+	void __iomem *kick_reg;
 };
 
 #define to_z_clk(_hw)	container_of(_hw, struct cpg_z_clk, hw)
@@ -83,17 +86,45 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 {
 	struct cpg_z_clk *zclk = to_z_clk(hw);
 	unsigned int mult;
-	u32 val;
+	u32 val, kick;
+	unsigned int i;
 
 	mult = div_u64((u64)rate * 32, parent_rate);
 	mult = clamp(mult, 1U, 32U);
 
+	if (clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+		return -EBUSY;
+
 	val = clk_readl(zclk->reg);
 	val &= ~CPG_FRQCRC_ZFC_MASK;
 	val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
 	clk_writel(val, zclk->reg);
 
-	return 0;
+	/*
+	 * Set KICK bit in FRQCRB to update hardware setting and wait for
+	 * clock change completion.
+	 */
+	kick = clk_readl(zclk->kick_reg);
+	kick |= CPG_FRQCRB_KICK;
+	clk_writel(kick, zclk->kick_reg);
+
+	/*
+	 * Note: There is no HW information about the worst case latency.
+	 *
+	 * Using experimental measurements, it seems that no more than
+	 * ~10 iterations are needed, independently of the CPU rate.
+	 * Since this value might be dependant of external xtal rate, pll1
+	 * rate or even the other emulation clocks rate, use 1000 as a
+	 * "super" safe value.
+	 */
+	for (i = 1000; i; i--) {
+		if (!(clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+			return 0;
+
+		cpu_relax();
+	}
+
+	return -ETIMEDOUT;
 }
 
 static const struct clk_ops cpg_z_clk_ops = {
@@ -120,6 +151,7 @@ static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
 	init.num_parents = 1;
 
 	zclk->reg = cpg->reg + CPG_FRQCRC;
+	zclk->kick_reg = cpg->reg + CPG_FRQCRB;
 	zclk->hw.init = &init;
 
 	clk = clk_register(NULL, &zclk->hw);
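
The FRQCRB handshake is a common MMIO idiom: program the new value, pulse a
self-clearing KICK bit, then poll that bit with a bounded retry budget instead
of waiting forever. A freestanding sketch of the same shape, with hypothetical
register offsets and accessor functions:

	#include <stdbool.h>
	#include <stdint.h>

	#define REG_DIV		0x0		/* hypothetical divider register */
	#define REG_KICK	0x4		/* hypothetical kick/status register */
	#define KICK_BIT	(1u << 31)

	/* assumed MMIO accessors, provided elsewhere */
	extern uint32_t mmio_read(uint32_t off);
	extern void mmio_write(uint32_t off, uint32_t val);

	static bool set_divider(uint32_t div)
	{
		int i;

		if (mmio_read(REG_KICK) & KICK_BIT)
			return false;		/* previous change still in flight */

		mmio_write(REG_DIV, div);
		mmio_write(REG_KICK, mmio_read(REG_KICK) | KICK_BIT);

		for (i = 0; i < 1000; i++) {	/* bounded, like the driver */
			if (!(mmio_read(REG_KICK) & KICK_BIT))
				return true;	/* hardware cleared the bit */
		}
		return false;			/* timed out */
	}
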
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index cb003a6b72c8..cf485d928903 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1109,6 +1109,21 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 		goto err_set_policy_cpu;
 	}
 
+	/* related cpus should atleast have policy->cpus */
+	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+	/*
+	 * affected cpus must always be the one, which are online. We aren't
+	 * managing offline cpus here.
+	 */
+	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+	if (!frozen) {
+		policy->user_policy.min = policy->min;
+		policy->user_policy.max = policy->max;
+	}
+
+	down_write(&policy->rwsem);
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus)
 		per_cpu(cpufreq_cpu_data, j) = policy;
@@ -1162,20 +1177,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 		}
 	}
 
-	/* related cpus should atleast have policy->cpus */
-	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
-
-	/*
-	 * affected cpus must always be the one, which are online. We aren't
-	 * managing offline cpus here.
-	 */
-	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
-
-	if (!frozen) {
-		policy->user_policy.min = policy->min;
-		policy->user_policy.max = policy->max;
-	}
-
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_START, policy);
 
@@ -1206,6 +1207,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 		policy->user_policy.policy = policy->policy;
 		policy->user_policy.governor = policy->governor;
 	}
+	up_write(&policy->rwsem);
 
 	kobject_uevent(&policy->kobj, KOBJ_ADD);
 	up_read(&cpufreq_rwsem);
@@ -1546,23 +1548,16 @@ static unsigned int __cpufreq_get(unsigned int cpu)
  */
 unsigned int cpufreq_get(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 	unsigned int ret_freq = 0;
 
-	if (cpufreq_disabled() || !cpufreq_driver)
-		return -ENOENT;
-
-	BUG_ON(!policy);
-
-	if (!down_read_trylock(&cpufreq_rwsem))
-		return 0;
-
-	down_read(&policy->rwsem);
-
-	ret_freq = __cpufreq_get(cpu);
+	if (policy) {
+		down_read(&policy->rwsem);
+		ret_freq = __cpufreq_get(cpu);
+		up_read(&policy->rwsem);
 
-	up_read(&policy->rwsem);
-	up_read(&cpufreq_rwsem);
+		cpufreq_cpu_put(policy);
+	}
 
 	return ret_freq;
 }
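
The cpufreq_get() rework replaces the open-coded driver checks and BUG_ON with
the standard refcounted lookup: cpufreq_cpu_get() already returns NULL when the
subsystem is disabled or the policy has gone away, so the error handling
collapses into one NULL check. The resulting discipline, sketched with a
stand-in for __cpufreq_get():

	#include <linux/cpufreq.h>

	unsigned int example_get_freq(unsigned int cpu)
	{
		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);	/* ref + NULL check */
		unsigned int freq = 0;

		if (policy) {
			down_read(&policy->rwsem);
			freq = policy->cur;	/* stand-in for __cpufreq_get(cpu) */
			up_read(&policy->rwsem);
			cpufreq_cpu_put(policy);	/* balance the reference */
		}
		return freq;
	}
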
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index de4aa409abe2..2c6d5e118ac1 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -916,7 +916,7 @@ static int lookup_existing_device(struct device *dev, void *data)
 	old->config_rom_retries = 0;
 	fw_notice(card, "rediscovered device %s\n", dev_name(dev));
 
-	PREPARE_DELAYED_WORK(&old->work, fw_device_update);
+	old->workfn = fw_device_update;
 	fw_schedule_device_work(old, 0);
 
 	if (current_node == card->root_node)
@@ -1075,7 +1075,7 @@ static void fw_device_init(struct work_struct *work)
 	if (atomic_cmpxchg(&device->state,
 			   FW_DEVICE_INITIALIZING,
 			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
-		PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+		device->workfn = fw_device_shutdown;
 		fw_schedule_device_work(device, SHUTDOWN_DELAY);
 	} else {
 		fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
@@ -1196,13 +1196,20 @@ static void fw_device_refresh(struct work_struct *work)
 		  dev_name(&device->device), fw_rcode_string(ret));
  gone:
 	atomic_set(&device->state, FW_DEVICE_GONE);
-	PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+	device->workfn = fw_device_shutdown;
 	fw_schedule_device_work(device, SHUTDOWN_DELAY);
  out:
 	if (node_id == card->root_node->node_id)
 		fw_schedule_bm_work(card, 0);
 }
 
+static void fw_device_workfn(struct work_struct *work)
+{
+	struct fw_device *device = container_of(to_delayed_work(work),
+						struct fw_device, work);
+	device->workfn(work);
+}
+
 void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 {
 	struct fw_device *device;
@@ -1252,7 +1259,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		 * power-up after getting plugged in.  We schedule the
 		 * first config rom scan half a second after bus reset.
 		 */
-		INIT_DELAYED_WORK(&device->work, fw_device_init);
+		device->workfn = fw_device_init;
+		INIT_DELAYED_WORK(&device->work, fw_device_workfn);
 		fw_schedule_device_work(device, INITIAL_DELAY);
 		break;
 
@@ -1268,7 +1276,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		if (atomic_cmpxchg(&device->state,
 			    FW_DEVICE_RUNNING,
 			    FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
-			PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
+			device->workfn = fw_device_refresh;
 			fw_schedule_device_work(device,
 				device->is_local ? 0 : INITIAL_DELAY);
 		}
@@ -1283,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		smp_wmb();  /* update node_id before generation */
 		device->generation = card->generation;
 		if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
-			PREPARE_DELAYED_WORK(&device->work, fw_device_update);
+			device->workfn = fw_device_update;
 			fw_schedule_device_work(device, 0);
 		}
 		break;
@@ -1308,7 +1316,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		device = node->data;
 		if (atomic_xchg(&device->state,
 				FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
-			PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+			device->workfn = fw_device_shutdown;
 			fw_schedule_device_work(device,
 				list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
 		}
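
This series replaces PREPARE_DELAYED_WORK() callers with a per-object function
pointer plus one fixed trampoline, so the work item's function never changes
after INIT_DELAYED_WORK(). The idiom in isolation (a sketch with hypothetical
example_ names, kernel context assumed):

	#include <linux/workqueue.h>

	struct example_device {
		work_func_t workfn;		/* currently selected handler */
		struct delayed_work work;	/* always runs example_workfn() */
	};

	static void example_workfn(struct work_struct *work)
	{
		struct example_device *dev = container_of(to_delayed_work(work),
							  struct example_device, work);

		dev->workfn(work);		/* dispatch to the chosen handler */
	}

	/* setup: dev->workfn = first_handler;
	 *        INIT_DELAYED_WORK(&dev->work, example_workfn);
	 * later changes of handler only assign dev->workfn. */
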
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 6b895986dc22..4af0a7bad7f2 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -929,8 +929,6 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
 	if (rcode == RCODE_COMPLETE) {
 		fwnet_transmit_packet_done(ptask);
 	} else {
-		fwnet_transmit_packet_failed(ptask);
-
 		if (printk_timed_ratelimit(&j,  1000) || rcode != last_rcode) {
 			dev_err(&ptask->dev->netdev->dev,
 				"fwnet_write_complete failed: %x (skipped %d)\n",
@@ -938,8 +936,10 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
 
 			errors_skipped = 0;
 			last_rcode = rcode;
-		} else
+		} else {
 			errors_skipped++;
+		}
+		fwnet_transmit_packet_failed(ptask);
 	}
 }
 
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 6f74d8d3f700..8db663219560 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -290,7 +290,6 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
 #define QUIRK_NO_MSI			0x10
 #define QUIRK_TI_SLLZ059		0x20
 #define QUIRK_IR_WAKE			0x40
-#define QUIRK_PHY_LCTRL_TIMEOUT		0x80
 
 /* In case of multiple matches in ohci_quirks[], only the first one is used. */
 static const struct {
@@ -303,10 +302,7 @@ static const struct {
 		QUIRK_BE_HEADERS},
 
 	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
-		QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
-
-	{PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
-		QUIRK_PHY_LCTRL_TIMEOUT},
+		QUIRK_NO_MSI},
 
 	{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
 		QUIRK_RESET_PACKET},
@@ -353,7 +349,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
 	", disable MSI = "		__stringify(QUIRK_NO_MSI)
 	", TI SLLZ059 erratum = "	__stringify(QUIRK_TI_SLLZ059)
 	", IR wake unreliable = "	__stringify(QUIRK_IR_WAKE)
-	", phy LCtrl timeout = "	__stringify(QUIRK_PHY_LCTRL_TIMEOUT)
 	")");
 
 #define OHCI_PARAM_DEBUG_AT_AR		1
@@ -2299,9 +2294,6 @@ static int ohci_enable(struct fw_card *card,
 	 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
 	 * cannot actually use the phy at that time.  These need tens of
 	 * millisecods pause between LPS write and first phy access too.
-	 *
-	 * But do not wait for 50msec on Agere/LSI cards.  Their phy
-	 * arbitration state machine may time out during such a long wait.
 	 */
 
 	reg_write(ohci, OHCI1394_HCControlSet,
@@ -2309,11 +2301,8 @@ static int ohci_enable(struct fw_card *card,
 		  OHCI1394_HCControl_postedWriteEnable);
 	flush_writes(ohci);
 
-	if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
+	for (lps = 0, i = 0; !lps && i < 3; i++) {
 		msleep(50);
-
-	for (lps = 0, i = 0; !lps && i < 150; i++) {
-		msleep(1);
 		lps = reg_read(ohci, OHCI1394_HCControlSet) &
 		      OHCI1394_HCControl_LPS;
 	}
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 281029daf98c..7aef911fdc71 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -146,6 +146,7 @@ struct sbp2_logical_unit {
 	 */
 	int generation;
 	int retries;
+	work_func_t workfn;
 	struct delayed_work work;
 	bool has_sdev;
 	bool blocked;
@@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work)
 	/* set appropriate retry limit(s) in BUSY_TIMEOUT register */
 	sbp2_set_busy_timeout(lu);
 
-	PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
+	lu->workfn = sbp2_reconnect;
 	sbp2_agent_reset(lu);
 
 	/* This was a re-login. */
@@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work)
 	 * If a bus reset happened, sbp2_update will have requeued
 	 * lu->work already.  Reset the work from reconnect to login.
 	 */
-	PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+	lu->workfn = sbp2_login;
 }
 
 static void sbp2_reconnect(struct work_struct *work)
@@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work)
 	    lu->retries++ >= 5) {
 		dev_err(tgt_dev(tgt), "failed to reconnect\n");
 		lu->retries = 0;
-		PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+		lu->workfn = sbp2_login;
 	}
 	sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
 
@@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work)
 	sbp2_conditionally_unblock(lu);
 }
 
+static void sbp2_lu_workfn(struct work_struct *work)
+{
+	struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
+						    struct sbp2_logical_unit, work);
+	lu->workfn(work);
+}
+
 static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
 {
 	struct sbp2_logical_unit *lu;
@@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
 	lu->blocked  = false;
 	++tgt->dont_block;
 	INIT_LIST_HEAD(&lu->orb_list);
-	INIT_DELAYED_WORK(&lu->work, sbp2_login);
+	lu->workfn = sbp2_login;
+	INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
 
 	list_add_tail(&lu->link, &tgt->lu_list);
 	return 0;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index acf3a36c9ebc..32982da82694 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -68,15 +68,7 @@ void __armada_drm_queue_unref_work(struct drm_device *dev,
 {
 	struct armada_private *priv = dev->dev_private;
 
-	/*
-	 * Yes, we really must jump through these hoops just to store a
-	 * _pointer_ to something into the kfifo.  This is utterly insane
-	 * and idiotic, because it kfifo requires the _data_ pointed to by
-	 * the pointer const, not the pointer itself.  Not only that, but
-	 * you have to pass a pointer _to_ the pointer you want stored.
-	 */
-	const struct drm_framebuffer *silly_api_alert = fb;
-	WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+	WARN_ON(!kfifo_put(&priv->fb_unref, fb));
 	schedule_work(&priv->fb_unref_work);
 }
 
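
The deleted comment and temporary were only needed because kfifo_put() used to
take a pointer to the element; by this kernel version it takes the element by
value, so a pointer-typed fifo can be fed directly. A sketch of the simplified
usage (assuming a fifo declared to hold framebuffer pointers and initialized
with INIT_KFIFO() elsewhere):

	#include <linux/kernel.h>
	#include <linux/kfifo.h>

	struct drm_framebuffer;

	static DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);

	static void queue_unref(struct drm_framebuffer *fb)
	{
		/* kfifo_put() returns 0 when the fifo is full */
		WARN_ON(!kfifo_put(&fb_unref, fb));
	}
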
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index c8fcf12019f0..5f8b0c2b9a44 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -2,6 +2,7 @@ config DRM_BOCHS
 	tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
 	depends on DRM && PCI
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 04f1f02c4019..ec7bb0fc71bc 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -403,7 +403,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 void intel_detect_pch(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct pci_dev *pch;
+	struct pci_dev *pch = NULL;
 
 	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
 	 * (which really amounts to a PCH but no South Display).
@@ -424,12 +424,9 @@ void intel_detect_pch(struct drm_device *dev)
 	 * all the ISA bridge devices and check for the first match, instead
 	 * of only checking the first one.
 	 */
-	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
-	while (pch) {
-		struct pci_dev *curr = pch;
+	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
 		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
-			unsigned short id;
-			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
 			dev_priv->pch_id = id;
 
 			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
@@ -461,18 +458,16 @@ void intel_detect_pch(struct drm_device *dev)
 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
 				WARN_ON(!IS_HASWELL(dev));
 				WARN_ON(!IS_ULT(dev));
-			} else {
-				goto check_next;
-			}
-			pci_dev_put(pch);
+			} else
+				continue;
+
 			break;
 		}
-check_next:
-		pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
-		pci_dev_put(curr);
 	}
 	if (!pch)
-		DRM_DEBUG_KMS("No PCH found?\n");
+		DRM_DEBUG_KMS("No PCH found.\n");
+
+	pci_dev_put(pch);
 }
 
 bool i915_semaphore_is_enabled(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 1a24e84f2315..d58b4e287e32 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -82,9 +82,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
 				    "Graphics Stolen Memory");
 	if (r == NULL) {
-		DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
-			  base, base + (uint32_t)dev_priv->gtt.stolen_size);
-		base = 0;
+		/*
+		 * One more attempt but this time requesting region from
+		 * base + 1, as we have seen that this resolves the region
+		 * conflict with the PCI Bus.
+		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
+		 * PCI bus, but have an off-by-one error. Hence retry the
+		 * reservation starting from 1 instead of 0.
+		 */
+		r = devm_request_mem_region(dev->dev, base + 1,
+					    dev_priv->gtt.stolen_size - 1,
+					    "Graphics Stolen Memory");
+		if (r == NULL) {
+			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
+			base = 0;
+		}
 	}
 
 	return base;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4c1672809493..9b8a7c7ea7fc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1092,12 +1092,12 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
 	struct drm_device *dev = dev_priv->dev;
 	bool cur_state;
 
-	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-		cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
-	else if (IS_845G(dev) || IS_I865G(dev))
+	if (IS_845G(dev) || IS_I865G(dev))
 		cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
-	else
+	else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
 		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+	else
+		cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
 
 	WARN(cur_state != state,
 	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 6db0d9d17f47..ee3181ebcc92 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -845,7 +845,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
 {
 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
-	if (IS_G4X(dev))
+	if (!hdmi->has_hdmi_sink || IS_G4X(dev))
 		return 165000;
 	else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
 		return 300000;
@@ -899,8 +899,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 	 * outputs. We also need to check that the higher clock still fits
 	 * within limits.
 	 */
-	if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
-	    && HAS_PCH_SPLIT(dev)) {
+	if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
+	    clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
 		DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
 		desired_bpp = 12*3;
 
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 350de359123a..079ea38f14d9 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -698,7 +698,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
 	freq /= 0xff;
 
 	ctl = freq << 17;
-	if (IS_GEN2(dev) && panel->backlight.combination_mode)
+	if (panel->backlight.combination_mode)
 		ctl |= BLM_LEGACY_MODE;
 	if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
 		ctl |= BLM_POLARITY_PNV;
@@ -979,7 +979,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
 
 	ctl = I915_READ(BLC_PWM_CTL);
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev))
 		panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
 
 	if (IS_PINEVIEW(dev))
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d77cc81900f9..e1fc35a72656 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3493,6 +3493,8 @@ static void valleyview_setup_pctx(struct drm_device *dev)
 	u32 pcbr;
 	int pctx_size = 24*1024;
 
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
 	pcbr = I915_READ(VLV_PCBR);
 	if (pcbr) {
 		/* BIOS set it up already, grab the pre-alloc'd space */
@@ -3542,8 +3544,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
 		I915_WRITE(GTFIFODBG, gtfifodbg);
 	}
 
-	valleyview_setup_pctx(dev);
-
 	/* If VLV, Forcewake all wells, else re-direct to regular path */
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -4395,6 +4395,8 @@ void intel_enable_gt_powersave(struct drm_device *dev)
 		ironlake_enable_rc6(dev);
 		intel_init_emon(dev);
 	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+		if (IS_VALLEYVIEW(dev))
+			valleyview_setup_pctx(dev);
 		/*
 		 * PCU communication is slow and this doesn't need to be
 		 * done at any specific time, so do this out of our fast path
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2cec2ab02f80..607dc14d195e 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1314,7 +1314,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 	}
 	if (is_dp)
 		args.v5.ucLaneNum = dp_lane_count;
-	else if (radeon_encoder->pixel_clock > 165000)
+	else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
 		args.v5.ucLaneNum = 8;
 	else
 		args.v5.ucLaneNum = 4;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e6419ca7cd37..e22be8458d92 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3046,7 +3046,7 @@ static u32 cik_create_bitmask(u32 bit_width)
 }
 
 /**
- * cik_select_se_sh - select which SE, SH to address
+ * cik_get_rb_disabled - computes the mask of disabled RBs
  *
  * @rdev: radeon_device pointer
  * @max_rb_num: max RBs (render backends) for the asic
@@ -7902,7 +7902,8 @@ int cik_resume(struct radeon_device *rdev)
 	/* init golden registers */
 	cik_init_golden_registers(rdev);
 
-	radeon_pm_resume(rdev);
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
 
 	rdev->accel_working = true;
 	r = cik_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 8a2c010b7dc5..27b0ff16082e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -5299,7 +5299,8 @@ int evergreen_resume(struct radeon_device *rdev)
 	/* init golden registers */
 	evergreen_init_golden_registers(rdev);
 
-	radeon_pm_resume(rdev);
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
 
 	rdev->accel_working = true;
 	r = evergreen_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
index 76ada8cfe902..3a03ba37d043 100644
--- a/drivers/gpu/drm/radeon/evergreen_smc.h
+++ b/drivers/gpu/drm/radeon/evergreen_smc.h
@@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
 
 #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
 
-#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters   0x0
+#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters   0x8
 #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable      0xC
 #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
 
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index ea932ac66fc6..bf6300cfd62d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2105,7 +2105,8 @@ int cayman_resume(struct radeon_device *rdev)
 	/* init golden registers */
 	ni_init_golden_registers(rdev);
 
-	radeon_pm_resume(rdev);
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
 
 	rdev->accel_working = true;
 	r = cayman_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ef024ce3f7cc..3cc78bb66042 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3942,8 +3942,6 @@ int r100_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = r100_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 7c63ef840e86..0b658b34b33a 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1430,8 +1430,6 @@ int r300_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = r300_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 3768aab2710b..802b19220a21 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -325,8 +325,6 @@ int r420_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = r420_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index e209eb75024f..98d6053c36c6 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -240,8 +240,6 @@ int r520_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = r520_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cdbc4171fe73..647ef4079217 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2968,7 +2968,8 @@ int r600_resume(struct radeon_device *rdev)
 	/* post card */
 	atom_asic_init(rdev->mode_info.atom_context);
 
-	radeon_pm_resume(rdev);
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
 
 	rdev->accel_working = true;
 	r = r600_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b012cbbc3ed5..044bc98fb459 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1521,13 +1521,16 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
 
-	if (rdev->pm.dpm_enabled) {
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
 		/* do dpm late init */
 		r = radeon_pm_late_init(rdev);
 		if (r) {
 			rdev->pm.dpm_enabled = false;
 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
 		}
+	} else {
+		/* resume old pm late */
+		radeon_pm_resume(rdev);
 	}
 
 	radeon_restore_bios_scratch_regs(rdev);
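
The radeon hunks above all split power-management resume by method: boards using DPM still call radeon_pm_resume() early in their per-ASIC *_resume() path, while the old profile-based code is now deferred until after the ring tests in radeon_resume_kms(). A rough user-space sketch of the resulting dispatch, assuming only the method flag and dpm_enabled matter:

#include <stdio.h>
#include <stdbool.h>

enum pm_method { PM_METHOD_PROFILE, PM_METHOD_DPM };

struct rdev { enum pm_method pm_method; bool dpm_enabled; };

static void pm_resume_early(struct rdev *rdev)
{
	/* per-ASIC *_resume(): only DPM may touch the hw this early */
	if (rdev->pm_method == PM_METHOD_DPM)
		printf("radeon_pm_resume (dpm, early)\n");
}

static void pm_resume_late(struct rdev *rdev)
{
	/* radeon_resume_kms(): rings are tested, old pm is now safe */
	if (rdev->pm_method == PM_METHOD_DPM && rdev->dpm_enabled)
		printf("radeon_pm_late_init (dpm)\n");
	else
		printf("radeon_pm_resume (old pm, late)\n");
}

int main(void)
{
	struct rdev old = { PM_METHOD_PROFILE, false };
	struct rdev dpm = { PM_METHOD_DPM, true };

	pm_resume_early(&old); pm_resume_late(&old);
	pm_resume_early(&dpm); pm_resume_late(&dpm);
	return 0;
}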
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 77f5b0c3edb8..040a2a10ea17 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -714,6 +714,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
 	}
+	/* Change the size here instead of the init above so only lpfn is affected */
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
 	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
 			     RADEON_GEM_DOMAIN_VRAM,
 			     NULL, &rdev->stollen_vga_memory);
@@ -935,7 +938,7 @@ static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
 	while (size) {
 		loff_t p = *pos / PAGE_SIZE;
 		unsigned off = *pos & ~PAGE_MASK;
-		ssize_t cur_size = min(size, PAGE_SIZE - off);
+		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
 		struct page *page;
 		void *ptr;
 
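
The last hunk is a type fix: size is a size_t while PAGE_SIZE - off is computed in unsigned long, so a plain min() compares mixed types and the result was even stored in a signed ssize_t. min_t() simply forces both operands into one named type first. A compilable illustration (the kernel macro also guards against double evaluation, elided here):

#include <stdio.h>
#include <stddef.h>

/* What min_t(size_t, a, b) boils down to: compare in one agreed type. */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	size_t size = 100;
	unsigned long page_size = 4096, off = 4000;

	size_t cur = min_t(size_t, size, page_size - off);
	printf("cur_size = %zu\n", cur);	/* 96 */
	return 0;
}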
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b5c2369cda2f..130d5cc50d43 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -474,8 +474,6 @@ int rs400_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = rs400_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index fdcde7693032..72d3616de08e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -1048,8 +1048,6 @@ int rs600_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = rs600_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 35950738bd5e..3462b64369bf 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -756,8 +756,6 @@ int rs690_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = rs690_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 98e8138ff779..237dd29d9f1c 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -586,8 +586,6 @@ int rv515_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = rv515_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4e37a42305d8..fef310773aad 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1811,7 +1811,8 @@ int rv770_resume(struct radeon_device *rdev)
 	/* init golden registers */
 	rv770_init_golden_registers(rdev);
 
-	radeon_pm_resume(rdev);
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
 
 	rdev->accel_working = true;
 	r = rv770_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 83578324e5d1..9a124d0608b3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6618,7 +6618,8 @@ int si_resume(struct radeon_device *rdev)
 	/* init golden registers */
 	si_init_golden_registers(rdev);
 
-	radeon_pm_resume(rdev);
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
 
 	rdev->accel_working = true;
 	r = si_startup(rdev);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index d18d08a076e8..8ee228e9ab5a 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -492,12 +492,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	isert_conn->state = ISER_CONN_INIT;
 	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
 	init_completion(&isert_conn->conn_login_comp);
-	init_waitqueue_head(&isert_conn->conn_wait);
-	init_waitqueue_head(&isert_conn->conn_wait_comp_err);
+	init_completion(&isert_conn->conn_wait);
+	init_completion(&isert_conn->conn_wait_comp_err);
 	kref_init(&isert_conn->conn_kref);
 	kref_get(&isert_conn->conn_kref);
 	mutex_init(&isert_conn->conn_mutex);
-	mutex_init(&isert_conn->conn_comp_mutex);
 	spin_lock_init(&isert_conn->conn_lock);
 
 	cma_id->context = isert_conn;
@@ -688,11 +687,11 @@ isert_disconnect_work(struct work_struct *work)
 
 	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 	mutex_lock(&isert_conn->conn_mutex);
-	isert_conn->state = ISER_CONN_DOWN;
+	if (isert_conn->state == ISER_CONN_UP)
+		isert_conn->state = ISER_CONN_TERMINATING;
 
 	if (isert_conn->post_recv_buf_count == 0 &&
 	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
-		pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
 		mutex_unlock(&isert_conn->conn_mutex);
 		goto wake_up;
 	}
@@ -712,7 +711,7 @@ isert_disconnect_work(struct work_struct *work)
 	mutex_unlock(&isert_conn->conn_mutex);
 
 wake_up:
-	wake_up(&isert_conn->conn_wait);
+	complete(&isert_conn->conn_wait);
 	isert_put_conn(isert_conn);
 }
 
@@ -888,16 +887,17 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 	 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
 	 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
 	 */
-	mutex_lock(&isert_conn->conn_comp_mutex);
-	if (coalesce &&
+	mutex_lock(&isert_conn->conn_mutex);
+	if (coalesce && isert_conn->state == ISER_CONN_UP &&
 	    ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
+		tx_desc->llnode_active = true;
 		llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
-		mutex_unlock(&isert_conn->conn_comp_mutex);
+		mutex_unlock(&isert_conn->conn_mutex);
 		return;
 	}
 	isert_conn->conn_comp_batch = 0;
 	tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
-	mutex_unlock(&isert_conn->conn_comp_mutex);
+	mutex_unlock(&isert_conn->conn_mutex);
 
 	send_wr->send_flags = IB_SEND_SIGNALED;
 }
@@ -1464,7 +1464,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
 	case ISCSI_OP_SCSI_CMD:
 		spin_lock_bh(&conn->cmd_lock);
 		if (!list_empty(&cmd->i_conn_node))
-			list_del(&cmd->i_conn_node);
+			list_del_init(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
 		if (cmd->data_direction == DMA_TO_DEVICE)
@@ -1476,7 +1476,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
 	case ISCSI_OP_SCSI_TMFUNC:
 		spin_lock_bh(&conn->cmd_lock);
 		if (!list_empty(&cmd->i_conn_node))
-			list_del(&cmd->i_conn_node);
+			list_del_init(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
 		transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1486,7 +1486,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
 	case ISCSI_OP_TEXT:
 		spin_lock_bh(&conn->cmd_lock);
 		if (!list_empty(&cmd->i_conn_node))
-			list_del(&cmd->i_conn_node);
+			list_del_init(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
 		/*
@@ -1549,6 +1549,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
 	iscsit_stop_dataout_timer(cmd);
 	device->unreg_rdma_mem(isert_cmd, isert_conn);
 	cmd->write_data_done = wr->cur_rdma_length;
+	wr->send_wr_num = 0;
 
 	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
 	spin_lock_bh(&cmd->istate_lock);
@@ -1589,7 +1590,7 @@ isert_do_control_comp(struct work_struct *work)
 		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
 		/*
 		 * Call atomic_dec(&isert_conn->post_send_buf_count)
-		 * from isert_free_conn()
+		 * from isert_wait_conn()
 		 */
 		isert_conn->logout_posted = true;
 		iscsit_logout_post_handler(cmd, cmd->conn);
@@ -1613,6 +1614,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
 			  struct ib_device *ib_dev)
 {
 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 
 	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
 	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1624,7 +1626,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
 		queue_work(isert_comp_wq, &isert_cmd->comp_work);
 		return;
 	}
-	atomic_dec(&isert_conn->post_send_buf_count);
+	atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
 
 	cmd->i_state = ISTATE_SENT_STATUS;
 	isert_completion_put(tx_desc, isert_cmd, ib_dev);
@@ -1662,7 +1664,7 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
 	case ISER_IB_RDMA_READ:
 		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
 
-		atomic_dec(&isert_conn->post_send_buf_count);
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
 		isert_completion_rdma_read(tx_desc, isert_cmd);
 		break;
 	default:
@@ -1691,31 +1693,76 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
 }
 
 static void
-isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
+{
+	struct llist_node *llnode;
+	struct isert_rdma_wr *wr;
+	struct iser_tx_desc *t;
+
+	mutex_lock(&isert_conn->conn_mutex);
+	llnode = llist_del_all(&isert_conn->conn_comp_llist);
+	isert_conn->conn_comp_batch = 0;
+	mutex_unlock(&isert_conn->conn_mutex);
+
+	while (llnode) {
+		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+		llnode = llist_next(llnode);
+		wr = &t->isert_cmd->rdma_wr;
+
+		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+		isert_completion_put(t, t->isert_cmd, ib_dev);
+	}
+}
+
+static void
+isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+	struct llist_node *llnode = tx_desc->comp_llnode_batch;
+	struct isert_rdma_wr *wr;
+	struct iser_tx_desc *t;
 
-	if (tx_desc) {
-		struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+	while (llnode) {
+		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+		llnode = llist_next(llnode);
+		wr = &t->isert_cmd->rdma_wr;
 
-		if (!isert_cmd)
-			isert_unmap_tx_desc(tx_desc, ib_dev);
-		else
-			isert_completion_put(tx_desc, isert_cmd, ib_dev);
+		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+		isert_completion_put(t, t->isert_cmd, ib_dev);
 	}
+	tx_desc->comp_llnode_batch = NULL;
 
-	if (isert_conn->post_recv_buf_count == 0 &&
-	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
-		pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
-		pr_debug("Calling wake_up from isert_cq_comp_err\n");
+	if (!isert_cmd)
+		isert_unmap_tx_desc(tx_desc, ib_dev);
+	else
+		isert_completion_put(tx_desc, isert_cmd, ib_dev);
+}
 
-		mutex_lock(&isert_conn->conn_mutex);
-		if (isert_conn->state != ISER_CONN_DOWN)
-			isert_conn->state = ISER_CONN_TERMINATING;
-		mutex_unlock(&isert_conn->conn_mutex);
+static void
+isert_cq_rx_comp_err(struct isert_conn *isert_conn)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct iscsi_conn *conn = isert_conn->conn;
 
-		wake_up(&isert_conn->conn_wait_comp_err);
+	if (isert_conn->post_recv_buf_count)
+		return;
+
+	isert_cq_drain_comp_llist(isert_conn, ib_dev);
+
+	if (conn->sess) {
+		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+		target_wait_for_sess_cmds(conn->sess->se_sess);
 	}
+
+	while (atomic_read(&isert_conn->post_send_buf_count))
+		msleep(3000);
+
+	mutex_lock(&isert_conn->conn_mutex);
+	isert_conn->state = ISER_CONN_DOWN;
+	mutex_unlock(&isert_conn->conn_mutex);
+
+	complete(&isert_conn->conn_wait_comp_err);
 }
 
 static void
@@ -1740,8 +1787,14 @@ isert_cq_tx_work(struct work_struct *work)
1740 pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); 1787 pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1741 pr_debug("TX wc.status: 0x%08x\n", wc.status); 1788 pr_debug("TX wc.status: 0x%08x\n", wc.status);
1742 pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); 1789 pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
1743 atomic_dec(&isert_conn->post_send_buf_count); 1790
1744 isert_cq_comp_err(tx_desc, isert_conn); 1791 if (wc.wr_id != ISER_FASTREG_LI_WRID) {
1792 if (tx_desc->llnode_active)
1793 continue;
1794
1795 atomic_dec(&isert_conn->post_send_buf_count);
1796 isert_cq_tx_comp_err(tx_desc, isert_conn);
1797 }
1745 } 1798 }
1746 } 1799 }
1747 1800
@@ -1784,7 +1837,7 @@ isert_cq_rx_work(struct work_struct *work)
 				 wc.vendor_err);
 		}
 		isert_conn->post_recv_buf_count--;
-		isert_cq_comp_err(NULL, isert_conn);
+		isert_cq_rx_comp_err(isert_conn);
 	}
 }
 
@@ -2202,6 +2255,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 
 	if (!fr_desc->valid) {
 		memset(&inv_wr, 0, sizeof(inv_wr));
+		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
 		inv_wr.opcode = IB_WR_LOCAL_INV;
 		inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
 		wr = &inv_wr;
@@ -2212,6 +2266,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 
 	/* Prepare FASTREG WR */
 	memset(&fr_wr, 0, sizeof(fr_wr));
+	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
 	fr_wr.opcode = IB_WR_FAST_REG_MR;
 	fr_wr.wr.fast_reg.iova_start =
 		fr_desc->data_frpl->page_list[0] + page_off;
@@ -2377,12 +2432,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 	isert_init_send_wr(isert_conn, isert_cmd,
 			   &isert_cmd->tx_desc.send_wr, true);
 
-	atomic_inc(&isert_conn->post_send_buf_count);
+	atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
 
 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
 	if (rc) {
 		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-		atomic_dec(&isert_conn->post_send_buf_count);
+		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
 	}
 	pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
 		 isert_cmd);
@@ -2410,12 +2465,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 		return rc;
 	}
 
-	atomic_inc(&isert_conn->post_send_buf_count);
+	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
 
 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
 	if (rc) {
 		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
-		atomic_dec(&isert_conn->post_send_buf_count);
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
 	}
 	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
 		 isert_cmd);
@@ -2702,22 +2757,11 @@ isert_free_np(struct iscsi_np *np)
 	kfree(isert_np);
 }
 
-static int isert_check_state(struct isert_conn *isert_conn, int state)
-{
-	int ret;
-
-	mutex_lock(&isert_conn->conn_mutex);
-	ret = (isert_conn->state == state);
-	mutex_unlock(&isert_conn->conn_mutex);
-
-	return ret;
-}
-
-static void isert_free_conn(struct iscsi_conn *conn)
+static void isert_wait_conn(struct iscsi_conn *conn)
 {
 	struct isert_conn *isert_conn = conn->context;
 
-	pr_debug("isert_free_conn: Starting \n");
+	pr_debug("isert_wait_conn: Starting \n");
 	/*
 	 * Decrement post_send_buf_count for special case when called
 	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
@@ -2727,38 +2771,29 @@ static void isert_free_conn(struct iscsi_conn *conn)
 		atomic_dec(&isert_conn->post_send_buf_count);
 
 	if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
-		pr_debug("Calling rdma_disconnect from isert_free_conn\n");
+		pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
 		rdma_disconnect(isert_conn->conn_cm_id);
 	}
 	/*
 	 * Only wait for conn_wait_comp_err if the isert_conn made it
 	 * into full feature phase..
 	 */
-	if (isert_conn->state == ISER_CONN_UP) {
-		pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
-			 isert_conn->state);
-		mutex_unlock(&isert_conn->conn_mutex);
-
-		wait_event(isert_conn->conn_wait_comp_err,
-			  (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
-
-		wait_event(isert_conn->conn_wait,
-			  (isert_check_state(isert_conn, ISER_CONN_DOWN)));
-
-		isert_put_conn(isert_conn);
-		return;
-	}
 	if (isert_conn->state == ISER_CONN_INIT) {
 		mutex_unlock(&isert_conn->conn_mutex);
-		isert_put_conn(isert_conn);
 		return;
 	}
-	pr_debug("isert_free_conn: wait_event conn_wait %d\n",
-		 isert_conn->state);
+	if (isert_conn->state == ISER_CONN_UP)
+		isert_conn->state = ISER_CONN_TERMINATING;
 	mutex_unlock(&isert_conn->conn_mutex);
 
-	wait_event(isert_conn->conn_wait,
-		  (isert_check_state(isert_conn, ISER_CONN_DOWN)));
+	wait_for_completion(&isert_conn->conn_wait_comp_err);
+
+	wait_for_completion(&isert_conn->conn_wait);
+}
+
+static void isert_free_conn(struct iscsi_conn *conn)
+{
+	struct isert_conn *isert_conn = conn->context;
 
 	isert_put_conn(isert_conn);
 }
@@ -2771,6 +2806,7 @@ static struct iscsit_transport iser_target_transport = {
 	.iscsit_setup_np	= isert_setup_np,
 	.iscsit_accept_np	= isert_accept_np,
 	.iscsit_free_np		= isert_free_np,
+	.iscsit_wait_conn	= isert_wait_conn,
 	.iscsit_free_conn	= isert_free_conn,
 	.iscsit_get_login_rx	= isert_get_login_rx,
 	.iscsit_put_login_tx	= isert_put_login_tx,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 708a069002f3..f6ae7f5dd408 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -6,6 +6,7 @@
 
 #define ISERT_RDMA_LISTEN_BACKLOG	10
 #define ISCSI_ISER_SG_TABLESIZE		256
+#define ISER_FASTREG_LI_WRID		0xffffffffffffffffULL
 
 enum isert_desc_type {
 	ISCSI_TX_CONTROL,
@@ -45,6 +46,7 @@ struct iser_tx_desc {
 	struct isert_cmd *isert_cmd;
 	struct llist_node *comp_llnode_batch;
 	struct llist_node comp_llnode;
+	bool llnode_active;
 	struct ib_send_wr send_wr;
 } __packed;
 
@@ -116,8 +118,8 @@ struct isert_conn {
 	struct isert_device	*conn_device;
 	struct work_struct	conn_logout_work;
 	struct mutex		conn_mutex;
-	wait_queue_head_t	conn_wait;
-	wait_queue_head_t	conn_wait_comp_err;
+	struct completion	conn_wait;
+	struct completion	conn_wait_comp_err;
 	struct kref		conn_kref;
 	struct list_head	conn_fr_pool;
 	int			conn_fr_pool_size;
@@ -126,7 +128,6 @@ struct isert_conn {
 #define ISERT_COMP_BATCH_COUNT	8
 	int			conn_comp_batch;
 	struct llist_head	conn_comp_llist;
-	struct mutex		conn_comp_mutex;
 };
 
 #define ISERT_MAX_CQ	64
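
This header change is the crux of the ib_isert series above: a wait_queue_head_t polled through the old isert_check_state() is replaced by a struct completion, which latches the event so a complete() that fires before the waiter arrives is never lost. The same latch can be sketched in user space with a mutex, a condition variable and a flag (hypothetical stand-ins, not the kernel API):

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;		/* latched: survives an early complete() */
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_broadcast(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void *worker(void *arg)
{
	complete(arg);		/* may run before the main thread waits */
	return NULL;
}

int main(void)
{
	struct completion conn_wait;
	pthread_t t;

	init_completion(&conn_wait);
	pthread_create(&t, NULL, worker, &conn_wait);
	wait_for_completion(&conn_wait);	/* never misses the event */
	pthread_join(&t, NULL);
	puts("done");
	return 0;
}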
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 9a06fe883766..95ad936e6048 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -254,16 +254,6 @@ config DM_THIN_PROVISIONING
 	---help---
 	  Provides thin provisioning and snapshots that share a data store.
 
-config DM_DEBUG_BLOCK_STACK_TRACING
-	boolean "Keep stack trace of persistent data block lock holders"
-	depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
-	select STACKTRACE
-	---help---
-	  Enable this for messages that may help debug problems with the
-	  block manager locking used by thin provisioning and caching.
-
-	  If unsure, say N.
-
 config DM_CACHE
 	tristate "Cache target (EXPERIMENTAL)"
 	depends on BLK_DEV_DM
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 1e018e986610..0e385e40909e 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -872,7 +872,7 @@ static void mq_destroy(struct dm_cache_policy *p)
 {
 	struct mq_policy *mq = to_mq_policy(p);
 
-	kfree(mq->table);
+	vfree(mq->table);
 	epool_exit(&mq->cache_pool);
 	epool_exit(&mq->pre_cache_pool);
 	kfree(mq);
@@ -1245,7 +1245,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
 
 	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
 	mq->hash_bits = ffs(mq->nr_buckets) - 1;
-	mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
+	mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
 	if (!mq->table)
 		goto bad_alloc_table;
 
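
The kzalloc-to-vzalloc switch matters because the mq hash table scales with the cache size, and kzalloc() needs the whole table physically contiguous while vzalloc() only needs virtual contiguity. A back-of-the-envelope check, assuming 8-byte bucket heads on a 64-bit build:

#include <stdio.h>

int main(void)
{
	/* 1 TiB cache, 256 KiB blocks -> ~4M cblocks, buckets ~ cblocks / 2 */
	unsigned long long cblocks = (1ULL << 40) / (256 * 1024);
	unsigned long long buckets = cblocks / 2;

	printf("table = %llu buckets * 8 B = %llu KiB contiguous\n",
	       buckets, buckets * 8 >> 10);
	return 0;
}

A ~16 MiB physically contiguous allocation is well past what the page allocator will reliably hand out on a fragmented machine, which is exactly where the old code failed.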
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index afc3d017de4c..d6e88178d22c 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -546,6 +546,9 @@ static int read_exceptions(struct pstore *ps,
 		r = insert_exceptions(ps, area, callback, callback_context,
 				      &full);
 
+		if (!full)
+			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);
+
 		dm_bufio_release(bp);
 
 		dm_bufio_forget(client, chunk);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index baa87ff12816..fb9efc829182 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -76,7 +76,7 @@
 
 #define THIN_SUPERBLOCK_MAGIC 27022010
 #define THIN_SUPERBLOCK_LOCATION 0
-#define THIN_VERSION 1
+#define THIN_VERSION 2
 #define THIN_METADATA_CACHE_SIZE 64
 #define SECTOR_TO_BLOCK_SHIFT 3
 
@@ -1755,3 +1755,38 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
 
 	return r;
 }
+
+int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
+{
+	int r;
+	struct dm_block *sblock;
+	struct thin_disk_superblock *disk_super;
+
+	down_write(&pmd->root_lock);
+	pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
+
+	r = superblock_lock(pmd, &sblock);
+	if (r) {
+		DMERR("couldn't read superblock");
+		goto out;
+	}
+
+	disk_super = dm_block_data(sblock);
+	disk_super->flags = cpu_to_le32(pmd->flags);
+
+	dm_bm_unlock(sblock);
+out:
+	up_write(&pmd->root_lock);
+	return r;
+}
+
+bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
+{
+	bool needs_check;
+
+	down_read(&pmd->root_lock);
+	needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
+	up_read(&pmd->root_lock);
+
+	return needs_check;
+}
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 82ea384d36ff..e3c857db195a 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -25,6 +25,11 @@
 
 /*----------------------------------------------------------------*/
 
+/*
+ * Thin metadata superblock flags.
+ */
+#define THIN_METADATA_NEEDS_CHECK_FLAG (1 << 0)
+
 struct dm_pool_metadata;
 struct dm_thin_device;
 
@@ -202,6 +207,12 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
 					dm_sm_threshold_fn fn,
 					void *context);
 
+/*
+ * Updates the superblock immediately.
+ */
+int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd);
+bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd);
+
 /*----------------------------------------------------------------*/
 
 #endif
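
The new needs_check pair follows a standard flag-under-rwsem pattern: the writer takes pmd->root_lock exclusively and pushes the flag into the on-disk superblock immediately; readers take it shared. Reduced to its locking skeleton, with a pthread rwlock standing in for the kernel rw_semaphore and the superblock write elided:

#include <pthread.h>
#include <stdio.h>

#define NEEDS_CHECK_FLAG (1 << 0)

struct pool_metadata {
	pthread_rwlock_t root_lock;
	unsigned flags;
};

static int set_needs_check(struct pool_metadata *pmd)
{
	pthread_rwlock_wrlock(&pmd->root_lock);
	pmd->flags |= NEEDS_CHECK_FLAG;
	/* the kernel code also writes the flag into the superblock here */
	pthread_rwlock_unlock(&pmd->root_lock);
	return 0;
}

static int needs_check(struct pool_metadata *pmd)
{
	int ret;

	pthread_rwlock_rdlock(&pmd->root_lock);
	ret = pmd->flags & NEEDS_CHECK_FLAG;
	pthread_rwlock_unlock(&pmd->root_lock);
	return ret;
}

int main(void)
{
	struct pool_metadata pmd = { PTHREAD_RWLOCK_INITIALIZER, 0 };

	set_needs_check(&pmd);
	printf("needs_check = %d\n", needs_check(&pmd));
	return 0;
}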
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 7e84baccf0ad..be70d38745f7 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -130,10 +130,11 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
 struct dm_thin_new_mapping;
 
 /*
- * The pool runs in 3 modes. Ordered in degraded order for comparisons.
+ * The pool runs in 4 modes. Ordered in degraded order for comparisons.
  */
 enum pool_mode {
 	PM_WRITE,		/* metadata may be changed */
+	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
 	PM_READ_ONLY,		/* metadata may not be changed */
 	PM_FAIL,		/* all I/O fails */
 };
@@ -198,7 +199,6 @@ struct pool {
 };
 
 static enum pool_mode get_pool_mode(struct pool *pool);
-static void out_of_data_space(struct pool *pool);
 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
 
 /*
@@ -226,6 +226,7 @@ struct thin_c {
 
 	struct pool *pool;
 	struct dm_thin_device *td;
+	bool requeue_mode:1;
 };
 
 /*----------------------------------------------------------------*/
@@ -369,14 +370,18 @@ struct dm_thin_endio_hook {
 	struct dm_thin_new_mapping *overwrite_mapping;
 };
 
-static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
+static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
 {
 	struct bio *bio;
 	struct bio_list bios;
+	unsigned long flags;
 
 	bio_list_init(&bios);
+
+	spin_lock_irqsave(&tc->pool->lock, flags);
 	bio_list_merge(&bios, master);
 	bio_list_init(master);
+	spin_unlock_irqrestore(&tc->pool->lock, flags);
 
 	while ((bio = bio_list_pop(&bios))) {
 		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -391,12 +396,26 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
 static void requeue_io(struct thin_c *tc)
 {
 	struct pool *pool = tc->pool;
+
+	requeue_bio_list(tc, &pool->deferred_bios);
+	requeue_bio_list(tc, &pool->retry_on_resume_list);
+}
+
+static void error_retry_list(struct pool *pool)
+{
+	struct bio *bio;
 	unsigned long flags;
+	struct bio_list bios;
+
+	bio_list_init(&bios);
 
 	spin_lock_irqsave(&pool->lock, flags);
-	__requeue_bio_list(tc, &pool->deferred_bios);
-	__requeue_bio_list(tc, &pool->retry_on_resume_list);
+	bio_list_merge(&bios, &pool->retry_on_resume_list);
+	bio_list_init(&pool->retry_on_resume_list);
 	spin_unlock_irqrestore(&pool->lock, flags);
+
+	while ((bio = bio_list_pop(&bios)))
+		bio_io_error(bio);
 }
 
 /*
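
requeue_bio_list() and error_retry_list() above share one idiom: splice the shared bio list into a private list while holding the pool lock, then walk the private copy with the lock dropped so bio_endio()/bio_io_error() never run under a spinlock. A user-space rendition of the idiom (pthread mutex in place of the spinlock):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *retry_list;		/* shared, lock-protected */

static void error_retry_list(void)
{
	struct node *bios, *n;

	/* grab the whole list under the lock... */
	pthread_mutex_lock(&lock);
	bios = retry_list;
	retry_list = NULL;
	pthread_mutex_unlock(&lock);

	/* ...and complete each entry with the lock dropped */
	while ((n = bios)) {
		bios = n->next;
		printf("error bio %d\n", n->id);	/* bio_io_error() */
	}
}

int main(void)
{
	struct node b2 = { NULL, 2 }, b1 = { &b2, 1 };

	retry_list = &b1;
	error_retry_list();
	return 0;
}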
@@ -925,13 +944,15 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
 	}
 }
 
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 {
 	int r;
 	dm_block_t free_blocks;
 	struct pool *pool = tc->pool;
 
-	if (get_pool_mode(pool) != PM_WRITE)
+	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
 		return -EINVAL;
 
 	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
@@ -958,7 +979,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 	}
 
 	if (!free_blocks) {
-		out_of_data_space(pool);
+		set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
 		return -ENOSPC;
 	}
 	}
@@ -988,15 +1009,32 @@ static void retry_on_resume(struct bio *bio)
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
-static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
+static bool should_error_unserviceable_bio(struct pool *pool)
 {
-	/*
-	 * When pool is read-only, no cell locking is needed because
-	 * nothing is changing.
-	 */
-	WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY);
+	enum pool_mode m = get_pool_mode(pool);
+
+	switch (m) {
+	case PM_WRITE:
+		/* Shouldn't get here */
+		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
+		return true;
+
+	case PM_OUT_OF_DATA_SPACE:
+		return pool->pf.error_if_no_space;
 
-	if (pool->pf.error_if_no_space)
+	case PM_READ_ONLY:
+	case PM_FAIL:
+		return true;
+	default:
+		/* Shouldn't get here */
+		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
+		return true;
+	}
+}
+
+static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
+{
+	if (should_error_unserviceable_bio(pool))
 		bio_io_error(bio);
 	else
 		retry_on_resume(bio);
@@ -1007,11 +1045,20 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
 	struct bio *bio;
 	struct bio_list bios;
 
+	if (should_error_unserviceable_bio(pool)) {
+		cell_error(pool, cell);
+		return;
+	}
+
 	bio_list_init(&bios);
 	cell_release(pool, cell, &bios);
 
-	while ((bio = bio_list_pop(&bios)))
-		handle_unserviceable_bio(pool, bio);
+	if (should_error_unserviceable_bio(pool))
+		while ((bio = bio_list_pop(&bios)))
+			bio_io_error(bio);
+	else
+		while ((bio = bio_list_pop(&bios)))
+			retry_on_resume(bio);
 }
 
 static void process_discard(struct thin_c *tc, struct bio *bio)
@@ -1296,6 +1343,11 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
 	}
 }
 
+static void process_bio_success(struct thin_c *tc, struct bio *bio)
+{
+	bio_endio(bio, 0);
+}
+
 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
 {
 	bio_io_error(bio);
@@ -1328,6 +1380,11 @@ static void process_deferred_bios(struct pool *pool)
 		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 		struct thin_c *tc = h->tc;
 
+		if (tc->requeue_mode) {
+			bio_endio(bio, DM_ENDIO_REQUEUE);
+			continue;
+		}
+
 		/*
 		 * If we've got no free new_mapping structs, and processing
 		 * this bio might require one, we pause until there are some
@@ -1394,51 +1451,134 @@ static void do_waker(struct work_struct *ws)
 
 /*----------------------------------------------------------------*/
 
+struct noflush_work {
+	struct work_struct worker;
+	struct thin_c *tc;
+
+	atomic_t complete;
+	wait_queue_head_t wait;
+};
+
+static void complete_noflush_work(struct noflush_work *w)
+{
+	atomic_set(&w->complete, 1);
+	wake_up(&w->wait);
+}
+
+static void do_noflush_start(struct work_struct *ws)
+{
+	struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+	w->tc->requeue_mode = true;
+	requeue_io(w->tc);
+	complete_noflush_work(w);
+}
+
+static void do_noflush_stop(struct work_struct *ws)
+{
+	struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+	w->tc->requeue_mode = false;
+	complete_noflush_work(w);
+}
+
+static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
+{
+	struct noflush_work w;
+
+	INIT_WORK(&w.worker, fn);
+	w.tc = tc;
+	atomic_set(&w.complete, 0);
+	init_waitqueue_head(&w.wait);
+
+	queue_work(tc->pool->wq, &w.worker);
+
+	wait_event(w.wait, atomic_read(&w.complete));
+}
+
+/*----------------------------------------------------------------*/
+
 static enum pool_mode get_pool_mode(struct pool *pool)
 {
 	return pool->pf.mode;
 }
 
+static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
+{
+	dm_table_event(pool->ti->table);
+	DMINFO("%s: switching pool to %s mode",
+	       dm_device_name(pool->pool_md), new_mode);
+}
+
 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 {
-	int r;
-	enum pool_mode old_mode = pool->pf.mode;
+	struct pool_c *pt = pool->ti->private;
+	bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
+	enum pool_mode old_mode = get_pool_mode(pool);
+
+	/*
+	 * Never allow the pool to transition to PM_WRITE mode if user
+	 * intervention is required to verify metadata and data consistency.
+	 */
+	if (new_mode == PM_WRITE && needs_check) {
+		DMERR("%s: unable to switch pool to write mode until repaired.",
+		      dm_device_name(pool->pool_md));
+		if (old_mode != new_mode)
+			new_mode = old_mode;
+		else
+			new_mode = PM_READ_ONLY;
+	}
+	/*
+	 * If we were in PM_FAIL mode, rollback of metadata failed. We're
+	 * not going to recover without a thin_repair. So we never let the
+	 * pool move out of the old mode.
+	 */
+	if (old_mode == PM_FAIL)
+		new_mode = old_mode;
 
 	switch (new_mode) {
 	case PM_FAIL:
 		if (old_mode != new_mode)
-			DMERR("%s: switching pool to failure mode",
-			      dm_device_name(pool->pool_md));
+			notify_of_pool_mode_change(pool, "failure");
 		dm_pool_metadata_read_only(pool->pmd);
 		pool->process_bio = process_bio_fail;
 		pool->process_discard = process_bio_fail;
 		pool->process_prepared_mapping = process_prepared_mapping_fail;
 		pool->process_prepared_discard = process_prepared_discard_fail;
+
+		error_retry_list(pool);
 		break;
 
 	case PM_READ_ONLY:
 		if (old_mode != new_mode)
-			DMERR("%s: switching pool to read-only mode",
-			      dm_device_name(pool->pool_md));
-		r = dm_pool_abort_metadata(pool->pmd);
-		if (r) {
-			DMERR("%s: aborting transaction failed",
-			      dm_device_name(pool->pool_md));
-			new_mode = PM_FAIL;
-			set_pool_mode(pool, new_mode);
-		} else {
-			dm_pool_metadata_read_only(pool->pmd);
-			pool->process_bio = process_bio_read_only;
-			pool->process_discard = process_discard;
-			pool->process_prepared_mapping = process_prepared_mapping_fail;
-			pool->process_prepared_discard = process_prepared_discard_passdown;
-		}
+			notify_of_pool_mode_change(pool, "read-only");
+		dm_pool_metadata_read_only(pool->pmd);
+		pool->process_bio = process_bio_read_only;
+		pool->process_discard = process_bio_success;
+		pool->process_prepared_mapping = process_prepared_mapping_fail;
+		pool->process_prepared_discard = process_prepared_discard_passdown;
+
+		error_retry_list(pool);
+		break;
+
+	case PM_OUT_OF_DATA_SPACE:
+		/*
+		 * Ideally we'd never hit this state; the low water mark
+		 * would trigger userland to extend the pool before we
+		 * completely run out of data space. However, many small
+		 * IOs to unprovisioned space can consume data space at an
+		 * alarming rate. Adjust your low water mark if you're
+		 * frequently seeing this mode.
+		 */
+		if (old_mode != new_mode)
+			notify_of_pool_mode_change(pool, "out-of-data-space");
+		pool->process_bio = process_bio_read_only;
+		pool->process_discard = process_discard;
+		pool->process_prepared_mapping = process_prepared_mapping;
+		pool->process_prepared_discard = process_prepared_discard_passdown;
 		break;
 
 	case PM_WRITE:
 		if (old_mode != new_mode)
-			DMINFO("%s: switching pool to write mode",
-			       dm_device_name(pool->pool_md));
+			notify_of_pool_mode_change(pool, "write");
 		dm_pool_metadata_read_write(pool->pmd);
 		pool->process_bio = process_bio;
 		pool->process_discard = process_discard;
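
set_pool_mode() is effectively a table from pool mode to four bio/mapping handlers, and the enum order matters because callers compare modes to ask how degraded the pool is. A condensed sketch of the dispatch as strings; the PM_WRITE prepared handlers are assumed from the unchanged tail of the function, which this hunk cuts off:

#include <stdio.h>

/* Ordered from healthy to most degraded, as in the enum above. */
enum pool_mode { PM_WRITE, PM_OUT_OF_DATA_SPACE, PM_READ_ONLY, PM_FAIL };

struct handlers { const char *bio, *discard, *prep_map, *prep_discard; };

static const struct handlers table[] = {
	[PM_WRITE]             = { "process_bio", "process_discard",
				   "process_prepared_mapping",
				   "process_prepared_discard" },
	[PM_OUT_OF_DATA_SPACE] = { "process_bio_read_only", "process_discard",
				   "process_prepared_mapping",
				   "process_prepared_discard_passdown" },
	[PM_READ_ONLY]         = { "process_bio_read_only", "process_bio_success",
				   "process_prepared_mapping_fail",
				   "process_prepared_discard_passdown" },
	[PM_FAIL]              = { "process_bio_fail", "process_bio_fail",
				   "process_prepared_mapping_fail",
				   "process_prepared_discard_fail" },
};

int main(void)
{
	for (enum pool_mode m = PM_WRITE; m <= PM_FAIL; m++)
		printf("mode %d: bio=%s discard=%s\n",
		       m, table[m].bio, table[m].discard);
	return 0;
}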
@@ -1448,32 +1588,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1448 } 1588 }
1449 1589
1450 pool->pf.mode = new_mode; 1590 pool->pf.mode = new_mode;
1591 /*
1592 * The pool mode may have changed, sync it so bind_control_target()
1593 * doesn't cause an unexpected mode transition on resume.
1594 */
1595 pt->adjusted_pf.mode = new_mode;
1451} 1596}
1452 1597
1453/* 1598static void abort_transaction(struct pool *pool)
1454 * Rather than calling set_pool_mode directly, use these which describe the
1455 * reason for mode degradation.
1456 */
1457static void out_of_data_space(struct pool *pool)
1458{ 1599{
1459 DMERR_LIMIT("%s: no free data space available.", 1600 const char *dev_name = dm_device_name(pool->pool_md);
1460 dm_device_name(pool->pool_md)); 1601
1461 set_pool_mode(pool, PM_READ_ONLY); 1602 DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
1603 if (dm_pool_abort_metadata(pool->pmd)) {
1604 DMERR("%s: failed to abort metadata transaction", dev_name);
1605 set_pool_mode(pool, PM_FAIL);
1606 }
1607
1608 if (dm_pool_metadata_set_needs_check(pool->pmd)) {
1609 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
1610 set_pool_mode(pool, PM_FAIL);
1611 }
1462} 1612}
1463 1613
1464static void metadata_operation_failed(struct pool *pool, const char *op, int r) 1614static void metadata_operation_failed(struct pool *pool, const char *op, int r)
1465{ 1615{
1466 dm_block_t free_blocks;
1467
1468 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d", 1616 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
1469 dm_device_name(pool->pool_md), op, r); 1617 dm_device_name(pool->pool_md), op, r);
1470 1618
1471 if (r == -ENOSPC && 1619 abort_transaction(pool);
1472 !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
1473 !free_blocks)
1474 DMERR_LIMIT("%s: no free metadata space available.",
1475 dm_device_name(pool->pool_md));
1476
1477 set_pool_mode(pool, PM_READ_ONLY); 1620 set_pool_mode(pool, PM_READ_ONLY);
1478} 1621}
1479 1622
@@ -1524,6 +1667,11 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1524 1667
1525 thin_hook_bio(tc, bio); 1668 thin_hook_bio(tc, bio);
1526 1669
1670 if (tc->requeue_mode) {
1671 bio_endio(bio, DM_ENDIO_REQUEUE);
1672 return DM_MAPIO_SUBMITTED;
1673 }
1674
1527 if (get_pool_mode(tc->pool) == PM_FAIL) { 1675 if (get_pool_mode(tc->pool) == PM_FAIL) {
1528 bio_io_error(bio); 1676 bio_io_error(bio);
1529 return DM_MAPIO_SUBMITTED; 1677 return DM_MAPIO_SUBMITTED;
@@ -1687,7 +1835,7 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
1687 /* 1835 /*
1688 * We want to make sure that a pool in PM_FAIL mode is never upgraded. 1836 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
1689 */ 1837 */
1690 enum pool_mode old_mode = pool->pf.mode; 1838 enum pool_mode old_mode = get_pool_mode(pool);
1691 enum pool_mode new_mode = pt->adjusted_pf.mode; 1839 enum pool_mode new_mode = pt->adjusted_pf.mode;
1692 1840
1693 /* 1841 /*
@@ -1701,16 +1849,6 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
1701 pool->pf = pt->adjusted_pf; 1849 pool->pf = pt->adjusted_pf;
1702 pool->low_water_blocks = pt->low_water_blocks; 1850 pool->low_water_blocks = pt->low_water_blocks;
1703 1851
1704 /*
1705 * If we were in PM_FAIL mode, rollback of metadata failed. We're
1706 * not going to recover without a thin_repair. So we never let the
1707 * pool move out of the old mode. On the other hand a PM_READ_ONLY
1708 * may have been due to a lack of metadata or data space, and may
1709 * now work (ie. if the underlying devices have been resized).
1710 */
1711 if (old_mode == PM_FAIL)
1712 new_mode = old_mode;
1713
1714 set_pool_mode(pool, new_mode); 1852 set_pool_mode(pool, new_mode);
1715 1853
1716 return 0; 1854 return 0;
@@ -2253,6 +2391,12 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
2253 return -EINVAL; 2391 return -EINVAL;
2254 2392
2255 } else if (data_size > sb_data_size) { 2393 } else if (data_size > sb_data_size) {
2394 if (dm_pool_metadata_needs_check(pool->pmd)) {
2395 DMERR("%s: unable to grow the data device until repaired.",
2396 dm_device_name(pool->pool_md));
2397 return 0;
2398 }
2399
2256 if (sb_data_size) 2400 if (sb_data_size)
2257 DMINFO("%s: growing the data device from %llu to %llu blocks", 2401 DMINFO("%s: growing the data device from %llu to %llu blocks",
2258 dm_device_name(pool->pool_md), 2402 dm_device_name(pool->pool_md),
@@ -2294,6 +2438,12 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
2294 return -EINVAL; 2438 return -EINVAL;
2295 2439
2296 } else if (metadata_dev_size > sb_metadata_dev_size) { 2440 } else if (metadata_dev_size > sb_metadata_dev_size) {
2441 if (dm_pool_metadata_needs_check(pool->pmd)) {
2442 DMERR("%s: unable to grow the metadata device until repaired.",
2443 dm_device_name(pool->pool_md));
2444 return 0;
2445 }
2446
2297 warn_if_metadata_device_too_big(pool->md_dev); 2447 warn_if_metadata_device_too_big(pool->md_dev);
2298 DMINFO("%s: growing the metadata device from %llu to %llu blocks", 2448 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
2299 dm_device_name(pool->pool_md), 2449 dm_device_name(pool->pool_md),
@@ -2681,7 +2831,9 @@ static void pool_status(struct dm_target *ti, status_type_t type,
2681 else 2831 else
2682 DMEMIT("- "); 2832 DMEMIT("- ");
2683 2833
2684 if (pool->pf.mode == PM_READ_ONLY) 2834 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
2835 DMEMIT("out_of_data_space ");
2836 else if (pool->pf.mode == PM_READ_ONLY)
2685 DMEMIT("ro "); 2837 DMEMIT("ro ");
2686 else 2838 else
2687 DMEMIT("rw "); 2839 DMEMIT("rw ");
@@ -2795,7 +2947,7 @@ static struct target_type pool_target = {
2795 .name = "thin-pool", 2947 .name = "thin-pool",
2796 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 2948 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2797 DM_TARGET_IMMUTABLE, 2949 DM_TARGET_IMMUTABLE,
2798 .version = {1, 10, 0}, 2950 .version = {1, 11, 0},
2799 .module = THIS_MODULE, 2951 .module = THIS_MODULE,
2800 .ctr = pool_ctr, 2952 .ctr = pool_ctr,
2801 .dtr = pool_dtr, 2953 .dtr = pool_dtr,
@@ -2997,10 +3149,23 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
2997 return 0; 3149 return 0;
2998} 3150}
2999 3151
3000static void thin_postsuspend(struct dm_target *ti) 3152static void thin_presuspend(struct dm_target *ti)
3001{ 3153{
3154 struct thin_c *tc = ti->private;
3155
3002 if (dm_noflush_suspending(ti)) 3156 if (dm_noflush_suspending(ti))
3003 requeue_io((struct thin_c *)ti->private); 3157 noflush_work(tc, do_noflush_start);
3158}
3159
3160static void thin_postsuspend(struct dm_target *ti)
3161{
3162 struct thin_c *tc = ti->private;
3163
3164 /*
3165 * The dm_noflush_suspending flag has been cleared by now, so
3166 * unfortunately we must always run this.
3167 */
3168 noflush_work(tc, do_noflush_stop);
3004} 3169}
3005 3170
3006/* 3171/*
@@ -3085,12 +3250,13 @@ static int thin_iterate_devices(struct dm_target *ti,
3085 3250
3086static struct target_type thin_target = { 3251static struct target_type thin_target = {
3087 .name = "thin", 3252 .name = "thin",
3088 .version = {1, 10, 0}, 3253 .version = {1, 11, 0},
3089 .module = THIS_MODULE, 3254 .module = THIS_MODULE,
3090 .ctr = thin_ctr, 3255 .ctr = thin_ctr,
3091 .dtr = thin_dtr, 3256 .dtr = thin_dtr,
3092 .map = thin_map, 3257 .map = thin_map,
3093 .end_io = thin_endio, 3258 .end_io = thin_endio,
3259 .presuspend = thin_presuspend,
3094 .postsuspend = thin_postsuspend, 3260 .postsuspend = thin_postsuspend,
3095 .status = thin_status, 3261 .status = thin_status,
3096 .iterate_devices = thin_iterate_devices, 3262 .iterate_devices = thin_iterate_devices,
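The thin-pool hunks above make PM_FAIL sticky (only an offline thin_repair can recover a failed pool, so set_pool_mode now refuses to leave it) and add the intermediate PM_OUT_OF_DATA_SPACE mode. A minimal user-space sketch of that transition rule follows; the enum ordering and names are illustrative, not the kernel's definitions.

#include <stdio.h>

/* Illustrative pool modes, ordered by increasing severity. */
enum pool_mode { PM_WRITE, PM_OUT_OF_DATA_SPACE, PM_READ_ONLY, PM_FAIL };

/* Sticky-failure rule: once a pool has failed, no request may upgrade it. */
static enum pool_mode next_mode(enum pool_mode old_mode, enum pool_mode requested)
{
    if (old_mode == PM_FAIL)
        return PM_FAIL;    /* only an offline thin_repair recovers this */
    return requested;
}

int main(void)
{
    printf("%d\n", next_mode(PM_FAIL, PM_WRITE));              /* 3: stays PM_FAIL */
    printf("%d\n", next_mode(PM_WRITE, PM_OUT_OF_DATA_SPACE)); /* 1 */
    return 0;
}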
diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig
index 19b268795415..0c2dec7aec20 100644
--- a/drivers/md/persistent-data/Kconfig
+++ b/drivers/md/persistent-data/Kconfig
@@ -6,3 +6,13 @@ config DM_PERSISTENT_DATA
6 ---help--- 6 ---help---
7 Library providing immutable on-disk data structure support for 7 Library providing immutable on-disk data structure support for
8 device-mapper targets such as the thin provisioning target. 8 device-mapper targets such as the thin provisioning target.
9
10config DM_DEBUG_BLOCK_STACK_TRACING
11 boolean "Keep stack trace of persistent data block lock holders"
12 depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
13 select STACKTRACE
14 ---help---
15 Enable this for messages that may help debug problems with the
16 block manager locking used by thin provisioning and caching.
17
18 If unsure, say N.
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index e9bdd462f4f5..786b689bdfc7 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -91,6 +91,69 @@ struct block_op {
91 dm_block_t block; 91 dm_block_t block;
92}; 92};
93 93
94struct bop_ring_buffer {
95 unsigned begin;
96 unsigned end;
97 struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
98};
99
100static void brb_init(struct bop_ring_buffer *brb)
101{
102 brb->begin = 0;
103 brb->end = 0;
104}
105
106static bool brb_empty(struct bop_ring_buffer *brb)
107{
108 return brb->begin == brb->end;
109}
110
111static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
112{
113 unsigned r = old + 1;
114 return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
115}
116
117static int brb_push(struct bop_ring_buffer *brb,
118 enum block_op_type type, dm_block_t b)
119{
120 struct block_op *bop;
121 unsigned next = brb_next(brb, brb->end);
122
123 /*
124 * We don't allow the last bop to be filled, this way we can
125 * differentiate between full and empty.
126 */
127 if (next == brb->begin)
128 return -ENOMEM;
129
130 bop = brb->bops + brb->end;
131 bop->type = type;
132 bop->block = b;
133
134 brb->end = next;
135
136 return 0;
137}
138
139static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
140{
141 struct block_op *bop;
142
143 if (brb_empty(brb))
144 return -ENODATA;
145
146 bop = brb->bops + brb->begin;
147 result->type = bop->type;
148 result->block = bop->block;
149
150 brb->begin = brb_next(brb, brb->begin);
151
152 return 0;
153}
154
155/*----------------------------------------------------------------*/
156
94struct sm_metadata { 157struct sm_metadata {
95 struct dm_space_map sm; 158 struct dm_space_map sm;
96 159
@@ -101,25 +164,20 @@ struct sm_metadata {
101 164
102 unsigned recursion_count; 165 unsigned recursion_count;
103 unsigned allocated_this_transaction; 166 unsigned allocated_this_transaction;
104 unsigned nr_uncommitted; 167 struct bop_ring_buffer uncommitted;
105 struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
106 168
107 struct threshold threshold; 169 struct threshold threshold;
108}; 170};
109 171
110static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b) 172static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
111{ 173{
112 struct block_op *op; 174 int r = brb_push(&smm->uncommitted, type, b);
113 175
114 if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) { 176 if (r) {
115 DMERR("too many recursive allocations"); 177 DMERR("too many recursive allocations");
116 return -ENOMEM; 178 return -ENOMEM;
117 } 179 }
118 180
119 op = smm->uncommitted + smm->nr_uncommitted++;
120 op->type = type;
121 op->block = b;
122
123 return 0; 181 return 0;
124} 182}
125 183
@@ -158,11 +216,17 @@ static int out(struct sm_metadata *smm)
158 return -ENOMEM; 216 return -ENOMEM;
159 } 217 }
160 218
161 if (smm->recursion_count == 1 && smm->nr_uncommitted) { 219 if (smm->recursion_count == 1) {
162 while (smm->nr_uncommitted && !r) { 220 while (!brb_empty(&smm->uncommitted)) {
163 smm->nr_uncommitted--; 221 struct block_op bop;
164 r = commit_bop(smm, smm->uncommitted + 222
165 smm->nr_uncommitted); 223 r = brb_pop(&smm->uncommitted, &bop);
224 if (r) {
225 DMERR("bug in bop ring buffer");
226 break;
227 }
228
229 r = commit_bop(smm, &bop);
166 if (r) 230 if (r)
167 break; 231 break;
168 } 232 }
@@ -217,7 +281,8 @@ static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
217static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b, 281static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
218 uint32_t *result) 282 uint32_t *result)
219{ 283{
220 int r, i; 284 int r;
285 unsigned i;
221 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 286 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
222 unsigned adjustment = 0; 287 unsigned adjustment = 0;
223 288
@@ -225,8 +290,10 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
225 * We may have some uncommitted adjustments to add. This list 290 * We may have some uncommitted adjustments to add. This list
226 * should always be really short. 291 * should always be really short.
227 */ 292 */
228 for (i = 0; i < smm->nr_uncommitted; i++) { 293 for (i = smm->uncommitted.begin;
229 struct block_op *op = smm->uncommitted + i; 294 i != smm->uncommitted.end;
295 i = brb_next(&smm->uncommitted, i)) {
296 struct block_op *op = smm->uncommitted.bops + i;
230 297
231 if (op->block != b) 298 if (op->block != b)
232 continue; 299 continue;
@@ -254,7 +321,8 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
254static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm, 321static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
255 dm_block_t b, int *result) 322 dm_block_t b, int *result)
256{ 323{
257 int r, i, adjustment = 0; 324 int r, adjustment = 0;
325 unsigned i;
258 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 326 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
259 uint32_t rc; 327 uint32_t rc;
260 328
@@ -262,8 +330,11 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
262 * We may have some uncommitted adjustments to add. This list 330 * We may have some uncommitted adjustments to add. This list
263 * should always be really short. 331 * should always be really short.
264 */ 332 */
265 for (i = 0; i < smm->nr_uncommitted; i++) { 333 for (i = smm->uncommitted.begin;
266 struct block_op *op = smm->uncommitted + i; 334 i != smm->uncommitted.end;
335 i = brb_next(&smm->uncommitted, i)) {
336
337 struct block_op *op = smm->uncommitted.bops + i;
267 338
268 if (op->block != b) 339 if (op->block != b)
269 continue; 340 continue;
@@ -671,7 +742,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
671 smm->begin = superblock + 1; 742 smm->begin = superblock + 1;
672 smm->recursion_count = 0; 743 smm->recursion_count = 0;
673 smm->allocated_this_transaction = 0; 744 smm->allocated_this_transaction = 0;
674 smm->nr_uncommitted = 0; 745 brb_init(&smm->uncommitted);
675 threshold_init(&smm->threshold); 746 threshold_init(&smm->threshold);
676 747
677 memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); 748 memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
@@ -715,7 +786,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,
715 smm->begin = 0; 786 smm->begin = 0;
716 smm->recursion_count = 0; 787 smm->recursion_count = 0;
717 smm->allocated_this_transaction = 0; 788 smm->allocated_this_transaction = 0;
718 smm->nr_uncommitted = 0; 789 brb_init(&smm->uncommitted);
719 threshold_init(&smm->threshold); 790 threshold_init(&smm->threshold);
720 791
721 memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll)); 792 memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
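The bop_ring_buffer introduced above distinguishes full from empty by never filling the last slot: begin == end always means empty, and a push that would wrap end onto begin is refused, so an array of MAX_RECURSIVE_ALLOCATIONS + 1 slots stores at most MAX_RECURSIVE_ALLOCATIONS block ops. A self-contained sketch of the same discipline, with an int payload standing in for struct block_op:

#include <stdio.h>

#define SLOTS 5    /* holds at most SLOTS - 1 items: one slot stays empty */

struct ring {
    unsigned begin, end;
    int items[SLOTS];
};

static unsigned ring_next(unsigned i) { return (i + 1 == SLOTS) ? 0 : i + 1; }
static int ring_empty(const struct ring *r) { return r->begin == r->end; }

static int ring_push(struct ring *r, int v)
{
    unsigned next = ring_next(r->end);
    if (next == r->begin)    /* would look empty after wrapping: full */
        return -1;
    r->items[r->end] = v;
    r->end = next;
    return 0;
}

static int ring_pop(struct ring *r, int *v)
{
    if (ring_empty(r))
        return -1;
    *v = r->items[r->begin];
    r->begin = ring_next(r->begin);
    return 0;
}

int main(void)
{
    struct ring r = { 0, 0, { 0 } };
    int v, i;

    for (i = 0; i < SLOTS; i++)    /* the fifth push fails: buffer full */
        printf("push %d -> %d\n", i, ring_push(&r, i));
    while (!ring_pop(&r, &v))      /* pops 0..3 in FIFO order */
        printf("pop %d\n", v);
    return 0;
}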
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index b9e2000969f0..95c894482fdd 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -240,7 +240,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
240 240
241 nid = cpu_to_node(cpu); 241 nid = cpu_to_node(cpu);
242 page = alloc_pages_exact_node(nid, 242 page = alloc_pages_exact_node(nid,
243 GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 243 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
244 pg_order); 244 pg_order);
245 if (page == NULL) { 245 if (page == NULL) {
246 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " 246 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index d2e6fdb25e28..054e59ca6946 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1029,20 +1029,12 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
1029 dev->mii.phy_id = 0x03; 1029 dev->mii.phy_id = 0x03;
1030 dev->mii.supports_gmii = 1; 1030 dev->mii.supports_gmii = 1;
1031 1031
1032 if (usb_device_no_sg_constraint(dev->udev))
1033 dev->can_dma_sg = 1;
1034
1035 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1032 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1036 NETIF_F_RXCSUM; 1033 NETIF_F_RXCSUM;
1037 1034
1038 dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1035 dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1039 NETIF_F_RXCSUM; 1036 NETIF_F_RXCSUM;
1040 1037
1041 if (dev->can_dma_sg) {
1042 dev->net->features |= NETIF_F_SG | NETIF_F_TSO;
1043 dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO;
1044 }
1045
1046 /* Enable checksum offload */ 1038 /* Enable checksum offload */
1047 *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | 1039 *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
1048 AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6; 1040 AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 00660cc502c5..38901665c770 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -162,8 +162,6 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
162 162
163 avail = *r; 163 avail = *r;
164 pci_clip_resource_to_region(bus, &avail, region); 164 pci_clip_resource_to_region(bus, &avail, region);
165 if (!resource_size(&avail))
166 continue;
167 165
168 /* 166 /*
169 * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to 167 * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6b05f6134b68..fdbc294821e6 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1192,6 +1192,9 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
1192 return err; 1192 return err;
1193 pci_fixup_device(pci_fixup_enable, dev); 1193 pci_fixup_device(pci_fixup_enable, dev);
1194 1194
1195 if (dev->msi_enabled || dev->msix_enabled)
1196 return 0;
1197
1195 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); 1198 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1196 if (pin) { 1199 if (pin) {
1197 pci_read_config_word(dev, PCI_COMMAND, &cmd); 1200 pci_read_config_word(dev, PCI_COMMAND, &cmd);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index be361b7cd30f..1e4e69384baa 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -217,7 +217,7 @@ config PINCTRL_IMX28
217 select PINCTRL_MXS 217 select PINCTRL_MXS
218 218
219config PINCTRL_MSM 219config PINCTRL_MSM
220 tristate 220 bool
221 select PINMUX 221 select PINMUX
222 select PINCONF 222 select PINCONF
223 select GENERIC_PINCONF 223 select GENERIC_PINCONF
diff --git a/drivers/pinctrl/pinctrl-capri.c b/drivers/pinctrl/pinctrl-capri.c
index 4669c53f99b0..eb2500212147 100644
--- a/drivers/pinctrl/pinctrl-capri.c
+++ b/drivers/pinctrl/pinctrl-capri.c
@@ -1435,7 +1435,7 @@ int __init capri_pinctrl_probe(struct platform_device *pdev)
1435} 1435}
1436 1436
1437static struct of_device_id capri_pinctrl_of_match[] = { 1437static struct of_device_id capri_pinctrl_of_match[] = {
1438 { .compatible = "brcm,capri-pinctrl", }, 1438 { .compatible = "brcm,bcm11351-pinctrl", },
1439 { }, 1439 { },
1440}; 1440};
1441 1441
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
index 9ccf681dad2f..f9fabe9bf47d 100644
--- a/drivers/pinctrl/pinctrl-sunxi.c
+++ b/drivers/pinctrl/pinctrl-sunxi.c
@@ -14,6 +14,7 @@
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/gpio.h> 15#include <linux/gpio.h>
16#include <linux/irqdomain.h> 16#include <linux/irqdomain.h>
17#include <linux/irqchip/chained_irq.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/of.h> 19#include <linux/of.h>
19#include <linux/of_address.h> 20#include <linux/of_address.h>
@@ -584,7 +585,7 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
584 spin_lock_irqsave(&pctl->lock, flags); 585 spin_lock_irqsave(&pctl->lock, flags);
585 586
586 regval = readl(pctl->membase + reg); 587 regval = readl(pctl->membase + reg);
587 regval &= ~IRQ_CFG_IRQ_MASK; 588 regval &= ~(IRQ_CFG_IRQ_MASK << index);
588 writel(regval | (mode << index), pctl->membase + reg); 589 writel(regval | (mode << index), pctl->membase + reg);
589 590
590 spin_unlock_irqrestore(&pctl->lock, flags); 591 spin_unlock_irqrestore(&pctl->lock, flags);
@@ -665,6 +666,7 @@ static struct irq_chip sunxi_pinctrl_irq_chip = {
665 666
666static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc) 667static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
667{ 668{
669 struct irq_chip *chip = irq_get_chip(irq);
668 struct sunxi_pinctrl *pctl = irq_get_handler_data(irq); 670 struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
669 const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG); 671 const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG);
670 672
@@ -674,10 +676,12 @@ static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
674 if (reg) { 676 if (reg) {
675 int irqoffset; 677 int irqoffset;
676 678
679 chained_irq_enter(chip, desc);
677 for_each_set_bit(irqoffset, &reg, SUNXI_IRQ_NUMBER) { 680 for_each_set_bit(irqoffset, &reg, SUNXI_IRQ_NUMBER) {
678 int pin_irq = irq_find_mapping(pctl->domain, irqoffset); 681 int pin_irq = irq_find_mapping(pctl->domain, irqoffset);
679 generic_handle_irq(pin_irq); 682 generic_handle_irq(pin_irq);
680 } 683 }
684 chained_irq_exit(chip, desc);
681 } 685 }
682} 686}
683 687
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h
index 01c494f8a14f..552b0e97077a 100644
--- a/drivers/pinctrl/pinctrl-sunxi.h
+++ b/drivers/pinctrl/pinctrl-sunxi.h
@@ -511,7 +511,7 @@ static inline u32 sunxi_pull_offset(u16 pin)
511 511
512static inline u32 sunxi_irq_cfg_reg(u16 irq) 512static inline u32 sunxi_irq_cfg_reg(u16 irq)
513{ 513{
514 u8 reg = irq / IRQ_CFG_IRQ_PER_REG; 514 u8 reg = irq / IRQ_CFG_IRQ_PER_REG * 0x04;
515 return reg + IRQ_CFG_REG; 515 return reg + IRQ_CFG_REG;
516} 516}
517 517
@@ -523,7 +523,7 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
523 523
524static inline u32 sunxi_irq_ctrl_reg(u16 irq) 524static inline u32 sunxi_irq_ctrl_reg(u16 irq)
525{ 525{
526 u8 reg = irq / IRQ_CTRL_IRQ_PER_REG; 526 u8 reg = irq / IRQ_CTRL_IRQ_PER_REG * 0x04;
527 return reg + IRQ_CTRL_REG; 527 return reg + IRQ_CTRL_REG;
528} 528}
529 529
@@ -535,7 +535,7 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
535 535
536static inline u32 sunxi_irq_status_reg(u16 irq) 536static inline u32 sunxi_irq_status_reg(u16 irq)
537{ 537{
538 u8 reg = irq / IRQ_STATUS_IRQ_PER_REG; 538 u8 reg = irq / IRQ_STATUS_IRQ_PER_REG * 0x04;
539 return reg + IRQ_STATUS_REG; 539 return reg + IRQ_STATUS_REG;
540} 540}
541 541
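All three sunxi helpers above computed a register index where a byte offset was needed; multiplying by 0x04 turns the index of a 32-bit register into its offset from the base. A worked example, assuming eight interrupts per config register and an illustrative base address:

#include <stdio.h>

#define IRQ_CFG_REG         0x200    /* illustrative base; see the driver header */
#define IRQ_CFG_IRQ_PER_REG 8        /* assumed: 4 cfg bits each in a 32-bit reg */

/* Before the fix: a register *index*, wrongly used as a byte offset. */
static unsigned cfg_reg_old(unsigned irq)
{
    return irq / IRQ_CFG_IRQ_PER_REG + IRQ_CFG_REG;
}

/* After the fix: index * 0x04 is the byte offset of the 32-bit register. */
static unsigned cfg_reg_new(unsigned irq)
{
    return irq / IRQ_CFG_IRQ_PER_REG * 0x04 + IRQ_CFG_REG;
}

int main(void)
{
    /* irq 9 lives in the second config register */
    printf("old: 0x%x\n", cfg_reg_old(9));    /* 0x201: one byte past the base */
    printf("new: 0x%x\n", cfg_reg_new(9));    /* 0x204: the next 32-bit register */
    return 0;
}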
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 77d103fe39d9..567d6918d50b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -89,7 +89,8 @@ enum {
89 89
90 /* GPSR6 */ 90 /* GPSR6 */
91 FN_IP13_10, FN_IP13_11, FN_IP13_12, FN_IP13_13, FN_IP13_14, 91 FN_IP13_10, FN_IP13_11, FN_IP13_12, FN_IP13_13, FN_IP13_14,
92 FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, FN_IP13_22, FN_IP13_24_23, 92 FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19,
93 FN_IP13_22, FN_IP13_24_23, FN_SD1_CLK,
93 FN_IP13_25, FN_IP13_26, FN_IP13_27, FN_IP13_30_28, FN_IP14_1_0, 94 FN_IP13_25, FN_IP13_26, FN_IP13_27, FN_IP13_30_28, FN_IP14_1_0,
94 FN_IP14_2, FN_IP14_3, FN_IP14_4, FN_IP14_5, FN_IP14_6, FN_IP14_7, 95 FN_IP14_2, FN_IP14_3, FN_IP14_4, FN_IP14_5, FN_IP14_6, FN_IP14_7,
95 FN_IP14_10_8, FN_IP14_13_11, FN_IP14_16_14, FN_IP14_19_17, 96 FN_IP14_10_8, FN_IP14_13_11, FN_IP14_16_14, FN_IP14_19_17,
@@ -788,6 +789,7 @@ static const u16 pinmux_data[] = {
788 PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN), 789 PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN),
789 PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC), 790 PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC),
790 PINMUX_DATA(DU0_DOTCLKIN_MARK, FN_DU0_DOTCLKIN), 791 PINMUX_DATA(DU0_DOTCLKIN_MARK, FN_DU0_DOTCLKIN),
792 PINMUX_DATA(SD1_CLK_MARK, FN_SD1_CLK),
791 793
792 /* IPSR0 */ 794 /* IPSR0 */
793 PINMUX_IPSR_DATA(IP0_0, D0), 795 PINMUX_IPSR_DATA(IP0_0, D0),
@@ -3825,7 +3827,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
3825 GP_6_11_FN, FN_IP13_25, 3827 GP_6_11_FN, FN_IP13_25,
3826 GP_6_10_FN, FN_IP13_24_23, 3828 GP_6_10_FN, FN_IP13_24_23,
3827 GP_6_9_FN, FN_IP13_22, 3829 GP_6_9_FN, FN_IP13_22,
3828 0, 0, 3830 GP_6_8_FN, FN_SD1_CLK,
3829 GP_6_7_FN, FN_IP13_21_19, 3831 GP_6_7_FN, FN_IP13_21_19,
3830 GP_6_6_FN, FN_IP13_18_16, 3832 GP_6_6_FN, FN_IP13_18_16,
3831 GP_6_5_FN, FN_IP13_15, 3833 GP_6_5_FN, FN_IP13_15,
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index a0d6152701cd..617a4916b50f 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -598,7 +598,7 @@ static unsigned int sirfsoc_gpio_irq_startup(struct irq_data *d)
598{ 598{
599 struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); 599 struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
600 600
601 if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq)) 601 if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE))
602 dev_err(bank->chip.gc.dev, 602 dev_err(bank->chip.gc.dev,
603 "unable to lock HW IRQ %lu for IRQ\n", 603 "unable to lock HW IRQ %lu for IRQ\n",
604 d->hwirq); 604 d->hwirq);
@@ -611,7 +611,7 @@ static void sirfsoc_gpio_irq_shutdown(struct irq_data *d)
611 struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); 611 struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
612 612
613 sirfsoc_gpio_irq_mask(d); 613 sirfsoc_gpio_irq_mask(d);
614 gpio_unlock_as_irq(&bank->chip.gc, d->hwirq); 614 gpio_unlock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE);
615} 615}
616 616
617static struct irq_chip sirfsoc_irq_chip = { 617static struct irq_chip sirfsoc_irq_chip = {
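The sirf fix accounts for d->hwirq being numbered across all GPIO banks while gpio_lock_as_irq() wants an offset within one bank's gpio_chip, hence the modulo. A worked example, assuming 32 lines per bank as SIRFSOC_GPIO_BANK_SIZE suggests:

#include <stdio.h>

#define SIRFSOC_GPIO_BANK_SIZE 32    /* assumed: 32 GPIO lines per bank */

int main(void)
{
    unsigned long hwirq = 37;    /* numbered across banks: bank 1, line 5 */

    printf("bank %lu, in-bank offset %lu\n",
           hwirq / SIRFSOC_GPIO_BANK_SIZE,     /* 1 */
           hwirq % SIRFSOC_GPIO_BANK_SIZE);    /* 5: what the fix passes down */
    return 0;
}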
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 31534b51715a..c3b2fb9b6713 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -132,9 +132,9 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
132 132
133 flags = GPIOF_DIR_OUT; 133 flags = GPIOF_DIR_OUT;
134 if (spi->mode & SPI_CS_HIGH) 134 if (spi->mode & SPI_CS_HIGH)
135 flags |= GPIOF_INIT_HIGH;
136 else
137 flags |= GPIOF_INIT_LOW; 135 flags |= GPIOF_INIT_LOW;
136 else
137 flags |= GPIOF_INIT_HIGH;
138 138
139 status = gpio_request_one(cdata->gpio, flags, 139 status = gpio_request_one(cdata->gpio, flags,
140 dev_name(&spi->dev)); 140 dev_name(&spi->dev));
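The spi-ath79 hunk swaps the initial GPIO levels so a chip-select line starts deasserted: an active-high CS (SPI_CS_HIGH) must idle low, and an active-low CS must idle high. A tiny sketch of that polarity rule, with an illustrative helper rather than the gpiolib flags:

#include <stdio.h>
#include <stdbool.h>

/* Idle (deasserted) level for a chip-select line:
 * active-high CS idles low, active-low CS idles high.
 */
static bool cs_idle_level(bool cs_active_high)
{
    return !cs_active_high;
}

int main(void)
{
    printf("SPI_CS_HIGH set:   idle %s\n", cs_idle_level(true) ? "high" : "low");  /* low */
    printf("SPI_CS_HIGH clear: idle %s\n", cs_idle_level(false) ? "high" : "low"); /* high */
    return 0;
}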
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index b0842f751016..5d7b07f08326 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1455,6 +1455,14 @@ static int atmel_spi_suspend(struct device *dev)
1455{ 1455{
1456 struct spi_master *master = dev_get_drvdata(dev); 1456 struct spi_master *master = dev_get_drvdata(dev);
1457 struct atmel_spi *as = spi_master_get_devdata(master); 1457 struct atmel_spi *as = spi_master_get_devdata(master);
1458 int ret;
1459
1460 /* Stop the queue running */
1461 ret = spi_master_suspend(master);
1462 if (ret) {
1463 dev_warn(dev, "cannot suspend master\n");
1464 return ret;
1465 }
1458 1466
1459 clk_disable_unprepare(as->clk); 1467 clk_disable_unprepare(as->clk);
1460 return 0; 1468 return 0;
@@ -1464,9 +1472,16 @@ static int atmel_spi_resume(struct device *dev)
1464{ 1472{
1465 struct spi_master *master = dev_get_drvdata(dev); 1473 struct spi_master *master = dev_get_drvdata(dev);
1466 struct atmel_spi *as = spi_master_get_devdata(master); 1474 struct atmel_spi *as = spi_master_get_devdata(master);
1475 int ret;
1467 1476
1468 clk_prepare_enable(as->clk); 1477 clk_prepare_enable(as->clk);
1469 return 0; 1478
1479 /* Start the queue running */
1480 ret = spi_master_resume(master);
1481 if (ret)
1482 dev_err(dev, "problem starting queue (%d)\n", ret);
1483
1484 return ret;
1470} 1485}
1471 1486
1472static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume); 1487static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume);
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index cabed8f9119e..28ae470397a9 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -514,7 +514,8 @@ static int mcfqspi_resume(struct device *dev)
514#ifdef CONFIG_PM_RUNTIME 514#ifdef CONFIG_PM_RUNTIME
515static int mcfqspi_runtime_suspend(struct device *dev) 515static int mcfqspi_runtime_suspend(struct device *dev)
516{ 516{
517 struct mcfqspi *mcfqspi = dev_get_drvdata(dev); 517 struct spi_master *master = dev_get_drvdata(dev);
518 struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
518 519
519 clk_disable(mcfqspi->clk); 520 clk_disable(mcfqspi->clk);
520 521
@@ -523,7 +524,8 @@ static int mcfqspi_runtime_suspend(struct device *dev)
523 524
524static int mcfqspi_runtime_resume(struct device *dev) 525static int mcfqspi_runtime_resume(struct device *dev)
525{ 526{
526 struct mcfqspi *mcfqspi = dev_get_drvdata(dev); 527 struct spi_master *master = dev_get_drvdata(dev);
528 struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
527 529
528 clk_enable(mcfqspi->clk); 530 clk_enable(mcfqspi->clk);
529 531
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index ec79f726672a..a25392065d9b 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -420,7 +420,6 @@ static int dspi_suspend(struct device *dev)
420 420
421static int dspi_resume(struct device *dev) 421static int dspi_resume(struct device *dev)
422{ 422{
423
424 struct spi_master *master = dev_get_drvdata(dev); 423 struct spi_master *master = dev_get_drvdata(dev);
425 struct fsl_dspi *dspi = spi_master_get_devdata(master); 424 struct fsl_dspi *dspi = spi_master_get_devdata(master);
426 425
@@ -504,7 +503,7 @@ static int dspi_probe(struct platform_device *pdev)
504 clk_prepare_enable(dspi->clk); 503 clk_prepare_enable(dspi->clk);
505 504
506 init_waitqueue_head(&dspi->waitq); 505 init_waitqueue_head(&dspi->waitq);
507 platform_set_drvdata(pdev, dspi); 506 platform_set_drvdata(pdev, master);
508 507
509 ret = spi_bitbang_start(&dspi->bitbang); 508 ret = spi_bitbang_start(&dspi->bitbang);
510 if (ret != 0) { 509 if (ret != 0) {
@@ -525,7 +524,8 @@ out_master_put:
525 524
526static int dspi_remove(struct platform_device *pdev) 525static int dspi_remove(struct platform_device *pdev)
527{ 526{
528 struct fsl_dspi *dspi = platform_get_drvdata(pdev); 527 struct spi_master *master = platform_get_drvdata(pdev);
528 struct fsl_dspi *dspi = spi_master_get_devdata(master);
529 529
530 /* Disconnect from the SPI framework */ 530 /* Disconnect from the SPI framework */
531 spi_bitbang_stop(&dspi->bitbang); 531 spi_bitbang_stop(&dspi->bitbang);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index a5474ef9d2a0..47f15d97e7fa 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -948,8 +948,8 @@ static int spi_imx_remove(struct platform_device *pdev)
948 spi_bitbang_stop(&spi_imx->bitbang); 948 spi_bitbang_stop(&spi_imx->bitbang);
949 949
950 writel(0, spi_imx->base + MXC_CSPICTRL); 950 writel(0, spi_imx->base + MXC_CSPICTRL);
951 clk_disable_unprepare(spi_imx->clk_ipg); 951 clk_unprepare(spi_imx->clk_ipg);
952 clk_disable_unprepare(spi_imx->clk_per); 952 clk_unprepare(spi_imx->clk_per);
953 spi_master_put(master); 953 spi_master_put(master);
954 954
955 return 0; 955 return 0;
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 2e7f38c7a961..88eb57e858b3 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -915,7 +915,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
915 /* Set Tx DMA */ 915 /* Set Tx DMA */
916 param = &dma->param_tx; 916 param = &dma->param_tx;
917 param->dma_dev = &dma_dev->dev; 917 param->dma_dev = &dma_dev->dev;
918 param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */ 918 param->chan_id = data->ch * 2; /* Tx = 0, 2 */
919 param->tx_reg = data->io_base_addr + PCH_SPDWR; 919 param->tx_reg = data->io_base_addr + PCH_SPDWR;
920 param->width = width; 920 param->width = width;
921 chan = dma_request_channel(mask, pch_spi_filter, param); 921 chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -930,7 +930,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
930 /* Set Rx DMA */ 930 /* Set Rx DMA */
931 param = &dma->param_rx; 931 param = &dma->param_rx;
932 param->dma_dev = &dma_dev->dev; 932 param->dma_dev = &dma_dev->dev;
933 param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */ 933 param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
934 param->rx_reg = data->io_base_addr + PCH_SPDRR; 934 param->rx_reg = data->io_base_addr + PCH_SPDRR;
935 param->width = width; 935 param->width = width;
936 chan = dma_request_channel(mask, pch_spi_filter, param); 936 chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -1452,6 +1452,11 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
1452 1452
1453 pch_spi_set_master_mode(master); 1453 pch_spi_set_master_mode(master);
1454 1454
1455 if (use_dma) {
1456 dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
1457 pch_alloc_dma_buf(board_dat, data);
1458 }
1459
1455 ret = spi_register_master(master); 1460 ret = spi_register_master(master);
1456 if (ret != 0) { 1461 if (ret != 0) {
1457 dev_err(&plat_dev->dev, 1462 dev_err(&plat_dev->dev,
@@ -1459,14 +1464,10 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
1459 goto err_spi_register_master; 1464 goto err_spi_register_master;
1460 } 1465 }
1461 1466
1462 if (use_dma) {
1463 dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
1464 pch_alloc_dma_buf(board_dat, data);
1465 }
1466
1467 return 0; 1467 return 0;
1468 1468
1469err_spi_register_master: 1469err_spi_register_master:
1470 pch_free_dma_buf(board_dat, data);
1470 free_irq(board_dat->pdev->irq, data); 1471 free_irq(board_dat->pdev->irq, data);
1471err_request_irq: 1472err_request_irq:
1472 pch_spi_free_resources(board_dat, data); 1473 pch_spi_free_resources(board_dat, data);
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index 4a08e16e42f7..79206cb3fb94 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -866,6 +866,8 @@ c4_ioctl (struct net_device *ndev, struct ifreq *ifr, int cmd)
866 _IOC_SIZE (iocmd)); 866 _IOC_SIZE (iocmd));
867#endif 867#endif
868 iolen = _IOC_SIZE (iocmd); 868 iolen = _IOC_SIZE (iocmd);
869 if (iolen > sizeof(arg))
870 return -EFAULT;
869 data = ifr->ifr_data + sizeof (iocmd); 871 data = ifr->ifr_data + sizeof (iocmd);
870 if (copy_from_user (&arg, data, iolen)) 872 if (copy_from_user (&arg, data, iolen))
871 return -EFAULT; 873 return -EFAULT;
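The cxt1e1 check rejects ioctl payloads larger than the on-stack arg before copy_from_user() runs, closing a kernel stack overwrite. A user-space sketch of the same validate-then-copy pattern; the struct and sizes are illustrative:

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct ioctl_arg { char data[64]; };    /* stands in for the driver's arg buffer */

/* Copy at most sizeof(*dst) bytes; reject oversized requests up front. */
static int checked_copy(struct ioctl_arg *dst, const void *src, size_t iolen)
{
    if (iolen > sizeof(*dst))
        return -EFAULT;    /* same contract as the fix above */
    memcpy(dst, src, iolen);
    return 0;
}

int main(void)
{
    struct ioctl_arg arg;
    char big[128] = { 0 };

    printf("%d\n", checked_copy(&arg, big, 32));     /* 0: fits */
    printf("%d\n", checked_copy(&arg, big, 128));    /* -14: rejected */
    return 0;
}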
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 7f1a7ce4b771..b83ec378d04f 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -785,7 +785,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
785 spin_unlock_bh(&conn->cmd_lock); 785 spin_unlock_bh(&conn->cmd_lock);
786 786
787 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) { 787 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
788 list_del(&cmd->i_conn_node); 788 list_del_init(&cmd->i_conn_node);
789 iscsit_free_cmd(cmd, false); 789 iscsit_free_cmd(cmd, false);
790 } 790 }
791} 791}
@@ -3708,7 +3708,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
3708 break; 3708 break;
3709 case ISTATE_REMOVE: 3709 case ISTATE_REMOVE:
3710 spin_lock_bh(&conn->cmd_lock); 3710 spin_lock_bh(&conn->cmd_lock);
3711 list_del(&cmd->i_conn_node); 3711 list_del_init(&cmd->i_conn_node);
3712 spin_unlock_bh(&conn->cmd_lock); 3712 spin_unlock_bh(&conn->cmd_lock);
3713 3713
3714 iscsit_free_cmd(cmd, false); 3714 iscsit_free_cmd(cmd, false);
@@ -4151,7 +4151,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
4151 spin_lock_bh(&conn->cmd_lock); 4151 spin_lock_bh(&conn->cmd_lock);
4152 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { 4152 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
4153 4153
4154 list_del(&cmd->i_conn_node); 4154 list_del_init(&cmd->i_conn_node);
4155 spin_unlock_bh(&conn->cmd_lock); 4155 spin_unlock_bh(&conn->cmd_lock);
4156 4156
4157 iscsit_increment_maxcmdsn(cmd, sess); 4157 iscsit_increment_maxcmdsn(cmd, sess);
@@ -4196,6 +4196,10 @@ int iscsit_close_connection(
4196 iscsit_stop_timers_for_cmds(conn); 4196 iscsit_stop_timers_for_cmds(conn);
4197 iscsit_stop_nopin_response_timer(conn); 4197 iscsit_stop_nopin_response_timer(conn);
4198 iscsit_stop_nopin_timer(conn); 4198 iscsit_stop_nopin_timer(conn);
4199
4200 if (conn->conn_transport->iscsit_wait_conn)
4201 conn->conn_transport->iscsit_wait_conn(conn);
4202
4199 iscsit_free_queue_reqs_for_conn(conn); 4203 iscsit_free_queue_reqs_for_conn(conn);
4200 4204
4201 /* 4205 /*
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 33be1fb1df32..4ca8fd2a70db 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
138 list_for_each_entry_safe(cmd, cmd_tmp, 138 list_for_each_entry_safe(cmd, cmd_tmp,
139 &cr->conn_recovery_cmd_list, i_conn_node) { 139 &cr->conn_recovery_cmd_list, i_conn_node) {
140 140
141 list_del(&cmd->i_conn_node); 141 list_del_init(&cmd->i_conn_node);
142 cmd->conn = NULL; 142 cmd->conn = NULL;
143 spin_unlock(&cr->conn_recovery_cmd_lock); 143 spin_unlock(&cr->conn_recovery_cmd_lock);
144 iscsit_free_cmd(cmd, true); 144 iscsit_free_cmd(cmd, true);
@@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
160 list_for_each_entry_safe(cmd, cmd_tmp, 160 list_for_each_entry_safe(cmd, cmd_tmp,
161 &cr->conn_recovery_cmd_list, i_conn_node) { 161 &cr->conn_recovery_cmd_list, i_conn_node) {
162 162
163 list_del(&cmd->i_conn_node); 163 list_del_init(&cmd->i_conn_node);
164 cmd->conn = NULL; 164 cmd->conn = NULL;
165 spin_unlock(&cr->conn_recovery_cmd_lock); 165 spin_unlock(&cr->conn_recovery_cmd_lock);
166 iscsit_free_cmd(cmd, true); 166 iscsit_free_cmd(cmd, true);
@@ -216,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery(
216 } 216 }
217 cr = cmd->cr; 217 cr = cmd->cr;
218 218
219 list_del(&cmd->i_conn_node); 219 list_del_init(&cmd->i_conn_node);
220 return --cr->cmd_count; 220 return --cr->cmd_count;
221} 221}
222 222
@@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
297 if (!(cmd->cmd_flags & ICF_OOO_CMDSN)) 297 if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
298 continue; 298 continue;
299 299
300 list_del(&cmd->i_conn_node); 300 list_del_init(&cmd->i_conn_node);
301 301
302 spin_unlock_bh(&conn->cmd_lock); 302 spin_unlock_bh(&conn->cmd_lock);
303 iscsit_free_cmd(cmd, true); 303 iscsit_free_cmd(cmd, true);
@@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
335 /* 335 /*
336 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or 336 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
337 * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call 337 * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
338 * list_del(&cmd->i_conn_node); to release the command to the 338 * list_del_init(&cmd->i_conn_node); to release the command to the
339 * session pool and remove it from the connection's list. 339 * session pool and remove it from the connection's list.
340 * 340 *
341 * Also stop the DataOUT timer, which will be restarted after 341 * Also stop the DataOUT timer, which will be restarted after
@@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
351 " CID: %hu\n", cmd->iscsi_opcode, 351 " CID: %hu\n", cmd->iscsi_opcode,
352 cmd->init_task_tag, cmd->cmd_sn, conn->cid); 352 cmd->init_task_tag, cmd->cmd_sn, conn->cid);
353 353
354 list_del(&cmd->i_conn_node); 354 list_del_init(&cmd->i_conn_node);
355 spin_unlock_bh(&conn->cmd_lock); 355 spin_unlock_bh(&conn->cmd_lock);
356 iscsit_free_cmd(cmd, true); 356 iscsit_free_cmd(cmd, true);
357 spin_lock_bh(&conn->cmd_lock); 357 spin_lock_bh(&conn->cmd_lock);
@@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
371 */ 371 */
372 if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && 372 if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
373 iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { 373 iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
374 list_del(&cmd->i_conn_node); 374 list_del_init(&cmd->i_conn_node);
375 spin_unlock_bh(&conn->cmd_lock); 375 spin_unlock_bh(&conn->cmd_lock);
376 iscsit_free_cmd(cmd, true); 376 iscsit_free_cmd(cmd, true);
377 spin_lock_bh(&conn->cmd_lock); 377 spin_lock_bh(&conn->cmd_lock);
@@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
393 393
394 cmd->sess = conn->sess; 394 cmd->sess = conn->sess;
395 395
396 list_del(&cmd->i_conn_node); 396 list_del_init(&cmd->i_conn_node);
397 spin_unlock_bh(&conn->cmd_lock); 397 spin_unlock_bh(&conn->cmd_lock);
398 398
399 iscsit_free_all_datain_reqs(cmd); 399 iscsit_free_all_datain_reqs(cmd);
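The iscsi conversions from list_del() to list_del_init() matter because these commands can be unlinked again along another path; list_del_init() leaves the entry pointing at itself, so a second unlink degenerates to a no-op instead of corrupting its former neighbours. A minimal doubly-linked-list sketch of the difference (simplified, without the kernel's poison pointers):

#include <stdio.h>

struct list_node { struct list_node *next, *prev; };

static void init(struct list_node *h) { h->next = h->prev = h; }

static void add(struct list_node *n, struct list_node *h)
{
    n->next = h->next; n->prev = h;
    h->next->prev = n; h->next = n;
}

static void del(struct list_node *n)    /* like list_del: dangling afterwards */
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

static void del_init(struct list_node *n)    /* like list_del_init */
{
    del(n);
    init(n);    /* self-pointing: deleting again is harmless */
}

int main(void)
{
    struct list_node head, a;

    init(&head); add(&a, &head);
    del_init(&a);    /* first unlink */
    del_init(&a);    /* second unlink: no-op instead of corruption */
    printf("empty: %d\n", head.next == &head);    /* 1 */
    return 0;
}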
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 39761837608d..44a5471de00f 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
137 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { 137 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
138 138
139 spin_lock(&tpg->tpg_state_lock); 139 spin_lock(&tpg->tpg_state_lock);
140 if (tpg->tpg_state == TPG_STATE_FREE) { 140 if (tpg->tpg_state != TPG_STATE_ACTIVE) {
141 spin_unlock(&tpg->tpg_state_lock); 141 spin_unlock(&tpg->tpg_state_lock);
142 continue; 142 continue;
143 } 143 }
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 42f18fc1067b..77e6531fb0a1 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1079,25 +1079,31 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
1079 left = sectors * dev->prot_length; 1079 left = sectors * dev->prot_length;
1080 1080
1081 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { 1081 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
1082 1082 unsigned int psg_len, copied = 0;
1083 len = min(psg->length, left);
1084 if (offset >= sg->length) {
1085 sg = sg_next(sg);
1086 offset = 0;
1087 }
1088 1083
1089 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1084 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1090 addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; 1085 psg_len = min(left, psg->length);
1091 1086 while (psg_len) {
1092 if (read) 1087 len = min(psg_len, sg->length - offset);
1093 memcpy(paddr, addr, len); 1088 addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
1094 else 1089
1095 memcpy(addr, paddr, len); 1090 if (read)
1096 1091 memcpy(paddr + copied, addr, len);
1097 left -= len; 1092 else
1098 offset += len; 1093 memcpy(addr, paddr + copied, len);
1094
1095 left -= len;
1096 offset += len;
1097 copied += len;
1098 psg_len -= len;
1099
1100 if (offset >= sg->length) {
1101 sg = sg_next(sg);
1102 offset = 0;
1103 }
1104 kunmap_atomic(addr);
1105 }
1099 kunmap_atomic(paddr); 1106 kunmap_atomic(paddr);
1100 kunmap_atomic(addr);
1101 } 1107 }
1102} 1108}
1103 1109
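The sbc_dif_copy_prot() rework walks two scatterlists whose element sizes need not line up: the outer loop pins one protection-buffer entry, and the inner loop copies min-sized runs, advancing the data list whenever its current entry is exhausted. A user-space sketch of copying across two differently-chunked buffers, with plain arrays in place of scatterlists:

#include <stdio.h>
#include <string.h>

struct chunk { char *buf; size_t len; };

/* Copy total bytes from src chunks to dst chunks, advancing whichever
 * side runs out first - the shape of the nested loop in the fix above.
 */
static void copy_chunked(struct chunk *dst, struct chunk *src, size_t total)
{
    size_t doff = 0, soff = 0;

    while (total) {
        size_t len = dst->len - doff;

        if (len > src->len - soff)
            len = src->len - soff;
        if (len > total)
            len = total;
        memcpy(dst->buf + doff, src->buf + soff, len);
        doff += len;
        soff += len;
        total -= len;
        if (doff == dst->len) { dst++; doff = 0; }
        if (soff == src->len) { src++; soff = 0; }
    }
}

int main(void)
{
    char a[3] = "abc", b[5] = "defgh", out1[4], out2[4];
    struct chunk src[] = { { a, 3 }, { b, 5 } };
    struct chunk dst[] = { { out1, 4 }, { out2, 4 } };

    copy_chunked(dst, src, 8);
    printf("%.4s%.4s\n", out1, out2);    /* abcdefgh */
    return 0;
}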
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 35c066489a19..5f88d767671e 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -136,6 +136,7 @@ config SPEAR_THERMAL
136config RCAR_THERMAL 136config RCAR_THERMAL
137 tristate "Renesas R-Car thermal driver" 137 tristate "Renesas R-Car thermal driver"
138 depends on ARCH_SHMOBILE || COMPILE_TEST 138 depends on ARCH_SHMOBILE || COMPILE_TEST
139 depends on HAS_IOMEM
139 help 140 help
140 Enable this to plug the R-Car thermal sensor driver into the Linux 141 Enable this to plug the R-Car thermal sensor driver into the Linux
141 thermal framework. 142 thermal framework.
@@ -210,8 +211,16 @@ config ACPI_INT3403_THERMAL
210 tristate "ACPI INT3403 thermal driver" 211 tristate "ACPI INT3403 thermal driver"
211 depends on X86 && ACPI 212 depends on X86 && ACPI
212 help 213 help
213 This driver uses ACPI INT3403 device objects. If present, it will 214 Newer laptops and tablets that use ACPI may have thermal sensors
214 register each INT3403 thermal sensor as a thermal zone. 215 outside the core CPU/SOC for thermal safety reasons. These
 216 temperature sensors are also exposed for the OS to use via the
 217 so-called INT3403 ACPI object. This driver will, on devices that have
218 such sensors, expose the temperature information from these sensors
219 to userspace via the normal thermal framework. This means that a wide
220 range of applications and GUI widgets can show this information to
221 the user or use this information for making decisions. For example,
222 the Intel Thermal Daemon can use this information to allow the user
 223 to run the laptop without turning on the fans.
215 224
216menu "Texas Instruments thermal drivers" 225menu "Texas Instruments thermal drivers"
217source "drivers/thermal/ti-soc-thermal/Kconfig" 226source "drivers/thermal/ti-soc-thermal/Kconfig"
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 338a88bf6662..71b0ec0c370d 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -56,10 +56,15 @@ static LIST_HEAD(thermal_governor_list);
56static DEFINE_MUTEX(thermal_list_lock); 56static DEFINE_MUTEX(thermal_list_lock);
57static DEFINE_MUTEX(thermal_governor_lock); 57static DEFINE_MUTEX(thermal_governor_lock);
58 58
59static struct thermal_governor *def_governor;
60
59static struct thermal_governor *__find_governor(const char *name) 61static struct thermal_governor *__find_governor(const char *name)
60{ 62{
61 struct thermal_governor *pos; 63 struct thermal_governor *pos;
62 64
65 if (!name || !name[0])
66 return def_governor;
67
63 list_for_each_entry(pos, &thermal_governor_list, governor_list) 68 list_for_each_entry(pos, &thermal_governor_list, governor_list)
64 if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH)) 69 if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH))
65 return pos; 70 return pos;
@@ -82,17 +87,23 @@ int thermal_register_governor(struct thermal_governor *governor)
82 if (__find_governor(governor->name) == NULL) { 87 if (__find_governor(governor->name) == NULL) {
83 err = 0; 88 err = 0;
84 list_add(&governor->governor_list, &thermal_governor_list); 89 list_add(&governor->governor_list, &thermal_governor_list);
90 if (!def_governor && !strncmp(governor->name,
91 DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH))
92 def_governor = governor;
85 } 93 }
86 94
87 mutex_lock(&thermal_list_lock); 95 mutex_lock(&thermal_list_lock);
88 96
89 list_for_each_entry(pos, &thermal_tz_list, node) { 97 list_for_each_entry(pos, &thermal_tz_list, node) {
98 /*
99 * only thermal zones with specified tz->tzp->governor_name
 100 * may run with tz->governor unset
101 */
90 if (pos->governor) 102 if (pos->governor)
91 continue; 103 continue;
92 if (pos->tzp) 104
93 name = pos->tzp->governor_name; 105 name = pos->tzp->governor_name;
94 else 106
95 name = DEFAULT_THERMAL_GOVERNOR;
96 if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH)) 107 if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH))
97 pos->governor = governor; 108 pos->governor = governor;
98 } 109 }
@@ -342,8 +353,8 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz)
342static void handle_non_critical_trips(struct thermal_zone_device *tz, 353static void handle_non_critical_trips(struct thermal_zone_device *tz,
343 int trip, enum thermal_trip_type trip_type) 354 int trip, enum thermal_trip_type trip_type)
344{ 355{
345 if (tz->governor) 356 tz->governor ? tz->governor->throttle(tz, trip) :
346 tz->governor->throttle(tz, trip); 357 def_governor->throttle(tz, trip);
347} 358}
348 359
349static void handle_critical_trips(struct thermal_zone_device *tz, 360static void handle_critical_trips(struct thermal_zone_device *tz,
@@ -1107,7 +1118,7 @@ __thermal_cooling_device_register(struct device_node *np,
1107 INIT_LIST_HEAD(&cdev->thermal_instances); 1118 INIT_LIST_HEAD(&cdev->thermal_instances);
1108 cdev->np = np; 1119 cdev->np = np;
1109 cdev->ops = ops; 1120 cdev->ops = ops;
1110 cdev->updated = true; 1121 cdev->updated = false;
1111 cdev->device.class = &thermal_class; 1122 cdev->device.class = &thermal_class;
1112 cdev->devdata = devdata; 1123 cdev->devdata = devdata;
1113 dev_set_name(&cdev->device, "cooling_device%d", cdev->id); 1124 dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
@@ -1533,7 +1544,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
1533 if (tz->tzp) 1544 if (tz->tzp)
1534 tz->governor = __find_governor(tz->tzp->governor_name); 1545 tz->governor = __find_governor(tz->tzp->governor_name);
1535 else 1546 else
1536 tz->governor = __find_governor(DEFAULT_THERMAL_GOVERNOR); 1547 tz->governor = def_governor;
1537 1548
1538 mutex_unlock(&thermal_governor_lock); 1549 mutex_unlock(&thermal_governor_lock);
1539 1550
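The thermal_core changes cache the first governor registered under DEFAULT_THERMAL_GOVERNOR in def_governor and make __find_governor() fall back to it for a NULL or empty name, so a zone is never left without a throttle path. A minimal sketch of that fallback lookup, over an illustrative static registry rather than the kernel's list:

#include <stdio.h>
#include <string.h>

struct governor { const char *name; };

static struct governor registry[] = { { "step_wise" }, { "user_space" } };
static struct governor *def_governor = &registry[0];    /* assumed default */

/* NULL or empty name selects the cached default - mirroring __find_governor. */
static struct governor *find_governor(const char *name)
{
    size_t i;

    if (!name || !name[0])
        return def_governor;
    for (i = 0; i < sizeof(registry) / sizeof(registry[0]); i++)
        if (!strcmp(registry[i].name, name))
            return &registry[i];
    return NULL;
}

int main(void)
{
    printf("%s\n", find_governor("")->name);            /* step_wise */
    printf("%s\n", find_governor("user_space")->name);  /* user_space */
    return 0;
}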
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 972e1c73722a..081fd7e6a9f0 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -68,6 +68,10 @@ struct phy_dev_entry {
68 struct thermal_zone_device *tzone; 68 struct thermal_zone_device *tzone;
69}; 69};
70 70
71static const struct thermal_zone_params pkg_temp_tz_params = {
72 .no_hwmon = true,
73};
74
71/* List maintaining number of package instances */ 75/* List maintaining number of package instances */
72static LIST_HEAD(phy_dev_list); 76static LIST_HEAD(phy_dev_list);
73static DEFINE_MUTEX(phy_dev_list_mutex); 77static DEFINE_MUTEX(phy_dev_list_mutex);
@@ -394,7 +398,6 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
394 int err; 398 int err;
395 u32 tj_max; 399 u32 tj_max;
396 struct phy_dev_entry *phy_dev_entry; 400 struct phy_dev_entry *phy_dev_entry;
397 char buffer[30];
398 int thres_count; 401 int thres_count;
399 u32 eax, ebx, ecx, edx; 402 u32 eax, ebx, ecx, edx;
400 u8 *temp; 403 u8 *temp;
@@ -440,13 +443,11 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
440 phy_dev_entry->first_cpu = cpu; 443 phy_dev_entry->first_cpu = cpu;
441 phy_dev_entry->tj_max = tj_max; 444 phy_dev_entry->tj_max = tj_max;
442 phy_dev_entry->ref_cnt = 1; 445 phy_dev_entry->ref_cnt = 1;
443 snprintf(buffer, sizeof(buffer), "pkg-temp-%d\n", 446 phy_dev_entry->tzone = thermal_zone_device_register("x86_pkg_temp",
444 phy_dev_entry->phys_proc_id);
445 phy_dev_entry->tzone = thermal_zone_device_register(buffer,
446 thres_count, 447 thres_count,
447 (thres_count == MAX_NUMBER_OF_TRIPS) ? 448 (thres_count == MAX_NUMBER_OF_TRIPS) ?
448 0x03 : 0x01, 449 0x03 : 0x01,
449 phy_dev_entry, &tzone_ops, NULL, 0, 0); 450 phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0);
450 if (IS_ERR(phy_dev_entry->tzone)) { 451 if (IS_ERR(phy_dev_entry->tzone)) {
451 err = PTR_ERR(phy_dev_entry->tzone); 452 err = PTR_ERR(phy_dev_entry->tzone);
452 goto err_ret_free; 453 goto err_ret_free;
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 8d72f0c65937..062967c90b2a 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -717,6 +717,10 @@ int usb_get_configuration(struct usb_device *dev)
717 result = -ENOMEM; 717 result = -ENOMEM;
718 goto err; 718 goto err;
719 } 719 }
720
721 if (dev->quirks & USB_QUIRK_DELAY_INIT)
722 msleep(100);
723
720 result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, 724 result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
721 bigbuffer, length); 725 bigbuffer, length);
722 if (result < 0) { 726 if (result < 0) {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 8f37063c0a49..739ee8e8bdfd 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -47,6 +47,10 @@ static const struct usb_device_id usb_quirk_list[] = {
47 /* Microsoft LifeCam-VX700 v2.0 */ 47 /* Microsoft LifeCam-VX700 v2.0 */
48 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, 48 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
49 49
50 /* Logitech HD Pro Webcams C920 and C930e */
51 { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
52 { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
53
50 /* Logitech Quickcam Fusion */ 54 /* Logitech Quickcam Fusion */
51 { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, 55 { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
52 56
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6fe577d46fa2..924a6ccdb622 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4733,6 +4733,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4733 /* Accept arbitrarily long scatter-gather lists */ 4733 /* Accept arbitrarily long scatter-gather lists */
4734 hcd->self.sg_tablesize = ~0; 4734 hcd->self.sg_tablesize = ~0;
4735 4735
4736 /* support to build packet from discontinuous buffers */
4737 hcd->self.no_sg_constraint = 1;
4738
4736 /* XHCI controllers don't stop the ep queue on short packets :| */ 4739 /* XHCI controllers don't stop the ep queue on short packets :| */
4737 hcd->self.no_stop_on_short = 1; 4740 hcd->self.no_stop_on_short = 1;
4738 4741
@@ -4757,14 +4760,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4757 /* xHCI private pointer was set in xhci_pci_probe for the second 4760 /* xHCI private pointer was set in xhci_pci_probe for the second
4758 * registered roothub. 4761 * registered roothub.
4759 */ 4762 */
4760 xhci = hcd_to_xhci(hcd);
4761 /*
4762 * Support arbitrarily aligned sg-list entries on hosts without
4763 * TD fragment rules (which are currently unsupported).
4764 */
4765 if (xhci->hci_version < 0x100)
4766 hcd->self.no_sg_constraint = 1;
4767
4768 return 0; 4763 return 0;
4769 } 4764 }
4770 4765
@@ -4793,9 +4788,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4793 if (xhci->hci_version > 0x96) 4788 if (xhci->hci_version > 0x96)
4794 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; 4789 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
4795 4790
4796 if (xhci->hci_version < 0x100)
4797 hcd->self.no_sg_constraint = 1;
4798
4799 /* Make sure the HC is halted. */ 4791 /* Make sure the HC is halted. */
4800 retval = xhci_halt(xhci); 4792 retval = xhci_halt(xhci);
4801 if (retval) 4793 if (retval)
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 0129b78a6908..4f70f383132c 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -458,11 +458,10 @@ static int bio_integrity_verify(struct bio *bio)
 	struct blk_integrity_exchg bix;
 	struct bio_vec *bv;
 	sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
-	unsigned int sectors, total, ret;
+	unsigned int sectors, ret = 0;
 	void *prot_buf = bio->bi_integrity->bip_buf;
 	int i;
 
-	ret = total = 0;
 	bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
 	bix.sector_size = bi->sector_size;
 
@@ -484,8 +483,6 @@ static int bio_integrity_verify(struct bio *bio)
 		sectors = bv->bv_len / bi->sector_size;
 		sector += sectors;
 		prot_buf += sectors * bi->tuple_size;
-		total += sectors * bi->tuple_size;
-		BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
 
 		kunmap_atomic(kaddr);
 	}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index cf32f0393369..c0f3718b77a8 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -513,7 +513,7 @@ struct cifs_mnt_data {
 static inline unsigned int
 get_rfc1002_length(void *buf)
 {
-	return be32_to_cpu(*((__be32 *)buf));
+	return be32_to_cpu(*((__be32 *)buf)) & 0xffffff;
 }
 
 static inline void
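The mask reflects how the RFC 1002 session header is laid out: the four bytes that precede every SMB message are a one-byte message type followed by a 24-bit big-endian length, so without the `& 0xffffff` a non-zero type byte would inflate the reported length. A minimal user-space sketch of the same extraction (hypothetical helper, not part of this patch):

	/* hdr[0] is the session message type; hdr[1..3] carry the length */
	static unsigned int rfc1002_length(const unsigned char *hdr)
	{
		return ((unsigned int)hdr[1] << 16) |
		       ((unsigned int)hdr[2] << 8)  |
		        (unsigned int)hdr[3];
	}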
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 53c15074bb36..834fce759d80 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2579,31 +2579,19 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov,
 	struct cifsInodeInfo *cinode = CIFS_I(inode);
 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
 	ssize_t rc = -EACCES;
+	loff_t lock_pos = pos;
 
-	BUG_ON(iocb->ki_pos != pos);
-
+	if (file->f_flags & O_APPEND)
+		lock_pos = i_size_read(inode);
 	/*
 	 * We need to hold the sem to be sure nobody modifies lock list
 	 * with a brlock that prevents writing.
 	 */
 	down_read(&cinode->lock_sem);
-	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
+	if (!cifs_find_lock_conflict(cfile, lock_pos, iov_length(iov, nr_segs),
 				     server->vals->exclusive_lock_type, NULL,
-				     CIFS_WRITE_OP)) {
-		mutex_lock(&inode->i_mutex);
-		rc = __generic_file_aio_write(iocb, iov, nr_segs,
-					      &iocb->ki_pos);
-		mutex_unlock(&inode->i_mutex);
-	}
-
-	if (rc > 0) {
-		ssize_t err;
-
-		err = generic_write_sync(file, iocb->ki_pos - rc, rc);
-		if (err < 0)
-			rc = err;
-	}
-
+				     CIFS_WRITE_OP))
+		rc = generic_file_aio_write(iocb, iov, nr_segs, pos);
 	up_read(&cinode->lock_sem);
 	return rc;
 }
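The new lock_pos handles O_APPEND correctly: an append write really lands at end-of-file, so the byte-range lock conflict check must probe the EOF offset rather than the position the caller passed in; the open-coded i_mutex/generic_write_sync sequence is replaced by generic_file_aio_write(), which does both internally. A hedged sketch of the offset choice (hypothetical helper, not from the patch):

	#include <fcntl.h>
	#include <sys/types.h>

	/* For O_APPEND the effective write offset is EOF, so that is
	 * where a conflicting byte-range lock would matter. */
	static off_t write_lock_pos(int f_flags, off_t pos, off_t eof)
	{
		return (f_flags & O_APPEND) ? eof : pos;
	}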
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index b37570952846..18cd5650a5fc 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -270,6 +270,26 @@ cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
 		iov->iov_len = rqst->rq_pagesz;
 }
 
+static unsigned long
+rqst_len(struct smb_rqst *rqst)
+{
+	unsigned int i;
+	struct kvec *iov = rqst->rq_iov;
+	unsigned long buflen = 0;
+
+	/* total up iov array first */
+	for (i = 0; i < rqst->rq_nvec; i++)
+		buflen += iov[i].iov_len;
+
+	/* add in the page array if there is one */
+	if (rqst->rq_npages) {
+		buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
+		buflen += rqst->rq_tailsz;
+	}
+
+	return buflen;
+}
+
 static int
 smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 {
@@ -277,6 +297,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 	struct kvec *iov = rqst->rq_iov;
 	int n_vec = rqst->rq_nvec;
 	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
+	unsigned long send_length;
 	unsigned int i;
 	size_t total_len = 0, sent;
 	struct socket *ssocket = server->ssocket;
@@ -285,6 +306,14 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 	if (ssocket == NULL)
 		return -ENOTSOCK;
 
+	/* sanity check send length */
+	send_length = rqst_len(rqst);
+	if (send_length != smb_buf_length + 4) {
+		WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
+			send_length, smb_buf_length);
+		return -EIO;
+	}
+
 	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
 	dump_smb(iov[0].iov_base, iov[0].iov_len);
 
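The sanity check makes the marshalled length self-consistent: everything queued for the socket (the kvec headers, every full page, and the partial tail page) must add up to the length advertised in the RFC 1002 header plus the 4 header bytes themselves. A worked example of the arithmetic, with made-up sizes:

	/* Hypothetical numbers: two kvecs (24- and 68-byte headers) and
	 * three 4096-byte pages of which the last holds only 500 bytes. */
	unsigned long send_length = 24 + 68	/* rq_iov entries              */
		+ 4096 * (3 - 1)		/* all full pages but the last */
		+ 500;				/* rq_tailsz                   */
	/* send_length == 8784; smb_send_rqst() now returns -EIO unless
	 * this equals get_rfc1002_length(iov[0].iov_base) + 4. */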
diff --git a/fs/file.c b/fs/file.c
index db25c2bdfe46..60a45e9f5323 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -683,35 +683,65 @@ EXPORT_SYMBOL(fget_raw);
  * The fput_needed flag returned by fget_light should be passed to the
  * corresponding fput_light.
  */
-struct file *__fget_light(unsigned int fd, fmode_t mask, int *fput_needed)
+static unsigned long __fget_light(unsigned int fd, fmode_t mask)
 {
 	struct files_struct *files = current->files;
 	struct file *file;
 
-	*fput_needed = 0;
 	if (atomic_read(&files->count) == 1) {
 		file = __fcheck_files(files, fd);
-		if (file && (file->f_mode & mask))
-			file = NULL;
+		if (!file || unlikely(file->f_mode & mask))
+			return 0;
+		return (unsigned long)file;
 	} else {
 		file = __fget(fd, mask);
-		if (file)
-			*fput_needed = 1;
+		if (!file)
+			return 0;
+		return FDPUT_FPUT | (unsigned long)file;
 	}
-
-	return file;
 }
-struct file *fget_light(unsigned int fd, int *fput_needed)
+unsigned long __fdget(unsigned int fd)
 {
-	return __fget_light(fd, FMODE_PATH, fput_needed);
+	return __fget_light(fd, FMODE_PATH);
 }
-EXPORT_SYMBOL(fget_light);
+EXPORT_SYMBOL(__fdget);
 
-struct file *fget_raw_light(unsigned int fd, int *fput_needed)
+unsigned long __fdget_raw(unsigned int fd)
 {
-	return __fget_light(fd, 0, fput_needed);
+	return __fget_light(fd, 0);
+}
+
+unsigned long __fdget_pos(unsigned int fd)
+{
+	struct files_struct *files = current->files;
+	struct file *file;
+	unsigned long v;
+
+	if (atomic_read(&files->count) == 1) {
+		file = __fcheck_files(files, fd);
+		v = 0;
+	} else {
+		file = __fget(fd, 0);
+		v = FDPUT_FPUT;
+	}
+	if (!file)
+		return 0;
+
+	if (file->f_mode & FMODE_ATOMIC_POS) {
+		if (file_count(file) > 1) {
+			v |= FDPUT_POS_UNLOCK;
+			mutex_lock(&file->f_pos_lock);
+		}
+	}
+	return v | (unsigned long)file;
 }
 
+/*
+ * We only lock f_pos if we have threads or if the file might be
+ * shared with another process. In both cases we'll have an elevated
+ * file count (done either by fdget() or by fork()).
+ */
+
 void set_close_on_exec(unsigned int fd, int flag)
 {
 	struct files_struct *files = current->files;
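__fget_light() and __fdget_pos() now return the struct file pointer and the FDPUT_* flags packed into one unsigned long; this works because struct file is guaranteed at least 4-byte alignment (see the aligned(4) annotation added to include/linux/fs.h below), leaving the two low bits of every pointer free. A self-contained user-space sketch of the same trick (hypothetical types, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	#define FPUT_FLAG   1UL			/* like FDPUT_FPUT */
	#define UNLOCK_FLAG 2UL			/* like FDPUT_POS_UNLOCK */

	struct resource { int payload; } __attribute__((aligned(4)));

	/* Pack a pointer and up to two flags into one word. */
	static uintptr_t pack(struct resource *r, uintptr_t flags)
	{
		assert(((uintptr_t)r & 3) == 0);	/* alignment frees the bits */
		return (uintptr_t)r | flags;
	}

	static struct resource *unpack(uintptr_t v, uintptr_t *flags)
	{
		*flags = v & 3;
		return (struct resource *)(v & ~(uintptr_t)3);
	}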
diff --git a/fs/file_table.c b/fs/file_table.c
index 5fff9030be34..5b24008ea4f6 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -135,6 +135,7 @@ struct file *get_empty_filp(void)
 	atomic_long_set(&f->f_count, 1);
 	rwlock_init(&f->f_owner.lock);
 	spin_lock_init(&f->f_lock);
+	mutex_init(&f->f_pos_lock);
 	eventpoll_init_file(f);
 	/* f->f_version: 0 */
 	return f;
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 968ce411db53..32602c667b4a 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -103,6 +103,8 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry,
 		folder = &entry->folder;
 		memset(folder, 0, sizeof(*folder));
 		folder->type = cpu_to_be16(HFSPLUS_FOLDER);
+		if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags))
+			folder->flags |= cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT);
 		folder->id = cpu_to_be32(inode->i_ino);
 		HFSPLUS_I(inode)->create_date =
 			folder->create_date =
@@ -203,6 +205,36 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
 	return hfs_brec_find(fd, hfs_find_rec_by_key);
 }
 
+static void hfsplus_subfolders_inc(struct inode *dir)
+{
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
+
+	if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
+		/*
+		 * Increment subfolder count. Note, the value is only meaningful
+		 * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set.
+		 */
+		HFSPLUS_I(dir)->subfolders++;
+	}
+}
+
+static void hfsplus_subfolders_dec(struct inode *dir)
+{
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
+
+	if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
+		/*
+		 * Decrement subfolder count. Note, the value is only meaningful
+		 * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set.
+		 *
+		 * Check for zero. Some subfolders may have been created
+		 * by an implementation ignorant of this counter.
+		 */
+		if (HFSPLUS_I(dir)->subfolders)
+			HFSPLUS_I(dir)->subfolders--;
+	}
+}
+
 int hfsplus_create_cat(u32 cnid, struct inode *dir,
 		       struct qstr *str, struct inode *inode)
 {
@@ -247,6 +279,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
 		goto err1;
 
 	dir->i_size++;
+	if (S_ISDIR(inode->i_mode))
+		hfsplus_subfolders_inc(dir);
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
 	hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
 
@@ -336,6 +370,8 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
 		goto out;
 
 	dir->i_size--;
+	if (type == HFSPLUS_FOLDER)
+		hfsplus_subfolders_dec(dir);
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
 	hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
 
@@ -380,6 +416,7 @@ int hfsplus_rename_cat(u32 cnid,
 
 	hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
 				src_fd.entrylength);
+	type = be16_to_cpu(entry.type);
 
 	/* create new dir entry with the data from the old entry */
 	hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name);
@@ -394,6 +431,8 @@ int hfsplus_rename_cat(u32 cnid,
 	if (err)
 		goto out;
 	dst_dir->i_size++;
+	if (type == HFSPLUS_FOLDER)
+		hfsplus_subfolders_inc(dst_dir);
 	dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC;
 
 	/* finally remove the old entry */
@@ -405,6 +444,8 @@ int hfsplus_rename_cat(u32 cnid,
 	if (err)
 		goto out;
 	src_dir->i_size--;
+	if (type == HFSPLUS_FOLDER)
+		hfsplus_subfolders_dec(src_dir);
 	src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC;
 
 	/* remove old thread entry */
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 08846425b67f..62d571eb69ba 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -242,6 +242,7 @@ struct hfsplus_inode_info {
 	 */
 	sector_t fs_blocks;
 	u8 userflags;		/* BSD user file flags */
+	u32 subfolders;		/* Subfolder count (HFSX only) */
 	struct list_head open_dir_list;
 	loff_t phys_size;
 
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index 8ffb3a8ffe75..5a126828d85e 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -261,7 +261,7 @@ struct hfsplus_cat_folder {
 	struct DInfo user_info;
 	struct DXInfo finder_info;
 	__be32 text_encoding;
-	u32 reserved;
+	__be32 subfolders;	/* Subfolder count in HFSX. Reserved in HFS+. */
 } __packed;
 
 /* HFS file info (stolen from hfs.h) */
@@ -301,11 +301,13 @@ struct hfsplus_cat_file {
 	struct hfsplus_fork_raw rsrc_fork;
 } __packed;
 
-/* File attribute bits */
+/* File and folder flag bits */
 #define HFSPLUS_FILE_LOCKED		0x0001
 #define HFSPLUS_FILE_THREAD_EXISTS	0x0002
 #define HFSPLUS_XATTR_EXISTS		0x0004
 #define HFSPLUS_ACL_EXISTS		0x0008
+#define HFSPLUS_HAS_FOLDER_COUNT	0x0010	/* Folder has subfolder count
+						 * (HFSX only) */
 
 /* HFS+ catalog thread (part of a cat_entry) */
 struct hfsplus_cat_thread {
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index fa929f325f87..a4f45bd88a63 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -375,6 +375,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode)
 	hip->extent_state = 0;
 	hip->flags = 0;
 	hip->userflags = 0;
+	hip->subfolders = 0;
 	memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
 	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
 	hip->alloc_blocks = 0;
@@ -494,6 +495,10 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
 		inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
 		HFSPLUS_I(inode)->create_date = folder->create_date;
 		HFSPLUS_I(inode)->fs_blocks = 0;
+		if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
+			HFSPLUS_I(inode)->subfolders =
+				be32_to_cpu(folder->subfolders);
+		}
 		inode->i_op = &hfsplus_dir_inode_operations;
 		inode->i_fop = &hfsplus_dir_operations;
 	} else if (type == HFSPLUS_FILE) {
@@ -566,6 +571,10 @@ int hfsplus_cat_write_inode(struct inode *inode)
 		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
 		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
 		folder->valence = cpu_to_be32(inode->i_size - 2);
+		if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
+			folder->subfolders =
+				cpu_to_be32(HFSPLUS_I(inode)->subfolders);
+		}
 		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
 					sizeof(struct hfsplus_cat_folder));
 	} else if (HFSPLUS_IS_RSRC(inode)) {
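Note the byte-order discipline: the on-disk flag word stays big-endian in memory, so the flag constant is converted once with cpu_to_be16() rather than converting the field on every test, and the counter itself is swapped only at the read/write boundary. A small stand-alone sketch of the same pattern (hypothetical record layout; the helpers assume a little-endian host and GCC-style builtins):

	#include <stdint.h>

	#define HAS_FOLDER_COUNT 0x0010

	/* Fields stay in disk (big-endian) byte order while in memory. */
	struct disk_folder {
		uint16_t flags;
		uint32_t subfolders;	/* valid only when flagged */
	};

	static uint16_t to_be16(uint16_t v)   { return (uint16_t)((v << 8) | (v >> 8)); }
	static uint32_t from_be32(uint32_t v) { return __builtin_bswap32(v); }

	static uint32_t read_subfolders(const struct disk_folder *f)
	{
		/* convert the constant once; compare in disk byte order */
		if (f->flags & to_be16(HAS_FOLDER_COUNT))
			return from_be32(f->subfolders);
		return 0;
	}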
diff --git a/fs/namei.c b/fs/namei.c
index 385f7817bfcc..2f730ef9b4b3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1884,7 +1884,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
 
 		nd->path = f.file->f_path;
 		if (flags & LOOKUP_RCU) {
-			if (f.need_put)
+			if (f.flags & FDPUT_FPUT)
 				*fp = f.file;
 			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
 			rcu_read_lock();
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index ef792f29f831..5d8ccecf5f5c 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -659,16 +659,19 @@ int nfs_async_inode_return_delegation(struct inode *inode,
 
 	rcu_read_lock();
 	delegation = rcu_dereference(NFS_I(inode)->delegation);
+	if (delegation == NULL)
+		goto out_enoent;
 
-	if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) {
-		rcu_read_unlock();
-		return -ENOENT;
-	}
+	if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
+		goto out_enoent;
 	nfs_mark_return_delegation(server, delegation);
 	rcu_read_unlock();
 
 	nfs_delegation_run_state_manager(clp);
 	return 0;
+out_enoent:
+	rcu_read_unlock();
+	return -ENOENT;
 }
673 676
674static struct inode * 677static struct inode *
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 12c8132ad408..b9a35c05b60f 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -324,8 +324,9 @@ static void filelayout_read_prepare(struct rpc_task *task, void *data)
 				&rdata->res.seq_res,
 				task))
 		return;
-	nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context,
-			rdata->args.lock_context, FMODE_READ);
+	if (nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context,
+			rdata->args.lock_context, FMODE_READ) == -EIO)
+		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
 }
 
 static void filelayout_read_call_done(struct rpc_task *task, void *data)
@@ -435,8 +436,9 @@ static void filelayout_write_prepare(struct rpc_task *task, void *data)
 				&wdata->res.seq_res,
 				task))
 		return;
-	nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context,
-			wdata->args.lock_context, FMODE_WRITE);
+	if (nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context,
+			wdata->args.lock_context, FMODE_WRITE) == -EIO)
+		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
 }
 
 static void filelayout_write_call_done(struct rpc_task *task, void *data)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 2da6a698b8f7..450bfedbe2f4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2398,13 +2398,16 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
 
 	if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
 		/* Use that stateid */
-	} else if (truncate && state != NULL && nfs4_valid_open_stateid(state)) {
+	} else if (truncate && state != NULL) {
 		struct nfs_lockowner lockowner = {
 			.l_owner = current->files,
 			.l_pid = current->tgid,
 		};
-		nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
-				&lockowner);
+		if (!nfs4_valid_open_stateid(state))
+			return -EBADF;
+		if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
+				&lockowner) == -EIO)
+			return -EBADF;
 	} else
 		nfs4_stateid_copy(&arg.stateid, &zero_stateid);
 
@@ -4011,8 +4014,9 @@ static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
 {
 	nfs4_stateid current_stateid;
 
-	if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode))
-		return false;
+	/* If the current stateid represents a lost lock, then exit */
+	if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
+		return true;
 	return nfs4_stateid_match(stateid, &current_stateid);
 }
 
@@ -5828,8 +5832,7 @@ struct nfs_release_lockowner_data {
 	struct nfs4_lock_state *lsp;
 	struct nfs_server *server;
 	struct nfs_release_lockowner_args args;
-	struct nfs4_sequence_args seq_args;
-	struct nfs4_sequence_res seq_res;
+	struct nfs_release_lockowner_res res;
 	unsigned long timestamp;
 };
 
@@ -5837,7 +5840,7 @@ static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata
 {
 	struct nfs_release_lockowner_data *data = calldata;
 	nfs40_setup_sequence(data->server,
-			&data->seq_args, &data->seq_res, task);
+			&data->args.seq_args, &data->res.seq_res, task);
 	data->timestamp = jiffies;
 }
 
@@ -5846,7 +5849,7 @@ static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
 	struct nfs_release_lockowner_data *data = calldata;
 	struct nfs_server *server = data->server;
 
-	nfs40_sequence_done(task, &data->seq_res);
+	nfs40_sequence_done(task, &data->res.seq_res);
 
 	switch (task->tk_status) {
 	case 0:
@@ -5887,7 +5890,6 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st
 	data = kmalloc(sizeof(*data), GFP_NOFS);
 	if (!data)
 		return -ENOMEM;
-	nfs4_init_sequence(&data->seq_args, &data->seq_res, 0);
 	data->lsp = lsp;
 	data->server = server;
 	data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
@@ -5895,6 +5897,8 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st
 	data->args.lock_owner.s_dev = server->s_dev;
 
 	msg.rpc_argp = &data->args;
+	msg.rpc_resp = &data->res;
+	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
 	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
 	return 0;
 }
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e1a47217c05e..0deb32105ccf 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -974,9 +974,6 @@ static int nfs4_copy_lock_stateid(nfs4_stateid *dst,
 	else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) {
 		nfs4_stateid_copy(dst, &lsp->ls_stateid);
 		ret = 0;
-		smp_rmb();
-		if (!list_empty(&lsp->ls_seqid.list))
-			ret = -EWOULDBLOCK;
 	}
 	spin_unlock(&state->state_lock);
 	nfs4_put_lock_state(lsp);
@@ -984,10 +981,9 @@ out:
 	return ret;
 }
 
-static int nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
+static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
 {
 	const nfs4_stateid *src;
-	int ret;
 	int seq;
 
 	do {
@@ -996,12 +992,7 @@ static int nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
 		if (test_bit(NFS_OPEN_STATE, &state->flags))
 			src = &state->open_stateid;
 		nfs4_stateid_copy(dst, src);
-		ret = 0;
-		smp_rmb();
-		if (!list_empty(&state->owner->so_seqid.list))
-			ret = -EWOULDBLOCK;
 	} while (read_seqretry(&state->seqlock, seq));
-	return ret;
 }
 
 /*
@@ -1026,7 +1017,8 @@ int nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
 		 * choose to use.
 		 */
 		goto out;
-	ret = nfs4_copy_open_stateid(dst, state);
+	nfs4_copy_open_stateid(dst, state);
+	ret = 0;
 out:
 	if (nfs_server_capable(state->inode, NFS_CAP_STATEID_NFSV41))
 		dst->seqid = 0;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 8450262bcf2a..51632c40e896 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2393,8 +2393,8 @@ out_dio:
 
 	if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
 	    ((file->f_flags & O_DIRECT) && !direct_io)) {
-		ret = filemap_fdatawrite_range(file->f_mapping, pos,
-					       pos + count - 1);
+		ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
+					       *ppos + count - 1);
 		if (ret < 0)
 			written = ret;
 
@@ -2407,8 +2407,8 @@ out_dio:
 	}
 
 	if (!ret)
-		ret = filemap_fdatawait_range(file->f_mapping, pos,
-					      pos + count - 1);
+		ret = filemap_fdatawait_range(file->f_mapping, *ppos,
+					      *ppos + count - 1);
 	}
 
 	/*
diff --git a/fs/open.c b/fs/open.c
index 4b3e1edf2fe4..b9ed8b25c108 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -705,6 +705,10 @@ static int do_dentry_open(struct file *f,
 		return 0;
 	}
 
+	/* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */
+	if (S_ISREG(inode->i_mode))
+		f->f_mode |= FMODE_ATOMIC_POS;
+
 	f->f_op = fops_get(inode->i_fop);
 	if (unlikely(WARN_ON(!f->f_op))) {
 		error = -ENODEV;
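Only regular files are marked FMODE_ATOMIC_POS: the cited POSIX atomic-position requirement concerns byte offsets in seekable files, while pipes, sockets, and most character devices have no meaningful shared position. Whether the f_pos mutex is actually taken is then decided per syscall in __fdget_pos(). A trivial sketch of the eligibility test (hypothetical helper):

	#include <sys/stat.h>

	/* Mirrors the check above: the guarantee applies to regular
	 * files only; everything else keeps the lockless path. */
	static int wants_atomic_pos(mode_t mode)
	{
		return S_ISREG(mode);
	}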
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 51507065263b..b9760628e1fd 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1824,6 +1824,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
 	if (rc)
 		goto out_mmput;
 
+	rc = -ENOENT;
 	down_read(&mm->mmap_sem);
 	vma = find_exact_vma(mm, vm_start, vm_end);
 	if (vma && vma->vm_file) {
diff --git a/fs/read_write.c b/fs/read_write.c
index edc5746a902a..54e19b9392dc 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -264,10 +264,22 @@ loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
 }
 EXPORT_SYMBOL(vfs_llseek);
 
+static inline struct fd fdget_pos(int fd)
+{
+	return __to_fd(__fdget_pos(fd));
+}
+
+static inline void fdput_pos(struct fd f)
+{
+	if (f.flags & FDPUT_POS_UNLOCK)
+		mutex_unlock(&f.file->f_pos_lock);
+	fdput(f);
+}
+
 SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
 {
 	off_t retval;
-	struct fd f = fdget(fd);
+	struct fd f = fdget_pos(fd);
 	if (!f.file)
 		return -EBADF;
 
@@ -278,7 +290,7 @@ SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
 		if (res != (loff_t)retval)
 			retval = -EOVERFLOW;	/* LFS: should only happen on 32 bit platforms */
 	}
-	fdput(f);
+	fdput_pos(f);
 	return retval;
 }
 
@@ -498,7 +510,7 @@ static inline void file_pos_write(struct file *file, loff_t pos)
 
 SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
 {
-	struct fd f = fdget(fd);
+	struct fd f = fdget_pos(fd);
 	ssize_t ret = -EBADF;
 
 	if (f.file) {
@@ -506,7 +518,7 @@ SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
 		ret = vfs_read(f.file, buf, count, &pos);
 		if (ret >= 0)
 			file_pos_write(f.file, pos);
-		fdput(f);
+		fdput_pos(f);
 	}
 	return ret;
 }
@@ -514,7 +526,7 @@ SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
 SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
 		size_t, count)
 {
-	struct fd f = fdget(fd);
+	struct fd f = fdget_pos(fd);
 	ssize_t ret = -EBADF;
 
 	if (f.file) {
@@ -522,7 +534,7 @@ SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
 		ret = vfs_write(f.file, buf, count, &pos);
 		if (ret >= 0)
 			file_pos_write(f.file, pos);
-		fdput(f);
+		fdput_pos(f);
 	}
 
 	return ret;
@@ -797,7 +809,7 @@ EXPORT_SYMBOL(vfs_writev);
 SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
 		unsigned long, vlen)
 {
-	struct fd f = fdget(fd);
+	struct fd f = fdget_pos(fd);
 	ssize_t ret = -EBADF;
 
 	if (f.file) {
@@ -805,7 +817,7 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
 		ret = vfs_readv(f.file, vec, vlen, &pos);
 		if (ret >= 0)
 			file_pos_write(f.file, pos);
-		fdput(f);
+		fdput_pos(f);
 	}
 
 	if (ret > 0)
@@ -817,7 +829,7 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
 SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
 		unsigned long, vlen)
 {
-	struct fd f = fdget(fd);
+	struct fd f = fdget_pos(fd);
 	ssize_t ret = -EBADF;
 
 	if (f.file) {
@@ -825,7 +837,7 @@ SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
 		ret = vfs_writev(f.file, vec, vlen, &pos);
 		if (ret >= 0)
 			file_pos_write(f.file, pos);
-		fdput(f);
+		fdput_pos(f);
 	}
 
 	if (ret > 0)
@@ -968,7 +980,7 @@ COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
 		const struct compat_iovec __user *,vec,
 		compat_ulong_t, vlen)
 {
-	struct fd f = fdget(fd);
+	struct fd f = fdget_pos(fd);
 	ssize_t ret;
 	loff_t pos;
 
@@ -978,7 +990,7 @@ COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
 	ret = compat_readv(f.file, vec, vlen, &pos);
 	if (ret >= 0)
 		f.file->f_pos = pos;
-	fdput(f);
+	fdput_pos(f);
 	return ret;
 }
 
@@ -1035,7 +1047,7 @@ COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
 		const struct compat_iovec __user *, vec,
 		compat_ulong_t, vlen)
 {
-	struct fd f = fdget(fd);
+	struct fd f = fdget_pos(fd);
 	ssize_t ret;
 	loff_t pos;
 
@@ -1045,7 +1057,7 @@ COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
 	ret = compat_writev(f.file, vec, vlen, &pos);
 	if (ret >= 0)
 		f.file->f_pos = pos;
-	fdput(f);
+	fdput_pos(f);
 	return ret;
 }
 
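Every syscall that reads or advances f_pos is now bracketed by fdget_pos()/fdput_pos(): the lookup returns, in one word, the file pointer plus flags saying whether a reference was taken and whether f_pos_lock was acquired, and fdput_pos() undoes exactly that. A self-contained user-space analogue of the bracket (hypothetical types, pthread-based):

	#include <pthread.h>

	/* Stand-in for struct file: lock the position only when shared. */
	struct pfile {
		pthread_mutex_t pos_lock;
		long pos;
		int refcount;		/* >1: another thread may race on pos */
	};

	static int pfile_pos_lock(struct pfile *f)
	{
		if (f->refcount > 1) {	/* mirrors file_count(file) > 1 */
			pthread_mutex_lock(&f->pos_lock);
			return 1;	/* caller must unlock (FDPUT_POS_UNLOCK) */
		}
		return 0;
	}

	static void pfile_pos_unlock(struct pfile *f, int locked)
	{
		if (locked)
			pthread_mutex_unlock(&f->pos_lock);
	}

	/* Usage: a read-like operation that needs a stable position. */
	static long pfile_consume(struct pfile *f, long n)
	{
		int locked = pfile_pos_lock(f);
		long at = f->pos;
		f->pos = at + n;
		pfile_pos_unlock(f, locked);
		return at;
	}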
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index be85127bfed3..f27000f55a83 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -171,6 +171,11 @@ static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 add
 	return 0;
 }
 
+static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
+{
+	return -ENXIO;
+}
+
 static inline int kvm_vgic_init(struct kvm *kvm)
 {
 	return 0;
diff --git a/include/linux/audit.h b/include/linux/audit.h
index aa865a9a4c4f..ec1464df4c60 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -43,6 +43,7 @@ struct mq_attr;
 struct mqstat;
 struct audit_watch;
 struct audit_tree;
+struct sk_buff;
 
 struct audit_krule {
 	int			vers_ops;
@@ -463,7 +464,7 @@ extern int audit_filter_user(int type);
 extern int audit_filter_type(int type);
 extern int audit_rule_change(int type, __u32 portid, int seq,
 				void *data, size_t datasz);
-extern int audit_list_rules_send(__u32 portid, int seq);
+extern int audit_list_rules_send(struct sk_buff *request_skb, int seq);
 
 extern u32 audit_enabled;
 #else /* CONFIG_AUDIT */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 18ba8a627f46..2ff2e8d982be 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -121,8 +121,7 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 
-void blk_mq_insert_request(struct request_queue *, struct request *,
-		bool, bool);
+void blk_mq_insert_request(struct request *, bool, bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
@@ -134,7 +133,13 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_ind
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
 void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
 
-void blk_mq_end_io(struct request *rq, int error);
+bool blk_mq_end_io_partial(struct request *rq, int error,
+		unsigned int nr_bytes);
+static inline void blk_mq_end_io(struct request *rq, int error)
+{
+	bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));
+	BUG_ON(!done);
+}
 
 void blk_mq_complete_request(struct request *rq);
 
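Per the inline wrapper, blk_mq_end_io_partial() returns true while the request still has bytes outstanding and false once it is fully completed, which is why completing blk_rq_bytes(rq) worth of data must yield false (hence the BUG_ON). A self-contained sketch of that return-value contract (hypothetical, not the kernel implementation):

	#include <stdbool.h>

	struct req { unsigned int bytes_left; };

	/* Retire nr bytes; return true while work remains, false when
	 * the request is fully completed, matching the wrapper above. */
	static bool end_io_partial(struct req *rq, unsigned int nr)
	{
		rq->bytes_left -= (nr < rq->bytes_left) ? nr : rq->bytes_left;
		return rq->bytes_left != 0;
	}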
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 092b64168d7f..4a21a872dbbd 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -245,6 +245,10 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
 void omap2_init_clk_clkdm(struct clk_hw *clk);
 unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
 				    unsigned long parent_rate);
+int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
+			    unsigned long parent_rate);
+long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
+			       unsigned long *prate);
 int omap2_clkops_enable_clkdm(struct clk_hw *hw);
 void omap2_clkops_disable_clkdm(struct clk_hw *hw);
 int omap2_clk_disable_autoidle_all(void);
diff --git a/include/linux/file.h b/include/linux/file.h
index cbacf4faf447..4d69123377a2 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -28,33 +28,36 @@ static inline void fput_light(struct file *file, int fput_needed)
 
 struct fd {
 	struct file *file;
-	int need_put;
+	unsigned int flags;
 };
+#define FDPUT_FPUT       1
+#define FDPUT_POS_UNLOCK 2
 
 static inline void fdput(struct fd fd)
 {
-	if (fd.need_put)
+	if (fd.flags & FDPUT_FPUT)
 		fput(fd.file);
 }
 
 extern struct file *fget(unsigned int fd);
-extern struct file *fget_light(unsigned int fd, int *fput_needed);
+extern struct file *fget_raw(unsigned int fd);
+extern unsigned long __fdget(unsigned int fd);
+extern unsigned long __fdget_raw(unsigned int fd);
+extern unsigned long __fdget_pos(unsigned int fd);
 
-static inline struct fd fdget(unsigned int fd)
+static inline struct fd __to_fd(unsigned long v)
 {
-	int b;
-	struct file *f = fget_light(fd, &b);
-	return (struct fd){f,b};
+	return (struct fd){(struct file *)(v & ~3),v & 3};
 }
 
-extern struct file *fget_raw(unsigned int fd);
-extern struct file *fget_raw_light(unsigned int fd, int *fput_needed);
+static inline struct fd fdget(unsigned int fd)
+{
+	return __to_fd(__fdget(fd));
+}
 
 static inline struct fd fdget_raw(unsigned int fd)
 {
-	int b;
-	struct file *f = fget_raw_light(fd, &b);
-	return (struct fd){f,b};
+	return __to_fd(__fdget_raw(fd));
 }
 
 extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 5d7782e42b8f..c3683bdf28fe 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -200,6 +200,7 @@ struct fw_device {
 	unsigned irmc:1;
 	unsigned bc_implemented:2;
 
+	work_func_t workfn;
 	struct delayed_work work;
 	struct fw_attribute_group attribute_group;
 };
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 60829565e552..23b2a35d712e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -123,6 +123,9 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 /* File is opened with O_PATH; almost nothing can be done with it */
 #define FMODE_PATH		((__force fmode_t)0x4000)
 
+/* File needs atomic accesses to f_pos */
+#define FMODE_ATOMIC_POS	((__force fmode_t)0x8000)
+
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY		((__force fmode_t)0x1000000)
 
@@ -780,13 +783,14 @@ struct file {
 	const struct file_operations	*f_op;
 
 	/*
-	 * Protects f_ep_links, f_flags, f_pos vs i_size in lseek SEEK_CUR.
+	 * Protects f_ep_links, f_flags.
 	 * Must not be taken from IRQ context.
 	 */
 	spinlock_t		f_lock;
 	atomic_long_t		f_count;
 	unsigned int 		f_flags;
 	fmode_t			f_mode;
+	struct mutex		f_pos_lock;
 	loff_t			f_pos;
 	struct fown_struct	f_owner;
 	const struct cred	*f_cred;
@@ -808,7 +812,7 @@ struct file {
 #ifdef CONFIG_DEBUG_WRITECOUNT
 	unsigned long f_mnt_write_state;
 #endif
-};
+} __attribute__((aligned(4)));	/* lest something weird decides that 2 is OK */
 
 struct file_handle {
 	__u32 handle_bytes;
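The aligned(4) annotation is what makes the fd packing in include/linux/file.h sound: with at least 4-byte alignment, every struct file pointer has two zero low bits to spare for FDPUT_FPUT and FDPUT_POS_UNLOCK. A compile-time sketch of the invariant (hypothetical type, C11):

	/* If some ABI ever made 2-byte alignment legal here, this would
	 * fail to build instead of silently corrupting packed fd words. */
	struct packed_target { char c; } __attribute__((aligned(4)));

	_Static_assert(_Alignof(struct packed_target) >= 4,
		       "need two spare low bits for FDPUT_* flags");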
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0437439bc047..39b81dc7d01a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -123,6 +123,10 @@ struct vm_area_struct;
 			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
 			 __GFP_NO_KSWAPD)
 
+/*
+ * GFP_THISNODE does not perform any reclaim, you most likely want to
+ * use __GFP_THISNODE to allocate from a given node without fallback!
+ */
 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
 #else
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5f2052c83154..9b61b9bf81ac 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -590,10 +590,10 @@ static inline bool zone_is_empty(struct zone *zone)
 
 /*
  * The NUMA zonelists are doubled because we need zonelists that restrict the
- * allocations to a single node for GFP_THISNODE.
+ * allocations to a single node for __GFP_THISNODE.
  *
  * [0]	: Zonelist with fallback
- * [1]	: No fallback (GFP_THISNODE)
+ * [1]	: No fallback (__GFP_THISNODE)
  */
 #define MAX_ZONELISTS 2
 
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index b2fb167b2e6d..5624e4e2763c 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -467,9 +467,14 @@ struct nfs_lockt_res {
 };
 
 struct nfs_release_lockowner_args {
+	struct nfs4_sequence_args	seq_args;
 	struct nfs_lowner	lock_owner;
 };
 
+struct nfs_release_lockowner_res {
+	struct nfs4_sequence_res	seq_res;
+};
+
 struct nfs4_delegreturnargs {
 	struct nfs4_sequence_args seq_args;
 	const struct nfs_fh *fhandle;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9260abdd67df..b5b2df60299e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -410,7 +410,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  *
  * %GFP_NOWAIT - Allocation will not sleep.
  *
- * %GFP_THISNODE - Allocate node-local memory only.
+ * %__GFP_THISNODE - Allocate node-local memory only.
  *
  * %GFP_DMA - Allocation suitable for DMA.
  *   Should only be used for kmalloc() caches. Otherwise, use a
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index accc497f8d72..7159a0a933df 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -60,6 +60,12 @@ struct tp_module {
 	unsigned int num_tracepoints;
 	struct tracepoint * const *tracepoints_ptrs;
 };
+bool trace_module_has_bad_taint(struct module *mod);
+#else
+static inline bool trace_module_has_bad_taint(struct module *mod)
+{
+	return false;
+}
 #endif /* CONFIG_MODULES */
 
 struct tracepoint_iter {
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index ae5a17111968..4483fadfa68d 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -12,6 +12,7 @@ struct iscsit_transport {
 	int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
 	int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
 	void (*iscsit_free_np)(struct iscsi_np *);
+	void (*iscsit_wait_conn)(struct iscsi_conn *);
 	void (*iscsit_free_conn)(struct iscsi_conn *);
 	int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
 	int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index ddc179b7a105..1fef3e6e9436 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -83,7 +83,7 @@ DECLARE_EVENT_CLASS(rpc_task_running,
 	),
 
 	TP_fast_assign(
-		__entry->client_id = clnt->cl_clid;
+		__entry->client_id = clnt ? clnt->cl_clid : -1;
 		__entry->task_id = task->tk_pid;
 		__entry->action = action;
 		__entry->runstate = task->tk_runstate;
@@ -91,7 +91,7 @@ DECLARE_EVENT_CLASS(rpc_task_running,
 		__entry->flags = task->tk_flags;
 	),
 
-	TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d action=%pf",
+	TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf",
 		__entry->task_id, __entry->client_id,
 		__entry->flags,
 		__entry->runstate,
diff --git a/kernel/audit.c b/kernel/audit.c
index 34c5a2310fbf..3392d3e0254a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -182,7 +182,7 @@ struct audit_buffer {
 
 struct audit_reply {
 	__u32 portid;
-	pid_t pid;
+	struct net *net;
 	struct sk_buff *skb;
 };
 
@@ -500,7 +500,7 @@ int audit_send_list(void *_dest)
 {
 	struct audit_netlink_list *dest = _dest;
 	struct sk_buff *skb;
-	struct net *net = get_net_ns_by_pid(dest->pid);
+	struct net *net = dest->net;
 	struct audit_net *aunet = net_generic(net, audit_net_id);
 
 	/* wait for parent to finish and send an ACK */
@@ -510,6 +510,7 @@ int audit_send_list(void *_dest)
 	while ((skb = __skb_dequeue(&dest->q)) != NULL)
 		netlink_unicast(aunet->nlsk, skb, dest->portid, 0);
 
+	put_net(net);
 	kfree(dest);
 
 	return 0;
@@ -543,7 +544,7 @@ out_kfree_skb:
 static int audit_send_reply_thread(void *arg)
 {
 	struct audit_reply *reply = (struct audit_reply *)arg;
-	struct net *net = get_net_ns_by_pid(reply->pid);
+	struct net *net = reply->net;
 	struct audit_net *aunet = net_generic(net, audit_net_id);
 
 	mutex_lock(&audit_cmd_mutex);
@@ -552,12 +553,13 @@ static int audit_send_reply_thread(void *arg)
 	/* Ignore failure. It'll only happen if the sender goes away,
 	   because our timeout is set to infinite. */
 	netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0);
+	put_net(net);
 	kfree(reply);
 	return 0;
 }
 /**
  * audit_send_reply - send an audit reply message via netlink
- * @portid: netlink port to which to send reply
+ * @request_skb: skb of request we are replying to (used to target the reply)
  * @seq: sequence number
  * @type: audit message type
  * @done: done (last) flag
@@ -568,9 +570,11 @@ static int audit_send_reply_thread(void *arg)
 * Allocates an skb, builds the netlink message, and sends it to the port id.
 * No failure notifications.
 */
-static void audit_send_reply(__u32 portid, int seq, int type, int done,
+static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done,
			     int multi, const void *payload, int size)
 {
+	u32 portid = NETLINK_CB(request_skb).portid;
+	struct net *net = sock_net(NETLINK_CB(request_skb).sk);
 	struct sk_buff *skb;
 	struct task_struct *tsk;
 	struct audit_reply *reply = kmalloc(sizeof(struct audit_reply),
@@ -583,8 +587,8 @@ static void audit_send_reply(__u32 portid, int seq, int type, int done,
 	if (!skb)
 		goto out;
 
+	reply->net = get_net(net);
 	reply->portid = portid;
-	reply->pid = task_pid_vnr(current);
 	reply->skb = skb;
 
 	tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
@@ -673,8 +677,7 @@ static int audit_get_feature(struct sk_buff *skb)
 
 	seq = nlmsg_hdr(skb)->nlmsg_seq;
 
-	audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
-			 &af, sizeof(af));
+	audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &af, sizeof(af));
 
 	return 0;
 }
@@ -794,8 +797,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		s.backlog		= skb_queue_len(&audit_skb_queue);
 		s.version		= AUDIT_VERSION_LATEST;
 		s.backlog_wait_time	= audit_backlog_wait_time;
-		audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
-				 &s, sizeof(s));
+		audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
 		break;
 	}
 	case AUDIT_SET: {
@@ -905,7 +907,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 				   seq, data, nlmsg_len(nlh));
 		break;
 	case AUDIT_LIST_RULES:
-		err = audit_list_rules_send(NETLINK_CB(skb).portid, seq);
+		err = audit_list_rules_send(skb, seq);
 		break;
 	case AUDIT_TRIM:
 		audit_trim_trees();
@@ -970,8 +972,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 			memcpy(sig_data->ctx, ctx, len);
 			security_release_secctx(ctx, len);
 		}
-		audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO,
-				 0, 0, sig_data, sizeof(*sig_data) + len);
+		audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0,
+				 sig_data, sizeof(*sig_data) + len);
 		kfree(sig_data);
 		break;
 	case AUDIT_TTY_GET: {
@@ -983,8 +985,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		s.log_passwd = tsk->signal->audit_tty_log_passwd;
 		spin_unlock(&tsk->sighand->siglock);
 
-		audit_send_reply(NETLINK_CB(skb).portid, seq,
-				 AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
+		audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
 		break;
 	}
 	case AUDIT_TTY_SET: {
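The reply is assembled in the netlink receive path but delivered from a kthread, so the net namespace has to be pinned with get_net() before the handler returns and released with put_net() in the thread; deriving it from the request skb's socket also targets the sender's namespace directly, where the old get_net_ns_by_pid() lookup could mis-resolve. A self-contained sketch of the pin/hand-off/release pattern (hypothetical user-space analogue):

	#include <pthread.h>
	#include <stdlib.h>

	struct ctx { int refs; };	/* stand-in for struct net */

	static struct ctx *ctx_get(struct ctx *c) { __sync_fetch_and_add(&c->refs, 1); return c; }
	static void ctx_put(struct ctx *c) { if (__sync_sub_and_fetch(&c->refs, 1) == 0) free(c); }

	struct reply { struct ctx *ctx; /* ... payload ... */ };

	static void *reply_thread(void *arg)
	{
		struct reply *r = arg;
		/* ... deliver using r->ctx ... */
		ctx_put(r->ctx);	/* drop the pin taken by the requester */
		free(r);
		return NULL;
	}

	static void send_reply(struct ctx *request_ctx)
	{
		struct reply *r = malloc(sizeof(*r));
		pthread_t t;

		if (!r)
			return;
		r->ctx = ctx_get(request_ctx);	/* pin before handing off */
		if (pthread_create(&t, NULL, reply_thread, r) == 0)
			pthread_detach(t);
	}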
diff --git a/kernel/audit.h b/kernel/audit.h
index 57cc64d67718..8df132214606 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -247,7 +247,7 @@ extern void audit_panic(const char *message);
 
 struct audit_netlink_list {
 	__u32 portid;
-	pid_t pid;
+	struct net *net;
 	struct sk_buff_head q;
 };
 
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 14a78cca384e..92062fd6cc8c 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -29,6 +29,8 @@
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/security.h> 31#include <linux/security.h>
32#include <net/net_namespace.h>
33#include <net/sock.h>
32#include "audit.h" 34#include "audit.h"
33 35
34/* 36/*
@@ -1065,11 +1067,13 @@ int audit_rule_change(int type, __u32 portid, int seq, void *data,
 
 /**
  * audit_list_rules_send - list the audit rules
- * @portid: target portid for netlink audit messages
+ * @request_skb: skb of request we are replying to (used to target the reply)
  * @seq: netlink audit message sequence (serial) number
  */
-int audit_list_rules_send(__u32 portid, int seq)
+int audit_list_rules_send(struct sk_buff *request_skb, int seq)
 {
+	u32 portid = NETLINK_CB(request_skb).portid;
+	struct net *net = sock_net(NETLINK_CB(request_skb).sk);
 	struct task_struct *tsk;
 	struct audit_netlink_list *dest;
 	int err = 0;
@@ -1083,8 +1087,8 @@ int audit_list_rules_send(__u32 portid, int seq)
 	dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL);
 	if (!dest)
 		return -ENOMEM;
+	dest->net = get_net(net);
 	dest->portid = portid;
-	dest->pid = task_pid_vnr(current);
 	skb_queue_head_init(&dest->q);
 
 	mutex_lock(&audit_filter_mutex);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4410ac6a55f1..e6b1b66afe52 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
  * Temporarilly set tasks mems_allowed to target nodes of migration,
  * so that the migration code can allocate pages on these nodes.
  *
- * Call holding cpuset_mutex, so current's cpuset won't change
- * during this call, as manage_mutex holds off any cpuset_attach()
- * calls. Therefore we don't need to take task_lock around the
- * call to guarantee_online_mems(), as we know no one is changing
- * our task's cpuset.
- *
  * While the mm_struct we are migrating is typically from some
  * other task, the task_struct mems_allowed that we are hacking
  * is for our current task, which must allocate new pages for that
@@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 
 	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 
+	rcu_read_lock();
 	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
 	guarantee_online_mems(mems_cs, &tsk->mems_allowed);
+	rcu_read_unlock();
 }
 
 /*
@@ -2486,9 +2482,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 
 	task_lock(current);
 	cs = nearest_hardwall_ancestor(task_cs(current));
+	allowed = node_isset(node, cs->mems_allowed);
 	task_unlock(current);
 
-	allowed = node_isset(node, cs->mems_allowed);
 	mutex_unlock(&callback_mutex);
 	return allowed;
 }
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index cf68bb36fe58..f14033700c25 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -10,6 +10,7 @@
 #include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/topology.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 481a13c43b17..d3bf660cb57f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -802,8 +802,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 
 static void wake_threads_waitq(struct irq_desc *desc)
 {
-	if (atomic_dec_and_test(&desc->threads_active) &&
-	    waitqueue_active(&desc->wait_for_threads))
+	if (atomic_dec_and_test(&desc->threads_active))
 		wake_up(&desc->wait_for_threads);
 }
 
diff --git a/kernel/profile.c b/kernel/profile.c
index 6631e1ef55ab..ebdd9c1a86b4 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -549,14 +549,14 @@ static int create_hash_tables(void)
 		struct page *page;
 
 		page = alloc_pages_exact_node(node,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				0);
 		if (!page)
 			goto out_cleanup;
 		per_cpu(cpu_profile_hits, cpu)[1]
 			= (struct profile_hit *)page_address(page);
 		page = alloc_pages_exact_node(node,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				0);
 		if (!page)
 			goto out_cleanup;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e71ffd4eccb5..f3989ceb5cd5 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1777,6 +1777,16 @@ static void trace_module_add_events(struct module *mod)
 {
 	struct ftrace_event_call **call, **start, **end;
 
+	if (!mod->num_trace_events)
+		return;
+
+	/* Don't add infrastructure for mods without tracepoints */
+	if (trace_module_has_bad_taint(mod)) {
+		pr_err("%s: module has bad taint, not creating trace events\n",
+		       mod->name);
+		return;
+	}
+
 	start = mod->trace_events;
 	end = mod->trace_events + mod->num_trace_events;
 
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 29f26540e9c9..031cc5655a51 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
 EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
 
 #ifdef CONFIG_MODULES
+bool trace_module_has_bad_taint(struct module *mod)
+{
+	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
+}
+
 static int tracepoint_module_coming(struct module *mod)
 {
 	struct tp_module *tp_mod, *iter;
@@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod)
 	 * module headers (for forced load), to make sure we don't cause a crash.
 	 * Staging and out-of-tree GPL modules are fine.
 	 */
-	if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
+	if (trace_module_has_bad_taint(mod))
 		return 0;
 	mutex_lock(&tracepoints_mutex);
 	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
diff --git a/mm/Kconfig b/mm/Kconfig
index 2d9f1504d75e..2888024e0b0a 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -575,5 +575,5 @@ config PGTABLE_MAPPING
 	  then you should select this. This causes zsmalloc to use page table
 	  mapping rather than copying for object mapping.
 
-	  You can check speed with zsmalloc benchmark[1].
-	  [1] https://github.com/spartacus06/zsmalloc
+	  You can check speed with zsmalloc benchmark:
+	  https://github.com/spartacus06/zsmapbench
diff --git a/mm/compaction.c b/mm/compaction.c
index b48c5259ea33..918577595ea8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -251,7 +251,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 {
 	int nr_scanned = 0, total_isolated = 0;
 	struct page *cursor, *valid_page = NULL;
-	unsigned long nr_strict_required = end_pfn - blockpfn;
 	unsigned long flags;
 	bool locked = false;
 
@@ -264,11 +263,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
 		nr_scanned++;
 		if (!pfn_valid_within(blockpfn))
-			continue;
+			goto isolate_fail;
+
 		if (!valid_page)
 			valid_page = page;
 		if (!PageBuddy(page))
-			continue;
+			goto isolate_fail;
 
 		/*
 		 * The zone lock must be held to isolate freepages.
@@ -289,12 +289,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
 		/* Recheck this is a buddy page under lock */
 		if (!PageBuddy(page))
-			continue;
+			goto isolate_fail;
 
 		/* Found a free page, break it into order-0 pages */
 		isolated = split_free_page(page);
-		if (!isolated && strict)
-			break;
 		total_isolated += isolated;
 		for (i = 0; i < isolated; i++) {
 			list_add(&page->lru, freelist);
@@ -305,7 +303,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		if (isolated) {
 			blockpfn += isolated - 1;
 			cursor += isolated - 1;
+			continue;
 		}
+
+isolate_fail:
+		if (strict)
+			break;
+		else
+			continue;
+
 	}
 
 	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
@@ -315,7 +321,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	 * pages requested were isolated. If there were any failures, 0 is
 	 * returned and CMA will fail.
 	 */
-	if (strict && nr_strict_required > total_isolated)
+	if (strict && blockpfn < end_pfn)
 		total_isolated = 0;
 
 	if (locked)
diff --git a/mm/migrate.c b/mm/migrate.c
index 482a33d89134..b494fdb9a636 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1158,7 +1158,7 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 					pm->node);
 	else
 		return alloc_pages_exact_node(pm->node,
-				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
+				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
 }
 
 /*
@@ -1544,9 +1544,9 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
 	struct page *newpage;
 
 	newpage = alloc_pages_exact_node(nid,
-					 (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
-					  __GFP_NOMEMALLOC | __GFP_NORETRY |
-					  __GFP_NOWARN) &
+					 (GFP_HIGHUSER_MOVABLE |
+					  __GFP_THISNODE | __GFP_NOMEMALLOC |
+					  __GFP_NORETRY | __GFP_NOWARN) &
 					 ~GFP_IOFS, 0);
 
 	return newpage;
@@ -1747,7 +1747,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 		goto out_dropref;
 
 	new_page = alloc_pages_node(node,
-		(GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER);
+		(GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,
+		HPAGE_PMD_ORDER);
 	if (!new_page)
 		goto out_fail;
 
diff --git a/net/socket.c b/net/socket.c
index 32df5847efa3..a19ae1968d37 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -450,16 +450,17 @@ EXPORT_SYMBOL(sockfd_lookup);
 
 static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
 {
-	struct file *file;
+	struct fd f = fdget(fd);
 	struct socket *sock;
 
 	*err = -EBADF;
-	file = fget_light(fd, fput_needed);
-	if (file) {
-		sock = sock_from_file(file, err);
-		if (sock)
+	if (f.file) {
+		sock = sock_from_file(f.file, err);
+		if (likely(sock)) {
+			*fput_needed = f.flags;
 			return sock;
-		fput_light(file, *fput_needed);
+		}
+		fdput(f);
 	}
 	return NULL;
 }
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 276e84b8a8e5..10085de886fe 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -330,7 +330,8 @@ static void write_src(void)
 				printf("\tPTR\t_text + %#llx\n",
 				       table[i].addr - _text);
 			else
-				printf("\tPTR\t%#llx\n", table[i].addr);
+				printf("\tPTR\t_text - %#llx\n",
+				       _text - table[i].addr);
 		} else {
 			printf("\tPTR\t%#llx\n", table[i].addr);
 		}
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 40610984a1b5..99a45fdc1bbf 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1502,6 +1502,16 @@ static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
 #define R_ARM_JUMP24	29
 #endif
 
+#ifndef R_ARM_THM_CALL
+#define R_ARM_THM_CALL		10
+#endif
+#ifndef R_ARM_THM_JUMP24
+#define R_ARM_THM_JUMP24	30
+#endif
+#ifndef R_ARM_THM_JUMP19
+#define R_ARM_THM_JUMP19	51
+#endif
+
 static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
 {
 	unsigned int r_typ = ELF_R_TYPE(r->r_info);
@@ -1515,6 +1525,9 @@ static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
 	case R_ARM_PC24:
 	case R_ARM_CALL:
 	case R_ARM_JUMP24:
+	case R_ARM_THM_CALL:
+	case R_ARM_THM_JUMP24:
+	case R_ARM_THM_JUMP19:
 		/* From ARM ABI: ((S + A) | T) - P */
 		r->r_addend = (int)(long)(elf->hdr +
 			      sechdr->sh_offset +
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index d46cbc5e335e..2fb2576dc644 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1000,7 +1000,11 @@ static int keyring_detect_cycle_iterator(const void *object,
 
 	kenter("{%d}", key->serial);
 
-	BUG_ON(key != ctx->match_data);
+	/* We might get a keyring with matching index-key that is nonetheless a
+	 * different keyring. */
+	if (key != ctx->match_data)
+		return 0;
+
 	ctx->result = ERR_PTR(-EDEADLK);
 	return 1;
 }
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index df3652ad15ef..8ed0bcc01386 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -1026,6 +1026,9 @@ static void ad1884_fixup_thinkpad(struct hda_codec *codec,
 		spec->gen.keep_eapd_on = 1;
 		spec->gen.vmaster_mute.hook = ad_vmaster_eapd_hook;
 		spec->eapd_nid = 0x12;
+		/* Analog PC Beeper - allow firmware/ACPI beeps */
+		spec->beep_amp = HDA_COMPOSE_AMP_VAL(0x20, 3, 3, HDA_INPUT);
+		spec->gen.beep_nid = 0; /* no digital beep */
 	}
 }
 
@@ -1092,6 +1095,7 @@ static int patch_ad1884(struct hda_codec *codec)
 	spec = codec->spec;
 
 	spec->gen.mixer_nid = 0x20;
+	spec->gen.mixer_merge_nid = 0x21;
 	spec->gen.beep_nid = 0x10;
 	set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ec304f3ae3b4..8d0a84436674 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3616,6 +3616,19 @@ static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec,
 	}
 }
 
+static void alc_no_shutup(struct hda_codec *codec)
+{
+}
+
+static void alc_fixup_no_shutup(struct hda_codec *codec,
+				const struct hda_fixup *fix, int action)
+{
+	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+		struct alc_spec *spec = codec->spec;
+		spec->shutup = alc_no_shutup;
+	}
+}
+
 static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
 				const struct hda_fixup *fix, int action)
 {
@@ -3844,6 +3857,7 @@ enum {
 	ALC269_FIXUP_HP_GPIO_LED,
 	ALC269_FIXUP_INV_DMIC,
 	ALC269_FIXUP_LENOVO_DOCK,
+	ALC269_FIXUP_NO_SHUTUP,
 	ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
 	ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
 	ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -4020,6 +4034,10 @@ static const struct hda_fixup alc269_fixups[] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_inv_dmic_0x12,
 	},
+	[ALC269_FIXUP_NO_SHUTUP] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc_fixup_no_shutup,
+	},
 	[ALC269_FIXUP_LENOVO_DOCK] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -4253,6 +4271,7 @@ static const struct hda_fixup alc269_fixups[] = {
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+	SND_PCI_QUIRK(0x1025, 0x0283, "Acer TravelMate 8371", ALC269_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
@@ -4404,6 +4423,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
 	SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5163,7 +5183,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_AUTO_MUTE),
-	SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_AUTO_MUTE),
 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
 	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 75d0ad5d2dcb..647a72cda005 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -1328,6 +1328,9 @@ static int pm860x_probe(struct snd_soc_codec *codec)
 	pm860x->codec = codec;
 
 	codec->control_data = pm860x->regmap;
+	ret = snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
+	if (ret)
+		return ret;
 
 	for (i = 0; i < 4; i++) {
 		ret = request_threaded_irq(pm860x->irq[i], NULL,
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index 52e7cb08434b..fa2b8e07f420 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -210,7 +210,7 @@ out:
 static int si476x_codec_probe(struct snd_soc_codec *codec)
 {
 	codec->control_data = dev_get_regmap(codec->dev->parent, NULL);
-	return 0;
+	return snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
 }
 
 static struct snd_soc_dai_ops si476x_dai_ops = {
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index 3fde9e402710..d163e18d85d4 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -305,7 +305,9 @@ static int __init n810_soc_init(void)
 	int err;
 	struct device *dev;
 
-	if (!(machine_is_nokia_n810() || machine_is_nokia_n810_wimax()))
+	if (!of_have_populated_dt() ||
+	    (!of_machine_is_compatible("nokia,n810") &&
+	     !of_machine_is_compatible("nokia,n810-wimax")))
 		return -ENODEV;
 
 	n810_snd_device = platform_device_alloc("soc-audio", -1);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 47e1ce771e65..28522bd03b8e 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1989,6 +1989,7 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
 
 		paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list);
 		if (paths < 0) {
+			dpcm_path_put(&list);
 			dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
 					fe->dai_link->name, "playback");
 			mutex_unlock(&card->mutex);
@@ -2018,6 +2019,7 @@ capture:
 
 		paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list);
 		if (paths < 0) {
+			dpcm_path_put(&list);
 			dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
 					fe->dai_link->name, "capture");
 			mutex_unlock(&card->mutex);
@@ -2082,6 +2084,7 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
 	fe->dpcm[stream].runtime = fe_substream->runtime;
 
 	if (dpcm_path_get(fe, stream, &list) <= 0) {
+		dpcm_path_put(&list);
 		dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
 			fe->dai_link->name, stream ? "capture" : "playback");
 	}
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 44b0ba4feab3..1bed780e21d9 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -883,6 +883,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
 		}
 		break;
 
+	case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
 	case USB_ID(0x046d, 0x0808):
 	case USB_ID(0x046d, 0x0809):
 	case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
index d66418237d21..aa290c0de6f5 100644
--- a/tools/testing/selftests/ipc/msgque.c
+++ b/tools/testing/selftests/ipc/msgque.c
@@ -201,6 +201,7 @@ int main(int argc, char **argv)
 
 	msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666);
 	if (msgque.msq_id == -1) {
+		err = -errno;
 		printf("Can't create queue\n");
 		goto err_out;
 	}