author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2013-09-29 21:29:23 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2013-09-29 21:29:23 -0400
commit    88502b9c0a5dcc884c0dbfb6ddf964ff5da5d8d3
tree      f79f728c308100bc3e57d0d2f5d1e00d90406a0d
parent    e18945b159a1cdbc031f1d3b0b7e515a33bdcbf7
parent    15c03dd4859ab16f9212238f29dd315654aa94f6

Merge 3.12-rc3 into driver-core-next

We want the driver core and sysfs fixes in here to make merges and
development easier.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r-- CREDITS | 3
-rw-r--r-- Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt (renamed from Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt) | 8
-rw-r--r-- Documentation/devicetree/bindings/pci/designware-pcie.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt (renamed from Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt) | 0
-rw-r--r-- Documentation/kernel-parameters.txt | 4
-rw-r--r-- Documentation/sound/alsa/HD-Audio-Models.txt | 6
-rw-r--r-- MAINTAINERS | 20
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/Kconfig | 3
-rw-r--r-- arch/arm/Kconfig | 3
-rw-r--r-- arch/arm/crypto/aes-armv4.S | 6
-rw-r--r-- arch/arm/include/asm/uaccess.h | 7
-rw-r--r-- arch/arm/kernel/entry-common.S | 4
-rw-r--r-- arch/arm/kernel/entry-header.S | 8
-rw-r--r-- arch/mips/include/asm/cpu-features.h | 2
-rw-r--r-- arch/mips/mm/dma-default.c | 12
-rw-r--r-- arch/openrisc/include/asm/prom.h | 44
-rw-r--r-- arch/powerpc/boot/Makefile | 4
-rw-r--r-- arch/powerpc/boot/epapr-wrapper.c | 9
-rw-r--r-- arch/powerpc/boot/epapr.c | 4
-rw-r--r-- arch/powerpc/boot/of.c | 16
-rwxr-xr-x arch/powerpc/boot/wrapper | 9
-rw-r--r-- arch/powerpc/include/asm/irq.h | 4
-rw-r--r-- arch/powerpc/include/asm/processor.h | 4
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 3
-rw-r--r-- arch/powerpc/kernel/irq.c | 100
-rw-r--r-- arch/powerpc/kernel/misc_32.S | 25
-rw-r--r-- arch/powerpc/kernel/misc_64.S | 10
-rw-r--r-- arch/powerpc/kernel/process.c | 3
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 21
-rw-r--r-- arch/powerpc/lib/sstep.c | 3
-rw-r--r-- arch/powerpc/platforms/pseries/smp.c | 26
-rw-r--r-- arch/s390/Kconfig | 2
-rw-r--r-- arch/s390/include/asm/mutex.h | 2
-rw-r--r-- arch/s390/include/asm/processor.h | 2
-rw-r--r-- arch/s390/include/asm/spinlock.h | 5
-rw-r--r-- arch/x86/include/asm/xen/page.h | 31
-rw-r--r-- arch/x86/kernel/cpu/perf_event.c | 12
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel.c | 1
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 10
-rw-r--r-- arch/x86/kernel/microcode_amd.c | 1
-rw-r--r-- arch/x86/kernel/reboot.c | 18
-rw-r--r-- arch/x86/platform/efi/efi.c | 11
-rw-r--r-- arch/x86/xen/p2m.c | 10
-rw-r--r-- arch/x86/xen/spinlock.c | 26
-rw-r--r-- drivers/acpi/acpi_ipmi.c | 24
-rw-r--r-- drivers/acpi/scan.c | 2
-rw-r--r-- drivers/ata/sata_promise.c | 2
-rw-r--r-- drivers/base/core.c | 14
-rw-r--r-- drivers/block/cciss.c | 1
-rw-r--r-- drivers/block/cpqarray.c | 1
-rw-r--r-- drivers/char/tpm/xen-tpmfront.c | 36
-rw-r--r-- drivers/clocksource/Kconfig | 1
-rw-r--r-- drivers/clocksource/clksrc-of.c | 3
-rw-r--r-- drivers/clocksource/em_sti.c | 2
-rw-r--r-- drivers/clocksource/exynos_mct.c | 10
-rw-r--r-- drivers/cpufreq/acpi-cpufreq.c | 4
-rw-r--r-- drivers/cpufreq/cpufreq.c | 3
-rw-r--r-- drivers/cpufreq/exynos5440-cpufreq.c | 2
-rw-r--r-- drivers/gpu/drm/i2c/tda998x_drv.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 13
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 8
-rw-r--r-- drivers/gpu/drm/msm/mdp4/mdp4_kms.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 8
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/btc_dpm.c | 51
-rw-r--r-- drivers/gpu/drm/radeon/btc_dpm.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/ci_dpm.c | 26
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 17
-rw-r--r-- drivers/gpu/drm/radeon/ni_dpm.c | 24
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/r600_dpm.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r600_hdmi.c | 20
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 66
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ring.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/radeon_uvd.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 24
-rw-r--r-- drivers/gpu/drm/radeon/uvd_v1_0.c | 4
-rw-r--r-- drivers/hv/connection.c | 2
-rw-r--r-- drivers/hv/hv_kvp.c | 38
-rw-r--r-- drivers/hv/hv_snapshot.c | 6
-rw-r--r-- drivers/hv/hv_util.c | 71
-rw-r--r-- drivers/hwmon/applesmc.c | 11
-rw-r--r-- drivers/i2c/busses/i2c-designware-core.c | 26
-rw-r--r-- drivers/i2c/busses/i2c-ismt.c | 3
-rw-r--r-- drivers/i2c/busses/i2c-mv64xxx.c | 16
-rw-r--r-- drivers/i2c/busses/i2c-s3c2410.c | 2
-rw-r--r-- drivers/md/bcache/bcache.h | 7
-rw-r--r-- drivers/md/bcache/bset.c | 39
-rw-r--r-- drivers/md/bcache/btree.c | 4
-rw-r--r-- drivers/md/bcache/journal.c | 33
-rw-r--r-- drivers/md/bcache/request.c | 15
-rw-r--r-- drivers/md/bcache/sysfs.c | 9
-rw-r--r-- drivers/md/bcache/util.c | 11
-rw-r--r-- drivers/md/bcache/util.h | 12
-rw-r--r-- drivers/md/bcache/writeback.c | 42
-rw-r--r-- drivers/md/dm-io.c | 7
-rw-r--r-- drivers/md/dm-mpath.c | 18
-rw-r--r-- drivers/md/dm-snap-persistent.c | 2
-rw-r--r-- drivers/md/dm-snap.c | 5
-rw-r--r-- drivers/md/dm-stats.c | 23
-rw-r--r-- drivers/md/dm-thin.c | 14
-rw-r--r-- drivers/md/dm.c | 71
-rw-r--r-- drivers/md/dm.h | 3
-rw-r--r-- drivers/misc/mei/amthif.c | 1
-rw-r--r-- drivers/misc/mei/bus.c | 5
-rw-r--r-- drivers/misc/mei/client.h | 6
-rw-r--r-- drivers/misc/mei/hbm.c | 10
-rw-r--r-- drivers/misc/mei/init.c | 3
-rw-r--r-- drivers/misc/mei/main.c | 11
-rw-r--r-- drivers/misc/mei/mei_dev.h | 6
-rw-r--r-- drivers/pci/pci.c | 8
-rw-r--r-- drivers/staging/imx-drm/imx-drm-core.c | 11
-rw-r--r-- drivers/staging/lustre/lustre/obdecho/echo_client.c | 2
-rw-r--r-- drivers/staging/octeon-usb/cvmx-usb.c | 2
-rw-r--r-- drivers/staging/rtl8188eu/core/rtw_mp.c | 2
-rw-r--r-- drivers/staging/rtl8188eu/hal/rtl8188e_dm.c | 2
-rw-r--r-- drivers/staging/rtl8188eu/os_dep/ioctl_linux.c | 2
-rw-r--r-- drivers/staging/rtl8188eu/os_dep/usb_intf.c | 1
-rw-r--r-- drivers/staging/rtl8192u/r819xU_cmdpkt.c | 2
-rw-r--r-- drivers/staging/vt6656/iwctl.c | 3
-rw-r--r-- drivers/staging/vt6656/main_usb.c | 3
-rw-r--r-- drivers/staging/vt6656/rxtx.c | 2
-rw-r--r-- drivers/tty/n_tty.c | 3
-rw-r--r-- drivers/tty/serial/pch_uart.c | 13
-rw-r--r-- drivers/tty/serial/serial-tegra.c | 4
-rw-r--r-- drivers/tty/tty_ioctl.c | 3
-rw-r--r-- drivers/usb/chipidea/Kconfig | 2
-rw-r--r-- drivers/usb/chipidea/ci_hdrc_imx.c | 7
-rw-r--r-- drivers/usb/chipidea/core.c | 1
-rw-r--r-- drivers/usb/chipidea/udc.c | 4
-rw-r--r-- drivers/usb/core/devio.c | 16
-rw-r--r-- drivers/usb/core/hub.c | 3
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 2
-rw-r--r-- drivers/usb/gadget/f_fs.c | 60
-rw-r--r-- drivers/usb/host/ehci-fsl.c | 17
-rw-r--r-- drivers/usb/host/ehci-pci.c | 2
-rw-r--r-- drivers/usb/host/imx21-hcd.c | 8
-rw-r--r-- drivers/usb/host/ohci-hcd.c | 22
-rw-r--r-- drivers/usb/host/ohci-q.c | 26
-rw-r--r-- drivers/usb/host/uhci-pci.c | 2
-rw-r--r-- drivers/usb/host/uhci-q.c | 12
-rw-r--r-- drivers/usb/host/xhci-hub.c | 47
-rw-r--r-- drivers/usb/host/xhci-mem.c | 2
-rw-r--r-- drivers/usb/host/xhci-pci.c | 2
-rw-r--r-- drivers/usb/host/xhci-ring.c | 37
-rw-r--r-- drivers/usb/host/xhci.c | 25
-rw-r--r-- drivers/usb/host/xhci.h | 11
-rw-r--r-- drivers/video/mmp/hw/mmp_ctrl.c | 17
-rw-r--r-- drivers/video/mxsfb.c | 1
-rw-r--r-- drivers/video/neofb.c | 4
-rw-r--r-- drivers/video/of_display_timing.c | 6
-rw-r--r-- drivers/video/omap2/displays-new/Kconfig | 1
-rw-r--r-- drivers/video/omap2/displays-new/connector-analog-tv.c | 2
-rw-r--r-- drivers/video/omap2/displays-new/connector-dvi.c | 2
-rw-r--r-- drivers/video/omap2/displays-new/connector-hdmi.c | 2
-rw-r--r-- drivers/video/omap2/dss/dispc.c | 1
-rw-r--r-- drivers/video/s3fb.c | 9
-rw-r--r-- drivers/xen/balloon.c | 23
-rw-r--r-- fs/bio.c | 4
-rw-r--r-- fs/ocfs2/super.c | 2
-rw-r--r-- fs/reiserfs/journal.c | 67
-rw-r--r-- fs/udf/ialloc.c | 16
-rw-r--r-- fs/udf/super.c | 64
-rw-r--r-- fs/udf/udf_sb.h | 2
-rw-r--r-- fs/xfs/xfs_buf_item.c | 1
-rw-r--r-- fs/xfs/xfs_da_btree.c | 5
-rw-r--r-- fs/xfs/xfs_fs.h | 2
-rw-r--r-- fs/xfs/xfs_icache.c | 9
-rw-r--r-- fs/xfs/xfs_log_recover.c | 73
-rw-r--r-- include/linux/device-mapper.h | 3
-rw-r--r-- include/linux/hyperv.h | 7
-rw-r--r-- include/linux/memcontrol.h | 55
-rw-r--r-- include/linux/mutex.h | 6
-rw-r--r-- include/linux/of_irq.h | 20
-rw-r--r-- include/linux/smp.h | 6
-rw-r--r-- include/uapi/drm/radeon_drm.h | 2
-rw-r--r-- include/uapi/linux/perf_event.h | 15
-rw-r--r-- ipc/msg.c | 19
-rw-r--r-- ipc/sem.c | 34
-rw-r--r-- ipc/shm.c | 17
-rw-r--r-- ipc/util.c | 32
-rw-r--r-- ipc/util.h | 10
-rw-r--r-- kernel/audit.c | 5
-rw-r--r-- kernel/context_tracking.c | 12
-rw-r--r-- kernel/events/core.c | 21
-rw-r--r-- kernel/params.c | 6
-rw-r--r-- kernel/reboot.c | 9
-rw-r--r-- kernel/sched/fair.c | 9
-rw-r--r-- kernel/watchdog.c | 60
-rw-r--r-- lib/kobject.c | 5
-rw-r--r-- lib/lockref.c | 23
-rw-r--r-- mm/memcontrol.c | 560
-rw-r--r-- mm/mlock.c | 1
-rw-r--r-- mm/vmscan.c | 83
-rwxr-xr-x scripts/checkpatch.pl | 4
-rw-r--r-- sound/core/compress_offload.c | 15
-rw-r--r-- sound/pci/hda/patch_cirrus.c | 72
-rw-r--r-- sound/pci/hda/patch_hdmi.c | 47
-rw-r--r-- sound/pci/hda/patch_realtek.c | 16
-rw-r--r-- tools/lib/lk/debugfs.c | 1
-rw-r--r-- tools/perf/arch/x86/util/tsc.c | 6
-rw-r--r-- tools/perf/builtin-inject.c | 2
-rw-r--r-- tools/perf/builtin-kmem.c | 2
-rw-r--r-- tools/perf/builtin-report.c | 5
-rw-r--r-- tools/perf/builtin-script.c | 2
-rw-r--r-- tools/perf/builtin-trace.c | 18
-rw-r--r-- tools/perf/config/Makefile | 5
-rw-r--r-- tools/perf/config/feature-tests.mak | 10
-rw-r--r-- tools/perf/util/annotate.c | 2
-rw-r--r-- tools/perf/util/dwarf-aux.c | 19
-rw-r--r-- tools/perf/util/dwarf-aux.h | 3
-rw-r--r-- tools/perf/util/header.c | 41
-rw-r--r-- tools/perf/util/hist.c | 2
-rw-r--r-- tools/perf/util/machine.c | 2
-rw-r--r-- tools/perf/util/probe-finder.c | 89
-rw-r--r-- tools/perf/util/probe-finder.h | 3
-rw-r--r-- tools/perf/util/session.c | 9
-rw-r--r-- tools/perf/util/session.h | 4
-rw-r--r-- tools/perf/util/symbol-elf.c | 16
-rw-r--r-- tools/perf/util/trace-event-parse.c | 2

230 files changed, 2355 insertions(+), 1253 deletions(-)
diff --git a/CREDITS b/CREDITS
index 9416a9a8b95e..0640e1650483 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2808,8 +2808,7 @@ S: Ottawa, Ontario
 S: Canada K2P 0X8
 
 N: Mikael Pettersson
-E: mikpe@it.uu.se
-W: http://user.it.uu.se/~mikpe/linux/
+E: mikpelinux@gmail.com
 D: Miscellaneous fixes
 
 N: Reed H. Petty
diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
index 6d1c0988cfc7..c67b975c8906 100644
--- a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
@@ -1,11 +1,11 @@
-* Samsung Exynos specific extensions to the Synopsis Designware Mobile
+* Samsung Exynos specific extensions to the Synopsys Designware Mobile
   Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Samsung Exynos specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Samsung Exynos specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index 8a3d91d47b6a..c559f3f36309 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -1,11 +1,11 @@
-* Rockchip specific extensions to the Synopsis Designware Mobile
+* Rockchip specific extensions to the Synopsys Designware Mobile
   Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Rockchip specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
diff --git a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index cdcebea9c6f5..066a78b034ca 100644
--- a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -1,14 +1,14 @@
-* Synopsis Designware Mobile Storage Host Controller
+* Synopsys Designware Mobile Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
 differences between the core mmc properties described by mmc.txt and the
-properties used by the Synopsis Designware Mobile Storage Host Controller.
+properties used by the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
 * compatible: should be
-	- snps,dw-mshc: for controllers compliant with synopsis dw-mshc.
+	- snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
 * #address-cells: should be 1.
 * #size-cells: should be 0.
 
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index eabcb4b5db6e..e216af356847 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -1,4 +1,4 @@
-* Synopsis Designware PCIe interface
+* Synopsys Designware PCIe interface
 
 Required properties:
 - compatible: should contain "snps,dw-pcie" to identify the
diff --git a/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
index c5e032c85bf9..c5e032c85bf9 100644
--- a/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt
+++ b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1a036cd972fb..539a23631990 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3485,6 +3485,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			the unplug protocol
 			never -- do not unplug even if version check succeeds
 
+	xen_nopvspin	[X86,XEN]
+			Disables the ticketlock slowpath using Xen PV
+			optimizations.
+
 	xirc2ps_cs=	[NET,PCMCIA]
 			Format:
 			<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index a46ddb85e83a..f911e3656209 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -296,6 +296,12 @@ Cirrus Logic CS4206/4207
   imac27	IMac 27 Inch
   auto		BIOS setup (default)
 
+Cirrus Logic CS4208
+===================
+  mba6		MacBook Air 6,1 and 6,2
+  gpio0	Enable GPIO 0 amp
+  auto		BIOS setup (default)
+
 VIA VT17xx/VT18xx/VT20xx
 ========================
   auto		BIOS setup (default)
diff --git a/MAINTAINERS b/MAINTAINERS
index e61c2e83fc2b..284969fa2896 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1812,7 +1812,8 @@ S: Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/
 
 BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
-M:	Christian Daudt <csd@broadcom.com>
+M:	Christian Daudt <bcm@fixthebug.org>
+L:	bcm-kernel-feedback-list@broadcom.com
 T:	git git://git.github.com/broadcom/bcm11351
 S:	Maintained
 F:	arch/arm/mach-bcm/
@@ -2639,6 +2640,18 @@ F: include/linux/device-mapper.h
 F:	include/linux/dm-*.h
 F:	include/uapi/linux/dm-*.h
 
+DIGI NEO AND CLASSIC PCI PRODUCTS
+M:	Lidza Louina <lidza.louina@gmail.com>
+L:	driverdev-devel@linuxdriverproject.org
+S:	Maintained
+F:	drivers/staging/dgnc/
+
+DIGI EPCA PCI PRODUCTS
+M:	Lidza Louina <lidza.louina@gmail.com>
+L:	driverdev-devel@linuxdriverproject.org
+S:	Maintained
+F:	drivers/staging/dgap/
+
 DIOLAN U2C-12 I2C DRIVER
 M:	Guenter Roeck <linux@roeck-us.net>
 L:	linux-i2c@vger.kernel.org
@@ -6595,7 +6608,7 @@ S: Obsolete
 F:	drivers/net/wireless/prism54/
 
 PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
-M:	Mikael Pettersson <mikpe@it.uu.se>
+M:	Mikael Pettersson <mikpelinux@gmail.com>
 L:	linux-ide@vger.kernel.org
 S:	Maintained
 F:	drivers/ata/sata_promise.*
@@ -8724,9 +8737,8 @@ F: Documentation/hid/hiddev.txt
 F:	drivers/hid/usbhid/
 
 USB/IP DRIVERS
-M:	Matt Mooney <mfm@muteddisk.com>
 L:	linux-usb@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	drivers/staging/usbip/
 
 USB ISP116X DRIVER
diff --git a/Makefile b/Makefile
index 8d0668f473ba..2ae108d4f2af 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 1feb169274fe..af2cc6eabcc7 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -286,9 +286,6 @@ config HAVE_PERF_USER_STACK_DUMP
 config HAVE_ARCH_JUMP_LABEL
 	bool
 
-config HAVE_ARCH_MUTEX_CPU_RELAX
-	bool
-
 config HAVE_RCU_TABLE_FREE
 	bool
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 3f7714d8d2d2..1ad6fb6c094d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2217,8 +2217,7 @@ config NEON
 
 config KERNEL_MODE_NEON
 	bool "Support for NEON in kernel mode"
-	default n
-	depends on NEON
+	depends on NEON && AEABI
 	help
 	  Say Y to include support for NEON in kernel mode.
 
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index 19d6cd6f29f9..3a14ea8fe97e 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -148,7 +148,7 @@ AES_Te:
 @ const AES_KEY *key) {
 .align	5
 ENTRY(AES_encrypt)
-	sub	r3,pc,#8		@ AES_encrypt
+	adr	r3,AES_encrypt
 	stmdb   sp!,{r1,r4-r12,lr}
 	mov	r12,r0		@ inp
 	mov	r11,r2
@@ -381,7 +381,7 @@ _armv4_AES_encrypt:
 .align	5
 ENTRY(private_AES_set_encrypt_key)
 _armv4_AES_set_encrypt_key:
-	sub	r3,pc,#8		@ AES_set_encrypt_key
+	adr	r3,_armv4_AES_set_encrypt_key
 	teq	r0,#0
 	moveq	r0,#-1
 	beq	.Labrt
@@ -843,7 +843,7 @@ AES_Td:
 @ const AES_KEY *key) {
 .align	5
 ENTRY(AES_decrypt)
-	sub	r3,pc,#8		@ AES_decrypt
+	adr	r3,AES_decrypt
 	stmdb   sp!,{r1,r4-r12,lr}
 	mov	r12,r0		@ inp
 	mov	r11,r2
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7e1f76027f66..72abdc541f38 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -19,6 +19,13 @@
 #include <asm/unified.h>
 #include <asm/compiler.h>
 
+#if __LINUX_ARM_ARCH__ < 6
+#include <asm-generic/uaccess-unaligned.h>
+#else
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+#endif
+
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
 
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 74ad15d1a065..bc6bd9683ba4 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -442,10 +442,10 @@ local_restart:
 	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
 
 	add	r1, sp, #S_OFF
-	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
 	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
 	bcs	arm_syscall
-2:	mov	why, #0				@ no longer a real syscall
+	mov	why, #0				@ no longer a real syscall
 	b	sys_ni_syscall			@ not private func
 
 #if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index de23a9beed13..39f89fbd5111 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -329,10 +329,10 @@
 #ifdef CONFIG_CONTEXT_TRACKING
 	.if	\save
 	stmdb   sp!, {r0-r3, ip, lr}
-	bl	user_exit
+	bl	context_tracking_user_exit
 	ldmia	sp!, {r0-r3, ip, lr}
 	.else
-	bl	user_exit
+	bl	context_tracking_user_exit
 	.endif
 #endif
 	.endm
@@ -341,10 +341,10 @@
 #ifdef CONFIG_CONTEXT_TRACKING
 	.if	\save
 	stmdb   sp!, {r0-r3, ip, lr}
-	bl	user_enter
+	bl	context_tracking_user_enter
 	ldmia	sp!, {r0-r3, ip, lr}
 	.else
-	bl	user_enter
+	bl	context_tracking_user_enter
 	.endif
 #endif
 	.endm
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 51680d15ca8e..d445d060e346 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -187,7 +187,7 @@
 
 /*
  * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
- * pre-MIPS32/MIPS53 processors have CLO, CLZ.  The IDT RC64574 is 64-bit and
+ * pre-MIPS32/MIPS64 processors have CLO, CLZ.  The IDT RC64574 is 64-bit and
  * has CLO and CLZ but not DCLO nor DCLZ.  For 64-bit kernels
  * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
  */
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index f25a7e9f8cbc..5f8b95512580 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -308,12 +308,10 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
 {
 	int i;
 
-	/* Make sure that gcc doesn't leave the empty loop body.  */
-	for (i = 0; i < nelems; i++, sg++) {
-		if (cpu_needs_post_dma_flush(dev))
+	if (cpu_needs_post_dma_flush(dev))
+		for (i = 0; i < nelems; i++, sg++)
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);
-	}
 }
 
 static void mips_dma_sync_sg_for_device(struct device *dev,
@@ -321,12 +319,10 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
 {
 	int i;
 
-	/* Make sure that gcc doesn't leave the empty loop body.  */
-	for (i = 0; i < nelems; i++, sg++) {
-		if (!plat_device_is_coherent(dev))
+	if (!plat_device_is_coherent(dev))
+		for (i = 0; i < nelems; i++, sg++)
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);
-	}
 }
 
 int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h
index eb59bfe23e85..93c9980e1b6b 100644
--- a/arch/openrisc/include/asm/prom.h
+++ b/arch/openrisc/include/asm/prom.h
@@ -14,53 +14,9 @@
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  */
-
-#include <linux/of.h>	/* linux/of.h gets to determine #include ordering */
-
 #ifndef _ASM_OPENRISC_PROM_H
 #define _ASM_OPENRISC_PROM_H
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
 
-#include <linux/types.h>
-#include <asm/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/atomic.h>
-#include <linux/of_irq.h>
-#include <linux/of_fdt.h>
-#include <linux/of_address.h>
-#include <linux/proc_fs.h>
-#include <linux/platform_device.h>
 #define HAVE_ARCH_DEVTREE_FIXUPS
 
-/* Other Prototypes */
-extern int early_uartlite_console(void);
-
-/* Parse the ibm,dma-window property of an OF node into the busno, phys and
- * size parameters.
- */
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
-		unsigned long *busno, unsigned long *phys, unsigned long *size);
-
-extern void kdump_move_device_tree(void);
-
-/* Get the MAC address */
-extern const void *of_get_mac_address(struct device_node *np);
-
-/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
- * @pdev:	the device whose interrupt is to be resolved
- * @out_irq:	structure of_irq filled by this function
- *
- * This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
- * walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
- * resolving using the OF tree walking.
- */
-struct pci_dev;
-extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
 #endif /* _ASM_OPENRISC_PROM_H */
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 6a15c968d214..15ca2255f438 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -74,7 +74,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
 src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
 src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
 
-src-plat-y := of.c
+src-plat-y := of.c epapr.c
 src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
 				treeboot-walnut.c cuboot-acadia.c \
 				cuboot-kilauea.c simpleboot.c \
@@ -97,7 +97,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
 				prpmc2800.c
 src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
 src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
-src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c
+src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
 
 src-wlib := $(sort $(src-wlib-y))
 src-plat := $(sort $(src-plat-y))
diff --git a/arch/powerpc/boot/epapr-wrapper.c b/arch/powerpc/boot/epapr-wrapper.c
new file mode 100644
index 000000000000..c10191006673
--- /dev/null
+++ b/arch/powerpc/boot/epapr-wrapper.c
@@ -0,0 +1,9 @@
+extern void epapr_platform_init(unsigned long r3, unsigned long r4,
+				unsigned long r5, unsigned long r6,
+				unsigned long r7);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+		   unsigned long r6, unsigned long r7)
+{
+	epapr_platform_init(r3, r4, r5, r6, r7);
+}
diff --git a/arch/powerpc/boot/epapr.c b/arch/powerpc/boot/epapr.c
index 06c1961bd124..02e91aa2194a 100644
--- a/arch/powerpc/boot/epapr.c
+++ b/arch/powerpc/boot/epapr.c
@@ -48,8 +48,8 @@ static void platform_fixups(void)
 		fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
 }
 
-void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
-		   unsigned long r6, unsigned long r7)
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+			 unsigned long r6, unsigned long r7)
 {
 	epapr_magic = r6;
 	ima_size = r7;
diff --git a/arch/powerpc/boot/of.c b/arch/powerpc/boot/of.c
index 61d9899aa0d0..62e2f43ec1df 100644
--- a/arch/powerpc/boot/of.c
+++ b/arch/powerpc/boot/of.c
@@ -26,6 +26,9 @@
 
 static unsigned long claim_base;
 
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+			 unsigned long r6, unsigned long r7);
+
 static void *of_try_claim(unsigned long size)
 {
 	unsigned long addr = 0;
@@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr)
 	}
 }
 
-void platform_init(unsigned long a1, unsigned long a2, void *promptr)
+static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
 {
 	platform_ops.image_hdr = of_image_hdr;
 	platform_ops.malloc = of_try_claim;
@@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr)
 		loader_info.initrd_size = a2;
 	}
 }
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+		   unsigned long r6, unsigned long r7)
+{
+	/* Detect OF vs. ePAPR boot */
+	if (r5)
+		of_platform_init(r3, r4, (void *)r5);
+	else
+		epapr_platform_init(r3, r4, r5, r6, r7);
+}
+
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 6761c746048d..cd7af841ba05 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -148,18 +148,18 @@ make_space=y
 
 case "$platform" in
 pseries)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     link_address='0x4000000'
     ;;
 maple)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     link_address='0x400000'
     ;;
 pmac|chrp)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     ;;
 coff)
-    platformo="$object/crt0.o $object/of.o"
+    platformo="$object/crt0.o $object/of.o $object/epapr.o"
     lds=$object/zImage.coff.lds
     link_address='0x500000'
     pie=
@@ -253,6 +253,7 @@ treeboot-iss4xx-mpic)
     platformo="$object/treeboot-iss4xx.o"
     ;;
 epapr)
+    platformo="$object/epapr.o $object/epapr-wrapper.o"
     link_address='0x20000000'
     pie=-pie
     ;;
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 0e40843a1c6e..41f13cec8a8f 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
 
 extern void irq_ctx_init(void);
 extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_irq(int irq, void *p1,
-			   struct thread_info *tp, void *func);
+extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
 extern void do_IRQ(struct pt_regs *regs);
+extern void __do_irq(struct pt_regs *regs);
 
 int irq_choose_cpu(const struct cpumask *mask);
 
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index e378cccfca55..ce4de5aed7b5 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -149,8 +149,6 @@ typedef struct {
 
 struct thread_struct {
 	unsigned long	ksp;		/* Kernel stack pointer */
-	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
-
 #ifdef CONFIG_PPC64
 	unsigned long	ksp_vsid;
 #endif
@@ -162,6 +160,7 @@ struct thread_struct {
 #endif
 #ifdef CONFIG_PPC32
 	void		*pgdir;		/* root of page-table tree */
+	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
 #endif
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	/*
@@ -321,7 +320,6 @@ struct thread_struct {
 #else
 #define INIT_THREAD { \
 	.ksp = INIT_SP, \
-	.ksp_limit = INIT_SP_LIMIT, \
 	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
 	.fs = KERNEL_DS, \
 	.fpr = {{0}}, \
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d8958be5f31a..502c7a4e73f7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -80,10 +80,11 @@ int main(void)
 	DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
 #else
 	DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
+	DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
+	DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
 #endif /* CONFIG_PPC64 */
 
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
-	DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
 	DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c69440cef7af..57d286a78f86 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -441,50 +441,6 @@ void migrate_irqs(void)
 }
 #endif
 
-static inline void handle_one_irq(unsigned int irq)
-{
-	struct thread_info *curtp, *irqtp;
-	unsigned long saved_sp_limit;
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	if (!desc)
-		return;
-
-	/* Switch to the irq stack to handle this */
-	curtp = current_thread_info();
-	irqtp = hardirq_ctx[smp_processor_id()];
-
-	if (curtp == irqtp) {
-		/* We're already on the irq stack, just handle it */
-		desc->handle_irq(irq, desc);
-		return;
-	}
-
-	saved_sp_limit = current->thread.ksp_limit;
-
-	irqtp->task = curtp->task;
-	irqtp->flags = 0;
-
-	/* Copy the softirq bits in preempt_count so that the
-	 * softirq checks work in the hardirq context. */
-	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
-		(curtp->preempt_count & SOFTIRQ_MASK);
-
-	current->thread.ksp_limit = (unsigned long)irqtp +
-		_ALIGN_UP(sizeof(struct thread_info), 16);
-
-	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
-	current->thread.ksp_limit = saved_sp_limit;
-	irqtp->task = NULL;
-
-	/* Set any flag that may have been set on the
-	 * alternate stack
-	 */
-	if (irqtp->flags)
-		set_bits(irqtp->flags, &curtp->flags);
-}
-
 static inline void check_stack_overflow(void)
 {
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
 #endif
 }
 
-void do_IRQ(struct pt_regs *regs)
+void __do_irq(struct pt_regs *regs)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct irq_desc *desc;
 	unsigned int irq;
 
 	irq_enter();
@@ -519,18 +475,56 @@ void do_IRQ(struct pt_regs *regs)
 	 */
 	irq = ppc_md.get_irq();
 
-	/* We can hard enable interrupts now */
+	/* We can hard enable interrupts now to allow perf interrupts */
 	may_hard_irq_enable();
 
 	/* And finally process it */
-	if (irq != NO_IRQ)
-		handle_one_irq(irq);
-	else
+	if (unlikely(irq == NO_IRQ))
 		__get_cpu_var(irq_stat).spurious_irqs++;
+	else {
+		desc = irq_to_desc(irq);
+		if (likely(desc))
+			desc->handle_irq(irq, desc);
+	}
 
 	trace_irq_exit(regs);
 
 	irq_exit();
+}
+
+void do_IRQ(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct thread_info *curtp, *irqtp;
+
+	/* Switch to the irq stack to handle this */
+	curtp = current_thread_info();
+	irqtp = hardirq_ctx[raw_smp_processor_id()];
+
+	/* Already there ? */
+	if (unlikely(curtp == irqtp)) {
+		__do_irq(regs);
+		set_irq_regs(old_regs);
+		return;
+	}
+
+	/* Prepare the thread_info in the irq stack */
+	irqtp->task = curtp->task;
+	irqtp->flags = 0;
+
+	/* Copy the preempt_count so that the [soft]irq checks work. */
+	irqtp->preempt_count = curtp->preempt_count;
+
+	/* Switch stack and call */
+	call_do_irq(regs, irqtp);
+
+	/* Restore stack limit */
+	irqtp->task = NULL;
+
+	/* Copy back updates to the thread_info */
+	if (irqtp->flags)
+		set_bits(irqtp->flags, &curtp->flags);
+
 	set_irq_regs(old_regs);
 }
 
@@ -592,28 +586,22 @@ void irq_ctx_init(void)
 		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
 		tp = softirq_ctx[i];
 		tp->cpu = i;
-		tp->preempt_count = 0;
 
 		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
 		tp = hardirq_ctx[i];
 		tp->cpu = i;
-		tp->preempt_count = HARDIRQ_OFFSET;
 	}
 }
 
 static inline void do_softirq_onstack(void)
 {
 	struct thread_info *curtp, *irqtp;
-	unsigned long saved_sp_limit = current->thread.ksp_limit;
 
 	curtp = current_thread_info();
 	irqtp = softirq_ctx[smp_processor_id()];
 	irqtp->task = curtp->task;
 	irqtp->flags = 0;
-	current->thread.ksp_limit = (unsigned long)irqtp +
-				    _ALIGN_UP(sizeof(struct thread_info), 16);
 	call_do_softirq(irqtp);
-	current->thread.ksp_limit = saved_sp_limit;
 	irqtp->task = NULL;
 
 	/* Set any flag that may have been set on the
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 777d999f563b..2b0ad9845363 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -36,26 +36,41 @@
 
 	.text
 
+/*
+ * We store the saved ksp_limit in the unused part
+ * of the STACK_FRAME_OVERHEAD
+ */
 _GLOBAL(call_do_softirq)
 	mflr	r0
 	stw	r0,4(r1)
+	lwz	r10,THREAD+KSP_LIMIT(r2)
+	addi	r11,r3,THREAD_INFO_GAP
 	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
 	mr	r1,r3
+	stw	r10,8(r1)
+	stw	r11,THREAD+KSP_LIMIT(r2)
 	bl	__do_softirq
+	lwz	r10,8(r1)
 	lwz	r1,0(r1)
 	lwz	r0,4(r1)
+	stw	r10,THREAD+KSP_LIMIT(r2)
 	mtlr	r0
 	blr
 
-_GLOBAL(call_handle_irq)
+_GLOBAL(call_do_irq)
 	mflr	r0
 	stw	r0,4(r1)
-	mtctr	r6
-	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-	mr	r1,r5
-	bctrl
+	lwz	r10,THREAD+KSP_LIMIT(r2)
+	addi	r11,r3,THREAD_INFO_GAP
+	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+	mr	r1,r4
+	stw	r10,8(r1)
+	stw	r11,THREAD+KSP_LIMIT(r2)
+	bl	__do_irq
+	lwz	r10,8(r1)
 	lwz	r1,0(r1)
 	lwz	r0,4(r1)
+	stw	r10,THREAD+KSP_LIMIT(r2)
 	mtlr	r0
 	blr
 
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 971d7e78aff2..e59caf874d05 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
 	mtlr	r0
 	blr
 
-_GLOBAL(call_handle_irq)
-	ld	r8,0(r6)
+_GLOBAL(call_do_irq)
 	mflr	r0
 	std	r0,16(r1)
-	mtctr	r8
-	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-	mr	r1,r5
-	bctrl
+	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+	mr	r1,r4
+	bl	.__do_irq
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6f428da53e20..96d2fdf3aa9e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1000,9 +1000,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	kregs = (struct pt_regs *) sp;
 	sp -= STACK_FRAME_OVERHEAD;
 	p->thread.ksp = sp;
+#ifdef CONFIG_PPC32
 	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
 				_ALIGN_UP(sizeof(struct thread_info), 16);
-
+#endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	p->thread.ptrace_bps[0] = NULL;
 #endif
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 12e656ffe60e..5fe2842e8bab 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
 
 static cell_t __initdata regbuf[1024];
 
+static bool rtas_has_query_cpu_stopped;
+
 
 /*
  * Error results ... some OF calls will return "-1" on error, some
@@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void)
 	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
 		     &val, sizeof(val));
 
+	/* Check if it supports "query-cpu-stopped-state" */
+	if (prom_getprop(rtas_node, "query-cpu-stopped-state",
+			 &val, sizeof(val)) != PROM_ERROR)
+		rtas_has_query_cpu_stopped = true;
+
 #if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
 	/* PowerVN takeover hack */
 	prom_rtas_data = base;
@@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void)
 		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
 	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
 
+	/*
+	 * On pseries, if RTAS supports "query-cpu-stopped-state",
+	 * we skip this stage, the CPUs will be started by the
+	 * kernel using RTAS.
+	 */
+	if ((of_platform == PLATFORM_PSERIES ||
+	     of_platform == PLATFORM_PSERIES_LPAR) &&
+	    rtas_has_query_cpu_stopped) {
+		prom_printf("prom_hold_cpus: skipped\n");
+		return;
+	}
+
 	prom_debug("prom_hold_cpus: start...\n");
 	prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
 	prom_debug("    1) *spinloop      = 0x%x\n", *spinloop);
@@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 	 * On non-powermacs, put all CPUs in spin-loops.
 	 *
 	 * PowerMacs use a different mechanism to spin CPUs
+	 *
+	 * (This must be done after instanciating RTAS)
 	 */
 	if (of_platform != PLATFORM_POWERMAC &&
 	    of_platform != PLATFORM_OPAL)
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index a7ee978fb860..b1faa1593c90 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		 */
 		if ((ra == 1) && !(regs->msr & MSR_PR) \
 			&& (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
+#ifdef CONFIG_PPC32
 			/*
 			 * Check if we will touch kernel sack overflow
 			 */
@@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 				err = -EINVAL;
 				break;
 			}
-
+#endif /* CONFIG_PPC32 */
 			/*
 			 * Check if we already set since that means we'll
 			 * lose the previous value.
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 1c1771a40250..24f58cb0a543 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -233,18 +233,24 @@ static void __init smp_init_pseries(void)
 
 	alloc_bootmem_cpumask_var(&of_spin_mask);
 
-	/* Mark threads which are still spinning in hold loops. */
-	if (cpu_has_feature(CPU_FTR_SMT)) {
-		for_each_present_cpu(i) {
-			if (cpu_thread_in_core(i) == 0)
-				cpumask_set_cpu(i, of_spin_mask);
-		}
-	} else {
-		cpumask_copy(of_spin_mask, cpu_present_mask);
+	/*
+	 * Mark threads which are still spinning in hold loops
+	 *
+	 * We know prom_init will not have started them if RTAS supports
+	 * query-cpu-stopped-state.
+	 */
+	if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
+		if (cpu_has_feature(CPU_FTR_SMT)) {
+			for_each_present_cpu(i) {
+				if (cpu_thread_in_core(i) == 0)
+					cpumask_set_cpu(i, of_spin_mask);
+			}
+		} else
+			cpumask_copy(of_spin_mask, cpu_present_mask);
+
+		cpumask_clear_cpu(boot_cpuid, of_spin_mask);
 	}
 
-	cpumask_clear_cpu(boot_cpuid, of_spin_mask);
-
 	/* Non-lpar has additional take/give timebase */
 	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
 		smp_ops->give_timebase = rtas_give_timebase;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index dcc6ac2d8026..7143793859fa 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -93,6 +93,7 @@ config S390
 	select ARCH_INLINE_WRITE_UNLOCK_IRQ
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS2
@@ -102,7 +103,6 @@ config S390
 	select GENERIC_TIME_VSYSCALL_OLD
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
-	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h
index 688271f5f2e4..458c1f7fbc18 100644
--- a/arch/s390/include/asm/mutex.h
+++ b/arch/s390/include/asm/mutex.h
@@ -7,5 +7,3 @@
  */
 
 #include <asm-generic/mutex-dec.h>
-
-#define arch_mutex_cpu_relax()	barrier()
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 0eb37505cab1..ca7821f07260 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -198,6 +198,8 @@ static inline void cpu_relax(void)
 	barrier();
 }
 
+#define arch_mutex_cpu_relax()	barrier()
+
 static inline void psw_set_key(unsigned int key)
 {
 	asm volatile("spka 0(%0)" : : "d" (key));
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 701fe8c59e1f..83e5d216105e 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -44,6 +44,11 @@ extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 extern int arch_spin_trylock_retry(arch_spinlock_t *);
 extern void arch_spin_relax(arch_spinlock_t *lock);
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.owner_cpu == 0;
+}
+
 static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
 	int old;
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 6aef9fbc09b7..b913915e8e63 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
 	return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
 }
 
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
 {
 	unsigned long pfn;
-	int ret = 0;
+	int ret;
 
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return mfn;
 
-	if (unlikely(mfn >= machine_to_phys_nr)) {
-		pfn = ~0;
-		goto try_override;
-	}
-	pfn = 0;
+	if (unlikely(mfn >= machine_to_phys_nr))
+		return ~0;
+
 	/*
 	 * The array access can fail (e.g., device space beyond end of RAM).
 	 * In such cases it doesn't matter what we return (we return garbage),
 	 * but we must handle the fault without crashing!
 	 */
 	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-try_override:
-	/* ret might be < 0 if there are no entries in the m2p for mfn */
 	if (ret < 0)
-		pfn = ~0;
-	else if (get_phys_to_machine(pfn) != mfn)
+		return ~0;
+
+	return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+	unsigned long pfn;
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return mfn;
+
+	pfn = mfn_to_pfn_no_overrides(mfn);
+	if (get_phys_to_machine(pfn) != mfn) {
 		/*
 		 * If this appears to be a foreign mfn (because the pfn
 		 * doesn't map back to the mfn), then check the local override
@@ -111,6 +119,7 @@ try_override:
 	 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
 	 */
 	pfn = m2p_find_override_pfn(mfn, ~0);
+	}
 
 	/*
 	 * pfn is ~0 if there are no entries in the m2p for mfn or if the
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8355c84b9729..897783b3302a 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1506,7 +1506,7 @@ static int __init init_hw_perf_events(void)
 		err = amd_pmu_init();
 		break;
 	default:
-		return 0;
+		err = -ENOTSUPP;
 	}
 	if (err != 0) {
 		pr_cont("no PMU driver, software events only.\n");
@@ -1883,9 +1883,9 @@ static struct pmu pmu = {
 
 void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
-	userpg->cap_usr_time = 0;
-	userpg->cap_usr_time_zero = 0;
-	userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
+	userpg->cap_user_time = 0;
+	userpg->cap_user_time_zero = 0;
+	userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
 	userpg->pmc_width = x86_pmu.cntval_bits;
 
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
@@ -1894,13 +1894,13 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 		return;
 
-	userpg->cap_usr_time = 1;
+	userpg->cap_user_time = 1;
 	userpg->time_mult = this_cpu_read(cyc2ns);
 	userpg->time_shift = CYC2NS_SCALE_FACTOR;
 	userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
 
 	if (sched_clock_stable && !check_tsc_disabled()) {
-		userpg->cap_usr_time_zero = 1;
+		userpg->cap_user_time_zero = 1;
 		userpg->time_zero = this_cpu_read(cyc2ns_offset);
 	}
 }
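
The renamed cap_user_* bits gate the self-monitoring time fields exported above. A userspace sketch of the documented TSC-to-time conversion, assuming pg points at the event's mmap'ed control page (tsc_to_perf_time is a made-up helper):

#include <linux/perf_event.h>

static __u64 tsc_to_perf_time(volatile struct perf_event_mmap_page *pg,
			      __u64 cyc)
{
	__u64 quot, rem;

	if (!pg->cap_user_time)
		return 0;	/* kernel does not offer the conversion */

	quot = cyc >> pg->time_shift;
	rem = cyc & (((__u64)1 << pg->time_shift) - 1);
	return pg->time_offset + quot * pg->time_mult +
	       ((rem * pg->time_mult) >> pg->time_shift);
}
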
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 9db76c31b3c3..f31a1655d1ff 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2325,6 +2325,7 @@ __init int intel_pmu_init(void)
 		break;
 
 	case 55: /* Atom 22nm "Silvermont" */
+	case 77: /* Avoton "Silvermont" */
 		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 8ed44589b0e4..4118f9f68315 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2706,14 +2706,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
 	box->hrtimer.function = uncore_pmu_hrtimer;
 }
 
-struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
+static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
 {
 	struct intel_uncore_box *box;
 	int i, size;
 
 	size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
 
-	box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
+	box = kzalloc_node(size, GFP_KERNEL, node);
 	if (!box)
 		return NULL;
 
@@ -3031,7 +3031,7 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
 	struct intel_uncore_box *fake_box;
 	int ret = -EINVAL, n;
 
-	fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
+	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
 	if (!fake_box)
 		return -ENOMEM;
 
@@ -3294,7 +3294,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
 	}
 
 	type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
-	box = uncore_alloc_box(type, 0);
+	box = uncore_alloc_box(type, NUMA_NO_NODE);
 	if (!box)
 		return -ENOMEM;
 
@@ -3499,7 +3499,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
 		if (pmu->func_id < 0)
 			pmu->func_id = j;
 
-		box = uncore_alloc_box(type, cpu);
+		box = uncore_alloc_box(type, cpu_to_node(cpu));
 		if (!box)
 			return -ENOMEM;
 
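
Passing a node instead of a CPU lets callers without a meaningful CPU context stop faking one: kzalloc_node() treats NUMA_NO_NODE as "no preference". A minimal sketch (size and cpu are assumed locals):

#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/topology.h>

void *anywhere = kzalloc_node(size, GFP_KERNEL, NUMA_NO_NODE);	/* any node */
void *nearby   = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));	/* near cpu */
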
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7123b5df479d..af99f71aeb7f 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -216,6 +216,7 @@ int apply_microcode_amd(int cpu)
 	/* need to apply patch? */
 	if (rev >= mc_amd->hdr.patch_id) {
 		c->microcode = rev;
+		uci->cpu_sig.rev = rev;
 		return 0;
 	}
 
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 563ed91e6faa..e643e744e4d8 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -352,12 +352,28 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 	},
 	{ /* Handle problems with rebooting on the Precision M6600. */
 		.callback = set_pci_reboot,
-		.ident = "Dell OptiPlex 990",
+		.ident = "Dell Precision M6600",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
 		},
 	},
+	{ /* Handle problems with rebooting on the Dell PowerEdge C6100. */
+		.callback = set_pci_reboot,
+		.ident = "Dell PowerEdge C6100",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+		},
+	},
+	{ /* Some C6100 machines were shipped with vendor being 'Dell'. */
+		.callback = set_pci_reboot,
+		.ident = "Dell PowerEdge C6100",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+		},
+	},
 	{ }
 };
 
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 90f6ed127096..c7e22ab29a5a 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -912,10 +912,13 @@ void __init efi_enter_virtual_mode(void)
 
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
-		if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
-		    md->type != EFI_BOOT_SERVICES_CODE &&
-		    md->type != EFI_BOOT_SERVICES_DATA)
-			continue;
+		if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
+#ifdef CONFIG_X86_64
+			if (md->type != EFI_BOOT_SERVICES_CODE &&
+			    md->type != EFI_BOOT_SERVICES_DATA)
+#endif
+				continue;
+		}
 
 		size = md->num_pages << EFI_PAGE_SHIFT;
 		end = md->phys_addr + size;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8b901e8d782d..a61c7d5811be 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
 	unsigned long uninitialized_var(address);
 	unsigned level;
 	pte_t *ptep = NULL;
-	int ret = 0;
 
 	pfn = page_to_pfn(page);
 	if (!PageHighMem(page)) {
@@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
 	 * frontend pages while they are being shared with the backend,
 	 * because mfn_to_pfn (that ends up being called by GUPF) will
 	 * return the backend pfn rather than the frontend pfn. */
-	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-	if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+	pfn = mfn_to_pfn_no_overrides(mfn);
+	if (get_phys_to_machine(pfn) == mfn)
 		set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
 
 	return 0;
@@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page,
 	unsigned long uninitialized_var(address);
 	unsigned level;
 	pte_t *ptep = NULL;
-	int ret = 0;
 
 	pfn = page_to_pfn(page);
 	mfn = get_phys_to_machine(pfn);
@@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page,
 	 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
 	 * pfn again. */
 	mfn &= ~FOREIGN_FRAME_BIT;
-	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-	if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+	pfn = mfn_to_pfn_no_overrides(mfn);
+	if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
 	    m2p_find_override(mfn) == NULL)
 		set_phys_to_machine(pfn, mfn);
 
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 253f63fceea1..be6b86078957 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu)
 }
 
 
+/*
+ * Our init of PV spinlocks is split into two init functions because we
+ * use both paravirt patching and jump-label patching, and all of this
+ * has to happen before SMP code is invoked.
+ *
+ * The paravirt patching needs to be done _before_ the alternative asm code
+ * is started, otherwise we would not patch the core kernel code.
+ */
 void __init xen_init_spinlocks(void)
 {
 
@@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void)
 		return;
 	}
 
-	static_key_slow_inc(&paravirt_ticketlocks_enabled);
-
 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
 	pv_lock_ops.unlock_kick = xen_unlock_kick;
 }
 
+/*
+ * The jump-label init needs to happen _after_ jump labels are enabled
+ * but before SMP is started, hence the pre-SMP initcall level. We
+ * cannot do it in xen_init_spinlocks, as that runs before jump labels
+ * are activated.
+ */
+static __init int xen_init_spinlocks_jump(void)
+{
+	if (!xen_pvspin)
+		return 0;
+
+	static_key_slow_inc(&paravirt_ticketlocks_enabled);
+	return 0;
+}
+early_initcall(xen_init_spinlocks_jump);
+
 static __init int xen_parse_nopvspin(char *arg)
 {
 	xen_pvspin = false;
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index f40acef80269..a6977e12d574 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -39,6 +39,7 @@
 #include <linux/ipmi.h>
 #include <linux/device.h>
 #include <linux/pnp.h>
+#include <linux/spinlock.h>
 
 MODULE_AUTHOR("Zhao Yakui");
 MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
@@ -57,7 +58,7 @@ struct acpi_ipmi_device {
 	struct list_head head;
 	/* the IPMI request message list */
 	struct list_head tx_msg_list;
-	struct mutex tx_msg_lock;
+	spinlock_t tx_msg_lock;
 	acpi_handle handle;
 	struct pnp_dev *pnp_dev;
 	ipmi_user_t user_interface;
@@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
 	struct kernel_ipmi_msg *msg;
 	struct acpi_ipmi_buffer *buffer;
 	struct acpi_ipmi_device *device;
+	unsigned long flags;
 
 	msg = &tx_msg->tx_message;
 	/*
@@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
 
 	/* Get the msgid */
 	device = tx_msg->device;
-	mutex_lock(&device->tx_msg_lock);
+	spin_lock_irqsave(&device->tx_msg_lock, flags);
 	device->curr_msgid++;
 	tx_msg->tx_msgid = device->curr_msgid;
-	mutex_unlock(&device->tx_msg_lock);
+	spin_unlock_irqrestore(&device->tx_msg_lock, flags);
 }
 
 static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
@@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 	int msg_found = 0;
 	struct acpi_ipmi_msg *tx_msg;
 	struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+	unsigned long flags;
 
 	if (msg->user != ipmi_device->user_interface) {
 		dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
@@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 		ipmi_free_recv_msg(msg);
 		return;
 	}
-	mutex_lock(&ipmi_device->tx_msg_lock);
+	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
 	list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
 		if (msg->msgid == tx_msg->tx_msgid) {
 			msg_found = 1;
@@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 		}
 	}
 
-	mutex_unlock(&ipmi_device->tx_msg_lock);
+	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 	if (!msg_found) {
 		dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
 			"returned.\n", msg->msgid);
@@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
 	struct acpi_ipmi_device *ipmi_device = handler_context;
 	int err, rem_time;
 	acpi_status status;
+	unsigned long flags;
 	/*
 	 * IPMI opregion message.
 	 * IPMI message is firstly written to the BMC and system software
@@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
 		return AE_NO_MEMORY;
 
 	acpi_format_ipmi_msg(tx_msg, address, value);
-	mutex_lock(&ipmi_device->tx_msg_lock);
+	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
 	list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
-	mutex_unlock(&ipmi_device->tx_msg_lock);
+	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 	err = ipmi_request_settime(ipmi_device->user_interface,
 				   &tx_msg->addr,
 				   tx_msg->tx_msgid,
@@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
 	status = AE_OK;
 
 end_label:
-	mutex_lock(&ipmi_device->tx_msg_lock);
+	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
 	list_del(&tx_msg->head);
-	mutex_unlock(&ipmi_device->tx_msg_lock);
+	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 	kfree(tx_msg);
 	return status;
 }
@@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
 
 	INIT_LIST_HEAD(&ipmi_device->head);
 
-	mutex_init(&ipmi_device->tx_msg_lock);
+	spin_lock_init(&ipmi_device->tx_msg_lock);
 	INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
 	ipmi_install_space_handler(ipmi_device);
 
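
The conversion is needed because ipmi_msg_handler() can be invoked in atomic context, where a mutex (which may sleep) is illegal; a lock shared with such a path must be a spinlock taken with IRQs saved. A minimal sketch of the pattern (demo_lock and demo_handler are hypothetical names):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_handler(void)		/* may run in interrupt context */
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* touch the shared tx_msg list here */
	spin_unlock_irqrestore(&demo_lock, flags);
}
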
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fbdb82e70d10..611ce9061dc5 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1121,7 +1121,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver)
 EXPORT_SYMBOL(acpi_bus_register_driver);
 
 /**
- * acpi_bus_unregister_driver - unregisters a driver with the APIC bus
+ * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
  * @driver: driver to unregister
  *
  * Unregisters a driver with the ACPI bus. Searches the namespace for all
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 958ba2a420c3..97f4acb54ad6 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -2,7 +2,7 @@
  * sata_promise.c - Promise SATA
  *
  * Maintained by: Tejun Heo <tj@kernel.org>
- *		  Mikael Pettersson <mikpe@it.uu.se>
+ *		  Mikael Pettersson
  * Please ALWAYS copy linux-ide@vger.kernel.org
  * on emails.
  *
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 3335000be2dc..319c2c594ac6 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2019,7 +2019,7 @@ EXPORT_SYMBOL_GPL(device_move);
  */
 void device_shutdown(void)
 {
-	struct device *dev;
+	struct device *dev, *parent;
 
 	spin_lock(&devices_kset->list_lock);
 	/*
@@ -2036,7 +2036,7 @@ void device_shutdown(void)
 		 * prevent it from being freed because parent's
 		 * lock is to be held
 		 */
-		get_device(dev->parent);
+		parent = get_device(dev->parent);
 		get_device(dev);
 		/*
 		 * Make sure the device is off the kset list, in the
@@ -2046,8 +2046,8 @@ void device_shutdown(void)
 		spin_unlock(&devices_kset->list_lock);
 
 		/* hold lock to avoid race with probe/release */
-		if (dev->parent)
-			device_lock(dev->parent);
+		if (parent)
+			device_lock(parent);
 		device_lock(dev);
 
 		/* Don't allow any more runtime suspends */
@@ -2065,11 +2065,11 @@ void device_shutdown(void)
 		}
 
 		device_unlock(dev);
-		if (dev->parent)
-			device_unlock(dev->parent);
+		if (parent)
+			device_unlock(parent);
 
 		put_device(dev);
-		put_device(dev->parent);
+		put_device(parent);
 
 		spin_lock(&devices_kset->list_lock);
 	}
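
Caching the parent is what makes the reference pairing safe: after put_device(dev) the device may already be freed, so dev->parent must not be dereferenced again. A sketch of the invariant (get_device(NULL) simply returns NULL, and put_device() is NULL-safe):

struct device *parent = get_device(dev->parent);	/* may be NULL */

/* ... work on dev under the cached parent's lock ... */

put_device(dev);	/* dev may be freed here */
put_device(parent);	/* drops the cached reference, never dev->parent */
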
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index d2d95ff5353b..edfa2515bc86 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
 	int err;
 	u32 cp;
 
+	memset(&arg64, 0, sizeof(arg64));
 	err = 0;
 	err |=
 	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
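
The memset matters because the structure is later copied back to userspace: field-by-field initialization leaves compiler-inserted padding holes holding stale kernel stack bytes. A sketch with a hypothetical struct:

struct demo_args {
	u8  flag;	/* 3 bytes of padding typically follow */
	u32 value;
} a;

memset(&a, 0, sizeof(a));	/* clears the padding too */
a.flag = 1;
a.value = 42;
/* copy_to_user(uptr, &a, sizeof(a)) can no longer leak stack bytes */
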
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 639d26b90b91..2b9440384536 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1193,6 +1193,7 @@ out_passthru:
 	ida_pci_info_struct pciinfo;
 
 	if (!arg) return -EINVAL;
+	memset(&pciinfo, 0, sizeof(pciinfo));
 	pciinfo.bus = host->pci_dev->bus->number;
 	pciinfo.dev_fn = host->pci_dev->devfn;
 	pciinfo.board_id = host->board_id;
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 7a7929ba2658..06189e55b4e5 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -142,32 +142,6 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 	return length;
 }
 
-ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
-			  char *buf)
-{
-	struct tpm_chip *chip = dev_get_drvdata(dev);
-	struct tpm_private *priv = TPM_VPRIV(chip);
-	u8 locality = priv->shr->locality;
-
-	return sprintf(buf, "%d\n", locality);
-}
-
-ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
-			   const char *buf, size_t len)
-{
-	struct tpm_chip *chip = dev_get_drvdata(dev);
-	struct tpm_private *priv = TPM_VPRIV(chip);
-	u8 val;
-
-	int rv = kstrtou8(buf, 0, &val);
-	if (rv)
-		return rv;
-
-	priv->shr->locality = val;
-
-	return len;
-}
-
 static const struct file_operations vtpm_ops = {
 	.owner = THIS_MODULE,
 	.llseek = no_llseek,
@@ -188,8 +162,6 @@ static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
 static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
 static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
-static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
-		   tpm_store_locality);
 
 static struct attribute *vtpm_attrs[] = {
 	&dev_attr_pubek.attr,
@@ -202,7 +174,6 @@ static struct attribute *vtpm_attrs[] = {
 	&dev_attr_cancel.attr,
 	&dev_attr_durations.attr,
 	&dev_attr_timeouts.attr,
-	&dev_attr_locality.attr,
 	NULL,
 };
 
@@ -210,8 +181,6 @@ static struct attribute_group vtpm_attr_grp = {
 	.attrs = vtpm_attrs,
 };
 
-#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
-
 static const struct tpm_vendor_specific tpm_vtpm = {
 	.status = vtpm_status,
 	.recv = vtpm_recv,
@@ -224,11 +193,6 @@ static const struct tpm_vendor_specific tpm_vtpm = {
 	.miscdev = {
 		.fops = &vtpm_ops,
 	},
-	.duration = {
-		TPM_LONG_TIMEOUT,
-		TPM_LONG_TIMEOUT,
-		TPM_LONG_TIMEOUT,
-	},
 };
 
 static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 41c69469ce20..971d796e071d 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -26,6 +26,7 @@ config DW_APB_TIMER_OF
 
 config ARMADA_370_XP_TIMER
 	bool
+	select CLKSRC_OF
 
 config ORION_TIMER
 	select CLKSRC_OF
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 37f5325bec95..b9ddd9e3a2f5 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -30,6 +30,9 @@ void __init clocksource_of_init(void)
 	clocksource_of_init_fn init_func;
 
 	for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
+		if (!of_device_is_available(np))
+			continue;
+
 		init_func = match->data;
 		init_func(np);
 	}
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index b9c81b7c3a3b..3a5909c12d42 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
 	ced->name = dev_name(&p->pdev->dev);
 	ced->features = CLOCK_EVT_FEAT_ONESHOT;
 	ced->rating = 200;
-	ced->cpumask = cpumask_of(0);
+	ced->cpumask = cpu_possible_mask;
 	ced->set_next_event = em_sti_clock_event_next;
 	ced->set_mode = em_sti_clock_event_mode;
 
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 5b34768f4d7c..62b0de6a1837 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -428,7 +428,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
 				evt->irq);
 			return -EIO;
 		}
-		irq_set_affinity(evt->irq, cpumask_of(cpu));
 	} else {
 		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
 	}
@@ -449,6 +448,7 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
 				  unsigned long action, void *hcpu)
 {
 	struct mct_clock_event_device *mevt;
+	unsigned int cpu;
 
 	/*
 	 * Grab cpu pointer in each case to avoid spurious
@@ -459,6 +459,12 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
 		mevt = this_cpu_ptr(&percpu_mct_tick);
 		exynos4_local_timer_setup(&mevt->evt);
 		break;
+	case CPU_ONLINE:
+		cpu = (unsigned long)hcpu;
+		if (mct_int_type == MCT_INT_SPI)
+			irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
+					 cpumask_of(cpu));
+		break;
 	case CPU_DYING:
 		mevt = this_cpu_ptr(&percpu_mct_tick);
 		exynos4_local_timer_stop(&mevt->evt);
@@ -500,6 +506,8 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
 					 &percpu_mct_tick);
 		WARN(err, "MCT: can't request IRQ %d (%d)\n",
 		     mct_irqs[MCT_L0_IRQ], err);
+	} else {
+		irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
 	}
 
 	err = register_cpu_notifier(&exynos4_mct_cpu_nb);
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index a1260b4549db..d2c3253e015e 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -986,6 +986,10 @@ static int __init acpi_cpufreq_init(void)
 {
 	int ret;
 
+	/* don't keep reloading if cpufreq_driver exists */
+	if (cpufreq_get_current_driver())
+		return 0;
+
 	if (acpi_disabled)
 		return 0;
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 89b3c52cd5c3..04548f7023af 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1460,6 +1460,9 @@ unsigned int cpufreq_get(unsigned int cpu)
 {
 	unsigned int ret_freq = 0;
 
+	if (cpufreq_disabled() || !cpufreq_driver)
+		return -ENOENT;
+
 	if (!down_read_trylock(&cpufreq_rwsem))
 		return 0;
 
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index d514c152fd1a..be5380ecdcd4 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -457,7 +457,7 @@ err_free_table:
 	opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
 err_put_node:
 	of_node_put(np);
-	dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__);
+	dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index b1f8fc69023f..60e84043aa34 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -707,8 +707,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
 		reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
 		break;
 	case DRM_MODE_DPMS_OFF:
-		/* disable audio and video ports */
-		reg_write(encoder, REG_ENA_AP, 0x00);
+		/* disable video ports */
 		reg_write(encoder, REG_ENA_VP_0, 0x00);
 		reg_write(encoder, REG_ENA_VP_1, 0x00);
 		reg_write(encoder, REG_ENA_VP_2, 0x00);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index df9253d890ee..cdfb9da0e4ce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4800,10 +4800,10 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
 
 	if (!mutex_trylock(&dev->struct_mutex)) {
 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
-			return SHRINK_STOP;
+			return 0;
 
 		if (dev_priv->mm.shrinker_no_lock_stealing)
-			return SHRINK_STOP;
+			return 0;
 
 		unlock = false;
 	}
@@ -4901,10 +4901,10 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
 
 	if (!mutex_trylock(&dev->struct_mutex)) {
 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
-			return 0;
+			return SHRINK_STOP;
 
 		if (dev_priv->mm.shrinker_no_lock_stealing)
-			return 0;
+			return SHRINK_STOP;
 
 		unlock = false;
 	}
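
The swap restores the split-shrinker contract introduced in 3.12: the count callback reports freeable objects (0 on failure), while only the scan callback may return SHRINK_STOP. A sketch with hypothetical demo_* callbacks:

#include <linux/shrinker.h>
#include <linux/spinlock.h>

static unsigned long demo_nr_cached;
static DEFINE_SPINLOCK(demo_lock);

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	/* count callback: 0 means "nothing to shrink"; SHRINK_STOP
	 * is not a valid return here */
	return demo_nr_cached;
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long freed = 0;

	if (!spin_trylock(&demo_lock))
		return SHRINK_STOP;	/* can't make progress now, back off */
	/* ... free up to sc->nr_to_scan objects, counting them in freed ... */
	spin_unlock(&demo_lock);
	return freed;
}
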
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index aba9d7498996..dae364f0028c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -143,8 +143,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
 
 	/* Seek the first printf which is hits start position */
 	if (e->pos < e->start) {
-		len = vsnprintf(NULL, 0, f, args);
-		if (!__i915_error_seek(e, len))
+		va_list tmp;
+
+		va_copy(tmp, args);
+		if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
 			return;
 	}
 
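
va_copy is required here because a va_list already consumed by vsnprintf() is indeterminate and must not be reused. A portable userspace sketch of the measure-then-format rule (formatted_length is a made-up helper; strictly, C also requires the matching va_end, which the kernel hunk can omit only because va_end is a no-op on the architectures this driver targets):

#include <stdarg.h>
#include <stdio.h>

static int formatted_length(const char *fmt, va_list args)
{
	va_list tmp;
	int len;

	va_copy(tmp, args);			/* keep args valid for the caller */
	len = vsnprintf(NULL, 0, fmt, tmp);	/* sizing pass consumes the copy */
	va_end(tmp);
	return len;
}
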
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d8a1d98693e7..e5822e79f912 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4775,6 +4775,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 
 	pipeconf = 0;
 
+	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
+		pipeconf |= PIPECONF_ENABLE;
+
 	if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
 		/* Enable pixel doubling when the dot clock is > 90% of the (display)
 		 * core speed.
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2151d13772b8..79c14e298ba6 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -588,7 +588,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 			DRM_DEBUG_KMS("aux_ch native nack\n");
 			return -EREMOTEIO;
 		case AUX_NATIVE_REPLY_DEFER:
-			udelay(100);
+			/*
+			 * For now, just give more slack to branch devices. We
+			 * could check the DPCD for I2C bit rate capabilities,
+			 * and if available, adjust the interval. We could also
+			 * be more careful with DP-to-Legacy adapters where a
+			 * long legacy cable may force very low I2C bit rates.
+			 */
+			if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+			    DP_DWN_STRM_PORT_PRESENT)
+				usleep_range(500, 600);
+			else
+				usleep_range(300, 400);
 			continue;
 		default:
 			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index f2c6d7909ae2..dd6f84bf6c22 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,6 +916,14 @@ intel_tv_compute_config(struct intel_encoder *encoder,
 	DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
 	pipe_config->pipe_bpp = 8*3;
 
+	/* TV has its own notion of sync and other mode flags, so clear them. */
+	pipe_config->adjusted_mode.flags = 0;
+
+	/*
+	 * FIXME: We don't check whether the input mode is actually what we want
+	 * or whether userspace is doing something stupid.
+	 */
+
 	return true;
 }
 
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
index 5db5bbaedae2..bc7fd11ad8be 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -19,8 +19,6 @@
 #include "msm_drv.h"
 #include "mdp4_kms.h"
 
-#include <mach/iommu.h>
-
 static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
 
 static int mdp4_hw_init(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 008d772384c7..b3a2f1629041 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -18,8 +18,6 @@
 #include "msm_drv.h"
 #include "msm_gpu.h"
 
-#include <mach/iommu.h>
-
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -62,6 +60,8 @@ int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
 	int i, ret;
 
 	for (i = 0; i < cnt; i++) {
+		/* TODO maybe some day msm iommu won't require this hack: */
+		struct device *msm_iommu_get_ctx(const char *ctx_name);
 		struct device *ctx = msm_iommu_get_ctx(names[i]);
 		if (!ctx)
 			continue;
@@ -199,7 +199,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 	 * imx drm driver on iMX5
 	 */
 	dev_err(dev->dev, "failed to load kms\n");
-	ret = PTR_ERR(priv->kms);
+	ret = PTR_ERR(kms);
 	goto fail;
 }
 
@@ -697,7 +697,7 @@ static struct drm_driver msm_driver = {
 	.gem_vm_ops = &vm_ops,
 	.dumb_create = msm_gem_dumb_create,
 	.dumb_map_offset = msm_gem_dumb_map_offset,
-	.dumb_destroy = msm_gem_dumb_destroy,
+	.dumb_destroy = drm_gem_dumb_destroy,
 #ifdef CONFIG_DEBUG_FS
 	.debugfs_init = msm_debugfs_init,
 	.debugfs_cleanup = msm_debugfs_cleanup,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 29eacfa29cfb..2bae46c66a30 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -319,13 +319,6 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
 }
 
-int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-		uint32_t handle)
-{
-	/* No special work needed, drop the reference and see what falls out */
-	return drm_gem_handle_delete(file, handle);
-}
-
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 		uint32_t handle, uint64_t *offset)
 {
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 05ff315e8e9e..b162e98a2953 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -1168,6 +1168,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
 	{ 25000, 30000, RADEON_SCLK_UP }
 };
 
+void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+						     u32 *max_clock)
+{
+	u32 i, clock = 0;
+
+	if ((table == NULL) || (table->count == 0)) {
+		*max_clock = clock;
+		return;
+	}
+
+	for (i = 0; i < table->count; i++) {
+		if (clock < table->entries[i].clk)
+			clock = table->entries[i].clk;
+	}
+	*max_clock = clock;
+}
+
 void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
 					u32 clock, u16 max_voltage, u16 *voltage)
 {
@@ -2080,6 +2097,7 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
 	bool disable_mclk_switching;
 	u32 mclk, sclk;
 	u16 vddc, vddci;
+	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
 
 	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
 	    btc_dpm_vblank_too_short(rdev))
@@ -2121,6 +2139,39 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
 		ps->low.vddci = max_limits->vddci;
 	}
 
+	/* limit clocks to max supported clocks based on voltage dependency tables */
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+							&max_sclk_vddc);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+							&max_mclk_vddci);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+							&max_mclk_vddc);
+
+	if (max_sclk_vddc) {
+		if (ps->low.sclk > max_sclk_vddc)
+			ps->low.sclk = max_sclk_vddc;
+		if (ps->medium.sclk > max_sclk_vddc)
+			ps->medium.sclk = max_sclk_vddc;
+		if (ps->high.sclk > max_sclk_vddc)
+			ps->high.sclk = max_sclk_vddc;
+	}
+	if (max_mclk_vddci) {
+		if (ps->low.mclk > max_mclk_vddci)
+			ps->low.mclk = max_mclk_vddci;
+		if (ps->medium.mclk > max_mclk_vddci)
+			ps->medium.mclk = max_mclk_vddci;
+		if (ps->high.mclk > max_mclk_vddci)
+			ps->high.mclk = max_mclk_vddci;
+	}
+	if (max_mclk_vddc) {
+		if (ps->low.mclk > max_mclk_vddc)
+			ps->low.mclk = max_mclk_vddc;
+		if (ps->medium.mclk > max_mclk_vddc)
+			ps->medium.mclk = max_mclk_vddc;
+		if (ps->high.mclk > max_mclk_vddc)
+			ps->high.mclk = max_mclk_vddc;
+	}
+
 	/* XXX validate the min clocks required for display */
 
 	if (disable_mclk_switching) {
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h
index 1a15e0e41950..3b6f12b7760b 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.h
+++ b/drivers/gpu/drm/radeon/btc_dpm.h
@@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev,
 				   struct rv7xx_pl *pl);
 void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
 					u32 clock, u16 max_voltage, u16 *voltage);
+void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+						     u32 *max_clock);
 void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
 				   u16 max_vddc, u16 max_vddci,
 				   u16 *vddc, u16 *vddci);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 899627443030..51e947a97edf 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -146,6 +146,8 @@ static const struct ci_pt_config_reg didt_config_ci[] =
 };
 
 extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
+extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+							    u32 *max_clock);
 extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
 				       u32 arb_freq_src, u32 arb_freq_dest);
 extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
@@ -712,6 +714,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
 	struct radeon_clock_and_voltage_limits *max_limits;
 	bool disable_mclk_switching;
 	u32 sclk, mclk;
+	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
 	int i;
 
 	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -739,6 +742,29 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
 		}
 	}
 
+	/* limit clocks to max supported clocks based on voltage dependency tables */
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+							&max_sclk_vddc);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+							&max_mclk_vddci);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+							&max_mclk_vddc);
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (max_sclk_vddc) {
+			if (ps->performance_levels[i].sclk > max_sclk_vddc)
+				ps->performance_levels[i].sclk = max_sclk_vddc;
+		}
+		if (max_mclk_vddci) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddci)
+				ps->performance_levels[i].mclk = max_mclk_vddci;
+		}
+		if (max_mclk_vddc) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddc)
+				ps->performance_levels[i].mclk = max_mclk_vddc;
+		}
+	}
+
 	/* XXX validate the min clocks required for display */
 
 	if (disable_mclk_switching) {
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index adbdb6503b05..d02fd1c045d5 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -2845,10 +2845,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
 		rdev->config.cik.tile_config |= (3 << 0);
 		break;
 	}
-	if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
-		rdev->config.cik.tile_config |= 1 << 4;
-	else
-		rdev->config.cik.tile_config |= 0 << 4;
+	rdev->config.cik.tile_config |=
+		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
 	rdev->config.cik.tile_config |=
 		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
 	rdev->config.cik.tile_config |=
@@ -4456,8 +4454,8 @@ static int cik_mc_init(struct radeon_device *rdev)
 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
 	/* size in MB on si */
-	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
-	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
 	si_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
@@ -4735,12 +4733,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
 	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
 	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
 	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
-	char *block = (char *)&mc_client;
+	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 
-	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
 	       protections, vmid, addr,
 	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
-	       block, mc_id);
+	       block, mc_client, mc_id);
 }
 
 /**
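
The ULL suffixes in the VRAM sizing fix a 32-bit overflow: the CONFIG_MEMSIZE register holds the size in MB as a u32, and multiplying it out in 32-bit arithmetic wraps for boards with 4 GB or more. A minimal sketch of the failure:

u32 size_mb = 4096;				/* a 4 GB board */
u64 wrong = size_mb * 1024 * 1024;		/* 32-bit multiply wraps to 0 */
u64 right = size_mb * 1024ULL * 1024ULL;	/* widened first: 4294967296 */
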
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 6c398a456d78..f26339028154 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -787,6 +787,7 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
 	bool disable_mclk_switching;
 	u32 mclk, sclk;
 	u16 vddc, vddci;
+	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
 	int i;
 
 	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -813,6 +814,29 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
 		}
 	}
 
+	/* limit clocks to max supported clocks based on voltage dependency tables */
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+							&max_sclk_vddc);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+							&max_mclk_vddci);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+							&max_mclk_vddc);
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (max_sclk_vddc) {
+			if (ps->performance_levels[i].sclk > max_sclk_vddc)
+				ps->performance_levels[i].sclk = max_sclk_vddc;
+		}
+		if (max_mclk_vddci) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddci)
+				ps->performance_levels[i].mclk = max_mclk_vddci;
+		}
+		if (max_mclk_vddc) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddc)
+				ps->performance_levels[i].mclk = max_mclk_vddc;
+		}
+	}
+
 	/* XXX validate the min clocks required for display */
 
 	if (disable_mclk_switching) {
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 24175717307b..d71333033b2b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2933,9 +2933,11 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
 	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
 	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
 	seq_printf(m, "%u dwords in ring\n", count);
-	for (j = 0; j <= count; j++) {
-		i = (rdp + j) & ring->ptr_mask;
-		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+	if (ring->ready) {
+		for (j = 0; j <= count; j++) {
+			i = (rdp + j) & ring->ptr_mask;
+			seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+		}
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index e65f211a7be0..5513d8f06252 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -1084,7 +1084,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
 		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
 			le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
 		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
-			le16_to_cpu(limits->entries[i].usVoltage);
+			le16_to_cpu(entry->usVoltage);
 		entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
 			((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
 	}
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f443010ce90b..b0fa6002af3e 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -257,10 +257,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 	 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
 	 */
-	if (ASIC_IS_DCE3(rdev)) {
-		/* according to the reg specs, this should DCE3.2 only, but in
-		 * practice it seems to cover DCE3.0 as well.
-		 */
+	if (ASIC_IS_DCE32(rdev)) {
 		if (dig->dig_encoder == 0) {
 			dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
 			dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
@@ -276,8 +273,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 			WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
 			WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
 		}
+	} else if (ASIC_IS_DCE3(rdev)) {
+		/* according to the reg specs, this should be DCE3.2 only, but
+		 * in practice it seems to cover DCE3.0/3.1 as well.
+		 */
+		if (dig->dig_encoder == 0) {
+			WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+			WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+			WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+		} else {
+			WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
+			WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+			WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+		}
 	} else {
-		/* according to the reg specs, this should be DCE2.0 and DCE3.0 */
+		/* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
 		WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
 		       AUDIO_DTO_MODULE(clock / 10));
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 5003385a7512..8f7e04538fd6 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1004,6 +1004,8 @@ static struct radeon_asic rv6xx_asic = {
 		.wait_for_vblank = &avivo_wait_for_vblank,
 		.set_backlight_level = &atombios_set_backlight_level,
 		.get_backlight_level = &atombios_get_backlight_level,
+		.hdmi_enable = &r600_hdmi_enable,
+		.hdmi_setmode = &r600_hdmi_setmode,
 	},
 	.copy = {
 		.blit = &r600_copy_cpdma,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 404e25d285ba..f79ee184ffd5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1367,6 +1367,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
1367 int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); 1367 int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
1368 uint16_t data_offset, size; 1368 uint16_t data_offset, size;
1369 struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; 1369 struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
1370 struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign;
1370 uint8_t frev, crev; 1371 uint8_t frev, crev;
1371 int i, num_indices; 1372 int i, num_indices;
1372 1373
@@ -1378,18 +1379,21 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
1378 1379
1379 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1380 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1380 sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); 1381 sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
1381 1382 ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
1383 ((u8 *)&ss_info->asSS_Info[0]);
1382 for (i = 0; i < num_indices; i++) { 1384 for (i = 0; i < num_indices; i++) {
1383 if (ss_info->asSS_Info[i].ucSS_Id == id) { 1385 if (ss_assign->ucSS_Id == id) {
1384 ss->percentage = 1386 ss->percentage =
1385 le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); 1387 le16_to_cpu(ss_assign->usSpreadSpectrumPercentage);
1386 ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType; 1388 ss->type = ss_assign->ucSpreadSpectrumType;
1387 ss->step = ss_info->asSS_Info[i].ucSS_Step; 1389 ss->step = ss_assign->ucSS_Step;
1388 ss->delay = ss_info->asSS_Info[i].ucSS_Delay; 1390 ss->delay = ss_assign->ucSS_Delay;
1389 ss->range = ss_info->asSS_Info[i].ucSS_Range; 1391 ss->range = ss_assign->ucSS_Range;
1390 ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; 1392 ss->refdiv = ss_assign->ucRecommendedRef_Div;
1391 return true; 1393 return true;
1392 } 1394 }
1395 ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
1396 ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
1393 } 1397 }
1394 } 1398 }
1395 return false; 1399 return false;
@@ -1477,6 +1481,12 @@ union asic_ss_info {
1477 struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3; 1481 struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
1478}; 1482};
1479 1483
1484union asic_ss_assignment {
1485 struct _ATOM_ASIC_SS_ASSIGNMENT v1;
1486 struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
1487 struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
1488};
1489
1480bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, 1490bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1481 struct radeon_atom_ss *ss, 1491 struct radeon_atom_ss *ss,
1482 int id, u32 clock) 1492 int id, u32 clock)
@@ -1485,6 +1495,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1485 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); 1495 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
1486 uint16_t data_offset, size; 1496 uint16_t data_offset, size;
1487 union asic_ss_info *ss_info; 1497 union asic_ss_info *ss_info;
1498 union asic_ss_assignment *ss_assign;
1488 uint8_t frev, crev; 1499 uint8_t frev, crev;
1489 int i, num_indices; 1500 int i, num_indices;
1490 1501
@@ -1509,45 +1520,52 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1509 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1520 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1510 sizeof(ATOM_ASIC_SS_ASSIGNMENT); 1521 sizeof(ATOM_ASIC_SS_ASSIGNMENT);
1511 1522
1523 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
1512 for (i = 0; i < num_indices; i++) { 1524 for (i = 0; i < num_indices; i++) {
1513 if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && 1525 if ((ss_assign->v1.ucClockIndication == id) &&
1514 (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) { 1526 (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
1515 ss->percentage = 1527 ss->percentage =
1516 le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1528 le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
1517 ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1529 ss->type = ss_assign->v1.ucSpreadSpectrumMode;
1518 ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz); 1530 ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
1519 return true; 1531 return true;
1520 } 1532 }
1533 ss_assign = (union asic_ss_assignment *)
1534 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
1521 } 1535 }
1522 break; 1536 break;
1523 case 2: 1537 case 2:
1524 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1538 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1525 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); 1539 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
1540 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
1526 for (i = 0; i < num_indices; i++) { 1541 for (i = 0; i < num_indices; i++) {
1527 if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && 1542 if ((ss_assign->v2.ucClockIndication == id) &&
1528 (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) { 1543 (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
1529 ss->percentage = 1544 ss->percentage =
1530 le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1545 le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
1531 ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1546 ss->type = ss_assign->v2.ucSpreadSpectrumMode;
1532 ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); 1547 ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
1533 if ((crev == 2) && 1548 if ((crev == 2) &&
1534 ((id == ASIC_INTERNAL_ENGINE_SS) || 1549 ((id == ASIC_INTERNAL_ENGINE_SS) ||
1535 (id == ASIC_INTERNAL_MEMORY_SS))) 1550 (id == ASIC_INTERNAL_MEMORY_SS)))
1536 ss->rate /= 100; 1551 ss->rate /= 100;
1537 return true; 1552 return true;
1538 } 1553 }
1554 ss_assign = (union asic_ss_assignment *)
1555 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
1539 } 1556 }
1540 break; 1557 break;
1541 case 3: 1558 case 3:
1542 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1559 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1543 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); 1560 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
1561 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
1544 for (i = 0; i < num_indices; i++) { 1562 for (i = 0; i < num_indices; i++) {
1545 if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && 1563 if ((ss_assign->v3.ucClockIndication == id) &&
1546 (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) { 1564 (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
1547 ss->percentage = 1565 ss->percentage =
1548 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1566 le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
1549 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1567 ss->type = ss_assign->v3.ucSpreadSpectrumMode;
1550 ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); 1568 ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
1551 if ((id == ASIC_INTERNAL_ENGINE_SS) || 1569 if ((id == ASIC_INTERNAL_ENGINE_SS) ||
1552 (id == ASIC_INTERNAL_MEMORY_SS)) 1570 (id == ASIC_INTERNAL_MEMORY_SS))
1553 ss->rate /= 100; 1571 ss->rate /= 100;
@@ -1555,6 +1573,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1555 radeon_atombios_get_igp_ss_overrides(rdev, ss, id); 1573 radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
1556 return true; 1574 return true;
1557 } 1575 }
1576 ss_assign = (union asic_ss_assignment *)
1577 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
1558 } 1578 }
1559 break; 1579 break;
1560 default: 1580 default:
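The fixed hunks above stop indexing the AtomBIOS tables as C arrays and instead advance a byte pointer by the revision's real entry size, since the declared element type need not match the on-disk stride. A standalone sketch of that walking pattern, with an invented record layout standing in for the ATOM_*_ASSIGNMENT entries:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* The table is a packed run of fixed-size records; we step a byte
 * pointer by entry_size rather than trusting pointer arithmetic on
 * the struct type.
 */
struct rec { uint8_t id; uint16_t val; };

static const struct rec *find_rec(const void *table, int n,
				  size_t entry_size, uint8_t id)
{
	const uint8_t *p = table;
	int i;

	for (i = 0; i < n; i++) {
		const struct rec *r = (const struct rec *)p;
		if (r->id == id)
			return r;
		p += entry_size;   /* stride chosen by table revision */
	}
	return NULL;
}

int main(void)
{
	struct rec t[3] = { {1, 10}, {2, 20}, {3, 30} };
	const struct rec *r = find_rec(t, 3, sizeof(struct rec), 2);

	printf("id 2 -> %u\n", r ? (unsigned)r->val : 0);
	return 0;
}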
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ac6ece61a476..66c222836631 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -85,8 +85,9 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
85 VRAM, also put everything into VRAM on AGP cards to avoid 85 VRAM, also put everything into VRAM on AGP cards to avoid
86 image corruptions */ 86 image corruptions */
87 if (p->ring == R600_RING_TYPE_UVD_INDEX && 87 if (p->ring == R600_RING_TYPE_UVD_INDEX &&
88 (i == 0 || p->rdev->flags & RADEON_IS_AGP)) { 88 p->rdev->family < CHIP_PALM &&
89 /* TODO: is this still needed for NI+ ? */ 89 (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
90
90 p->relocs[i].lobj.domain = 91 p->relocs[i].lobj.domain =
91 RADEON_GEM_DOMAIN_VRAM; 92 RADEON_GEM_DOMAIN_VRAM;
92 93
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e29faa73b574..841d0e09be3e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1320,13 +1320,22 @@ int radeon_device_init(struct radeon_device *rdev,
1320 return r; 1320 return r;
1321 } 1321 }
1322 if ((radeon_testing & 1)) { 1322 if ((radeon_testing & 1)) {
1323 radeon_test_moves(rdev); 1323 if (rdev->accel_working)
1324 radeon_test_moves(rdev);
1325 else
1326 DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1324 } 1327 }
1325 if ((radeon_testing & 2)) { 1328 if ((radeon_testing & 2)) {
1326 radeon_test_syncing(rdev); 1329 if (rdev->accel_working)
1330 radeon_test_syncing(rdev);
1331 else
1332 DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1327 } 1333 }
1328 if (radeon_benchmarking) { 1334 if (radeon_benchmarking) {
1329 radeon_benchmark(rdev, radeon_benchmarking); 1335 if (rdev->accel_working)
1336 radeon_benchmark(rdev, radeon_benchmarking);
1337 else
1338 DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1330 } 1339 }
1331 return 0; 1340 return 0;
1332} 1341}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 87e1d69e8fdb..ac07ad1d4f8c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1002,7 +1002,7 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
1002{ 1002{
1003 /* set up the default clocks if the MC ucode is loaded */ 1003 /* set up the default clocks if the MC ucode is loaded */
1004 if ((rdev->family >= CHIP_BARTS) && 1004 if ((rdev->family >= CHIP_BARTS) &&
1005 (rdev->family <= CHIP_HAINAN) && 1005 (rdev->family <= CHIP_CAYMAN) &&
1006 rdev->mc_fw) { 1006 rdev->mc_fw) {
1007 if (rdev->pm.default_vddc) 1007 if (rdev->pm.default_vddc)
1008 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1008 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1046,7 +1046,7 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
1046 if (ret) { 1046 if (ret) {
1047 DRM_ERROR("radeon: dpm resume failed\n"); 1047 DRM_ERROR("radeon: dpm resume failed\n");
1048 if ((rdev->family >= CHIP_BARTS) && 1048 if ((rdev->family >= CHIP_BARTS) &&
1049 (rdev->family <= CHIP_HAINAN) && 1049 (rdev->family <= CHIP_CAYMAN) &&
1050 rdev->mc_fw) { 1050 rdev->mc_fw) {
1051 if (rdev->pm.default_vddc) 1051 if (rdev->pm.default_vddc)
1052 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1052 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1097,7 +1097,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
1097 radeon_pm_init_profile(rdev); 1097 radeon_pm_init_profile(rdev);
1098 /* set up the default clocks if the MC ucode is loaded */ 1098 /* set up the default clocks if the MC ucode is loaded */
1099 if ((rdev->family >= CHIP_BARTS) && 1099 if ((rdev->family >= CHIP_BARTS) &&
1100 (rdev->family <= CHIP_HAINAN) && 1100 (rdev->family <= CHIP_CAYMAN) &&
1101 rdev->mc_fw) { 1101 rdev->mc_fw) {
1102 if (rdev->pm.default_vddc) 1102 if (rdev->pm.default_vddc)
1103 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1103 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1183,7 +1183,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
1183 if (ret) { 1183 if (ret) {
1184 rdev->pm.dpm_enabled = false; 1184 rdev->pm.dpm_enabled = false;
1185 if ((rdev->family >= CHIP_BARTS) && 1185 if ((rdev->family >= CHIP_BARTS) &&
1186 (rdev->family <= CHIP_HAINAN) && 1186 (rdev->family <= CHIP_CAYMAN) &&
1187 rdev->mc_fw) { 1187 rdev->mc_fw) {
1188 if (rdev->pm.default_vddc) 1188 if (rdev->pm.default_vddc)
1189 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1189 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 46a25f037b84..18254e1c3e71 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -839,9 +839,11 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
839 * packet that is the root issue 839 * packet that is the root issue
840 */ 840 */
841 i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; 841 i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
842 for (j = 0; j <= (count + 32); j++) { 842 if (ring->ready) {
843 seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]); 843 for (j = 0; j <= (count + 32); j++) {
844 i = (i + 1) & ring->ptr_mask; 844 seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
845 i = (i + 1) & ring->ptr_mask;
846 }
845 } 847 }
846 return 0; 848 return 0;
847} 849}
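The dump loop above relies on power-of-two ring indexing: with ptr_mask = size - 1, (i + 1) & ptr_mask wraps for free, and rptr + ptr_mask + 1 - 32 backs up 32 slots without ever going negative before masking. A standalone sketch (ring size invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t size = 256, ptr_mask = size - 1;
	uint32_t rptr = 5;
	int j;

	/* start 32 entries before the read pointer, wrapping safely */
	uint32_t i = (rptr + ptr_mask + 1 - 32) & ptr_mask;

	for (j = 0; j < 8; j++) {
		printf("slot %u\n", i);
		i = (i + 1) & ptr_mask;   /* cheap modulo for 2^k sizes */
	}
	return 0;
}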
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 1a01bbff9bfa..a0f11856ddde 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -476,8 +476,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
476 return -EINVAL; 476 return -EINVAL;
477 } 477 }
478 478
479 /* TODO: is this still necessary on NI+ ? */ 479 if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) &&
480 if ((cmd == 0 || cmd == 0x3) &&
481 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { 480 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
482 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", 481 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
483 start, end); 482 start, end);
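The check above enforces that UVD message/feedback buffers live in the same 256MB segment as the UVD base address on pre-PALM parts. Two addresses share a segment exactly when they agree from bit 28 upward, since 2^28 bytes is 256MB; a small sketch with a made-up base address:

#include <stdint.h>
#include <stdio.h>

static int same_256mb_segment(uint64_t a, uint64_t b)
{
	return (a >> 28) == (b >> 28);   /* 2^28 = 256MB window */
}

int main(void)
{
	uint64_t gpu_addr = 0x10000000;  /* hypothetical base */

	printf("%d\n", same_256mb_segment(gpu_addr, gpu_addr + 0x00800000)); /* 1 */
	printf("%d\n", same_256mb_segment(gpu_addr, gpu_addr + 0x10000000)); /* 0 */
	return 0;
}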
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index cfe5d4d28915..9ace28702c76 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2910,6 +2910,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2910 bool disable_sclk_switching = false; 2910 bool disable_sclk_switching = false;
2911 u32 mclk, sclk; 2911 u32 mclk, sclk;
2912 u16 vddc, vddci; 2912 u16 vddc, vddci;
2913 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
2913 int i; 2914 int i;
2914 2915
2915 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 2916 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -2943,6 +2944,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2943 } 2944 }
2944 } 2945 }
2945 2946
2947 /* limit clocks to max supported clocks based on voltage dependency tables */
2948 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2949 &max_sclk_vddc);
2950 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2951 &max_mclk_vddci);
2952 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2953 &max_mclk_vddc);
2954
2955 for (i = 0; i < ps->performance_level_count; i++) {
2956 if (max_sclk_vddc) {
2957 if (ps->performance_levels[i].sclk > max_sclk_vddc)
2958 ps->performance_levels[i].sclk = max_sclk_vddc;
2959 }
2960 if (max_mclk_vddci) {
2961 if (ps->performance_levels[i].mclk > max_mclk_vddci)
2962 ps->performance_levels[i].mclk = max_mclk_vddci;
2963 }
2964 if (max_mclk_vddc) {
2965 if (ps->performance_levels[i].mclk > max_mclk_vddc)
2966 ps->performance_levels[i].mclk = max_mclk_vddc;
2967 }
2968 }
2969
2946 /* XXX validate the min clocks required for display */ 2970 /* XXX validate the min clocks required for display */
2947 2971
2948 if (disable_mclk_switching) { 2972 if (disable_mclk_switching) {
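The new block clamps each performance level to the highest clock the voltage dependency tables support. Assuming, as the helper's name suggests, that btc_get_max_clock_from_voltage_dependency_table() yields the table's largest clock, the logic reduces to a clamp loop; a standalone sketch with invented table values:

#include <stdint.h>
#include <stdio.h>

struct dep_entry { uint32_t clk; uint16_t v; };

static uint32_t max_clock(const struct dep_entry *e, int n)
{
	return n ? e[n - 1].clk : 0;   /* table assumed sorted ascending */
}

int main(void)
{
	struct dep_entry vddc_sclk[] = { {30000, 900}, {60000, 1000}, {80000, 1100} };
	uint32_t levels[] = { 30000, 70000, 95000 };
	uint32_t max_sclk = max_clock(vddc_sclk, 3);
	int i;

	for (i = 0; i < 3; i++) {
		if (max_sclk && levels[i] > max_sclk)
			levels[i] = max_sclk;   /* cap at supported clock */
		printf("level %d: %u\n", i, levels[i]);
	}
	return 0;
}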
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 7266805d9786..3100fa9cb52f 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev)
212 /* enable VCPU clock */ 212 /* enable VCPU clock */
213 WREG32(UVD_VCPU_CNTL, 1 << 9); 213 WREG32(UVD_VCPU_CNTL, 1 << 9);
214 214
215 /* enable UMC */ 215 /* enable UMC and NC0 */
216 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); 216 WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)));
217 217
218 /* boot up the VCPU */ 218 /* boot up the VCPU */
219 WREG32(UVD_SOFT_RESET, 0); 219 WREG32(UVD_SOFT_RESET, 0);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 8f4743ab5fb2..936093e0271e 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -195,7 +195,7 @@ int vmbus_connect(void)
195 195
196 do { 196 do {
197 ret = vmbus_negotiate_version(msginfo, version); 197 ret = vmbus_negotiate_version(msginfo, version);
198 if (ret) 198 if (ret == -ETIMEDOUT)
199 goto cleanup; 199 goto cleanup;
200 200
201 if (vmbus_connection.conn_state == CONNECTED) 201 if (vmbus_connection.conn_state == CONNECTED)
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 28b03325b872..09988b289622 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -32,13 +32,17 @@
32/* 32/*
33 * Pre-win8 version numbers used in WS2008 and WS2008 R2 (Win7) 33 * Pre-win8 version numbers used in WS2008 and WS2008 R2 (Win7)
34 */ 34 */
35#define WS2008_SRV_MAJOR 1
36#define WS2008_SRV_MINOR 0
37#define WS2008_SRV_VERSION (WS2008_SRV_MAJOR << 16 | WS2008_SRV_MINOR)
38
35#define WIN7_SRV_MAJOR 3 39#define WIN7_SRV_MAJOR 3
36#define WIN7_SRV_MINOR 0 40#define WIN7_SRV_MINOR 0
37#define WIN7_SRV_MAJOR_MINOR (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR) 41#define WIN7_SRV_VERSION (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
38 42
39#define WIN8_SRV_MAJOR 4 43#define WIN8_SRV_MAJOR 4
40#define WIN8_SRV_MINOR 0 44#define WIN8_SRV_MINOR 0
41#define WIN8_SRV_MAJOR_MINOR (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR) 45#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
42 46
43/* 47/*
44 * Global state maintained for transaction that is being processed. 48 * Global state maintained for transaction that is being processed.
@@ -587,6 +591,8 @@ void hv_kvp_onchannelcallback(void *context)
587 591
588 struct icmsg_hdr *icmsghdrp; 592 struct icmsg_hdr *icmsghdrp;
589 struct icmsg_negotiate *negop = NULL; 593 struct icmsg_negotiate *negop = NULL;
594 int util_fw_version;
595 int kvp_srv_version;
590 596
591 if (kvp_transaction.active) { 597 if (kvp_transaction.active) {
592 /* 598 /*
@@ -606,17 +612,26 @@ void hv_kvp_onchannelcallback(void *context)
606 612
607 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 613 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
608 /* 614 /*
609 * We start with win8 version and if the host cannot 615 * Based on the host, select appropriate
610 * support that we use the previous version. 616 * framework and service versions we will
617 * negotiate.
611 */ 618 */
612 if (vmbus_prep_negotiate_resp(icmsghdrp, negop, 619 switch (vmbus_proto_version) {
613 recv_buffer, UTIL_FW_MAJOR_MINOR, 620 case (VERSION_WS2008):
614 WIN8_SRV_MAJOR_MINOR)) 621 util_fw_version = UTIL_WS2K8_FW_VERSION;
615 goto done; 622 kvp_srv_version = WS2008_SRV_VERSION;
616 623 break;
624 case (VERSION_WIN7):
625 util_fw_version = UTIL_FW_VERSION;
626 kvp_srv_version = WIN7_SRV_VERSION;
627 break;
628 default:
629 util_fw_version = UTIL_FW_VERSION;
630 kvp_srv_version = WIN8_SRV_VERSION;
631 }
617 vmbus_prep_negotiate_resp(icmsghdrp, negop, 632 vmbus_prep_negotiate_resp(icmsghdrp, negop,
618 recv_buffer, UTIL_FW_MAJOR_MINOR, 633 recv_buffer, util_fw_version,
619 WIN7_SRV_MAJOR_MINOR); 634 kvp_srv_version);
620 635
621 } else { 636 } else {
622 kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ 637 kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
@@ -649,7 +664,6 @@ void hv_kvp_onchannelcallback(void *context)
649 return; 664 return;
650 665
651 } 666 }
652done:
653 667
654 icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION 668 icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
655 | ICMSGHDRFLAG_RESPONSE; 669 | ICMSGHDRFLAG_RESPONSE;
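The renamed *_VERSION macros pack major and minor into one 32-bit word, major in the high 16 bits, and the new switch keys the negotiated service version off the host protocol. A standalone sketch of both ideas (the version numbers mirror the macros above; the host enum is a placeholder, not the real VMBus protocol constants):

#include <stdint.h>
#include <stdio.h>

#define MAKE_VERSION(maj, min)  ((uint32_t)(maj) << 16 | (min))

enum host { HOST_WS2008, HOST_WIN7, HOST_WIN8 };

static uint32_t kvp_version_for(enum host h)
{
	switch (h) {
	case HOST_WS2008: return MAKE_VERSION(1, 0);  /* WS2008_SRV_VERSION */
	case HOST_WIN7:   return MAKE_VERSION(3, 0);  /* WIN7_SRV_VERSION */
	default:          return MAKE_VERSION(4, 0);  /* WIN8_SRV_VERSION */
	}
}

int main(void)
{
	uint32_t v = kvp_version_for(HOST_WIN7);

	printf("negotiating %u.%u\n", v >> 16, v & 0xffff);
	return 0;
}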
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index e4572f3f2834..0c3546224376 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -26,7 +26,7 @@
26 26
27#define VSS_MAJOR 5 27#define VSS_MAJOR 5
28#define VSS_MINOR 0 28#define VSS_MINOR 0
29#define VSS_MAJOR_MINOR (VSS_MAJOR << 16 | VSS_MINOR) 29#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
30 30
31 31
32 32
@@ -190,8 +190,8 @@ void hv_vss_onchannelcallback(void *context)
190 190
191 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 191 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
192 vmbus_prep_negotiate_resp(icmsghdrp, negop, 192 vmbus_prep_negotiate_resp(icmsghdrp, negop,
193 recv_buffer, UTIL_FW_MAJOR_MINOR, 193 recv_buffer, UTIL_FW_VERSION,
194 VSS_MAJOR_MINOR); 194 VSS_VERSION);
195 } else { 195 } else {
196 vss_msg = (struct hv_vss_msg *)&recv_buffer[ 196 vss_msg = (struct hv_vss_msg *)&recv_buffer[
197 sizeof(struct vmbuspipe_hdr) + 197 sizeof(struct vmbuspipe_hdr) +
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index cb82233541b1..273e3ddb3a20 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -28,17 +28,32 @@
28#include <linux/reboot.h> 28#include <linux/reboot.h>
29#include <linux/hyperv.h> 29#include <linux/hyperv.h>
30 30
31#define SHUTDOWN_MAJOR 3
32#define SHUTDOWN_MINOR 0
33#define SHUTDOWN_MAJOR_MINOR (SHUTDOWN_MAJOR << 16 | SHUTDOWN_MINOR)
34 31
35#define TIMESYNCH_MAJOR 3 32#define SD_MAJOR 3
36#define TIMESYNCH_MINOR 0 33#define SD_MINOR 0
37#define TIMESYNCH_MAJOR_MINOR (TIMESYNCH_MAJOR << 16 | TIMESYNCH_MINOR) 34#define SD_VERSION (SD_MAJOR << 16 | SD_MINOR)
38 35
39#define HEARTBEAT_MAJOR 3 36#define SD_WS2008_MAJOR 1
40#define HEARTBEAT_MINOR 0 37#define SD_WS2008_VERSION (SD_WS2008_MAJOR << 16 | SD_MINOR)
41#define HEARTBEAT_MAJOR_MINOR (HEARTBEAT_MAJOR << 16 | HEARTBEAT_MINOR) 38
39#define TS_MAJOR 3
40#define TS_MINOR 0
41#define TS_VERSION (TS_MAJOR << 16 | TS_MINOR)
42
43#define TS_WS2008_MAJOR 1
44#define TS_WS2008_VERSION (TS_WS2008_MAJOR << 16 | TS_MINOR)
45
46#define HB_MAJOR 3
47#define HB_MINOR 0
48#define HB_VERSION (HB_MAJOR << 16 | HB_MINOR)
49
50#define HB_WS2008_MAJOR 1
51#define HB_WS2008_VERSION (HB_WS2008_MAJOR << 16 | HB_MINOR)
52
53static int sd_srv_version;
54static int ts_srv_version;
55static int hb_srv_version;
56static int util_fw_version;
42 57
43static void shutdown_onchannelcallback(void *context); 58static void shutdown_onchannelcallback(void *context);
44static struct hv_util_service util_shutdown = { 59static struct hv_util_service util_shutdown = {
@@ -99,8 +114,8 @@ static void shutdown_onchannelcallback(void *context)
99 114
100 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 115 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
101 vmbus_prep_negotiate_resp(icmsghdrp, negop, 116 vmbus_prep_negotiate_resp(icmsghdrp, negop,
102 shut_txf_buf, UTIL_FW_MAJOR_MINOR, 117 shut_txf_buf, util_fw_version,
103 SHUTDOWN_MAJOR_MINOR); 118 sd_srv_version);
104 } else { 119 } else {
105 shutdown_msg = 120 shutdown_msg =
106 (struct shutdown_msg_data *)&shut_txf_buf[ 121 (struct shutdown_msg_data *)&shut_txf_buf[
@@ -216,6 +231,7 @@ static void timesync_onchannelcallback(void *context)
216 struct icmsg_hdr *icmsghdrp; 231 struct icmsg_hdr *icmsghdrp;
217 struct ictimesync_data *timedatap; 232 struct ictimesync_data *timedatap;
218 u8 *time_txf_buf = util_timesynch.recv_buffer; 233 u8 *time_txf_buf = util_timesynch.recv_buffer;
234 struct icmsg_negotiate *negop = NULL;
219 235
220 vmbus_recvpacket(channel, time_txf_buf, 236 vmbus_recvpacket(channel, time_txf_buf,
221 PAGE_SIZE, &recvlen, &requestid); 237 PAGE_SIZE, &recvlen, &requestid);
@@ -225,9 +241,10 @@ static void timesync_onchannelcallback(void *context)
225 sizeof(struct vmbuspipe_hdr)]; 241 sizeof(struct vmbuspipe_hdr)];
226 242
227 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 243 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
228 vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf, 244 vmbus_prep_negotiate_resp(icmsghdrp, negop,
229 UTIL_FW_MAJOR_MINOR, 245 time_txf_buf,
230 TIMESYNCH_MAJOR_MINOR); 246 util_fw_version,
247 ts_srv_version);
231 } else { 248 } else {
232 timedatap = (struct ictimesync_data *)&time_txf_buf[ 249 timedatap = (struct ictimesync_data *)&time_txf_buf[
233 sizeof(struct vmbuspipe_hdr) + 250 sizeof(struct vmbuspipe_hdr) +
@@ -257,6 +274,7 @@ static void heartbeat_onchannelcallback(void *context)
257 struct icmsg_hdr *icmsghdrp; 274 struct icmsg_hdr *icmsghdrp;
258 struct heartbeat_msg_data *heartbeat_msg; 275 struct heartbeat_msg_data *heartbeat_msg;
259 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer; 276 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
277 struct icmsg_negotiate *negop = NULL;
260 278
261 vmbus_recvpacket(channel, hbeat_txf_buf, 279 vmbus_recvpacket(channel, hbeat_txf_buf,
262 PAGE_SIZE, &recvlen, &requestid); 280 PAGE_SIZE, &recvlen, &requestid);
@@ -266,9 +284,9 @@ static void heartbeat_onchannelcallback(void *context)
266 sizeof(struct vmbuspipe_hdr)]; 284 sizeof(struct vmbuspipe_hdr)];
267 285
268 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 286 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
269 vmbus_prep_negotiate_resp(icmsghdrp, NULL, 287 vmbus_prep_negotiate_resp(icmsghdrp, negop,
270 hbeat_txf_buf, UTIL_FW_MAJOR_MINOR, 288 hbeat_txf_buf, util_fw_version,
271 HEARTBEAT_MAJOR_MINOR); 289 hb_srv_version);
272 } else { 290 } else {
273 heartbeat_msg = 291 heartbeat_msg =
274 (struct heartbeat_msg_data *)&hbeat_txf_buf[ 292 (struct heartbeat_msg_data *)&hbeat_txf_buf[
@@ -321,6 +339,25 @@ static int util_probe(struct hv_device *dev,
321 goto error; 339 goto error;
322 340
323 hv_set_drvdata(dev, srv); 341 hv_set_drvdata(dev, srv);
342 /*
343 * Based on the host, initialize the framework and
344 * service version numbers we will negotiate.
345 */
346 switch (vmbus_proto_version) {
347 case (VERSION_WS2008):
348 util_fw_version = UTIL_WS2K8_FW_VERSION;
349 sd_srv_version = SD_WS2008_VERSION;
350 ts_srv_version = TS_WS2008_VERSION;
351 hb_srv_version = HB_WS2008_VERSION;
352 break;
353
354 default:
355 util_fw_version = UTIL_FW_VERSION;
356 sd_srv_version = SD_VERSION;
357 ts_srv_version = TS_VERSION;
358 hb_srv_version = HB_VERSION;
359 }
360
324 return 0; 361 return 0;
325 362
326error: 363error:
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 62c2e32e25ef..98814d12a604 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -525,16 +525,25 @@ static int applesmc_init_smcreg_try(void)
525{ 525{
526 struct applesmc_registers *s = &smcreg; 526 struct applesmc_registers *s = &smcreg;
527 bool left_light_sensor, right_light_sensor; 527 bool left_light_sensor, right_light_sensor;
528 unsigned int count;
528 u8 tmp[1]; 529 u8 tmp[1];
529 int ret; 530 int ret;
530 531
531 if (s->init_complete) 532 if (s->init_complete)
532 return 0; 533 return 0;
533 534
534 ret = read_register_count(&s->key_count); 535 ret = read_register_count(&count);
535 if (ret) 536 if (ret)
536 return ret; 537 return ret;
537 538
539 if (s->cache && s->key_count != count) {
540 pr_warn("key count changed from %d to %d\n",
541 s->key_count, count);
542 kfree(s->cache);
543 s->cache = NULL;
544 }
545 s->key_count = count;
546
538 if (!s->cache) 547 if (!s->cache)
539 s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL); 548 s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
540 if (!s->cache) 549 if (!s->cache)
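The fix above revalidates the cached key count on every (re)init: if the SMC now reports a different count, the stale cache is freed and rebuilt rather than indexed out of bounds. A userspace sketch of the same invalidation pattern:

#include <stdio.h>
#include <stdlib.h>

struct state {
	unsigned key_count;
	int *cache;
};

static int refresh(struct state *s, unsigned count)
{
	if (s->cache && s->key_count != count) {
		fprintf(stderr, "key count changed from %u to %u\n",
			s->key_count, count);
		free(s->cache);           /* drop cache sized for old count */
		s->cache = NULL;
	}
	s->key_count = count;

	if (!s->cache)
		s->cache = calloc(s->key_count, sizeof(*s->cache));
	return s->cache ? 0 : -1;
}

int main(void)
{
	struct state s = { 0, NULL };

	refresh(&s, 10);
	refresh(&s, 12);   /* triggers the warning and reallocation */
	free(s.cache);
	return 0;
}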
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index dbecf08399f8..5888feef1ac5 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -98,6 +98,8 @@
98 98
99#define DW_IC_ERR_TX_ABRT 0x1 99#define DW_IC_ERR_TX_ABRT 0x1
100 100
101#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
102
101/* 103/*
102 * status codes 104 * status codes
103 */ 105 */
@@ -388,22 +390,34 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
388static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) 390static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
389{ 391{
390 struct i2c_msg *msgs = dev->msgs; 392 struct i2c_msg *msgs = dev->msgs;
391 u32 ic_con; 393 u32 ic_con, ic_tar = 0;
392 394
393 /* Disable the adapter */ 395 /* Disable the adapter */
394 __i2c_dw_enable(dev, false); 396 __i2c_dw_enable(dev, false);
395 397
396 /* set the slave (target) address */
397 dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR);
398
399 /* if the slave address is ten bit address, enable 10BITADDR */ 398 /* if the slave address is ten bit address, enable 10BITADDR */
400 ic_con = dw_readl(dev, DW_IC_CON); 399 ic_con = dw_readl(dev, DW_IC_CON);
401 if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) 400 if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
402 ic_con |= DW_IC_CON_10BITADDR_MASTER; 401 ic_con |= DW_IC_CON_10BITADDR_MASTER;
403 else 402 /*
403 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
404 * mode has to be enabled via bit 12 of IC_TAR register.
405 * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
406 * detected from registers.
407 */
408 ic_tar = DW_IC_TAR_10BITADDR_MASTER;
409 } else {
404 ic_con &= ~DW_IC_CON_10BITADDR_MASTER; 410 ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
411 }
412
405 dw_writel(dev, ic_con, DW_IC_CON); 413 dw_writel(dev, ic_con, DW_IC_CON);
406 414
415 /*
416 * Set the slave (target) address and enable 10-bit addressing mode
417 * if applicable.
418 */
419 dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
420
407 /* Enable the adapter */ 421 /* Enable the adapter */
408 __i2c_dw_enable(dev, true); 422 __i2c_dw_enable(dev, true);
409 423
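When a message uses a 10-bit address, controllers built with I2C_DYNAMIC_TAR_UPDATE take the 10-bit enable from bit 12 of IC_TAR, so the patch folds that bit into the target-address write (and sets it unconditionally, since the configuration cannot be probed from registers). A sketch of the value composition; the register write itself is simulated:

#include <stdint.h>
#include <stdio.h>

#define DW_IC_TAR_10BITADDR_MASTER (1u << 12)

static uint32_t make_ic_tar(uint16_t addr, int ten_bit)
{
	uint32_t ic_tar = addr;

	if (ten_bit)
		ic_tar |= DW_IC_TAR_10BITADDR_MASTER; /* same write as addr */
	return ic_tar;
}

int main(void)
{
	printf("7-bit 0x50  -> IC_TAR %#x\n", make_ic_tar(0x50, 0));
	printf("10-bit 0x250 -> IC_TAR %#x\n", make_ic_tar(0x250, 1));
	return 0;
}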
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 8ed79a086f85..1672effbcebb 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -393,6 +393,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
393 393
394 desc = &priv->hw[priv->head]; 394 desc = &priv->hw[priv->head];
395 395
396 /* Initialize the DMA buffer */
397 memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer));
398
396 /* Initialize the descriptor */ 399 /* Initialize the descriptor */
397 memset(desc, 0, sizeof(struct ismt_desc)); 400 memset(desc, 0, sizeof(struct ismt_desc));
398 desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write); 401 desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 7f3a47443494..d3e9cc3153a9 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -234,9 +234,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
234 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR | 234 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
235 (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT; 235 (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
236 236
237 writel_relaxed(data_reg_lo, 237 writel(data_reg_lo,
238 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO); 238 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
239 writel_relaxed(data_reg_hi, 239 writel(data_reg_hi,
240 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI); 240 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
241 241
242 } else { 242 } else {
@@ -697,6 +697,7 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
697MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table); 697MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
698 698
699#ifdef CONFIG_OF 699#ifdef CONFIG_OF
700#ifdef CONFIG_HAVE_CLK
700static int 701static int
701mv64xxx_calc_freq(const int tclk, const int n, const int m) 702mv64xxx_calc_freq(const int tclk, const int n, const int m)
702{ 703{
@@ -726,16 +727,12 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
726 return false; 727 return false;
727 return true; 728 return true;
728} 729}
730#endif /* CONFIG_HAVE_CLK */
729 731
730static int 732static int
731mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, 733mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
732 struct device *dev) 734 struct device *dev)
733{ 735{
734 const struct of_device_id *device;
735 struct device_node *np = dev->of_node;
736 u32 bus_freq, tclk;
737 int rc = 0;
738
739 /* CLK is mandatory when using DT to describe the i2c bus. We 736 /* CLK is mandatory when using DT to describe the i2c bus. We
740 * need to know tclk in order to calculate bus clock 737 * need to know tclk in order to calculate bus clock
741 * factors. 738 * factors.
@@ -744,6 +741,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
744 /* Have OF but no CLK */ 741 /* Have OF but no CLK */
745 return -ENODEV; 742 return -ENODEV;
746#else 743#else
744 const struct of_device_id *device;
745 struct device_node *np = dev->of_node;
746 u32 bus_freq, tclk;
747 int rc = 0;
748
747 if (IS_ERR(drv_data->clk)) { 749 if (IS_ERR(drv_data->clk)) {
748 rc = -ENODEV; 750 rc = -ENODEV;
749 goto out; 751 goto out;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 3535f3c0f7b4..3747b9bf67d6 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1178,8 +1178,6 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
1178 1178
1179 i2c_del_adapter(&i2c->adap); 1179 i2c_del_adapter(&i2c->adap);
1180 1180
1181 clk_disable_unprepare(i2c->clk);
1182
1183 if (pdev->dev.of_node && IS_ERR(i2c->pctrl)) 1181 if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
1184 s3c24xx_i2c_dt_gpio_free(i2c); 1182 s3c24xx_i2c_dt_gpio_free(i2c);
1185 1183
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index b39f6f0b45f2..0f12382aa35d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -498,7 +498,7 @@ struct cached_dev {
498 */ 498 */
499 atomic_t has_dirty; 499 atomic_t has_dirty;
500 500
501 struct ratelimit writeback_rate; 501 struct bch_ratelimit writeback_rate;
502 struct delayed_work writeback_rate_update; 502 struct delayed_work writeback_rate_update;
503 503
504 /* 504 /*
@@ -507,10 +507,9 @@ struct cached_dev {
507 */ 507 */
508 sector_t last_read; 508 sector_t last_read;
509 509
510 /* Number of writeback bios in flight */ 510 /* Limit number of writeback bios in flight */
511 atomic_t in_flight; 511 struct semaphore in_flight;
512 struct closure_with_timer writeback; 512 struct closure_with_timer writeback;
513 struct closure_waitlist writeback_wait;
514 513
515 struct keybuf writeback_keys; 514 struct keybuf writeback_keys;
516 515
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 8010eed06a51..22d1ae72c282 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
926 926
927/* Mergesort */ 927/* Mergesort */
928 928
929static void sort_key_next(struct btree_iter *iter,
930 struct btree_iter_set *i)
931{
932 i->k = bkey_next(i->k);
933
934 if (i->k == i->end)
935 *i = iter->data[--iter->used];
936}
937
929static void btree_sort_fixup(struct btree_iter *iter) 938static void btree_sort_fixup(struct btree_iter *iter)
930{ 939{
931 while (iter->used > 1) { 940 while (iter->used > 1) {
932 struct btree_iter_set *top = iter->data, *i = top + 1; 941 struct btree_iter_set *top = iter->data, *i = top + 1;
933 struct bkey *k;
934 942
935 if (iter->used > 2 && 943 if (iter->used > 2 &&
936 btree_iter_cmp(i[0], i[1])) 944 btree_iter_cmp(i[0], i[1]))
937 i++; 945 i++;
938 946
939 for (k = i->k; 947 if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
940 k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
941 k = bkey_next(k))
942 if (top->k > i->k)
943 __bch_cut_front(top->k, k);
944 else if (KEY_SIZE(k))
945 bch_cut_back(&START_KEY(k), top->k);
946
947 if (top->k < i->k || k == i->k)
948 break; 948 break;
949 949
950 heap_sift(iter, i - top, btree_iter_cmp); 950 if (!KEY_SIZE(i->k)) {
951 sort_key_next(iter, i);
952 heap_sift(iter, i - top, btree_iter_cmp);
953 continue;
954 }
955
956 if (top->k > i->k) {
957 if (bkey_cmp(top->k, i->k) >= 0)
958 sort_key_next(iter, i);
959 else
960 bch_cut_front(top->k, i->k);
961
962 heap_sift(iter, i - top, btree_iter_cmp);
963 } else {
964 /* can't happen because of comparison func */
965 BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
966 bch_cut_back(&START_KEY(i->k), top->k);
967 }
951 } 968 }
952} 969}
953 970
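The rewritten fixup resolves overlapping extents by trimming one key instead of scanning ahead. bcache keys carry an end offset plus a size, so the start is end - size and cutting either edge is just size arithmetic; a loose standalone sketch of those cut semantics (not the full fixup logic):

#include <stdint.h>
#include <stdio.h>

struct key { uint64_t end; uint64_t size; };
#define START(k) ((k)->end - (k)->size)

static void cut_front(uint64_t where, struct key *k)
{
	if (where > START(k))
		k->size = k->end - where;   /* drop the overlapped front */
}

static void cut_back(uint64_t where, struct key *k)
{
	if (where < k->end) {
		k->size -= k->end - where;  /* drop the overlapped tail */
		k->end = where;
	}
}

int main(void)
{
	struct key top  = { .end = 20, .size = 10 };  /* [10, 20) */
	struct key next = { .end = 25, .size = 10 };  /* [15, 25), newer */

	cut_back(START(&next), &top);   /* older key -> [10, 15) */
	cut_front(18, &next);           /* mirror op  -> [18, 25) */

	printf("top  [%llu,%llu)\n", (unsigned long long)START(&top),
	       (unsigned long long)top.end);
	printf("next [%llu,%llu)\n", (unsigned long long)START(&next),
	       (unsigned long long)next.end);
	return 0;
}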
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index f9764e61978b..f42fc7ed9cd6 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -255,7 +255,7 @@ void bch_btree_node_read(struct btree *b)
255 255
256 return; 256 return;
257err: 257err:
258 bch_cache_set_error(b->c, "io error reading bucket %lu", 258 bch_cache_set_error(b->c, "io error reading bucket %zu",
259 PTR_BUCKET_NR(b->c, &b->key, 0)); 259 PTR_BUCKET_NR(b->c, &b->key, 0));
260} 260}
261 261
@@ -612,7 +612,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
612 return SHRINK_STOP; 612 return SHRINK_STOP;
613 613
614 /* Return -1 if we can't do anything right now */ 614 /* Return -1 if we can't do anything right now */
615 if (sc->gfp_mask & __GFP_WAIT) 615 if (sc->gfp_mask & __GFP_IO)
616 mutex_lock(&c->bucket_lock); 616 mutex_lock(&c->bucket_lock);
617 else if (!mutex_trylock(&c->bucket_lock)) 617 else if (!mutex_trylock(&c->bucket_lock))
618 return -1; 618 return -1;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ba95ab84b2be..8435f81e5d85 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
153 bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); 153 bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
154 pr_debug("%u journal buckets", ca->sb.njournal_buckets); 154 pr_debug("%u journal buckets", ca->sb.njournal_buckets);
155 155
156 /* Read journal buckets ordered by golden ratio hash to quickly 156 /*
157 * Read journal buckets ordered by golden ratio hash to quickly
157 * find a sequence of buckets with valid journal entries 158 * find a sequence of buckets with valid journal entries
158 */ 159 */
159 for (i = 0; i < ca->sb.njournal_buckets; i++) { 160 for (i = 0; i < ca->sb.njournal_buckets; i++) {
@@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
166 goto bsearch; 167 goto bsearch;
167 } 168 }
168 169
169 /* If that fails, check all the buckets we haven't checked 170 /*
171 * If that fails, check all the buckets we haven't checked
170 * already 172 * already
171 */ 173 */
172 pr_debug("falling back to linear search"); 174 pr_debug("falling back to linear search");
173 175
174 for (l = 0; l < ca->sb.njournal_buckets; l++) { 176 for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
175 if (test_bit(l, bitmap)) 177 l < ca->sb.njournal_buckets;
176 continue; 178 l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
177
178 if (read_bucket(l)) 179 if (read_bucket(l))
179 goto bsearch; 180 goto bsearch;
180 } 181
182 if (list_empty(list))
183 continue;
181bsearch: 184bsearch:
182 /* Binary search */ 185 /* Binary search */
183 m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); 186 m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
@@ -197,10 +200,12 @@ bsearch:
197 r = m; 200 r = m;
198 } 201 }
199 202
200 /* Read buckets in reverse order until we stop finding more 203 /*
204 * Read buckets in reverse order until we stop finding more
201 * journal entries 205 * journal entries
202 */ 206 */
203 pr_debug("finishing up"); 207 pr_debug("finishing up: m %u njournal_buckets %u",
208 m, ca->sb.njournal_buckets);
204 l = m; 209 l = m;
205 210
206 while (1) { 211 while (1) {
@@ -228,9 +233,10 @@ bsearch:
228 } 233 }
229 } 234 }
230 235
231 c->journal.seq = list_entry(list->prev, 236 if (!list_empty(list))
232 struct journal_replay, 237 c->journal.seq = list_entry(list->prev,
233 list)->j.seq; 238 struct journal_replay,
239 list)->j.seq;
234 240
235 return 0; 241 return 0;
236#undef read_bucket 242#undef read_bucket
@@ -428,7 +434,7 @@ static void do_journal_discard(struct cache *ca)
428 return; 434 return;
429 } 435 }
430 436
431 switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) { 437 switch (atomic_read(&ja->discard_in_flight)) {
432 case DISCARD_IN_FLIGHT: 438 case DISCARD_IN_FLIGHT:
433 return; 439 return;
434 440
@@ -689,6 +695,7 @@ void bch_journal_meta(struct cache_set *c, struct closure *cl)
689 if (cl) 695 if (cl)
690 BUG_ON(!closure_wait(&w->wait, cl)); 696 BUG_ON(!closure_wait(&w->wait, cl));
691 697
698 closure_flush(&c->journal.io);
692 __journal_try_write(c, true); 699 __journal_try_write(c, true);
693 } 700 }
694} 701}
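The fallback search now iterates only over still-unread buckets via find_first_zero_bit()/find_next_zero_bit() rather than testing every index. A standalone sketch of that loop shape, with a single-word bitmap standing in for the kernel's unsigned-long-array helpers:

#include <stdio.h>

static unsigned find_next_zero(unsigned long map, unsigned nbits, unsigned from)
{
	unsigned i;

	for (i = from; i < nbits; i++)
		if (!(map & (1UL << i)))
			return i;
	return nbits;
}

int main(void)
{
	unsigned long seen = 0x16;  /* buckets 1, 2, 4 already read */
	unsigned n = 5, l;

	for (l = find_next_zero(seen, n, 0);
	     l < n;
	     l = find_next_zero(seen, n, l + 1))
		printf("read bucket %u\n", l);   /* prints 0 and 3 */
	return 0;
}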
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 786a1a4f74d8..71eb233b9ace 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -997,14 +997,17 @@ static void request_write(struct cached_dev *dc, struct search *s)
997 } else { 997 } else {
998 bch_writeback_add(dc); 998 bch_writeback_add(dc);
999 999
1000 if (s->op.flush_journal) { 1000 if (bio->bi_rw & REQ_FLUSH) {
1001 /* Also need to send a flush to the backing device */ 1001 /* Also need to send a flush to the backing device */
1002 s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, 1002 struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
1003 dc->disk.bio_split); 1003 dc->disk.bio_split);
1004 1004
1005 bio->bi_size = 0; 1005 flush->bi_rw = WRITE_FLUSH;
1006 bio->bi_vcnt = 0; 1006 flush->bi_bdev = bio->bi_bdev;
1007 closure_bio_submit(bio, cl, s->d); 1007 flush->bi_end_io = request_endio;
1008 flush->bi_private = cl;
1009
1010 closure_bio_submit(flush, cl, s->d);
1008 } else { 1011 } else {
1009 s->op.cache_bio = bio; 1012 s->op.cache_bio = bio;
1010 } 1013 }
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 4fe6ab2fbe2e..924dcfdae111 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -223,8 +223,13 @@ STORE(__cached_dev)
223 } 223 }
224 224
225 if (attr == &sysfs_label) { 225 if (attr == &sysfs_label) {
226 /* note: endlines are preserved */ 226 if (size > SB_LABEL_SIZE)
227 memcpy(dc->sb.label, buf, SB_LABEL_SIZE); 227 return -EINVAL;
228 memcpy(dc->sb.label, buf, size);
229 if (size < SB_LABEL_SIZE)
230 dc->sb.label[size] = '\0';
231 if (size && dc->sb.label[size - 1] == '\n')
232 dc->sb.label[size - 1] = '\0';
228 bch_write_bdev_super(dc, NULL); 233 bch_write_bdev_super(dc, NULL);
229 if (dc->disk.c) { 234 if (dc->disk.c) {
230 memcpy(dc->disk.c->uuids[dc->disk.id].label, 235 memcpy(dc->disk.c->uuids[dc->disk.id].label,
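The label store is hardened: oversize writes are rejected, short writes are NUL-terminated, and one trailing newline (typical of echo into sysfs) is stripped. A userspace sketch of the same bounds handling:

#include <stdio.h>
#include <string.h>

#define SB_LABEL_SIZE 32

static int set_label(char *label, const char *buf, size_t size)
{
	if (size > SB_LABEL_SIZE)
		return -1;                 /* -EINVAL in the driver */
	memcpy(label, buf, size);
	if (size < SB_LABEL_SIZE)
		label[size] = '\0';        /* terminate short writes */
	if (size && label[size - 1] == '\n')
		label[size - 1] = '\0';    /* strip trailing newline */
	return 0;
}

int main(void)
{
	char label[SB_LABEL_SIZE];

	set_label(label, "backup0\n", 8);
	printf("label: '%s'\n", label);    /* 'backup0' */
	return 0;
}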
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 98eb81159a22..420dad545c7d 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -190,7 +190,16 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
190 stats->last = now ?: 1; 190 stats->last = now ?: 1;
191} 191}
192 192
193unsigned bch_next_delay(struct ratelimit *d, uint64_t done) 193/**
194 * bch_next_delay() - increment @d by the amount of work done, and return how
195 * long to delay until the next time to do some work.
196 *
197 * @d - the struct bch_ratelimit to update
198 * @done - the amount of work done, in arbitrary units
199 *
200 * Returns the amount of time to delay by, in jiffies
201 */
202uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
194{ 203{
195 uint64_t now = local_clock(); 204 uint64_t now = local_clock();
196 205
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 1ae2a73ad85f..ea345c6896f4 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -450,17 +450,23 @@ read_attribute(name ## _last_ ## frequency_units)
450 (ewma) >> factor; \ 450 (ewma) >> factor; \
451}) 451})
452 452
453struct ratelimit { 453struct bch_ratelimit {
454 /* Next time we want to do some work, in nanoseconds */
454 uint64_t next; 455 uint64_t next;
456
457 /*
458 * Rate at which we want to do work, in units per nanosecond
459 * The units here correspond to the units passed to bch_next_delay()
460 */
455 unsigned rate; 461 unsigned rate;
456}; 462};
457 463
458static inline void ratelimit_reset(struct ratelimit *d) 464static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
459{ 465{
460 d->next = local_clock(); 466 d->next = local_clock();
461} 467}
462 468
463unsigned bch_next_delay(struct ratelimit *d, uint64_t done); 469uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
464 470
465#define __DIV_SAFE(n, d, zero) \ 471#define __DIV_SAFE(n, d, zero) \
466({ \ 472({ \
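bch_ratelimit tracks, in nanoseconds, the time by which the work done so far should complete at the target rate; the delay returned is how far ahead of that schedule the caller currently is. A standalone sketch of the core arithmetic (the real bch_next_delay() reads local_clock() and converts the result to jiffies):

#include <stdint.h>
#include <stdio.h>

struct ratelimit {
	uint64_t next;    /* ns timestamp we may work again */
	unsigned rate;    /* units of work per ns */
};

static uint64_t next_delay(struct ratelimit *d, uint64_t done, uint64_t now)
{
	d->next += done / d->rate;              /* push schedule forward */
	return d->next > now ? d->next - now : 0;
}

int main(void)
{
	struct ratelimit rl = { .next = 1000, .rate = 2 };
	uint64_t now = 1000;                    /* simulated clock */

	printf("delay %llu ns\n",
	       (unsigned long long)next_delay(&rl, 5000, now)); /* 2500 */
	return 0;
}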
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 22cbff551628..ba3ee48320f2 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -94,11 +94,15 @@ static void update_writeback_rate(struct work_struct *work)
94 94
95static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) 95static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
96{ 96{
97 uint64_t ret;
98
97 if (atomic_read(&dc->disk.detaching) || 99 if (atomic_read(&dc->disk.detaching) ||
98 !dc->writeback_percent) 100 !dc->writeback_percent)
99 return 0; 101 return 0;
100 102
101 return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); 103 ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
104
105 return min_t(uint64_t, ret, HZ);
102} 106}
103 107
104/* Background writeback */ 108/* Background writeback */
@@ -208,7 +212,7 @@ normal_refill:
208 212
209 up_write(&dc->writeback_lock); 213 up_write(&dc->writeback_lock);
210 214
211 ratelimit_reset(&dc->writeback_rate); 215 bch_ratelimit_reset(&dc->writeback_rate);
212 216
213 /* Punt to workqueue only so we don't recurse and blow the stack */ 217 /* Punt to workqueue only so we don't recurse and blow the stack */
214 continue_at(cl, read_dirty, dirty_wq); 218 continue_at(cl, read_dirty, dirty_wq);
@@ -318,9 +322,7 @@ static void write_dirty_finish(struct closure *cl)
318 } 322 }
319 323
320 bch_keybuf_del(&dc->writeback_keys, w); 324 bch_keybuf_del(&dc->writeback_keys, w);
321 atomic_dec_bug(&dc->in_flight); 325 up(&dc->in_flight);
322
323 closure_wake_up(&dc->writeback_wait);
324 326
325 closure_return_with_destructor(cl, dirty_io_destructor); 327 closure_return_with_destructor(cl, dirty_io_destructor);
326} 328}
@@ -349,7 +351,7 @@ static void write_dirty(struct closure *cl)
349 351
350 closure_bio_submit(&io->bio, cl, &io->dc->disk); 352 closure_bio_submit(&io->bio, cl, &io->dc->disk);
351 353
352 continue_at(cl, write_dirty_finish, dirty_wq); 354 continue_at(cl, write_dirty_finish, system_wq);
353} 355}
354 356
355static void read_dirty_endio(struct bio *bio, int error) 357static void read_dirty_endio(struct bio *bio, int error)
@@ -369,7 +371,7 @@ static void read_dirty_submit(struct closure *cl)
369 371
370 closure_bio_submit(&io->bio, cl, &io->dc->disk); 372 closure_bio_submit(&io->bio, cl, &io->dc->disk);
371 373
372 continue_at(cl, write_dirty, dirty_wq); 374 continue_at(cl, write_dirty, system_wq);
373} 375}
374 376
375static void read_dirty(struct closure *cl) 377static void read_dirty(struct closure *cl)
@@ -394,12 +396,8 @@ static void read_dirty(struct closure *cl)
394 396
395 if (delay > 0 && 397 if (delay > 0 &&
396 (KEY_START(&w->key) != dc->last_read || 398 (KEY_START(&w->key) != dc->last_read ||
397 jiffies_to_msecs(delay) > 50)) { 399 jiffies_to_msecs(delay) > 50))
398 w->private = NULL; 400 delay = schedule_timeout_uninterruptible(delay);
399
400 closure_delay(&dc->writeback, delay);
401 continue_at(cl, read_dirty, dirty_wq);
402 }
403 401
404 dc->last_read = KEY_OFFSET(&w->key); 402 dc->last_read = KEY_OFFSET(&w->key);
405 403
@@ -424,15 +422,10 @@ static void read_dirty(struct closure *cl)
424 422
425 trace_bcache_writeback(&w->key); 423 trace_bcache_writeback(&w->key);
426 424
427 closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl); 425 down(&dc->in_flight);
426 closure_call(&io->cl, read_dirty_submit, NULL, cl);
428 427
429 delay = writeback_delay(dc, KEY_SIZE(&w->key)); 428 delay = writeback_delay(dc, KEY_SIZE(&w->key));
430
431 atomic_inc(&dc->in_flight);
432
433 if (!closure_wait_event(&dc->writeback_wait, cl,
434 atomic_read(&dc->in_flight) < 64))
435 continue_at(cl, read_dirty, dirty_wq);
436 } 429 }
437 430
438 if (0) { 431 if (0) {
@@ -442,7 +435,11 @@ err:
442 bch_keybuf_del(&dc->writeback_keys, w); 435 bch_keybuf_del(&dc->writeback_keys, w);
443 } 436 }
444 437
445 refill_dirty(cl); 438 /*
439 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
440 * freed) before refilling again
441 */
442 continue_at(cl, refill_dirty, dirty_wq);
446} 443}
447 444
448/* Init */ 445/* Init */
@@ -484,6 +481,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
484 481
485void bch_cached_dev_writeback_init(struct cached_dev *dc) 482void bch_cached_dev_writeback_init(struct cached_dev *dc)
486{ 483{
484 sema_init(&dc->in_flight, 64);
487 closure_init_unlocked(&dc->writeback); 485 closure_init_unlocked(&dc->writeback);
488 init_rwsem(&dc->writeback_lock); 486 init_rwsem(&dc->writeback_lock);
489 487
@@ -513,7 +511,7 @@ void bch_writeback_exit(void)
513 511
514int __init bch_writeback_init(void) 512int __init bch_writeback_init(void)
515{ 513{
516 dirty_wq = create_singlethread_workqueue("bcache_writeback"); 514 dirty_wq = create_workqueue("bcache_writeback");
517 if (!dirty_wq) 515 if (!dirty_wq)
518 return -ENOMEM; 516 return -ENOMEM;
519 517
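Replacing the atomic counter and wait list with struct semaphore in_flight (initialized to 64) means the submit path simply blocks in down() once 64 writeback bios are outstanding, and each completion up()s a slot back. A userspace sketch of the pattern using POSIX semaphores:

#include <semaphore.h>
#include <stdio.h>

static sem_t in_flight;

static void submit_one(int i)
{
	sem_wait(&in_flight);              /* down(): blocks at the cap */
	printf("submitted io %d\n", i);
}

static void complete_one(int i)
{
	printf("completed io %d\n", i);
	sem_post(&in_flight);              /* up(): free a slot */
}

int main(void)
{
	int i;

	sem_init(&in_flight, 0, 64);       /* cap of 64 in-flight IOs */
	for (i = 0; i < 4; i++) {
		submit_one(i);
		complete_one(i);
	}
	sem_destroy(&in_flight);
	return 0;
}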
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index ea49834377c8..2a20986a2fec 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,8 +19,6 @@
19#define DM_MSG_PREFIX "io" 19#define DM_MSG_PREFIX "io"
20 20
21#define DM_IO_MAX_REGIONS BITS_PER_LONG 21#define DM_IO_MAX_REGIONS BITS_PER_LONG
22#define MIN_IOS 16
23#define MIN_BIOS 16
24 22
25struct dm_io_client { 23struct dm_io_client {
26 mempool_t *pool; 24 mempool_t *pool;
@@ -50,16 +48,17 @@ static struct kmem_cache *_dm_io_cache;
50struct dm_io_client *dm_io_client_create(void) 48struct dm_io_client *dm_io_client_create(void)
51{ 49{
52 struct dm_io_client *client; 50 struct dm_io_client *client;
51 unsigned min_ios = dm_get_reserved_bio_based_ios();
53 52
54 client = kmalloc(sizeof(*client), GFP_KERNEL); 53 client = kmalloc(sizeof(*client), GFP_KERNEL);
55 if (!client) 54 if (!client)
56 return ERR_PTR(-ENOMEM); 55 return ERR_PTR(-ENOMEM);
57 56
58 client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache); 57 client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
59 if (!client->pool) 58 if (!client->pool)
60 goto bad; 59 goto bad;
61 60
62 client->bios = bioset_create(MIN_BIOS, 0); 61 client->bios = bioset_create(min_ios, 0);
63 if (!client->bios) 62 if (!client->bios)
64 goto bad; 63 goto bad;
65 64
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index b759a127f9c3..de570a558764 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/device-mapper.h> 8#include <linux/device-mapper.h>
9 9
10#include "dm.h"
10#include "dm-path-selector.h" 11#include "dm-path-selector.h"
11#include "dm-uevent.h" 12#include "dm-uevent.h"
12 13
@@ -116,8 +117,6 @@ struct dm_mpath_io {
116 117
117typedef int (*action_fn) (struct pgpath *pgpath); 118typedef int (*action_fn) (struct pgpath *pgpath);
118 119
119#define MIN_IOS 256 /* Mempool size */
120
121static struct kmem_cache *_mpio_cache; 120static struct kmem_cache *_mpio_cache;
122 121
123static struct workqueue_struct *kmultipathd, *kmpath_handlerd; 122static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
@@ -190,6 +189,7 @@ static void free_priority_group(struct priority_group *pg,
190static struct multipath *alloc_multipath(struct dm_target *ti) 189static struct multipath *alloc_multipath(struct dm_target *ti)
191{ 190{
192 struct multipath *m; 191 struct multipath *m;
192 unsigned min_ios = dm_get_reserved_rq_based_ios();
193 193
194 m = kzalloc(sizeof(*m), GFP_KERNEL); 194 m = kzalloc(sizeof(*m), GFP_KERNEL);
195 if (m) { 195 if (m) {
@@ -202,7 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
202 INIT_WORK(&m->trigger_event, trigger_event); 202 INIT_WORK(&m->trigger_event, trigger_event);
203 init_waitqueue_head(&m->pg_init_wait); 203 init_waitqueue_head(&m->pg_init_wait);
204 mutex_init(&m->work_mutex); 204 mutex_init(&m->work_mutex);
205 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); 205 m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
206 if (!m->mpio_pool) { 206 if (!m->mpio_pool) {
207 kfree(m); 207 kfree(m);
208 return NULL; 208 return NULL;
@@ -1268,6 +1268,7 @@ static int noretry_error(int error)
1268 case -EREMOTEIO: 1268 case -EREMOTEIO:
1269 case -EILSEQ: 1269 case -EILSEQ:
1270 case -ENODATA: 1270 case -ENODATA:
1271 case -ENOSPC:
1271 return 1; 1272 return 1;
1272 } 1273 }
1273 1274
@@ -1298,8 +1299,17 @@ static int do_end_io(struct multipath *m, struct request *clone,
1298 if (!error && !clone->errors) 1299 if (!error && !clone->errors)
1299 return 0; /* I/O complete */ 1300 return 0; /* I/O complete */
1300 1301
1301 if (noretry_error(error)) 1302 if (noretry_error(error)) {
1303 if ((clone->cmd_flags & REQ_WRITE_SAME) &&
1304 !clone->q->limits.max_write_same_sectors) {
1305 struct queue_limits *limits;
1306
1307 /* device doesn't really support WRITE SAME, disable it */
1308 limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
1309 limits->max_write_same_sectors = 0;
1310 }
1302 return error; 1311 return error;
1312 }
1303 1313
1304 if (mpio->pgpath) 1314 if (mpio->pgpath)
1305 fail_path(mpio->pgpath); 1315 fail_path(mpio->pgpath);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3ac415675b6c..4caa8e6d59d7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
256 */ 256 */
257 INIT_WORK_ONSTACK(&req.work, do_metadata); 257 INIT_WORK_ONSTACK(&req.work, do_metadata);
258 queue_work(ps->metadata_wq, &req.work); 258 queue_work(ps->metadata_wq, &req.work);
259 flush_work(&req.work); 259 flush_workqueue(ps->metadata_wq);
260 260
261 return req.result; 261 return req.result;
262} 262}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c434e5aab2df..aec57d76db5d 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -725,17 +725,16 @@ static int calc_max_buckets(void)
725 */ 725 */
726static int init_hash_tables(struct dm_snapshot *s) 726static int init_hash_tables(struct dm_snapshot *s)
727{ 727{
728 sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; 728 sector_t hash_size, cow_dev_size, max_buckets;
729 729
730 /* 730 /*
731 * Calculate based on the size of the original volume or 731 * Calculate based on the size of the original volume or
732 * the COW volume... 732 * the COW volume...
733 */ 733 */
734 cow_dev_size = get_dev_size(s->cow->bdev); 734 cow_dev_size = get_dev_size(s->cow->bdev);
735 origin_dev_size = get_dev_size(s->origin->bdev);
736 max_buckets = calc_max_buckets(); 735 max_buckets = calc_max_buckets();
737 736
738 hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; 737 hash_size = cow_dev_size >> s->store->chunk_shift;
739 hash_size = min(hash_size, max_buckets); 738 hash_size = min(hash_size, max_buckets);
740 739
741 if (hash_size < 64) 740 if (hash_size < 64)
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 8ae31e8d3d64..3d404c1371ed 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -451,19 +451,26 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
451 struct dm_stat_percpu *p; 451 struct dm_stat_percpu *p;
452 452
453 /* 453 /*
454 * For strict correctness we should use local_irq_disable/enable 454 * For strict correctness we should use local_irq_save/restore
455 * instead of preempt_disable/enable. 455 * instead of preempt_disable/enable.
456 * 456 *
457 * This is racy if the driver finishes bios from non-interrupt 457 * preempt_disable/enable is racy if the driver finishes bios
458 * context as well as from interrupt context or from more different 458 * from non-interrupt context as well as from interrupt context
459 * interrupts. 459 * or from more different interrupts.
460 * 460 *
461 * However, the race only results in not counting some events, 461 * On 64-bit architectures the race only results in not counting some
462 * so it is acceptable. 462 * events, so it is acceptable. On 32-bit architectures the race could
463 * cause the counter to go off by 2^32, so we need to do proper locking
464 * there.
463 * 465 *
464 * part_stat_lock()/part_stat_unlock() have this race too. 466 * part_stat_lock()/part_stat_unlock() have this race too.
465 */ 467 */
468#if BITS_PER_LONG == 32
469 unsigned long flags;
470 local_irq_save(flags);
471#else
466 preempt_disable(); 472 preempt_disable();
473#endif
467 p = &s->stat_percpu[smp_processor_id()][entry]; 474 p = &s->stat_percpu[smp_processor_id()][entry];
468 475
469 if (!end) { 476 if (!end) {
@@ -478,7 +485,11 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
478 p->ticks[idx] += duration; 485 p->ticks[idx] += duration;
479 } 486 }
480 487
488#if BITS_PER_LONG == 32
489 local_irq_restore(flags);
490#else
481 preempt_enable(); 491 preempt_enable();
492#endif
482} 493}
483 494
484static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw, 495static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
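The #if BITS_PER_LONG == 32 guards encode the rule from the comment above: a 64-bit counter update is two stores on a 32-bit machine, so an interrupt landing between them can tear the value, while on 64-bit a single store plus disabled preemption is enough. The pattern, condensed:

    static void counter_add(unsigned long long *counter, unsigned long long v)
    {
    #if BITS_PER_LONG == 32
            unsigned long flags;

            local_irq_save(flags);  /* the 64-bit store can be torn here */
            *counter += v;
            local_irq_restore(flags);
    #else
            preempt_disable();      /* the store is a single 64-bit write */
            *counter += v;
            preempt_enable();
    #endif
    }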
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ed063427d676..2c0cf511ec23 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2095,6 +2095,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2095 * them down to the data device. The thin device's discard 2095 * them down to the data device. The thin device's discard
2096 * processing will cause mappings to be removed from the btree. 2096 * processing will cause mappings to be removed from the btree.
2097 */ 2097 */
2098 ti->discard_zeroes_data_unsupported = true;
2098 if (pf.discard_enabled && pf.discard_passdown) { 2099 if (pf.discard_enabled && pf.discard_passdown) {
2099 ti->num_discard_bios = 1; 2100 ti->num_discard_bios = 1;
2100 2101
@@ -2104,7 +2105,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2104 * thin devices' discard limits consistent). 2105 * thin devices' discard limits consistent).
2105 */ 2106 */
2106 ti->discards_supported = true; 2107 ti->discards_supported = true;
2107 ti->discard_zeroes_data_unsupported = true;
2108 } 2108 }
2109 ti->private = pt; 2109 ti->private = pt;
2110 2110
@@ -2689,8 +2689,16 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2689 * They get transferred to the live pool in bind_control_target() 2689 * They get transferred to the live pool in bind_control_target()
2690 * called from pool_preresume(). 2690 * called from pool_preresume().
2691 */ 2691 */
2692 if (!pt->adjusted_pf.discard_enabled) 2692 if (!pt->adjusted_pf.discard_enabled) {
2693 /*
2694 * Must explicitly disallow stacking discard limits otherwise the
2695 * block layer will stack them if pool's data device has support.
2696 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
2697 * user to see that, so make sure to set all discard limits to 0.
2698 */
2699 limits->discard_granularity = 0;
2693 return; 2700 return;
2701 }
2694 2702
2695 disable_passdown_if_not_supported(pt); 2703 disable_passdown_if_not_supported(pt);
2696 2704
@@ -2826,10 +2834,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2826 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook); 2834 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
2827 2835
2828 /* In case the pool supports discards, pass them on. */ 2836 /* In case the pool supports discards, pass them on. */
2837 ti->discard_zeroes_data_unsupported = true;
2829 if (tc->pool->pf.discard_enabled) { 2838 if (tc->pool->pf.discard_enabled) {
2830 ti->discards_supported = true; 2839 ti->discards_supported = true;
2831 ti->num_discard_bios = 1; 2840 ti->num_discard_bios = 1;
2832 ti->discard_zeroes_data_unsupported = true;
2833 /* Discard bios must be split on a block boundary */ 2841 /* Discard bios must be split on a block boundary */
2834 ti->split_discard_bios = true; 2842 ti->split_discard_bios = true;
2835 } 2843 }
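Moving discard_zeroes_data_unsupported out of the conditional and zeroing discard_granularity attack the same hole from two sides: the flag must reflect reality even when discards are disabled, and the limits must be zeroed so the block layer cannot stack discard support up from the data device. A sketch of the disable path; treating max_discard_sectors as part of "all discard limits" is an assumption here:

    static void pool_disable_discards(struct queue_limits *limits)
    {
            /* Zero everything discard-related so nothing can be stacked. */
            limits->discard_granularity = 0;
            limits->max_discard_sectors = 0;        /* assumed, not in the hunk */
    }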
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6a5e9ed2fcc3..b3e26c7d1417 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -211,10 +211,55 @@ struct dm_md_mempools {
211 struct bio_set *bs; 211 struct bio_set *bs;
212}; 212};
213 213
214#define MIN_IOS 256 214#define RESERVED_BIO_BASED_IOS 16
215#define RESERVED_REQUEST_BASED_IOS 256
216#define RESERVED_MAX_IOS 1024
215static struct kmem_cache *_io_cache; 217static struct kmem_cache *_io_cache;
216static struct kmem_cache *_rq_tio_cache; 218static struct kmem_cache *_rq_tio_cache;
217 219
220/*
221 * Bio-based DM's mempools' reserved IOs set by the user.
222 */
223static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
224
225/*
226 * Request-based DM's mempools' reserved IOs set by the user.
227 */
228static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
229
230static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
231 unsigned def, unsigned max)
232{
233 unsigned ios = ACCESS_ONCE(*reserved_ios);
234 unsigned modified_ios = 0;
235
236 if (!ios)
237 modified_ios = def;
238 else if (ios > max)
239 modified_ios = max;
240
241 if (modified_ios) {
242 (void)cmpxchg(reserved_ios, ios, modified_ios);
243 ios = modified_ios;
244 }
245
246 return ios;
247}
248
249unsigned dm_get_reserved_bio_based_ios(void)
250{
251 return __dm_get_reserved_ios(&reserved_bio_based_ios,
252 RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
253}
254EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
255
256unsigned dm_get_reserved_rq_based_ios(void)
257{
258 return __dm_get_reserved_ios(&reserved_rq_based_ios,
259 RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
260}
261EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
262
218static int __init local_init(void) 263static int __init local_init(void)
219{ 264{
220 int r = -ENOMEM; 265 int r = -ENOMEM;
@@ -2278,6 +2323,17 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2278} 2323}
2279 2324
2280/* 2325/*
2326 * The queue_limits are only valid as long as you have a reference
2327 * count on 'md'.
2328 */
2329struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2330{
2331 BUG_ON(!atomic_read(&md->holders));
2332 return &md->queue->limits;
2333}
2334EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2335
2336/*
2281 * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 2337 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2282 */ 2338 */
2283static int dm_init_request_based_queue(struct mapped_device *md) 2339static int dm_init_request_based_queue(struct mapped_device *md)
@@ -2862,18 +2918,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
2862 2918
2863 if (type == DM_TYPE_BIO_BASED) { 2919 if (type == DM_TYPE_BIO_BASED) {
2864 cachep = _io_cache; 2920 cachep = _io_cache;
2865 pool_size = 16; 2921 pool_size = dm_get_reserved_bio_based_ios();
2866 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 2922 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2867 } else if (type == DM_TYPE_REQUEST_BASED) { 2923 } else if (type == DM_TYPE_REQUEST_BASED) {
2868 cachep = _rq_tio_cache; 2924 cachep = _rq_tio_cache;
2869 pool_size = MIN_IOS; 2925 pool_size = dm_get_reserved_rq_based_ios();
2870 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2926 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2871 /* per_bio_data_size is not used. See __bind_mempools(). */ 2927 /* per_bio_data_size is not used. See __bind_mempools(). */
2872 WARN_ON(per_bio_data_size != 0); 2928 WARN_ON(per_bio_data_size != 0);
2873 } else 2929 } else
2874 goto out; 2930 goto out;
2875 2931
2876 pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep); 2932 pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
2877 if (!pools->io_pool) 2933 if (!pools->io_pool)
2878 goto out; 2934 goto out;
2879 2935
@@ -2924,6 +2980,13 @@ module_exit(dm_exit);
2924 2980
2925module_param(major, uint, 0); 2981module_param(major, uint, 0);
2926MODULE_PARM_DESC(major, "The major number of the device mapper"); 2982MODULE_PARM_DESC(major, "The major number of the device mapper");
2983
2984module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
2985MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
2986
2987module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
2988MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
2989
2927MODULE_DESCRIPTION(DM_NAME " driver"); 2990MODULE_DESCRIPTION(DM_NAME " driver");
2928MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 2991MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2929MODULE_LICENSE("GPL"); 2992MODULE_LICENSE("GPL");
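dm_get_reserved_bio_based_ios() and its request-based twin sit between a writable module parameter and the mempool sizing in dm_alloc_md_mempools(): the parameter can change under sysfs at any moment, so the helper snapshots it once with ACCESS_ONCE() and, if the value is out of range, opportunistically writes the corrected value back with cmpxchg(), letting any concurrent writer win. The clamp, restated:

    static unsigned clamp_reserved(unsigned *param, unsigned def, unsigned max)
    {
            unsigned ios = ACCESS_ONCE(*param);     /* single racy snapshot */
            unsigned fixed = 0;

            if (!ios)
                    fixed = def;    /* 0 means "use the default" */
            else if (ios > max)
                    fixed = max;    /* clamp runaway sysfs writes */

            if (fixed) {
                    (void)cmpxchg(param, ios, fixed);       /* best effort */
                    ios = fixed;
            }
            return ios;
    }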
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 5e604cc7b4aa..1d1ad7b7e527 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -184,6 +184,9 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);
184/* 184/*
185 * Helpers that are used by DM core 185 * Helpers that are used by DM core
186 */ 186 */
187unsigned dm_get_reserved_bio_based_ios(void);
188unsigned dm_get_reserved_rq_based_ios(void);
189
187static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen) 190static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
188{ 191{
189 return !maxlen || strlen(result) + 1 >= maxlen; 192 return !maxlen || strlen(result) + 1 >= maxlen;
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index d0fdc134068a..f6ff711aa5bb 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -57,6 +57,7 @@ void mei_amthif_reset_params(struct mei_device *dev)
57 dev->iamthif_ioctl = false; 57 dev->iamthif_ioctl = false;
58 dev->iamthif_state = MEI_IAMTHIF_IDLE; 58 dev->iamthif_state = MEI_IAMTHIF_IDLE;
59 dev->iamthif_timer = 0; 59 dev->iamthif_timer = 0;
60 dev->iamthif_stall_timer = 0;
60} 61}
61 62
62/** 63/**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 6d0282c08a06..cd2033cd7120 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -297,10 +297,13 @@ int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
297 297
298 if (cl->reading_state != MEI_READ_COMPLETE && 298 if (cl->reading_state != MEI_READ_COMPLETE &&
299 !waitqueue_active(&cl->rx_wait)) { 299 !waitqueue_active(&cl->rx_wait)) {
300
300 mutex_unlock(&dev->device_lock); 301 mutex_unlock(&dev->device_lock);
301 302
302 if (wait_event_interruptible(cl->rx_wait, 303 if (wait_event_interruptible(cl->rx_wait,
303 (MEI_READ_COMPLETE == cl->reading_state))) { 304 cl->reading_state == MEI_READ_COMPLETE ||
305 mei_cl_is_transitioning(cl))) {
306
304 if (signal_pending(current)) 307 if (signal_pending(current))
305 return -EINTR; 308 return -EINTR;
306 return -ERESTARTSYS; 309 return -ERESTARTSYS;
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 9eb031e92070..892cc4207fa2 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -90,6 +90,12 @@ static inline bool mei_cl_is_connected(struct mei_cl *cl)
90 cl->dev->dev_state == MEI_DEV_ENABLED && 90 cl->dev->dev_state == MEI_DEV_ENABLED &&
91 cl->state == MEI_FILE_CONNECTED); 91 cl->state == MEI_FILE_CONNECTED);
92} 92}
93static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
94{
95 return (MEI_FILE_INITIALIZING == cl->state ||
96 MEI_FILE_DISCONNECTED == cl->state ||
97 MEI_FILE_DISCONNECTING == cl->state);
98}
93 99
94bool mei_cl_is_other_connecting(struct mei_cl *cl); 100bool mei_cl_is_other_connecting(struct mei_cl *cl);
95int mei_cl_disconnect(struct mei_cl *cl); 101int mei_cl_disconnect(struct mei_cl *cl);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 6127ab64bb39..0a0448326e9d 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -35,11 +35,15 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
35 struct mei_me_client *clients; 35 struct mei_me_client *clients;
36 int b; 36 int b;
37 37
38 dev->me_clients_num = 0;
39 dev->me_client_presentation_num = 0;
40 dev->me_client_index = 0;
41
38 /* count how many ME clients we have */ 42 /* count how many ME clients we have */
39 for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) 43 for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
40 dev->me_clients_num++; 44 dev->me_clients_num++;
41 45
42 if (dev->me_clients_num <= 0) 46 if (dev->me_clients_num == 0)
43 return; 47 return;
44 48
45 kfree(dev->me_clients); 49 kfree(dev->me_clients);
@@ -221,7 +225,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
221 struct hbm_props_request *prop_req; 225 struct hbm_props_request *prop_req;
222 const size_t len = sizeof(struct hbm_props_request); 226 const size_t len = sizeof(struct hbm_props_request);
223 unsigned long next_client_index; 227 unsigned long next_client_index;
224 u8 client_num; 228 unsigned long client_num;
225 229
226 230
227 client_num = dev->me_client_presentation_num; 231 client_num = dev->me_client_presentation_num;
@@ -677,8 +681,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
677 if (dev->dev_state == MEI_DEV_INIT_CLIENTS && 681 if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
678 dev->hbm_state == MEI_HBM_ENUM_CLIENTS) { 682 dev->hbm_state == MEI_HBM_ENUM_CLIENTS) {
679 dev->init_clients_timer = 0; 683 dev->init_clients_timer = 0;
680 dev->me_client_presentation_num = 0;
681 dev->me_client_index = 0;
682 mei_hbm_me_cl_allocate(dev); 684 mei_hbm_me_cl_allocate(dev);
683 dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; 685 dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
684 686
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 92c73118b13c..6197018e2f16 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -175,6 +175,9 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
175 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); 175 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
176 } 176 }
177 177
178 /* we're already in reset, cancel the init timer */
179 dev->init_clients_timer = 0;
180
178 dev->me_clients_num = 0; 181 dev->me_clients_num = 0;
179 dev->rd_msg_hdr = 0; 182 dev->rd_msg_hdr = 0;
180 dev->wd_pending = false; 183 dev->wd_pending = false;
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 173ff095be0d..cabeddd66c1f 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -249,19 +249,16 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
249 mutex_unlock(&dev->device_lock); 249 mutex_unlock(&dev->device_lock);
250 250
251 if (wait_event_interruptible(cl->rx_wait, 251 if (wait_event_interruptible(cl->rx_wait,
252 (MEI_READ_COMPLETE == cl->reading_state || 252 MEI_READ_COMPLETE == cl->reading_state ||
253 MEI_FILE_INITIALIZING == cl->state || 253 mei_cl_is_transitioning(cl))) {
254 MEI_FILE_DISCONNECTED == cl->state || 254
255 MEI_FILE_DISCONNECTING == cl->state))) {
256 if (signal_pending(current)) 255 if (signal_pending(current))
257 return -EINTR; 256 return -EINTR;
258 return -ERESTARTSYS; 257 return -ERESTARTSYS;
259 } 258 }
260 259
261 mutex_lock(&dev->device_lock); 260 mutex_lock(&dev->device_lock);
262 if (MEI_FILE_INITIALIZING == cl->state || 261 if (mei_cl_is_transitioning(cl)) {
263 MEI_FILE_DISCONNECTED == cl->state ||
264 MEI_FILE_DISCONNECTING == cl->state) {
265 rets = -EBUSY; 262 rets = -EBUSY;
266 goto out; 263 goto out;
267 } 264 }
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 7b918b2fb894..456b322013e2 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -396,9 +396,9 @@ struct mei_device {
396 struct mei_me_client *me_clients; /* Note: memory has to be allocated */ 396 struct mei_me_client *me_clients; /* Note: memory has to be allocated */
397 DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); 397 DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
398 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); 398 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
399 u8 me_clients_num; 399 unsigned long me_clients_num;
400 u8 me_client_presentation_num; 400 unsigned long me_client_presentation_num;
401 u8 me_client_index; 401 unsigned long me_client_index;
402 402
403 struct mei_cl wd_cl; 403 struct mei_cl wd_cl;
404 enum mei_wd_states wd_state; 404 enum mei_wd_states wd_state;
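Widening the three counters from u8 to unsigned long is not cosmetic: assuming MEI_CLIENTS_MAX is 256, a u8 counter wraps back to 0 exactly when every bit of me_clients_map is set, so mei_hbm_me_cl_allocate() could count 256 clients and conclude there are none. A small demonstration of the wrap:

    static void count_demo(void)
    {
            DECLARE_BITMAP(map, 256);
            u8 narrow = 0;
            int b;

            bitmap_fill(map, 256);
            for_each_set_bit(b, map, 256)
                    narrow++;
            /* narrow is now 0: the 256th increment wrapped it */
    }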
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e8ccf6c0f08a..bdd64b1b4817 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1155,8 +1155,14 @@ static void pci_enable_bridge(struct pci_dev *dev)
1155 1155
1156 pci_enable_bridge(dev->bus->self); 1156 pci_enable_bridge(dev->bus->self);
1157 1157
1158 if (pci_is_enabled(dev)) 1158 if (pci_is_enabled(dev)) {
1159 if (!dev->is_busmaster) {
1160 dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n");
1161 pci_set_master(dev);
1162 }
1159 return; 1163 return;
1164 }
1165
1160 retval = pci_enable_device(dev); 1166 retval = pci_enable_device(dev);
1161 if (retval) 1167 if (retval)
1162 dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", 1168 dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
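The warning covers a real failure mode: firmware or an earlier driver may have enabled the bridge without bus mastering, and devices behind it cannot DMA until the bridge forwards master transactions, so the early return now fixes the bridge up instead of silently keeping it broken. Condensed:

    if (pci_is_enabled(bridge)) {
            if (!bridge->is_busmaster)
                    pci_set_master(bridge); /* children need master forwarding */
            return;
    }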
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 47c5888461ff..a2e52a0c53c9 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -41,7 +41,6 @@ struct imx_drm_device {
41 struct list_head encoder_list; 41 struct list_head encoder_list;
42 struct list_head connector_list; 42 struct list_head connector_list;
43 struct mutex mutex; 43 struct mutex mutex;
44 int references;
45 int pipes; 44 int pipes;
46 struct drm_fbdev_cma *fbhelper; 45 struct drm_fbdev_cma *fbhelper;
47}; 46};
@@ -241,8 +240,6 @@ struct drm_device *imx_drm_device_get(void)
241 } 240 }
242 } 241 }
243 242
244 imxdrm->references++;
245
246 return imxdrm->drm; 243 return imxdrm->drm;
247 244
248unwind_crtc: 245unwind_crtc:
@@ -280,8 +277,6 @@ void imx_drm_device_put(void)
280 list_for_each_entry(enc, &imxdrm->encoder_list, list) 277 list_for_each_entry(enc, &imxdrm->encoder_list, list)
281 module_put(enc->owner); 278 module_put(enc->owner);
282 279
283 imxdrm->references--;
284
285 mutex_unlock(&imxdrm->mutex); 280 mutex_unlock(&imxdrm->mutex);
286} 281}
287EXPORT_SYMBOL_GPL(imx_drm_device_put); 282EXPORT_SYMBOL_GPL(imx_drm_device_put);
@@ -485,7 +480,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
485 480
486 mutex_lock(&imxdrm->mutex); 481 mutex_lock(&imxdrm->mutex);
487 482
488 if (imxdrm->references) { 483 if (imxdrm->drm->open_count) {
489 ret = -EBUSY; 484 ret = -EBUSY;
490 goto err_busy; 485 goto err_busy;
491 } 486 }
@@ -564,7 +559,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
564 559
565 mutex_lock(&imxdrm->mutex); 560 mutex_lock(&imxdrm->mutex);
566 561
567 if (imxdrm->references) { 562 if (imxdrm->drm->open_count) {
568 ret = -EBUSY; 563 ret = -EBUSY;
569 goto err_busy; 564 goto err_busy;
570 } 565 }
@@ -709,7 +704,7 @@ int imx_drm_add_connector(struct drm_connector *connector,
709 704
710 mutex_lock(&imxdrm->mutex); 705 mutex_lock(&imxdrm->mutex);
711 706
712 if (imxdrm->references) { 707 if (imxdrm->drm->open_count) {
713 ret = -EBUSY; 708 ret = -EBUSY;
714 goto err_busy; 709 goto err_busy;
715 } 710 }
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 2644edf438c1..c8b43442dc74 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1387,7 +1387,7 @@ echo_copyout_lsm (struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
1387 if (nob > ulsm_nob) 1387 if (nob > ulsm_nob)
1388 return (-EINVAL); 1388 return (-EINVAL);
1389 1389
1390 if (copy_to_user (ulsm, lsm, sizeof(ulsm))) 1390 if (copy_to_user (ulsm, lsm, sizeof(*ulsm)))
1391 return (-EFAULT); 1391 return (-EFAULT);
1392 1392
1393 for (i = 0; i < lsm->lsm_stripe_count; i++) { 1393 for (i = 0; i < lsm->lsm_stripe_count; i++) {
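This hunk and the next several staging fixes are all the same classic C bug: sizeof applied to a pointer yields the size of the pointer (4 or 8 bytes), not of the object it addresses, so the copies and memsets were touching only the first few bytes. A minimal illustration with a hypothetical struct:

    struct payload { char bytes[512]; };    /* hypothetical */

    static void demo(struct payload *p)
    {
            memset(p, 0, sizeof(p));        /* BUG: clears 4 or 8 bytes */
            memset(p, 0, sizeof(*p));       /* correct: clears all 512 */
    }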
diff --git a/drivers/staging/octeon-usb/cvmx-usb.c b/drivers/staging/octeon-usb/cvmx-usb.c
index d7b3c82b5ead..45dfe94199ae 100644
--- a/drivers/staging/octeon-usb/cvmx-usb.c
+++ b/drivers/staging/octeon-usb/cvmx-usb.c
@@ -604,7 +604,7 @@ int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number,
604 } 604 }
605 } 605 }
606 606
607 memset(usb, 0, sizeof(usb)); 607 memset(usb, 0, sizeof(*usb));
608 usb->init_flags = flags; 608 usb->init_flags = flags;
609 609
610 /* Initialize the USB state structure */ 610 /* Initialize the USB state structure */
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp.c b/drivers/staging/rtl8188eu/core/rtw_mp.c
index c7ff2e4d1f23..9832dcbbd07f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mp.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mp.c
@@ -907,7 +907,7 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
907 sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop); 907 sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop);
908 } 908 }
909 909
910 _rtw_memset(data, '\0', sizeof(data)); 910 _rtw_memset(data, '\0', sizeof(*data));
911 911
912 i = psd_start; 912 i = psd_start;
913 while (i < psd_stop) { 913 while (i < psd_stop) {
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 9c2e7a20c09e..ec0028d4e61a 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -57,7 +57,7 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
57 u8 cut_ver, fab_ver; 57 u8 cut_ver, fab_ver;
58 58
59 /* Init Value */ 59 /* Init Value */
60 _rtw_memset(dm_odm, 0, sizeof(dm_odm)); 60 _rtw_memset(dm_odm, 0, sizeof(*dm_odm));
61 61
62 dm_odm->Adapter = Adapter; 62 dm_odm->Adapter = Adapter;
63 63
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index cd4100fb3645..95953ebc0279 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -6973,7 +6973,7 @@ static int rtw_mp_ctx(struct net_device *dev,
6973 stop = strncmp(extra, "stop", 4); 6973 stop = strncmp(extra, "stop", 4);
6974 sscanf(extra, "count =%d, pkt", &count); 6974 sscanf(extra, "count =%d, pkt", &count);
6975 6975
6976 _rtw_memset(extra, '\0', sizeof(extra)); 6976 _rtw_memset(extra, '\0', sizeof(*extra));
6977 6977
6978 if (stop == 0) { 6978 if (stop == 0) {
6979 bStartTest = 0; /* To set Stop */ 6979 bStartTest = 0; /* To set Stop */
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index d3078d200e50..9ca3180ebaa0 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -54,6 +54,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
54 /*=== Customer ID ===*/ 54 /*=== Customer ID ===*/
55 /****** 8188EUS ********/ 55 /****** 8188EUS ********/
56 {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */ 56 {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
57 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
57 {} /* Terminating entry */ 58 {} /* Terminating entry */
58}; 59};
59 60
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index 5bc361b16d4c..56144014b7c9 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -37,6 +37,8 @@ rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
37 /* Get TCB and local buffer from common pool. 37 /* Get TCB and local buffer from common pool.
38 (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */ 38 (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */
39 skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4); 39 skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
40 if (!skb)
41 return RT_STATUS_FAILURE;
40 memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); 42 memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
41 tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); 43 tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
42 tcb_desc->queue_index = TXCMD_QUEUE; 44 tcb_desc->queue_index = TXCMD_QUEUE;
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index d0cf7d8a20e5..8872e0f84f40 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -1634,6 +1634,9 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
1634 if (pMgmt == NULL) 1634 if (pMgmt == NULL)
1635 return -EFAULT; 1635 return -EFAULT;
1636 1636
1637 if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
1638 return -ENODEV;
1639
1637 buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL); 1640 buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL);
1638 if (buf == NULL) 1641 if (buf == NULL)
1639 return -ENOMEM; 1642 return -ENOMEM;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 536971786ae8..6f9d28182445 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1098,6 +1098,8 @@ static int device_close(struct net_device *dev)
1098 memset(pMgmt->abyCurrBSSID, 0, 6); 1098 memset(pMgmt->abyCurrBSSID, 0, 6);
1099 pMgmt->eCurrState = WMAC_STATE_IDLE; 1099 pMgmt->eCurrState = WMAC_STATE_IDLE;
1100 1100
1101 pDevice->flags &= ~DEVICE_FLAGS_OPENED;
1102
1101 device_free_tx_bufs(pDevice); 1103 device_free_tx_bufs(pDevice);
1102 device_free_rx_bufs(pDevice); 1104 device_free_rx_bufs(pDevice);
1103 device_free_int_bufs(pDevice); 1105 device_free_int_bufs(pDevice);
@@ -1109,7 +1111,6 @@ static int device_close(struct net_device *dev)
1109 usb_free_urb(pDevice->pInterruptURB); 1111 usb_free_urb(pDevice->pInterruptURB);
1110 1112
1111 BSSvClearNodeDBTable(pDevice, 0); 1113 BSSvClearNodeDBTable(pDevice, 0);
1112 pDevice->flags &=(~DEVICE_FLAGS_OPENED);
1113 1114
1114 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n"); 1115 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
1115 1116
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index fb743a8811bb..14f3e852215d 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -148,6 +148,8 @@ static void *s_vGetFreeContext(struct vnt_private *pDevice)
148 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n"); 148 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
149 149
150 for (ii = 0; ii < pDevice->cbTD; ii++) { 150 for (ii = 0; ii < pDevice->cbTD; ii++) {
151 if (!pDevice->apTD[ii])
152 return NULL;
151 pContext = pDevice->apTD[ii]; 153 pContext = pDevice->apTD[ii];
152 if (pContext->bBoolInUse == false) { 154 if (pContext->bBoolInUse == false) {
153 pContext->bBoolInUse = true; 155 pContext->bBoolInUse = true;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index c9a9ddd1d0bc..01bf5eb4f238 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1758,8 +1758,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1758 canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON; 1758 canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON;
1759 if (canon_change) { 1759 if (canon_change) {
1760 bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); 1760 bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
1761 ldata->line_start = 0; 1761 ldata->line_start = ldata->canon_head = ldata->read_tail;
1762 ldata->canon_head = ldata->read_tail;
1763 ldata->erasing = 0; 1762 ldata->erasing = 0;
1764 ldata->lnext = 0; 1763 ldata->lnext = 0;
1765 } 1764 }
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 52379e56a31e..44077c0b7670 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -667,30 +667,21 @@ static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
667 667
668static int dma_push_rx(struct eg20t_port *priv, int size) 668static int dma_push_rx(struct eg20t_port *priv, int size)
669{ 669{
670 struct tty_struct *tty;
671 int room; 670 int room;
672 struct uart_port *port = &priv->port; 671 struct uart_port *port = &priv->port;
673 struct tty_port *tport = &port->state->port; 672 struct tty_port *tport = &port->state->port;
674 673
675 port = &priv->port;
676 tty = tty_port_tty_get(tport);
677 if (!tty) {
678 dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
679 return 0;
680 }
681
682 room = tty_buffer_request_room(tport, size); 674 room = tty_buffer_request_room(tport, size);
683 675
684 if (room < size) 676 if (room < size)
685 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", 677 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
686 size - room); 678 size - room);
687 if (!room) 679 if (!room)
688 return room; 680 return 0;
689 681
690 tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size); 682 tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size);
691 683
692 port->icount.rx += room; 684 port->icount.rx += room;
693 tty_kref_put(tty);
694 685
695 return room; 686 return room;
696} 687}
@@ -1098,6 +1089,8 @@ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
1098 if (tty == NULL) { 1089 if (tty == NULL) {
1099 for (i = 0; error_msg[i] != NULL; i++) 1090 for (i = 0; error_msg[i] != NULL; i++)
1100 dev_err(&priv->pdev->dev, error_msg[i]); 1091 dev_err(&priv->pdev->dev, error_msg[i]);
1092 } else {
1093 tty_kref_put(tty);
1101 } 1094 }
1102} 1095}
1103 1096
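Both pch_uart changes revolve around the counted reference returned by tty_port_tty_get(): dma_push_rx() no longer needs the tty at all now that it works on the tty_port flip buffers, and pch_uart_err_ir() was leaking the reference whenever a tty was attached. The rule, sketched:

    static void report_errors(struct tty_port *tport)
    {
            struct tty_struct *tty = tty_port_tty_get(tport);

            if (!tty)
                    return;         /* nothing taken, nothing to drop */

            /* ... use tty ... */
            tty_kref_put(tty);      /* every successful get needs a put */
    }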
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index d0d972f7e43e..0489a2bdcdf9 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -732,7 +732,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
732static void tegra_uart_stop_rx(struct uart_port *u) 732static void tegra_uart_stop_rx(struct uart_port *u)
733{ 733{
734 struct tegra_uart_port *tup = to_tegra_uport(u); 734 struct tegra_uart_port *tup = to_tegra_uport(u);
735 struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port); 735 struct tty_struct *tty;
736 struct tty_port *port = &u->state->port; 736 struct tty_port *port = &u->state->port;
737 struct dma_tx_state state; 737 struct dma_tx_state state;
738 unsigned long ier; 738 unsigned long ier;
@@ -744,6 +744,8 @@ static void tegra_uart_stop_rx(struct uart_port *u)
744 if (!tup->rx_in_progress) 744 if (!tup->rx_in_progress)
745 return; 745 return;
746 746
747 tty = tty_port_tty_get(&tup->uport.state->port);
748
747 tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */ 749 tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */
748 750
749 ier = tup->ier_shadow; 751 ier = tup->ier_shadow;
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 03ba081c5772..6fd60fece6b4 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -1201,6 +1201,9 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
1201 } 1201 }
1202 return 0; 1202 return 0;
1203 case TCFLSH: 1203 case TCFLSH:
1204 retval = tty_check_change(tty);
1205 if (retval)
1206 return retval;
1204 return __tty_perform_flush(tty, arg); 1207 return __tty_perform_flush(tty, arg);
1205 default: 1208 default:
1206 /* Try the mode commands */ 1209 /* Try the mode commands */
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 4a851e15e58c..77b47d82c9a6 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -1,6 +1,6 @@
1config USB_CHIPIDEA 1config USB_CHIPIDEA
2 tristate "ChipIdea Highspeed Dual Role Controller" 2 tristate "ChipIdea Highspeed Dual Role Controller"
3 depends on (USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET) 3 depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
4 help 4 help
5 Say Y here if your system has a dual role high speed USB 5 Say Y here if your system has a dual role high speed USB
6 controller based on ChipIdea silicon IP. Currently, only the 6 controller based on ChipIdea silicon IP. Currently, only the
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 74d998d9b45b..be822a2c1776 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -131,7 +131,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
131 if (ret) { 131 if (ret) {
132 dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n", 132 dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n",
133 ret); 133 ret);
134 goto err_clk; 134 goto err_phy;
135 } 135 }
136 } 136 }
137 137
@@ -143,7 +143,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
143 dev_err(&pdev->dev, 143 dev_err(&pdev->dev,
144 "Can't register ci_hdrc platform device, err=%d\n", 144 "Can't register ci_hdrc platform device, err=%d\n",
145 ret); 145 ret);
146 goto err_clk; 146 goto err_phy;
147 } 147 }
148 148
149 if (data->usbmisc_data) { 149 if (data->usbmisc_data) {
@@ -164,6 +164,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
164 164
165disable_device: 165disable_device:
166 ci_hdrc_remove_device(data->ci_pdev); 166 ci_hdrc_remove_device(data->ci_pdev);
167err_phy:
168 if (data->phy)
169 usb_phy_shutdown(data->phy);
167err_clk: 170err_clk:
168 clk_disable_unprepare(data->clk); 171 clk_disable_unprepare(data->clk);
169 return ret; 172 return ret;
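The new err_phy label restores the invariant of kernel-style error unwinding: labels tear down in the reverse order of setup, so any failure after the PHY has been initialized must shut it down before the clock is released. The shape of the pattern, with a hypothetical later probe step:

    static int register_children(void)
    {
            return 0;       /* hypothetical later probe step */
    }

    static int probe_sketch(struct usb_phy *phy, struct clk *clk)
    {
            int ret;

            ret = usb_phy_init(phy);
            if (ret)
                    goto err_clk;           /* PHY never came up */

            ret = register_children();
            if (ret)
                    goto err_phy;           /* PHY is up and must go down */

            return 0;

    err_phy:
            usb_phy_shutdown(phy);
    err_clk:
            clk_disable_unprepare(clk);
            return ret;
    }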
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 94626409559a..23763dcec069 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -605,6 +605,7 @@ static int ci_hdrc_remove(struct platform_device *pdev)
605 dbg_remove_files(ci); 605 dbg_remove_files(ci);
606 free_irq(ci->irq, ci); 606 free_irq(ci->irq, ci);
607 ci_role_destroy(ci); 607 ci_role_destroy(ci);
608 kfree(ci->hw_bank.regmap);
608 609
609 return 0; 610 return 0;
610} 611}
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 6b4c2f2eb946..9333083dd111 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1600,6 +1600,8 @@ static void destroy_eps(struct ci_hdrc *ci)
1600 for (i = 0; i < ci->hw_ep_max; i++) { 1600 for (i = 0; i < ci->hw_ep_max; i++) {
1601 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i]; 1601 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
1602 1602
1603 if (hwep->pending_td)
1604 free_pending_td(hwep);
1603 dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma); 1605 dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
1604 } 1606 }
1605} 1607}
@@ -1667,13 +1669,13 @@ static int ci_udc_stop(struct usb_gadget *gadget,
1667 if (ci->platdata->notify_event) 1669 if (ci->platdata->notify_event)
1668 ci->platdata->notify_event(ci, 1670 ci->platdata->notify_event(ci,
1669 CI_HDRC_CONTROLLER_STOPPED_EVENT); 1671 CI_HDRC_CONTROLLER_STOPPED_EVENT);
1670 ci->driver = NULL;
1671 spin_unlock_irqrestore(&ci->lock, flags); 1672 spin_unlock_irqrestore(&ci->lock, flags);
1672 _gadget_stop_activity(&ci->gadget); 1673 _gadget_stop_activity(&ci->gadget);
1673 spin_lock_irqsave(&ci->lock, flags); 1674 spin_lock_irqsave(&ci->lock, flags);
1674 pm_runtime_put(&ci->gadget.dev); 1675 pm_runtime_put(&ci->gadget.dev);
1675 } 1676 }
1676 1677
1678 ci->driver = NULL;
1677 spin_unlock_irqrestore(&ci->lock, flags); 1679 spin_unlock_irqrestore(&ci->lock, flags);
1678 1680
1679 return 0; 1681 return 0;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 737e3c19967b..71dc5d768fa5 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -742,6 +742,22 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
742 if ((index & ~USB_DIR_IN) == 0) 742 if ((index & ~USB_DIR_IN) == 0)
743 return 0; 743 return 0;
744 ret = findintfep(ps->dev, index); 744 ret = findintfep(ps->dev, index);
745 if (ret < 0) {
746 /*
747 * Some not fully compliant Win apps seem to get
748 * index wrong and have the endpoint number here
749 * rather than the endpoint address (with the
750 * correct direction). Win does let this through,
751 * so we'll not reject it here but leave it to
752 * the device to not break KVM. But we warn.
753 */
754 ret = findintfep(ps->dev, index ^ 0x80);
755 if (ret >= 0)
756 dev_info(&ps->dev->dev,
757 "%s: process %i (%s) requesting ep %02x but needs %02x\n",
758 __func__, task_pid_nr(current),
759 current->comm, index, index ^ 0x80);
760 }
745 if (ret >= 0) 761 if (ret >= 0)
746 ret = checkintf(ps, ret); 762 ret = checkintf(ps, ret);
747 break; 763 break;
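The fallback lookup leans on how endpoint addresses are encoded: bits 0-3 carry the endpoint number and bit 7 (USB_DIR_IN, 0x80) the direction, so XORing with 0x80 turns the bare endpoint number a sloppy app passed in into the address with the opposite direction bit, which is usually what it meant. Sketch:

    static int find_ep_lenient(struct dev_state *ps, u8 index)
    {
            int ret = findintfep(ps->dev, index);   /* exact address first */

            /* USB_DIR_IN is 0x80: flip bit 7 to try the other direction */
            if (ret < 0)
                    ret = findintfep(ps->dev, index ^ USB_DIR_IN);
            return ret;
    }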
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index dde4c83516a1..e6b682c6c236 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3426,6 +3426,9 @@ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
3426 unsigned long long u2_pel; 3426 unsigned long long u2_pel;
3427 int ret; 3427 int ret;
3428 3428
3429 if (udev->state != USB_STATE_CONFIGURED)
3430 return 0;
3431
3429 /* Convert SEL and PEL stored in ns to us */ 3432 /* Convert SEL and PEL stored in ns to us */
3430 u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); 3433 u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
3431 u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); 3434 u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 997ebe420bc9..2e252aae51ca 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -29,6 +29,7 @@
29#define PCI_VENDOR_ID_SYNOPSYS 0x16c3 29#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
30#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd 30#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
31#define PCI_DEVICE_ID_INTEL_BYT 0x0f37 31#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
32#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
32 33
33struct dwc3_pci { 34struct dwc3_pci {
34 struct device *dev; 35 struct device *dev;
@@ -189,6 +190,7 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
189 PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3), 190 PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
190 }, 191 },
191 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), }, 192 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
193 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
192 { } /* Terminating Entry */ 194 { } /* Terminating Entry */
193}; 195};
194MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table); 196MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 1a66c5baa0d1..0658908d8968 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1034,37 +1034,19 @@ struct ffs_sb_fill_data {
1034 struct ffs_file_perms perms; 1034 struct ffs_file_perms perms;
1035 umode_t root_mode; 1035 umode_t root_mode;
1036 const char *dev_name; 1036 const char *dev_name;
1037 union { 1037 struct ffs_data *ffs_data;
1038 /* set by ffs_fs_mount(), read by ffs_sb_fill() */
1039 void *private_data;
1040 /* set by ffs_sb_fill(), read by ffs_fs_mount */
1041 struct ffs_data *ffs_data;
1042 };
1043}; 1038};
1044 1039
1045static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) 1040static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1046{ 1041{
1047 struct ffs_sb_fill_data *data = _data; 1042 struct ffs_sb_fill_data *data = _data;
1048 struct inode *inode; 1043 struct inode *inode;
1049 struct ffs_data *ffs; 1044 struct ffs_data *ffs = data->ffs_data;
1050 1045
1051 ENTER(); 1046 ENTER();
1052 1047
1053 /* Initialise data */
1054 ffs = ffs_data_new();
1055 if (unlikely(!ffs))
1056 goto Enomem;
1057
1058 ffs->sb = sb; 1048 ffs->sb = sb;
1059 ffs->dev_name = kstrdup(data->dev_name, GFP_KERNEL); 1049 data->ffs_data = NULL;
1060 if (unlikely(!ffs->dev_name))
1061 goto Enomem;
1062 ffs->file_perms = data->perms;
1063 ffs->private_data = data->private_data;
1064
1065 /* used by the caller of this function */
1066 data->ffs_data = ffs;
1067
1068 sb->s_fs_info = ffs; 1050 sb->s_fs_info = ffs;
1069 sb->s_blocksize = PAGE_CACHE_SIZE; 1051 sb->s_blocksize = PAGE_CACHE_SIZE;
1070 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1052 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -1080,17 +1062,14 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1080 &data->perms); 1062 &data->perms);
1081 sb->s_root = d_make_root(inode); 1063 sb->s_root = d_make_root(inode);
1082 if (unlikely(!sb->s_root)) 1064 if (unlikely(!sb->s_root))
1083 goto Enomem; 1065 return -ENOMEM;
1084 1066
1085 /* EP0 file */ 1067 /* EP0 file */
1086 if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs, 1068 if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
1087 &ffs_ep0_operations, NULL))) 1069 &ffs_ep0_operations, NULL)))
1088 goto Enomem; 1070 return -ENOMEM;
1089 1071
1090 return 0; 1072 return 0;
1091
1092Enomem:
1093 return -ENOMEM;
1094} 1073}
1095 1074
1096static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts) 1075static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
@@ -1193,6 +1172,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
1193 struct dentry *rv; 1172 struct dentry *rv;
1194 int ret; 1173 int ret;
1195 void *ffs_dev; 1174 void *ffs_dev;
1175 struct ffs_data *ffs;
1196 1176
1197 ENTER(); 1177 ENTER();
1198 1178
@@ -1200,18 +1180,30 @@ ffs_fs_mount(struct file_system_type *t, int flags,
1200 if (unlikely(ret < 0)) 1180 if (unlikely(ret < 0))
1201 return ERR_PTR(ret); 1181 return ERR_PTR(ret);
1202 1182
1183 ffs = ffs_data_new();
1184 if (unlikely(!ffs))
1185 return ERR_PTR(-ENOMEM);
1186 ffs->file_perms = data.perms;
1187
1188 ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
1189 if (unlikely(!ffs->dev_name)) {
1190 ffs_data_put(ffs);
1191 return ERR_PTR(-ENOMEM);
1192 }
1193
1203 ffs_dev = functionfs_acquire_dev_callback(dev_name); 1194 ffs_dev = functionfs_acquire_dev_callback(dev_name);
1204 if (IS_ERR(ffs_dev)) 1195 if (IS_ERR(ffs_dev)) {
1205 return ffs_dev; 1196 ffs_data_put(ffs);
1197 return ERR_CAST(ffs_dev);
1198 }
1199 ffs->private_data = ffs_dev;
1200 data.ffs_data = ffs;
1206 1201
1207 data.dev_name = dev_name;
1208 data.private_data = ffs_dev;
1209 rv = mount_nodev(t, flags, &data, ffs_sb_fill); 1202 rv = mount_nodev(t, flags, &data, ffs_sb_fill);
1210 1203 if (IS_ERR(rv) && data.ffs_data) {
1211 /* data.ffs_data is set by ffs_sb_fill */
1212 if (IS_ERR(rv))
1213 functionfs_release_dev_callback(data.ffs_data); 1204 functionfs_release_dev_callback(data.ffs_data);
1214 1205 ffs_data_put(data.ffs_data);
1206 }
1215 return rv; 1207 return rv;
1216} 1208}
1217 1209
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 4449f565d6c6..f2407b2e8a99 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -130,7 +130,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
130 } 130 }
131 131
132 /* Enable USB controller, 83xx or 8536 */ 132 /* Enable USB controller, 83xx or 8536 */
133 if (pdata->have_sysif_regs) 133 if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
134 setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4); 134 setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
135 135
136 /* Don't need to set host mode here. It will be done by tdi_reset() */ 136 /* Don't need to set host mode here. It will be done by tdi_reset() */
@@ -232,15 +232,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
232 case FSL_USB2_PHY_ULPI: 232 case FSL_USB2_PHY_ULPI:
233 if (pdata->have_sysif_regs && pdata->controller_ver) { 233 if (pdata->have_sysif_regs && pdata->controller_ver) {
234 /* controller version 1.6 or above */ 234 /* controller version 1.6 or above */
235 clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
235 setbits32(non_ehci + FSL_SOC_USB_CTRL, 236 setbits32(non_ehci + FSL_SOC_USB_CTRL,
236 ULPI_PHY_CLK_SEL); 237 ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
237 /*
238 * Due to controller issue of PHY_CLK_VALID in ULPI
239 * mode, we set USB_CTRL_USB_EN before checking
240 * PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work.
241 */
242 clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
243 UTMI_PHY_EN, USB_CTRL_USB_EN);
244 } 238 }
245 portsc |= PORT_PTS_ULPI; 239 portsc |= PORT_PTS_ULPI;
246 break; 240 break;
@@ -270,8 +264,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
270 if (pdata->have_sysif_regs && pdata->controller_ver && 264 if (pdata->have_sysif_regs && pdata->controller_ver &&
271 (phy_mode == FSL_USB2_PHY_ULPI)) { 265 (phy_mode == FSL_USB2_PHY_ULPI)) {
272 /* check PHY_CLK_VALID to get phy clk valid */ 266 /* check PHY_CLK_VALID to get phy clk valid */
273 if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) & 267 if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
274 PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0)) { 268 PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
269 in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
275 printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n"); 270 printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n");
276 return -EINVAL; 271 return -EINVAL;
277 } 272 }
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 6bd299e61f58..854c2ec7b699 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -361,7 +361,7 @@ static struct pci_driver ehci_pci_driver = {
361 .remove = usb_hcd_pci_remove, 361 .remove = usb_hcd_pci_remove,
362 .shutdown = usb_hcd_pci_shutdown, 362 .shutdown = usb_hcd_pci_shutdown,
363 363
364#ifdef CONFIG_PM_SLEEP 364#ifdef CONFIG_PM
365 .driver = { 365 .driver = {
366 .pm = &usb_hcd_pci_pm_ops 366 .pm = &usb_hcd_pci_pm_ops
367 }, 367 },
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 60a5de505ca1..adb01d950a16 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -824,13 +824,13 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
824 i = DIV_ROUND_UP(wrap_frame( 824 i = DIV_ROUND_UP(wrap_frame(
825 cur_frame - urb->start_frame), 825 cur_frame - urb->start_frame),
826 urb->interval); 826 urb->interval);
827 if (urb->transfer_flags & URB_ISO_ASAP) { 827
828 /* Treat underruns as if URB_ISO_ASAP was set */
829 if ((urb->transfer_flags & URB_ISO_ASAP) ||
830 i >= urb->number_of_packets) {
828 urb->start_frame = wrap_frame(urb->start_frame 831 urb->start_frame = wrap_frame(urb->start_frame
829 + i * urb->interval); 832 + i * urb->interval);
830 i = 0; 833 i = 0;
831 } else if (i >= urb->number_of_packets) {
832 ret = -EXDEV;
833 goto alloc_dmem_failed;
834 } 834 }
835 } 835 }
836 } 836 }
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 8f6b695af6a4..604cad1bcf9c 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -216,31 +216,26 @@ static int ohci_urb_enqueue (
216 frame &= ~(ed->interval - 1); 216 frame &= ~(ed->interval - 1);
217 frame |= ed->branch; 217 frame |= ed->branch;
218 urb->start_frame = frame; 218 urb->start_frame = frame;
219 ed->last_iso = frame + ed->interval * (size - 1);
219 } 220 }
220 } else if (ed->type == PIPE_ISOCHRONOUS) { 221 } else if (ed->type == PIPE_ISOCHRONOUS) {
221 u16 next = ohci_frame_no(ohci) + 1; 222 u16 next = ohci_frame_no(ohci) + 1;
222 u16 frame = ed->last_iso + ed->interval; 223 u16 frame = ed->last_iso + ed->interval;
224 u16 length = ed->interval * (size - 1);
223 225
224 /* Behind the scheduling threshold? */ 226 /* Behind the scheduling threshold? */
225 if (unlikely(tick_before(frame, next))) { 227 if (unlikely(tick_before(frame, next))) {
226 228
227 /* USB_ISO_ASAP: Round up to the first available slot */ 229 /* URB_ISO_ASAP: Round up to the first available slot */
228 if (urb->transfer_flags & URB_ISO_ASAP) { 230 if (urb->transfer_flags & URB_ISO_ASAP) {
229 frame += (next - frame + ed->interval - 1) & 231 frame += (next - frame + ed->interval - 1) &
230 -ed->interval; 232 -ed->interval;
231 233
232 /* 234 /*
233 * Not ASAP: Use the next slot in the stream. If 235 * Not ASAP: Use the next slot in the stream,
234 * the entire URB falls before the threshold, fail. 236 * no matter what.
235 */ 237 */
236 } else { 238 } else {
237 if (tick_before(frame + ed->interval *
238 (urb->number_of_packets - 1), next)) {
239 retval = -EXDEV;
240 usb_hcd_unlink_urb_from_ep(hcd, urb);
241 goto fail;
242 }
243
244 /* 239 /*
245 * Some OHCI hardware doesn't handle late TDs 240 * Some OHCI hardware doesn't handle late TDs
246 * correctly. After retiring them it proceeds 241 * correctly. After retiring them it proceeds
@@ -251,9 +246,16 @@ static int ohci_urb_enqueue (
251 urb_priv->td_cnt = DIV_ROUND_UP( 246 urb_priv->td_cnt = DIV_ROUND_UP(
252 (u16) (next - frame), 247 (u16) (next - frame),
253 ed->interval); 248 ed->interval);
249 if (urb_priv->td_cnt >= urb_priv->length) {
250 ++urb_priv->td_cnt; /* Mark it */
251 ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n",
252 urb, frame, length,
253 next);
254 }
254 } 255 }
255 } 256 }
256 urb->start_frame = frame; 257 urb->start_frame = frame;
258 ed->last_iso = frame + length;
257 } 259 }
258 260
259 /* fill the TDs and link them to the ed; and 261 /* fill the TDs and link them to the ed; and
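Every frame comparison in this hunk (tick_before(), the ASAP rounding, last_iso) is done on 16-bit OHCI frame numbers, which wrap every 65536 frames, so "earlier" can only be defined through wrap-safe signed subtraction. The helper the code relies on is essentially:

    /* True if frame t1 comes before t2, modulo 2^16 (wrap-safe). */
    static inline int tick_before(u16 t1, u16 t2)
    {
            return (s16)(t1 - t2) < 0;
    }

For example, tick_before(0xfffe, 0x0001) is true: the subtraction yields (s16)0xfffd, i.e. -3, rather than 65533.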
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index df4a6707322d..e7f577e63624 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -41,9 +41,13 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
41__releases(ohci->lock) 41__releases(ohci->lock)
42__acquires(ohci->lock) 42__acquires(ohci->lock)
43{ 43{
44 struct device *dev = ohci_to_hcd(ohci)->self.controller; 44 struct device *dev = ohci_to_hcd(ohci)->self.controller;
45 struct usb_host_endpoint *ep = urb->ep;
46 struct urb_priv *urb_priv;
47
45 // ASSERT (urb->hcpriv != 0); 48 // ASSERT (urb->hcpriv != 0);
46 49
50 restart:
47 urb_free_priv (ohci, urb->hcpriv); 51 urb_free_priv (ohci, urb->hcpriv);
48 urb->hcpriv = NULL; 52 urb->hcpriv = NULL;
49 if (likely(status == -EINPROGRESS)) 53 if (likely(status == -EINPROGRESS))
@@ -80,6 +84,21 @@ __acquires(ohci->lock)
80 ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE); 84 ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
81 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control); 85 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
82 } 86 }
87
88 /*
89 * An isochronous URB that is sumitted too late won't have any TDs
90 * (marked by the fact that the td_cnt value is larger than the
91 * actual number of TDs). If the next URB on this endpoint is like
92 * that, give it back now.
93 */
94 if (!list_empty(&ep->urb_list)) {
95 urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
96 urb_priv = urb->hcpriv;
97 if (urb_priv->td_cnt > urb_priv->length) {
98 status = 0;
99 goto restart;
100 }
101 }
83} 102}
84 103
85 104
@@ -546,7 +565,6 @@ td_fill (struct ohci_hcd *ohci, u32 info,
546 td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000); 565 td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
547 *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci, 566 *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
548 (data & 0x0FFF) | 0xE000); 567 (data & 0x0FFF) | 0xE000);
549 td->ed->last_iso = info & 0xffff;
550 } else { 568 } else {
551 td->hwCBP = cpu_to_hc32 (ohci, data); 569 td->hwCBP = cpu_to_hc32 (ohci, data);
552 } 570 }
@@ -996,7 +1014,7 @@ rescan_this:
996 urb_priv->td_cnt++; 1014 urb_priv->td_cnt++;
997 1015
998 /* if URB is done, clean up */ 1016 /* if URB is done, clean up */
999 if (urb_priv->td_cnt == urb_priv->length) { 1017 if (urb_priv->td_cnt >= urb_priv->length) {
1000 modified = completed = 1; 1018 modified = completed = 1;
1001 finish_urb(ohci, urb, 0); 1019 finish_urb(ohci, urb, 0);
1002 } 1020 }
@@ -1086,7 +1104,7 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
1086 urb_priv->td_cnt++; 1104 urb_priv->td_cnt++;
1087 1105
1088 /* If all this urb's TDs are done, call complete() */ 1106 /* If all this urb's TDs are done, call complete() */
1089 if (urb_priv->td_cnt == urb_priv->length) 1107 if (urb_priv->td_cnt >= urb_priv->length)
1090 finish_urb(ohci, urb, status); 1108 finish_urb(ohci, urb, status);
1091 1109
1092 /* clean schedule: unlink EDs that are no longer busy */ 1110 /* clean schedule: unlink EDs that are no longer busy */
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index c300bd2f7d1c..0f228c46eeda 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -293,7 +293,7 @@ static struct pci_driver uhci_pci_driver = {
293 .remove = usb_hcd_pci_remove, 293 .remove = usb_hcd_pci_remove,
294 .shutdown = uhci_shutdown, 294 .shutdown = uhci_shutdown,
295 295
296#ifdef CONFIG_PM_SLEEP 296#ifdef CONFIG_PM
297 .driver = { 297 .driver = {
298 .pm = &usb_hcd_pci_pm_ops 298 .pm = &usb_hcd_pci_pm_ops
299 }, 299 },
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 041c6ddb695c..da6f56d996ce 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1303,7 +1303,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
1303 } 1303 }
1304 1304
1305 /* Fell behind? */ 1305 /* Fell behind? */
1306 if (uhci_frame_before_eq(frame, next)) { 1306 if (!uhci_frame_before_eq(next, frame)) {
1307 1307
1308 /* USB_ISO_ASAP: Round up to the first available slot */ 1308 /* USB_ISO_ASAP: Round up to the first available slot */
1309 if (urb->transfer_flags & URB_ISO_ASAP) 1309 if (urb->transfer_flags & URB_ISO_ASAP)
@@ -1311,13 +1311,17 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
1311 -qh->period; 1311 -qh->period;
1312 1312
1313 /* 1313 /*
1314 * Not ASAP: Use the next slot in the stream. If 1314 * Not ASAP: Use the next slot in the stream,
1315 * the entire URB falls before the threshold, fail. 1315 * no matter what.
1316 */ 1316 */
1317 else if (!uhci_frame_before_eq(next, 1317 else if (!uhci_frame_before_eq(next,
1318 frame + (urb->number_of_packets - 1) * 1318 frame + (urb->number_of_packets - 1) *
1319 qh->period)) 1319 qh->period))
1320 return -EXDEV; 1320 dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
1321 urb, frame,
1322 (urb->number_of_packets - 1) *
1323 qh->period,
1324 next);
1321 } 1325 }
1322 } 1326 }
1323 1327
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index fae697ed0b70..773a6b28c4f1 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -287,7 +287,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
287 if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) 287 if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
288 xhci_queue_stop_endpoint(xhci, slot_id, i, suspend); 288 xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
289 } 289 }
290 cmd->command_trb = xhci->cmd_ring->enqueue; 290 cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
291 list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list); 291 list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
292 xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend); 292 xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
293 xhci_ring_cmd_db(xhci); 293 xhci_ring_cmd_db(xhci);
@@ -552,11 +552,15 @@ void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
552 * - Mark a port as being done with device resume, 552 * - Mark a port as being done with device resume,
553 * and ring the endpoint doorbells. 553 * and ring the endpoint doorbells.
554 * - Stop the Synopsys redriver Compliance Mode polling. 554 * - Stop the Synopsys redriver Compliance Mode polling.
555 * - Drop and reacquire the xHCI lock, in order to wait for port resume.
555 */ 556 */
556static u32 xhci_get_port_status(struct usb_hcd *hcd, 557static u32 xhci_get_port_status(struct usb_hcd *hcd,
557 struct xhci_bus_state *bus_state, 558 struct xhci_bus_state *bus_state,
558 __le32 __iomem **port_array, 559 __le32 __iomem **port_array,
559 u16 wIndex, u32 raw_port_status) 560 u16 wIndex, u32 raw_port_status,
561 unsigned long flags)
562 __releases(&xhci->lock)
563 __acquires(&xhci->lock)
560{ 564{
561 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 565 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
562 u32 status = 0; 566 u32 status = 0;
@@ -591,21 +595,42 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
591 return 0xffffffff; 595 return 0xffffffff;
592 if (time_after_eq(jiffies, 596 if (time_after_eq(jiffies,
593 bus_state->resume_done[wIndex])) { 597 bus_state->resume_done[wIndex])) {
598 int time_left;
599
594 xhci_dbg(xhci, "Resume USB2 port %d\n", 600 xhci_dbg(xhci, "Resume USB2 port %d\n",
595 wIndex + 1); 601 wIndex + 1);
596 bus_state->resume_done[wIndex] = 0; 602 bus_state->resume_done[wIndex] = 0;
597 clear_bit(wIndex, &bus_state->resuming_ports); 603 clear_bit(wIndex, &bus_state->resuming_ports);
604
605 set_bit(wIndex, &bus_state->rexit_ports);
598 xhci_set_link_state(xhci, port_array, wIndex, 606 xhci_set_link_state(xhci, port_array, wIndex,
599 XDEV_U0); 607 XDEV_U0);
600 xhci_dbg(xhci, "set port %d resume\n", 608
601 wIndex + 1); 609 spin_unlock_irqrestore(&xhci->lock, flags);
602 slot_id = xhci_find_slot_id_by_port(hcd, xhci, 610 time_left = wait_for_completion_timeout(
603 wIndex + 1); 611 &bus_state->rexit_done[wIndex],
604 if (!slot_id) { 612 msecs_to_jiffies(
605 xhci_dbg(xhci, "slot_id is zero\n"); 613 XHCI_MAX_REXIT_TIMEOUT));
606 return 0xffffffff; 614 spin_lock_irqsave(&xhci->lock, flags);
615
616 if (time_left) {
617 slot_id = xhci_find_slot_id_by_port(hcd,
618 xhci, wIndex + 1);
619 if (!slot_id) {
620 xhci_dbg(xhci, "slot_id is zero\n");
621 return 0xffffffff;
622 }
623 xhci_ring_device(xhci, slot_id);
624 } else {
625 int port_status = xhci_readl(xhci,
626 port_array[wIndex]);
627 xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
628 XHCI_MAX_REXIT_TIMEOUT,
629 port_status);
630 status |= USB_PORT_STAT_SUSPEND;
631 clear_bit(wIndex, &bus_state->rexit_ports);
607 } 632 }
608 xhci_ring_device(xhci, slot_id); 633
609 bus_state->port_c_suspend |= 1 << wIndex; 634 bus_state->port_c_suspend |= 1 << wIndex;
610 bus_state->suspended_ports &= ~(1 << wIndex); 635 bus_state->suspended_ports &= ~(1 << wIndex);
611 } else { 636 } else {
@@ -728,7 +753,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
728 break; 753 break;
729 } 754 }
730 status = xhci_get_port_status(hcd, bus_state, port_array, 755 status = xhci_get_port_status(hcd, bus_state, port_array,
731 wIndex, temp); 756 wIndex, temp, flags);
732 if (status == 0xffffffff) 757 if (status == 0xffffffff)
733 goto error; 758 goto error;
734 759
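The xhci-hub hunks above make xhci_get_port_status() drop xhci->lock, wait on a per-port completion with a 20 ms timeout, and retake the lock; the __releases/__acquires annotations document that to sparse. The sketch below is a user-space model of the same lock-drop/wait/retake shape using a pthread mutex and condition variable. All names are illustrative; the kernel uses a spinlock and struct completion, not pthreads.

/* User-space model of the "drop lock, wait with timeout, retake lock"
 * pattern. pthread_cond_timedwait() releases the mutex while sleeping
 * and reacquires it before returning, mirroring the driver's
 * spin_unlock / wait_for_completion_timeout / spin_lock sequence. */
#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  rexit_done = PTHREAD_COND_INITIALIZER;
static bool rexit_complete;     /* set by the "port event handler" */

/* Returns true if completion was signalled within timeout_ms. */
static bool wait_for_rexit(int timeout_ms)
{
	struct timespec ts;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}
	while (!rexit_complete && rc == 0)
		rc = pthread_cond_timedwait(&rexit_done, &lock, &ts);
	return rexit_complete;
}

int main(void)
{
	pthread_mutex_lock(&lock);
	if (wait_for_rexit(20))  /* 20 ms, like XHCI_MAX_REXIT_TIMEOUT */
		printf("resume finished, ring the device doorbell\n");
	else
		printf("port resume timed out, report port as suspended\n");
	pthread_mutex_unlock(&lock);
	return 0;
}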
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 53b972c2a09f..83bcd13622c3 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2428,6 +2428,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2428 for (i = 0; i < USB_MAXCHILDREN; ++i) { 2428 for (i = 0; i < USB_MAXCHILDREN; ++i) {
2429 xhci->bus_state[0].resume_done[i] = 0; 2429 xhci->bus_state[0].resume_done[i] = 0;
2430 xhci->bus_state[1].resume_done[i] = 0; 2430 xhci->bus_state[1].resume_done[i] = 0;
2431 /* Only the USB 2.0 completions will ever be used. */
2432 init_completion(&xhci->bus_state[1].rexit_done[i]);
2431 } 2433 }
2432 2434
2433 if (scratchpad_alloc(xhci, flags)) 2435 if (scratchpad_alloc(xhci, flags))
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c2d495057eb5..236c3aabe940 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -351,7 +351,7 @@ static struct pci_driver xhci_pci_driver = {
351 /* suspend and resume implemented later */ 351 /* suspend and resume implemented later */
352 352
353 .shutdown = usb_hcd_pci_shutdown, 353 .shutdown = usb_hcd_pci_shutdown,
354#ifdef CONFIG_PM_SLEEP 354#ifdef CONFIG_PM
355 .driver = { 355 .driver = {
356 .pm = &usb_hcd_pci_pm_ops 356 .pm = &usb_hcd_pci_pm_ops
357 }, 357 },
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 411da1fc7ae8..6bfbd80ec2b9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -123,6 +123,16 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
123 return TRB_TYPE_LINK_LE32(link->control); 123 return TRB_TYPE_LINK_LE32(link->control);
124} 124}
125 125
126union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
127{
128 /* Enqueue pointer can be left pointing to the link TRB,
 129	 * so we must handle that
130 */
131 if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
132 return ring->enq_seg->next->trbs;
133 return ring->enqueue;
134}
135
126/* Updates trb to point to the next TRB in the ring, and updates seg if the next 136/* Updates trb to point to the next TRB in the ring, and updates seg if the next
127 * TRB is in a new segment. This does not skip over link TRBs, and it does not 137 * TRB is in a new segment. This does not skip over link TRBs, and it does not
 128	 * affect the ring dequeue or enqueue pointers.	 138	 * affect the ring dequeue or enqueue pointers.
@@ -859,8 +869,12 @@ remove_finished_td:
859 /* Otherwise ring the doorbell(s) to restart queued transfers */ 869 /* Otherwise ring the doorbell(s) to restart queued transfers */
860 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 870 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
861 } 871 }
862 ep->stopped_td = NULL; 872
863 ep->stopped_trb = NULL; 873 /* Clear stopped_td and stopped_trb if endpoint is not halted */
874 if (!(ep->ep_state & EP_HALTED)) {
875 ep->stopped_td = NULL;
876 ep->stopped_trb = NULL;
877 }
864 878
865 /* 879 /*
866 * Drop the lock and complete the URBs in the cancelled TD list. 880 * Drop the lock and complete the URBs in the cancelled TD list.
@@ -1414,6 +1428,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1414 inc_deq(xhci, xhci->cmd_ring); 1428 inc_deq(xhci, xhci->cmd_ring);
1415 return; 1429 return;
1416 } 1430 }
1431 /* There is no command to handle if we get a stop event when the
	1432	 * command ring is empty; event->cmd_trb points to the next
	1433	 * unset command.
1434 */
1435 if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
1436 return;
1417 } 1437 }
1418 1438
1419 switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) 1439 switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
@@ -1743,6 +1763,19 @@ static void handle_port_status(struct xhci_hcd *xhci,
1743 } 1763 }
1744 } 1764 }
1745 1765
1766 /*
1767 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	1768	 * RExit to a disconnect state). If so, let the driver know it's

1769 * out of the RExit state.
1770 */
1771 if (!DEV_SUPERSPEED(temp) &&
1772 test_and_clear_bit(faked_port_index,
1773 &bus_state->rexit_ports)) {
1774 complete(&bus_state->rexit_done[faked_port_index]);
1775 bogus_port_status = true;
1776 goto cleanup;
1777 }
1778
1746 if (hcd->speed != HCD_USB3) 1779 if (hcd->speed != HCD_USB3)
1747 xhci_test_and_clear_bit(xhci, port_array, faked_port_index, 1780 xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1748 PORT_PLC); 1781 PORT_PLC);
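Two related xhci-ring fixes above: xhci_find_next_enqueue() steps over an enqueue pointer parked on a link TRB, and handle_cmd_completion() now bails out when a stop event arrives with dequeue == enqueue, i.e. an empty command ring. The following runnable model of a two-segment ring illustrates both checks; SEG_TRBS, the is_link flag, and the struct layout are illustrative stand-ins, not the xHCI TRB encoding.

/* Tiny user-space model of a segmented command ring. */
#include <stdio.h>
#include <stdbool.h>

#define SEG_TRBS 4

struct trb { bool is_link; };

struct segment {
	struct trb trbs[SEG_TRBS];
	struct segment *next;
};

struct ring {
	struct segment *enq_seg;
	struct trb *enqueue;
	struct trb *dequeue;
};

static struct trb *find_next_enqueue(struct ring *ring)
{
	/* Enqueue pointer can be left pointing to the link TRB; the
	 * next usable slot is the first TRB of the next segment. */
	if (ring->enqueue->is_link)
		return ring->enq_seg->next->trbs;
	return ring->enqueue;
}

int main(void)
{
	struct segment s0 = { .next = NULL }, s1 = { .next = &s0 };
	s0.next = &s1;
	s0.trbs[SEG_TRBS - 1].is_link = true;  /* last TRB links onward */

	struct ring ring = {
		.enq_seg = &s0,
		.enqueue = &s0.trbs[SEG_TRBS - 1], /* parked on the link */
		.dequeue = &s0.trbs[SEG_TRBS - 1],
	};

	printf("next enqueue is %s segment\n",
	       find_next_enqueue(&ring) == s1.trbs ? "in the next" : "in this");

	/* Mirrors the empty-ring check in handle_cmd_completion(). */
	if (ring.dequeue == ring.enqueue)
		printf("command ring empty: nothing to handle on stop event\n");
	return 0;
}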
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 49b6edb84a79..1e36dbb48366 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2598,15 +2598,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2598 if (command) { 2598 if (command) {
2599 cmd_completion = command->completion; 2599 cmd_completion = command->completion;
2600 cmd_status = &command->status; 2600 cmd_status = &command->status;
2601 command->command_trb = xhci->cmd_ring->enqueue; 2601 command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
2602
2603 /* Enqueue pointer can be left pointing to the link TRB,
2604 * we must handle that
2605 */
2606 if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
2607 command->command_trb =
2608 xhci->cmd_ring->enq_seg->next->trbs;
2609
2610 list_add_tail(&command->cmd_list, &virt_dev->cmd_list); 2602 list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
2611 } else { 2603 } else {
2612 cmd_completion = &virt_dev->cmd_completion; 2604 cmd_completion = &virt_dev->cmd_completion;
@@ -2614,7 +2606,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2614 } 2606 }
2615 init_completion(cmd_completion); 2607 init_completion(cmd_completion);
2616 2608
2617 cmd_trb = xhci->cmd_ring->dequeue; 2609 cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
2618 if (!ctx_change) 2610 if (!ctx_change)
2619 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, 2611 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2620 udev->slot_id, must_succeed); 2612 udev->slot_id, must_succeed);
@@ -3439,14 +3431,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3439 3431
3440 /* Attempt to submit the Reset Device command to the command ring */ 3432 /* Attempt to submit the Reset Device command to the command ring */
3441 spin_lock_irqsave(&xhci->lock, flags); 3433 spin_lock_irqsave(&xhci->lock, flags);
3442 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; 3434 reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
3443
3444 /* Enqueue pointer can be left pointing to the link TRB,
3445 * we must handle that
3446 */
3447 if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
3448 reset_device_cmd->command_trb =
3449 xhci->cmd_ring->enq_seg->next->trbs;
3450 3435
3451 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); 3436 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
3452 ret = xhci_queue_reset_device(xhci, slot_id); 3437 ret = xhci_queue_reset_device(xhci, slot_id);
@@ -3650,7 +3635,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3650 union xhci_trb *cmd_trb; 3635 union xhci_trb *cmd_trb;
3651 3636
3652 spin_lock_irqsave(&xhci->lock, flags); 3637 spin_lock_irqsave(&xhci->lock, flags);
3653 cmd_trb = xhci->cmd_ring->dequeue; 3638 cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
3654 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); 3639 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
3655 if (ret) { 3640 if (ret) {
3656 spin_unlock_irqrestore(&xhci->lock, flags); 3641 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3785,7 +3770,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3785 slot_ctx->dev_info >> 27); 3770 slot_ctx->dev_info >> 27);
3786 3771
3787 spin_lock_irqsave(&xhci->lock, flags); 3772 spin_lock_irqsave(&xhci->lock, flags);
3788 cmd_trb = xhci->cmd_ring->dequeue; 3773 cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
3789 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, 3774 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
3790 udev->slot_id); 3775 udev->slot_id);
3791 if (ret) { 3776 if (ret) {
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 46aa14894148..289fbfbae746 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1412,8 +1412,18 @@ struct xhci_bus_state {
1412 unsigned long resume_done[USB_MAXCHILDREN]; 1412 unsigned long resume_done[USB_MAXCHILDREN];
1413 /* which ports have started to resume */ 1413 /* which ports have started to resume */
1414 unsigned long resuming_ports; 1414 unsigned long resuming_ports;
1415 /* Which ports are waiting on RExit to U0 transition. */
1416 unsigned long rexit_ports;
1417 struct completion rexit_done[USB_MAXCHILDREN];
1415}; 1418};
1416 1419
1420
1421/*
1422 * It can take up to 20 ms to transition from RExit to U0 on the
1423 * Intel Lynx Point LP xHCI host.
1424 */
1425#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000)
1426
1417static inline unsigned int hcd_index(struct usb_hcd *hcd) 1427static inline unsigned int hcd_index(struct usb_hcd *hcd)
1418{ 1428{
1419 if (hcd->speed == HCD_USB3) 1429 if (hcd->speed == HCD_USB3)
@@ -1840,6 +1850,7 @@ int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
1840 union xhci_trb *cmd_trb); 1850 union xhci_trb *cmd_trb);
1841void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, 1851void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
1842 unsigned int ep_index, unsigned int stream_id); 1852 unsigned int ep_index, unsigned int stream_id);
1853union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring);
1843 1854
1844/* xHCI roothub code */ 1855/* xHCI roothub code */
1845void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array, 1856void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
diff --git a/drivers/video/mmp/hw/mmp_ctrl.c b/drivers/video/mmp/hw/mmp_ctrl.c
index 75dca19bf214..6ac755270ab4 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/mmp/hw/mmp_ctrl.c
@@ -514,7 +514,7 @@ static int mmphw_probe(struct platform_device *pdev)
514 if (IS_ERR(ctrl->clk)) { 514 if (IS_ERR(ctrl->clk)) {
515 dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name); 515 dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
516 ret = -ENOENT; 516 ret = -ENOENT;
517 goto failed_get_clk; 517 goto failed;
518 } 518 }
519 clk_prepare_enable(ctrl->clk); 519 clk_prepare_enable(ctrl->clk);
520 520
@@ -551,21 +551,8 @@ failed_path_init:
551 path_deinit(path_plat); 551 path_deinit(path_plat);
552 } 552 }
553 553
554 if (ctrl->clk) { 554 clk_disable_unprepare(ctrl->clk);
555 devm_clk_put(ctrl->dev, ctrl->clk);
556 clk_disable_unprepare(ctrl->clk);
557 }
558failed_get_clk:
559 devm_free_irq(ctrl->dev, ctrl->irq, ctrl);
560failed: 555failed:
561 if (ctrl) {
562 if (ctrl->reg_base)
563 devm_iounmap(ctrl->dev, ctrl->reg_base);
564 devm_release_mem_region(ctrl->dev, res->start,
565 resource_size(res));
566 devm_kfree(ctrl->dev, ctrl);
567 }
568
569 dev_err(&pdev->dev, "device init failed\n"); 556 dev_err(&pdev->dev, "device init failed\n");
570 557
571 return ret; 558 return ret;
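The mmphw_probe() error path above had been hand-unwinding devm-managed resources (devm_clk_put, devm_free_irq, devm_iounmap, devm_kfree); the fix drops all of that because the driver core releases devm resources automatically when probe fails, leaving only the non-devm clock enable to undo. A short in-kernel sketch of the restored idiom follows; it is not standalone, assumes kernel headers, and foo_probe is a made-up example driver, not this one.

/* In-kernel sketch (not standalone): with devm_* allocations a
 * failing probe only unwinds what devm does not manage. */
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;
	struct clk *clk;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);   /* devm unwinds automatically */

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);  /* not devm-managed */
	if (ret)
		return ret;

	/* ... later failures need only clk_disable_unprepare(clk) ... */
	return 0;
}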
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index d250ed0f806d..27197a8048c0 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -620,6 +620,7 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
620 break; 620 break;
621 case 3: 621 case 3:
622 bits_per_pixel = 32; 622 bits_per_pixel = 32;
623 break;
623 case 1: 624 case 1:
624 default: 625 default:
625 return -EINVAL; 626 return -EINVAL;
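The mxsfb hunk adds the break that case 3 was missing, so the 32 bpp assignment no longer falls through into the -EINVAL path. Here is a runnable reproduction; decode_bpp() and its case values are a simplified model of the register decode, not the driver's exact table.

/* Runnable reproduction of the fall-through bug fixed above. */
#include <stdio.h>

#define EINVAL 22

static int decode_bpp(int ctrl_word, int *bits_per_pixel)
{
	switch (ctrl_word) {
	case 0:
		*bits_per_pixel = 16;
		break;
	case 3:
		*bits_per_pixel = 32;
		break;  /* the fix: without this we fall into -EINVAL */
	case 1:
	default:
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	int bpp = 0;
	int ret = decode_bpp(3, &bpp);

	printf("ret=%d bpp=%d\n", ret, bpp);  /* ret=0 bpp=32 with the fix */
	return 0;
}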
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index 7ef079c146e7..c172a5281f9e 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -2075,6 +2075,7 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
2075 if (!fb_find_mode(&info->var, info, mode_option, NULL, 0, 2075 if (!fb_find_mode(&info->var, info, mode_option, NULL, 0,
2076 info->monspecs.modedb, 16)) { 2076 info->monspecs.modedb, 16)) {
2077 printk(KERN_ERR "neofb: Unable to find usable video mode.\n"); 2077 printk(KERN_ERR "neofb: Unable to find usable video mode.\n");
2078 err = -EINVAL;
2078 goto err_map_video; 2079 goto err_map_video;
2079 } 2080 }
2080 2081
@@ -2097,7 +2098,8 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
2097 info->fix.smem_len >> 10, info->var.xres, 2098 info->fix.smem_len >> 10, info->var.xres,
2098 info->var.yres, h_sync / 1000, h_sync % 1000, v_sync); 2099 info->var.yres, h_sync / 1000, h_sync % 1000, v_sync);
2099 2100
2100 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) 2101 err = fb_alloc_cmap(&info->cmap, 256, 0);
2102 if (err < 0)
2101 goto err_map_video; 2103 goto err_map_video;
2102 2104
2103 err = register_framebuffer(info); 2105 err = register_framebuffer(info);
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 171821ddd78d..ba5b40f581f6 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -120,7 +120,7 @@ int of_get_display_timing(struct device_node *np, const char *name,
120 return -EINVAL; 120 return -EINVAL;
121 } 121 }
122 122
123 timing_np = of_find_node_by_name(np, name); 123 timing_np = of_get_child_by_name(np, name);
124 if (!timing_np) { 124 if (!timing_np) {
125 pr_err("%s: could not find node '%s'\n", 125 pr_err("%s: could not find node '%s'\n",
126 of_node_full_name(np), name); 126 of_node_full_name(np), name);
@@ -143,11 +143,11 @@ struct display_timings *of_get_display_timings(struct device_node *np)
143 struct display_timings *disp; 143 struct display_timings *disp;
144 144
145 if (!np) { 145 if (!np) {
146 pr_err("%s: no devicenode given\n", of_node_full_name(np)); 146 pr_err("%s: no device node given\n", of_node_full_name(np));
147 return NULL; 147 return NULL;
148 } 148 }
149 149
150 timings_np = of_find_node_by_name(np, "display-timings"); 150 timings_np = of_get_child_by_name(np, "display-timings");
151 if (!timings_np) { 151 if (!timings_np) {
152 pr_err("%s: could not find display-timings node\n", 152 pr_err("%s: could not find display-timings node\n",
153 of_node_full_name(np)); 153 of_node_full_name(np));
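The of_display_timing changes swap of_find_node_by_name() for of_get_child_by_name(): the former walks the whole device tree starting from the given node, so a node with a matching name anywhere in the tree could be returned, while the latter only matches direct children. A short in-kernel snippet contrasting the two (not standalone; lookup_timings is an illustrative wrapper):

/* In-kernel sketch (not standalone). */
#include <linux/of.h>

static struct device_node *lookup_timings(struct device_node *np)
{
	/* Old, too broad: may return a same-named node that is not
	 * under 'np' at all:
	 *	return of_find_node_by_name(np, "display-timings");
	 */

	/* New, correct: only a direct child of 'np'. */
	return of_get_child_by_name(np, "display-timings");
}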
diff --git a/drivers/video/omap2/displays-new/Kconfig b/drivers/video/omap2/displays-new/Kconfig
index 6c90885b0940..10b25e7cd878 100644
--- a/drivers/video/omap2/displays-new/Kconfig
+++ b/drivers/video/omap2/displays-new/Kconfig
@@ -35,6 +35,7 @@ config DISPLAY_PANEL_DPI
35 35
36config DISPLAY_PANEL_DSI_CM 36config DISPLAY_PANEL_DSI_CM
37 tristate "Generic DSI Command Mode Panel" 37 tristate "Generic DSI Command Mode Panel"
38 depends on BACKLIGHT_CLASS_DEVICE
38 help 39 help
39 Driver for generic DSI command mode panels. 40 Driver for generic DSI command mode panels.
40 41
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c
index 1b60698f141e..ccd9073f706f 100644
--- a/drivers/video/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/omap2/displays-new/connector-analog-tv.c
@@ -191,7 +191,7 @@ static int tvc_probe_pdata(struct platform_device *pdev)
191 in = omap_dss_find_output(pdata->source); 191 in = omap_dss_find_output(pdata->source);
192 if (in == NULL) { 192 if (in == NULL) {
193 dev_err(&pdev->dev, "Failed to find video source\n"); 193 dev_err(&pdev->dev, "Failed to find video source\n");
194 return -ENODEV; 194 return -EPROBE_DEFER;
195 } 195 }
196 196
197 ddata->in = in; 197 ddata->in = in;
diff --git a/drivers/video/omap2/displays-new/connector-dvi.c b/drivers/video/omap2/displays-new/connector-dvi.c
index bc5f8ceda371..63d88ee6dfe4 100644
--- a/drivers/video/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/omap2/displays-new/connector-dvi.c
@@ -263,7 +263,7 @@ static int dvic_probe_pdata(struct platform_device *pdev)
263 in = omap_dss_find_output(pdata->source); 263 in = omap_dss_find_output(pdata->source);
264 if (in == NULL) { 264 if (in == NULL) {
265 dev_err(&pdev->dev, "Failed to find video source\n"); 265 dev_err(&pdev->dev, "Failed to find video source\n");
266 return -ENODEV; 266 return -EPROBE_DEFER;
267 } 267 }
268 268
269 ddata->in = in; 269 ddata->in = in;
diff --git a/drivers/video/omap2/displays-new/connector-hdmi.c b/drivers/video/omap2/displays-new/connector-hdmi.c
index c5826716d6ab..9abe2c039ae9 100644
--- a/drivers/video/omap2/displays-new/connector-hdmi.c
+++ b/drivers/video/omap2/displays-new/connector-hdmi.c
@@ -290,7 +290,7 @@ static int hdmic_probe_pdata(struct platform_device *pdev)
290 in = omap_dss_find_output(pdata->source); 290 in = omap_dss_find_output(pdata->source);
291 if (in == NULL) { 291 if (in == NULL) {
292 dev_err(&pdev->dev, "Failed to find video source\n"); 292 dev_err(&pdev->dev, "Failed to find video source\n");
293 return -ENODEV; 293 return -EPROBE_DEFER;
294 } 294 }
295 295
296 ddata->in = in; 296 ddata->in = in;
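The three OMAP connector drivers above now return -EPROBE_DEFER instead of -ENODEV when the DSS output is not registered yet: a missing provider at probe time usually means "not ready yet", not "not present", and deferral lets the driver core retry the probe once the provider appears. A minimal in-kernel sketch of the idiom (not standalone; connector_bind_source and the void *src stand-in are illustrative):

/* In-kernel sketch (not standalone) of the probe-deferral idiom. */
#include <linux/errno.h>
#include <linux/device.h>

static int connector_bind_source(struct device *dev, void *src)
{
	if (!src) {
		dev_dbg(dev, "video source not registered yet, deferring\n");
		return -EPROBE_DEFER;  /* driver core re-probes later */
	}
	return 0;
}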
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 02a7340111df..477975009eee 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -3691,6 +3691,7 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
3691 } 3691 }
3692 3692
3693 pm_runtime_enable(&pdev->dev); 3693 pm_runtime_enable(&pdev->dev);
3694 pm_runtime_irq_safe(&pdev->dev);
3694 3695
3695 r = dispc_runtime_get(); 3696 r = dispc_runtime_get();
3696 if (r) 3697 if (r)
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 47ca86c5c6c0..d838ba829459 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -1336,14 +1336,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1336 (info->var.bits_per_pixel * info->var.xres_virtual); 1336 (info->var.bits_per_pixel * info->var.xres_virtual);
1337 if (info->var.yres_virtual < info->var.yres) { 1337 if (info->var.yres_virtual < info->var.yres) {
1338 dev_err(info->device, "virtual vertical size smaller than real\n"); 1338 dev_err(info->device, "virtual vertical size smaller than real\n");
1339 goto err_find_mode; 1339 rc = -EINVAL;
1340 }
1341
1342 /* maximize virtual vertical size for fast scrolling */
1343 info->var.yres_virtual = info->fix.smem_len * 8 /
1344 (info->var.bits_per_pixel * info->var.xres_virtual);
1345 if (info->var.yres_virtual < info->var.yres) {
1346 dev_err(info->device, "virtual vertical size smaller than real\n");
1347 goto err_find_mode; 1340 goto err_find_mode;
1348 } 1341 }
1349 1342
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index a50c6e3a7cc4..b232908a6192 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -398,8 +398,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
398 if (nr_pages > ARRAY_SIZE(frame_list)) 398 if (nr_pages > ARRAY_SIZE(frame_list))
399 nr_pages = ARRAY_SIZE(frame_list); 399 nr_pages = ARRAY_SIZE(frame_list);
400 400
401 scratch_page = get_balloon_scratch_page();
402
403 for (i = 0; i < nr_pages; i++) { 401 for (i = 0; i < nr_pages; i++) {
404 page = alloc_page(gfp); 402 page = alloc_page(gfp);
405 if (page == NULL) { 403 if (page == NULL) {
@@ -413,6 +411,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
413 411
414 scrub_page(page); 412 scrub_page(page);
415 413
414 /*
415 * Ballooned out frames are effectively replaced with
416 * a scratch frame. Ensure direct mappings and the
417 * p2m are consistent.
418 */
419 scratch_page = get_balloon_scratch_page();
416#ifdef CONFIG_XEN_HAVE_PVMMU 420#ifdef CONFIG_XEN_HAVE_PVMMU
417 if (xen_pv_domain() && !PageHighMem(page)) { 421 if (xen_pv_domain() && !PageHighMem(page)) {
418 ret = HYPERVISOR_update_va_mapping( 422 ret = HYPERVISOR_update_va_mapping(
@@ -422,24 +426,19 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
422 BUG_ON(ret); 426 BUG_ON(ret);
423 } 427 }
424#endif 428#endif
425 }
426
427 /* Ensure that ballooned highmem pages don't have kmaps. */
428 kmap_flush_unused();
429 flush_tlb_all();
430
431 /* No more mappings: invalidate P2M and add to balloon. */
432 for (i = 0; i < nr_pages; i++) {
433 pfn = mfn_to_pfn(frame_list[i]);
434 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 429 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
435 unsigned long p; 430 unsigned long p;
436 p = page_to_pfn(scratch_page); 431 p = page_to_pfn(scratch_page);
437 __set_phys_to_machine(pfn, pfn_to_mfn(p)); 432 __set_phys_to_machine(pfn, pfn_to_mfn(p));
438 } 433 }
434 put_balloon_scratch_page();
435
439 balloon_append(pfn_to_page(pfn)); 436 balloon_append(pfn_to_page(pfn));
440 } 437 }
441 438
442 put_balloon_scratch_page(); 439 /* Ensure that ballooned highmem pages don't have kmaps. */
440 kmap_flush_unused();
441 flush_tlb_all();
443 442
444 set_xen_guest_handle(reservation.extent_start, frame_list); 443 set_xen_guest_handle(reservation.extent_start, frame_list);
445 reservation.nr_extents = nr_pages; 444 reservation.nr_extents = nr_pages;
diff --git a/fs/bio.c b/fs/bio.c
index b3b20ed9510e..ea5035da4d9a 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -917,8 +917,8 @@ void bio_copy_data(struct bio *dst, struct bio *src)
917 src_p = kmap_atomic(src_bv->bv_page); 917 src_p = kmap_atomic(src_bv->bv_page);
918 dst_p = kmap_atomic(dst_bv->bv_page); 918 dst_p = kmap_atomic(dst_bv->bv_page);
919 919
920 memcpy(dst_p + dst_bv->bv_offset, 920 memcpy(dst_p + dst_offset,
921 src_p + src_bv->bv_offset, 921 src_p + src_offset,
922 bytes); 922 bytes);
923 923
924 kunmap_atomic(dst_p); 924 kunmap_atomic(dst_p);
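The fs/bio.c hunk fixes bio_copy_data(): the running dst_offset/src_offset values advance within a segment as partial copies are made, but the old memcpy restarted at bv_offset each time, re-copying a segment's head whenever source and destination segment boundaries didn't line up. A runnable model of the corrected loop; struct seg and copy_data() are a simplified stand-in for bio_vec iteration.

/* Runnable model of the bio_copy_data() fix. */
#include <stdio.h>
#include <string.h>

struct seg { char *buf; size_t len; };

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

static void copy_data(struct seg *dst, int ndst, struct seg *src, int nsrc)
{
	int di = 0, si = 0;
	size_t dst_off = 0, src_off = 0;

	while (di < ndst && si < nsrc) {
		size_t bytes = min_sz(dst[di].len - dst_off,
				      src[si].len - src_off);

		/* The fix: copy at the running offsets, not at the
		 * start of the segment. */
		memcpy(dst[di].buf + dst_off, src[si].buf + src_off, bytes);

		dst_off += bytes;
		src_off += bytes;
		if (dst_off == dst[di].len) { di++; dst_off = 0; }
		if (src_off == src[si].len) { si++; src_off = 0; }
	}
}

int main(void)
{
	char s0[] = "abcdef", s1[] = "ghij";   /* 6 + 4 source bytes */
	char d0[4] = {0}, d1[8] = {0};         /* 3 + 7 dest bytes */
	struct seg src[] = { { s0, 6 }, { s1, 4 } };
	struct seg dst[] = { { d0, 3 }, { d1, 7 } };

	copy_data(dst, 2, src, 2);
	printf("%.3s%.7s\n", d0, d1);          /* prints abcdefghij */
	return 0;
}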
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 121da2dc3be8..d4e81e4a9b04 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1924,7 +1924,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1924{ 1924{
1925 int tmp, hangup_needed = 0; 1925 int tmp, hangup_needed = 0;
1926 struct ocfs2_super *osb = NULL; 1926 struct ocfs2_super *osb = NULL;
1927 char nodestr[8]; 1927 char nodestr[12];
1928 1928
1929 trace_ocfs2_dismount_volume(sb); 1929 trace_ocfs2_dismount_volume(sb);
1930 1930
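The ocfs2 one-liner grows nodestr from 8 to 12 bytes: the buffer holds a decimal node number, and a 32-bit unsigned value needs up to 10 digits plus the NUL terminator, so char[8] could silently truncate. A runnable check of the sizing:

/* Runnable check of the buffer sizing fixed above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	char small[8], big[12];
	uint32_t node = 4294967295u;   /* UINT32_MAX: 10 digits */
	int needed = snprintf(small, sizeof(small), "%u", node);

	snprintf(big, sizeof(big), "%u", node);
	/* small truncates to "4294967"; big holds the full value */
	printf("needed %d: small=\"%s\" big=\"%s\"\n", needed, small, big);
	return 0;
}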
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 73feacc49b2e..fd777032c2ba 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1163,21 +1163,6 @@ static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
1163 return NULL; 1163 return NULL;
1164} 1164}
1165 1165
1166static int newer_jl_done(struct reiserfs_journal_cnode *cn)
1167{
1168 struct super_block *sb = cn->sb;
1169 b_blocknr_t blocknr = cn->blocknr;
1170
1171 cn = cn->hprev;
1172 while (cn) {
1173 if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
1174 atomic_read(&cn->jlist->j_commit_left) != 0)
1175 return 0;
1176 cn = cn->hprev;
1177 }
1178 return 1;
1179}
1180
1181static void remove_journal_hash(struct super_block *, 1166static void remove_journal_hash(struct super_block *,
1182 struct reiserfs_journal_cnode **, 1167 struct reiserfs_journal_cnode **,
1183 struct reiserfs_journal_list *, unsigned long, 1168 struct reiserfs_journal_list *, unsigned long,
@@ -1353,7 +1338,6 @@ static int flush_journal_list(struct super_block *s,
1353 reiserfs_warning(s, "clm-2048", "called with wcount %d", 1338 reiserfs_warning(s, "clm-2048", "called with wcount %d",
1354 atomic_read(&journal->j_wcount)); 1339 atomic_read(&journal->j_wcount));
1355 } 1340 }
1356 BUG_ON(jl->j_trans_id == 0);
1357 1341
1358 /* if flushall == 0, the lock is already held */ 1342 /* if flushall == 0, the lock is already held */
1359 if (flushall) { 1343 if (flushall) {
@@ -1593,31 +1577,6 @@ static int flush_journal_list(struct super_block *s,
1593 return err; 1577 return err;
1594} 1578}
1595 1579
1596static int test_transaction(struct super_block *s,
1597 struct reiserfs_journal_list *jl)
1598{
1599 struct reiserfs_journal_cnode *cn;
1600
1601 if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
1602 return 1;
1603
1604 cn = jl->j_realblock;
1605 while (cn) {
1606 /* if the blocknr == 0, this has been cleared from the hash,
1607 ** skip it
1608 */
1609 if (cn->blocknr == 0) {
1610 goto next;
1611 }
1612 if (cn->bh && !newer_jl_done(cn))
1613 return 0;
1614 next:
1615 cn = cn->next;
1616 cond_resched();
1617 }
1618 return 0;
1619}
1620
1621static int write_one_transaction(struct super_block *s, 1580static int write_one_transaction(struct super_block *s,
1622 struct reiserfs_journal_list *jl, 1581 struct reiserfs_journal_list *jl,
1623 struct buffer_chunk *chunk) 1582 struct buffer_chunk *chunk)
@@ -1805,6 +1764,8 @@ static int flush_used_journal_lists(struct super_block *s,
1805 break; 1764 break;
1806 tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next); 1765 tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
1807 } 1766 }
1767 get_journal_list(jl);
1768 get_journal_list(flush_jl);
1808 /* try to find a group of blocks we can flush across all the 1769 /* try to find a group of blocks we can flush across all the
1809 ** transactions, but only bother if we've actually spanned 1770 ** transactions, but only bother if we've actually spanned
1810 ** across multiple lists 1771 ** across multiple lists
@@ -1813,6 +1774,8 @@ static int flush_used_journal_lists(struct super_block *s,
1813 ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i); 1774 ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
1814 } 1775 }
1815 flush_journal_list(s, flush_jl, 1); 1776 flush_journal_list(s, flush_jl, 1);
1777 put_journal_list(s, flush_jl);
1778 put_journal_list(s, jl);
1816 return 0; 1779 return 0;
1817} 1780}
1818 1781
@@ -3868,27 +3831,6 @@ int reiserfs_prepare_for_journal(struct super_block *sb,
3868 return 1; 3831 return 1;
3869} 3832}
3870 3833
3871static void flush_old_journal_lists(struct super_block *s)
3872{
3873 struct reiserfs_journal *journal = SB_JOURNAL(s);
3874 struct reiserfs_journal_list *jl;
3875 struct list_head *entry;
3876 time_t now = get_seconds();
3877
3878 while (!list_empty(&journal->j_journal_list)) {
3879 entry = journal->j_journal_list.next;
3880 jl = JOURNAL_LIST_ENTRY(entry);
3881 /* this check should always be run, to send old lists to disk */
3882 if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
3883 atomic_read(&jl->j_commit_left) == 0 &&
3884 test_transaction(s, jl)) {
3885 flush_used_journal_lists(s, jl);
3886 } else {
3887 break;
3888 }
3889 }
3890}
3891
3892/* 3834/*
3893** long and ugly. If flush, will not return until all commit 3835** long and ugly. If flush, will not return until all commit
3894** blocks and all real buffers in the trans are on disk. 3836** blocks and all real buffers in the trans are on disk.
@@ -4232,7 +4174,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
4232 } 4174 }
4233 } 4175 }
4234 } 4176 }
4235 flush_old_journal_lists(sb);
4236 4177
4237 journal->j_current_jl->j_list_bitmap = 4178 journal->j_current_jl->j_list_bitmap =
4238 get_list_bitmap(sb, journal->j_current_jl); 4179 get_list_bitmap(sb, journal->j_current_jl);
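The reiserfs changes above remove the speculative old-list flushing (and its helpers newer_jl_done/test_transaction), and pin jl and flush_jl with get_journal_list() across flush_journal_list(), since the flush can otherwise drop the last reference to a list the caller is still using. The runnable model below shows the get/put pairing; struct journal_list and the "flush drops a reference" behaviour are a simplified model of the race, not the reiserfs code.

/* Runnable model of the get/put pairing the hunk adds. */
#include <stdio.h>
#include <stdlib.h>

struct journal_list {
	int refcount;
	const char *name;
};

static void get_journal_list(struct journal_list *jl)
{
	jl->refcount++;
}

static void put_journal_list(struct journal_list *jl)
{
	if (--jl->refcount == 0) {
		printf("freeing %s\n", jl->name);
		free(jl);
	}
}

static void flush_journal_list(struct journal_list *jl)
{
	/* Flushing may complete the commit and drop the list's own
	 * reference (modelled here as an unconditional put). */
	printf("flushing %s\n", jl->name);
	put_journal_list(jl);
}

int main(void)
{
	struct journal_list *jl = malloc(sizeof(*jl));

	jl->refcount = 1;        /* the journal's own reference */
	jl->name = "jl";

	get_journal_list(jl);    /* the fix: pin before flushing */
	flush_journal_list(jl);  /* may drop the other reference */
	put_journal_list(jl);    /* safe: we still held our pin */
	return 0;
}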
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 7e5aae4bf46f..6eaf5edf1ea1 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -30,18 +30,17 @@ void udf_free_inode(struct inode *inode)
30{ 30{
31 struct super_block *sb = inode->i_sb; 31 struct super_block *sb = inode->i_sb;
32 struct udf_sb_info *sbi = UDF_SB(sb); 32 struct udf_sb_info *sbi = UDF_SB(sb);
33 struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
33 34
34 mutex_lock(&sbi->s_alloc_mutex); 35 if (lvidiu) {
35 if (sbi->s_lvid_bh) { 36 mutex_lock(&sbi->s_alloc_mutex);
36 struct logicalVolIntegrityDescImpUse *lvidiu =
37 udf_sb_lvidiu(sbi);
38 if (S_ISDIR(inode->i_mode)) 37 if (S_ISDIR(inode->i_mode))
39 le32_add_cpu(&lvidiu->numDirs, -1); 38 le32_add_cpu(&lvidiu->numDirs, -1);
40 else 39 else
41 le32_add_cpu(&lvidiu->numFiles, -1); 40 le32_add_cpu(&lvidiu->numFiles, -1);
42 udf_updated_lvid(sb); 41 udf_updated_lvid(sb);
42 mutex_unlock(&sbi->s_alloc_mutex);
43 } 43 }
44 mutex_unlock(&sbi->s_alloc_mutex);
45 44
46 udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1); 45 udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
47} 46}
@@ -55,6 +54,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
55 uint32_t start = UDF_I(dir)->i_location.logicalBlockNum; 54 uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
56 struct udf_inode_info *iinfo; 55 struct udf_inode_info *iinfo;
57 struct udf_inode_info *dinfo = UDF_I(dir); 56 struct udf_inode_info *dinfo = UDF_I(dir);
57 struct logicalVolIntegrityDescImpUse *lvidiu;
58 58
59 inode = new_inode(sb); 59 inode = new_inode(sb);
60 60
@@ -92,12 +92,10 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
92 return NULL; 92 return NULL;
93 } 93 }
94 94
95 if (sbi->s_lvid_bh) { 95 lvidiu = udf_sb_lvidiu(sb);
96 struct logicalVolIntegrityDescImpUse *lvidiu; 96 if (lvidiu) {
97
98 iinfo->i_unique = lvid_get_unique_id(sb); 97 iinfo->i_unique = lvid_get_unique_id(sb);
99 mutex_lock(&sbi->s_alloc_mutex); 98 mutex_lock(&sbi->s_alloc_mutex);
100 lvidiu = udf_sb_lvidiu(sbi);
101 if (S_ISDIR(mode)) 99 if (S_ISDIR(mode))
102 le32_add_cpu(&lvidiu->numDirs, 1); 100 le32_add_cpu(&lvidiu->numDirs, 1);
103 else 101 else
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 839a2bad7f45..91219385691d 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -94,13 +94,25 @@ static unsigned int udf_count_free(struct super_block *);
94static int udf_statfs(struct dentry *, struct kstatfs *); 94static int udf_statfs(struct dentry *, struct kstatfs *);
95static int udf_show_options(struct seq_file *, struct dentry *); 95static int udf_show_options(struct seq_file *, struct dentry *);
96 96
97struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi) 97struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
98{ 98{
99 struct logicalVolIntegrityDesc *lvid = 99 struct logicalVolIntegrityDesc *lvid;
100 (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data; 100 unsigned int partnum;
101 __u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions); 101 unsigned int offset;
102 __u32 offset = number_of_partitions * 2 * 102
103 sizeof(uint32_t)/sizeof(uint8_t); 103 if (!UDF_SB(sb)->s_lvid_bh)
104 return NULL;
105 lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
106 partnum = le32_to_cpu(lvid->numOfPartitions);
107 if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
108 offsetof(struct logicalVolIntegrityDesc, impUse)) /
109 (2 * sizeof(uint32_t)) < partnum) {
110 udf_err(sb, "Logical volume integrity descriptor corrupted "
111 "(numOfPartitions = %u)!\n", partnum);
112 return NULL;
113 }
114 /* The offset is to skip freeSpaceTable and sizeTable arrays */
115 offset = partnum * 2 * sizeof(uint32_t);
104 return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]); 116 return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
105} 117}
106 118
@@ -629,9 +641,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
629 struct udf_options uopt; 641 struct udf_options uopt;
630 struct udf_sb_info *sbi = UDF_SB(sb); 642 struct udf_sb_info *sbi = UDF_SB(sb);
631 int error = 0; 643 int error = 0;
644 struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
632 645
633 if (sbi->s_lvid_bh) { 646 if (lvidiu) {
634 int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev); 647 int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
635 if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY)) 648 if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
636 return -EACCES; 649 return -EACCES;
637 } 650 }
@@ -1905,11 +1918,12 @@ static void udf_open_lvid(struct super_block *sb)
1905 1918
1906 if (!bh) 1919 if (!bh)
1907 return; 1920 return;
1908
1909 mutex_lock(&sbi->s_alloc_mutex);
1910 lvid = (struct logicalVolIntegrityDesc *)bh->b_data; 1921 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1911 lvidiu = udf_sb_lvidiu(sbi); 1922 lvidiu = udf_sb_lvidiu(sb);
1923 if (!lvidiu)
1924 return;
1912 1925
1926 mutex_lock(&sbi->s_alloc_mutex);
1913 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1927 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1914 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1928 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1915 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, 1929 udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
@@ -1937,10 +1951,12 @@ static void udf_close_lvid(struct super_block *sb)
1937 1951
1938 if (!bh) 1952 if (!bh)
1939 return; 1953 return;
1954 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1955 lvidiu = udf_sb_lvidiu(sb);
1956 if (!lvidiu)
1957 return;
1940 1958
1941 mutex_lock(&sbi->s_alloc_mutex); 1959 mutex_lock(&sbi->s_alloc_mutex);
1942 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1943 lvidiu = udf_sb_lvidiu(sbi);
1944 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1960 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1945 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1961 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1946 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME); 1962 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
@@ -2093,15 +2109,19 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
2093 2109
2094 if (sbi->s_lvid_bh) { 2110 if (sbi->s_lvid_bh) {
2095 struct logicalVolIntegrityDescImpUse *lvidiu = 2111 struct logicalVolIntegrityDescImpUse *lvidiu =
2096 udf_sb_lvidiu(sbi); 2112 udf_sb_lvidiu(sb);
2097 uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev); 2113 uint16_t minUDFReadRev;
2098 uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev); 2114 uint16_t minUDFWriteRev;
2099 /* uint16_t maxUDFWriteRev =
2100 le16_to_cpu(lvidiu->maxUDFWriteRev); */
2101 2115
2116 if (!lvidiu) {
2117 ret = -EINVAL;
2118 goto error_out;
2119 }
2120 minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
2121 minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
2102 if (minUDFReadRev > UDF_MAX_READ_VERSION) { 2122 if (minUDFReadRev > UDF_MAX_READ_VERSION) {
2103 udf_err(sb, "minUDFReadRev=%x (max is %x)\n", 2123 udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
2104 le16_to_cpu(lvidiu->minUDFReadRev), 2124 minUDFReadRev,
2105 UDF_MAX_READ_VERSION); 2125 UDF_MAX_READ_VERSION);
2106 ret = -EINVAL; 2126 ret = -EINVAL;
2107 goto error_out; 2127 goto error_out;
@@ -2265,11 +2285,7 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2265 struct logicalVolIntegrityDescImpUse *lvidiu; 2285 struct logicalVolIntegrityDescImpUse *lvidiu;
2266 u64 id = huge_encode_dev(sb->s_bdev->bd_dev); 2286 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2267 2287
2268 if (sbi->s_lvid_bh != NULL) 2288 lvidiu = udf_sb_lvidiu(sb);
2269 lvidiu = udf_sb_lvidiu(sbi);
2270 else
2271 lvidiu = NULL;
2272
2273 buf->f_type = UDF_SUPER_MAGIC; 2289 buf->f_type = UDF_SUPER_MAGIC;
2274 buf->f_bsize = sb->s_blocksize; 2290 buf->f_bsize = sb->s_blocksize;
2275 buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len; 2291 buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
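The udf changes centralize the s_lvid_bh NULL check inside udf_sb_lvidiu() and, crucially, validate numOfPartitions against the space actually available in the descriptor before computing the impUse offset, so a corrupted LVID can no longer yield an out-of-bounds pointer. A runnable model of the bounds arithmetic; the LVID_HDR and IMPUSE_SIZE constants are illustrative stand-ins for the on-disk layout.

/* Runnable model of the bounds check added to udf_sb_lvidiu(). */
#include <stdio.h>
#include <stdint.h>

#define BLOCKSIZE   2048u
#define LVID_HDR    80u   /* offsetof(lvid, impUse), illustrative */
#define IMPUSE_SIZE 46u   /* sizeof(ImpUse), illustrative */

static int lvidiu_offset(uint32_t partnum, uint32_t *offset)
{
	/* Dividing the available bytes by the per-partition cost
	 * (2 x u32 for freeSpaceTable + sizeTable) keeps the check
	 * overflow-safe even for an absurd partnum. */
	if ((BLOCKSIZE - IMPUSE_SIZE - LVID_HDR) /
	    (2 * sizeof(uint32_t)) < partnum)
		return -1;   /* descriptor corrupted */
	*offset = partnum * 2 * sizeof(uint32_t);
	return 0;
}

int main(void)
{
	uint32_t off;

	if (lvidiu_offset(4, &off) == 0)
		printf("4 partitions: impUse offset %u\n", off);
	if (lvidiu_offset(0x40000000, &off) != 0)
		printf("bogus partition count rejected\n");
	return 0;
}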
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index ed401e94aa8c..1f32c7bd9f57 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -162,7 +162,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
162 return sb->s_fs_info; 162 return sb->s_fs_info;
163} 163}
164 164
165struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi); 165struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb);
166 166
167int udf_compute_nr_groups(struct super_block *sb, u32 partition); 167int udf_compute_nr_groups(struct super_block *sb, u32 partition);
168 168
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 88c5ea75ebf6..f1d85cfc0a54 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -628,6 +628,7 @@ xfs_buf_item_unlock(
628 else if (aborted) { 628 else if (aborted) {
629 ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp)); 629 ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
630 if (lip->li_flags & XFS_LI_IN_AIL) { 630 if (lip->li_flags & XFS_LI_IN_AIL) {
631 spin_lock(&lip->li_ailp->xa_lock);
631 xfs_trans_ail_delete(lip->li_ailp, lip, 632 xfs_trans_ail_delete(lip->li_ailp, lip,
632 SHUTDOWN_LOG_IO_ERROR); 633 SHUTDOWN_LOG_IO_ERROR);
633 } 634 }
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 069537c845e5..20bf8e8002d6 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1224,6 +1224,7 @@ xfs_da3_node_toosmall(
1224 /* start with smaller blk num */ 1224 /* start with smaller blk num */
1225 forward = nodehdr.forw < nodehdr.back; 1225 forward = nodehdr.forw < nodehdr.back;
1226 for (i = 0; i < 2; forward = !forward, i++) { 1226 for (i = 0; i < 2; forward = !forward, i++) {
1227 struct xfs_da3_icnode_hdr thdr;
1227 if (forward) 1228 if (forward)
1228 blkno = nodehdr.forw; 1229 blkno = nodehdr.forw;
1229 else 1230 else
@@ -1236,10 +1237,10 @@ xfs_da3_node_toosmall(
1236 return(error); 1237 return(error);
1237 1238
1238 node = bp->b_addr; 1239 node = bp->b_addr;
1239 xfs_da3_node_hdr_from_disk(&nodehdr, node); 1240 xfs_da3_node_hdr_from_disk(&thdr, node);
1240 xfs_trans_brelse(state->args->trans, bp); 1241 xfs_trans_brelse(state->args->trans, bp);
1241 1242
1242 if (count - nodehdr.count >= 0) 1243 if (count - thdr.count >= 0)
1243 break; /* fits with at least 25% to spare */ 1244 break; /* fits with at least 25% to spare */
1244 } 1245 }
1245 if (i >= 2) { 1246 if (i >= 2) {
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 1edb5cc3e5f4..18272c766a50 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -515,7 +515,7 @@ typedef struct xfs_swapext
515/* XFS_IOC_GETBIOSIZE ---- deprecated 47 */ 515/* XFS_IOC_GETBIOSIZE ---- deprecated 47 */
516#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap) 516#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap)
517#define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64) 517#define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64)
518#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_eofblocks) 518#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_fs_eofblocks)
519 519
520/* 520/*
521 * ioctl commands that replace IRIX syssgi()'s 521 * ioctl commands that replace IRIX syssgi()'s
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 193206ba4358..474807a401c8 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -119,11 +119,6 @@ xfs_inode_free(
119 ip->i_itemp = NULL; 119 ip->i_itemp = NULL;
120 } 120 }
121 121
122 /* asserts to verify all state is correct here */
123 ASSERT(atomic_read(&ip->i_pincount) == 0);
124 ASSERT(!spin_is_locked(&ip->i_flags_lock));
125 ASSERT(!xfs_isiflocked(ip));
126
127 /* 122 /*
128 * Because we use RCU freeing we need to ensure the inode always 123 * Because we use RCU freeing we need to ensure the inode always
129 * appears to be reclaimed with an invalid inode number when in the 124 * appears to be reclaimed with an invalid inode number when in the
@@ -135,6 +130,10 @@ xfs_inode_free(
135 ip->i_ino = 0; 130 ip->i_ino = 0;
136 spin_unlock(&ip->i_flags_lock); 131 spin_unlock(&ip->i_flags_lock);
137 132
133 /* asserts to verify all state is correct here */
134 ASSERT(atomic_read(&ip->i_pincount) == 0);
135 ASSERT(!xfs_isiflocked(ip));
136
138 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); 137 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
139} 138}
140 139
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index dabda9521b4b..cc179878fe41 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1970,6 +1970,13 @@ xlog_recover_do_inode_buffer(
1970 * magic number. If we don't recognise the magic number in the buffer, then 1970 * magic number. If we don't recognise the magic number in the buffer, then
1971 * return a LSN of -1 so that the caller knows it was an unrecognised block and 1971 * return a LSN of -1 so that the caller knows it was an unrecognised block and
1972 * so can recover the buffer. 1972 * so can recover the buffer.
1973 *
1974 * Note: we cannot rely solely on magic number matches to determine that the
1975 * buffer has a valid LSN - we also need to verify that it belongs to this
1976 * filesystem, so we need to extract the object's LSN and compare it to that
1977 * which we read from the superblock. If the UUIDs don't match, then we've got a
1978 * stale metadata block from an old filesystem instance that we need to recover
1979 * over the top of.
1973 */ 1980 */
1974static xfs_lsn_t 1981static xfs_lsn_t
1975xlog_recover_get_buf_lsn( 1982xlog_recover_get_buf_lsn(
@@ -1980,6 +1987,8 @@ xlog_recover_get_buf_lsn(
1980 __uint16_t magic16; 1987 __uint16_t magic16;
1981 __uint16_t magicda; 1988 __uint16_t magicda;
1982 void *blk = bp->b_addr; 1989 void *blk = bp->b_addr;
1990 uuid_t *uuid;
1991 xfs_lsn_t lsn = -1;
1983 1992
1984 /* v4 filesystems always recover immediately */ 1993 /* v4 filesystems always recover immediately */
1985 if (!xfs_sb_version_hascrc(&mp->m_sb)) 1994 if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -1992,43 +2001,79 @@ xlog_recover_get_buf_lsn(
1992 case XFS_ABTB_MAGIC: 2001 case XFS_ABTB_MAGIC:
1993 case XFS_ABTC_MAGIC: 2002 case XFS_ABTC_MAGIC:
1994 case XFS_IBT_CRC_MAGIC: 2003 case XFS_IBT_CRC_MAGIC:
1995 case XFS_IBT_MAGIC: 2004 case XFS_IBT_MAGIC: {
1996 return be64_to_cpu( 2005 struct xfs_btree_block *btb = blk;
1997 ((struct xfs_btree_block *)blk)->bb_u.s.bb_lsn); 2006
2007 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2008 uuid = &btb->bb_u.s.bb_uuid;
2009 break;
2010 }
1998 case XFS_BMAP_CRC_MAGIC: 2011 case XFS_BMAP_CRC_MAGIC:
1999 case XFS_BMAP_MAGIC: 2012 case XFS_BMAP_MAGIC: {
2000 return be64_to_cpu( 2013 struct xfs_btree_block *btb = blk;
2001 ((struct xfs_btree_block *)blk)->bb_u.l.bb_lsn); 2014
2015 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2016 uuid = &btb->bb_u.l.bb_uuid;
2017 break;
2018 }
2002 case XFS_AGF_MAGIC: 2019 case XFS_AGF_MAGIC:
2003 return be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn); 2020 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2021 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2022 break;
2004 case XFS_AGFL_MAGIC: 2023 case XFS_AGFL_MAGIC:
2005 return be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn); 2024 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2025 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2026 break;
2006 case XFS_AGI_MAGIC: 2027 case XFS_AGI_MAGIC:
2007 return be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn); 2028 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2029 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2030 break;
2008 case XFS_SYMLINK_MAGIC: 2031 case XFS_SYMLINK_MAGIC:
2009 return be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn); 2032 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2033 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2034 break;
2010 case XFS_DIR3_BLOCK_MAGIC: 2035 case XFS_DIR3_BLOCK_MAGIC:
2011 case XFS_DIR3_DATA_MAGIC: 2036 case XFS_DIR3_DATA_MAGIC:
2012 case XFS_DIR3_FREE_MAGIC: 2037 case XFS_DIR3_FREE_MAGIC:
2013 return be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn); 2038 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2039 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2040 break;
2014 case XFS_ATTR3_RMT_MAGIC: 2041 case XFS_ATTR3_RMT_MAGIC:
2015 return be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn); 2042 lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
2043 uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
2044 break;
2016 case XFS_SB_MAGIC: 2045 case XFS_SB_MAGIC:
2017 return be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); 2046 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2047 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2048 break;
2018 default: 2049 default:
2019 break; 2050 break;
2020 } 2051 }
2021 2052
2053 if (lsn != (xfs_lsn_t)-1) {
2054 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2055 goto recover_immediately;
2056 return lsn;
2057 }
2058
2022 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic); 2059 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2023 switch (magicda) { 2060 switch (magicda) {
2024 case XFS_DIR3_LEAF1_MAGIC: 2061 case XFS_DIR3_LEAF1_MAGIC:
2025 case XFS_DIR3_LEAFN_MAGIC: 2062 case XFS_DIR3_LEAFN_MAGIC:
2026 case XFS_DA3_NODE_MAGIC: 2063 case XFS_DA3_NODE_MAGIC:
2027 return be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); 2064 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2065 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2066 break;
2028 default: 2067 default:
2029 break; 2068 break;
2030 } 2069 }
2031 2070
2071 if (lsn != (xfs_lsn_t)-1) {
2072 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2073 goto recover_immediately;
2074 return lsn;
2075 }
2076
2032 /* 2077 /*
2033 * We do individual object checks on dquot and inode buffers as they 2078 * We do individual object checks on dquot and inode buffers as they
2034 * have their own individual LSN records. Also, we could have a stale 2079 * have their own individual LSN records. Also, we could have a stale
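The xlog_recover_get_buf_lsn() rework above converts each magic-number case from "return the LSN" to "record the LSN and the object's UUID", and only trusts the LSN when the UUID matches the mounted filesystem; otherwise the block is stale metadata from a previous mkfs and must be recovered immediately. A runnable model of that extract-then-verify pattern; the uuid and header types below are illustrative, not the XFS on-disk structures.

/* Runnable model of the (lsn, uuid) extract-then-verify pattern. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

typedef struct { unsigned char b[16]; } xuuid_t;

struct hdr { uint64_t lsn; xuuid_t uuid; };

/* -1 means "recover immediately", otherwise the block's trusted LSN. */
static int64_t get_buf_lsn(const struct hdr *blk, const xuuid_t *fs_uuid)
{
	int64_t lsn = (int64_t)blk->lsn;

	if (lsn != -1 && memcmp(&blk->uuid, fs_uuid, sizeof(*fs_uuid)) != 0)
		return -1;   /* stale block from an old filesystem */
	return lsn;
}

int main(void)
{
	xuuid_t fs = { { 1, 2, 3 } }, stale = { { 9, 9, 9 } };
	struct hdr ours = { 100, fs }, old = { 500, stale };

	printf("our block lsn: %lld\n", (long long)get_buf_lsn(&ours, &fs));
	printf("stale block lsn: %lld\n", (long long)get_buf_lsn(&old, &fs));
	return 0;
}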
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 653073de09e3..ed419c62dde1 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -406,13 +406,14 @@ int dm_noflush_suspending(struct dm_target *ti);
406union map_info *dm_get_mapinfo(struct bio *bio); 406union map_info *dm_get_mapinfo(struct bio *bio);
407union map_info *dm_get_rq_mapinfo(struct request *rq); 407union map_info *dm_get_rq_mapinfo(struct request *rq);
408 408
409struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
410
409/* 411/*
410 * Geometry functions. 412 * Geometry functions.
411 */ 413 */
412int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo); 414int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
413int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo); 415int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
414 416
415
416/*----------------------------------------------------------------- 417/*-----------------------------------------------------------------
417 * Functions for manipulating device-mapper tables. 418 * Functions for manipulating device-mapper tables.
418 *---------------------------------------------------------------*/ 419 *---------------------------------------------------------------*/
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index a3b8b2e2d244..d98503bde7e9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -30,10 +30,13 @@
30/* 30/*
31 * Framework version for util services. 31 * Framework version for util services.
32 */ 32 */
33#define UTIL_FW_MINOR 0
34
35#define UTIL_WS2K8_FW_MAJOR 1
36#define UTIL_WS2K8_FW_VERSION (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR)
33 37
34#define UTIL_FW_MAJOR 3 38#define UTIL_FW_MAJOR 3
35#define UTIL_FW_MINOR 0 39#define UTIL_FW_VERSION (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
36#define UTIL_FW_MAJOR_MINOR (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
37 40
38 41
39/* 42/*
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 60e95872da29..ecc82b37c4cc 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
53 unsigned int generation; 53 unsigned int generation;
54}; 54};
55 55
56enum mem_cgroup_filter_t {
57 VISIT, /* visit current node */
58 SKIP, /* skip the current node and continue traversal */
59 SKIP_TREE, /* skip the whole subtree and continue traversal */
60};
61
62/*
63 * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
64 * iterate through the hierarchy tree. Each tree element is checked by the
65 * predicate before it is returned by the iterator. If a filter returns
66 * SKIP or SKIP_TREE then the iterator code continues traversal (with the
67 * next node down the hierarchy or the next node that doesn't belong under the
68 * memcg's subtree).
69 */
70typedef enum mem_cgroup_filter_t
71(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
72
73#ifdef CONFIG_MEMCG 56#ifdef CONFIG_MEMCG
74/* 57/*
75 * All "charge" functions with gfp_mask should use GFP_KERNEL or 58 * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
137extern void mem_cgroup_end_migration(struct mem_cgroup *memcg, 120extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
138 struct page *oldpage, struct page *newpage, bool migration_ok); 121 struct page *oldpage, struct page *newpage, bool migration_ok);
139 122
140struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root, 123struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
141 struct mem_cgroup *prev, 124 struct mem_cgroup *,
142 struct mem_cgroup_reclaim_cookie *reclaim, 125 struct mem_cgroup_reclaim_cookie *);
143 mem_cgroup_iter_filter cond);
144
145static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
146 struct mem_cgroup *prev,
147 struct mem_cgroup_reclaim_cookie *reclaim)
148{
149 return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
150}
151
152void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); 126void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
153 127
154/* 128/*
@@ -260,9 +234,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
260 mem_cgroup_update_page_stat(page, idx, -1); 234 mem_cgroup_update_page_stat(page, idx, -1);
261} 235}
262 236
263enum mem_cgroup_filter_t 237unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
264mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg, 238 gfp_t gfp_mask,
265 struct mem_cgroup *root); 239 unsigned long *total_scanned);
266 240
267void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx); 241void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
268static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, 242static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -376,15 +350,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
376 struct page *oldpage, struct page *newpage, bool migration_ok) 350 struct page *oldpage, struct page *newpage, bool migration_ok)
377{ 351{
378} 352}
379static inline struct mem_cgroup *
380mem_cgroup_iter_cond(struct mem_cgroup *root,
381 struct mem_cgroup *prev,
382 struct mem_cgroup_reclaim_cookie *reclaim,
383 mem_cgroup_iter_filter cond)
384{
385 /* first call must return non-NULL, second return NULL */
386 return (struct mem_cgroup *)(unsigned long)!prev;
387}
388 353
389static inline struct mem_cgroup * 354static inline struct mem_cgroup *
390mem_cgroup_iter(struct mem_cgroup *root, 355mem_cgroup_iter(struct mem_cgroup *root,
@@ -471,11 +436,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
471} 436}
472 437
473static inline 438static inline
474enum mem_cgroup_filter_t 439unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
475mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg, 440 gfp_t gfp_mask,
476 struct mem_cgroup *root) 441 unsigned long *total_scanned)
477{ 442{
478 return VISIT; 443 return 0;
479} 444}
480 445
481static inline void mem_cgroup_split_huge_fixup(struct page *head) 446static inline void mem_cgroup_split_huge_fixup(struct page *head)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index ccd4260834c5..bab49da8a0f0 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -15,8 +15,8 @@
15#include <linux/spinlock_types.h> 15#include <linux/spinlock_types.h>
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <linux/lockdep.h> 17#include <linux/lockdep.h>
18
19#include <linux/atomic.h> 18#include <linux/atomic.h>
19#include <asm/processor.h>
20 20
21/* 21/*
22 * Simple, straightforward mutexes with strict semantics: 22 * Simple, straightforward mutexes with strict semantics:
@@ -175,8 +175,8 @@ extern void mutex_unlock(struct mutex *lock);
175 175
176extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); 176extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
177 177
178#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX 178#ifndef arch_mutex_cpu_relax
179#define arch_mutex_cpu_relax() cpu_relax() 179# define arch_mutex_cpu_relax() cpu_relax()
180#endif 180#endif
181 181
182#endif 182#endif
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 535cecf1e02f..fcd63baee5f2 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -1,8 +1,6 @@
1#ifndef __OF_IRQ_H 1#ifndef __OF_IRQ_H
2#define __OF_IRQ_H 2#define __OF_IRQ_H
3 3
4#if defined(CONFIG_OF)
5struct of_irq;
6#include <linux/types.h> 4#include <linux/types.h>
7#include <linux/errno.h> 5#include <linux/errno.h>
8#include <linux/irq.h> 6#include <linux/irq.h>
@@ -10,14 +8,6 @@ struct of_irq;
10#include <linux/ioport.h> 8#include <linux/ioport.h>
11#include <linux/of.h> 9#include <linux/of.h>
12 10
13/*
14 * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
15 * implements it differently. However, the prototype is the same for all,
16 * so declare it here regardless of the CONFIG_OF_IRQ setting.
17 */
18extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
19
20#if defined(CONFIG_OF_IRQ)
21/** 11/**
22 * of_irq - container for device_node/irq_specifier pair for an irq controller 12 * of_irq - container for device_node/irq_specifier pair for an irq controller
23 * @controller: pointer to interrupt controller device tree node 13 * @controller: pointer to interrupt controller device tree node
@@ -71,11 +61,17 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
71extern int of_irq_count(struct device_node *dev); 61extern int of_irq_count(struct device_node *dev);
72extern int of_irq_to_resource_table(struct device_node *dev, 62extern int of_irq_to_resource_table(struct device_node *dev,
73 struct resource *res, int nr_irqs); 63 struct resource *res, int nr_irqs);
74extern struct device_node *of_irq_find_parent(struct device_node *child);
75 64
76extern void of_irq_init(const struct of_device_id *matches); 65extern void of_irq_init(const struct of_device_id *matches);
77 66
78#endif /* CONFIG_OF_IRQ */ 67#if defined(CONFIG_OF)
68/*
69 * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
70 * implements it differently. However, the prototype is the same for all,
71 * so declare it here regardless of the CONFIG_OF_IRQ setting.
72 */
73extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
74extern struct device_node *of_irq_find_parent(struct device_node *child);
79 75
80#else /* !CONFIG_OF */ 76#else /* !CONFIG_OF */
81static inline unsigned int irq_of_parse_and_map(struct device_node *dev, 77static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
diff --git a/include/linux/smp.h b/include/linux/smp.h
index cfb7ca094b38..731f5237d5f4 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -155,6 +155,12 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
155 155
156static inline void kick_all_cpus_sync(void) { } 156static inline void kick_all_cpus_sync(void) { }
157 157
158static inline void __smp_call_function_single(int cpuid,
159 struct call_single_data *data, int wait)
160{
161 on_each_cpu(data->func, data->info, wait);
162}
163
158#endif /* !SMP */ 164#endif /* !SMP */
159 165
160/* 166/*
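
The !SMP stub added above makes __smp_call_function_single() usable unconditionally: with a single CPU there is no remote CPU to signal, so the call degenerates into running the function locally. A userspace sketch of that degenerate path, with simplified stand-ins for the kernel types:

#include <stdio.h>

struct call_single_data {		/* trimmed stand-in for the kernel struct */
	void (*func)(void *info);
	void *info;
};

static void on_each_cpu(void (*func)(void *), void *info, int wait)
{
	(void)wait;			/* one CPU: always synchronous */
	func(info);
}

static void up_smp_call_function_single(int cpu, struct call_single_data *d,
					int wait)
{
	(void)cpu;			/* only CPU 0 exists on UP */
	on_each_cpu(d->func, d->info, wait);
}

static void hello(void *info)
{
	printf("ran with \"%s\"\n", (char *)info);
}

int main(void)
{
	struct call_single_data d = { .func = hello, .info = "local data" };

	up_smp_call_function_single(0, &d, 1);
	return 0;
}
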
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index fa8b3adf9ffb..46d41e8b0dcc 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -1007,4 +1007,6 @@ struct drm_radeon_info {
1007#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3 1007#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3
1008#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2 1008#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2
1009 1009
1010#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
1011
1010#endif 1012#endif
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 40a1fb807396..009a655a5d35 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -380,10 +380,13 @@ struct perf_event_mmap_page {
380 union { 380 union {
381 __u64 capabilities; 381 __u64 capabilities;
382 struct { 382 struct {
383 __u64 cap_usr_time : 1, 383 __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
384 cap_usr_rdpmc : 1, 384 cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
385 cap_usr_time_zero : 1, 385
386 cap_____res : 61; 386 cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
387 cap_user_time : 1, /* The time_* fields are used */
388 cap_user_time_zero : 1, /* The time_zero field is used */
389 cap_____res : 59;
387 }; 390 };
388 }; 391 };
389 392
@@ -442,12 +445,13 @@ struct perf_event_mmap_page {
442 * ((rem * time_mult) >> time_shift); 445 * ((rem * time_mult) >> time_shift);
443 */ 446 */
444 __u64 time_zero; 447 __u64 time_zero;
448 __u32 size; /* Header size up to __reserved[] fields. */
445 449
446 /* 450 /*
447 * Hole for extension of the self monitor capabilities 451 * Hole for extension of the self monitor capabilities
448 */ 452 */
449 453
450 __u64 __reserved[119]; /* align to 1k */ 454 __u8 __reserved[118*8+4]; /* align to 1k. */
451 455
452 /* 456 /*
453 * Control data for the mmap() data buffer. 457 * Control data for the mmap() data buffer.
@@ -528,6 +532,7 @@ enum perf_event_type {
528 * u64 len; 532 * u64 len;
529 * u64 pgoff; 533 * u64 pgoff;
530 * char filename[]; 534 * char filename[];
535 * struct sample_id sample_id;
531 * }; 536 * };
532 */ 537 */
533 PERF_RECORD_MMAP = 1, 538 PERF_RECORD_MMAP = 1,
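
The capability re-layout above keeps bit 0 permanently zero and introduces cap_bit0_is_deprecated (always one) so new userspace can tell the two layouts apart. A sketch of the intended detection logic, using a trimmed stand-in for perf_event_mmap_page (bit-field packing is compiler-dependent, so treat this as illustrative only):

#include <stdint.h>
#include <stdio.h>

struct mmap_page_caps {			/* trimmed stand-in for the real struct */
	union {
		uint64_t capabilities;
		struct {
			uint64_t cap_bit0               : 1;  /* always 0 now */
			uint64_t cap_bit0_is_deprecated : 1;  /* always 1 now */
			uint64_t cap_user_rdpmc         : 1;
			uint64_t cap_user_time          : 1;
			uint64_t cap_user_time_zero     : 1;
			uint64_t cap_____res            : 59;
		};
	};
};

static int user_time_supported(const struct mmap_page_caps *pg)
{
	if (pg->cap_bit0_is_deprecated)	/* new layout: trust the named bits */
		return pg->cap_user_time;
	return pg->cap_bit0;		/* old layout: bit 0 *was* cap_usr_time */
}

int main(void)
{
	struct mmap_page_caps pg = { .capabilities = 0 };

	pg.cap_bit0_is_deprecated = 1;
	pg.cap_user_time = 1;
	printf("user time readable: %d\n", user_time_supported(&pg));
	return 0;
}
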
diff --git a/ipc/msg.c b/ipc/msg.c
index b0d541d42677..9e4310c546ae 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -165,6 +165,15 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
165 ipc_rmid(&msg_ids(ns), &s->q_perm); 165 ipc_rmid(&msg_ids(ns), &s->q_perm);
166} 166}
167 167
168static void msg_rcu_free(struct rcu_head *head)
169{
170 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
171 struct msg_queue *msq = ipc_rcu_to_struct(p);
172
173 security_msg_queue_free(msq);
174 ipc_rcu_free(head);
175}
176
168/** 177/**
169 * newque - Create a new msg queue 178 * newque - Create a new msg queue
170 * @ns: namespace 179 * @ns: namespace
@@ -189,15 +198,14 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
189 msq->q_perm.security = NULL; 198 msq->q_perm.security = NULL;
190 retval = security_msg_queue_alloc(msq); 199 retval = security_msg_queue_alloc(msq);
191 if (retval) { 200 if (retval) {
192 ipc_rcu_putref(msq); 201 ipc_rcu_putref(msq, ipc_rcu_free);
193 return retval; 202 return retval;
194 } 203 }
195 204
196 /* ipc_addid() locks msq upon success. */ 205 /* ipc_addid() locks msq upon success. */
197 id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); 206 id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
198 if (id < 0) { 207 if (id < 0) {
199 security_msg_queue_free(msq); 208 ipc_rcu_putref(msq, msg_rcu_free);
200 ipc_rcu_putref(msq);
201 return id; 209 return id;
202 } 210 }
203 211
@@ -276,8 +284,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
276 free_msg(msg); 284 free_msg(msg);
277 } 285 }
278 atomic_sub(msq->q_cbytes, &ns->msg_bytes); 286 atomic_sub(msq->q_cbytes, &ns->msg_bytes);
279 security_msg_queue_free(msq); 287 ipc_rcu_putref(msq, msg_rcu_free);
280 ipc_rcu_putref(msq);
281} 288}
282 289
283/* 290/*
@@ -717,7 +724,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
717 rcu_read_lock(); 724 rcu_read_lock();
718 ipc_lock_object(&msq->q_perm); 725 ipc_lock_object(&msq->q_perm);
719 726
720 ipc_rcu_putref(msq); 727 ipc_rcu_putref(msq, ipc_rcu_free);
721 if (msq->q_perm.deleted) { 728 if (msq->q_perm.deleted) {
722 err = -EIDRM; 729 err = -EIDRM;
723 goto out_unlock0; 730 goto out_unlock0;
diff --git a/ipc/sem.c b/ipc/sem.c
index 69b6a21f3844..19c8b980d1fe 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -243,6 +243,15 @@ static void merge_queues(struct sem_array *sma)
243 } 243 }
244} 244}
245 245
246static void sem_rcu_free(struct rcu_head *head)
247{
248 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
249 struct sem_array *sma = ipc_rcu_to_struct(p);
250
251 security_sem_free(sma);
252 ipc_rcu_free(head);
253}
254
246/* 255/*
247 * If the request contains only one semaphore operation, and there are 256 * If the request contains only one semaphore operation, and there are
248 * no complex transactions pending, lock only the semaphore involved. 257 * no complex transactions pending, lock only the semaphore involved.
@@ -374,12 +383,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
374static inline void sem_lock_and_putref(struct sem_array *sma) 383static inline void sem_lock_and_putref(struct sem_array *sma)
375{ 384{
376 sem_lock(sma, NULL, -1); 385 sem_lock(sma, NULL, -1);
377 ipc_rcu_putref(sma); 386 ipc_rcu_putref(sma, ipc_rcu_free);
378}
379
380static inline void sem_putref(struct sem_array *sma)
381{
382 ipc_rcu_putref(sma);
383} 387}
384 388
385static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) 389static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -458,14 +462,13 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
458 sma->sem_perm.security = NULL; 462 sma->sem_perm.security = NULL;
459 retval = security_sem_alloc(sma); 463 retval = security_sem_alloc(sma);
460 if (retval) { 464 if (retval) {
461 ipc_rcu_putref(sma); 465 ipc_rcu_putref(sma, ipc_rcu_free);
462 return retval; 466 return retval;
463 } 467 }
464 468
465 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); 469 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
466 if (id < 0) { 470 if (id < 0) {
467 security_sem_free(sma); 471 ipc_rcu_putref(sma, sem_rcu_free);
468 ipc_rcu_putref(sma);
469 return id; 472 return id;
470 } 473 }
471 ns->used_sems += nsems; 474 ns->used_sems += nsems;
@@ -1047,8 +1050,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1047 1050
1048 wake_up_sem_queue_do(&tasks); 1051 wake_up_sem_queue_do(&tasks);
1049 ns->used_sems -= sma->sem_nsems; 1052 ns->used_sems -= sma->sem_nsems;
1050 security_sem_free(sma); 1053 ipc_rcu_putref(sma, sem_rcu_free);
1051 ipc_rcu_putref(sma);
1052} 1054}
1053 1055
1054static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) 1056static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -1292,7 +1294,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1292 rcu_read_unlock(); 1294 rcu_read_unlock();
1293 sem_io = ipc_alloc(sizeof(ushort)*nsems); 1295 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1294 if(sem_io == NULL) { 1296 if(sem_io == NULL) {
1295 sem_putref(sma); 1297 ipc_rcu_putref(sma, ipc_rcu_free);
1296 return -ENOMEM; 1298 return -ENOMEM;
1297 } 1299 }
1298 1300
@@ -1328,20 +1330,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1328 if(nsems > SEMMSL_FAST) { 1330 if(nsems > SEMMSL_FAST) {
1329 sem_io = ipc_alloc(sizeof(ushort)*nsems); 1331 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1330 if(sem_io == NULL) { 1332 if(sem_io == NULL) {
1331 sem_putref(sma); 1333 ipc_rcu_putref(sma, ipc_rcu_free);
1332 return -ENOMEM; 1334 return -ENOMEM;
1333 } 1335 }
1334 } 1336 }
1335 1337
1336 if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) { 1338 if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
1337 sem_putref(sma); 1339 ipc_rcu_putref(sma, ipc_rcu_free);
1338 err = -EFAULT; 1340 err = -EFAULT;
1339 goto out_free; 1341 goto out_free;
1340 } 1342 }
1341 1343
1342 for (i = 0; i < nsems; i++) { 1344 for (i = 0; i < nsems; i++) {
1343 if (sem_io[i] > SEMVMX) { 1345 if (sem_io[i] > SEMVMX) {
1344 sem_putref(sma); 1346 ipc_rcu_putref(sma, ipc_rcu_free);
1345 err = -ERANGE; 1347 err = -ERANGE;
1346 goto out_free; 1348 goto out_free;
1347 } 1349 }
@@ -1629,7 +1631,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1629 /* step 2: allocate new undo structure */ 1631 /* step 2: allocate new undo structure */
1630 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); 1632 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1631 if (!new) { 1633 if (!new) {
1632 sem_putref(sma); 1634 ipc_rcu_putref(sma, ipc_rcu_free);
1633 return ERR_PTR(-ENOMEM); 1635 return ERR_PTR(-ENOMEM);
1634 } 1636 }
1635 1637
diff --git a/ipc/shm.c b/ipc/shm.c
index 2821cdf93adb..d69739610fd4 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -167,6 +167,15 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
167 ipc_lock_object(&ipcp->shm_perm); 167 ipc_lock_object(&ipcp->shm_perm);
168} 168}
169 169
170static void shm_rcu_free(struct rcu_head *head)
171{
172 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
173 struct shmid_kernel *shp = ipc_rcu_to_struct(p);
174
175 security_shm_free(shp);
176 ipc_rcu_free(head);
177}
178
170static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) 179static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
171{ 180{
172 ipc_rmid(&shm_ids(ns), &s->shm_perm); 181 ipc_rmid(&shm_ids(ns), &s->shm_perm);
@@ -208,8 +217,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
208 user_shm_unlock(file_inode(shp->shm_file)->i_size, 217 user_shm_unlock(file_inode(shp->shm_file)->i_size,
209 shp->mlock_user); 218 shp->mlock_user);
210 fput (shp->shm_file); 219 fput (shp->shm_file);
211 security_shm_free(shp); 220 ipc_rcu_putref(shp, shm_rcu_free);
212 ipc_rcu_putref(shp);
213} 221}
214 222
215/* 223/*
@@ -497,7 +505,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
497 shp->shm_perm.security = NULL; 505 shp->shm_perm.security = NULL;
498 error = security_shm_alloc(shp); 506 error = security_shm_alloc(shp);
499 if (error) { 507 if (error) {
500 ipc_rcu_putref(shp); 508 ipc_rcu_putref(shp, ipc_rcu_free);
501 return error; 509 return error;
502 } 510 }
503 511
@@ -566,8 +574,7 @@ no_id:
566 user_shm_unlock(size, shp->mlock_user); 574 user_shm_unlock(size, shp->mlock_user);
567 fput(file); 575 fput(file);
568no_file: 576no_file:
569 security_shm_free(shp); 577 ipc_rcu_putref(shp, shm_rcu_free);
570 ipc_rcu_putref(shp);
571 return error; 578 return error;
572} 579}
573 580
diff --git a/ipc/util.c b/ipc/util.c
index e829da9ed01f..fdb8ae740775 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -474,11 +474,6 @@ void ipc_free(void* ptr, int size)
474 kfree(ptr); 474 kfree(ptr);
475} 475}
476 476
477struct ipc_rcu {
478 struct rcu_head rcu;
479 atomic_t refcount;
480} ____cacheline_aligned_in_smp;
481
482/** 477/**
483 * ipc_rcu_alloc - allocate ipc and rcu space 478 * ipc_rcu_alloc - allocate ipc and rcu space
484 * @size: size desired 479 * @size: size desired
@@ -505,27 +500,24 @@ int ipc_rcu_getref(void *ptr)
505 return atomic_inc_not_zero(&p->refcount); 500 return atomic_inc_not_zero(&p->refcount);
506} 501}
507 502
508/** 503void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
509 * ipc_schedule_free - free ipc + rcu space
510 * @head: RCU callback structure for queued work
511 */
512static void ipc_schedule_free(struct rcu_head *head)
513{
514 vfree(container_of(head, struct ipc_rcu, rcu));
515}
516
517void ipc_rcu_putref(void *ptr)
518{ 504{
519 struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; 505 struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
520 506
521 if (!atomic_dec_and_test(&p->refcount)) 507 if (!atomic_dec_and_test(&p->refcount))
522 return; 508 return;
523 509
524 if (is_vmalloc_addr(ptr)) { 510 call_rcu(&p->rcu, func);
525 call_rcu(&p->rcu, ipc_schedule_free); 511}
526 } else { 512
527 kfree_rcu(p, rcu); 513void ipc_rcu_free(struct rcu_head *head)
528 } 514{
515 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
516
517 if (is_vmalloc_addr(p))
518 vfree(p);
519 else
520 kfree(p);
529} 521}
530 522
531/** 523/**
diff --git a/ipc/util.h b/ipc/util.h
index c5f3338ba1fa..f2f5036f2eed 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -47,6 +47,13 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { }
47static inline void shm_exit_ns(struct ipc_namespace *ns) { } 47static inline void shm_exit_ns(struct ipc_namespace *ns) { }
48#endif 48#endif
49 49
50struct ipc_rcu {
51 struct rcu_head rcu;
52 atomic_t refcount;
53} ____cacheline_aligned_in_smp;
54
55#define ipc_rcu_to_struct(p) ((void *)(p+1))
56
50/* 57/*
51 * Structure that holds the parameters needed by the ipc operations 58 * Structure that holds the parameters needed by the ipc operations
52 * (see after) 59 * (see after)
@@ -120,7 +127,8 @@ void ipc_free(void* ptr, int size);
120 */ 127 */
121void* ipc_rcu_alloc(int size); 128void* ipc_rcu_alloc(int size);
122int ipc_rcu_getref(void *ptr); 129int ipc_rcu_getref(void *ptr);
123void ipc_rcu_putref(void *ptr); 130void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
131void ipc_rcu_free(struct rcu_head *head);
124 132
125struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); 133struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
126struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id); 134struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);
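
The refactor across ipc/ above replaces the open-coded "security_*_free() then ipc_rcu_putref()" sequence with a single putref that takes an RCU callback, so the security teardown is deferred together with the free. A compact userspace sketch of the shape, assuming defer_rcu() as an immediate-execution stand-in for call_rcu():

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct rcu_head { void (*func)(struct rcu_head *); };

struct ipc_rcu {
	struct rcu_head rcu;
	int refcount;
};

#define ipc_rcu_to_struct(p) ((void *)((p) + 1))

static void defer_rcu(struct rcu_head *head, void (*func)(struct rcu_head *))
{
	func(head);	/* the kernel queues this until a grace period elapses */
}

static void ipc_rcu_free(struct rcu_head *head)
{
	/* container_of(head, struct ipc_rcu, rcu), spelled out */
	free((char *)head - offsetof(struct ipc_rcu, rcu));
}

struct msg_queue { int id; };

static void msg_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = (void *)((char *)head - offsetof(struct ipc_rcu, rcu));
	struct msg_queue *msq = ipc_rcu_to_struct(p);

	printf("security teardown for queue %d\n", msq->id);
	ipc_rcu_free(head);
}

static void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *))
{
	struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;

	if (--p->refcount)
		return;
	defer_rcu(&p->rcu, func);
}

int main(void)
{
	struct ipc_rcu *p = malloc(sizeof(*p) + sizeof(struct msg_queue));
	struct msg_queue *msq = ipc_rcu_to_struct(p);

	p->refcount = 1;
	msq->id = 42;
	ipc_rcu_putref(msq, msg_rcu_free);	/* last ref: runs msg_rcu_free */
	return 0;
}
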
diff --git a/kernel/audit.c b/kernel/audit.c
index 91e53d04b6a9..7b0e23a740ce 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
1117 1117
1118 sleep_time = timeout_start + audit_backlog_wait_time - 1118 sleep_time = timeout_start + audit_backlog_wait_time -
1119 jiffies; 1119 jiffies;
1120 if ((long)sleep_time > 0) 1120 if ((long)sleep_time > 0) {
1121 wait_for_auditd(sleep_time); 1121 wait_for_auditd(sleep_time);
1122 continue; 1122 continue;
1123 }
1123 } 1124 }
1124 if (audit_rate_check() && printk_ratelimit()) 1125 if (audit_rate_check() && printk_ratelimit())
1125 printk(KERN_WARNING 1126 printk(KERN_WARNING
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 247091bf0587..859c8dfd78a1 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -51,6 +51,15 @@ void context_tracking_user_enter(void)
51 unsigned long flags; 51 unsigned long flags;
52 52
53 /* 53 /*
54 * Repeat the user_enter() check here because some archs may be calling
55 * this from asm and if no CPU needs context tracking, they shouldn't
56 * go further. Repeat the check here until they support the static key
57 * check.
58 */
59 if (!static_key_false(&context_tracking_enabled))
60 return;
61
62 /*
 54 * Some contexts may involve an exception occurring in an irq, 63 * Some contexts may involve an exception occurring in an irq,
55 * leading to that nesting: 64 * leading to that nesting:
56 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit() 65 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
@@ -151,6 +160,9 @@ void context_tracking_user_exit(void)
151{ 160{
152 unsigned long flags; 161 unsigned long flags;
153 162
163 if (!static_key_false(&context_tracking_enabled))
164 return;
165
154 if (in_interrupt()) 166 if (in_interrupt())
155 return; 167 return;
156 168
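
The early returns added above gate the whole function on a static key, so kernels with context tracking disabled pay only a patched-out branch. A userspace sketch of the gating shape, with a plain global flag as a stand-in for static_key_false():

#include <stdbool.h>
#include <stdio.h>

static bool context_tracking_enabled;	/* stand-in for the static key */

static void context_tracking_user_enter_sketch(void)
{
	/* repeated here because asm callers may not have checked it */
	if (!context_tracking_enabled)
		return;
	puts("tracking user entry");
}

int main(void)
{
	context_tracking_user_enter_sketch();	/* no-op: key disabled */
	context_tracking_enabled = true;
	context_tracking_user_enter_sketch();	/* does the real work */
	return 0;
}
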
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1a825a486a25..4ccb29bb761e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
3660 *running = ctx_time - event->tstamp_running; 3660 *running = ctx_time - event->tstamp_running;
3661} 3661}
3662 3662
3663static void perf_event_init_userpage(struct perf_event *event)
3664{
3665 struct perf_event_mmap_page *userpg;
3666 struct ring_buffer *rb;
3667
3668 rcu_read_lock();
3669 rb = rcu_dereference(event->rb);
3670 if (!rb)
3671 goto unlock;
3672
3673 userpg = rb->user_page;
3674
3675 /* Allow new userspace to detect that bit 0 is deprecated */
3676 userpg->cap_bit0_is_deprecated = 1;
3677 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
3678
3679unlock:
3680 rcu_read_unlock();
3681}
3682
3663void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) 3683void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
3664{ 3684{
3665} 3685}
@@ -4044,6 +4064,7 @@ again:
4044 ring_buffer_attach(event, rb); 4064 ring_buffer_attach(event, rb);
4045 rcu_assign_pointer(event->rb, rb); 4065 rcu_assign_pointer(event->rb, rb);
4046 4066
4067 perf_event_init_userpage(event);
4047 perf_event_update_userpage(event); 4068 perf_event_update_userpage(event);
4048 4069
4049unlock: 4070unlock:
diff --git a/kernel/params.c b/kernel/params.c
index 81c4e78c8f4c..c00d5b502aa4 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -254,11 +254,11 @@ int parse_args(const char *doing,
254 254
255 255
256STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul); 256STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
257STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul); 257STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
258STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul); 258STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
259STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul); 259STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
260STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul); 260STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
261STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul); 261STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
262STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul); 262STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
263 263
264int param_set_charp(const char *val, const struct kernel_param *kp) 264int param_set_charp(const char *val, const struct kernel_param *kp)
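
The params.c change matters because the short, int and long parameter types are signed, yet they were parsed with the unsigned converter. A userspace sketch of the mismatch, with strtol/strtoul standing in for kstrtol/kstrtoul (the kernel helpers reject a stray '-' rather than wrapping, but the sign is lost either way):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *val = "-5";		/* e.g. a module parameter value */

	unsigned long u = strtoul(val, NULL, 0);	/* wraps: huge positive */
	long s = strtol(val, NULL, 0);			/* keeps the sign */

	printf("unsigned parse: %lu\n", u);
	printf("signed parse:   %ld\n", s);		/* -5 */
	return 0;
}
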
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 269ed9384cc4..f813b3474646 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
32#endif 32#endif
33enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE; 33enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
34 34
35int reboot_default; 35/*
36 * This variable is used privately to keep track of whether or not
37 * reboot_type is still set to its default value (i.e., reboot= hasn't
38 * been set on the command line). This is needed so that we can
39 * suppress DMI scanning for reboot quirks. Without it, it's
40 * impossible to override a faulty reboot quirk without recompiling.
41 */
42int reboot_default = 1;
36int reboot_cpu; 43int reboot_cpu;
37enum reboot_type reboot_type = BOOT_ACPI; 44enum reboot_type reboot_type = BOOT_ACPI;
38int reboot_force; 45int reboot_force;
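
The enlarged comment above explains why reboot_default now starts at 1: DMI quirks may only rewrite reboot_type while the user has not chosen one. A small sketch of that precedence rule, with a simplified stand-in for the reboot= parser:

#include <stdio.h>

static int reboot_default = 1;		/* 1 until "reboot=" is parsed */
static char reboot_type = 'a';		/* 'a' standing in for BOOT_ACPI */

static void parse_reboot_param(char mode)
{
	reboot_default = 0;		/* explicit user choice from now on */
	reboot_type = mode;
}

static void maybe_apply_dmi_quirk(char quirk_mode)
{
	if (!reboot_default)		/* never override the command line */
		return;
	reboot_type = quirk_mode;
}

int main(void)
{
	parse_reboot_param('k');	/* e.g. reboot=kbd */
	maybe_apply_dmi_quirk('b');	/* ignored: user already decided */
	printf("reboot_type = %c\n", reboot_type);
	return 0;
}
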
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 11cd13667359..7c70201fbc61 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
4242 } 4242 }
4243 4243
4244 if (!se) { 4244 if (!se) {
4245 cfs_rq->h_load = rq->avg.load_avg_contrib; 4245 cfs_rq->h_load = cfs_rq->runnable_load_avg;
4246 cfs_rq->last_h_load_update = now; 4246 cfs_rq->last_h_load_update = now;
4247 } 4247 }
4248 4248
@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
4823 (busiest->load_per_task * SCHED_POWER_SCALE) / 4823 (busiest->load_per_task * SCHED_POWER_SCALE) /
4824 busiest->group_power; 4824 busiest->group_power;
4825 4825
4826 if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >= 4826 if (busiest->avg_load + scaled_busy_load_per_task >=
4827 (scaled_busy_load_per_task * imbn)) { 4827 local->avg_load + (scaled_busy_load_per_task * imbn)) {
4828 env->imbalance = busiest->load_per_task; 4828 env->imbalance = busiest->load_per_task;
4829 return; 4829 return;
4830 } 4830 }
@@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
4896 * max load less than avg load(as we skip the groups at or below 4896 * max load less than avg load(as we skip the groups at or below
4897 * its cpu_power, while calculating max_load..) 4897 * its cpu_power, while calculating max_load..)
4898 */ 4898 */
4899 if (busiest->avg_load < sds->avg_load) { 4899 if (busiest->avg_load <= sds->avg_load ||
4900 local->avg_load >= sds->avg_load) {
4900 env->imbalance = 0; 4901 env->imbalance = 0;
4901 return fix_small_imbalance(env, sds); 4902 return fix_small_imbalance(env, sds);
4902 } 4903 }
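
Both fair.c comparisons above are rewritten so that no unsigned subtraction can wrap: terms are moved across the inequality instead of being subtracted. A minimal demonstration of the wrap-around the old form risked, with made-up load numbers:

#include <stdio.h>

int main(void)
{
	unsigned long busiest = 100, local = 300, task = 50, imbn = 2;

	/* old shape: busiest - local underflows to a huge value, so the
	 * test is spuriously true whenever local > busiest */
	if (busiest - local + task >= task * imbn)
		puts("old form: spuriously true");

	/* new shape: both sides stay non-negative */
	if (busiest + task >= local + task * imbn)
		puts("new form: true");
	else
		puts("new form: false");
	return 0;
}
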
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 51c4f34d258e..4431610f049a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
486 .unpark = watchdog_enable, 486 .unpark = watchdog_enable,
487}; 487};
488 488
489static int watchdog_enable_all_cpus(void) 489static void restart_watchdog_hrtimer(void *info)
490{
491 struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
492 int ret;
493
494 /*
495 * No need to cancel and restart hrtimer if it is currently executing
496 * because it will reprogram itself with the new period now.
497 * We should never see it unqueued here because we are running per-cpu
498 * with interrupts disabled.
499 */
500 ret = hrtimer_try_to_cancel(hrtimer);
501 if (ret == 1)
502 hrtimer_start(hrtimer, ns_to_ktime(sample_period),
503 HRTIMER_MODE_REL_PINNED);
504}
505
506static void update_timers(int cpu)
507{
508 struct call_single_data data = {.func = restart_watchdog_hrtimer};
509 /*
 510 * Make sure that the perf event counter will adapt to a new
511 * sampling period. Updating the sampling period directly would
512 * be much nicer but we do not have an API for that now so
513 * let's use a big hammer.
514 * Hrtimer will adopt the new period on the next tick but this
515 * might be late already so we have to restart the timer as well.
516 */
517 watchdog_nmi_disable(cpu);
518 __smp_call_function_single(cpu, &data, 1);
519 watchdog_nmi_enable(cpu);
520}
521
522static void update_timers_all_cpus(void)
523{
524 int cpu;
525
526 get_online_cpus();
527 preempt_disable();
528 for_each_online_cpu(cpu)
529 update_timers(cpu);
530 preempt_enable();
531 put_online_cpus();
532}
533
534static int watchdog_enable_all_cpus(bool sample_period_changed)
490{ 535{
491 int err = 0; 536 int err = 0;
492 537
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
496 pr_err("Failed to create watchdog threads, disabled\n"); 541 pr_err("Failed to create watchdog threads, disabled\n");
497 else 542 else
498 watchdog_running = 1; 543 watchdog_running = 1;
544 } else if (sample_period_changed) {
545 update_timers_all_cpus();
499 } 546 }
500 547
501 return err; 548 return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
520 void __user *buffer, size_t *lenp, loff_t *ppos) 567 void __user *buffer, size_t *lenp, loff_t *ppos)
521{ 568{
522 int err, old_thresh, old_enabled; 569 int err, old_thresh, old_enabled;
570 static DEFINE_MUTEX(watchdog_proc_mutex);
523 571
572 mutex_lock(&watchdog_proc_mutex);
524 old_thresh = ACCESS_ONCE(watchdog_thresh); 573 old_thresh = ACCESS_ONCE(watchdog_thresh);
525 old_enabled = ACCESS_ONCE(watchdog_user_enabled); 574 old_enabled = ACCESS_ONCE(watchdog_user_enabled);
526 575
527 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 576 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
528 if (err || !write) 577 if (err || !write)
529 return err; 578 goto out;
530 579
531 set_sample_period(); 580 set_sample_period();
532 /* 581 /*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
535 * watchdog_*_all_cpus() function takes care of this. 584 * watchdog_*_all_cpus() function takes care of this.
536 */ 585 */
537 if (watchdog_user_enabled && watchdog_thresh) 586 if (watchdog_user_enabled && watchdog_thresh)
538 err = watchdog_enable_all_cpus(); 587 err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
539 else 588 else
540 watchdog_disable_all_cpus(); 589 watchdog_disable_all_cpus();
541 590
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
544 watchdog_thresh = old_thresh; 593 watchdog_thresh = old_thresh;
545 watchdog_user_enabled = old_enabled; 594 watchdog_user_enabled = old_enabled;
546 } 595 }
547 596out:
597 mutex_unlock(&watchdog_proc_mutex);
548 return err; 598 return err;
549} 599}
550#endif /* CONFIG_SYSCTL */ 600#endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
554 set_sample_period(); 604 set_sample_period();
555 605
556 if (watchdog_user_enabled) 606 if (watchdog_user_enabled)
557 watchdog_enable_all_cpus(); 607 watchdog_enable_all_cpus(false);
558} 608}
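
proc_dowatchdog() above gains a function-local static mutex so concurrent writers cannot interleave the read of the old values with the write of the new ones. A userspace analogue of that serialization, with pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static int watchdog_thresh = 10;
static int watchdog_user_enabled = 1;

static int proc_dowatchdog_sketch(int new_thresh)
{
	static pthread_mutex_t watchdog_proc_mutex = PTHREAD_MUTEX_INITIALIZER;
	int old_thresh;

	pthread_mutex_lock(&watchdog_proc_mutex);
	old_thresh = watchdog_thresh;
	watchdog_thresh = new_thresh;

	/* mirror the hunk above: restart timers only on a real change */
	if (watchdog_user_enabled && watchdog_thresh &&
	    old_thresh != watchdog_thresh)
		puts("sample period changed: updating timers on all cpus");

	pthread_mutex_unlock(&watchdog_proc_mutex);
	return 0;
}

int main(void)
{
	return proc_dowatchdog_sketch(20);
}
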
diff --git a/lib/kobject.c b/lib/kobject.c
index a5a9b13b0648..151089788c21 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -1006,10 +1006,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
1006 1006
1007bool kobj_ns_current_may_mount(enum kobj_ns_type type) 1007bool kobj_ns_current_may_mount(enum kobj_ns_type type)
1008{ 1008{
1009 bool may_mount = false; 1009 bool may_mount = true;
1010
1011 if (type == KOBJ_NS_TYPE_NONE)
1012 return true;
1013 1010
1014 spin_lock(&kobj_ns_type_lock); 1011 spin_lock(&kobj_ns_type_lock);
1015 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && 1012 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
diff --git a/lib/lockref.c b/lib/lockref.c
index 677d036cf3c7..6f9d434c1521 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,6 +4,22 @@
4#ifdef CONFIG_CMPXCHG_LOCKREF 4#ifdef CONFIG_CMPXCHG_LOCKREF
5 5
6/* 6/*
7 * Allow weakly-ordered memory architectures to provide barrier-less
8 * cmpxchg semantics for lockref updates.
9 */
10#ifndef cmpxchg64_relaxed
11# define cmpxchg64_relaxed cmpxchg64
12#endif
13
14/*
15 * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
16 * This is useful for architectures with an expensive cpu_relax().
17 */
18#ifndef arch_mutex_cpu_relax
19# define arch_mutex_cpu_relax() cpu_relax()
20#endif
21
22/*
7 * Note that the "cmpxchg()" reloads the "old" value for the 23 * Note that the "cmpxchg()" reloads the "old" value for the
8 * failure case. 24 * failure case.
9 */ 25 */
@@ -14,12 +30,13 @@
14 while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \ 30 while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
15 struct lockref new = old, prev = old; \ 31 struct lockref new = old, prev = old; \
16 CODE \ 32 CODE \
17 old.lock_count = cmpxchg64(&lockref->lock_count, \ 33 old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
18 old.lock_count, new.lock_count); \ 34 old.lock_count, \
35 new.lock_count); \
19 if (likely(old.lock_count == prev.lock_count)) { \ 36 if (likely(old.lock_count == prev.lock_count)) { \
20 SUCCESS; \ 37 SUCCESS; \
21 } \ 38 } \
22 cpu_relax(); \ 39 arch_mutex_cpu_relax(); \
23 } \ 40 } \
24} while (0) 41} while (0)
25 42
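
The lockref change above swaps cmpxchg64() for a relaxed variant and cpu_relax() for the overridable mutex hint inside the retry loop. A userspace sketch of that loop's shape using C11 atomics (the packed {lock, count} word and the relax stand-in are simplifications):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#ifndef arch_mutex_cpu_relax
# define arch_mutex_cpu_relax() ((void)0)	/* stand-in for cpu_relax() */
#endif

static _Atomic uint64_t lock_count;	/* low 32 bits: lock, high 32: count */

static void lockref_get_sketch(void)
{
	uint64_t old = atomic_load_explicit(&lock_count, memory_order_relaxed);

	while ((uint32_t)old == 0) {	/* fast path only while "unlocked" */
		uint64_t new = old + ((uint64_t)1 << 32);	/* bump count */

		/* relaxed CAS, as cmpxchg64_relaxed permits above; on
		 * failure, old is reloaded with the current value */
		if (atomic_compare_exchange_weak_explicit(&lock_count, &old,
							  new,
							  memory_order_relaxed,
							  memory_order_relaxed))
			return;
		arch_mutex_cpu_relax();	/* back off, then retry */
	}
	/* slow path in the real code: fall back to the spinlock */
}

int main(void)
{
	lockref_get_sketch();
	printf("count = %u\n", (uint32_t)(lock_count >> 32));
	return 0;
}
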
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d5ff3ce13029..1c52ddbc839b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -39,6 +39,7 @@
39#include <linux/limits.h> 39#include <linux/limits.h>
40#include <linux/export.h> 40#include <linux/export.h>
41#include <linux/mutex.h> 41#include <linux/mutex.h>
42#include <linux/rbtree.h>
42#include <linux/slab.h> 43#include <linux/slab.h>
43#include <linux/swap.h> 44#include <linux/swap.h>
44#include <linux/swapops.h> 45#include <linux/swapops.h>
@@ -160,6 +161,10 @@ struct mem_cgroup_per_zone {
160 161
161 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1]; 162 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
162 163
164 struct rb_node tree_node; /* RB tree node */
 165 unsigned long long usage_in_excess; /* Set to the value by which */
 166 /* the soft limit is exceeded */
167 bool on_tree;
163 struct mem_cgroup *memcg; /* Back pointer, we cannot */ 168 struct mem_cgroup *memcg; /* Back pointer, we cannot */
164 /* use container_of */ 169 /* use container_of */
165}; 170};
@@ -168,6 +173,26 @@ struct mem_cgroup_per_node {
168 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; 173 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
169}; 174};
170 175
176/*
 177 * Cgroups above their limits are maintained in an RB-tree, independent of
178 * their hierarchy representation
179 */
180
181struct mem_cgroup_tree_per_zone {
182 struct rb_root rb_root;
183 spinlock_t lock;
184};
185
186struct mem_cgroup_tree_per_node {
187 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
188};
189
190struct mem_cgroup_tree {
191 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
192};
193
194static struct mem_cgroup_tree soft_limit_tree __read_mostly;
195
171struct mem_cgroup_threshold { 196struct mem_cgroup_threshold {
172 struct eventfd_ctx *eventfd; 197 struct eventfd_ctx *eventfd;
173 u64 threshold; 198 u64 threshold;
@@ -303,22 +328,6 @@ struct mem_cgroup {
303 atomic_t numainfo_events; 328 atomic_t numainfo_events;
304 atomic_t numainfo_updating; 329 atomic_t numainfo_updating;
305#endif 330#endif
306 /*
307 * Protects soft_contributed transitions.
308 * See mem_cgroup_update_soft_limit
309 */
310 spinlock_t soft_lock;
311
312 /*
313 * If true then this group has increased parents' children_in_excess
314 * when it got over the soft limit.
 315 * When a group falls below the soft limit, parents' children_in_excess
316 * is decreased and soft_contributed changed to false.
317 */
318 bool soft_contributed;
319
320 /* Number of children that are in soft limit excess */
321 atomic_t children_in_excess;
322 331
323 struct mem_cgroup_per_node *nodeinfo[0]; 332 struct mem_cgroup_per_node *nodeinfo[0];
324 /* WARNING: nodeinfo must be the last member here */ 333 /* WARNING: nodeinfo must be the last member here */
@@ -422,6 +431,7 @@ static bool move_file(void)
422 * limit reclaim to prevent infinite loops, if they ever occur. 431 * limit reclaim to prevent infinite loops, if they ever occur.
423 */ 432 */
424#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100 433#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
434#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
425 435
426enum charge_type { 436enum charge_type {
427 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 437 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -648,6 +658,164 @@ page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
648 return mem_cgroup_zoneinfo(memcg, nid, zid); 658 return mem_cgroup_zoneinfo(memcg, nid, zid);
649} 659}
650 660
661static struct mem_cgroup_tree_per_zone *
662soft_limit_tree_node_zone(int nid, int zid)
663{
664 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
665}
666
667static struct mem_cgroup_tree_per_zone *
668soft_limit_tree_from_page(struct page *page)
669{
670 int nid = page_to_nid(page);
671 int zid = page_zonenum(page);
672
673 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
674}
675
676static void
677__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
678 struct mem_cgroup_per_zone *mz,
679 struct mem_cgroup_tree_per_zone *mctz,
680 unsigned long long new_usage_in_excess)
681{
682 struct rb_node **p = &mctz->rb_root.rb_node;
683 struct rb_node *parent = NULL;
684 struct mem_cgroup_per_zone *mz_node;
685
686 if (mz->on_tree)
687 return;
688
689 mz->usage_in_excess = new_usage_in_excess;
690 if (!mz->usage_in_excess)
691 return;
692 while (*p) {
693 parent = *p;
694 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
695 tree_node);
696 if (mz->usage_in_excess < mz_node->usage_in_excess)
697 p = &(*p)->rb_left;
698 /*
699 * We can't avoid mem cgroups that are over their soft
700 * limit by the same amount
701 */
702 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
703 p = &(*p)->rb_right;
704 }
705 rb_link_node(&mz->tree_node, parent, p);
706 rb_insert_color(&mz->tree_node, &mctz->rb_root);
707 mz->on_tree = true;
708}
709
710static void
711__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
712 struct mem_cgroup_per_zone *mz,
713 struct mem_cgroup_tree_per_zone *mctz)
714{
715 if (!mz->on_tree)
716 return;
717 rb_erase(&mz->tree_node, &mctz->rb_root);
718 mz->on_tree = false;
719}
720
721static void
722mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
723 struct mem_cgroup_per_zone *mz,
724 struct mem_cgroup_tree_per_zone *mctz)
725{
726 spin_lock(&mctz->lock);
727 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
728 spin_unlock(&mctz->lock);
729}
730
731
732static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
733{
734 unsigned long long excess;
735 struct mem_cgroup_per_zone *mz;
736 struct mem_cgroup_tree_per_zone *mctz;
737 int nid = page_to_nid(page);
738 int zid = page_zonenum(page);
739 mctz = soft_limit_tree_from_page(page);
740
741 /*
 742 * Necessary to update all ancestors when hierarchy is used,
 743 * because their event counter is not touched.
744 */
745 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
746 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
747 excess = res_counter_soft_limit_excess(&memcg->res);
748 /*
749 * We have to update the tree if mz is on RB-tree or
750 * mem is over its softlimit.
751 */
752 if (excess || mz->on_tree) {
753 spin_lock(&mctz->lock);
754 /* if on-tree, remove it */
755 if (mz->on_tree)
756 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
757 /*
758 * Insert again. mz->usage_in_excess will be updated.
759 * If excess is 0, no tree ops.
760 */
761 __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
762 spin_unlock(&mctz->lock);
763 }
764 }
765}
766
767static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
768{
769 int node, zone;
770 struct mem_cgroup_per_zone *mz;
771 struct mem_cgroup_tree_per_zone *mctz;
772
773 for_each_node(node) {
774 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
775 mz = mem_cgroup_zoneinfo(memcg, node, zone);
776 mctz = soft_limit_tree_node_zone(node, zone);
777 mem_cgroup_remove_exceeded(memcg, mz, mctz);
778 }
779 }
780}
781
782static struct mem_cgroup_per_zone *
783__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
784{
785 struct rb_node *rightmost = NULL;
786 struct mem_cgroup_per_zone *mz;
787
788retry:
789 mz = NULL;
790 rightmost = rb_last(&mctz->rb_root);
791 if (!rightmost)
792 goto done; /* Nothing to reclaim from */
793
794 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
795 /*
796 * Remove the node now but someone else can add it back,
 797 * we will add it back at the end of reclaim to its correct
798 * position in the tree.
799 */
800 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
801 if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
802 !css_tryget(&mz->memcg->css))
803 goto retry;
804done:
805 return mz;
806}
807
808static struct mem_cgroup_per_zone *
809mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
810{
811 struct mem_cgroup_per_zone *mz;
812
813 spin_lock(&mctz->lock);
814 mz = __mem_cgroup_largest_soft_limit_node(mctz);
815 spin_unlock(&mctz->lock);
816 return mz;
817}
818
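
The helpers above implement the selection policy this patch restores: per-zone entries are keyed by usage_in_excess and reclaim always starts from the rightmost node, i.e. the group furthest over its soft limit, while groups with zero excess never enter the tree. A userspace sketch of that ordering, with qsort standing in for the RB-tree walk:

#include <stdio.h>
#include <stdlib.h>

struct mz_sketch {
	const char *name;
	unsigned long long usage_in_excess;
};

static int by_excess(const void *a, const void *b)
{
	const struct mz_sketch *x = a, *y = b;

	return (x->usage_in_excess > y->usage_in_excess) -
	       (x->usage_in_excess < y->usage_in_excess);
}

int main(void)
{
	struct mz_sketch tree[] = {
		{ "memcg-a", 4096 },
		{ "memcg-b", 1048576 },
		{ "memcg-c", 65536 },
	};
	size_t n = sizeof(tree) / sizeof(tree[0]);

	qsort(tree, n, sizeof(tree[0]), by_excess);
	/* rightmost == largest excess: reclaim from the worst offender */
	printf("reclaim first from %s (excess %llu)\n",
	       tree[n - 1].name, tree[n - 1].usage_in_excess);
	return 0;
}
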
651/* 819/*
652 * Implementation Note: reading percpu statistics for memcg. 820 * Implementation Note: reading percpu statistics for memcg.
653 * 821 *
@@ -822,48 +990,6 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
822} 990}
823 991
824/* 992/*
825 * Called from rate-limited memcg_check_events when enough
826 * MEM_CGROUP_TARGET_SOFTLIMIT events are accumulated and it makes sure
827 * that all the parents up the hierarchy will be notified that this group
 828 * is in excess or that it is not in excess anymore. memcg->soft_contributed
829 * makes the transition a single action whenever the state flips from one to
830 * the other.
831 */
832static void mem_cgroup_update_soft_limit(struct mem_cgroup *memcg)
833{
834 unsigned long long excess = res_counter_soft_limit_excess(&memcg->res);
835 struct mem_cgroup *parent = memcg;
836 int delta = 0;
837
838 spin_lock(&memcg->soft_lock);
839 if (excess) {
840 if (!memcg->soft_contributed) {
841 delta = 1;
842 memcg->soft_contributed = true;
843 }
844 } else {
845 if (memcg->soft_contributed) {
846 delta = -1;
847 memcg->soft_contributed = false;
848 }
849 }
850
851 /*
852 * Necessary to update all ancestors when hierarchy is used
853 * because their event counter is not touched.
854 * We track children even outside the hierarchy for the root
855 * cgroup because tree walk starting at root should visit
 856 * all cgroups and we want to prevent a pointless tree
 857 * walk if no children are below the limit.
858 */
859 while (delta && (parent = parent_mem_cgroup(parent)))
860 atomic_add(delta, &parent->children_in_excess);
861 if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
862 atomic_add(delta, &root_mem_cgroup->children_in_excess);
863 spin_unlock(&memcg->soft_lock);
864}
865
866/*
867 * Check events in order. 993 * Check events in order.
868 * 994 *
869 */ 995 */
@@ -886,7 +1012,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
886 1012
887 mem_cgroup_threshold(memcg); 1013 mem_cgroup_threshold(memcg);
888 if (unlikely(do_softlimit)) 1014 if (unlikely(do_softlimit))
889 mem_cgroup_update_soft_limit(memcg); 1015 mem_cgroup_update_tree(memcg, page);
890#if MAX_NUMNODES > 1 1016#if MAX_NUMNODES > 1
891 if (unlikely(do_numainfo)) 1017 if (unlikely(do_numainfo))
892 atomic_inc(&memcg->numainfo_events); 1018 atomic_inc(&memcg->numainfo_events);
@@ -929,15 +1055,6 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
929 return memcg; 1055 return memcg;
930} 1056}
931 1057
932static enum mem_cgroup_filter_t
933mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
934 mem_cgroup_iter_filter cond)
935{
936 if (!cond)
937 return VISIT;
938 return cond(memcg, root);
939}
940
941/* 1058/*
942 * Returns a next (in a pre-order walk) alive memcg (with elevated css 1059 * Returns a next (in a pre-order walk) alive memcg (with elevated css
943 * ref. count) or NULL if the whole root's subtree has been visited. 1060 * ref. count) or NULL if the whole root's subtree has been visited.
@@ -945,7 +1062,7 @@ mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
945 * helper function to be used by mem_cgroup_iter 1062 * helper function to be used by mem_cgroup_iter
946 */ 1063 */
947static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root, 1064static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
948 struct mem_cgroup *last_visited, mem_cgroup_iter_filter cond) 1065 struct mem_cgroup *last_visited)
949{ 1066{
950 struct cgroup_subsys_state *prev_css, *next_css; 1067 struct cgroup_subsys_state *prev_css, *next_css;
951 1068
@@ -963,31 +1080,11 @@ skip_node:
963 if (next_css) { 1080 if (next_css) {
964 struct mem_cgroup *mem = mem_cgroup_from_css(next_css); 1081 struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
965 1082
966 switch (mem_cgroup_filter(mem, root, cond)) { 1083 if (css_tryget(&mem->css))
967 case SKIP: 1084 return mem;
1085 else {
968 prev_css = next_css; 1086 prev_css = next_css;
969 goto skip_node; 1087 goto skip_node;
970 case SKIP_TREE:
971 if (mem == root)
972 return NULL;
973 /*
974 * css_rightmost_descendant is not an optimal way to
975 * skip through a subtree (especially for imbalanced
976 * trees leaning to right) but that's what we have right
977 * now. More effective solution would be traversing
978 * right-up for first non-NULL without calling
979 * css_next_descendant_pre afterwards.
980 */
981 prev_css = css_rightmost_descendant(next_css);
982 goto skip_node;
983 case VISIT:
984 if (css_tryget(&mem->css))
985 return mem;
986 else {
987 prev_css = next_css;
988 goto skip_node;
989 }
990 break;
991 } 1088 }
992 } 1089 }
993 1090
@@ -1051,7 +1148,6 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1051 * @root: hierarchy root 1148 * @root: hierarchy root
1052 * @prev: previously returned memcg, NULL on first invocation 1149 * @prev: previously returned memcg, NULL on first invocation
1053 * @reclaim: cookie for shared reclaim walks, NULL for full walks 1150 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1054 * @cond: filter for visited nodes, NULL for no filter
1055 * 1151 *
1056 * Returns references to children of the hierarchy below @root, or 1152 * Returns references to children of the hierarchy below @root, or
1057 * @root itself, or %NULL after a full round-trip. 1153 * @root itself, or %NULL after a full round-trip.
@@ -1064,18 +1160,15 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1064 * divide up the memcgs in the hierarchy among all concurrent 1160 * divide up the memcgs in the hierarchy among all concurrent
1065 * reclaimers operating on the same zone and priority. 1161 * reclaimers operating on the same zone and priority.
1066 */ 1162 */
1067struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root, 1163struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1068 struct mem_cgroup *prev, 1164 struct mem_cgroup *prev,
1069 struct mem_cgroup_reclaim_cookie *reclaim, 1165 struct mem_cgroup_reclaim_cookie *reclaim)
1070 mem_cgroup_iter_filter cond)
1071{ 1166{
1072 struct mem_cgroup *memcg = NULL; 1167 struct mem_cgroup *memcg = NULL;
1073 struct mem_cgroup *last_visited = NULL; 1168 struct mem_cgroup *last_visited = NULL;
1074 1169
1075 if (mem_cgroup_disabled()) { 1170 if (mem_cgroup_disabled())
1076 /* first call must return non-NULL, second return NULL */ 1171 return NULL;
1077 return (struct mem_cgroup *)(unsigned long)!prev;
1078 }
1079 1172
1080 if (!root) 1173 if (!root)
1081 root = root_mem_cgroup; 1174 root = root_mem_cgroup;
@@ -1086,9 +1179,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
1086 if (!root->use_hierarchy && root != root_mem_cgroup) { 1179 if (!root->use_hierarchy && root != root_mem_cgroup) {
1087 if (prev) 1180 if (prev)
1088 goto out_css_put; 1181 goto out_css_put;
1089 if (mem_cgroup_filter(root, root, cond) == VISIT) 1182 return root;
1090 return root;
1091 return NULL;
1092 } 1183 }
1093 1184
1094 rcu_read_lock(); 1185 rcu_read_lock();
@@ -1111,7 +1202,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
1111 last_visited = mem_cgroup_iter_load(iter, root, &seq); 1202 last_visited = mem_cgroup_iter_load(iter, root, &seq);
1112 } 1203 }
1113 1204
1114 memcg = __mem_cgroup_iter_next(root, last_visited, cond); 1205 memcg = __mem_cgroup_iter_next(root, last_visited);
1115 1206
1116 if (reclaim) { 1207 if (reclaim) {
1117 mem_cgroup_iter_update(iter, last_visited, memcg, seq); 1208 mem_cgroup_iter_update(iter, last_visited, memcg, seq);
@@ -1122,11 +1213,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
1122 reclaim->generation = iter->generation; 1213 reclaim->generation = iter->generation;
1123 } 1214 }
1124 1215
1125 /* 1216 if (prev && !memcg)
1126 * We have finished the whole tree walk or no group has been
1127 * visited because filter told us to skip the root node.
1128 */
1129 if (!memcg && (prev || (cond && !last_visited)))
1130 goto out_unlock; 1217 goto out_unlock;
1131 } 1218 }
1132out_unlock: 1219out_unlock:
@@ -1767,7 +1854,6 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1767 return total; 1854 return total;
1768} 1855}
1769 1856
1770#if MAX_NUMNODES > 1
1771/** 1857/**
1772 * test_mem_cgroup_node_reclaimable 1858 * test_mem_cgroup_node_reclaimable
1773 * @memcg: the target memcg 1859 * @memcg: the target memcg
@@ -1790,6 +1876,7 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1790 return false; 1876 return false;
1791 1877
1792} 1878}
1879#if MAX_NUMNODES > 1
1793 1880
1794/* 1881/*
1795 * Always updating the nodemask is not very good - even if we have an empty 1882 * Always updating the nodemask is not very good - even if we have an empty
@@ -1857,50 +1944,104 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1857 return node; 1944 return node;
1858} 1945}
1859 1946
1947/*
 1948 * Check all nodes for whether they contain reclaimable pages.
1949 * For quick scan, we make use of scan_nodes. This will allow us to skip
 1950 * unused nodes. But scan_nodes is lazily updated and may not contain
 1951 * enough new information. We need to double check.
1952 */
1953static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1954{
1955 int nid;
1956
1957 /*
 1958 * quick check... making use of scan_nodes.
1959 * We can skip unused nodes.
1960 */
1961 if (!nodes_empty(memcg->scan_nodes)) {
1962 for (nid = first_node(memcg->scan_nodes);
1963 nid < MAX_NUMNODES;
1964 nid = next_node(nid, memcg->scan_nodes)) {
1965
1966 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1967 return true;
1968 }
1969 }
1970 /*
1971 * Check rest of nodes.
1972 */
1973 for_each_node_state(nid, N_MEMORY) {
1974 if (node_isset(nid, memcg->scan_nodes))
1975 continue;
1976 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1977 return true;
1978 }
1979 return false;
1980}
1981
1860#else 1982#else
1861int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1983int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1862{ 1984{
1863 return 0; 1985 return 0;
1864} 1986}
1865 1987
1866#endif 1988static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1867
1868/*
1869 * A group is eligible for the soft limit reclaim under the given root
1870 * hierarchy if
1871 * a) it is over its soft limit
1872 * b) any parent up the hierarchy is over its soft limit
1873 *
1874 * If the given group doesn't have any children over the limit then it
1875 * doesn't make any sense to iterate its subtree.
1876 */
1877enum mem_cgroup_filter_t
1878mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
1879 struct mem_cgroup *root)
1880{ 1989{
1881 struct mem_cgroup *parent; 1990 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1882 1991}
1883 if (!memcg) 1992#endif
1884 memcg = root_mem_cgroup;
1885 parent = memcg;
1886
1887 if (res_counter_soft_limit_excess(&memcg->res))
1888 return VISIT;
1889 1993
1890 /* 1994static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1891 * If any parent up to the root in the hierarchy is over its soft limit 1995 struct zone *zone,
1892 * then we have to obey and reclaim from this group as well. 1996 gfp_t gfp_mask,
1893 */ 1997 unsigned long *total_scanned)
1894 while ((parent = parent_mem_cgroup(parent))) { 1998{
1895 if (res_counter_soft_limit_excess(&parent->res)) 1999 struct mem_cgroup *victim = NULL;
1896 return VISIT; 2000 int total = 0;
1897 if (parent == root) 2001 int loop = 0;
2002 unsigned long excess;
2003 unsigned long nr_scanned;
2004 struct mem_cgroup_reclaim_cookie reclaim = {
2005 .zone = zone,
2006 .priority = 0,
2007 };
2008
2009 excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
2010
2011 while (1) {
2012 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
2013 if (!victim) {
2014 loop++;
2015 if (loop >= 2) {
2016 /*
2017 * If we have not been able to reclaim
2018 * anything, it might because there are
2019 * no reclaimable pages under this hierarchy
2020 */
2021 if (!total)
2022 break;
2023 /*
2024 * We want to do more targeted reclaim.
 2025 * excess >> 2 is not too excessive, so we do not
 2026 * reclaim too much, nor too little, so we do not keep
2027 * coming back to reclaim from this cgroup
2028 */
2029 if (total >= (excess >> 2) ||
2030 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
2031 break;
2032 }
2033 continue;
2034 }
2035 if (!mem_cgroup_reclaimable(victim, false))
2036 continue;
2037 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
2038 zone, &nr_scanned);
2039 *total_scanned += nr_scanned;
2040 if (!res_counter_soft_limit_excess(&root_memcg->res))
1898 break; 2041 break;
1899 } 2042 }
1900 2043 mem_cgroup_iter_break(root_memcg, victim);
1901 if (!atomic_read(&memcg->children_in_excess)) 2044 return total;
1902 return SKIP_TREE;
1903 return SKIP;
1904} 2045}
1905 2046
1906static DEFINE_SPINLOCK(memcg_oom_lock); 2047static DEFINE_SPINLOCK(memcg_oom_lock);
@@ -2812,7 +2953,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2812 unlock_page_cgroup(pc); 2953 unlock_page_cgroup(pc);
2813 2954
2814 /* 2955 /*
2815 * "charge_statistics" updated event counter. 2956 * "charge_statistics" updated event counter. Then, check it.
 2957 * Insert the ancestor (and the ancestor's ancestors) into the softlimit
 2958 * RB-tree if they exceed their softlimit.
2816 */ 2959 */
2817 memcg_check_events(memcg, page); 2960 memcg_check_events(memcg, page);
2818} 2961}
@@ -4647,6 +4790,98 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
4647 return ret; 4790 return ret;
4648} 4791}
4649 4792
4793unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
4794 gfp_t gfp_mask,
4795 unsigned long *total_scanned)
4796{
4797 unsigned long nr_reclaimed = 0;
4798 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
4799 unsigned long reclaimed;
4800 int loop = 0;
4801 struct mem_cgroup_tree_per_zone *mctz;
4802 unsigned long long excess;
4803 unsigned long nr_scanned;
4804
4805 if (order > 0)
4806 return 0;
4807
4808 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
4809 /*
 4810 * This loop can run a while, especially if mem_cgroups continuously
4811 * keep exceeding their soft limit and putting the system under
4812 * pressure
4813 */
4814 do {
4815 if (next_mz)
4816 mz = next_mz;
4817 else
4818 mz = mem_cgroup_largest_soft_limit_node(mctz);
4819 if (!mz)
4820 break;
4821
4822 nr_scanned = 0;
4823 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
4824 gfp_mask, &nr_scanned);
4825 nr_reclaimed += reclaimed;
4826 *total_scanned += nr_scanned;
4827 spin_lock(&mctz->lock);
4828
4829 /*
4830 * If we failed to reclaim anything from this memory cgroup
4831 * it is time to move on to the next cgroup
4832 */
4833 next_mz = NULL;
4834 if (!reclaimed) {
4835 do {
4836 /*
4837 * Loop until we find yet another one.
4838 *
4839 * By the time we get the soft_limit lock
 4840 * again, someone might have added the
4841 * group back on the RB tree. Iterate to
4842 * make sure we get a different mem.
4843 * mem_cgroup_largest_soft_limit_node returns
4844 * NULL if no other cgroup is present on
4845 * the tree
4846 */
4847 next_mz =
4848 __mem_cgroup_largest_soft_limit_node(mctz);
4849 if (next_mz == mz)
4850 css_put(&next_mz->memcg->css);
4851 else /* next_mz == NULL or other memcg */
4852 break;
4853 } while (1);
4854 }
4855 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
4856 excess = res_counter_soft_limit_excess(&mz->memcg->res);
4857 /*
4858 * One school of thought says that we should not add
4859 * back the node to the tree if reclaim returns 0.
 4860 * But our reclaim could return 0 simply because, due
 4861 * to priority, we are exposing a smaller subset of
4862 * memory to reclaim from. Consider this as a longer
4863 * term TODO.
4864 */
4865 /* If excess == 0, no tree ops */
4866 __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
4867 spin_unlock(&mctz->lock);
4868 css_put(&mz->memcg->css);
4869 loop++;
4870 /*
4871 * Could not reclaim anything and there are no more
4872 * mem cgroups to try or we seem to be looping without
4873 * reclaiming anything.
4874 */
4875 if (!nr_reclaimed &&
4876 (next_mz == NULL ||
4877 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
4878 break;
4879 } while (!nr_reclaimed);
4880 if (next_mz)
4881 css_put(&next_mz->memcg->css);
4882 return nr_reclaimed;
4883}
4884
4650/** 4885/**
4651 * mem_cgroup_force_empty_list - clears LRU of a group 4886 * mem_cgroup_force_empty_list - clears LRU of a group
4652 * @memcg: group to clear 4887 * @memcg: group to clear
@@ -5911,6 +6146,8 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
5911 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 6146 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5912 mz = &pn->zoneinfo[zone]; 6147 mz = &pn->zoneinfo[zone];
5913 lruvec_init(&mz->lruvec); 6148 lruvec_init(&mz->lruvec);
6149 mz->usage_in_excess = 0;
6150 mz->on_tree = false;
5914 mz->memcg = memcg; 6151 mz->memcg = memcg;
5915 } 6152 }
5916 memcg->nodeinfo[node] = pn; 6153 memcg->nodeinfo[node] = pn;
@@ -5966,6 +6203,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
5966 int node; 6203 int node;
5967 size_t size = memcg_size(); 6204 size_t size = memcg_size();
5968 6205
6206 mem_cgroup_remove_from_trees(memcg);
5969 free_css_id(&mem_cgroup_subsys, &memcg->css); 6207 free_css_id(&mem_cgroup_subsys, &memcg->css);
5970 6208
5971 for_each_node(node) 6209 for_each_node(node)
@@ -6002,6 +6240,29 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
6002} 6240}
6003EXPORT_SYMBOL(parent_mem_cgroup); 6241EXPORT_SYMBOL(parent_mem_cgroup);
6004 6242
6243static void __init mem_cgroup_soft_limit_tree_init(void)
6244{
6245 struct mem_cgroup_tree_per_node *rtpn;
6246 struct mem_cgroup_tree_per_zone *rtpz;
6247 int tmp, node, zone;
6248
6249 for_each_node(node) {
6250 tmp = node;
6251 if (!node_state(node, N_NORMAL_MEMORY))
6252 tmp = -1;
6253 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
6254 BUG_ON(!rtpn);
6255
6256 soft_limit_tree.rb_tree_per_node[node] = rtpn;
6257
6258 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6259 rtpz = &rtpn->rb_tree_per_zone[zone];
6260 rtpz->rb_root = RB_ROOT;
6261 spin_lock_init(&rtpz->lock);
6262 }
6263 }
6264}
6265
6005static struct cgroup_subsys_state * __ref 6266static struct cgroup_subsys_state * __ref
6006mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 6267mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6007{ 6268{
@@ -6031,7 +6292,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6031 mutex_init(&memcg->thresholds_lock); 6292 mutex_init(&memcg->thresholds_lock);
6032 spin_lock_init(&memcg->move_lock); 6293 spin_lock_init(&memcg->move_lock);
6033 vmpressure_init(&memcg->vmpressure); 6294 vmpressure_init(&memcg->vmpressure);
6034 spin_lock_init(&memcg->soft_lock);
6035 6295
6036 return &memcg->css; 6296 return &memcg->css;
6037 6297
@@ -6109,13 +6369,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
6109 6369
6110 mem_cgroup_invalidate_reclaim_iterators(memcg); 6370 mem_cgroup_invalidate_reclaim_iterators(memcg);
6111 mem_cgroup_reparent_charges(memcg); 6371 mem_cgroup_reparent_charges(memcg);
6112 if (memcg->soft_contributed) {
6113 while ((memcg = parent_mem_cgroup(memcg)))
6114 atomic_dec(&memcg->children_in_excess);
6115
6116 if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
6117 atomic_dec(&root_mem_cgroup->children_in_excess);
6118 }
6119 mem_cgroup_destroy_all_caches(memcg); 6372 mem_cgroup_destroy_all_caches(memcg);
6120 vmpressure_cleanup(&memcg->vmpressure); 6373 vmpressure_cleanup(&memcg->vmpressure);
6121} 6374}
@@ -6790,6 +7043,7 @@ static int __init mem_cgroup_init(void)
6790{ 7043{
6791 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 7044 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
6792 enable_swap_cgroup(); 7045 enable_swap_cgroup();
7046 mem_cgroup_soft_limit_tree_init();
6793 memcg_stock_init(); 7047 memcg_stock_init();
6794 return 0; 7048 return 0;
6795} 7049}
diff --git a/mm/mlock.c b/mm/mlock.c
index d63802663242..67ba6da7d0e3 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -736,6 +736,7 @@ static int do_mlockall(int flags)
736 736
737 /* Ignore errors */ 737 /* Ignore errors */
738 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); 738 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
739 cond_resched();
739 } 740 }
740out: 741out:
741 return 0; 742 return 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8ed1b775bdc9..beb35778c69f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -139,23 +139,11 @@ static bool global_reclaim(struct scan_control *sc)
 {
 	return !sc->target_mem_cgroup;
 }
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
-	struct mem_cgroup *root = sc->target_mem_cgroup;
-	return !mem_cgroup_disabled() &&
-		mem_cgroup_soft_reclaim_eligible(root, root) != SKIP_TREE;
-}
 #else
 static bool global_reclaim(struct scan_control *sc)
 {
 	return true;
 }
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
-	return false;
-}
 #endif
 
 unsigned long zone_reclaimable_pages(struct zone *zone)
@@ -2176,11 +2164,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
 	}
 }
 
-static int
-__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
 {
 	unsigned long nr_reclaimed, nr_scanned;
-	int groups_scanned = 0;
 
 	do {
 		struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2188,17 +2174,15 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 			.zone = zone,
 			.priority = sc->priority,
 		};
-		struct mem_cgroup *memcg = NULL;
-		mem_cgroup_iter_filter filter = (soft_reclaim) ?
-			mem_cgroup_soft_reclaim_eligible : NULL;
+		struct mem_cgroup *memcg;
 
 		nr_reclaimed = sc->nr_reclaimed;
 		nr_scanned = sc->nr_scanned;
 
-		while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) {
+		memcg = mem_cgroup_iter(root, NULL, &reclaim);
+		do {
 			struct lruvec *lruvec;
 
-			groups_scanned++;
 			lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
 			shrink_lruvec(lruvec, sc);
@@ -2218,7 +2202,8 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 				mem_cgroup_iter_break(root, memcg);
 				break;
 			}
-		}
+			memcg = mem_cgroup_iter(root, memcg, &reclaim);
+		} while (memcg);
 
 		vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
 			   sc->nr_scanned - nr_scanned,
@@ -2226,37 +2211,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 
 	} while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
 					 sc->nr_scanned - nr_scanned, sc));
-
-	return groups_scanned;
-}
-
-
-static void shrink_zone(struct zone *zone, struct scan_control *sc)
-{
-	bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
-	unsigned long nr_scanned = sc->nr_scanned;
-	int scanned_groups;
-
-	scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
-	/*
-	 * memcg iterator might race with other reclaimer or start from
-	 * a incomplete tree walk so the tree walk in __shrink_zone
-	 * might have missed groups that are above the soft limit. Try
-	 * another loop to catch up with others. Do it just once to
-	 * prevent from reclaim latencies when other reclaimers always
-	 * preempt this one.
-	 */
-	if (do_soft_reclaim && !scanned_groups)
-		__shrink_zone(zone, sc, do_soft_reclaim);
-
-	/*
-	 * No group is over the soft limit or those that are do not have
-	 * pages in the zone we are reclaiming so we have to reclaim everybody
-	 */
-	if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) {
-		__shrink_zone(zone, sc, false);
-		return;
-	}
 }
 
 /* Returns true if compaction should go ahead for a high-order request */
@@ -2320,6 +2274,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 {
 	struct zoneref *z;
 	struct zone *zone;
+	unsigned long nr_soft_reclaimed;
+	unsigned long nr_soft_scanned;
 	bool aborted_reclaim = false;
 
 	/*
@@ -2359,6 +2315,18 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 					continue;
 				}
 			}
+			/*
+			 * This steals pages from memory cgroups over softlimit
+			 * and returns the number of reclaimed pages and
+			 * scanned pages. This works for global memory pressure
+			 * and balancing, not for a memcg's limit.
+			 */
+			nr_soft_scanned = 0;
+			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+						sc->order, sc->gfp_mask,
+						&nr_soft_scanned);
+			sc->nr_reclaimed += nr_soft_reclaimed;
+			sc->nr_scanned += nr_soft_scanned;
 			/* need some check for avoid more shrink_zone() */
 		}
 
@@ -2952,6 +2920,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 {
 	int i;
 	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
+	unsigned long nr_soft_reclaimed;
+	unsigned long nr_soft_scanned;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.priority = DEF_PRIORITY,
@@ -3066,6 +3036,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 
 		sc.nr_scanned = 0;
 
+		nr_soft_scanned = 0;
+		/*
+		 * Call soft limit reclaim before calling shrink_zone.
+		 */
+		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+							order, sc.gfp_mask,
+							&nr_soft_scanned);
+		sc.nr_reclaimed += nr_soft_reclaimed;
+
 		/*
 		 * There should be no need to raise the scanning
 		 * priority if enough pages are already being scanned
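
Editor's note: the shrink_zone() hunks above revert to the plain mem_cgroup_iter() cursor protocol: the first call passes NULL, each later call passes the previous position, NULL marks the end of the walk, and mem_cgroup_iter_break() covers early exits. The do/while shape, reduced to a self-contained toy iterator (all names here are hypothetical, not the kernel's):

	#include <stddef.h>
	#include <stdio.h>

	struct node {
		const char *name;
		struct node *next;	/* flattened pre-order of the hierarchy */
	};

	/* prev == NULL starts at root; later calls resume after prev;
	 * a NULL return means the walk is done. */
	static struct node *iter_next(struct node *root, struct node *prev)
	{
		return prev ? prev->next : root;
	}

	int main(void)
	{
		struct node c = { "child2", NULL };
		struct node b = { "child1", &c };
		struct node a = { "root", &b };
		struct node *pos = iter_next(&a, NULL);

		do {
			printf("visit %s\n", pos->name);
			pos = iter_next(&a, pos);
		} while (pos);
		return 0;
	}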
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 47016c304c84..66cad506b8a2 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3975,8 +3975,8 @@ sub string_find_replace {
 # check for new externs in .h files.
 	if ($realfile =~ /\.h$/ &&
 	    $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
-		if (WARN("AVOID_EXTERNS",
+		if (CHK("AVOID_EXTERNS",
 			 "extern prototypes should be avoided in .h files\n" . $herecurr) &&
 		    $fix) {
 			$fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/;
 		}
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 98969541cbcc..bea523a5d852 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -139,6 +139,18 @@ static int snd_compr_open(struct inode *inode, struct file *f)
 static int snd_compr_free(struct inode *inode, struct file *f)
 {
 	struct snd_compr_file *data = f->private_data;
+	struct snd_compr_runtime *runtime = data->stream.runtime;
+
+	switch (runtime->state) {
+	case SNDRV_PCM_STATE_RUNNING:
+	case SNDRV_PCM_STATE_DRAINING:
+	case SNDRV_PCM_STATE_PAUSED:
+		data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
+		break;
+	default:
+		break;
+	}
+
 	data->stream.ops->free(&data->stream);
 	kfree(data->stream.runtime->buffer);
 	kfree(data->stream.runtime);
@@ -837,7 +849,8 @@ static int snd_compress_dev_disconnect(struct snd_device *device)
 	struct snd_compr *compr;
 
 	compr = device->device_data;
-	snd_unregister_device(compr->direction, compr->card, compr->device);
+	snd_unregister_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
+			      compr->device);
 	return 0;
 }
 
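Editor's note: the snd_compr_free() fix above stops a stream still in a live state (RUNNING, DRAINING, PAUSED) before tearing down its runtime, rather than freeing buffers out from under the DSP. The guard pattern in miniature; every type and op below is a hypothetical stand-in for the ALSA structures, not the real API:

	#include <stdio.h>
	#include <stdlib.h>

	enum state { ST_IDLE, ST_RUNNING, ST_DRAINING, ST_PAUSED };

	struct stream {
		enum state state;
		void (*trigger_stop)(struct stream *);
		char *buffer;
	};

	static void stop(struct stream *s)
	{
		printf("stop trigger\n");
		s->state = ST_IDLE;
	}

	static void stream_free(struct stream *s)
	{
		switch (s->state) {
		case ST_RUNNING:
		case ST_DRAINING:
		case ST_PAUSED:
			s->trigger_stop(s);	/* never free a live stream */
			break;
		default:
			break;
		}
		free(s->buffer);
	}

	int main(void)
	{
		struct stream s = { ST_RUNNING, stop, malloc(16) };

		stream_free(&s);
		return 0;
	}
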
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index b524f89a1f13..18d972501585 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -111,6 +111,9 @@ enum {
 /* 0x0009 - 0x0014 -> 12 test regs */
 /* 0x0015 - visibility reg */
 
+/* Cirrus Logic CS4208 */
+#define CS4208_VENDOR_NID	0x24
+
 /*
  * Cirrus Logic CS4210
  *
@@ -223,6 +226,16 @@ static const struct hda_verb cs_coef_init_verbs[] = {
 	{} /* terminator */
 };
 
+static const struct hda_verb cs4208_coef_init_verbs[] = {
+	{0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
+	{0x24, AC_VERB_SET_PROC_STATE, 0x01},  /* VPW: processing on */
+	{0x24, AC_VERB_SET_COEF_INDEX, 0x0033},
+	{0x24, AC_VERB_SET_PROC_COEF, 0x0001}, /* A1 ICS */
+	{0x24, AC_VERB_SET_COEF_INDEX, 0x0034},
+	{0x24, AC_VERB_SET_PROC_COEF, 0x1C01}, /* A1 Enable, A Thresh = 300mV */
+	{} /* terminator */
+};
+
 /* Errata: CS4207 rev C0/C1/C2 Silicon
  *
  * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
@@ -295,6 +308,8 @@ static int cs_init(struct hda_codec *codec)
 		/* init_verb sequence for C0/C1/C2 errata*/
 		snd_hda_sequence_write(codec, cs_errata_init_verbs);
 		snd_hda_sequence_write(codec, cs_coef_init_verbs);
+	} else if (spec->vendor_nid == CS4208_VENDOR_NID) {
+		snd_hda_sequence_write(codec, cs4208_coef_init_verbs);
 	}
 
 	snd_hda_gen_init(codec);
@@ -434,6 +449,29 @@ static const struct hda_pintbl mba42_pincfgs[] = {
 	{} /* terminator */
 };
 
+static const struct hda_pintbl mba6_pincfgs[] = {
+	{ 0x10, 0x032120f0 }, /* HP */
+	{ 0x11, 0x500000f0 },
+	{ 0x12, 0x90100010 }, /* Speaker */
+	{ 0x13, 0x500000f0 },
+	{ 0x14, 0x500000f0 },
+	{ 0x15, 0x770000f0 },
+	{ 0x16, 0x770000f0 },
+	{ 0x17, 0x430000f0 },
+	{ 0x18, 0x43ab9030 }, /* Mic */
+	{ 0x19, 0x770000f0 },
+	{ 0x1a, 0x770000f0 },
+	{ 0x1b, 0x770000f0 },
+	{ 0x1c, 0x90a00090 },
+	{ 0x1d, 0x500000f0 },
+	{ 0x1e, 0x500000f0 },
+	{ 0x1f, 0x500000f0 },
+	{ 0x20, 0x500000f0 },
+	{ 0x21, 0x430000f0 },
+	{ 0x22, 0x430000f0 },
+	{} /* terminator */
+};
+
 static void cs420x_fixup_gpio_13(struct hda_codec *codec,
 				 const struct hda_fixup *fix, int action)
 {
@@ -556,22 +594,23 @@ static int patch_cs420x(struct hda_codec *codec)
 
 /*
  * CS4208 support:
- * Its layout is no longer compatible with CS4206/CS4207, and the generic
- * parser seems working fairly well, except for trivial fixups.
+ * Its layout is no longer compatible with CS4206/CS4207
  */
 enum {
+	CS4208_MBA6,
 	CS4208_GPIO0,
 };
 
 static const struct hda_model_fixup cs4208_models[] = {
 	{ .id = CS4208_GPIO0, .name = "gpio0" },
+	{ .id = CS4208_MBA6, .name = "mba6" },
 	{}
 };
 
 static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
 	/* codec SSID */
-	SND_PCI_QUIRK(0x106b, 0x7100, "MacBookPro 6,1", CS4208_GPIO0),
-	SND_PCI_QUIRK(0x106b, 0x7200, "MacBookPro 6,2", CS4208_GPIO0),
+	SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
+	SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
 	{} /* terminator */
 };
 
@@ -588,18 +627,35 @@ static void cs4208_fixup_gpio0(struct hda_codec *codec,
 }
 
 static const struct hda_fixup cs4208_fixups[] = {
+	[CS4208_MBA6] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = mba6_pincfgs,
+		.chained = true,
+		.chain_id = CS4208_GPIO0,
+	},
 	[CS4208_GPIO0] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = cs4208_fixup_gpio0,
 	},
 };
 
+/* correct the 0dB offset of input pins */
+static void cs4208_fix_amp_caps(struct hda_codec *codec, hda_nid_t adc)
+{
+	unsigned int caps;
+
+	caps = query_amp_caps(codec, adc, HDA_INPUT);
+	caps &= ~(AC_AMPCAP_OFFSET);
+	caps |= 0x02;
+	snd_hda_override_amp_caps(codec, adc, HDA_INPUT, caps);
+}
+
 static int patch_cs4208(struct hda_codec *codec)
 {
 	struct cs_spec *spec;
 	int err;
 
-	spec = cs_alloc_spec(codec, 0); /* no specific w/a */
+	spec = cs_alloc_spec(codec, CS4208_VENDOR_NID);
 	if (!spec)
 		return -ENOMEM;
 
@@ -609,6 +665,12 @@ static int patch_cs4208(struct hda_codec *codec)
 			   cs4208_fixups);
 	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
 
+	snd_hda_override_wcaps(codec, 0x18,
+			       get_wcaps(codec, 0x18) | AC_WCAP_STEREO);
+	cs4208_fix_amp_caps(codec, 0x18);
+	cs4208_fix_amp_caps(codec, 0x1b);
+	cs4208_fix_amp_caps(codec, 0x1c);
+
 	err = cs_parse_auto_config(codec);
 	if (err < 0)
 		goto error;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 3d8cd04455a6..7ea0245fc6bd 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1149,32 +1149,43 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
 }
 
 static void haswell_config_cvts(struct hda_codec *codec,
-			int pin_id, int mux_id)
+				hda_nid_t pin_nid, int mux_idx)
 {
 	struct hdmi_spec *spec = codec->spec;
-	struct hdmi_spec_per_pin *per_pin;
-	int pin_idx, mux_idx;
-	int curr;
-	int err;
+	hda_nid_t nid, end_nid;
+	int cvt_idx, curr;
+	struct hdmi_spec_per_cvt *per_cvt;
 
-	for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
-		per_pin = get_pin(spec, pin_idx);
+	/* configure all pins, including "no physical connection" ones */
+	end_nid = codec->start_nid + codec->num_nodes;
+	for (nid = codec->start_nid; nid < end_nid; nid++) {
+		unsigned int wid_caps = get_wcaps(codec, nid);
+		unsigned int wid_type = get_wcaps_type(wid_caps);
 
-		if (pin_idx == pin_id)
+		if (wid_type != AC_WID_PIN)
 			continue;
 
-		curr = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
+		if (nid == pin_nid)
+			continue;
+
+		curr = snd_hda_codec_read(codec, nid, 0,
 					  AC_VERB_GET_CONNECT_SEL, 0);
+		if (curr != mux_idx)
+			continue;
 
-		/* Choose another unused converter */
-		if (curr == mux_id) {
-			err = hdmi_choose_cvt(codec, pin_idx, NULL, &mux_idx);
-			if (err < 0)
-				return;
-			snd_printdd("HDMI: choose converter %d for pin %d\n", mux_idx, pin_idx);
-			snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
+		/* choose an unassigned converter. The conveters in the
+		 * connection list are in the same order as in the codec.
+		 */
+		for (cvt_idx = 0; cvt_idx < spec->num_cvts; cvt_idx++) {
+			per_cvt = get_cvt(spec, cvt_idx);
+			if (!per_cvt->assigned) {
+				snd_printdd("choose cvt %d for pin nid %d\n",
+					    cvt_idx, nid);
+				snd_hda_codec_write_cache(codec, nid, 0,
 					AC_VERB_SET_CONNECT_SEL,
-					mux_idx);
+					cvt_idx);
+				break;
+			}
 		}
 	}
 }
@@ -1216,7 +1227,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
 
 	/* configure unused pins to choose other converters */
 	if (is_haswell(codec))
-		haswell_config_cvts(codec, pin_idx, mux_idx);
+		haswell_config_cvts(codec, per_pin->pin_nid, mux_idx);
 
 	snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bc07d369fac4..0e303b99a47c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3439,6 +3439,9 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
 		/* Set to manual mode */
 		val = alc_read_coef_idx(codec, 0x06);
 		alc_write_coef_idx(codec, 0x06, val & ~0x000c);
+		/* Enable Line1 input control by verb */
+		val = alc_read_coef_idx(codec, 0x1a);
+		alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
 		break;
 	}
 }
@@ -3531,6 +3534,7 @@ enum {
 	ALC269VB_FIXUP_ORDISSIMO_EVE2,
 	ALC283_FIXUP_CHROME_BOOK,
 	ALC282_FIXUP_ASUS_TX300,
+	ALC283_FIXUP_INT_MIC,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -3790,6 +3794,16 @@ static const struct hda_fixup alc269_fixups[] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc282_fixup_asus_tx300,
 	},
+	[ALC283_FIXUP_INT_MIC] = {
+		.type = HDA_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
+			{0x20, AC_VERB_SET_COEF_INDEX, 0x1a},
+			{0x20, AC_VERB_SET_PROC_COEF, 0x0011},
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
+	},
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3795static const struct snd_pci_quirk alc269_fixup_tbl[] = { 3809static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -3874,7 +3888,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3874 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3888 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3875 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3889 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3876 SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3890 SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3877 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3891 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
3878 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3892 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3879 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3893 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3880 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 3894 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
diff --git a/tools/lib/lk/debugfs.c b/tools/lib/lk/debugfs.c
index 099e7cd022e4..7c4347962353 100644
--- a/tools/lib/lk/debugfs.c
+++ b/tools/lib/lk/debugfs.c
@@ -5,7 +5,6 @@
 #include <stdbool.h>
 #include <sys/vfs.h>
 #include <sys/mount.h>
-#include <linux/magic.h>
 #include <linux/kernel.h>
 
 #include "debugfs.h"
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
index 9570c2b0f83c..b2519e49424f 100644
--- a/tools/perf/arch/x86/util/tsc.c
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -32,7 +32,7 @@ u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
 int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
 			     struct perf_tsc_conversion *tc)
 {
-	bool cap_usr_time_zero;
+	bool cap_user_time_zero;
 	u32 seq;
 	int i = 0;
 
@@ -42,7 +42,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
 		tc->time_mult = pc->time_mult;
 		tc->time_shift = pc->time_shift;
 		tc->time_zero = pc->time_zero;
-		cap_usr_time_zero = pc->cap_usr_time_zero;
+		cap_user_time_zero = pc->cap_user_time_zero;
 		rmb();
 		if (pc->lock == seq && !(seq & 1))
 			break;
@@ -52,7 +52,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
 		}
 	}
 
-	if (!cap_usr_time_zero)
+	if (!cap_user_time_zero)
 		return -EOPNOTSUPP;
 
 	return 0;
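
Editor's note: the loop patched above reads several fields from a page shared with the kernel under a seqlock-style protocol: sample the sequence word, copy the fields, re-check the sequence, and retry if a writer intervened (an odd value means a writer is mid-update). A self-contained single-threaded illustration of that read side; the shared struct and field names are assumptions, not the perf ABI:

	#include <stdint.h>
	#include <stdio.h>

	struct shared_page {
		volatile uint32_t lock;	/* odd while a writer is mid-update */
		uint64_t time_zero;
	};

	static uint64_t read_time_zero(const struct shared_page *pc)
	{
		uint32_t seq;
		uint64_t v;

		do {
			seq = pc->lock;
			__sync_synchronize();	/* pairs with the writer's barriers */
			v = pc->time_zero;
			__sync_synchronize();
			/* retry if the sequence moved or a writer is active */
		} while (pc->lock != seq || (seq & 1));

		return v;
	}

	int main(void)
	{
		struct shared_page pc = { .lock = 2, .time_zero = 12345 };

		printf("%llu\n", (unsigned long long)read_time_zero(&pc));
		return 0;
	}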
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 423875c999b2..afe377b2884f 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -321,8 +321,6 @@ found:
 	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
 }
 
-extern volatile int session_done;
-
 static void sig_handler(int sig __maybe_unused)
 {
 	session_done = 1;
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index c2dff9cb1f2c..9b5f077fee5b 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -101,7 +101,7 @@ static int setup_cpunode_map(void)
 
 	dir1 = opendir(PATH_SYS_NODE);
 	if (!dir1)
-		return -1;
+		return 0;
 
 	while ((dent1 = readdir(dir1)) != NULL) {
 		if (dent1->d_type != DT_DIR ||
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 8e50d8d77419..72eae7498c09 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -401,8 +401,6 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
 	return 0;
 }
 
-extern volatile int session_done;
-
 static void sig_handler(int sig __maybe_unused)
 {
 	session_done = 1;
@@ -568,6 +566,9 @@ static int __cmd_report(struct perf_report *rep)
 		}
 	}
 
+	if (session_done())
+		return 0;
+
 	if (nr_samples == 0) {
 		ui__error("The %s file has no samples!\n", session->filename);
 		return 0;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 7f31a3ded1b6..9c333ff3dfeb 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -553,8 +553,6 @@ static struct perf_tool perf_script = {
 	.ordering_requires_timestamps = true,
 };
 
-extern volatile int session_done;
-
 static void sig_handler(int sig __maybe_unused)
 {
 	session_done = 1;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index f5aa6375e3e9..71aa3e35406b 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -16,6 +16,23 @@
 #include <sys/mman.h>
 #include <linux/futex.h>
 
+/* For older distros: */
+#ifndef MAP_STACK
+# define MAP_STACK 0x20000
+#endif
+
+#ifndef MADV_HWPOISON
+# define MADV_HWPOISON 100
+#endif
+
+#ifndef MADV_MERGEABLE
+# define MADV_MERGEABLE 12
+#endif
+
+#ifndef MADV_UNMERGEABLE
+# define MADV_UNMERGEABLE 13
+#endif
+
 static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
 					 unsigned long arg,
 					 u8 arg_idx __maybe_unused,
@@ -1038,6 +1055,7 @@ static int trace__replay(struct trace *trace)
 
 	trace->tool.sample = trace__process_sample;
 	trace->tool.mmap = perf_event__process_mmap;
+	trace->tool.mmap2 = perf_event__process_mmap2;
 	trace->tool.comm = perf_event__process_comm;
 	trace->tool.exit = perf_event__process_exit;
 	trace->tool.fork = perf_event__process_fork;
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 214e17e97e5c..5f6f9b3271bb 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -87,7 +87,7 @@ CFLAGS += -Wall
 CFLAGS += -Wextra
 CFLAGS += -std=gnu99
 
-EXTLIBS = -lelf -lpthread -lrt -lm
+EXTLIBS = -lelf -lpthread -lrt -lm -ldl
 
 ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y)
 	CFLAGS += -fstack-protector-all
@@ -180,6 +180,9 @@ FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
 ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
 	CFLAGS += -DLIBELF_MMAP
 endif
+ifeq ($(call try-cc,$(SOURCE_ELF_GETPHDRNUM),$(FLAGS_LIBELF),-DHAVE_ELF_GETPHDRNUM),y)
+	CFLAGS += -DHAVE_ELF_GETPHDRNUM
+endif
 
 # include ARCH specific config
 -include $(src-perf)/arch/$(ARCH)/Makefile
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak
index 708fb8e9822a..d5a8dd44945f 100644
--- a/tools/perf/config/feature-tests.mak
+++ b/tools/perf/config/feature-tests.mak
@@ -61,6 +61,15 @@ int main(void)
 }
 endef
 
+define SOURCE_ELF_GETPHDRNUM
+#include <libelf.h>
+int main(void)
+{
+	size_t dst;
+	return elf_getphdrnum(0, &dst);
+}
+endef
+
 ifndef NO_SLANG
 define SOURCE_SLANG
 #include <slang.h>
@@ -210,6 +219,7 @@ define SOURCE_LIBAUDIT
 
 int main(void)
 {
+	printf(\"error message: %s\n\", audit_errno_to_name(0));
 	return audit_open();
 }
 endef
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index bfc5a27597d6..7eae5488ecea 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -809,7 +809,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
 	end = map__rip_2objdump(map, sym->end);
 
 	offset = line_ip - start;
-	if (offset < 0 || (u64)line_ip > end)
+	if ((u64)line_ip < start || (u64)line_ip > end)
 		offset = -1;
 	else
 		parsed_line = tmp2 + 1;
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 3e5f5430a28a..e23bde19d590 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -263,6 +263,21 @@ bool die_is_signed_type(Dwarf_Die *tp_die)
 }
 
 /**
+ * die_is_func_def - Ensure that this DIE is a subprogram and definition
+ * @dw_die: a DIE
+ *
+ * Ensure that this DIE is a subprogram and NOT a declaration. This
+ * returns true if @dw_die is a function definition.
+ **/
+bool die_is_func_def(Dwarf_Die *dw_die)
+{
+	Dwarf_Attribute attr;
+
+	return (dwarf_tag(dw_die) == DW_TAG_subprogram &&
+		dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
+}
+
+/**
  * die_get_data_member_location - Get the data-member offset
  * @mb_die: a DIE of a member of a data structure
  * @offs: The offset of the member in the data structure
@@ -392,6 +407,10 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
 {
 	struct __addr_die_search_param *ad = data;
 
+	/*
+	 * Since a declaration entry doesn't has given pc, this always returns
+	 * function definition entry.
+	 */
 	if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
 	    dwarf_haspc(fn_die, ad->addr)) {
 		memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 6ce1717784b7..8658d41697d2 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -38,6 +38,9 @@ extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
 extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
 			int (*callback)(Dwarf_Die *, void *), void *data);
 
+/* Ensure that this DIE is a subprogram and definition (not declaration) */
+extern bool die_is_func_def(Dwarf_Die *dw_die);
+
 /* Compare diename and tname */
 extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
 
43 46
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 26441d0e571b..ce69901176d8 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -199,9 +199,11 @@ static int write_buildid(char *name, size_t name_len, u8 *build_id,
 	return write_padded(fd, name, name_len + 1, len);
 }
 
-static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
-				       u16 misc, int fd)
+static int __dsos__write_buildid_table(struct list_head *head,
+				       struct machine *machine,
+				       pid_t pid, u16 misc, int fd)
 {
+	char nm[PATH_MAX];
 	struct dso *pos;
 
 	dsos__for_each_with_build_id(pos, head) {
@@ -215,6 +217,10 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
 		if (is_vdso_map(pos->short_name)) {
 			name = (char *) VDSO__MAP_NAME;
 			name_len = sizeof(VDSO__MAP_NAME) + 1;
+		} else if (dso__is_kcore(pos)) {
+			machine__mmap_name(machine, nm, sizeof(nm));
+			name = nm;
+			name_len = strlen(nm) + 1;
 		} else {
 			name = pos->long_name;
 			name_len = pos->long_name_len + 1;
@@ -240,10 +246,10 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
 		umisc = PERF_RECORD_MISC_GUEST_USER;
 	}
 
-	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
-					  kmisc, fd);
+	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine,
+					  machine->pid, kmisc, fd);
 	if (err == 0)
-		err = __dsos__write_buildid_table(&machine->user_dsos,
-						  machine->pid, umisc, fd);
+		err = __dsos__write_buildid_table(&machine->user_dsos, machine,
+						  machine->pid, umisc, fd);
 	return err;
 }
@@ -375,23 +381,31 @@ out_free:
 	return err;
 }
 
-static int dso__cache_build_id(struct dso *dso, const char *debugdir)
+static int dso__cache_build_id(struct dso *dso, struct machine *machine,
+			       const char *debugdir)
 {
 	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
 	bool is_vdso = is_vdso_map(dso->short_name);
+	char *name = dso->long_name;
+	char nm[PATH_MAX];
 
-	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
-				     dso->long_name, debugdir,
-				     is_kallsyms, is_vdso);
+	if (dso__is_kcore(dso)) {
+		is_kallsyms = true;
+		machine__mmap_name(machine, nm, sizeof(nm));
+		name = nm;
+	}
+	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
+				     debugdir, is_kallsyms, is_vdso);
 }
 
-static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+static int __dsos__cache_build_ids(struct list_head *head,
+				   struct machine *machine, const char *debugdir)
 {
 	struct dso *pos;
 	int err = 0;
 
 	dsos__for_each_with_build_id(pos, head)
-		if (dso__cache_build_id(pos, debugdir))
+		if (dso__cache_build_id(pos, machine, debugdir))
 			err = -1;
 
 	return err;
@@ -399,8 +413,9 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
 
 static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
 {
-	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
-	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
+	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, machine,
+					  debugdir);
+	ret |= __dsos__cache_build_ids(&machine->user_dsos, machine, debugdir);
 	return ret;
 }
 
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 46a0d35a05e1..9ff6cf3e9a99 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -611,6 +611,8 @@ void hists__collapse_resort(struct hists *hists)
 	next = rb_first(root);
 
 	while (next) {
+		if (session_done())
+			break;
 		n = rb_entry(next, struct hist_entry, rb_node_in);
 		next = rb_next(&n->rb_node_in);
 
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 933d14f287ca..6188d2876a71 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -792,7 +792,7 @@ static int machine__create_modules(struct machine *machine)
 		modules = path;
 	}
 
-	if (symbol__restricted_filename(path, "/proc/modules"))
+	if (symbol__restricted_filename(modules, "/proc/modules"))
 		return -1;
 
 	file = fopen(modules, "r");
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index be0329394d56..371476cb8ddc 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -118,7 +118,6 @@ static const Dwfl_Callbacks offline_callbacks = {
 static int debuginfo__init_offline_dwarf(struct debuginfo *self,
 					 const char *path)
 {
-	Dwfl_Module *mod;
 	int fd;
 
 	fd = open(path, O_RDONLY);
@@ -129,11 +128,11 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *self,
 	if (!self->dwfl)
 		goto error;
 
-	mod = dwfl_report_offline(self->dwfl, "", "", fd);
-	if (!mod)
+	self->mod = dwfl_report_offline(self->dwfl, "", "", fd);
+	if (!self->mod)
 		goto error;
 
-	self->dbg = dwfl_module_getdwarf(mod, &self->bias);
+	self->dbg = dwfl_module_getdwarf(self->mod, &self->bias);
 	if (!self->dbg)
 		goto error;
 
@@ -676,37 +675,42 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
 }
 
 /* Convert subprogram DIE to trace point */
-static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr,
-				  bool retprobe, struct probe_trace_point *tp)
+static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
+				  Dwarf_Addr paddr, bool retprobe,
+				  struct probe_trace_point *tp)
 {
 	Dwarf_Addr eaddr, highaddr;
-	const char *name;
-
-	/* Copy the name of probe point */
-	name = dwarf_diename(sp_die);
-	if (name) {
-		if (dwarf_entrypc(sp_die, &eaddr) != 0) {
-			pr_warning("Failed to get entry address of %s\n",
-				   dwarf_diename(sp_die));
-			return -ENOENT;
-		}
-		if (dwarf_highpc(sp_die, &highaddr) != 0) {
-			pr_warning("Failed to get end address of %s\n",
-				   dwarf_diename(sp_die));
-			return -ENOENT;
-		}
-		if (paddr > highaddr) {
-			pr_warning("Offset specified is greater than size of %s\n",
-				   dwarf_diename(sp_die));
-			return -EINVAL;
-		}
-		tp->symbol = strdup(name);
-		if (tp->symbol == NULL)
-			return -ENOMEM;
-		tp->offset = (unsigned long)(paddr - eaddr);
-	} else
-		/* This function has no name. */
-		tp->offset = (unsigned long)paddr;
+	GElf_Sym sym;
+	const char *symbol;
+
+	/* Verify the address is correct */
+	if (dwarf_entrypc(sp_die, &eaddr) != 0) {
+		pr_warning("Failed to get entry address of %s\n",
+			   dwarf_diename(sp_die));
+		return -ENOENT;
+	}
+	if (dwarf_highpc(sp_die, &highaddr) != 0) {
+		pr_warning("Failed to get end address of %s\n",
+			   dwarf_diename(sp_die));
+		return -ENOENT;
+	}
+	if (paddr > highaddr) {
+		pr_warning("Offset specified is greater than size of %s\n",
+			   dwarf_diename(sp_die));
+		return -EINVAL;
+	}
+
+	/* Get an appropriate symbol from symtab */
+	symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
+	if (!symbol) {
+		pr_warning("Failed to find symbol at 0x%lx\n",
+			   (unsigned long)paddr);
+		return -ENOENT;
+	}
+	tp->offset = (unsigned long)(paddr - sym.st_value);
+	tp->symbol = strdup(symbol);
+	if (!tp->symbol)
+		return -ENOMEM;
 
 	/* Return probe must be on the head of a subprogram */
 	if (retprobe) {
@@ -734,7 +738,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
 	}
 
 	/* If not a real subprogram, find a real one */
-	if (dwarf_tag(sc_die) != DW_TAG_subprogram) {
+	if (!die_is_func_def(sc_die)) {
 		if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
 			pr_warning("Failed to find probe point in any "
 				   "functions.\n");
@@ -980,12 +984,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
 	struct dwarf_callback_param *param = data;
 	struct probe_finder *pf = param->data;
 	struct perf_probe_point *pp = &pf->pev->point;
-	Dwarf_Attribute attr;
 
 	/* Check tag and diename */
-	if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
-	    !die_compare_name(sp_die, pp->function) ||
-	    dwarf_attr(sp_die, DW_AT_declaration, &attr))
+	if (!die_is_func_def(sp_die) ||
+	    !die_compare_name(sp_die, pp->function))
 		return DWARF_CB_OK;
 
 	/* Check declared file */
@@ -1151,7 +1153,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
 	tev = &tf->tevs[tf->ntevs++];
 
 	/* Trace point should be converted from subprogram DIE */
-	ret = convert_to_trace_point(&pf->sp_die, pf->addr,
+	ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
 				     pf->pev->point.retprobe, &tev->point);
 	if (ret < 0)
 		return ret;
@@ -1183,7 +1185,7 @@ int debuginfo__find_trace_events(struct debuginfo *self,
 {
 	struct trace_event_finder tf = {
 			.pf = {.pev = pev, .callback = add_probe_trace_event},
-			.max_tevs = max_tevs};
+			.mod = self->mod, .max_tevs = max_tevs};
 	int ret;
 
 	/* Allocate result tevs array */
@@ -1252,7 +1254,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
 	vl = &af->vls[af->nvls++];
 
 	/* Trace point should be converted from subprogram DIE */
-	ret = convert_to_trace_point(&pf->sp_die, pf->addr,
+	ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr,
 				     pf->pev->point.retprobe, &vl->point);
 	if (ret < 0)
 		return ret;
@@ -1291,6 +1293,7 @@ int debuginfo__find_available_vars_at(struct debuginfo *self,
 {
 	struct available_var_finder af = {
 			.pf = {.pev = pev, .callback = add_available_vars},
+			.mod = self->mod,
 			.max_vls = max_vls, .externs = externs};
 	int ret;
 
@@ -1474,7 +1477,7 @@ static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
 	return 0;
 }
 
-/* Search function from function name */
+/* Search function definition from function name */
 static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
 {
 	struct dwarf_callback_param *param = data;
@@ -1485,7 +1488,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
 	if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die)))
 		return DWARF_CB_OK;
 
-	if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
+	if (die_is_func_def(sp_die) &&
 	    die_compare_name(sp_die, lr->function)) {
 		lf->fname = dwarf_decl_file(sp_die);
 		dwarf_decl_line(sp_die, &lr->offset);
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 17e94d0c36f9..3b7d63018960 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -23,6 +23,7 @@ static inline int is_c_varname(const char *name)
 /* debug information structure */
 struct debuginfo {
 	Dwarf		*dbg;
+	Dwfl_Module	*mod;
 	Dwfl		*dwfl;
 	Dwarf_Addr	bias;
 };
@@ -77,6 +78,7 @@ struct probe_finder {
 
 struct trace_event_finder {
 	struct probe_finder	pf;
+	Dwfl_Module		*mod;		/* For solving symbols */
 	struct probe_trace_event *tevs;		/* Found trace events */
 	int			ntevs;		/* Number of trace events */
 	int			max_tevs;	/* Max number of trace events */
@@ -84,6 +86,7 @@ struct trace_event_finder {
 
 struct available_var_finder {
 	struct probe_finder	pf;
+	Dwfl_Module		*mod;		/* For solving symbols */
 	struct variable_list	*vls;		/* Found variable lists */
 	int			nvls;		/* Number of variable lists */
 	int			max_vls;	/* Max no. of variable lists */
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 51f5edf2a6d0..70ffa41518f3 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -531,6 +531,9 @@ static int flush_sample_queue(struct perf_session *s,
 		return 0;
 
 	list_for_each_entry_safe(iter, tmp, head, list) {
+		if (session_done())
+			return 0;
+
 		if (iter->timestamp > limit)
 			break;
 
@@ -1160,7 +1163,6 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
 	}
 }
 
-#define session_done()	(*(volatile int *)(&session_done))
 volatile int session_done;
 
 static int __perf_session__process_pipe_events(struct perf_session *self,
@@ -1372,10 +1374,13 @@ more:
1372 "Processing events..."); 1374 "Processing events...");
1373 } 1375 }
1374 1376
1377 err = 0;
1378 if (session_done())
1379 goto out_err;
1380
1375 if (file_pos < file_size) 1381 if (file_pos < file_size)
1376 goto more; 1382 goto more;
1377 1383
1378 err = 0;
1379 /* do the final flush for ordered samples */ 1384 /* do the final flush for ordered samples */
1380 session->ordered_samples.next_flush = ULLONG_MAX; 1385 session->ordered_samples.next_flush = ULLONG_MAX;
1381 err = flush_sample_queue(session, tool); 1386 err = flush_sample_queue(session, tool);
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 3aa75fb2225f..04bf7373a7e5 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -124,4 +124,8 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
 
 #define perf_session__set_tracepoints_handlers(session, array) \
 	__perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
+
+extern volatile int session_done;
+
+#define session_done()	(*(volatile int *)(&session_done))
 #endif /* __PERF_SESSION_H */
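
Editor's note: the macro hoisted into session.h above forces a fresh load of a flag that a signal handler may set at any moment; casting through volatile stops the compiler from caching the value in a register across a loop. A self-contained demo of the same idiom (the loop exits once Ctrl-C fires the handler; strictly, sig_atomic_t is the portable type for such a flag):

	#include <signal.h>
	#include <stdio.h>

	static int done;
	#define done_flag()	(*(volatile int *)(&done))

	static void sig_handler(int sig)
	{
		(void)sig;
		done = 1;
	}

	int main(void)
	{
		signal(SIGINT, sig_handler);
		while (!done_flag())	/* forced to re-load 'done' each pass */
			;		/* event processing would go here */
		puts("interrupted");
		return 0;
	}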
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index a7b9ab557380..a9c829be5216 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -8,6 +8,22 @@
8#include "symbol.h" 8#include "symbol.h"
9#include "debug.h" 9#include "debug.h"
10 10
11#ifndef HAVE_ELF_GETPHDRNUM
12static int elf_getphdrnum(Elf *elf, size_t *dst)
13{
14 GElf_Ehdr gehdr;
15 GElf_Ehdr *ehdr;
16
17 ehdr = gelf_getehdr(elf, &gehdr);
18 if (!ehdr)
19 return -1;
20
21 *dst = ehdr->e_phnum;
22
23 return 0;
24}
25#endif
26
11#ifndef NT_GNU_BUILD_ID 27#ifndef NT_GNU_BUILD_ID
12#define NT_GNU_BUILD_ID 3 28#define NT_GNU_BUILD_ID 3
13#endif 29#endif
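
Editor's note: the hunk above supplies elf_getphdrnum() only when the SOURCE_ELF_GETPHDRNUM feature test (added to config/Makefile earlier in this diff) did not define HAVE_ELF_GETPHDRNUM. The same compile-time fallback pattern, reduced to a self-contained example; HAVE_COUNT_BITS and count_bits() are invented here purely for illustration:

	#include <stdio.h>

	/* Pretend the build system probes for a library routine and defines
	 * HAVE_COUNT_BITS when the real one exists; otherwise we carry our
	 * own fallback with the same signature. */
	#ifndef HAVE_COUNT_BITS
	static int count_bits(unsigned int v)
	{
		int n = 0;

		while (v) {
			v &= v - 1;	/* clear the lowest set bit */
			n++;
		}
		return n;
	}
	#endif

	int main(void)
	{
		printf("%d\n", count_bits(0xF0u));	/* prints 4 */
		return 0;
	}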
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index fe7a27d67d2b..e9e1c03f927d 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -186,7 +186,7 @@ void parse_proc_kallsyms(struct pevent *pevent,
 	char *next = NULL;
 	char *addr_str;
 	char *mod;
-	char *fmt;
+	char *fmt = NULL;
 
 	line = strtok_r(file, "\n", &next);
 	while (line) {