author    Felipe Balbi <balbi@ti.com>    2013-12-23 12:22:46 -0500
committer Felipe Balbi <balbi@ti.com>    2013-12-23 12:22:46 -0500
commit    e90b8417af0d01cf8c64da6937c914c89ccf6dc1 (patch)
tree      cbc5e3b975b2efbb786e12b91714f8c3c3979316
parent    845c071b7853c0046693022f4e95c9cdd043e2db (diff)
parent    413541dd66d51f791a0b169d9b9014e4f56be13c (diff)
Merge tag 'v3.13-rc5' into next
Linux 3.13-rc5

* tag 'v3.13-rc5': (231 commits)
  Linux 3.13-rc5
  aio: clean up and fix aio_setup_ring page mapping
  aio/migratepages: make aio migrate pages sane
  aio: fix kioctx leak introduced by "aio: Fix a trinity splat"
  Don't set the INITRD_COMPRESS environment variable automatically
  mm: fix build of split ptlock code
  pstore: Don't allow high traffic options on fragile devices
  mm: do not allocate page->ptl dynamically, if spinlock_t fits to long
  mm: page_alloc: revert NUMA aspect of fair allocation policy
  Revert "mm: page_alloc: exclude unreclaimable allocations from zone fairness policy"
  mm: Fix NULL pointer dereference in madvise(MADV_WILLNEED) support
  qla2xxx: Fix scsi_host leak on qlt_lport_register callback failure
  target: Remove extra percpu_ref_init
  arm64: ptrace: avoid using HW_BREAKPOINT_EMPTY for disabled events
  ARC: Allow conditional multiple inclusion of uapi/asm/unistd.h
  target/file: Update hw_max_sectors based on current block_size
  iser-target: Move INIT_WORK setup into isert_create_device_ib_res
  iscsi-target: Fix incorrect np->np_thread NULL assignment
  mm/hugetlb: check for pte NULL pointer in __page_check_address()
  fix build with make 3.80
  ...

Conflicts:
	drivers/usb/phy/Kconfig
Diffstat:
 Documentation/module-signing.txt | 240
 Documentation/networking/ip-sysctl.txt | 8
 MAINTAINERS | 27
 Makefile | 24
 arch/arc/include/uapi/asm/unistd.h | 8
 arch/arm/boot/dts/r8a7790.dtsi | 24
 arch/arm/mach-omap2/display.c | 38
 arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c | 11
 arch/arm/mach-shmobile/board-lager.c | 4
 arch/arm/xen/enlighten.c | 6
 arch/arm64/include/asm/xen/page-coherent.h | 4
 arch/arm64/kernel/ptrace.c | 38
 arch/powerpc/include/asm/kvm_book3s.h | 4
 arch/powerpc/include/asm/kvm_book3s_asm.h | 2
 arch/powerpc/include/asm/opal.h | 4
 arch/powerpc/include/asm/switch_to.h | 2
 arch/powerpc/kernel/asm-offsets.c | 1
 arch/powerpc/kernel/crash_dump.c | 6
 arch/powerpc/kernel/process.c | 32
 arch/powerpc/kernel/ptrace.c | 4
 arch/powerpc/kernel/setup-common.c | 4
 arch/powerpc/kernel/smp.c | 4
 arch/powerpc/kvm/book3s_64_mmu_hv.c | 18
 arch/powerpc/kvm/book3s_hv.c | 24
 arch/powerpc/kvm/book3s_hv_rm_mmu.c | 9
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 23
 arch/powerpc/kvm/book3s_interrupts.S | 19
 arch/powerpc/kvm/book3s_pr.c | 22
 arch/powerpc/kvm/book3s_rmhandlers.S | 6
 arch/powerpc/kvm/booke.c | 12
 arch/powerpc/platforms/powernv/opal-lpc.c | 12
 arch/powerpc/platforms/powernv/opal-xscom.c | 4
 arch/powerpc/platforms/pseries/lparcfg.c | 12
 arch/powerpc/platforms/pseries/msi.c | 28
 arch/powerpc/platforms/pseries/nvram.c | 46
 arch/powerpc/platforms/pseries/pci.c | 8
 arch/sh/lib/Makefile | 2
 arch/sparc/include/asm/pgtable_64.h | 4
 arch/x86/Kconfig | 1
 arch/x86/include/asm/pgtable.h | 11
 arch/x86/include/asm/preempt.h | 11
 arch/x86/kernel/cpu/perf_event.h | 15
 arch/x86/mm/gup.c | 13
 drivers/acpi/apei/erst.c | 1
 drivers/clk/clk-s2mps11.c | 6
 drivers/clocksource/Kconfig | 1
 drivers/clocksource/clksrc-of.c | 1
 drivers/clocksource/dw_apb_timer_of.c | 7
 drivers/clocksource/sun4i_timer.c | 3
 drivers/clocksource/time-armada-370-xp.c | 10
 drivers/dma/Kconfig | 7
 drivers/dma/at_hdmac_regs.h | 4
 drivers/dma/dmaengine.c | 4
 drivers/dma/dmatest.c | 8
 drivers/dma/fsldma.c | 31
 drivers/dma/mv_xor.c | 101
 drivers/dma/pl330.c | 5
 drivers/dma/ppc4xx/adma.c | 27
 drivers/dma/txx9dmac.c | 1
 drivers/firewire/sbp2.c | 1
 drivers/firmware/efi/efi-pstore.c | 1
 drivers/gpio/gpio-msm-v2.c | 4
 drivers/gpio/gpio-rcar.c | 3
 drivers/gpio/gpio-twl4030.c | 15
 drivers/gpu/drm/armada/armada_drm.h | 1
 drivers/gpu/drm/armada/armada_drv.c | 7
 drivers/gpu/drm/armada/armada_fbdev.c | 20
 drivers/gpu/drm/armada/armada_gem.c | 7
 drivers/gpu/drm/drm_edid.c | 8
 drivers/gpu/drm/drm_stub.c | 6
 drivers/gpu/drm/i915/i915_dma.c | 20
 drivers/gpu/drm/i915/i915_drv.c | 1
 drivers/gpu/drm/i915/i915_drv.h | 9
 drivers/gpu/drm/i915/i915_gem_context.c | 16
 drivers/gpu/drm/i915/i915_gem_evict.c | 14
 drivers/gpu/drm/i915/i915_gem_gtt.c | 9
 drivers/gpu/drm/i915/intel_display.c | 7
 drivers/gpu/drm/i915/intel_drv.h | 1
 drivers/gpu/drm/i915/intel_panel.c | 26
 drivers/gpu/drm/i915/intel_pm.c | 29
 drivers/gpu/drm/i915/intel_ringbuffer.c | 1
 drivers/gpu/drm/i915/intel_uncore.c | 1
 drivers/gpu/drm/nouveau/nouveau_drm.c | 6
 drivers/gpu/drm/radeon/atombios_crtc.c | 4
 drivers/gpu/drm/radeon/cik_sdma.c | 2
 drivers/gpu/drm/radeon/radeon_asic.c | 4
 drivers/gpu/drm/radeon/radeon_drv.c | 10
 drivers/gpu/drm/radeon/rs690.c | 10
 drivers/gpu/drm/ttm/ttm_bo_vm.c | 6
 drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 3
 drivers/iio/adc/ad7887.c | 16
 drivers/iio/imu/adis16400_core.c | 7
 drivers/iio/light/cm36651.c | 2
 drivers/infiniband/ulp/isert/ib_isert.c | 22
 drivers/irqchip/irq-renesas-intc-irqpin.c | 8
 drivers/net/can/usb/ems_usb.c | 3
 drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 3
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 47
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 1
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 65
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 19
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 10
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2
 drivers/net/hyperv/netvsc_drv.c | 1
 drivers/net/xen-netback/netback.c | 3
 drivers/phy/Kconfig | 4
 drivers/phy/phy-core.c | 26
 drivers/pinctrl/sh-pfc/sh_pfc.h | 2
 drivers/regulator/s2mps11.c | 2
 drivers/scsi/qla2xxx/qla_target.c | 10
 drivers/staging/comedi/drivers.c | 2
 drivers/staging/comedi/drivers/8255_pci.c | 15
 drivers/staging/iio/magnetometer/hmc5843.c | 7
 drivers/staging/imx-drm/imx-drm-core.c | 39
 drivers/staging/imx-drm/imx-tve.c | 9
 drivers/staging/imx-drm/ipu-v3/ipu-common.c | 32
 drivers/target/iscsi/iscsi_target.c | 27
 drivers/target/iscsi/iscsi_target_configfs.c | 3
 drivers/target/iscsi/iscsi_target_login.c | 6
 drivers/target/target_core_device.c | 5
 drivers/target/target_core_file.c | 8
 drivers/target/target_core_file.h | 5
 drivers/target/target_core_tpg.c | 10
 drivers/tty/n_tty.c | 7
 drivers/tty/serial/8250/8250_dw.c | 8
 drivers/tty/serial/xilinx_uartps.c | 2
 drivers/tty/tty_ldsem.c | 16
 drivers/usb/chipidea/core.c | 4
 drivers/usb/chipidea/host.c | 3
 drivers/usb/chipidea/udc.c | 3
 drivers/usb/class/cdc-wdm.c | 8
 drivers/usb/dwc3/core.c | 8
 drivers/usb/host/ohci-at91.c | 26
 drivers/usb/host/xhci-pci.c | 7
 drivers/usb/phy/Kconfig | 4
 drivers/usb/phy/phy-tegra-usb.c | 2
 drivers/usb/phy/phy-twl6030-usb.c | 3
 drivers/usb/serial/option.c | 2
 drivers/usb/serial/zte_ev.c | 3
 drivers/xen/balloon.c | 63
 drivers/xen/grant-table.c | 3
 drivers/xen/privcmd.c | 9
 fs/aio.c | 113
 fs/ceph/addr.c | 8
 fs/ceph/inode.c | 136
 fs/pstore/platform.c | 7
 fs/sysfs/file.c | 8
 fs/xfs/xfs_bmap.c | 32
 fs/xfs/xfs_bmap_util.c | 14
 fs/xfs/xfs_buf.c | 37
 fs/xfs/xfs_buf.h | 11
 fs/xfs/xfs_buf_item.c | 21
 fs/xfs/xfs_dir2_node.c | 26
 fs/xfs/xfs_iops.c | 3
 fs/xfs/xfs_log_recover.c | 13
 fs/xfs/xfs_qm.c | 80
 fs/xfs/xfs_trans_buf.c | 13
 include/asm-generic/pgtable.h | 7
 include/asm-generic/preempt.h | 35
 include/linux/lockref.h | 2
 include/linux/math64.h | 30
 include/linux/migrate.h | 12
 include/linux/mm.h | 6
 include/linux/mm_types.h | 52
 include/linux/pstore.h | 3
 include/linux/reboot.h | 1
 include/linux/sched.h | 5
 include/target/target_core_base.h | 5
 include/uapi/drm/vmwgfx_drm.h | 1
 include/uapi/linux/perf_event.h | 1
 include/xen/interface/io/blkif.h | 10
 init/Kconfig | 6
 kernel/Makefile | 7
 kernel/bounds.c | 2
 kernel/events/core.c | 21
 kernel/fork.c | 1
 kernel/kexec.c | 1
 kernel/reboot.c | 2
 kernel/sched/core.c | 6
 kernel/sched/fair.c | 151
 kernel/sched/rt.c | 14
 kernel/trace/ftrace.c | 2
 kernel/user.c | 6
 mm/Kconfig | 2
 mm/compaction.c | 4
 mm/huge_memory.c | 45
 mm/memory-failure.c | 14
 mm/memory.c | 2
 mm/mempolicy.c | 16
 mm/migrate.c | 82
 mm/mprotect.c | 13
 mm/page_alloc.c | 19
 mm/pgtable-generic.c | 8
 mm/rmap.c | 4
 net/core/neighbour.c | 1
 net/ipv4/netfilter/ipt_SYNPROXY.c | 1
 net/ipv4/netfilter/nft_reject_ipv4.c | 2
 net/ipv4/udp.c | 13
 net/ipv6/netfilter/ip6t_SYNPROXY.c | 1
 net/sctp/probe.c | 17
 net/unix/af_unix.c | 8
 sound/core/pcm_lib.c | 2
 sound/pci/hda/hda_intel.c | 4
 sound/pci/hda/patch_realtek.c | 4
 sound/soc/atmel/atmel_ssc_dai.c | 30
 sound/soc/atmel/sam9x5_wm8731.c | 2
 sound/soc/codecs/wm5110.c | 2
 sound/soc/codecs/wm8904.c | 2
 sound/soc/codecs/wm8962.c | 13
 sound/soc/codecs/wm_adsp.c | 10
 sound/soc/fsl/imx-wm8962.c | 2
 sound/soc/kirkwood/kirkwood-i2s.c | 24
 sound/soc/soc-generic-dmaengine-pcm.c | 38
 sound/soc/soc-pcm.c | 5
 sound/soc/tegra/tegra20_i2s.c | 6
 sound/soc/tegra/tegra20_spdif.c | 10
 sound/soc/tegra/tegra30_i2s.c | 6
 tools/power/cpupower/utils/cpupower-set.c | 6
 218 files changed, 2084 insertions(+), 1111 deletions(-)
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
new file mode 100644
index 000000000000..2b40e04d3c49
--- /dev/null
+++ b/Documentation/module-signing.txt
@@ -0,0 +1,240 @@
1 ==============================
2 KERNEL MODULE SIGNING FACILITY
3 ==============================
4
5CONTENTS
6
7 - Overview.
8 - Configuring module signing.
9 - Generating signing keys.
10 - Public keys in the kernel.
11 - Manually signing modules.
12 - Signed modules and stripping.
13 - Loading signed modules.
14 - Non-valid signatures and unsigned modules.
15 - Administering/protecting the private key.
16
17
18========
19OVERVIEW
20========
21
22The kernel module signing facility cryptographically signs modules during
23installation and then checks the signature upon loading the module. This
24allows increased kernel security by disallowing the loading of unsigned modules
25or modules signed with an invalid key. Module signing increases security by
26making it harder to load a malicious module into the kernel. The module
27signature checking is done by the kernel so that it is not necessary to have
28trusted userspace bits.
29
30This facility uses X.509 ITU-T standard certificates to encode the public keys
31involved. The signatures are not themselves encoded in any industrial standard
32type. The facility currently only supports the RSA public key encryption
33standard (though it is pluggable and permits others to be used). The possible
34hash algorithms that can be used are SHA-1, SHA-224, SHA-256, SHA-384, and
35SHA-512 (the algorithm is selected by data in the signature).
36
37
38==========================
39CONFIGURING MODULE SIGNING
40==========================
41
42The module signing facility is enabled by going to the "Enable Loadable Module
43Support" section of the kernel configuration and turning on
44
45 CONFIG_MODULE_SIG "Module signature verification"
46
47This has a number of options available:
48
49 (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE)
50
51 This specifies how the kernel should deal with a module that has a
52 signature for which the key is not known or a module that is unsigned.
53
54 If this is off (ie. "permissive"), then modules for which the key is not
55 available and modules that are unsigned are permitted, but the kernel will
56 be marked as being tainted.
57
58 If this is on (ie. "restrictive"), only modules that have a valid
59 signature that can be verified by a public key in the kernel's possession
60 will be loaded. All other modules will generate an error.
61
62 Irrespective of the setting here, if the module has a signature block that
63 cannot be parsed, it will be rejected out of hand.
64
65
66 (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL)
67
68 If this is on then modules will be automatically signed during the
69 modules_install phase of a build. If this is off, then the modules must
70 be signed manually using:
71
72 scripts/sign-file
73
74
75 (3) "Which hash algorithm should modules be signed with?"
76
77 This presents a choice of which hash algorithm the installation phase will
78 sign the modules with:
79
80 CONFIG_SIG_SHA1 "Sign modules with SHA-1"
81 CONFIG_SIG_SHA224 "Sign modules with SHA-224"
82 CONFIG_SIG_SHA256 "Sign modules with SHA-256"
83 CONFIG_SIG_SHA384 "Sign modules with SHA-384"
84 CONFIG_SIG_SHA512 "Sign modules with SHA-512"
85
86 The algorithm selected here will also be built into the kernel (rather
87 than being a module) so that modules signed with that algorithm can have
88 their signatures checked without causing a dependency loop.
89
90
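For example, a .config fragment that signs everything at modules_install time
with SHA-512 might look like the following (a sketch: option names as defined
in init/Kconfig, and CONFIG_MODULE_SIG_HASH is derived automatically from the
hash choice):

    CONFIG_MODULE_SIG=y
    # CONFIG_MODULE_SIG_FORCE is not set
    CONFIG_MODULE_SIG_ALL=y
    CONFIG_MODULE_SIG_SHA512=y
    CONFIG_MODULE_SIG_HASH="sha512"
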
=======================
GENERATING SIGNING KEYS
=======================

Cryptographic keypairs are required to generate and check signatures.  A
private key is used to generate a signature and the corresponding public key is
used to check it.  The private key is only needed during the build, after which
it can be deleted or stored securely.  The public key gets built into the
kernel so that it can be used to check the signatures as the modules are
loaded.

Under normal conditions, the kernel build will automatically generate a new
keypair using openssl if one does not exist in the files:

    signing_key.priv
    signing_key.x509

during the building of vmlinux (the public part of the key needs to be built
into vmlinux) using parameters in the:

    x509.genkey

file (which is also generated if it does not already exist).

It is strongly recommended that you provide your own x509.genkey file.

Most notably, in the x509.genkey file, the req_distinguished_name section
should be altered from the default:

    [ req_distinguished_name ]
    O = Magrathea
    CN = Glacier signing key
    emailAddress = slartibartfast@magrathea.h2g2

The generated RSA key size can also be set with:

    [ req ]
    default_bits = 4096

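Putting these pieces together, a complete x509.genkey might look like the
following sketch (modelled on the file the build generates itself; the
[ myexts ] section name and the exact extensions are illustrative):

    [ req ]
    default_bits = 4096
    distinguished_name = req_distinguished_name
    prompt = no
    string_mask = utf8only
    x509_extensions = myexts

    [ req_distinguished_name ]
    O = Magrathea
    CN = Glacier signing key
    emailAddress = slartibartfast@magrathea.h2g2

    [ myexts ]
    basicConstraints = critical,CA:FALSE
    keyUsage = digitalSignature
    subjectKeyIdentifier = hash
    authorityKeyIdentifier = keyid
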
It is also possible to manually generate the key private/public files using the
x509.genkey key generation configuration file in the root node of the Linux
kernel sources tree and the openssl command.  The following is an example to
generate the public/private key files:

    openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
       -config x509.genkey -outform DER -out signing_key.x509 \
       -keyout signing_key.priv

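The certificate produced this way can be inspected before the build consumes
it, for instance with:

    openssl x509 -inform DER -in signing_key.x509 -noout -text

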
=========================
PUBLIC KEYS IN THE KERNEL
=========================

The kernel contains a ring of public keys that can be viewed by root.  They're
in a keyring called ".system_keyring" that can be seen by:

    [root@deneb ~]# cat /proc/keys
    ...
    223c7853 I------     1 perm 1f030000     0     0 keyring   .system_keyring: 1
    302d2d52 I------     1 perm 1f010000     0     0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 []
    ...

Beyond the public key generated specifically for module signing, any file
placed in the kernel source root directory or the kernel build root directory
whose name is suffixed with ".x509" will be assumed to be an X.509 public key
and will be added to the keyring.

Further, the architecture code may take public keys from a hardware store and
add those in also (e.g. from the UEFI key database).

Finally, it is possible to add additional public keys by doing:

    keyctl padd asymmetric "" [.system_keyring-ID] <[key-file]

e.g.:

    keyctl padd asymmetric "" 0x223c7853 <my_public_key.x509

Note, however, that the kernel will only permit keys to be added to
.system_keyring _if_ the new key's X.509 wrapper is validly signed by a key
that is already resident in the .system_keyring at the time the key was added.

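The keyring ID varies from boot to boot, so a small sketch like the following
can look it up rather than hard-coding it (field positions assume the
/proc/keys layout shown above):

    id=$(awk '$8 == "keyring" && $9 == ".system_keyring:" \
        { print "0x" $1 }' /proc/keys)
    keyctl padd asymmetric "" $id <my_public_key.x509
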
========================
MANUALLY SIGNING MODULES
========================

To manually sign a module, use the scripts/sign-file tool available in
the Linux kernel source tree.  The script requires 4 arguments:

    1.  The hash algorithm (e.g., sha256)
    2.  The private key filename
    3.  The public key filename
    4.  The kernel module to be signed

The following is an example to sign a kernel module:

    scripts/sign-file sha512 kernel-signkey.priv \
        kernel-signkey.x509 module.ko

The hash algorithm used does not have to match the one configured, but if it
doesn't, you should make sure that hash algorithm is either built into the
kernel or can be loaded without requiring itself.

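If CONFIG_MODULE_SIG_ALL is off, signing an entire staging directory by hand
is a short loop (a sketch; the paths and key names are illustrative):

    find /lib/modules/$(uname -r)/extra -name '*.ko' |
    while read module; do
        scripts/sign-file sha512 kernel-signkey.priv \
            kernel-signkey.x509 "$module"
    done
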
============================
SIGNED MODULES AND STRIPPING
============================

A signed module has a digital signature simply appended at the end.  The string
"~Module signature appended~." at the end of the module's file confirms that a
signature is present but it does not confirm that the signature is valid!

Signed modules are BRITTLE as the signature is outside of the defined ELF
container.  Thus they MAY NOT be stripped once the signature is computed and
attached.  Note the entire module is the signed payload, including any and all
debug information present at the time of signing.

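A quick way to check for the marker is to read the module's trailing bytes,
e.g. with GNU coreutils (the magic string is 28 bytes including its newline):

    tail -c 28 module.ko
    ~Module signature appended~
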
======================
LOADING SIGNED MODULES
======================

Modules are loaded with insmod, modprobe, init_module() or finit_module(),
exactly as for unsigned modules as no processing is done in userspace.  The
signature checking is all done within the kernel.

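Recent versions of kmod's modinfo can decode the appended signature block,
which makes for a convenient sanity check before loading (the field values
shown are indicative only):

    modinfo module.ko | grep sig
    signer:         Glacier signing key
    sig_key:        ...
    sig_hashalgo:   sha512
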
=========================================
NON-VALID SIGNATURES AND UNSIGNED MODULES
=========================================

If CONFIG_MODULE_SIG_FORCE is enabled or module.sig_enforce=1 is supplied on
the kernel command line, the kernel will only load validly signed modules
for which it has a public key.  Otherwise, it will also load modules that are
unsigned.  Any module for which the kernel has a key, but which proves to have
a signature mismatch will not be permitted to load.

Any module that has an unparseable signature will be rejected.

========================================
ADMINISTERING/PROTECTING THE PRIVATE KEY
========================================

Since the private key is used to sign modules, viruses and malware could use
the private key to sign modules and compromise the operating system.  The
private key must be either destroyed or moved to a secure location and not kept
in the root node of the kernel source tree.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 3c12d9a7ed00..8a984e994e61 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -16,8 +16,12 @@ ip_default_ttl - INTEGER
 	Default: 64 (as recommended by RFC1700)
 
 ip_no_pmtu_disc - BOOLEAN
-	Disable Path MTU Discovery.
-	default FALSE
+	Disable Path MTU Discovery. If enabled and a
+	fragmentation-required ICMP is received, the PMTU to this
+	destination will be set to min_pmtu (see below). You will need
+	to raise min_pmtu to the smallest interface MTU on your system
+	manually if you want to avoid locally generated fragments.
+	Default: FALSE
 
 min_pmtu - INTEGER
 	default 552 - minimum discovered Path MTU
diff --git a/MAINTAINERS b/MAINTAINERS
index 1344816c4c06..d5e4ff328cc7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1008,6 +1008,8 @@ M: Santosh Shilimkar <santosh.shilimkar@ti.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-keystone/
+F:	drivers/clk/keystone/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
 
 ARM/LOGICPD PXA270 MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
@@ -3761,9 +3763,11 @@ F: include/uapi/linux/gigaset_dev.h
 
 GPIO SUBSYSTEM
 M:	Linus Walleij <linus.walleij@linaro.org>
-S:	Maintained
+M:	Alexandre Courbot <gnurou@gmail.com>
 L:	linux-gpio@vger.kernel.org
-F:	Documentation/gpio.txt
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
+S:	Maintained
+F:	Documentation/gpio/
 F:	drivers/gpio/
 F:	include/linux/gpio*
 F:	include/asm-generic/gpio.h
@@ -3831,6 +3835,12 @@ T: git git://linuxtv.org/media_tree.git
 S:	Maintained
 F:	drivers/media/usb/gspca/
 
+GUID PARTITION TABLE (GPT)
+M:	Davidlohr Bueso <davidlohr@hp.com>
+L:	linux-efi@vger.kernel.org
+S:	Maintained
+F:	block/partitions/efi.*
+
 STK1160 USB VIDEO CAPTURE DRIVER
 M:	Ezequiel Garcia <elezegarcia@gmail.com>
 L:	linux-media@vger.kernel.org
@@ -5911,12 +5921,21 @@ M: Steffen Klassert <steffen.klassert@secunet.com>
 M:	Herbert Xu <herbert@gondor.apana.org.au>
 M:	"David S. Miller" <davem@davemloft.net>
 L:	netdev@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
 S:	Maintained
 F:	net/xfrm/
 F:	net/key/
 F:	net/ipv4/xfrm*
+F:	net/ipv4/esp4.c
+F:	net/ipv4/ah4.c
+F:	net/ipv4/ipcomp.c
+F:	net/ipv4/ip_vti.c
 F:	net/ipv6/xfrm*
+F:	net/ipv6/esp6.c
+F:	net/ipv6/ah6.c
+F:	net/ipv6/ipcomp6.c
+F:	net/ipv6/ip6_vti.c
 F:	include/uapi/linux/xfrm.h
 F:	include/net/xfrm.h
 
@@ -9571,7 +9590,7 @@ F: drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
 P:	Silicon Graphics Inc
-M:	Dave Chinner <dchinner@fromorbit.com>
+M:	Dave Chinner <david@fromorbit.com>
 M:	Ben Myers <bpm@sgi.com>
 M:	xfs@oss.sgi.com
 L:	xfs@oss.sgi.com
diff --git a/Makefile b/Makefile
index 858a147fd836..14d592cbbc5f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
@@ -732,19 +732,15 @@ export mod_strip_cmd
 # Select initial ramdisk compression format, default is gzip(1).
 # This shall be used by the dracut(8) tool while creating an initramfs image.
 #
-INITRD_COMPRESS=gzip
-ifeq ($(CONFIG_RD_BZIP2), y)
-        INITRD_COMPRESS=bzip2
-else ifeq ($(CONFIG_RD_LZMA), y)
-        INITRD_COMPRESS=lzma
-else ifeq ($(CONFIG_RD_XZ), y)
-        INITRD_COMPRESS=xz
-else ifeq ($(CONFIG_RD_LZO), y)
-        INITRD_COMPRESS=lzo
-else ifeq ($(CONFIG_RD_LZ4), y)
-        INITRD_COMPRESS=lz4
-endif
-export INITRD_COMPRESS
+INITRD_COMPRESS-y := gzip
+INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2
+INITRD_COMPRESS-$(CONFIG_RD_LZMA) := lzma
+INITRD_COMPRESS-$(CONFIG_RD_XZ) := xz
+INITRD_COMPRESS-$(CONFIG_RD_LZO) := lzo
+INITRD_COMPRESS-$(CONFIG_RD_LZ4) := lz4
+# do not export INITRD_COMPRESS, since we didn't actually
+# choose a sane default compression above.
+# export INITRD_COMPRESS := $(INITRD_COMPRESS-y)
 
 ifdef CONFIG_MODULE_SIG_ALL
 MODSECKEY = ./signing_key.priv
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 68125dd766c6..39e58d1cdf90 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -8,7 +8,11 @@
 
 /******** no-legacy-syscalls-ABI *******/
 
-#ifndef _UAPI_ASM_ARC_UNISTD_H
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
 #define _UAPI_ASM_ARC_UNISTD_H
 
 #define __ARCH_WANT_SYS_EXECVE
@@ -36,4 +40,6 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
 #define __NR_sysfs		(__NR_arch_specific_syscall + 3)
 __SYSCALL(__NR_sysfs, sys_sysfs)
 
+#undef __SYSCALL
+
 #endif
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index ee845fad939b..46e1d7ef163f 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -87,9 +87,9 @@
 		interrupts = <1 9 0xf04>;
 	};
 
-	gpio0: gpio@ffc40000 {
+	gpio0: gpio@e6050000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc40000 0 0x2c>;
+		reg = <0 0xe6050000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 4 0x4>;
 		#gpio-cells = <2>;
@@ -99,9 +99,9 @@
 		interrupt-controller;
 	};
 
-	gpio1: gpio@ffc41000 {
+	gpio1: gpio@e6051000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc41000 0 0x2c>;
+		reg = <0 0xe6051000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 5 0x4>;
 		#gpio-cells = <2>;
@@ -111,9 +111,9 @@
 		interrupt-controller;
 	};
 
-	gpio2: gpio@ffc42000 {
+	gpio2: gpio@e6052000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc42000 0 0x2c>;
+		reg = <0 0xe6052000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 6 0x4>;
 		#gpio-cells = <2>;
@@ -123,9 +123,9 @@
 		interrupt-controller;
 	};
 
-	gpio3: gpio@ffc43000 {
+	gpio3: gpio@e6053000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc43000 0 0x2c>;
+		reg = <0 0xe6053000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 7 0x4>;
 		#gpio-cells = <2>;
@@ -135,9 +135,9 @@
 		interrupt-controller;
 	};
 
-	gpio4: gpio@ffc44000 {
+	gpio4: gpio@e6054000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc44000 0 0x2c>;
+		reg = <0 0xe6054000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 8 0x4>;
 		#gpio-cells = <2>;
@@ -147,9 +147,9 @@
 		interrupt-controller;
 	};
 
-	gpio5: gpio@ffc45000 {
+	gpio5: gpio@e6055000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc45000 0 0x2c>;
+		reg = <0 0xe6055000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 9 0x4>;
 		#gpio-cells = <2>;
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 58347bb874a0..4cf165502b35 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
101 { "dss_hdmi", "omapdss_hdmi", -1 }, 101 { "dss_hdmi", "omapdss_hdmi", -1 },
102}; 102};
103 103
104static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
105{
106 u32 enable_mask, enable_shift;
107 u32 pipd_mask, pipd_shift;
108 u32 reg;
109
110 if (dsi_id == 0) {
111 enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
112 enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
113 pipd_mask = OMAP4_DSI1_PIPD_MASK;
114 pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
115 } else if (dsi_id == 1) {
116 enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
117 enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
118 pipd_mask = OMAP4_DSI2_PIPD_MASK;
119 pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
120 } else {
121 return -ENODEV;
122 }
123
124 reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
125
126 reg &= ~enable_mask;
127 reg &= ~pipd_mask;
128
129 reg |= (lanes << enable_shift) & enable_mask;
130 reg |= (lanes << pipd_shift) & pipd_mask;
131
132 omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
133
134 return 0;
135}
136
104static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask) 137static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
105{ 138{
139 if (cpu_is_omap44xx())
140 return omap4_dsi_mux_pads(dsi_id, lane_mask);
141
106 return 0; 142 return 0;
107} 143}
108 144
109static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask) 145static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
110{ 146{
147 if (cpu_is_omap44xx())
148 omap4_dsi_mux_pads(dsi_id, 0);
111} 149}
112 150
113static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput) 151static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
index 7eb9a10fc1af..2fddf38192df 100644
--- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
+++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
@@ -8,8 +8,6 @@
  * published by the Free Software Foundation.
 */
 
-#include <linux/clk-provider.h>
-#include <linux/irqchip.h>
 #include <linux/of_platform.h>
 
 #include <asm/mach/arch.h>
@@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void)
 		panic("SoC is not S3C64xx!");
 }
 
-static void __init s3c64xx_dt_init_irq(void)
-{
-	of_clk_init(NULL);
-	samsung_wdt_reset_of_init();
-	irqchip_init();
-};
-
 static void __init s3c64xx_dt_init_machine(void)
 {
+	samsung_wdt_reset_of_init();
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
@@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)")
 	/* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */
 	.dt_compat	= s3c64xx_dt_compat,
 	.map_io		= s3c64xx_dt_map_io,
-	.init_irq	= s3c64xx_dt_init_irq,
 	.init_machine	= s3c64xx_dt_init_machine,
 	.restart	= s3c64xx_dt_restart,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index a8d3ce646fb9..e0406fd37390 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -245,7 +245,9 @@ static void __init lager_init(void)
 {
 	lager_add_standard_devices();
 
-	phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
+	if (IS_ENABLED(CONFIG_PHYLIB))
+		phy_register_fixup_for_id("r8a7790-ether-ff:01",
+					  lager_ksz8041_fixup);
 }
 
 static const char * const lager_boards_compat_dt[] __initconst = {
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 83e4f959ee47..85501238b425 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 	struct remap_data *info = data;
 	struct page *page = info->pages[info->index++];
 	unsigned long pfn = page_to_pfn(page);
-	pte_t pte = pfn_pte(pfn, info->prot);
+	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
 
 	if (map_foreign_page(pfn, info->fgmfn, info->domid))
 		return -EFAULT;
@@ -224,10 +224,10 @@ static int __init xen_guest_init(void)
 	}
 	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
 		return 0;
-	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
+	xen_hvm_resume_frames = res.start;
 	xen_events_irq = irq_of_parse_and_map(node, 0);
 	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-		version, xen_events_irq, xen_hvm_resume_frames);
+		version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
 	xen_domain_type = XEN_HVM_DOMAIN;
 
 	xen_setup_features();
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2820f1a6eebe..dde3fc9c49f0 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	unsigned long offset, size_t size, enum dma_data_direction dir,
 	struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
 }
 
 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_sync_single_for_device(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
 }
 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6777a2192b83..6a8928bba03c 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 {
 	int err, len, type, disabled = !ctrl.enabled;
 
-	if (disabled) {
-		len = 0;
-		type = HW_BREAKPOINT_EMPTY;
-	} else {
-		err = arch_bp_generic_fields(ctrl, &len, &type);
-		if (err)
-			return err;
-
-		switch (note_type) {
-		case NT_ARM_HW_BREAK:
-			if ((type & HW_BREAKPOINT_X) != type)
-				return -EINVAL;
-			break;
-		case NT_ARM_HW_WATCH:
-			if ((type & HW_BREAKPOINT_RW) != type)
-				return -EINVAL;
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
+	attr->disabled = disabled;
+	if (disabled)
+		return 0;
+
+	err = arch_bp_generic_fields(ctrl, &len, &type);
+	if (err)
+		return err;
+
+	switch (note_type) {
+	case NT_ARM_HW_BREAK:
+		if ((type & HW_BREAKPOINT_X) != type)
+			return -EINVAL;
+		break;
+	case NT_ARM_HW_WATCH:
+		if ((type & HW_BREAKPOINT_RW) != type)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	attr->bp_len	= len;
 	attr->bp_type	= type;
-	attr->disabled	= disabled;
 
 	return 0;
 }
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4a594b76674d..bc23b1ba7980 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+				 struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+				   struct kvmppc_book3s_shadow_vcpu *svcpu);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 0bd9348a4db9..192917d2239c 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -79,6 +79,7 @@ struct kvmppc_host_state {
 	ulong vmhandler;
 	ulong scratch0;
 	ulong scratch1;
+	ulong scratch2;
 	u8 in_guest;
 	u8 restore_hid5;
 	u8 napping;
@@ -106,6 +107,7 @@ struct kvmppc_host_state {
 };
 
 struct kvmppc_book3s_shadow_vcpu {
+	bool in_use;
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 033c06be1d84..7bdcf340016c 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
 int64_t opal_pci_poll(uint64_t phb_id);
 int64_t opal_return_cpu(void);
 
-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val);
+int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
 int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
 
 int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
 		       uint32_t addr, uint32_t data, uint32_t sz);
 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
-		      uint32_t addr, uint32_t *data, uint32_t sz);
+		      uint32_t addr, __be32 *data, uint32_t sz);
 int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
 int64_t opal_manage_flash(uint8_t op);
 int64_t opal_update_flash(uint64_t blk_list);
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 9ee12610af02..aace90547614 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 2ea5cc033ec8..d3de01066f7d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -576,6 +576,7 @@ int main(void)
 	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
 	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
 	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+	HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
 	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
 	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 	HSTATE_FIELD(HSTATE_NAPPING, napping);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 779a78c26435..11c1d069d920 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
 {
 	unsigned long addr;
-	const u32 *basep, *sizep;
+	const __be32 *basep, *sizep;
 	unsigned int rtas_start = 0, rtas_end = 0;
 
 	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
 	sizep = of_get_property(rtas.dev, "rtas-size", NULL);
 
 	if (basep && sizep) {
-		rtas_start = *basep;
-		rtas_end = *basep + *sizep;
+		rtas_start = be32_to_cpup(basep);
+		rtas_end = rtas_start + be32_to_cpup(sizep);
 	}
 
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3386d8ab7eb0..4a96556fd2d4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 #endif
 }
 
-static void prime_debug_regs(struct thread_struct *thread)
+static void prime_debug_regs(struct debug_reg *debug)
 {
 	/*
 	 * We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
 	 */
 	mtmsr(mfmsr() & ~MSR_DE);
 
-	mtspr(SPRN_IAC1, thread->debug.iac1);
-	mtspr(SPRN_IAC2, thread->debug.iac2);
+	mtspr(SPRN_IAC1, debug->iac1);
+	mtspr(SPRN_IAC2, debug->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-	mtspr(SPRN_IAC3, thread->debug.iac3);
-	mtspr(SPRN_IAC4, thread->debug.iac4);
+	mtspr(SPRN_IAC3, debug->iac3);
+	mtspr(SPRN_IAC4, debug->iac4);
 #endif
-	mtspr(SPRN_DAC1, thread->debug.dac1);
-	mtspr(SPRN_DAC2, thread->debug.dac2);
+	mtspr(SPRN_DAC1, debug->dac1);
+	mtspr(SPRN_DAC2, debug->dac2);
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-	mtspr(SPRN_DVC1, thread->debug.dvc1);
-	mtspr(SPRN_DVC2, thread->debug.dvc2);
+	mtspr(SPRN_DVC1, debug->dvc1);
+	mtspr(SPRN_DVC2, debug->dvc2);
 #endif
-	mtspr(SPRN_DBCR0, thread->debug.dbcr0);
-	mtspr(SPRN_DBCR1, thread->debug.dbcr1);
+	mtspr(SPRN_DBCR0, debug->dbcr0);
+	mtspr(SPRN_DBCR1, debug->dbcr1);
 #ifdef CONFIG_BOOKE
-	mtspr(SPRN_DBCR2, thread->debug.dbcr2);
+	mtspr(SPRN_DBCR2, debug->dbcr2);
 #endif
 }
 /*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
  * debug registers, set the debug registers from the values
  * stored in the new thread.
  */
-void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct debug_reg *new_debug)
 {
 	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
-	    || (new_thread->debug.dbcr0 & DBCR0_IDM))
-		prime_debug_regs(new_thread);
+	    || (new_debug->dbcr0 & DBCR0_IDM))
+		prime_debug_regs(new_debug);
 }
 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-	switch_booke_debug_regs(&new->thread);
+	switch_booke_debug_regs(&new->thread.debug);
 #else
 /*
  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 75fb40498b41..2e3d2bf536c5 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
 			flush_fp_to_thread(child);
 			if (fpidx < (PT_FPSCR - PT_FPR0))
-				memcpy(&tmp, &child->thread.fp_state.fpr,
+				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
 				       sizeof(long));
 			else
 				tmp = child->thread.fp_state.fpscr;
@@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
 			flush_fp_to_thread(child);
 			if (fpidx < (PT_FPSCR - PT_FPR0))
-				memcpy(&child->thread.fp_state.fpr, &data,
+				memcpy(&child->thread.TS_FPR(fpidx), &data,
 				       sizeof(long));
 			else
 				child->thread.fp_state.fpscr = data;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index febc80445d25..bc76cc6b419c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void)
 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
 	    (dn = of_find_node_by_path("/rtas"))) {
 		int num_addr_cell, num_size_cell, maxcpus;
-		const unsigned int *ireg;
+		const __be32 *ireg;
 
 		num_addr_cell = of_n_addr_cells(dn);
 		num_size_cell = of_n_size_cells(dn);
@@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void)
 		if (!ireg)
 			goto out;
 
-		maxcpus = ireg[num_addr_cell + num_size_cell];
+		maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);
 
 		/* Double maxcpus for processors which have SMT capability */
 		if (cpu_has_feature(CPU_FTR_SMT))
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index a3b64f3bf9a2..c1cf4a1522d9 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 int cpu_to_core_id(int cpu)
 {
 	struct device_node *np;
-	const int *reg;
+	const __be32 *reg;
 	int id = -1;
 
 	np = of_get_cpu_node(cpu, NULL);
@@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu)
 	if (!reg)
 		goto out;
 
-	id = *reg;
+	id = be32_to_cpup(reg);
 out:
 	of_node_put(np);
 	return id;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3ff587a8b7d..c5d148434c08 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		slb_v = vcpu->kvm->arch.vrma_slb_v;
 	}
 
+	preempt_disable();
 	/* Find the HPTE in the hash table */
 	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
 					 HPTE_V_VALID | HPTE_V_ABSENT);
-	if (index < 0)
+	if (index < 0) {
+		preempt_enable();
 		return -ENOENT;
+	}
 	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
 	v = hptep[0] & ~HPTE_V_HVLOCK;
 	gr = kvm->arch.revmap[index].guest_rpte;
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Unlock the HPTE */
 	asm volatile("lwsync" : : : "memory");
 	hptep[0] = v;
+	preempt_enable();
 
 	gpte->eaddr = eaddr;
 	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			return -EFAULT;
 	} else {
 		page = pages[0];
+		pfn = page_to_pfn(page);
 		if (PageHuge(page)) {
 			page = compound_head(page);
 			pte_size <<= compound_order(page);
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			}
 			rcu_read_unlock_sched();
 		}
-		pfn = page_to_pfn(page);
 	}
 
 	ret = -EFAULT;
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
 	}
 
-	/* Set the HPTE to point to pfn */
-	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
+	/*
+	 * Set the HPTE to point to pfn.
+	 * Since the pfn is at PAGE_SIZE granularity, make sure we
+	 * don't mask out lower-order bits if psize < PAGE_SIZE.
+	 */
+	if (psize < PAGE_SIZE)
+		psize = PAGE_SIZE;
+	r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
 	if (hpte_is_writable(r) && !write_ok)
 		r = hpte_make_readonly(r);
 	ret = RESUME_GUEST;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 072287f1c3bc..b51d5db78068 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
 	    vc->preempt_tb != TB_NIL) {
 		vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
 		vcpu->arch.busy_preempt = TB_NIL;
 	}
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
 		vc->preempt_tb = mftb();
 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
 		vcpu->arch.busy_preempt = mftb();
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
486 */ 488 */
487 if (vc->vcore_state != VCORE_INACTIVE && 489 if (vc->vcore_state != VCORE_INACTIVE &&
488 vc->runner->arch.run_task != current) { 490 vc->runner->arch.run_task != current) {
489 spin_lock(&vc->runner->arch.tbacct_lock); 491 spin_lock_irq(&vc->runner->arch.tbacct_lock);
490 p = vc->stolen_tb; 492 p = vc->stolen_tb;
491 if (vc->preempt_tb != TB_NIL) 493 if (vc->preempt_tb != TB_NIL)
492 p += now - vc->preempt_tb; 494 p += now - vc->preempt_tb;
493 spin_unlock(&vc->runner->arch.tbacct_lock); 495 spin_unlock_irq(&vc->runner->arch.tbacct_lock);
494 } else { 496 } else {
495 p = vc->stolen_tb; 497 p = vc->stolen_tb;
496 } 498 }
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
512 core_stolen = vcore_stolen_time(vc, now); 514 core_stolen = vcore_stolen_time(vc, now);
513 stolen = core_stolen - vcpu->arch.stolen_logged; 515 stolen = core_stolen - vcpu->arch.stolen_logged;
514 vcpu->arch.stolen_logged = core_stolen; 516 vcpu->arch.stolen_logged = core_stolen;
515 spin_lock(&vcpu->arch.tbacct_lock); 517 spin_lock_irq(&vcpu->arch.tbacct_lock);
516 stolen += vcpu->arch.busy_stolen; 518 stolen += vcpu->arch.busy_stolen;
517 vcpu->arch.busy_stolen = 0; 519 vcpu->arch.busy_stolen = 0;
518 spin_unlock(&vcpu->arch.tbacct_lock); 520 spin_unlock_irq(&vcpu->arch.tbacct_lock);
519 if (!dt || !vpa) 521 if (!dt || !vpa)
520 return; 522 return;
521 memset(dt, 0, sizeof(struct dtl_entry)); 523 memset(dt, 0, sizeof(struct dtl_entry));
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
589 if (list_empty(&vcpu->kvm->arch.rtas_tokens)) 591 if (list_empty(&vcpu->kvm->arch.rtas_tokens))
590 return RESUME_HOST; 592 return RESUME_HOST;
591 593
594 idx = srcu_read_lock(&vcpu->kvm->srcu);
592 rc = kvmppc_rtas_hcall(vcpu); 595 rc = kvmppc_rtas_hcall(vcpu);
596 srcu_read_unlock(&vcpu->kvm->srcu, idx);
593 597
594 if (rc == -ENOENT) 598 if (rc == -ENOENT)
595 return RESUME_HOST; 599 return RESUME_HOST;
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
1115 1119
1116 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) 1120 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
1117 return; 1121 return;
1118 spin_lock(&vcpu->arch.tbacct_lock); 1122 spin_lock_irq(&vcpu->arch.tbacct_lock);
1119 now = mftb(); 1123 now = mftb();
1120 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - 1124 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
1121 vcpu->arch.stolen_logged; 1125 vcpu->arch.stolen_logged;
1122 vcpu->arch.busy_preempt = now; 1126 vcpu->arch.busy_preempt = now;
1123 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; 1127 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
1124 spin_unlock(&vcpu->arch.tbacct_lock); 1128 spin_unlock_irq(&vcpu->arch.tbacct_lock);
1125 --vc->n_runnable; 1129 --vc->n_runnable;
1126 list_del(&vcpu->arch.run_list); 1130 list_del(&vcpu->arch.run_list);
1127} 1131}
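All of the tbacct_lock changes above follow one rule: once a lock can be taken from a context that runs with interrupts disabled, every acquisition must disable interrupts, and nested sections must restore the exact previous state rather than unconditionally re-enabling. A toy userspace model of the save/restore contract behind spin_lock_irqsave()/spin_unlock_irqrestore() (the flag handling is illustrative, not the kernel implementation):

    #include <stdio.h>
    #include <stdbool.h>

    static bool irqs_on = true;             /* models the CPU interrupt-enable flag */

    static unsigned long local_irq_save(void)
    {
        unsigned long flags = irqs_on;      /* capture the caller's state... */
        irqs_on = false;                    /* ...then disable */
        return flags;
    }

    static void local_irq_restore(unsigned long flags)
    {
        irqs_on = flags;                    /* put back whatever the caller had */
    }

    int main(void)
    {
        unsigned long outer = local_irq_save();   /* e.g. the vcpu_load path */
        unsigned long inner = local_irq_save();   /* nested critical section */

        local_irq_restore(inner);
        printf("after inner restore: irqs %s\n", irqs_on ? "on" : "off");
        local_irq_restore(outer);
        printf("after outer restore: irqs %s\n", irqs_on ? "on" : "off");
        return 0;
    }

The inner restore leaves interrupts off, which is why nested users must save and restore flags instead of unconditionally enabling.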
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9c515440ad1a..8689e2e30857 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		is_io = pa & (HPTE_R_I | HPTE_R_W);
 		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
 		pa &= PAGE_MASK;
+		pa |= gpa & ~PAGE_MASK;
 	} else {
 		/* Translate to host virtual address */
 		hva = __gfn_to_hva_memslot(memslot, gfn);
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 			ptel = hpte_make_readonly(ptel);
 		is_io = hpte_cache_bits(pte_val(pte));
 		pa = pte_pfn(pte) << PAGE_SHIFT;
+		pa |= hva & (pte_size - 1);
+		pa |= gpa & ~PAGE_MASK;
 		}
 	}
 
 	if (pte_size < psize)
 		return H_PARAMETER;
-	if (pa && pte_size > psize)
-		pa |= gpa & (pte_size - 1);
 
 	ptel &= ~(HPTE_R_PP0 - psize);
 	ptel |= pa;
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = {
 	20,	/* 1M, unsupported */
 };
 
+/* When called from virtual mode, this function must be protected by
+ * preempt_disable(); otherwise, being preempted while holding
+ * HPTE_V_HVLOCK can trigger a deadlock.
+ */
 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			unsigned long valid)
 {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bc8de75b1925..be4fa04a37c9 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 13:	b	machine_check_fwnmi
 
-
 /*
  * We come in here when wakened from nap mode on a secondary hw thread.
  * Relocation is off and most register values are lost.
@@ -224,6 +223,11 @@ kvm_start_guest:
 	/* Clear our vcpu pointer so we don't come back in early */
 	li	r0, 0
 	std	r0, HSTATE_KVM_VCPU(r13)
+	/*
+	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
+	 * the nap_count, because once the increment to nap_count is
+	 * visible we could be given another vcpu.
+	 */
 	lwsync
 	/* Clear any pending IPI - we're an offline thread */
 	ld	r5, HSTATE_XICS_PHYS(r13)
@@ -241,7 +245,6 @@ kvm_start_guest:
 	/* increment the nap count and then go to nap mode */
 	ld	r4, HSTATE_KVM_VCORE(r13)
 	addi	r4, r4, VCORE_NAP_COUNT
-	lwsync	/* make previous updates visible */
 51:	lwarx	r3, 0, r4
 	addi	r3, r3, 1
 	stwcx.	r3, 0, r4
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv:
 	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
 	 * guest R13 saved in SPRN_SCRATCH0
 	 */
-	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
-	std	r9, HSTATE_HOST_R2(r13)
+	std	r9, HSTATE_SCRATCH2(r13)
 
 	lbz	r9, HSTATE_IN_GUEST(r13)
 	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
 	beq	kvmppc_bad_host_intr
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	cmpwi	r9, KVM_GUEST_MODE_GUEST
-	ld	r9, HSTATE_HOST_R2(r13)
+	ld	r9, HSTATE_SCRATCH2(r13)
 	beq	kvmppc_interrupt_pr
 #endif
 	/* We're now back in the host but in guest MMU context */
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv:
 	std	r6, VCPU_GPR(R6)(r9)
 	std	r7, VCPU_GPR(R7)(r9)
 	std	r8, VCPU_GPR(R8)(r9)
-	ld	r0, HSTATE_HOST_R2(r13)
+	ld	r0, HSTATE_SCRATCH2(r13)
 	std	r0, VCPU_GPR(R9)(r9)
 	std	r10, VCPU_GPR(R10)(r9)
 	std	r11, VCPU_GPR(R11)(r9)
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	 */
 	/* Increment the threads-exiting-guest count in the 0xff00
 	   bits of vcore->entry_exit_count */
-	lwsync
 	ld	r5,HSTATE_KVM_VCORE(r13)
 	addi	r6,r5,VCORE_ENTRY_EXIT
 41:	lwarx	r3,0,r6
 	addi	r0,r3,0x100
 	stwcx.	r0,0,r6
 	bne	41b
-	lwsync
+	isync		/* order stwcx. vs. reading napping_threads */
 
 	/*
 	 * At this point we have an interrupt that we have to pass
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	sld	r0,r0,r4
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
 	beq	43f
+	/* Order entry/exit update vs. IPIs */
+	sync
 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
 	subf	r6,r4,r13
 42:	andi.	r0,r3,1
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	bge	kvm_cede_exit
 	stwcx.	r4,0,r6
 	bne	31b
+	/* order napping_threads update vs testing entry_exit_count */
+	isync
 	li	r0,1
 	stb	r0,HSTATE_NAPPING(r13)
-	/* order napping_threads update vs testing entry_exit_count */
-	lwsync
 	mr	r4,r3
 	lwz	r7,VCORE_ENTRY_EXIT(r5)
 	cmpwi	r7,0x100
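The barrier shuffling above is easier to follow in acquire/release terms: the vcpu pointer must be globally visible as clear before the nap_count increment can be observed. A rough C11-atomics analogue of that ordering (an analogy only; the real code is PPC assembly with lwarx/stwcx. and explicit lwsync/isync):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int nap_count;
    static _Atomic long hstate_kvm_vcpu;    /* stands in for HSTATE_KVM_VCPU(r13) */

    static void secondary_thread_enters_nap(void)
    {
        /* Clear our vcpu pointer first; the release increment keeps that
         * store ordered before it, mirroring the lwsync the new comment
         * documents: once the increment is visible we may be handed a new
         * vcpu, so the old pointer must already be gone. */
        atomic_store_explicit(&hstate_kvm_vcpu, 0, memory_order_relaxed);
        atomic_fetch_add_explicit(&nap_count, 1, memory_order_release);
    }

    int main(void)
    {
        secondary_thread_enters_nap();
        printf("nap_count = %d\n", atomic_load(&nap_count));
        return 0;
    }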
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index f4dd041c14ea..f779450cb07c 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -129,29 +129,32 @@ kvm_start_lightweight:
  * R12      = exit handler id
  * R13      = PACA
  * SVCPU.* = guest *
+ * MSR.EE   = 1
  *
  */
 
+	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
+	/*
+	 * kvmppc_copy_from_svcpu can clobber volatile registers, save
+	 * the exit handler id to the vcpu and restore it from there later.
+	 */
+	stw	r12, VCPU_TRAP(r3)
+
 	/* Transfer reg values from shadow vcpu back to vcpu struct */
 	/* On 64-bit, interrupts are still off at this point */
-	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
 	GET_SHADOW_VCPU(r4)
 	bl	FUNC(kvmppc_copy_from_svcpu)
 	nop
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	/* Re-enable interrupts */
-	ld	r3, HSTATE_HOST_MSR(r13)
-	ori	r3, r3, MSR_EE
-	MTMSR_EERI(r3)
-
 	/*
 	 * Reload kernel SPRG3 value.
 	 * No need to save guest value as usermode can't modify SPRG3.
 	 */
 	ld	r3, PACA_SPRG3(r13)
 	mtspr	SPRN_SPRG3, r3
-
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 	/* R7 = vcpu */
@@ -177,7 +180,7 @@ kvm_start_lightweight:
 	PPC_STL	r31, VCPU_GPR(R31)(r7)
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-	mr	r5, r12
+	lwz	r5, VCPU_TRAP(r7)
 
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index fe14ca3dd171..5b9e9063cfaf 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu->in_use = 0;
 	svcpu_put(svcpu);
 #endif
 	vcpu->cpu = smp_processor_id();
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	if (svcpu->in_use) {
+		kvmppc_copy_from_svcpu(vcpu, svcpu);
+	}
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 	svcpu_put(svcpu);
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 	svcpu->ctr = vcpu->arch.ctr;
 	svcpu->lr = vcpu->arch.lr;
 	svcpu->pc = vcpu->arch.pc;
+	svcpu->in_use = true;
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 			    struct kvmppc_book3s_shadow_vcpu *svcpu)
 {
+	/*
+	 * vcpu_put would just call us again because in_use hasn't
+	 * been updated yet.
+	 */
+	preempt_disable();
+
+	/*
+	 * Maybe we were already preempted and synced the svcpu from
+	 * our preempt notifiers. Don't bother touching this svcpu then.
+	 */
+	if (!svcpu->in_use)
+		goto out;
+
 	vcpu->arch.gpr[0] = svcpu->gpr[0];
 	vcpu->arch.gpr[1] = svcpu->gpr[1];
 	vcpu->arch.gpr[2] = svcpu->gpr[2];
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	vcpu->arch.fault_dar = svcpu->fault_dar;
 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
 	vcpu->arch.last_inst = svcpu->last_inst;
+	svcpu->in_use = false;
+
+out:
+	preempt_enable();
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
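The in_use flag makes the svcpu copy-back idempotent: whichever of the preempt notifier or the explicit exit path runs first does the copy, and the later caller sees in_use clear and backs off. A single-threaded sketch of that guard pattern (names and fields are stand-ins, not the kernel structures):

    #include <stdio.h>
    #include <stdbool.h>

    struct shadow_state { int scratch; bool in_use; };
    struct vcpu_state  { int scratch; };

    /* Copy once: safe to call from both paths, in either order. */
    static void copy_from_shadow(struct vcpu_state *v, struct shadow_state *s)
    {
        if (!s->in_use)
            return;                 /* someone already synced this svcpu */
        v->scratch = s->scratch;
        s->in_use = false;
    }

    int main(void)
    {
        struct shadow_state s = { .scratch = 7, .in_use = true };
        struct vcpu_state v = { 0 };

        copy_from_shadow(&v, &s);   /* performs the copy */
        s.scratch = 99;             /* stale data appears later... */
        copy_from_shadow(&v, &s);   /* ...and is ignored: already synced */
        printf("vcpu scratch = %d\n", v.scratch);   /* prints 7 */
        return 0;
    }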
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index a38c4c9edab8..c3c5231adade 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
 
 	li	r6, MSR_IR | MSR_DR
 	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
-#ifdef CONFIG_PPC_BOOK3S_32
 	/*
 	 * Set EE in HOST_MSR so that it's enabled when we get into our
-	 * C exit handler function. On 64-bit we delay enabling
-	 * interrupts until we have finished transferring stuff
-	 * to or from the PACA.
+	 * C exit handler function.
 	 */
 	ori	r5, r5, MSR_EE
-#endif
 	mtsrr0	r7
 	mtsrr1	r6
 	RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 53e65a210b9a..0591e05db74b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret, s;
-	struct thread_struct thread;
+	struct debug_reg debug;
 #ifdef CONFIG_PPC_FPU
 	struct thread_fp_state fp;
 	int fpexc_mode;
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 
 	/* Switch to guest debug context */
-	thread.debug = vcpu->arch.shadow_dbg_reg;
-	switch_booke_debug_regs(&thread);
-	thread.debug = current->thread.debug;
+	debug = vcpu->arch.shadow_dbg_reg;
+	switch_booke_debug_regs(&debug);
+	debug = current->thread.debug;
 	current->thread.debug = vcpu->arch.shadow_dbg_reg;
 
 	kvmppc_fix_ee_before_entry();
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	   We also get here with interrupts enabled. */
 
 	/* Switch back to user space debug context */
-	switch_booke_debug_regs(&thread);
-	current->thread.debug = thread.debug;
+	switch_booke_debug_regs(&debug);
+	current->thread.debug = debug;
 
 #ifdef CONFIG_PPC_FPU
 	kvmppc_save_guest_fp(vcpu);
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index e7e59e4f9892..79d83cad3d67 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1;
 static u8 opal_lpc_inb(unsigned long port)
 {
 	int64_t rc;
-	uint32_t data;
+	__be32 data;
 
 	if (opal_lpc_chip_id < 0 || port > 0xffff)
 		return 0xff;
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1);
-	return rc ? 0xff : data;
+	return rc ? 0xff : be32_to_cpu(data);
 }
 
 static __le16 __opal_lpc_inw(unsigned long port)
 {
 	int64_t rc;
-	uint32_t data;
+	__be32 data;
 
 	if (opal_lpc_chip_id < 0 || port > 0xfffe)
 		return 0xffff;
 	if (port & 1)
 		return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1);
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2);
-	return rc ? 0xffff : data;
+	return rc ? 0xffff : be32_to_cpu(data);
 }
 static u16 opal_lpc_inw(unsigned long port)
 {
@@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port)
 static __le32 __opal_lpc_inl(unsigned long port)
 {
 	int64_t rc;
-	uint32_t data;
+	__be32 data;
 
 	if (opal_lpc_chip_id < 0 || port > 0xfffc)
 		return 0xffffffff;
@@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port)
 		(__le32)opal_lpc_inb(port + 2) << 8 |
 			opal_lpc_inb(port + 3);
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4);
-	return rc ? 0xffffffff : data;
+	return rc ? 0xffffffff : be32_to_cpu(data);
 }
 
 static u32 opal_lpc_inl(unsigned long port)
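OPAL returns its data big-endian, so every value read through opal_lpc_read() has to pass through be32_to_cpu() before the host uses it. A self-contained model of what that conversion does (the byte handling is illustrative; the kernel's implementation differs):

    #include <stdio.h>
    #include <stdint.h>

    /* Model of be32_to_cpu()/be32_to_cpup(): assemble the host-order value
     * from explicit byte positions, independent of host endianness. */
    static uint32_t be32_to_cpu_model(const void *p)
    {
        const uint8_t *b = p;
        return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
               ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
    }

    int main(void)
    {
        /* the 32-bit value 0x2a laid out as a firmware buffer would hold it */
        uint8_t buf[4] = { 0x00, 0x00, 0x00, 0x2a };

        printf("value = %u\n", be32_to_cpu_model(buf));   /* prints 42 */
        return 0;
    }

On a big-endian host the conversion is a no-op; on little-endian it is a byte swap. Declaring the buffers __be32 also lets sparse flag any access that skips the helper.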
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index 4d99a8fd55ac..4fbf276ac99e 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
 {
 	struct opal_scom_map *m = map;
 	int64_t rc;
+	__be64 v;
 
 	reg = opal_scom_unmangle(reg);
-	rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value));
+	rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
+	*value = be64_to_cpu(v);
 	return opal_xscom_err_xlate(rc);
 }
 
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index e738007eae64..c9fecf09b8fa 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m)
 {
 	struct hvcall_ppp_data ppp_data;
 	struct device_node *root;
-	const int *perf_level;
+	const __be32 *perf_level;
 	int rc;
 
 	rc = h_get_ppp(&ppp_data);
@@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m)
 	perf_level = of_get_property(root,
 			"ibm,partition-performance-parameters-level",
 			NULL);
-	if (perf_level && (*perf_level >= 1)) {
+	if (perf_level && (be32_to_cpup(perf_level) >= 1)) {
 		seq_printf(m,
 		    "physical_procs_allocated_to_virtualization=%d\n",
 			   ppp_data.phys_platform_procs);
@@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	int partition_potential_processors;
 	int partition_active_processors;
 	struct device_node *rtas_node;
-	const int *lrdrp = NULL;
+	const __be32 *lrdrp = NULL;
 
 	rtas_node = of_find_node_by_path("/rtas");
 	if (rtas_node)
@@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	if (lrdrp == NULL) {
 		partition_potential_processors = vdso_data->processorCount;
 	} else {
-		partition_potential_processors = *(lrdrp + 4);
+		partition_potential_processors = be32_to_cpup(lrdrp + 4);
 	}
 	of_node_put(rtas_node);
 
@@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
 	const char *model = "";
 	const char *system_id = "";
 	const char *tmp;
-	const unsigned int *lp_index_ptr;
+	const __be32 *lp_index_ptr;
 	unsigned int lp_index = 0;
 
 	seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
@@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
 		lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
 					NULL);
 		if (lp_index_ptr)
-			lp_index = *lp_index_ptr;
+			lp_index = be32_to_cpup(lp_index_ptr);
 		of_node_put(rootdn);
 	}
 	seq_printf(m, "serial_number=%s\n", system_id);
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 6d2f0abce6fa..0c882e83c4ce 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
 {
 	struct device_node *dn;
 	struct pci_dn *pdn;
-	const u32 *req_msi;
+	const __be32 *p;
+	u32 req_msi;
 
 	pdn = pci_get_pdn(pdev);
 	if (!pdn)
@@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
 
 	dn = pdn->node;
 
-	req_msi = of_get_property(dn, prop_name, NULL);
-	if (!req_msi) {
+	p = of_get_property(dn, prop_name, NULL);
+	if (!p) {
 		pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name);
 		return -ENOENT;
 	}
 
-	if (*req_msi < nvec) {
+	req_msi = be32_to_cpup(p);
+	if (req_msi < nvec) {
 		pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);
 
-		if (*req_msi == 0) /* Be paranoid */
+		if (req_msi == 0) /* Be paranoid */
 			return -ENOSPC;
 
-		return *req_msi;
+		return req_msi;
 	}
 
 	return 0;
@@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec)
 static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
 {
 	struct device_node *dn;
-	const u32 *p;
+	const __be32 *p;
 
 	dn = of_node_get(pci_device_to_OF_node(dev));
 	while (dn) {
@@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
 		if (p) {
 			pr_debug("rtas_msi: found prop on dn %s\n",
 				dn->full_name);
-			*total = *p;
+			*total = be32_to_cpup(p);
 			return dn;
 		}
 
@@ -232,13 +234,13 @@ struct msi_counts {
 static void *count_non_bridge_devices(struct device_node *dn, void *data)
 {
 	struct msi_counts *counts = data;
-	const u32 *p;
+	const __be32 *p;
 	u32 class;
 
 	pr_debug("rtas_msi: counting %s\n", dn->full_name);
 
 	p = of_get_property(dn, "class-code", NULL);
-	class = p ? *p : 0;
+	class = p ? be32_to_cpup(p) : 0;
 
 	if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
 		counts->num_devices++;
@@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data)
 static void *count_spare_msis(struct device_node *dn, void *data)
 {
 	struct msi_counts *counts = data;
-	const u32 *p;
+	const __be32 *p;
 	int req;
 
 	if (dn == counts->requestor)
@@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data)
 		req = 0;
 		p = of_get_property(dn, "ibm,req#msi", NULL);
 		if (p)
-			req = *p;
+			req = be32_to_cpup(p);
 
 		p = of_get_property(dn, "ibm,req#msi-x", NULL);
 		if (p)
-			req = max(req, (int)*p);
+			req = max(req, (int)be32_to_cpup(p));
 	}
 
 	if (req < counts->quota)
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 7bfaf58d4664..d7096f2f7751 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
 static DEFINE_SPINLOCK(nvram_lock);
 
 struct err_log_info {
-	int error_type;
-	unsigned int seq_num;
+	__be32 error_type;
+	__be32 seq_num;
 };
 
 struct nvram_os_partition {
@@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = {
 };
 
 struct oops_log_info {
-	u16 version;
-	u16 report_length;
-	u64 timestamp;
+	__be16 version;
+	__be16 report_length;
+	__be64 timestamp;
 } __attribute__((packed));
 
 static void oops_to_nvram(struct kmsg_dumper *dumper,
@@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff,
 		length = part->size;
 	}
 
-	info.error_type = err_type;
-	info.seq_num = error_log_cnt;
+	info.error_type = cpu_to_be32(err_type);
+	info.seq_num = cpu_to_be32(error_log_cnt);
 
 	tmp_index = part->index;
 
@@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff,
 	}
 
 	if (part->os_partition) {
-		*error_log_cnt = info.seq_num;
-		*err_type = info.error_type;
+		*error_log_cnt = be32_to_cpu(info.seq_num);
+		*err_type = be32_to_cpu(info.error_type);
 	}
 
 	return 0;
@@ -529,9 +529,9 @@ static int zip_oops(size_t text_len)
 		pr_err("nvram: logging uncompressed oops/panic report\n");
 		return -1;
 	}
-	oops_hdr->version = OOPS_HDR_VERSION;
-	oops_hdr->report_length = (u16) zipped_len;
-	oops_hdr->timestamp = get_seconds();
+	oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+	oops_hdr->report_length = cpu_to_be16(zipped_len);
+	oops_hdr->timestamp = cpu_to_be64(get_seconds());
 	return 0;
 }
 
@@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type,
 			clobbering_unread_rtas_event())
 		return -1;
 
-	oops_hdr->version = OOPS_HDR_VERSION;
-	oops_hdr->report_length = (u16) size;
-	oops_hdr->timestamp = get_seconds();
+	oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+	oops_hdr->report_length = cpu_to_be16(size);
+	oops_hdr->timestamp = cpu_to_be64(get_seconds());
 
 	if (compressed)
 		err_type = ERR_TYPE_KERNEL_PANIC_GZ;
@@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
 	size_t length, hdr_size;
 
 	oops_hdr = (struct oops_log_info *)buff;
-	if (oops_hdr->version < OOPS_HDR_VERSION) {
+	if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
 		/* Old format oops header had 2-byte record size */
 		hdr_size = sizeof(u16);
-		length = oops_hdr->version;
+		length = be16_to_cpu(oops_hdr->version);
 		time->tv_sec = 0;
 		time->tv_nsec = 0;
 	} else {
 		hdr_size = sizeof(*oops_hdr);
-		length = oops_hdr->report_length;
-		time->tv_sec = oops_hdr->timestamp;
+		length = be16_to_cpu(oops_hdr->report_length);
+		time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
 		time->tv_nsec = 0;
 	}
 	*buf = kmalloc(length, GFP_KERNEL);
@@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
 		kmsg_dump_get_buffer(dumper, false,
 				     oops_data, oops_data_sz, &text_len);
 		err_type = ERR_TYPE_KERNEL_PANIC;
-		oops_hdr->version = OOPS_HDR_VERSION;
-		oops_hdr->report_length = (u16) text_len;
-		oops_hdr->timestamp = get_seconds();
+		oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+		oops_hdr->report_length = cpu_to_be16(text_len);
+		oops_hdr->timestamp = cpu_to_be64(get_seconds());
 	}
 
 	(void) nvram_write_os_partition(&oops_log_partition, oops_buf,
-		(int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type,
+		(int) (sizeof(*oops_hdr) + text_len), err_type,
 		++oops_count);
 
 	spin_unlock_irqrestore(&lock, flags);
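Marking the header fields __be16/__be64 pins the on-NVRAM layout: values are converted once when stored and once when read, so images written by big- and little-endian kernels are interchangeable. A portable sketch of the same store/load discipline (helper names are stand-ins for the kernel's cpu_to_be*/be*_to_cpu):

    #include <stdio.h>
    #include <stdint.h>

    /* Serialize via explicit byte positions so the stored image never
     * depends on host endianness. */
    static void put_be16(uint8_t *p, uint16_t v)
    {
        p[0] = v >> 8;
        p[1] = v & 0xff;
    }

    static uint16_t get_be16(const uint8_t *p)
    {
        return (uint16_t)(p[0] << 8 | p[1]);
    }

    int main(void)
    {
        uint8_t nvram[2];
        uint16_t version = 5;       /* e.g. an OOPS_HDR_VERSION-style field */

        put_be16(nvram, version);
        printf("bytes: %02x %02x, read back: %u\n",
               nvram[0], nvram[1], get_be16(nvram));
        return 0;
    }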
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 5f93856cdf47..70670a2d9cf2 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
 	struct device_node *dn, *pdn;
 	struct pci_bus *bus;
-	const uint32_t *pcie_link_speed_stats;
+	const __be32 *pcie_link_speed_stats;
 
 	bus = bridge->bus;
 
@@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 		return 0;
 
 	for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
-		pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn,
+		pcie_link_speed_stats = of_get_property(pdn,
 			"ibm,pcie-link-speed-stats", NULL);
 		if (pcie_link_speed_stats)
 			break;
@@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 		return 0;
 	}
 
-	switch (pcie_link_speed_stats[0]) {
+	switch (be32_to_cpup(pcie_link_speed_stats)) {
 	case 0x01:
 		bus->max_bus_speed = PCIE_SPEED_2_5GT;
 		break;
@@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 		break;
 	}
 
-	switch (pcie_link_speed_stats[1]) {
+	switch (be32_to_cpup(pcie_link_speed_stats + 1)) {
 	case 0x01:
 		bus->cur_bus_speed = PCIE_SPEED_2_5GT;
 		break;
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index 7b95f29e3174..3baff31e58cf 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
 	 checksum.o strlen.o div64.o div64-generic.o
 
 # Extracted from libgcc
-lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
+obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
 	 ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
 	 udiv_qrnnd.o
 
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 8358dc144959..0f9e94537eee 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte)
 }
 
 #define pte_accessible pte_accessible
-static inline unsigned long pte_accessible(pte_t a)
+static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
 {
 	return pte_val(a) & _PAGE_VALID;
 }
@@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
 	 * and SUN4V pte layout, so this inline test is fine.
 	 */
-	if (likely(mm != &init_mm) && pte_accessible(orig))
+	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
 		tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e903c71f7e69..0952ecd60eca 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,6 +26,7 @@ config X86
 	select HAVE_AOUT if X86_32
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select ARCH_SUPPORTS_NUMA_BALANCING
+	select ARCH_SUPPORTS_INT128 if X86_64
 	select ARCH_WANTS_PROT_NUMA_PROT_NONE
 	select HAVE_IDE
 	select HAVE_OPROFILE
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 3d1999458709..bbc8b12fa443 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a)
 }
 
 #define pte_accessible pte_accessible
-static inline int pte_accessible(pte_t a)
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
 {
-	return pte_flags(a) & _PAGE_PRESENT;
+	if (pte_flags(a) & _PAGE_PRESENT)
+		return true;
+
+	if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+			mm_tlb_flush_pending(mm))
+		return true;
+
+	return false;
 }
 
 static inline int pte_hidden(pte_t pte)
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 8729723636fd..c8b051933b1b 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -8,6 +8,12 @@
 DECLARE_PER_CPU(int, __preempt_count);
 
 /*
+ * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
+ * that a decrement hitting 0 means we can and should reschedule.
+ */
+#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
+
+/*
  * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
  * that think a non-zero value indicates we cannot preempt.
  */
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val)
 	__this_cpu_add_4(__preempt_count, -val);
 }
 
+/*
+ * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to
+ * reschedule, a decrement which hits zero means we have no preempt_count
+ * and should reschedule.
+ */
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
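The PREEMPT_ENABLED definition encodes a neat trick: PREEMPT_NEED_RESCHED stays set while no reschedule is wanted, so "count dropped to zero and a reschedule is pending" collapses into a single compare against zero, which the decl instruction's zero flag gives for free. A standalone demonstration of the folding (the bit value matches the x86 header but should be treated as illustrative):

    #include <stdio.h>
    #include <stdbool.h>

    #define PREEMPT_NEED_RESCHED 0x80000000u
    #define PREEMPT_ENABLED      (0 + PREEMPT_NEED_RESCHED)

    static unsigned int preempt_count = PREEMPT_ENABLED;

    /* NEED_RESCHED is inverted: clearing the bit marks a pending reschedule. */
    static void set_need_resched(void)
    {
        preempt_count &= ~PREEMPT_NEED_RESCHED;
    }

    /* One decrement plus one test-for-zero replaces two separate checks. */
    static bool dec_and_test(void)
    {
        return --preempt_count == 0;
    }

    int main(void)
    {
        preempt_count += 1;         /* preempt_disable() */
        set_need_resched();         /* the inverted bit is now clear */
        printf("reschedule: %s\n", dec_and_test() ? "yes" : "no");   /* yes */

        preempt_count = PREEMPT_ENABLED + 1;   /* disabled, nothing pending */
        printf("reschedule: %s\n", dec_and_test() ? "yes" : "no");   /* no */
        return 0;
    }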
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index fd00bb29425d..c1a861829d81 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -262,11 +262,20 @@ struct cpu_hw_events {
 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
 			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
 
-#define EVENT_CONSTRAINT_END \
-	EVENT_CONSTRAINT(0, 0, 0)
+/*
+ * We define the end marker as having a weight of -1
+ * to enable blacklisting of events using a counter bitmask
+ * of zero and thus a weight of zero.
+ * The end marker has a weight that cannot possibly be
+ * obtained from counting the bits in the bitmask.
+ */
+#define EVENT_CONSTRAINT_END { .weight = -1 }
 
+/*
+ * Check for end marker with weight == -1
+ */
 #define for_each_event_constraint(e, c) \
-	for ((e) = (c); (e)->weight; (e)++)
+	for ((e) = (c); (e)->weight != -1; (e)++)
 
 /*
  * Extra registers for specific events.
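The sentinel change matters because a legitimately blacklisted constraint now carries weight zero, which used to terminate the walk early. A small standalone illustration, with the struct reduced to the one relevant field:

    #include <stdio.h>

    struct event_constraint { int weight; const char *name; };

    #define EVENT_CONSTRAINT_END { .weight = -1 }

    static const struct event_constraint constraints[] = {
        { 2, "constraint-a" },
        { 0, "blacklisted" },   /* weight 0: skippable, no longer a terminator */
        { 3, "constraint-b" },
        EVENT_CONSTRAINT_END,
    };

    int main(void)
    {
        const struct event_constraint *c;

        /* mirrors for_each_event_constraint(): stop only at weight == -1 */
        for (c = constraints; c->weight != -1; c++)
            printf("weight %d: %s\n", c->weight, c->name);
        return 0;
    }

With the old loop condition, which stopped at the first zero weight, "constraint-b" would never have been visited.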
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index dd74e46828c0..0596e8e0cc19 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 		pte_t pte = gup_get_pte(ptep);
 		struct page *page;
 
+		/* Similar to the PMD case, NUMA hinting must take slow path */
+		if (pte_numa(pte)) {
+			pte_unmap(ptep);
+			return 0;
+		}
+
 		if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
 			pte_unmap(ptep);
 			return 0;
@@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 			return 0;
 		if (unlikely(pmd_large(pmd))) {
+			/*
+			 * NUMA hinting faults need to be handled in the GUP
+			 * slowpath for accounting purposes and so that they
+			 * can be serialised against THP migration.
+			 */
+			if (pmd_numa(pmd))
+				return 0;
 			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
 				return 0;
 		} else {
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 26311f23c824..cb1d557fc22c 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count,
 static struct pstore_info erst_info = {
 	.owner		= THIS_MODULE,
 	.name		= "erst",
+	.flags		= PSTORE_FLAGS_FRAGILE,
 	.open		= erst_open_pstore,
 	.close		= erst_close_pstore,
 	.read		= erst_reader,
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 7be41e676a64..00a3abe103a5 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -60,7 +60,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw)
 	struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
 	int ret;
 
-	ret = regmap_update_bits(s2mps11->iodev->regmap,
+	ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
 				 S2MPS11_REG_RTC_CTRL,
 				 s2mps11->mask, s2mps11->mask);
 	if (!ret)
@@ -74,7 +74,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw)
 	struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
 	int ret;
 
-	ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL,
+	ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL,
 			s2mps11->mask, ~s2mps11->mask);
 
 	if (!ret)
@@ -174,7 +174,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
 		s2mps11_clk->hw.init = &s2mps11_clks_init[i];
 		s2mps11_clk->mask = 1 << i;
 
-		ret = regmap_read(s2mps11_clk->iodev->regmap,
+		ret = regmap_read(s2mps11_clk->iodev->regmap_pmic,
 				  S2MPS11_REG_RTC_CTRL, &val);
 		if (ret < 0)
 			goto err_reg;
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 5c07a56962db..634c4d6dd45a 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -75,6 +75,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
 config CLKSRC_EFM32
 	bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
 	depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
+	select CLKSRC_MMIO
 	default ARCH_EFM32
 	help
 	  Support to use the timers of EFM32 SoCs as clock source and clock
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 35639cf4e5a2..b9ddd9e3a2f5 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -35,6 +35,5 @@ void __init clocksource_of_init(void)
 
 		init_func = match->data;
 		init_func(np);
-		of_node_put(np);
 	}
 }
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 45ba8aecc729..2a2ea2717f3a 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -108,12 +108,11 @@ static void __init add_clocksource(struct device_node *source_timer)
 
 static u64 read_sched_clock(void)
 {
-	return __raw_readl(sched_io_base);
+	return ~__raw_readl(sched_io_base);
 }
 
 static const struct of_device_id sptimer_ids[] __initconst = {
 	{ .compatible = "picochip,pc3x2-rtc" },
-	{ .compatible = "snps,dw-apb-timer-sp" },
 	{ /* Sentinel */ },
 };
 
@@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer)
 	num_called++;
 }
 CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
-CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);
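The read_sched_clock() fix relies on the dw_apb timer being a down-counter while sched_clock must increase monotonically; bitwise inversion maps one onto the other with no extra arithmetic at wrap. A standalone illustration (the register value is made up):

    #include <stdio.h>
    #include <stdint.h>

    /* A down-counting timer register: starts near all-ones and decreases. */
    static uint32_t fake_timer_reg = 0xfffffff0;

    static uint64_t read_sched_clock_model(void)
    {
        /* ~reg turns a decreasing count into an increasing tick value */
        return ~fake_timer_reg;
    }

    int main(void)
    {
        printf("ticks = %llu\n", (unsigned long long)read_sched_clock_model());
        fake_timer_reg -= 5;    /* the hardware counts down... */
        printf("ticks = %llu\n", (unsigned long long)read_sched_clock_model());
        return 0;
    }

The second value printed is larger than the first, as sched_clock requires.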
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 2fb4695a28d8..a4f6119aafd8 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -179,6 +179,9 @@ static void __init sun4i_timer_init(struct device_node *node)
 	writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
 	       timer_base + TIMER_CTL_REG(0));
 
+	/* Make sure timer is stopped before playing with interrupts */
+	sun4i_clkevt_time_stop(0);
+
 	ret = setup_irq(irq, &sun4i_timer_irq);
 	if (ret)
 		pr_warn("failed to setup irq %d\n", irq);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index d8e47e502785..4e7f6802e840 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -256,11 +256,6 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
 	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
 
 	/*
-	 * Set scale and timer for sched_clock.
-	 */
-	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
-
-	/*
 	 * Setup free-running clocksource timer (interrupts
 	 * disabled).
 	 */
@@ -270,6 +265,11 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
 	timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
 		TIMER0_DIV(TIMER_DIVIDER_SHIFT));
 
+	/*
+	 * Set scale and timer for sched_clock.
+	 */
+	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
+
 	clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
 			      "armada_370_xp_clocksource",
 			      timer_clk, 300, 32, clocksource_mmio_readl_down);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 446687cc2334..c823daaf9043 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select DCA
 	help
 	  Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
 	tristate "AMCC PPC440SPe ADMA support"
 	depends on 440SPe || 440SP
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
@@ -352,6 +355,7 @@ config NET_DMA
 	bool "Network: TCP receive copy offload"
 	depends on DMA_ENGINE && NET
 	default (INTEL_IOATDMA || FSL_DMA)
+	depends on BROKEN
 	help
 	  This enables the use of DMA engines in the network stack to
 	  offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +381,7 @@ config DMATEST
 	  Simple DMA test client. Say N unless you're debugging a
 	  DMA Device driver.
 
+config DMA_ENGINE_RAID
+	bool
+
 endif
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index f31d647acdfa..2787aba60c6b 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
 {
 	return &chan->dev->device;
 }
-static struct device *chan2parent(struct dma_chan *chan)
-{
-	return chan->dev->device.parent;
-}
 
 #if defined(VERBOSE_DEBUG)
 static void vdbg_dump_regs(struct at_dma_chan *atchan)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ea806bdc12ef..ef63b9058f3c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -912,7 +912,7 @@ struct dmaengine_unmap_pool {
 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
 static struct dmaengine_unmap_pool unmap_pool[] = {
 	__UNMAP_POOL(2),
-	#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
 	__UNMAP_POOL(16),
 	__UNMAP_POOL(128),
 	__UNMAP_POOL(256),
@@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	dma_cookie_t cookie;
 	unsigned long flags;
 
-	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
 	if (!unmap)
 		return -ENOMEM;
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 20f9a3aaf926..9dfcaf5c1288 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -539,9 +539,9 @@ static int dmatest_func(void *data)
 
 		um->len = params->buf_size;
 		for (i = 0; i < src_cnt; i++) {
-			unsigned long buf = (unsigned long) thread->srcs[i];
+			void *buf = thread->srcs[i];
 			struct page *pg = virt_to_page(buf);
-			unsigned pg_off = buf & ~PAGE_MASK;
+			unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
 
 			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
 						   um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
 		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
 		dsts = &um->addr[src_cnt];
 		for (i = 0; i < dst_cnt; i++) {
-			unsigned long buf = (unsigned long) thread->dsts[i];
+			void *buf = thread->dsts[i];
 			struct page *pg = virt_to_page(buf);
-			unsigned pg_off = buf & ~PAGE_MASK;
+			unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
 
 			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
 					       DMA_BIDIRECTIONAL);
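Beyond the type cleanup, the pg_off computation is a pattern worth isolating: the low bits of any address are its offset within the containing page, which is exactly the offset dma_map_page() needs alongside the page itself. A standalone illustration (PAGE_SHIFT and the address are arbitrary choices):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        uintptr_t addr = 0x12345678;   /* any address inside a buffer */

        /* page base and in-page offset, split the way DMA mapping wants */
        printf("page base = 0x%lx\n", (unsigned long)(addr & PAGE_MASK));
        printf("offset    = 0x%lx\n", (unsigned long)(addr & ~PAGE_MASK));
        return 0;
    }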
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 7086a16a55f2..f157c6f76b32 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
 	hw->count = CPU_TO_DMA(chan, count, 32);
 }
 
-static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
-{
-	return DMA_TO_CPU(chan, desc->hw.count, 32);
-}
-
 static void set_desc_src(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
 	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }
 
-static dma_addr_t get_desc_src(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_dst(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
 	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }
 
-static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_next(struct fsldma_chan *chan,
 			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_desc_sw *child;
 	unsigned long flags;
-	dma_cookie_t cookie;
+	dma_cookie_t cookie = -EINVAL;
 
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
 			       struct fsl_desc_sw *desc)
 {
 	struct dma_async_tx_descriptor *txd = &desc->async_tx;
-	struct device *dev = chan->common.device->dev;
-	dma_addr_t src = get_desc_src(chan, desc);
-	dma_addr_t dst = get_desc_dst(chan, desc);
-	u32 len = get_desc_cnt(chan, desc);
 
 	/* Run the link descriptor callback function */
 	if (txd->callback) {
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 7807f0ef4e20..53fb0c8365b0 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
 	hw_desc->desc_command = (1 << 31);
 }
 
-static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
-{
-	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	return hw_desc->phy_dest_addr;
-}
-
 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
 				   u32 byte_count)
 {
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
 /*
  * Perform a transaction to verify the HW works.
  */
-#define MV_XOR_TEST_SIZE 2000
 
 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	int err = 0;
 
-	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!src)
 		return -ENOMEM;
 
-	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!dest) {
 		kfree(src);
 		return -ENOMEM;
 	}
 
 	/* Fill in src buffer */
-	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+	for (i = 0; i < PAGE_SIZE; i++)
 		((u8 *) src)[i] = (u8)i;
 
 	dma_chan = &mv_chan->dmachan;
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}
 
-	dest_dma = dma_map_single(dma_chan->device->dev, dest,
-				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+
+	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
+			       PAGE_SIZE, DMA_TO_DEVICE);
+	unmap->to_cnt = 1;
+	unmap->addr[0] = src_dma;
 
-	src_dma = dma_map_single(dma_chan->device->dev, src,
-				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
+				PAGE_SIZE, DMA_FROM_DEVICE);
+	unmap->from_cnt = 1;
+	unmap->addr[1] = dest_dma;
+
+	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
-				    MV_XOR_TEST_SIZE, 0);
+				    PAGE_SIZE, 0);
 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
@@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
-				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
-	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+				PAGE_SIZE, DMA_FROM_DEVICE);
+	if (memcmp(src, dest, PAGE_SIZE)) {
 		dev_err(dma_chan->device->dev,
 			"Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
@@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
 	kfree(src);
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
 	dma_addr_t dest_dma;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	u8 cmp_byte = 0;
 	u32 cmp_word;
 	int err = 0;
+	int src_count = MV_XOR_NUM_SRC_TEST;
 
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
 		if (!xor_srcs[src_idx]) {
 			while (src_idx--)
@@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 	/* Fill in src buffers */
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		u8 *ptr = page_address(xor_srcs[src_idx]);
 		for (i = 0; i < PAGE_SIZE; i++)
 			ptr[i] = (1 << src_idx);
 	}
 
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+	for (src_idx = 0; src_idx < src_count; src_idx++)
 		cmp_byte ^= (u8) (1 << src_idx);
 
 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}
 
+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
+					 GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+
 	/* test xor */
-	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
-				DMA_FROM_DEVICE);
+	for (i = 0; i < src_count; i++) {
+		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+					      0, PAGE_SIZE, DMA_TO_DEVICE);
+		dma_srcs[i] = unmap->addr[i];
+		unmap->to_cnt++;
+	}
 
-	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
-		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
-					   0, PAGE_SIZE, DMA_TO_DEVICE);
+	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+					      DMA_FROM_DEVICE);
+	dest_dma = unmap->addr[src_count];
+	unmap->from_cnt = 1;
+	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+				 src_count, PAGE_SIZE, 0);
 
 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
@@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
-	src_idx = MV_XOR_NUM_SRC_TEST;
+	src_idx = src_count;
 	while (src_idx--)
 		__free_page(xor_srcs[src_idx]);
 	__free_page(dest);
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 		int i = 0;
 
 		for_each_child_of_node(pdev->dev.of_node, np) {
+			struct mv_xor_chan *chan;
 			dma_cap_mask_t cap_mask;
 			int irq;
 
@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
 				goto err_channel_add;
 			}
 
-			xordev->channels[i] =
-				mv_xor_channel_add(xordev, pdev, i,
-						   cap_mask, irq);
-			if (IS_ERR(xordev->channels[i])) {
-				ret = PTR_ERR(xordev->channels[i]);
-				xordev->channels[i] = NULL;
+			chan = mv_xor_channel_add(xordev, pdev, i,
+						  cap_mask, irq);
+			if (IS_ERR(chan)) {
+				ret = PTR_ERR(chan);
 				irq_dispose_mapping(irq);
 				goto err_channel_add;
 			}
 
+			xordev->channels[i] = chan;
 			i++;
 		}
 	} else if (pdata && pdata->channels) {
 		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
 			struct mv_xor_channel_data *cd;
+			struct mv_xor_chan *chan;
 			int irq;
 
 			cd = &pdata->channels[i];
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
 				goto err_channel_add;
 			}
 
-			xordev->channels[i] =
-				mv_xor_channel_add(xordev, pdev, i,
-						   cd->cap_mask, irq);
-			if (IS_ERR(xordev->channels[i])) {
-				ret = PTR_ERR(xordev->channels[i]);
+			chan = mv_xor_channel_add(xordev, pdev, i,
+						  cd->cap_mask, irq);
+			if (IS_ERR(chan)) {
+				ret = PTR_ERR(chan);
 				goto err_channel_add;
 			}
+
+			xordev->channels[i] = chan;
 		}
 	}
 
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cdf0483b8f2d..536632f6479c 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 
 static inline void _init_desc(struct dma_pl330_desc *desc)
 {
-	desc->pchan = NULL;
 	desc->req.x = &desc->px;
 	desc->req.token = desc;
 	desc->rqcfg.swap = SWAP_NO;
-	desc->rqcfg.privileged = 0;
-	desc->rqcfg.insnaccess = 0;
 	desc->rqcfg.scctl = SCCTRL0;
 	desc->rqcfg.dcctl = DCCTRL0;
 	desc->req.cfg = &desc->rqcfg;
@@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
 	if (!pdmac)
 		return 0;
 
-	desc = kmalloc(count * sizeof(*desc), flg);
+	desc = kcalloc(count, sizeof(*desc), flg);
 	if (!desc)
 		return 0;
 
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 8da48c6b2a38..8bba298535b0 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -533,29 +533,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
 }
 
 /**
- * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
- */
-static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
-					int value, unsigned long flags)
-{
-	struct dma_cdb *hw_desc = desc->hw_desc;
-
-	memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
-	desc->hw_next = NULL;
-	desc->src_cnt = 1;
-	desc->dst_cnt = 1;
-
-	if (flags & DMA_PREP_INTERRUPT)
-		set_bit(PPC440SPE_DESC_INT, &desc->flags);
-	else
-		clear_bit(PPC440SPE_DESC_INT, &desc->flags);
-
-	hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
-	hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
-	hw_desc->opc = DMA_CDB_OPC_DFILL128;
-}
-
-/**
  * ppc440spe_desc_set_src_addr - set source address into the descriptor
  */
 static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
@@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
 					struct ppc440spe_adma_chan *chan,
 					dma_cookie_t cookie)
 {
-	int i;
-
 	BUG_ON(desc->async_tx.cookie < 0);
 	if (desc->async_tx.cookie > 0) {
 		cookie = desc->async_tx.cookie;
@@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
 			ppc440spe_adma_prep_dma_interrupt;
 	}
 	pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
-		"( %s%s%s%s%s%s%s)\n",
+		"( %s%s%s%s%s%s)\n",
 		dev_name(adev->dev),
 		dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
 		dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index bae6c29f5502..17686caf64d5 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
 	dma_async_tx_callback callback;
 	void *param;
 	struct dma_async_tx_descriptor *txd = &desc->txd;
-	struct txx9dmac_slave *ds = dc->chan.private;
 
 	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
 		 txd->cookie, desc);
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index b0bb056458a3..281029daf98c 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = {
 	.cmd_per_lun		= 1,
 	.can_queue		= 1,
 	.sdev_attrs		= sbp2_scsi_sysfs_attrs,
-	.no_write_same		= 1,
 };
 
 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 743fd426f21b..4b9dc836dcf9 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -356,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
 static struct pstore_info efi_pstore_info = {
 	.owner		= THIS_MODULE,
 	.name		= "efi",
+	.flags		= PSTORE_FLAGS_FRAGILE,
 	.open		= efi_pstore_open,
 	.close		= efi_pstore_close,
 	.read		= efi_pstore_read,
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index 7b37300973db..2baf0ddf7e02 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
 	writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
-	clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+	clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
 	__clear_bit(gpio, msm_gpio.enabled_irqs);
 	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
 }
@@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
 	__set_bit(gpio, msm_gpio.enabled_irqs);
-	set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+	set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
 	writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
 	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
 }
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index fe088a30567a..8b7e719a68c3 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
 	u32 pending;
 	unsigned int offset, irqs_handled = 0;
 
-	while ((pending = gpio_rcar_read(p, INTDT))) {
+	while ((pending = gpio_rcar_read(p, INTDT) &
+			  gpio_rcar_read(p, INTMSK))) {
 		offset = __ffs(pending);
 		gpio_rcar_write(p, INTCLR, BIT(offset));
 		generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index b97d6a6577b9..f9996899c1f2 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
 	if (offset < TWL4030_GPIO_MAX)
 		ret = twl4030_set_gpio_direction(offset, 1);
 	else
-		ret = -EINVAL;
+		ret = -EINVAL;	/* LED outputs can't be set as input */
 
 	if (!ret)
 		priv->direction &= ~BIT(offset);
@@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
 static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
 {
 	struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
-	int ret = -EINVAL;
+	int ret = 0;
 
 	mutex_lock(&priv->mutex);
-	if (offset < TWL4030_GPIO_MAX)
+	if (offset < TWL4030_GPIO_MAX) {
 		ret = twl4030_set_gpio_direction(offset, 0);
+		if (ret) {
+			mutex_unlock(&priv->mutex);
+			return ret;
+		}
+	}
+
+	/*
+	 * LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output
+	 */
 
 	priv->direction |= BIT(offset);
 	mutex_unlock(&priv->mutex);
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index eef09ec9a5ff..a72cae03b99b 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *,
 extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
 
 int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_lastclose(struct drm_device *);
 void armada_fbdev_fini(struct drm_device *);
 
 int armada_overlay_plane_create(struct drm_device *, unsigned long);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 4f2b28354915..62d0ff3efddf 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -321,6 +321,11 @@ static struct drm_ioctl_desc armada_ioctls[] = {
 		    DRM_UNLOCKED),
 };
 
+static void armada_drm_lastclose(struct drm_device *dev)
+{
+	armada_fbdev_lastclose(dev);
+}
+
 static const struct file_operations armada_drm_fops = {
 	.owner			= THIS_MODULE,
 	.llseek			= no_llseek,
@@ -337,7 +342,7 @@ static struct drm_driver armada_drm_driver = {
 	.open			= NULL,
 	.preclose		= NULL,
 	.postclose		= NULL,
-	.lastclose		= NULL,
+	.lastclose		= armada_drm_lastclose,
 	.unload			= armada_drm_unload,
 	.get_vblank_counter	= drm_vblank_count,
 	.enable_vblank		= armada_drm_enable_vblank,
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index dd5ea77dac96..948cb14c561e 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
 	drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
 	drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
 
-	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
-		dfb->fb.width, dfb->fb.height,
-		dfb->fb.bits_per_pixel, obj->phys_addr);
+	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
+		dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
+		(unsigned long long)obj->phys_addr);
 
 	return 0;
 
@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev)
 	return ret;
 }
 
+void armada_fbdev_lastclose(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	drm_modeset_lock_all(dev);
+	if (priv->fbdev)
+		drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+	drm_modeset_unlock_all(dev);
+}
+
 void armada_fbdev_fini(struct drm_device *dev)
 {
 	struct armada_private *priv = dev->dev_private;
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev)
 			framebuffer_release(info);
 		}
 
+		drm_fb_helper_fini(fbh);
+
 		if (fbh->fb)
 			fbh->fb->funcs->destroy(fbh->fb);
 
-		drm_fb_helper_fini(fbh);
-
 		priv->fbdev = NULL;
 	}
 }
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 9f2356bae7fd..887816f43476 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 		obj->dev_addr = obj->linear->start;
 	}
 
-	DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
-			 obj, obj->phys_addr, obj->dev_addr);
+	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
+			 (unsigned long long)obj->phys_addr,
+			 (unsigned long long)obj->dev_addr);
 
 	return 0;
 }
@@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
 			 * refcount on the gem object itself.
 			 */
 			drm_gem_object_reference(obj);
-			dma_buf_put(buf);
 			return obj;
 		}
 	}
@@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
 	}
 
 	dobj->obj.import_attach = attach;
+	get_dma_buf(buf);
 
 	/*
 	 * Don't call dma_buf_map_attachment() here - it maps the
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0a1e4a5f4234..8835dcddfac3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -68,6 +68,8 @@
 #define EDID_QUIRK_DETAILED_SYNC_PP		(1 << 6)
 /* Force reduced-blanking timings for detailed modes */
 #define EDID_QUIRK_FORCE_REDUCED_BLANKING	(1 << 7)
+/* Force 8bpc */
+#define EDID_QUIRK_FORCE_8BPC			(1 << 8)
 
 struct detailed_mode_closure {
 	struct drm_connector *connector;
@@ -128,6 +130,9 @@ static struct edid_quirk {
 
 	/* Medion MD 30217 PG */
 	{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+
+	/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+	{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
 };
 
 /*
@@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 
 	drm_add_display_info(edid, &connector->display_info);
 
+	if (quirks & EDID_QUIRK_FORCE_8BPC)
+		connector->display_info.bpc = 8;
+
 	return num_modes;
 }
 EXPORT_SYMBOL(drm_add_edid_modes);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index f53d5246979c..66dd3a001cf1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -566,11 +566,11 @@ err_unload:
 	if (dev->driver->unload)
 		dev->driver->unload(dev);
 err_primary_node:
-	drm_put_minor(dev->primary);
+	drm_unplug_minor(dev->primary);
 err_render_node:
-	drm_put_minor(dev->render);
+	drm_unplug_minor(dev->render);
 err_control_node:
-	drm_put_minor(dev->control);
+	drm_unplug_minor(dev->control);
 err_agp:
 	if (dev->driver->bus->agp_destroy)
 		dev->driver->bus->agp_destroy(dev);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0cab2d045135..5c648425c1e0 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 
+	/*
+	 * The dri breadcrumb update races against the drm master disappearing.
+	 * Instead of trying to fix this (this is by far not the only ums issue)
+	 * just don't do the update in kms mode.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	if (dev->primary->master) {
 		master_priv = dev->primary->master->driver_priv;
 		if (master_priv->sarea_priv)
@@ -1490,16 +1498,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	mutex_init(&dev_priv->dpio_lock);
-	mutex_init(&dev_priv->rps.hw_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 
-	mutex_init(&dev_priv->pc8.lock);
-	dev_priv->pc8.requirements_met = false;
-	dev_priv->pc8.gpu_idle = false;
-	dev_priv->pc8.irqs_disabled = false;
-	dev_priv->pc8.enabled = false;
-	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
-	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+	intel_pm_setup(dev);
 
 	intel_display_crc_init(dev);
 
@@ -1603,7 +1604,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	}
 
 	intel_irq_init(dev);
-	intel_pm_init(dev);
 	intel_uncore_sanitize(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1848,8 +1848,10 @@ void i915_driver_lastclose(struct drm_device * dev)
 
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
+	mutex_lock(&dev->struct_mutex);
 	i915_gem_context_close(dev, file_priv);
 	i915_gem_release(dev, file_priv);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2e367a1c6a64..5b7b7e06cb3a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -651,6 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 	intel_modeset_init_hw(dev);
 
 	drm_modeset_lock_all(dev);
+	drm_mode_config_reset(dev);
 	intel_modeset_setup_hw_state(dev, true);
 	drm_modeset_unlock_all(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ccdbecca070d..90fcccba17b0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1755,8 +1755,13 @@ struct drm_i915_file_private {
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
-#define IS_ULT(dev)		(IS_HASWELL(dev) && \
+#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
+				 (((dev)->pdev->device & 0xf) == 0x2 || \
+				 ((dev)->pdev->device & 0xf) == 0x6 || \
+				 ((dev)->pdev->device & 0xf) == 0xe))
+#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0x00F0) == 0x0020)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -1901,9 +1906,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 72a3df32292f..b0f42b9ca037 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -347,10 +347,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	mutex_lock(&dev->struct_mutex);
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static struct i915_hw_context *
@@ -423,11 +421,21 @@ static int do_switch(struct i915_hw_context *to)
 	if (ret)
 		return ret;
 
-	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
+	/*
+	 * Pin can switch back to the default context if we end up calling into
+	 * evict_everything - as a last ditch gtt defrag effort that also
+	 * switches to the default context. Hence we need to reload from here.
+	 */
+	from = ring->last_context;
+
+	/*
+	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
 	 * that thanks to write = false in this call and us not setting any gpu
 	 * write domains when putting a context object onto the active list
 	 * (when switching away from it), this won't block.
-	 * XXX: We need a real interface to do this instead of trickery. */
+	 *
+	 * XXX: We need a real interface to do this instead of trickery.
+	 */
 	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
 	if (ret) {
 		i915_gem_object_unpin(to->obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b7376533633d..8f3adc7d0dc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
+search_again:
 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
 		if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ none:
 		list_del_init(&vma->exec_list);
 	}
 
-	/* We expect the caller to unpin, evict all and try again, or give up.
-	 * So calling i915_gem_evict_vm() is unnecessary.
+	/* Can we unpin some objects such as idle hw contents,
+	 * or pending flips?
 	 */
-	return -ENOSPC;
+	ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	/* Only idle the GPU and repeat the search once */
+	i915_gem_retire_requests(dev);
+	nonblocking = true;
+	goto search_again;
 
 found:
 	/* drm_mm doesn't allow any other other operations while
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 38cb8d44a013..c79dd2b1f70e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -337,8 +337,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 		kfree(ppgtt->gen8_pt_dma_addr[i]);
 	}
 
-	__free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
-	__free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+	__free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
+	__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
 }
 
 /**
@@ -1241,6 +1241,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
 	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
 	if (bdw_gmch_ctl)
 		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+	if (bdw_gmch_ctl > 4) {
+		WARN_ON(!i915_preliminary_hw_support);
+		return 4<<20;
+	}
+
 	return bdw_gmch_ctl << 20;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 080f6fd4e839..8b8bde7dce53 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9135,7 +9135,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
 		PIPE_CONF_CHECK_I(pipe_bpp);
 
-	if (!IS_HASWELL(dev)) {
+	if (!HAS_DDI(dev)) {
 		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
 		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
 	}
@@ -11036,8 +11036,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 	}
 
 	intel_modeset_check_state(dev);
-
-	drm_mode_config_reset(dev);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -11046,7 +11044,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
 
 	intel_setup_overlay(dev);
 
+	drm_modeset_lock_all(dev);
+	drm_mode_config_reset(dev);
 	intel_modeset_setup_hw_state(dev, false);
+	drm_modeset_unlock_all(dev);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a18e88b3e425..79f91f26e288 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -821,6 +821,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
 				    uint32_t sprite_width, int pixel_size,
 				    bool enabled, bool scaled);
 void intel_init_pm(struct drm_device *dev);
+void intel_pm_setup(struct drm_device *dev);
 bool intel_fbc_enabled(struct drm_device *dev);
 void intel_update_fbc(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index f161ac02c4f6..e6f782d1c669 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -451,7 +451,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
 
 	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (IS_BROADWELL(dev)) {
+		val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+	} else if (HAS_PCH_SPLIT(dev)) {
 		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 	} else {
 		if (IS_VALLEYVIEW(dev))
@@ -479,6 +481,13 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
 	return val;
 }
 
+static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+}
+
 static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,7 +505,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev,
 	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
 	level = intel_panel_compute_brightness(dev, pipe, level);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (IS_BROADWELL(dev))
+		return intel_bdw_panel_set_backlight(dev, level);
+	else if (HAS_PCH_SPLIT(dev))
 		return intel_pch_panel_set_backlight(dev, level);
 
 	if (is_backlight_combination_mode(dev)) {
@@ -666,7 +677,16 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
 	POSTING_READ(reg);
 	I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
 
-	if (HAS_PCH_SPLIT(dev) &&
+	if (IS_BROADWELL(dev)) {
+		/*
+		 * Broadwell requires PCH override to drive the PCH
+		 * backlight pin. The above will configure the CPU
+		 * backlight pin, which we don't plan to use.
+		 */
+		tmp = I915_READ(BLC_PWM_PCH_CTL1);
+		tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE;
+		I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+	} else if (HAS_PCH_SPLIT(dev) &&
 	    !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
 		tmp = I915_READ(BLC_PWM_PCH_CTL1);
 		tmp |= BLM_PCH_PWM_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 6e0d5e075b15..3657ab43c8fd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5685,6 +5685,7 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool is_enabled, enable_requested;
+	unsigned long irqflags;
 	uint32_t tmp;
 
 	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
@@ -5702,9 +5703,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 					      HSW_PWR_WELL_STATE_ENABLED), 20))
 				DRM_ERROR("Timeout enabling power well\n");
 		}
+
+		if (IS_BROADWELL(dev)) {
+			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+			I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+				   dev_priv->de_irq_mask[PIPE_B]);
+			I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+				   ~dev_priv->de_irq_mask[PIPE_B] |
+				   GEN8_PIPE_VBLANK);
+			I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+				   dev_priv->de_irq_mask[PIPE_C]);
+			I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+				   ~dev_priv->de_irq_mask[PIPE_C] |
+				   GEN8_PIPE_VBLANK);
+			POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+		}
 	} else {
 		if (enable_requested) {
-			unsigned long irqflags;
 			enum pipe p;
 
 			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
@@ -6130,10 +6146,19 @@ int vlv_freq_opcode(int ddr_freq, int val)
 	return val;
 }
 
-void intel_pm_init(struct drm_device *dev)
+void intel_pm_setup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	mutex_init(&dev_priv->rps.hw_lock);
+
+	mutex_init(&dev_priv->pc8.lock);
+	dev_priv->pc8.requirements_met = false;
+	dev_priv->pc8.gpu_idle = false;
+	dev_priv->pc8.irqs_disabled = false;
+	dev_priv->pc8.enabled = false;
+	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
 	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
 			  intel_gen6_powersave_work);
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b620337e6d67..c2f09d456300 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 	} else if (IS_GEN6(ring->dev)) {
 		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
 	} else {
+		/* XXX: gen8 returns to sanity */
 		mmio = RING_HWS_PGA(ring->mmio_base);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 0b02078a0b84..25cbe073c388 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -784,6 +784,7 @@ static int gen6_do_reset(struct drm_device *dev)
 int intel_gpu_reset(struct drm_device *dev)
 {
 	switch (INTEL_INFO(dev)->gen) {
+	case 8:
 	case 7:
 	case 6: return gen6_do_reset(dev);
 	case 5: return ironlake_do_reset(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7a3759f1c41a..98a22e6e27a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
 	if (nouveau_runtime_pm == 0)
 		return -EINVAL;
 
+	/* are we optimus enabled? */
+	if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+		DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+		return -EINVAL;
+	}
+
 	nv_debug_level(SILENT);
 	drm_kms_helper_poll_disable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 80a20120e625..b1970596a782 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1196,7 +1196,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
 	} else if ((rdev->family == CHIP_TAHITI) ||
 		   (rdev->family == CHIP_PITCAIRN))
 		fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
-	else if (rdev->family == CHIP_VERDE)
+	else if ((rdev->family == CHIP_VERDE) ||
+		 (rdev->family == CHIP_OLAND) ||
+		 (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
 		fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
 
 	switch (radeon_crtc->crtc_id) {
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 0300727a4f70..d08b83c6267b 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -458,7 +458,7 @@ int cik_copy_dma(struct radeon_device *rdev,
 		radeon_ring_write(ring, 0); /* src/dst endian swap */
 		radeon_ring_write(ring, src_offset & 0xffffffff);
 		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
-		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, dst_offset & 0xffffffff);
 		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
 		src_offset += cur_size_in_bytes;
 		dst_offset += cur_size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e354ce94cdd1..c0425bb6223a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2021,7 +2021,7 @@ static struct radeon_asic ci_asic = {
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = NULL,
+		.blit = &cik_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &cik_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2122,7 +2122,7 @@ static struct radeon_asic kv_asic = {
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = NULL,
+		.blit = &cik_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &cik_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9f5ff28864f6..1958b36ad0e5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -508,15 +508,6 @@ static const struct file_operations radeon_driver_kms_fops = {
 #endif
 };
 
-
-static void
-radeon_pci_shutdown(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
-
-	radeon_driver_unload_kms(dev);
-}
-
 static struct drm_driver kms_driver = {
 	.driver_features =
 	    DRIVER_USE_AGP |
@@ -586,7 +577,6 @@ static struct pci_driver radeon_kms_pci_driver = {
 	.probe = radeon_pci_probe,
 	.remove = radeon_pci_remove,
 	.driver.pm = &radeon_pm_ops,
-	.shutdown = radeon_pci_shutdown,
 };
 
 static int __init radeon_init(void)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1c560629575a..e7dab069cccf 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
 	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
 	base = G_000100_MC_FB_START(base) << 16;
 	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+	/* Some boards seem to be configured for 128MB of sideport memory,
+	 * but really only have 64MB. Just skip the sideport and use
+	 * UMA memory.
+	 */
+	if (rdev->mc.igp_sideport_enabled &&
+	    (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
+		base += 128 * 1024 * 1024;
+		rdev->mc.real_vram_size -= 128 * 1024 * 1024;
+		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	}
 
 	/* Use K8 direct mapping for fast fb access. */
 	rdev->fastfb_working = false;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index b249ab9b1eb2..6440eeac22d2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -169,9 +169,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-		drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
-	page_last = vma_pages(vma) +
-		drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+	page_last = vma_pages(vma) + vma->vm_pgoff -
+		drm_vma_node_start(&bo->vma_node);
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a51f48e3e917..45d5b5ab6ca9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -68,6 +68,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
68 SVGA_FIFO_3D_HWVERSION)); 68 SVGA_FIFO_3D_HWVERSION));
69 break; 69 break;
70 } 70 }
71 case DRM_VMW_PARAM_MAX_SURF_MEMORY:
72 param->value = dev_priv->memory_size;
73 break;
71 default: 74 default:
72 DRM_ERROR("Illegal vmwgfx get param request: %d\n", 75 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
73 param->param); 76 param->param);
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index acb7f90359a3..749a6cadab8b 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
200 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), 200 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
201 .address = 1, 201 .address = 1,
202 .scan_index = 1, 202 .scan_index = 1,
203 .scan_type = IIO_ST('u', 12, 16, 0), 203 .scan_type = {
204 .sign = 'u',
205 .realbits = 12,
206 .storagebits = 16,
207 .shift = 0,
208 .endianness = IIO_BE,
209 },
204 }, 210 },
205 .channel[1] = { 211 .channel[1] = {
206 .type = IIO_VOLTAGE, 212 .type = IIO_VOLTAGE,
@@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
210 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), 216 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
211 .address = 0, 217 .address = 0,
212 .scan_index = 0, 218 .scan_index = 0,
213 .scan_type = IIO_ST('u', 12, 16, 0), 219 .scan_type = {
220 .sign = 'u',
221 .realbits = 12,
222 .storagebits = 16,
223 .shift = 0,
224 .endianness = IIO_BE,
225 },
214 }, 226 },
215 .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2), 227 .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
216 .int_vref_mv = 2500, 228 .int_vref_mv = 2500,
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 3fb7757a1028..368660dfe135 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = {
651 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), 651 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
652 .address = ADIS16448_BARO_OUT, 652 .address = ADIS16448_BARO_OUT,
653 .scan_index = ADIS16400_SCAN_BARO, 653 .scan_index = ADIS16400_SCAN_BARO,
654 .scan_type = IIO_ST('s', 16, 16, 0), 654 .scan_type = {
655 .sign = 's',
656 .realbits = 16,
657 .storagebits = 16,
658 .endianness = IIO_BE,
659 },
655 }, 660 },
656 ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), 661 ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
657 IIO_CHAN_SOFT_TIMESTAMP(11) 662 IIO_CHAN_SOFT_TIMESTAMP(11)
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 21df57130018..0922e39b0ea9 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
387 return -EINVAL; 387 return -EINVAL;
388 } 388 }
389 389
390 return IIO_VAL_INT_PLUS_MICRO; 390 return IIO_VAL_INT;
391} 391}
392 392
393static int cm36651_write_int_time(struct cm36651_data *cm36651, 393static int cm36651_write_int_time(struct cm36651_data *cm36651,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6be57c38638d..9804fca6bf06 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
207 isert_conn->conn_rx_descs = NULL; 207 isert_conn->conn_rx_descs = NULL;
208} 208}
209 209
210static void isert_cq_tx_work(struct work_struct *);
210static void isert_cq_tx_callback(struct ib_cq *, void *); 211static void isert_cq_tx_callback(struct ib_cq *, void *);
212static void isert_cq_rx_work(struct work_struct *);
211static void isert_cq_rx_callback(struct ib_cq *, void *); 213static void isert_cq_rx_callback(struct ib_cq *, void *);
212 214
213static int 215static int
@@ -259,26 +261,36 @@ isert_create_device_ib_res(struct isert_device *device)
259 cq_desc[i].device = device; 261 cq_desc[i].device = device;
260 cq_desc[i].cq_index = i; 262 cq_desc[i].cq_index = i;
261 263
264 INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
262 device->dev_rx_cq[i] = ib_create_cq(device->ib_device, 265 device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
263 isert_cq_rx_callback, 266 isert_cq_rx_callback,
264 isert_cq_event_callback, 267 isert_cq_event_callback,
265 (void *)&cq_desc[i], 268 (void *)&cq_desc[i],
266 ISER_MAX_RX_CQ_LEN, i); 269 ISER_MAX_RX_CQ_LEN, i);
267 if (IS_ERR(device->dev_rx_cq[i])) 270 if (IS_ERR(device->dev_rx_cq[i])) {
271 ret = PTR_ERR(device->dev_rx_cq[i]);
272 device->dev_rx_cq[i] = NULL;
268 goto out_cq; 273 goto out_cq;
274 }
269 275
276 INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
270 device->dev_tx_cq[i] = ib_create_cq(device->ib_device, 277 device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
271 isert_cq_tx_callback, 278 isert_cq_tx_callback,
272 isert_cq_event_callback, 279 isert_cq_event_callback,
273 (void *)&cq_desc[i], 280 (void *)&cq_desc[i],
274 ISER_MAX_TX_CQ_LEN, i); 281 ISER_MAX_TX_CQ_LEN, i);
275 if (IS_ERR(device->dev_tx_cq[i])) 282 if (IS_ERR(device->dev_tx_cq[i])) {
283 ret = PTR_ERR(device->dev_tx_cq[i]);
284 device->dev_tx_cq[i] = NULL;
276 goto out_cq; 285 goto out_cq;
286 }
277 287
278 if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP)) 288 ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
289 if (ret)
279 goto out_cq; 290 goto out_cq;
280 291
281 if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP)) 292 ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
293 if (ret)
282 goto out_cq; 294 goto out_cq;
283 } 295 }
284 296
@@ -1724,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context)
1724{ 1736{
1725 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; 1737 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1726 1738
1727 INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
1728 queue_work(isert_comp_wq, &cq_desc->cq_tx_work); 1739 queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
1729} 1740}
1730 1741
@@ -1768,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context)
1768{ 1779{
1769 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; 1780 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1770 1781
1771 INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
1772 queue_work(isert_rx_wq, &cq_desc->cq_rx_work); 1782 queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
1773} 1783}
1774 1784
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 82cec63a9011..3ee78f02e5d7 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
149static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, 149static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
150 int irq, int do_mask) 150 int irq, int do_mask)
151{ 151{
152 int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */ 152 /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
153 int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */ 153 int bitfield_width = 4;
154 int shift = 32 - (irq + 1) * bitfield_width;
154 155
155 intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO, 156 intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
156 shift, bitfield_width, 157 shift, bitfield_width,
@@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
159 160
160static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value) 161static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
161{ 162{
163 /* The SENSE register is assumed to be 32-bit. */
162 int bitfield_width = p->config.sense_bitfield_width; 164 int bitfield_width = p->config.sense_bitfield_width;
163 int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */ 165 int shift = 32 - (irq + 1) * bitfield_width;
164 166
165 dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value); 167 dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
166 168
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 5f9a7ad9b964..8aeec0b4601a 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -625,6 +625,7 @@ static int ems_usb_start(struct ems_usb *dev)
625 usb_unanchor_urb(urb); 625 usb_unanchor_urb(urb);
626 usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, 626 usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
627 urb->transfer_dma); 627 urb->transfer_dma);
628 usb_free_urb(urb);
628 break; 629 break;
629 } 630 }
630 631
@@ -798,8 +799,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
798 * allowed (MAX_TX_URBS). 799 * allowed (MAX_TX_URBS).
799 */ 800 */
800 if (!context) { 801 if (!context) {
801 usb_unanchor_urb(urb);
802 usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); 802 usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
803 usb_free_urb(urb);
803 804
804 netdev_warn(netdev, "couldn't find free context\n"); 805 netdev_warn(netdev, "couldn't find free context\n");
805 806
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 8ee9d1556e6e..263dd921edc4 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
927 /* set LED in default state (end of init phase) */ 927 /* set LED in default state (end of init phase) */
928 pcan_usb_pro_set_led(dev, 0, 1); 928 pcan_usb_pro_set_led(dev, 0, 1);
929 929
930 kfree(bi);
931 kfree(fi);
932
930 return 0; 933 return 0;
931 934
932 err_out: 935 err_out:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b1cb0ffb15c7..6055d397a29e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -447,8 +447,9 @@ irqreturn_t qlcnic_83xx_intr(int irq, void *data)
447 447
448 qlcnic_83xx_poll_process_aen(adapter); 448 qlcnic_83xx_poll_process_aen(adapter);
449 449
450 if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) { 450 if (ahw->diag_test) {
451 ahw->diag_cnt++; 451 if (ahw->diag_test == QLCNIC_INTERRUPT_TEST)
452 ahw->diag_cnt++;
452 qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); 453 qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
453 return IRQ_HANDLED; 454 return IRQ_HANDLED;
454 } 455 }
@@ -1345,11 +1346,6 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
1345 } 1346 }
1346 1347
1347 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { 1348 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
1348 /* disable and free mailbox interrupt */
1349 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
1350 qlcnic_83xx_enable_mbx_poll(adapter);
1351 qlcnic_83xx_free_mbx_intr(adapter);
1352 }
1353 adapter->ahw->loopback_state = 0; 1349 adapter->ahw->loopback_state = 0;
1354 adapter->ahw->hw_ops->setup_link_event(adapter, 1); 1350 adapter->ahw->hw_ops->setup_link_event(adapter, 1);
1355 } 1351 }
@@ -1363,33 +1359,20 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
1363{ 1359{
1364 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1360 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1365 struct qlcnic_host_sds_ring *sds_ring; 1361 struct qlcnic_host_sds_ring *sds_ring;
1366 int ring, err; 1362 int ring;
1367 1363
1368 clear_bit(__QLCNIC_DEV_UP, &adapter->state); 1364 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1369 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { 1365 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
1370 for (ring = 0; ring < adapter->drv_sds_rings; ring++) { 1366 for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
1371 sds_ring = &adapter->recv_ctx->sds_rings[ring]; 1367 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1372 qlcnic_83xx_disable_intr(adapter, sds_ring); 1368 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1373 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) 1369 qlcnic_83xx_disable_intr(adapter, sds_ring);
1374 qlcnic_83xx_enable_mbx_poll(adapter);
1375 } 1370 }
1376 } 1371 }
1377 1372
1378 qlcnic_fw_destroy_ctx(adapter); 1373 qlcnic_fw_destroy_ctx(adapter);
1379 qlcnic_detach(adapter); 1374 qlcnic_detach(adapter);
1380 1375
1381 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
1382 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
1383 err = qlcnic_83xx_setup_mbx_intr(adapter);
1384 qlcnic_83xx_disable_mbx_poll(adapter);
1385 if (err) {
1386 dev_err(&adapter->pdev->dev,
1387 "%s: failed to setup mbx interrupt\n",
1388 __func__);
1389 goto out;
1390 }
1391 }
1392 }
1393 adapter->ahw->diag_test = 0; 1376 adapter->ahw->diag_test = 0;
1394 adapter->drv_sds_rings = drv_sds_rings; 1377 adapter->drv_sds_rings = drv_sds_rings;
1395 1378
@@ -1399,9 +1382,6 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
1399 if (netif_running(netdev)) 1382 if (netif_running(netdev))
1400 __qlcnic_up(adapter, netdev); 1383 __qlcnic_up(adapter, netdev);
1401 1384
1402 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
1403 !(adapter->flags & QLCNIC_MSIX_ENABLED))
1404 qlcnic_83xx_disable_mbx_poll(adapter);
1405out: 1385out:
1406 netif_device_attach(netdev); 1386 netif_device_attach(netdev);
1407} 1387}
@@ -3754,6 +3734,19 @@ static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
3754 return; 3734 return;
3755} 3735}
3756 3736
3737static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter)
3738{
3739 struct qlcnic_hardware_context *ahw = adapter->ahw;
3740 u32 offset;
3741
3742 offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
3743 dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x",
3744 readl(ahw->pci_base0 + offset),
3745 QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL),
3746 QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL),
3747 QLCRDX(ahw, QLCNIC_FW_MBX_CTRL));
3748}
3749
3757static void qlcnic_83xx_mailbox_worker(struct work_struct *work) 3750static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
3758{ 3751{
3759 struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, 3752 struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
@@ -3798,6 +3791,8 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
3798 __func__, cmd->cmd_op, cmd->type, ahw->pci_func, 3791 __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
3799 ahw->op_mode); 3792 ahw->op_mode);
3800 clear_bit(QLC_83XX_MBX_READY, &mbx->status); 3793 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
3794 qlcnic_dump_mailbox_registers(adapter);
3795 qlcnic_83xx_get_mbx_data(adapter, cmd);
3801 qlcnic_dump_mbx(adapter, cmd); 3796 qlcnic_dump_mbx(adapter, cmd);
3802 qlcnic_83xx_idc_request_reset(adapter, 3797 qlcnic_83xx_idc_request_reset(adapter,
3803 QLCNIC_FORCE_FW_DUMP_KEY); 3798 QLCNIC_FORCE_FW_DUMP_KEY);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 4cae6caa6bfa..a6a33508e401 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -662,4 +662,5 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
662 pci_channel_state_t); 662 pci_channel_state_t);
663pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *); 663pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
664void qlcnic_83xx_io_resume(struct pci_dev *); 664void qlcnic_83xx_io_resume(struct pci_dev *);
665void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
665#endif 666#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 89208e5b25d6..918e18ddf038 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -740,6 +740,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
740 adapter->ahw->idc.err_code = -EIO; 740 adapter->ahw->idc.err_code = -EIO;
741 dev_err(&adapter->pdev->dev, 741 dev_err(&adapter->pdev->dev,
742 "%s: Device in unknown state\n", __func__); 742 "%s: Device in unknown state\n", __func__);
743 clear_bit(__QLCNIC_RESETTING, &adapter->state);
743 return 0; 744 return 0;
744} 745}
745 746
@@ -818,7 +819,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
818 struct qlcnic_hardware_context *ahw = adapter->ahw; 819 struct qlcnic_hardware_context *ahw = adapter->ahw;
819 struct qlcnic_mailbox *mbx = ahw->mailbox; 820 struct qlcnic_mailbox *mbx = ahw->mailbox;
820 int ret = 0; 821 int ret = 0;
821 u32 owner;
822 u32 val; 822 u32 val;
823 823
824 /* Perform NIC configuration based ready state entry actions */ 824 /* Perform NIC configuration based ready state entry actions */
@@ -848,9 +848,9 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
848 set_bit(__QLCNIC_RESETTING, &adapter->state); 848 set_bit(__QLCNIC_RESETTING, &adapter->state);
849 qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); 849 qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
850 } else { 850 } else {
851 owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); 851 netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
852 if (ahw->pci_func == owner) 852 __func__);
853 qlcnic_dump_fw(adapter); 853 qlcnic_83xx_idc_enter_failed_state(adapter, 1);
854 } 854 }
855 return -EIO; 855 return -EIO;
856 } 856 }
@@ -948,13 +948,26 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
948 return 0; 948 return 0;
949} 949}
950 950
951static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) 951static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
952{ 952{
953 dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); 953 struct qlcnic_hardware_context *ahw = adapter->ahw;
954 u32 val, owner;
955
956 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
957 if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
958 owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
959 if (ahw->pci_func == owner) {
960 qlcnic_83xx_stop_hw(adapter);
961 qlcnic_dump_fw(adapter);
962 }
963 }
964
965 netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n",
966 __func__);
954 clear_bit(__QLCNIC_RESETTING, &adapter->state); 967 clear_bit(__QLCNIC_RESETTING, &adapter->state);
955 adapter->ahw->idc.err_code = -EIO; 968 ahw->idc.err_code = -EIO;
956 969
957 return 0; 970 return;
958} 971}
959 972
960static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) 973static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
@@ -1063,12 +1076,6 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
1063 adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; 1076 adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
1064 qlcnic_83xx_periodic_tasks(adapter); 1077 qlcnic_83xx_periodic_tasks(adapter);
1065 1078
1066 /* Do not reschedule if firmaware is in hanged state and auto
1067 * recovery is disabled
1068 */
1069 if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
1070 return;
1071
1072 /* Re-schedule the function */ 1079 /* Re-schedule the function */
1073 if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) 1080 if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
1074 qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 1081 qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -1219,10 +1226,10 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
1219 } 1226 }
1220 1227
1221 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); 1228 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
1222 if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) || 1229 if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
1223 !qlcnic_auto_fw_reset) { 1230 netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
1224 dev_err(&adapter->pdev->dev, 1231 __func__);
1225 "%s:failed, device in non reset mode\n", __func__); 1232 qlcnic_83xx_idc_enter_failed_state(adapter, 0);
1226 qlcnic_83xx_unlock_driver(adapter); 1233 qlcnic_83xx_unlock_driver(adapter);
1227 return; 1234 return;
1228 } 1235 }
@@ -1254,24 +1261,24 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
1254 if (size & 0xF) 1261 if (size & 0xF)
1255 size = (size + 16) & ~0xF; 1262 size = (size + 16) & ~0xF;
1256 1263
1257 p_cache = kzalloc(size, GFP_KERNEL); 1264 p_cache = vzalloc(size);
1258 if (p_cache == NULL) 1265 if (p_cache == NULL)
1259 return -ENOMEM; 1266 return -ENOMEM;
1260 1267
1261 ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, 1268 ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
1262 size / sizeof(u32)); 1269 size / sizeof(u32));
1263 if (ret) { 1270 if (ret) {
1264 kfree(p_cache); 1271 vfree(p_cache);
1265 return ret; 1272 return ret;
1266 } 1273 }
1267 /* 16 byte write to MS memory */ 1274 /* 16 byte write to MS memory */
1268 ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, 1275 ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
1269 size / 16); 1276 size / 16);
1270 if (ret) { 1277 if (ret) {
1271 kfree(p_cache); 1278 vfree(p_cache);
1272 return ret; 1279 return ret;
1273 } 1280 }
1274 kfree(p_cache); 1281 vfree(p_cache);
1275 1282
1276 return ret; 1283 return ret;
1277} 1284}
@@ -1939,7 +1946,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
1939 p_dev->ahw->reset.seq_index = index; 1946 p_dev->ahw->reset.seq_index = index;
1940} 1947}
1941 1948
1942static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) 1949void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
1943{ 1950{
1944 p_dev->ahw->reset.seq_index = 0; 1951 p_dev->ahw->reset.seq_index = 0;
1945 1952
@@ -1994,6 +2001,14 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
1994 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); 2001 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
1995 if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) 2002 if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
1996 qlcnic_dump_fw(adapter); 2003 qlcnic_dump_fw(adapter);
2004
2005 if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
2006 netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
2007 __func__);
2008 qlcnic_83xx_idc_enter_failed_state(adapter, 1);
2009 return err;
2010 }
2011
1997 qlcnic_83xx_init_hw(adapter); 2012 qlcnic_83xx_init_hw(adapter);
1998 2013
1999 if (qlcnic_83xx_copy_bootloader(adapter)) 2014 if (qlcnic_83xx_copy_bootloader(adapter))
@@ -2073,8 +2088,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2073 ahw->nic_mode = QLCNIC_DEFAULT_MODE; 2088 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
2074 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; 2089 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
2075 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; 2090 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
2076 adapter->max_sds_rings = ahw->max_rx_ques; 2091 adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
2077 adapter->max_tx_rings = ahw->max_tx_ques; 2092 adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
2078 } else { 2093 } else {
2079 return -EIO; 2094 return -EIO;
2080 } 2095 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index b36c02fafcfd..e3be2760665c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -667,30 +667,25 @@ qlcnic_set_ringparam(struct net_device *dev,
667static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, 667static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
668 u8 rx_ring, u8 tx_ring) 668 u8 rx_ring, u8 tx_ring)
669{ 669{
670 if (rx_ring == 0 || tx_ring == 0)
671 return -EINVAL;
672
670 if (rx_ring != 0) { 673 if (rx_ring != 0) {
671 if (rx_ring > adapter->max_sds_rings) { 674 if (rx_ring > adapter->max_sds_rings) {
672 netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", 675 netdev_err(adapter->netdev,
676 "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
673 rx_ring, adapter->max_sds_rings); 677 rx_ring, adapter->max_sds_rings);
674 return -EINVAL; 678 return -EINVAL;
675 } 679 }
676 } 680 }
677 681
678 if (tx_ring != 0) { 682 if (tx_ring != 0) {
679 if (qlcnic_82xx_check(adapter) && 683 if (tx_ring > adapter->max_tx_rings) {
680 (tx_ring > adapter->max_tx_rings)) {
681 netdev_err(adapter->netdev, 684 netdev_err(adapter->netdev,
682 "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n", 685 "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
683 tx_ring, adapter->max_tx_rings); 686 tx_ring, adapter->max_tx_rings);
684 return -EINVAL; 687 return -EINVAL;
685 } 688 }
686
687 if (qlcnic_83xx_check(adapter) &&
688 (tx_ring > QLCNIC_SINGLE_RING)) {
689 netdev_err(adapter->netdev,
690 "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n",
691 tx_ring, QLCNIC_SINGLE_RING);
692 return -EINVAL;
693 }
694 } 689 }
695 690
696 return 0; 691 return 0;
@@ -948,6 +943,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
948 struct qlcnic_hardware_context *ahw = adapter->ahw; 943 struct qlcnic_hardware_context *ahw = adapter->ahw;
949 struct qlcnic_cmd_args cmd; 944 struct qlcnic_cmd_args cmd;
950 int ret, drv_sds_rings = adapter->drv_sds_rings; 945 int ret, drv_sds_rings = adapter->drv_sds_rings;
946 int drv_tx_rings = adapter->drv_tx_rings;
951 947
952 if (qlcnic_83xx_check(adapter)) 948 if (qlcnic_83xx_check(adapter))
953 return qlcnic_83xx_interrupt_test(netdev); 949 return qlcnic_83xx_interrupt_test(netdev);
@@ -980,6 +976,7 @@ free_diag_res:
980 976
981clear_diag_irq: 977clear_diag_irq:
982 adapter->drv_sds_rings = drv_sds_rings; 978 adapter->drv_sds_rings = drv_sds_rings;
979 adapter->drv_tx_rings = drv_tx_rings;
983 clear_bit(__QLCNIC_RESETTING, &adapter->state); 980 clear_bit(__QLCNIC_RESETTING, &adapter->state);
984 981
985 return ret; 982 return ret;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 0149c9495347..eda6c691d897 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -687,17 +687,11 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
687 if (adapter->ahw->linkup && !linkup) { 687 if (adapter->ahw->linkup && !linkup) {
688 netdev_info(netdev, "NIC Link is down\n"); 688 netdev_info(netdev, "NIC Link is down\n");
689 adapter->ahw->linkup = 0; 689 adapter->ahw->linkup = 0;
690 if (netif_running(netdev)) { 690 netif_carrier_off(netdev);
691 netif_carrier_off(netdev);
692 netif_tx_stop_all_queues(netdev);
693 }
694 } else if (!adapter->ahw->linkup && linkup) { 691 } else if (!adapter->ahw->linkup && linkup) {
695 netdev_info(netdev, "NIC Link is up\n"); 692 netdev_info(netdev, "NIC Link is up\n");
696 adapter->ahw->linkup = 1; 693 adapter->ahw->linkup = 1;
697 if (netif_running(netdev)) { 694 netif_carrier_on(netdev);
698 netif_carrier_on(netdev);
699 netif_wake_queue(netdev);
700 }
701 } 695 }
702} 696}
703 697
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 05c1eef8df13..2c8cac0c6a55 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1178,6 +1178,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
1178 } else { 1178 } else {
1179 adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; 1179 adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
1180 adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS; 1180 adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
1181 adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
1181 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; 1182 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
1182 } 1183 }
1183 1184
@@ -1940,7 +1941,6 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1940 qlcnic_detach(adapter); 1941 qlcnic_detach(adapter);
1941 1942
1942 adapter->drv_sds_rings = QLCNIC_SINGLE_RING; 1943 adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
1943 adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
1944 adapter->ahw->diag_test = test; 1944 adapter->ahw->diag_test = test;
1945 adapter->ahw->linkup = 0; 1945 adapter->ahw->linkup = 0;
1946 1946
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 524f713f6017..f8135725bcf6 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -327,7 +327,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
327 return -EINVAL; 327 return -EINVAL;
328 328
329 nvdev->start_remove = true; 329 nvdev->start_remove = true;
330 cancel_delayed_work_sync(&ndevctx->dwork);
331 cancel_work_sync(&ndevctx->work); 330 cancel_work_sync(&ndevctx->work);
332 netif_tx_disable(ndev); 331 netif_tx_disable(ndev);
333 rndis_filter_device_remove(hdev); 332 rndis_filter_device_remove(hdev);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e884ee1fe7ed..27bbe58dcbe7 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1197,6 +1197,9 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
1197 1197
1198 err = -EPROTO; 1198 err = -EPROTO;
1199 1199
1200 if (fragment)
1201 goto out;
1202
1200 switch (ip_hdr(skb)->protocol) { 1203 switch (ip_hdr(skb)->protocol) {
1201 case IPPROTO_TCP: 1204 case IPPROTO_TCP:
1202 err = maybe_pull_tail(skb, 1205 err = maybe_pull_tail(skb,
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index a344f3d52361..330ef2d06567 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -24,8 +24,8 @@ config PHY_EXYNOS_MIPI_VIDEO
24config OMAP_USB2 24config OMAP_USB2
25 tristate "OMAP USB2 PHY Driver" 25 tristate "OMAP USB2 PHY Driver"
26 depends on ARCH_OMAP2PLUS 26 depends on ARCH_OMAP2PLUS
27 depends on USB_PHY
27 select GENERIC_PHY 28 select GENERIC_PHY
28 select USB_PHY
29 select OMAP_CONTROL_USB 29 select OMAP_CONTROL_USB
30 help 30 help
31 Enable this to support the transceiver that is part of SOC. This 31 Enable this to support the transceiver that is part of SOC. This
@@ -36,8 +36,8 @@ config OMAP_USB2
36config TWL4030_USB 36config TWL4030_USB
37 tristate "TWL4030 USB Transceiver Driver" 37 tristate "TWL4030 USB Transceiver Driver"
38 depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS 38 depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
39 depends on USB_PHY
39 select GENERIC_PHY 40 select GENERIC_PHY
40 select USB_PHY
41 help 41 help
42 Enable this to support the USB OTG transceiver on TWL4030 42 Enable this to support the USB OTG transceiver on TWL4030
43 family chips (including the TWL5030 and TPS659x0 devices). 43 family chips (including the TWL5030 and TPS659x0 devices).
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 03cf8fb81554..58e0e9739028 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -437,23 +437,18 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
437 int id; 437 int id;
438 struct phy *phy; 438 struct phy *phy;
439 439
440 if (!dev) { 440 if (WARN_ON(!dev))
441 dev_WARN(dev, "no device provided for PHY\n"); 441 return ERR_PTR(-EINVAL);
442 ret = -EINVAL;
443 goto err0;
444 }
445 442
446 phy = kzalloc(sizeof(*phy), GFP_KERNEL); 443 phy = kzalloc(sizeof(*phy), GFP_KERNEL);
447 if (!phy) { 444 if (!phy)
448 ret = -ENOMEM; 445 return ERR_PTR(-ENOMEM);
449 goto err0;
450 }
451 446
452 id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL); 447 id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
453 if (id < 0) { 448 if (id < 0) {
454 dev_err(dev, "unable to get id\n"); 449 dev_err(dev, "unable to get id\n");
455 ret = id; 450 ret = id;
456 goto err0; 451 goto free_phy;
457 } 452 }
458 453
459 device_initialize(&phy->dev); 454 device_initialize(&phy->dev);
@@ -468,11 +463,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
468 463
469 ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id); 464 ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
470 if (ret) 465 if (ret)
471 goto err1; 466 goto put_dev;
472 467
473 ret = device_add(&phy->dev); 468 ret = device_add(&phy->dev);
474 if (ret) 469 if (ret)
475 goto err1; 470 goto put_dev;
476 471
477 if (pm_runtime_enabled(dev)) { 472 if (pm_runtime_enabled(dev)) {
478 pm_runtime_enable(&phy->dev); 473 pm_runtime_enable(&phy->dev);
@@ -481,12 +476,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
481 476
482 return phy; 477 return phy;
483 478
484err1: 479put_dev:
485 ida_remove(&phy_ida, phy->id);
486 put_device(&phy->dev); 480 put_device(&phy->dev);
481 ida_remove(&phy_ida, phy->id);
482free_phy:
487 kfree(phy); 483 kfree(phy);
488
489err0:
490 return ERR_PTR(ret); 484 return ERR_PTR(ret);
491} 485}
492EXPORT_SYMBOL_GPL(phy_create); 486EXPORT_SYMBOL_GPL(phy_create);
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 11bd0d970a52..e2142956a8e5 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -254,7 +254,7 @@ struct sh_pfc_soc_info {
254#define PINMUX_GPIO(_pin) \ 254#define PINMUX_GPIO(_pin) \
255 [GPIO_##_pin] = { \ 255 [GPIO_##_pin] = { \
256 .pin = (u16)-1, \ 256 .pin = (u16)-1, \
257 .name = __stringify(name), \ 257 .name = __stringify(GPIO_##_pin), \
258 .enum_id = _pin##_DATA, \ 258 .enum_id = _pin##_DATA, \
259 } 259 }
260 260
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 333677d68d0e..9e61922d8230 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -438,7 +438,7 @@ common_reg:
438 platform_set_drvdata(pdev, s2mps11); 438 platform_set_drvdata(pdev, s2mps11);
439 439
440 config.dev = &pdev->dev; 440 config.dev = &pdev->dev;
441 config.regmap = iodev->regmap; 441 config.regmap = iodev->regmap_pmic;
442 config.driver_data = s2mps11; 442 config.driver_data = s2mps11;
443 for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) { 443 for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
444 if (!reg_np) { 444 if (!reg_np) {
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 596480022b0a..38a1257e76e1 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
471 schedule_delayed_work(&tgt->sess_del_work, 0); 471 schedule_delayed_work(&tgt->sess_del_work, 0);
472 else 472 else
473 schedule_delayed_work(&tgt->sess_del_work, 473 schedule_delayed_work(&tgt->sess_del_work,
474 jiffies - sess->expires); 474 sess->expires - jiffies);
475} 475}
476 476
477/* ha->hardware_lock supposed to be held on entry */ 477/* ha->hardware_lock supposed to be held on entry */
@@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
550 struct scsi_qla_host *vha = tgt->vha; 550 struct scsi_qla_host *vha = tgt->vha;
551 struct qla_hw_data *ha = vha->hw; 551 struct qla_hw_data *ha = vha->hw;
552 struct qla_tgt_sess *sess; 552 struct qla_tgt_sess *sess;
553 unsigned long flags; 553 unsigned long flags, elapsed;
554 554
555 spin_lock_irqsave(&ha->hardware_lock, flags); 555 spin_lock_irqsave(&ha->hardware_lock, flags);
556 while (!list_empty(&tgt->del_sess_list)) { 556 while (!list_empty(&tgt->del_sess_list)) {
557 sess = list_entry(tgt->del_sess_list.next, typeof(*sess), 557 sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
558 del_list_entry); 558 del_list_entry);
559 if (time_after_eq(jiffies, sess->expires)) { 559 elapsed = jiffies;
560 if (time_after_eq(elapsed, sess->expires)) {
560 qlt_undelete_sess(sess); 561 qlt_undelete_sess(sess);
561 562
562 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, 563 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
@@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
566 ha->tgt.tgt_ops->put_sess(sess); 567 ha->tgt.tgt_ops->put_sess(sess);
567 } else { 568 } else {
568 schedule_delayed_work(&tgt->sess_del_work, 569 schedule_delayed_work(&tgt->sess_del_work,
569 jiffies - sess->expires); 570 sess->expires - elapsed);
570 break; 571 break;
571 } 572 }
572 } 573 }
@@ -4290,6 +4291,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
4290 if (rc != 0) { 4291 if (rc != 0) {
4291 ha->tgt.tgt_ops = NULL; 4292 ha->tgt.tgt_ops = NULL;
4292 ha->tgt.target_lport_ptr = NULL; 4293 ha->tgt.target_lport_ptr = NULL;
4294 scsi_host_put(host);
4293 } 4295 }
4294 mutex_unlock(&qla_tgt_mutex); 4296 mutex_unlock(&qla_tgt_mutex);
4295 return rc; 4297 return rc;
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 8f02bf66e20b..4964d2a2fc7d 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -446,7 +446,7 @@ int comedi_load_firmware(struct comedi_device *dev,
446 release_firmware(fw); 446 release_firmware(fw);
447 } 447 }
448 448
449 return ret; 449 return ret < 0 ? ret : 0;
450} 450}
451EXPORT_SYMBOL_GPL(comedi_load_firmware); 451EXPORT_SYMBOL_GPL(comedi_load_firmware);
452 452
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 432e3f9c3301..c55f234b29e6 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -63,7 +63,8 @@ enum pci_8255_boardid {
63 BOARD_ADLINK_PCI7296, 63 BOARD_ADLINK_PCI7296,
64 BOARD_CB_PCIDIO24, 64 BOARD_CB_PCIDIO24,
65 BOARD_CB_PCIDIO24H, 65 BOARD_CB_PCIDIO24H,
66 BOARD_CB_PCIDIO48H, 66 BOARD_CB_PCIDIO48H_OLD,
67 BOARD_CB_PCIDIO48H_NEW,
67 BOARD_CB_PCIDIO96H, 68 BOARD_CB_PCIDIO96H,
68 BOARD_NI_PCIDIO96, 69 BOARD_NI_PCIDIO96,
69 BOARD_NI_PCIDIO96B, 70 BOARD_NI_PCIDIO96B,
@@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
106 .dio_badr = 2, 107 .dio_badr = 2,
107 .n_8255 = 1, 108 .n_8255 = 1,
108 }, 109 },
109 [BOARD_CB_PCIDIO48H] = { 110 [BOARD_CB_PCIDIO48H_OLD] = {
110 .name = "cb_pci-dio48h", 111 .name = "cb_pci-dio48h",
111 .dio_badr = 1, 112 .dio_badr = 1,
112 .n_8255 = 2, 113 .n_8255 = 2,
113 }, 114 },
115 [BOARD_CB_PCIDIO48H_NEW] = {
116 .name = "cb_pci-dio48h",
117 .dio_badr = 2,
118 .n_8255 = 2,
119 },
114 [BOARD_CB_PCIDIO96H] = { 120 [BOARD_CB_PCIDIO96H] = {
115 .name = "cb_pci-dio96h", 121 .name = "cb_pci-dio96h",
116 .dio_badr = 2, 122 .dio_badr = 2,
@@ -263,7 +269,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
263 { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 }, 269 { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
264 { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 }, 270 { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
265 { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H }, 271 { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
266 { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H }, 272 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
273 .driver_data = BOARD_CB_PCIDIO48H_OLD },
274 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
275 .driver_data = BOARD_CB_PCIDIO48H_NEW },
267 { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H }, 276 { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
268 { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 }, 277 { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
269 { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B }, 278 { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index 99421f90d189..0485d7f39867 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -451,7 +451,12 @@ done:
451 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ 451 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
452 BIT(IIO_CHAN_INFO_SAMP_FREQ), \ 452 BIT(IIO_CHAN_INFO_SAMP_FREQ), \
453 .scan_index = idx, \ 453 .scan_index = idx, \
454 .scan_type = IIO_ST('s', 16, 16, IIO_BE), \ 454 .scan_type = { \
455 .sign = 's', \
456 .realbits = 16, \
457 .storagebits = 16, \
458 .endianness = IIO_BE, \
459 }, \
455 } 460 }
456 461
457static const struct iio_chan_spec hmc5843_channels[] = { 462static const struct iio_chan_spec hmc5843_channels[] = {
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 6bd015ac9d68..96e4eee344ef 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -88,8 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm)
88 88
89 imx_drm_device_put(); 89 imx_drm_device_put();
90 90
91 drm_mode_config_cleanup(imxdrm->drm); 91 drm_vblank_cleanup(imxdrm->drm);
92 drm_kms_helper_poll_fini(imxdrm->drm); 92 drm_kms_helper_poll_fini(imxdrm->drm);
93 drm_mode_config_cleanup(imxdrm->drm);
93 94
94 return 0; 95 return 0;
95} 96}
@@ -199,8 +200,8 @@ static void imx_drm_driver_preclose(struct drm_device *drm,
199 if (!file->is_master) 200 if (!file->is_master)
200 return; 201 return;
201 202
202 for (i = 0; i < 4; i++) 203 for (i = 0; i < MAX_CRTC; i++)
203 imx_drm_disable_vblank(drm , i); 204 imx_drm_disable_vblank(drm, i);
204} 205}
205 206
206static const struct file_operations imx_drm_driver_fops = { 207static const struct file_operations imx_drm_driver_fops = {
@@ -376,8 +377,6 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
376 struct imx_drm_device *imxdrm = __imx_drm_device(); 377 struct imx_drm_device *imxdrm = __imx_drm_device();
377 int ret; 378 int ret;
378 379
379 drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
380 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
381 ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); 380 ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
382 if (ret) 381 if (ret)
383 return ret; 382 return ret;
@@ -385,6 +384,9 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
385 drm_crtc_helper_add(imx_drm_crtc->crtc, 384 drm_crtc_helper_add(imx_drm_crtc->crtc,
386 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); 385 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
387 386
387 drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
388 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
389
388 drm_mode_group_reinit(imxdrm->drm); 390 drm_mode_group_reinit(imxdrm->drm);
389 391
390 return 0; 392 return 0;
@@ -428,11 +430,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
428 ret = drm_mode_group_init_legacy_group(imxdrm->drm, 430 ret = drm_mode_group_init_legacy_group(imxdrm->drm,
429 &imxdrm->drm->primary->mode_group); 431 &imxdrm->drm->primary->mode_group);
430 if (ret) 432 if (ret)
431 goto err_init; 433 goto err_kms;
432 434
433 ret = drm_vblank_init(imxdrm->drm, MAX_CRTC); 435 ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
434 if (ret) 436 if (ret)
435 goto err_init; 437 goto err_kms;
436 438
437 /* 439 /*
438 * with vblank_disable_allowed = true, vblank interrupt will be disabled 440 * with vblank_disable_allowed = true, vblank interrupt will be disabled
@@ -441,12 +443,19 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
441 */ 443 */
442 imxdrm->drm->vblank_disable_allowed = true; 444 imxdrm->drm->vblank_disable_allowed = true;
443 445
444 if (!imx_drm_device_get()) 446 if (!imx_drm_device_get()) {
445 ret = -EINVAL; 447 ret = -EINVAL;
448 goto err_vblank;
449 }
446 450
447 ret = 0; 451 mutex_unlock(&imxdrm->mutex);
452 return 0;
448 453
449err_init: 454err_vblank:
455 drm_vblank_cleanup(drm);
456err_kms:
457 drm_kms_helper_poll_fini(drm);
458 drm_mode_config_cleanup(drm);
450 mutex_unlock(&imxdrm->mutex); 459 mutex_unlock(&imxdrm->mutex);
451 460
452 return ret; 461 return ret;
@@ -492,6 +501,15 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
492 501
493 mutex_lock(&imxdrm->mutex); 502 mutex_lock(&imxdrm->mutex);
494 503
504 /*
505 * The vblank arrays are dimensioned by MAX_CRTC - we can't
506 * pass IDs greater than this to those functions.
507 */
508 if (imxdrm->pipes >= MAX_CRTC) {
509 ret = -EINVAL;
510 goto err_busy;
511 }
512
495 if (imxdrm->drm->open_count) { 513 if (imxdrm->drm->open_count) {
496 ret = -EBUSY; 514 ret = -EBUSY;
497 goto err_busy; 515 goto err_busy;
@@ -528,6 +546,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
528 return 0; 546 return 0;
529 547
530err_register: 548err_register:
549 list_del(&imx_drm_crtc->list);
531 kfree(imx_drm_crtc); 550 kfree(imx_drm_crtc);
532err_alloc: 551err_alloc:
533err_busy: 552err_busy:
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 680f4c8fa081..2c44fef8d58b 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -114,7 +114,6 @@ struct imx_tve {
114 struct drm_encoder encoder; 114 struct drm_encoder encoder;
115 struct imx_drm_encoder *imx_drm_encoder; 115 struct imx_drm_encoder *imx_drm_encoder;
116 struct device *dev; 116 struct device *dev;
117 spinlock_t enable_lock; /* serializes tve_enable/disable */
118 spinlock_t lock; /* register lock */ 117 spinlock_t lock; /* register lock */
119 bool enabled; 118 bool enabled;
120 int mode; 119 int mode;
@@ -146,10 +145,8 @@ __releases(&tve->lock)
146 145
147static void tve_enable(struct imx_tve *tve) 146static void tve_enable(struct imx_tve *tve)
148{ 147{
149 unsigned long flags;
150 int ret; 148 int ret;
151 149
152 spin_lock_irqsave(&tve->enable_lock, flags);
153 if (!tve->enabled) { 150 if (!tve->enabled) {
154 tve->enabled = true; 151 tve->enabled = true;
155 clk_prepare_enable(tve->clk); 152 clk_prepare_enable(tve->clk);
@@ -169,23 +166,18 @@ static void tve_enable(struct imx_tve *tve)
169 TVE_CD_SM_IEN | 166 TVE_CD_SM_IEN |
170 TVE_CD_LM_IEN | 167 TVE_CD_LM_IEN |
171 TVE_CD_MON_END_IEN); 168 TVE_CD_MON_END_IEN);
172
173 spin_unlock_irqrestore(&tve->enable_lock, flags);
174} 169}
175 170
176static void tve_disable(struct imx_tve *tve) 171static void tve_disable(struct imx_tve *tve)
177{ 172{
178 unsigned long flags;
179 int ret; 173 int ret;
180 174
181 spin_lock_irqsave(&tve->enable_lock, flags);
182 if (tve->enabled) { 175 if (tve->enabled) {
183 tve->enabled = false; 176 tve->enabled = false;
184 ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, 177 ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
185 TVE_IPU_CLK_EN | TVE_EN, 0); 178 TVE_IPU_CLK_EN | TVE_EN, 0);
186 clk_disable_unprepare(tve->clk); 179 clk_disable_unprepare(tve->clk);
187 } 180 }
188 spin_unlock_irqrestore(&tve->enable_lock, flags);
189} 181}
190 182
191static int tve_setup_tvout(struct imx_tve *tve) 183static int tve_setup_tvout(struct imx_tve *tve)
@@ -601,7 +593,6 @@ static int imx_tve_probe(struct platform_device *pdev)
601 593
602 tve->dev = &pdev->dev; 594 tve->dev = &pdev->dev;
603 spin_lock_init(&tve->lock); 595 spin_lock_init(&tve->lock);
604 spin_lock_init(&tve->enable_lock);
605 596
606 ddc_node = of_parse_phandle(np, "ddc", 0); 597 ddc_node = of_parse_phandle(np, "ddc", 0);
607 if (ddc_node) { 598 if (ddc_node) {
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index 7a22ce619ed2..97ca6924dbb3 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -996,35 +996,35 @@ static const struct ipu_platform_reg client_reg[] = {
996 }, 996 },
997}; 997};
998 998
999static DEFINE_MUTEX(ipu_client_id_mutex);
999static int ipu_client_id; 1000static int ipu_client_id;
1000 1001
1001static int ipu_add_subdevice_pdata(struct device *dev,
1002 const struct ipu_platform_reg *reg)
1003{
1004 struct platform_device *pdev;
1005
1006 pdev = platform_device_register_data(dev, reg->name, ipu_client_id++,
1007 &reg->pdata, sizeof(struct ipu_platform_reg));
1008
1009 return PTR_ERR_OR_ZERO(pdev);
1010}
1011
1012static int ipu_add_client_devices(struct ipu_soc *ipu) 1002static int ipu_add_client_devices(struct ipu_soc *ipu)
1013{ 1003{
1014 int ret; 1004 struct device *dev = ipu->dev;
1015 int i; 1005 unsigned i;
1006 int id, ret;
1007
1008 mutex_lock(&ipu_client_id_mutex);
1009 id = ipu_client_id;
1010 ipu_client_id += ARRAY_SIZE(client_reg);
1011 mutex_unlock(&ipu_client_id_mutex);
1016 1012
1017 for (i = 0; i < ARRAY_SIZE(client_reg); i++) { 1013 for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
1018 const struct ipu_platform_reg *reg = &client_reg[i]; 1014 const struct ipu_platform_reg *reg = &client_reg[i];
1019 ret = ipu_add_subdevice_pdata(ipu->dev, reg); 1015 struct platform_device *pdev;
1020 if (ret) 1016
1017 pdev = platform_device_register_data(dev, reg->name,
1018 id++, &reg->pdata, sizeof(reg->pdata));
1019
1020 if (IS_ERR(pdev))
1021 goto err_register; 1021 goto err_register;
1022 } 1022 }
1023 1023
1024 return 0; 1024 return 0;
1025 1025
1026err_register: 1026err_register:
1027 platform_device_unregister_children(to_platform_device(ipu->dev)); 1027 platform_device_unregister_children(to_platform_device(dev));
1028 1028
1029 return ret; 1029 return ret;
1030} 1030}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d70e9119e906..00867190413c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np)
465 */ 465 */
466 send_sig(SIGINT, np->np_thread, 1); 466 send_sig(SIGINT, np->np_thread, 1);
467 kthread_stop(np->np_thread); 467 kthread_stop(np->np_thread);
468 np->np_thread = NULL;
468 } 469 }
469 470
470 np->np_transport->iscsit_free_np(np); 471 np->np_transport->iscsit_free_np(np);
@@ -823,24 +824,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
823 if (((hdr->flags & ISCSI_FLAG_CMD_READ) || 824 if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
824 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) { 825 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
825 /* 826 /*
826 * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2) 827 * From RFC-3720 Section 10.3.1:
827 * that adds support for RESERVE/RELEASE. There is a bug 828 *
828 * add with this new functionality that sets R/W bits when 829 * "Either or both of R and W MAY be 1 when either the
829 * neither CDB carries any READ or WRITE datapayloads. 830 * Expected Data Transfer Length and/or Bidirectional Read
831 * Expected Data Transfer Length are 0"
832 *
833 * For this case, go ahead and clear the unnecssary bits
834 * to avoid any confusion with ->data_direction.
830 */ 835 */
831 if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) { 836 hdr->flags &= ~ISCSI_FLAG_CMD_READ;
832 hdr->flags &= ~ISCSI_FLAG_CMD_READ; 837 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
833 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
834 goto done;
835 }
836 838
837 pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" 839 pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
838 " set when Expected Data Transfer Length is 0 for" 840 " set when Expected Data Transfer Length is 0 for"
839 " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]); 841 " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
840 return iscsit_add_reject_cmd(cmd,
841 ISCSI_REASON_BOOKMARK_INVALID, buf);
842 } 842 }
843done:
844 843
845 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && 844 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
846 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { 845 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index e3318edb233d..1c0088fe9e99 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name( \
474 \ 474 \
475 if (!capable(CAP_SYS_ADMIN)) \ 475 if (!capable(CAP_SYS_ADMIN)) \
476 return -EPERM; \ 476 return -EPERM; \
477 \ 477 if (count >= sizeof(auth->name)) \
478 return -EINVAL; \
478 snprintf(auth->name, sizeof(auth->name), "%s", page); \ 479 snprintf(auth->name, sizeof(auth->name), "%s", page); \
479 if (!strncmp("NULL", auth->name, 4)) \ 480 if (!strncmp("NULL", auth->name, 4)) \
480 auth->naf_flags &= ~flags; \ 481 auth->naf_flags &= ~flags; \
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 4eb93b2b6473..e29279e6b577 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1403,11 +1403,6 @@ old_sess_out:
1403 1403
1404out: 1404out:
1405 stop = kthread_should_stop(); 1405 stop = kthread_should_stop();
1406 if (!stop && signal_pending(current)) {
1407 spin_lock_bh(&np->np_thread_lock);
1408 stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
1409 spin_unlock_bh(&np->np_thread_lock);
1410 }
1411 /* Wait for another socket.. */ 1406 /* Wait for another socket.. */
1412 if (!stop) 1407 if (!stop)
1413 return 1; 1408 return 1;
@@ -1415,7 +1410,6 @@ exit:
1415 iscsi_stop_login_thread_timer(np); 1410 iscsi_stop_login_thread_timer(np);
1416 spin_lock_bh(&np->np_thread_lock); 1411 spin_lock_bh(&np->np_thread_lock);
1417 np->np_thread_state = ISCSI_NP_THREAD_EXIT; 1412 np->np_thread_state = ISCSI_NP_THREAD_EXIT;
1418 np->np_thread = NULL;
1419 spin_unlock_bh(&np->np_thread_lock); 1413 spin_unlock_bh(&np->np_thread_lock);
1420 1414
1421 return 0; 1415 return 0;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 207b340498a3..d06de84b069b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1106,6 +1106,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1106 dev->dev_attrib.block_size = block_size; 1106 dev->dev_attrib.block_size = block_size;
1107 pr_debug("dev[%p]: SE Device block_size changed to %u\n", 1107 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1108 dev, block_size); 1108 dev, block_size);
1109
1110 if (dev->dev_attrib.max_bytes_per_io)
1111 dev->dev_attrib.hw_max_sectors =
1112 dev->dev_attrib.max_bytes_per_io / block_size;
1113
1109 return 0; 1114 return 0;
1110} 1115}
1111 1116
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 0e34cda3271e..78241a53b555 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
66 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" 66 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
67 " Target Core Stack %s\n", hba->hba_id, FD_VERSION, 67 " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
68 TARGET_CORE_MOD_VERSION); 68 TARGET_CORE_MOD_VERSION);
69 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" 69 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
70 " MaxSectors: %u\n", 70 hba->hba_id, fd_host->fd_host_id);
71 hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
72 71
73 return 0; 72 return 0;
74} 73}
@@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
220 } 219 }
221 220
222 dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; 221 dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
223 dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; 222 dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
223 dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
224 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; 224 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
225 225
226 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { 226 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 37ffc5bd2399..d7772c167685 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -7,7 +7,10 @@
 #define FD_DEVICE_QUEUE_DEPTH	32
 #define FD_MAX_DEVICE_QUEUE_DEPTH 128
 #define FD_BLOCKSIZE		512
-#define FD_MAX_SECTORS		2048
+/*
+ * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
+ */
+#define FD_MAX_BYTES		8388608
 
 #define RRF_EMULATE_CDB		0x01
 #define RRF_GOT_LBA		0x02
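
For context on the hunks above: the FILEIO backend's per-I/O ceiling is now expressed in bytes (8 MiB = 2048 iovecs of one 4 KiB page), and hw_max_sectors is derived from it per configured block size instead of being pinned at 2048 sectors. A minimal user-space sketch of the arithmetic (illustrative only, not kernel code; names mirror the patch):

	#include <stdio.h>

	#define FD_MAX_BYTES 8388608	/* 8 MiB, per the hunk above */

	int main(void)
	{
		unsigned int block_sizes[] = { 512, 1024, 2048, 4096 };

		for (int i = 0; i < 4; i++) {
			unsigned int bs = block_sizes[i];
			/* mirrors: hw_max_sectors = max_bytes_per_io / block_size */
			printf("block_size=%u -> hw_max_sectors=%u\n",
			       bs, FD_MAX_BYTES / bs);
		}
		return 0;
	}

So a 512-byte block size now permits 16384-sector I/Os, while 4096-byte blocks yield the old 2048.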
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index f697f8baec54..2a573de19a9f 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -278,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
 	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
 	acl->se_tpg = tpg;
 	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
-	spin_lock_init(&acl->stats_lock);
 	acl->dynamic_node_acl = 1;
 
 	tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -406,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
 	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
 	acl->se_tpg = tpg;
 	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
-	spin_lock_init(&acl->stats_lock);
 
 	tpg->se_tpg_tfo->set_default_node_attributes(acl);
 
@@ -658,15 +656,9 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
 	spin_lock_init(&lun->lun_sep_lock);
 	init_completion(&lun->lun_ref_comp);
 
-	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
-	if (ret < 0)
-		return ret;
-
 	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
-	if (ret < 0) {
-		percpu_ref_cancel_init(&lun->lun_ref);
-		return ret;
-	}
+	if (ret < 0)
+		return ret;
 
 	return 0;
 }
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 268b62768f2b..34aacaaae14a 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -93,6 +93,7 @@ struct n_tty_data {
93 size_t canon_head; 93 size_t canon_head;
94 size_t echo_head; 94 size_t echo_head;
95 size_t echo_commit; 95 size_t echo_commit;
96 size_t echo_mark;
96 DECLARE_BITMAP(char_map, 256); 97 DECLARE_BITMAP(char_map, 256);
97 98
98 /* private to n_tty_receive_overrun (single-threaded) */ 99 /* private to n_tty_receive_overrun (single-threaded) */
@@ -336,6 +337,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
336{ 337{
337 ldata->read_head = ldata->canon_head = ldata->read_tail = 0; 338 ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
338 ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; 339 ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
340 ldata->echo_mark = 0;
339 ldata->line_start = 0; 341 ldata->line_start = 0;
340 342
341 ldata->erasing = 0; 343 ldata->erasing = 0;
@@ -787,6 +789,7 @@ static void commit_echoes(struct tty_struct *tty)
787 size_t head; 789 size_t head;
788 790
789 head = ldata->echo_head; 791 head = ldata->echo_head;
792 ldata->echo_mark = head;
790 old = ldata->echo_commit - ldata->echo_tail; 793 old = ldata->echo_commit - ldata->echo_tail;
791 794
792 /* Process committed echoes if the accumulated # of bytes 795 /* Process committed echoes if the accumulated # of bytes
@@ -811,10 +814,11 @@ static void process_echoes(struct tty_struct *tty)
811 size_t echoed; 814 size_t echoed;
812 815
813 if ((!L_ECHO(tty) && !L_ECHONL(tty)) || 816 if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
814 ldata->echo_commit == ldata->echo_tail) 817 ldata->echo_mark == ldata->echo_tail)
815 return; 818 return;
816 819
817 mutex_lock(&ldata->output_lock); 820 mutex_lock(&ldata->output_lock);
821 ldata->echo_commit = ldata->echo_mark;
818 echoed = __process_echoes(tty); 822 echoed = __process_echoes(tty);
819 mutex_unlock(&ldata->output_lock); 823 mutex_unlock(&ldata->output_lock);
820 824
@@ -822,6 +826,7 @@ static void process_echoes(struct tty_struct *tty)
822 tty->ops->flush_chars(tty); 826 tty->ops->flush_chars(tty);
823} 827}
824 828
829/* NB: echo_mark and echo_head should be equivalent here */
825static void flush_echoes(struct tty_struct *tty) 830static void flush_echoes(struct tty_struct *tty)
826{ 831{
827 struct n_tty_data *ldata = tty->disc_data; 832 struct n_tty_data *ldata = tty->disc_data;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 4658e3e0ec42..06525f10e364 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -96,7 +96,8 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
96 if (offset == UART_LCR) { 96 if (offset == UART_LCR) {
97 int tries = 1000; 97 int tries = 1000;
98 while (tries--) { 98 while (tries--) {
99 if (value == p->serial_in(p, UART_LCR)) 99 unsigned int lcr = p->serial_in(p, UART_LCR);
100 if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
100 return; 101 return;
101 dw8250_force_idle(p); 102 dw8250_force_idle(p);
102 writeb(value, p->membase + (UART_LCR << p->regshift)); 103 writeb(value, p->membase + (UART_LCR << p->regshift));
@@ -132,7 +133,8 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
132 if (offset == UART_LCR) { 133 if (offset == UART_LCR) {
133 int tries = 1000; 134 int tries = 1000;
134 while (tries--) { 135 while (tries--) {
135 if (value == p->serial_in(p, UART_LCR)) 136 unsigned int lcr = p->serial_in(p, UART_LCR);
137 if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
136 return; 138 return;
137 dw8250_force_idle(p); 139 dw8250_force_idle(p);
138 writel(value, p->membase + (UART_LCR << p->regshift)); 140 writel(value, p->membase + (UART_LCR << p->regshift));
@@ -455,6 +457,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
455static const struct acpi_device_id dw8250_acpi_match[] = { 457static const struct acpi_device_id dw8250_acpi_match[] = {
456 { "INT33C4", 0 }, 458 { "INT33C4", 0 },
457 { "INT33C5", 0 }, 459 { "INT33C5", 0 },
460 { "INT3434", 0 },
461 { "INT3435", 0 },
458 { "80860F0A", 0 }, 462 { "80860F0A", 0 },
459 { }, 463 { },
460}; 464};
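
The LCR hunks above stop comparing the written value byte-for-byte with the read-back and instead mask UART_LCR_SPAR out of both sides, presumably because the stick-parity bit may not read back as written on this IP. A self-contained user-space sketch of that masked retry pattern (the fake register and its read-back behavior are assumptions for illustration, not the driver's code):

	#include <stdbool.h>
	#include <stdio.h>

	#define UART_LCR_SPAR 0x20	/* stick parity, bit 5 of LCR (8250 layout) */

	/* Fake register backing store so the sketch is self-contained;
	 * this model assumes the hardware drops SPAR on read-back. */
	static unsigned int fake_lcr;

	static unsigned int reg_read(void)   { return fake_lcr; }
	static void reg_write(unsigned int v) { fake_lcr = v & ~UART_LCR_SPAR; }

	/* Retry the LCR write until the read-back matches, ignoring SPAR,
	 * mirroring the dw8250_serial_out() change above. */
	static bool lcr_write_checked(unsigned int value)
	{
		int tries = 1000;

		while (tries--) {
			unsigned int lcr = reg_read();

			if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
				return true;
			reg_write(value);
		}
		return false;
	}

	int main(void)
	{
		printf("ok=%d\n", lcr_write_checked(0x3b));	/* 0x3b has SPAR set */
		return 0;
	}

With the mask in place the loop converges even though the read-back never equals the written value exactly.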
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index e46e9f3f19b9..f619ad5b5eae 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -240,6 +240,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
240 continue; 240 continue;
241 } 241 }
242 242
243#ifdef SUPPORT_SYSRQ
243 /* 244 /*
244 * uart_handle_sysrq_char() doesn't work if 245 * uart_handle_sysrq_char() doesn't work if
245 * spinlocked, for some reason 246 * spinlocked, for some reason
@@ -253,6 +254,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
253 } 254 }
254 spin_lock(&port->lock); 255 spin_lock(&port->lock);
255 } 256 }
257#endif
256 258
257 port->icount.rx++; 259 port->icount.rx++;
258 260
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 22fad8ad5ac2..d8a55e87877f 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
86 return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); 86 return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
87} 87}
88 88
89/*
90 * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
91 * Returns 1 if count was successfully changed; @*old will have @new value.
92 * Returns 0 if count was not changed; @*old will have most recent sem->count
93 */
89static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem) 94static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
90{ 95{
91 long tmp = *old; 96 long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
92 *old = atomic_long_cmpxchg(&sem->count, *old, new); 97 if (tmp == *old) {
93 return *old == tmp; 98 *old = new;
99 return 1;
100 } else {
101 *old = tmp;
102 return 0;
103 }
94} 104}
95 105
96/* 106/*
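
The rewritten helper makes the compare-and-swap contract explicit: on success *old is advanced to the new value, and on failure it is refreshed with the count actually observed, so callers can retry from fresh state. A user-space C11 analog of the fixed semantics (a sketch, not the kernel primitive):

	#include <stdatomic.h>
	#include <stdio.h>

	/* Analog of the fixed ldsem_cmpxchg(): returns 1 and advances *old
	 * to new_val on success; returns 0 and refreshes *old on failure. */
	static int cmpxchg_update(long *old, long new_val, _Atomic long *count)
	{
		/* atomic_compare_exchange_strong() already writes the observed
		 * value back into *old on failure, matching the fix above. */
		if (atomic_compare_exchange_strong(count, old, new_val)) {
			*old = new_val;
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		_Atomic long count = 5;
		long old = 5;

		printf("swap 5->6: %d (old=%ld)\n",
		       cmpxchg_update(&old, 6, &count), old);	/* 1 (old=6) */
		old = 5;	/* deliberately stale */
		printf("swap 5->7: %d (old=%ld)\n",
		       cmpxchg_update(&old, 7, &count), old);	/* 0 (old=6) */
		return 0;
	}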
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5d8981c5235e..6e73f8cd60e5 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -642,6 +642,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
642 : CI_ROLE_GADGET; 642 : CI_ROLE_GADGET;
643 } 643 }
644 644
645 /* only update vbus status for peripheral */
646 if (ci->role == CI_ROLE_GADGET)
647 ci_handle_vbus_change(ci);
648
645 ret = ci_role_start(ci, ci->role); 649 ret = ci_role_start(ci, ci->role);
646 if (ret) { 650 if (ret) {
647 dev_err(dev, "can't start %s role\n", ci_role(ci)->name); 651 dev_err(dev, "can't start %s role\n", ci_role(ci)->name);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 59e6020ea753..526cd77563d8 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -88,7 +88,8 @@ static int host_start(struct ci_hdrc *ci)
88 return ret; 88 return ret;
89 89
90disable_reg: 90disable_reg:
91 regulator_disable(ci->platdata->reg_vbus); 91 if (ci->platdata->reg_vbus)
92 regulator_disable(ci->platdata->reg_vbus);
92 93
93put_hcd: 94put_hcd:
94 usb_put_hcd(hcd); 95 usb_put_hcd(hcd);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 77e4a17cfb44..73a39ef93ec5 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1795,9 +1795,6 @@ static int udc_start(struct ci_hdrc *ci)
1795 pm_runtime_no_callbacks(&ci->gadget.dev); 1795 pm_runtime_no_callbacks(&ci->gadget.dev);
1796 pm_runtime_enable(&ci->gadget.dev); 1796 pm_runtime_enable(&ci->gadget.dev);
1797 1797
1798 /* Update ci->vbus_active */
1799 ci_handle_vbus_change(ci);
1800
1801 return retval; 1798 return retval;
1802 1799
1803destroy_eps: 1800destroy_eps:
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 4d387596f3f0..0b23a8639311 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -854,13 +854,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on)
854{ 854{
855 /* need autopm_get/put here to ensure the usbcore sees the new value */ 855 /* need autopm_get/put here to ensure the usbcore sees the new value */
856 int rv = usb_autopm_get_interface(intf); 856 int rv = usb_autopm_get_interface(intf);
857 if (rv < 0)
858 goto err;
859 857
860 intf->needs_remote_wakeup = on; 858 intf->needs_remote_wakeup = on;
861 usb_autopm_put_interface(intf); 859 if (!rv)
862err: 860 usb_autopm_put_interface(intf);
863 return rv; 861 return 0;
864} 862}
865 863
866static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) 864static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 74f9cf02da07..a49217ae3533 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -455,9 +455,6 @@ static int dwc3_probe(struct platform_device *pdev)
455 if (IS_ERR(regs)) 455 if (IS_ERR(regs))
456 return PTR_ERR(regs); 456 return PTR_ERR(regs);
457 457
458 usb_phy_set_suspend(dwc->usb2_phy, 0);
459 usb_phy_set_suspend(dwc->usb3_phy, 0);
460
461 spin_lock_init(&dwc->lock); 458 spin_lock_init(&dwc->lock);
462 platform_set_drvdata(pdev, dwc); 459 platform_set_drvdata(pdev, dwc);
463 460
@@ -488,6 +485,9 @@ static int dwc3_probe(struct platform_device *pdev)
488 goto err0; 485 goto err0;
489 } 486 }
490 487
488 usb_phy_set_suspend(dwc->usb2_phy, 0);
489 usb_phy_set_suspend(dwc->usb3_phy, 0);
490
491 ret = dwc3_event_buffers_setup(dwc); 491 ret = dwc3_event_buffers_setup(dwc);
492 if (ret) { 492 if (ret) {
493 dev_err(dwc->dev, "failed to setup event buffers\n"); 493 dev_err(dwc->dev, "failed to setup event buffers\n");
@@ -569,6 +569,8 @@ err2:
569 dwc3_event_buffers_cleanup(dwc); 569 dwc3_event_buffers_cleanup(dwc);
570 570
571err1: 571err1:
572 usb_phy_set_suspend(dwc->usb2_phy, 1);
573 usb_phy_set_suspend(dwc->usb3_phy, 1);
572 dwc3_core_exit(dwc); 574 dwc3_core_exit(dwc);
573 575
574err0: 576err0:
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 418444ebb1b8..8c356af79409 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -136,23 +136,27 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
136 struct ohci_hcd *ohci; 136 struct ohci_hcd *ohci;
137 int retval; 137 int retval;
138 struct usb_hcd *hcd = NULL; 138 struct usb_hcd *hcd = NULL;
139 139 struct device *dev = &pdev->dev;
140 if (pdev->num_resources != 2) { 140 struct resource *res;
141 pr_debug("hcd probe: invalid num_resources"); 141 int irq;
142 return -ENODEV; 142
143 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
144 if (!res) {
145 dev_dbg(dev, "hcd probe: missing memory resource\n");
146 return -ENXIO;
143 } 147 }
144 148
145 if ((pdev->resource[0].flags != IORESOURCE_MEM) 149 irq = platform_get_irq(pdev, 0);
146 || (pdev->resource[1].flags != IORESOURCE_IRQ)) { 150 if (irq < 0) {
147 pr_debug("hcd probe: invalid resource type\n"); 151 dev_dbg(dev, "hcd probe: missing irq resource\n");
148 return -ENODEV; 152 return irq;
149 } 153 }
150 154
151 hcd = usb_create_hcd(driver, &pdev->dev, "at91"); 155 hcd = usb_create_hcd(driver, &pdev->dev, "at91");
152 if (!hcd) 156 if (!hcd)
153 return -ENOMEM; 157 return -ENOMEM;
154 hcd->rsrc_start = pdev->resource[0].start; 158 hcd->rsrc_start = res->start;
155 hcd->rsrc_len = resource_size(&pdev->resource[0]); 159 hcd->rsrc_len = resource_size(res);
156 160
157 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 161 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
158 pr_debug("request_mem_region failed\n"); 162 pr_debug("request_mem_region failed\n");
@@ -199,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
199 ohci->num_ports = board->ports; 203 ohci->num_ports = board->ports;
200 at91_start_hc(pdev); 204 at91_start_hc(pdev);
201 205
202 retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED); 206 retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
203 if (retval == 0) 207 if (retval == 0)
204 return retval; 208 return retval;
205 209
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b8dffd59eb25..73f5208714a4 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
128 * any other sleep) on Haswell machines with LPT and LPT-LP 128 * any other sleep) on Haswell machines with LPT and LPT-LP
129 * with the new Intel BIOS 129 * with the new Intel BIOS
130 */ 130 */
131 xhci->quirks |= XHCI_SPURIOUS_WAKEUP; 131 /* Limit the quirk to only known vendors, as this triggers
132 * yet another BIOS bug on some other machines
133 * https://bugzilla.kernel.org/show_bug.cgi?id=66171
134 */
135 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
136 xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
132 } 137 }
133 if (pdev->vendor == PCI_VENDOR_ID_ETRON && 138 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
134 pdev->device == PCI_DEVICE_ID_ASROCK_P67) { 139 pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 3e9383698c85..7d1451d5bbea 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -7,7 +7,8 @@ config USB_PHY
7 def_bool n 7 def_bool n
8 8
9config USB_OTG_FSM 9config USB_OTG_FSM
10 bool "USB 2.0 OTG FSM implementation" 10 tristate "USB 2.0 OTG FSM implementation"
11 depends on USB
11 select USB_OTG 12 select USB_OTG
12 select USB_PHY 13 select USB_PHY
13 help 14 help
@@ -37,6 +38,7 @@ config FSL_USB2_OTG
37config ISP1301_OMAP 38config ISP1301_OMAP
38 tristate "Philips ISP1301 with OMAP OTG" 39 tristate "Philips ISP1301 with OMAP OTG"
39 depends on I2C && ARCH_OMAP_OTG 40 depends on I2C && ARCH_OMAP_OTG
41 depends on USB
40 select USB_PHY 42 select USB_PHY
41 help 43 help
42 If you say yes here you get support for the Philips ISP1301 44 If you say yes here you get support for the Philips ISP1301
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 82232acf1ab6..bbe4f8e6e8d7 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -876,7 +876,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
876 876
877 tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start, 877 tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
878 resource_size(res)); 878 resource_size(res));
879 if (!tegra_phy->regs) { 879 if (!tegra_phy->pad_regs) {
880 dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n"); 880 dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n");
881 return -ENOMEM; 881 return -ENOMEM;
882 } 882 }
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index d2682ba58211..214172b68d5d 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -127,7 +127,8 @@ static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module,
127 127
128static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address) 128static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address)
129{ 129{
130 u8 data, ret = 0; 130 u8 data;
131 int ret;
131 132
132 ret = twl_i2c_read_u8(module, &data, address); 133 ret = twl_i2c_read_u8(module, &data, address);
133 if (ret >= 0) 134 if (ret >= 0)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 496b7e39d5be..cc7a24154490 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
251#define ZTE_PRODUCT_MF628 0x0015 251#define ZTE_PRODUCT_MF628 0x0015
252#define ZTE_PRODUCT_MF626 0x0031 252#define ZTE_PRODUCT_MF626 0x0031
253#define ZTE_PRODUCT_MC2718 0xffe8 253#define ZTE_PRODUCT_MC2718 0xffe8
254#define ZTE_PRODUCT_AC2726 0xfff1
254 255
255#define BENQ_VENDOR_ID 0x04a5 256#define BENQ_VENDOR_ID 0x04a5
256#define BENQ_PRODUCT_H10 0x4068 257#define BENQ_PRODUCT_H10 0x4068
@@ -1453,6 +1454,7 @@ static const struct usb_device_id option_ids[] = {
1453 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, 1454 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
1454 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, 1455 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
1455 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, 1456 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
1457 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
1456 1458
1457 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, 1459 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
1458 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, 1460 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
index fca4c752a4ed..eae2c873b39f 100644
--- a/drivers/usb/serial/zte_ev.c
+++ b/drivers/usb/serial/zte_ev.c
@@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = {
281 { USB_DEVICE(0x19d2, 0xfffd) }, 281 { USB_DEVICE(0x19d2, 0xfffd) },
282 { USB_DEVICE(0x19d2, 0xfffc) }, 282 { USB_DEVICE(0x19d2, 0xfffc) },
283 { USB_DEVICE(0x19d2, 0xfffb) }, 283 { USB_DEVICE(0x19d2, 0xfffb) },
284 /* AC2726, AC8710_V3 */ 284 /* AC8710_V3 */
285 { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
286 { USB_DEVICE(0x19d2, 0xfff6) }, 285 { USB_DEVICE(0x19d2, 0xfff6) },
287 { USB_DEVICE(0x19d2, 0xfff7) }, 286 { USB_DEVICE(0x19d2, 0xfff7) },
288 { USB_DEVICE(0x19d2, 0xfff8) }, 287 { USB_DEVICE(0x19d2, 0xfff8) },
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 55ea73f7c70b..4c02e2b94103 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 
 		pfn = page_to_pfn(page);
 
-		set_phys_to_machine(pfn, frame_list[i]);
-
 #ifdef CONFIG_XEN_HAVE_PVMMU
-		/* Link back into the page tables if not highmem. */
-		if (xen_pv_domain() && !PageHighMem(page)) {
-			int ret;
-			ret = HYPERVISOR_update_va_mapping(
-				(unsigned long)__va(pfn << PAGE_SHIFT),
-				mfn_pte(frame_list[i], PAGE_KERNEL),
-				0);
-			BUG_ON(ret);
+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			set_phys_to_machine(pfn, frame_list[i]);
+
+			/* Link back into the page tables if not highmem. */
+			if (!PageHighMem(page)) {
+				int ret;
+				ret = HYPERVISOR_update_va_mapping(
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					mfn_pte(frame_list[i], PAGE_KERNEL),
+					0);
+				BUG_ON(ret);
+			}
 		}
 #endif
 
@@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	enum bp_state state = BP_DONE;
 	unsigned long pfn, i;
 	struct page *page;
-	struct page *scratch_page;
 	int ret;
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
@@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
 		scrub_page(page);
 
+#ifdef CONFIG_XEN_HAVE_PVMMU
 		/*
 		 * Ballooned out frames are effectively replaced with
 		 * a scratch frame. Ensure direct mappings and the
 		 * p2m are consistent.
 		 */
-		scratch_page = get_balloon_scratch_page();
-#ifdef CONFIG_XEN_HAVE_PVMMU
-		if (xen_pv_domain() && !PageHighMem(page)) {
-			ret = HYPERVISOR_update_va_mapping(
-				(unsigned long)__va(pfn << PAGE_SHIFT),
-				pfn_pte(page_to_pfn(scratch_page),
-					PAGE_KERNEL_RO), 0);
-			BUG_ON(ret);
-		}
-#endif
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 			unsigned long p;
+			struct page *scratch_page = get_balloon_scratch_page();
+
+			if (!PageHighMem(page)) {
+				ret = HYPERVISOR_update_va_mapping(
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					pfn_pte(page_to_pfn(scratch_page),
+						PAGE_KERNEL_RO), 0);
+				BUG_ON(ret);
+			}
 			p = page_to_pfn(scratch_page);
 			__set_phys_to_machine(pfn, pfn_to_mfn(p));
+
+			put_balloon_scratch_page();
 		}
-		put_balloon_scratch_page();
+#endif
 
 		balloon_append(pfn_to_page(pfn));
 	}
@@ -627,15 +630,17 @@ static int __init balloon_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	for_each_online_cpu(cpu)
-	{
-		per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
-		if (per_cpu(balloon_scratch_page, cpu) == NULL) {
-			pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
-			return -ENOMEM;
-		}
-	}
-	register_cpu_notifier(&balloon_cpu_notifier);
+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+		for_each_online_cpu(cpu)
+		{
+			per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+			if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+				pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+				return -ENOMEM;
+			}
+		}
+		register_cpu_notifier(&balloon_cpu_notifier);
+	}
 
 	pr_info("Initialising balloon driver\n");
 
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 028387192b60..aa846a48f400 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1176,7 +1176,8 @@ static int gnttab_setup(void)
1176 gnttab_shared.addr = xen_remap(xen_hvm_resume_frames, 1176 gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
1177 PAGE_SIZE * max_nr_gframes); 1177 PAGE_SIZE * max_nr_gframes);
1178 if (gnttab_shared.addr == NULL) { 1178 if (gnttab_shared.addr == NULL) {
1179 pr_warn("Failed to ioremap gnttab share frames!\n"); 1179 pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
1180 xen_hvm_resume_frames);
1180 return -ENOMEM; 1181 return -ENOMEM;
1181 } 1182 }
1182 } 1183 }
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8e74590fa1bb..569a13b9e856 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma)
533{ 533{
534 struct page **pages = vma->vm_private_data; 534 struct page **pages = vma->vm_private_data;
535 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 535 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
536 int rc;
536 537
537 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) 538 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
538 return; 539 return;
539 540
540 xen_unmap_domain_mfn_range(vma, numpgs, pages); 541 rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
541 free_xenballooned_pages(numpgs, pages); 542 if (rc == 0)
543 free_xenballooned_pages(numpgs, pages);
544 else
545 pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
546 numpgs, rc);
542 kfree(pages); 547 kfree(pages);
543} 548}
544 549
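
The privcmd change illustrates a general cleanup rule: only release backing memory once the unmap has definitely succeeded, and deliberately leak (with a loud message) otherwise, since freeing still-mapped pages would corrupt memory. A self-contained user-space sketch of the same ordering (the unmap_range() helper is hypothetical, standing in for xen_unmap_domain_mfn_range()):

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical unmap that can fail. */
	static int unmap_range(void **pages, int numpgs)
	{
		(void)pages;
		return numpgs > 0 ? 0 : -1;
	}

	/* Free backing pages only when the unmap succeeded; otherwise leak
	 * them on purpose, since freeing still-mapped memory is worse. */
	static void close_mapping(void **pages, int numpgs)
	{
		int rc = unmap_range(pages, numpgs);

		if (rc == 0) {
			for (int i = 0; i < numpgs; i++)
				free(pages[i]);
		} else {
			fprintf(stderr,
				"unable to unmap range: leaking %d pages, rc=%d\n",
				numpgs, rc);
		}
		free(pages);	/* the page array itself is always reclaimed */
	}

	int main(void)
	{
		int n = 2;
		void **pages = malloc(n * sizeof(*pages));

		for (int i = 0; i < n; i++)
			pages[i] = malloc(16);
		close_mapping(pages, n);
		return 0;
	}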
diff --git a/fs/aio.c b/fs/aio.c
index 6efb7f6cb22e..062a5f6a1448 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -244,9 +244,14 @@ static void aio_free_ring(struct kioctx *ctx)
244 int i; 244 int i;
245 245
246 for (i = 0; i < ctx->nr_pages; i++) { 246 for (i = 0; i < ctx->nr_pages; i++) {
247 struct page *page;
247 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, 248 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
248 page_count(ctx->ring_pages[i])); 249 page_count(ctx->ring_pages[i]));
249 put_page(ctx->ring_pages[i]); 250 page = ctx->ring_pages[i];
251 if (!page)
252 continue;
253 ctx->ring_pages[i] = NULL;
254 put_page(page);
250 } 255 }
251 256
252 put_aio_ring_file(ctx); 257 put_aio_ring_file(ctx);
@@ -280,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
280 unsigned long flags; 285 unsigned long flags;
281 int rc; 286 int rc;
282 287
288 rc = 0;
289
290 /* Make sure the old page hasn't already been changed */
291 spin_lock(&mapping->private_lock);
292 ctx = mapping->private_data;
293 if (ctx) {
294 pgoff_t idx;
295 spin_lock_irqsave(&ctx->completion_lock, flags);
296 idx = old->index;
297 if (idx < (pgoff_t)ctx->nr_pages) {
298 if (ctx->ring_pages[idx] != old)
299 rc = -EAGAIN;
300 } else
301 rc = -EINVAL;
302 spin_unlock_irqrestore(&ctx->completion_lock, flags);
303 } else
304 rc = -EINVAL;
305 spin_unlock(&mapping->private_lock);
306
307 if (rc != 0)
308 return rc;
309
283 /* Writeback must be complete */ 310 /* Writeback must be complete */
284 BUG_ON(PageWriteback(old)); 311 BUG_ON(PageWriteback(old));
285 put_page(old); 312 get_page(new);
286 313
287 rc = migrate_page_move_mapping(mapping, new, old, NULL, mode); 314 rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
288 if (rc != MIGRATEPAGE_SUCCESS) { 315 if (rc != MIGRATEPAGE_SUCCESS) {
289 get_page(old); 316 put_page(new);
290 return rc; 317 return rc;
291 } 318 }
292 319
293 get_page(new);
294
295 /* We can potentially race against kioctx teardown here. Use the 320 /* We can potentially race against kioctx teardown here. Use the
296 * address_space's private data lock to protect the mapping's 321 * address_space's private data lock to protect the mapping's
297 * private_data. 322 * private_data.
@@ -303,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
303 spin_lock_irqsave(&ctx->completion_lock, flags); 328 spin_lock_irqsave(&ctx->completion_lock, flags);
304 migrate_page_copy(new, old); 329 migrate_page_copy(new, old);
305 idx = old->index; 330 idx = old->index;
306 if (idx < (pgoff_t)ctx->nr_pages) 331 if (idx < (pgoff_t)ctx->nr_pages) {
307 ctx->ring_pages[idx] = new; 332 /* And only do the move if things haven't changed */
333 if (ctx->ring_pages[idx] == old)
334 ctx->ring_pages[idx] = new;
335 else
336 rc = -EAGAIN;
337 } else
338 rc = -EINVAL;
308 spin_unlock_irqrestore(&ctx->completion_lock, flags); 339 spin_unlock_irqrestore(&ctx->completion_lock, flags);
309 } else 340 } else
310 rc = -EBUSY; 341 rc = -EBUSY;
311 spin_unlock(&mapping->private_lock); 342 spin_unlock(&mapping->private_lock);
312 343
344 if (rc == MIGRATEPAGE_SUCCESS)
345 put_page(old);
346 else
347 put_page(new);
348
313 return rc; 349 return rc;
314} 350}
315#endif 351#endif
@@ -326,7 +362,7 @@ static int aio_setup_ring(struct kioctx *ctx)
326 struct aio_ring *ring; 362 struct aio_ring *ring;
327 unsigned nr_events = ctx->max_reqs; 363 unsigned nr_events = ctx->max_reqs;
328 struct mm_struct *mm = current->mm; 364 struct mm_struct *mm = current->mm;
329 unsigned long size, populate; 365 unsigned long size, unused;
330 int nr_pages; 366 int nr_pages;
331 int i; 367 int i;
332 struct file *file; 368 struct file *file;
@@ -347,6 +383,20 @@ static int aio_setup_ring(struct kioctx *ctx)
347 return -EAGAIN; 383 return -EAGAIN;
348 } 384 }
349 385
386 ctx->aio_ring_file = file;
387 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
388 / sizeof(struct io_event);
389
390 ctx->ring_pages = ctx->internal_pages;
391 if (nr_pages > AIO_RING_PAGES) {
392 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
393 GFP_KERNEL);
394 if (!ctx->ring_pages) {
395 put_aio_ring_file(ctx);
396 return -ENOMEM;
397 }
398 }
399
350 for (i = 0; i < nr_pages; i++) { 400 for (i = 0; i < nr_pages; i++) {
351 struct page *page; 401 struct page *page;
352 page = find_or_create_page(file->f_inode->i_mapping, 402 page = find_or_create_page(file->f_inode->i_mapping,
@@ -358,19 +408,14 @@ static int aio_setup_ring(struct kioctx *ctx)
358 SetPageUptodate(page); 408 SetPageUptodate(page);
359 SetPageDirty(page); 409 SetPageDirty(page);
360 unlock_page(page); 410 unlock_page(page);
411
412 ctx->ring_pages[i] = page;
361 } 413 }
362 ctx->aio_ring_file = file; 414 ctx->nr_pages = i;
363 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
364 / sizeof(struct io_event);
365 415
366 ctx->ring_pages = ctx->internal_pages; 416 if (unlikely(i != nr_pages)) {
367 if (nr_pages > AIO_RING_PAGES) { 417 aio_free_ring(ctx);
368 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), 418 return -EAGAIN;
369 GFP_KERNEL);
370 if (!ctx->ring_pages) {
371 put_aio_ring_file(ctx);
372 return -ENOMEM;
373 }
374 } 419 }
375 420
376 ctx->mmap_size = nr_pages * PAGE_SIZE; 421 ctx->mmap_size = nr_pages * PAGE_SIZE;
@@ -379,9 +424,9 @@ static int aio_setup_ring(struct kioctx *ctx)
379 down_write(&mm->mmap_sem); 424 down_write(&mm->mmap_sem);
380 ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, 425 ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
381 PROT_READ | PROT_WRITE, 426 PROT_READ | PROT_WRITE,
382 MAP_SHARED | MAP_POPULATE, 0, &populate); 427 MAP_SHARED, 0, &unused);
428 up_write(&mm->mmap_sem);
383 if (IS_ERR((void *)ctx->mmap_base)) { 429 if (IS_ERR((void *)ctx->mmap_base)) {
384 up_write(&mm->mmap_sem);
385 ctx->mmap_size = 0; 430 ctx->mmap_size = 0;
386 aio_free_ring(ctx); 431 aio_free_ring(ctx);
387 return -EAGAIN; 432 return -EAGAIN;
@@ -389,27 +434,6 @@ static int aio_setup_ring(struct kioctx *ctx)
389 434
390 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); 435 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
391 436
392 /* We must do this while still holding mmap_sem for write, as we
393 * need to be protected against userspace attempting to mremap()
394 * or munmap() the ring buffer.
395 */
396 ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
397 1, 0, ctx->ring_pages, NULL);
398
399 /* Dropping the reference here is safe as the page cache will hold
400 * onto the pages for us. It is also required so that page migration
401 * can unmap the pages and get the right reference count.
402 */
403 for (i = 0; i < ctx->nr_pages; i++)
404 put_page(ctx->ring_pages[i]);
405
406 up_write(&mm->mmap_sem);
407
408 if (unlikely(ctx->nr_pages != nr_pages)) {
409 aio_free_ring(ctx);
410 return -EAGAIN;
411 }
412
413 ctx->user_id = ctx->mmap_base; 437 ctx->user_id = ctx->mmap_base;
414 ctx->nr_events = nr_events; /* trusted copy */ 438 ctx->nr_events = nr_events; /* trusted copy */
415 439
@@ -652,7 +676,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
652 aio_nr += ctx->max_reqs; 676 aio_nr += ctx->max_reqs;
653 spin_unlock(&aio_nr_lock); 677 spin_unlock(&aio_nr_lock);
654 678
655 percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ 679 percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
680 percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */
656 681
657 err = ioctx_add_table(ctx, mm); 682 err = ioctx_add_table(ctx, mm);
658 if (err) 683 if (err)
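
The reworked aio_migratepage() pins the new page before moving the mapping and only drops the old page's reference once migration has fully succeeded (dropping the new page's pin instead on failure), closing the window where a concurrent teardown could see a page with no holder. A simplified, lock-free sketch of that reference ordering (toy refcounts, assumptions: single-threaded demo, move_mapping() stands in for migrate_page_move_mapping()):

	#include <stdio.h>

	struct page { int refcount; };

	static void get_page(struct page *p) { p->refcount++; }
	static void put_page(struct page *p) { p->refcount--; }

	/* Hypothetical stand-in for migrate_page_move_mapping(). */
	static int move_mapping(struct page **slot, struct page *new_p,
				int should_fail)
	{
		if (should_fail)
			return -1;
		*slot = new_p;
		return 0;
	}

	/* Mirrors the fixed ordering: pin the new page first, drop the old
	 * page only after the move has definitely succeeded. */
	static int migrate(struct page **slot, struct page *old_p,
			   struct page *new_p, int should_fail)
	{
		int rc;

		get_page(new_p);		/* pin before the move */
		rc = move_mapping(slot, new_p, should_fail);
		if (rc == 0)
			put_page(old_p);	/* success: old loses its ref */
		else
			put_page(new_p);	/* failure: undo our pin */
		return rc;
	}

	int main(void)
	{
		struct page old_p = { .refcount = 1 }, new_p = { .refcount = 1 };
		struct page *slot = &old_p;

		migrate(&slot, &old_p, &new_p, 0);
		printf("old=%d new=%d slot==new:%d\n",
		       old_p.refcount, new_p.refcount, slot == &new_p);
		return 0;
	}

At no point does either page's count drop to zero while it is still reachable, which is exactly the invariant the old code violated.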
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 1e561c059539..ec3ba43b9faa 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -210,9 +210,13 @@ static int readpage_nounlock(struct file *filp, struct page *page)
210 if (err < 0) { 210 if (err < 0) {
211 SetPageError(page); 211 SetPageError(page);
212 goto out; 212 goto out;
213 } else if (err < PAGE_CACHE_SIZE) { 213 } else {
214 if (err < PAGE_CACHE_SIZE) {
214 /* zero fill remainder of page */ 215 /* zero fill remainder of page */
215 zero_user_segment(page, err, PAGE_CACHE_SIZE); 216 zero_user_segment(page, err, PAGE_CACHE_SIZE);
217 } else {
218 flush_dcache_page(page);
219 }
216 } 220 }
217 SetPageUptodate(page); 221 SetPageUptodate(page);
218 222
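
The pattern in this hunk is worth spelling out: after a short read, the tail of the page must be zeroed before the page is marked uptodate, or stale contents leak to userspace; after a full read, the dcache is flushed instead. A self-contained user-space sketch of the zero-fill half (illustrative; the kernel's zero_user_segment()/flush_dcache_page() are replaced by memset() and a comment):

	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE 4096

	/* Mirrors the readpage logic above: zero the tail after a short
	 * read; on a full read the kernel would flush_dcache_page(). */
	static void finish_read(unsigned char *page, int err)
	{
		if (err < 0)
			return;		/* error path: page stays !uptodate */
		if (err < PAGE_SIZE)
			memset(page + err, 0, PAGE_SIZE - err);
	}

	int main(void)
	{
		static unsigned char page[PAGE_SIZE];

		memset(page, 0xaa, sizeof(page));	/* pretend stale contents */
		finish_read(page, 100);			/* short read of 100 bytes */
		printf("byte[99]=0x%02x byte[100]=0x%02x\n",
		       page[99], page[100]);		/* 0xaa then 0x00 */
		return 0;
	}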
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 9a8e396aed89..278fd2891288 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -978,7 +978,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 	struct ceph_mds_reply_inode *ininfo;
 	struct ceph_vino vino;
 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
-	int i = 0;
 	int err = 0;
 
 	dout("fill_trace %p is_dentry %d is_target %d\n", req,
@@ -1039,6 +1038,29 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 		}
 	}
 
+	if (rinfo->head->is_target) {
+		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
+		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
+
+		in = ceph_get_inode(sb, vino);
+		if (IS_ERR(in)) {
+			err = PTR_ERR(in);
+			goto done;
+		}
+		req->r_target_inode = in;
+
+		err = fill_inode(in, &rinfo->targeti, NULL,
+				session, req->r_request_started,
+				(le32_to_cpu(rinfo->head->result) == 0) ?
+				req->r_fmode : -1,
+				&req->r_caps_reservation);
+		if (err < 0) {
+			pr_err("fill_inode badness %p %llx.%llx\n",
+				in, ceph_vinop(in));
+			goto done;
+		}
+	}
+
 	/*
 	 * ignore null lease/binding on snapdir ENOENT, or else we
 	 * will have trouble splicing in the virtual snapdir later
@@ -1108,7 +1130,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 			ceph_dentry(req->r_old_dentry)->offset);
 
 		dn = req->r_old_dentry;  /* use old_dentry */
-		in = dn->d_inode;
 	}
 
 	/* null dentry? */
@@ -1130,44 +1151,28 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 
 
 		/* attach proper inode */
-		ininfo = rinfo->targeti.in;
-		vino.ino = le64_to_cpu(ininfo->ino);
-		vino.snap = le64_to_cpu(ininfo->snapid);
-		in = dn->d_inode;
-		if (!in) {
-			in = ceph_get_inode(sb, vino);
-			if (IS_ERR(in)) {
-				pr_err("fill_trace bad get_inode "
-				       "%llx.%llx\n", vino.ino, vino.snap);
-				err = PTR_ERR(in);
-				d_drop(dn);
-				goto done;
-			}
+		if (!dn->d_inode) {
+			ihold(in);
 			dn = splice_dentry(dn, in, &have_lease, true);
 			if (IS_ERR(dn)) {
 				err = PTR_ERR(dn);
 				goto done;
 			}
 			req->r_dentry = dn;  /* may have spliced */
-			ihold(in);
-		} else if (ceph_ino(in) == vino.ino &&
-			   ceph_snap(in) == vino.snap) {
-			ihold(in);
-		} else {
+		} else if (dn->d_inode && dn->d_inode != in) {
 			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
-			     dn, in, ceph_ino(in), ceph_snap(in),
-			     vino.ino, vino.snap);
+			     dn, dn->d_inode, ceph_vinop(dn->d_inode),
+			     ceph_vinop(in));
 			have_lease = false;
-			in = NULL;
 		}
 
 		if (have_lease)
 			update_dentry_lease(dn, rinfo->dlease, session,
 					    req->r_request_started);
 		dout(" final dn %p\n", dn);
-		i++;
-	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
-		    req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) {
+	} else if (!req->r_aborted &&
+		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
+		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
 		struct dentry *dn = req->r_dentry;
 
 		/* fill out a snapdir LOOKUPSNAP dentry */
@@ -1177,52 +1182,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 		ininfo = rinfo->targeti.in;
 		vino.ino = le64_to_cpu(ininfo->ino);
 		vino.snap = le64_to_cpu(ininfo->snapid);
-		in = ceph_get_inode(sb, vino);
-		if (IS_ERR(in)) {
-			pr_err("fill_inode get_inode badness %llx.%llx\n",
-			       vino.ino, vino.snap);
-			err = PTR_ERR(in);
-			d_delete(dn);
-			goto done;
-		}
 		dout(" linking snapped dir %p to dn %p\n", in, dn);
+		ihold(in);
 		dn = splice_dentry(dn, in, NULL, true);
 		if (IS_ERR(dn)) {
 			err = PTR_ERR(dn);
 			goto done;
 		}
 		req->r_dentry = dn;  /* may have spliced */
-		ihold(in);
-		rinfo->head->is_dentry = 1;  /* fool notrace handlers */
-	}
-
-	if (rinfo->head->is_target) {
-		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
-		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
-
-		if (in == NULL || ceph_ino(in) != vino.ino ||
-		    ceph_snap(in) != vino.snap) {
-			in = ceph_get_inode(sb, vino);
-			if (IS_ERR(in)) {
-				err = PTR_ERR(in);
-				goto done;
-			}
-		}
-		req->r_target_inode = in;
-
-		err = fill_inode(in,
-				 &rinfo->targeti, NULL,
-				 session, req->r_request_started,
-				 (le32_to_cpu(rinfo->head->result) == 0) ?
-				 req->r_fmode : -1,
-				 &req->r_caps_reservation);
-		if (err < 0) {
-			pr_err("fill_inode badness %p %llx.%llx\n",
-			       in, ceph_vinop(in));
-			goto done;
-		}
 	}
-
 done:
 	dout("fill_trace done err=%d\n", err);
 	return err;
@@ -1272,7 +1240,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
 	struct qstr dname;
 	struct dentry *dn;
 	struct inode *in;
-	int err = 0, i;
+	int err = 0, ret, i;
 	struct inode *snapdir = NULL;
 	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
 	struct ceph_dentry_info *di;
@@ -1305,6 +1273,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
 		ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
 	}
 
+	/* FIXME: release caps/leases if error occurs */
 	for (i = 0; i < rinfo->dir_nr; i++) {
 		struct ceph_vino vino;
 
@@ -1329,9 +1298,10 @@ retry_lookup:
 			err = -ENOMEM;
 			goto out;
 		}
-		err = ceph_init_dentry(dn);
-		if (err < 0) {
+		ret = ceph_init_dentry(dn);
+		if (ret < 0) {
 			dput(dn);
+			err = ret;
 			goto out;
 		}
 	} else if (dn->d_inode &&
@@ -1351,9 +1321,6 @@ retry_lookup:
 			spin_unlock(&parent->d_lock);
 		}
 
-		di = dn->d_fsdata;
-		di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
-
 		/* inode */
 		if (dn->d_inode) {
 			in = dn->d_inode;
@@ -1366,26 +1333,39 @@ retry_lookup:
 				err = PTR_ERR(in);
 				goto out;
 			}
-			dn = splice_dentry(dn, in, NULL, false);
-			if (IS_ERR(dn))
-				dn = NULL;
 		}
 
 		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
 			       req->r_request_started, -1,
 			       &req->r_caps_reservation) < 0) {
 			pr_err("fill_inode badness on %p\n", in);
+			if (!dn->d_inode)
+				iput(in);
+			d_drop(dn);
 			goto next_item;
 		}
-		if (dn)
-			update_dentry_lease(dn, rinfo->dir_dlease[i],
-					    req->r_session,
-					    req->r_request_started);
+
+		if (!dn->d_inode) {
+			dn = splice_dentry(dn, in, NULL, false);
+			if (IS_ERR(dn)) {
+				err = PTR_ERR(dn);
+				dn = NULL;
+				goto next_item;
+			}
+		}
+
+		di = dn->d_fsdata;
+		di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
+
+		update_dentry_lease(dn, rinfo->dir_dlease[i],
+				    req->r_session,
+				    req->r_request_started);
 next_item:
 		if (dn)
 			dput(dn);
 	}
-	req->r_did_prepopulate = true;
+	if (err == 0)
+		req->r_did_prepopulate = true;
 
 out:
 	if (snapdir) {
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index b8e93a40a5d3..78c3c2097787 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -443,8 +443,11 @@ int pstore_register(struct pstore_info *psi)
443 pstore_get_records(0); 443 pstore_get_records(0);
444 444
445 kmsg_dump_register(&pstore_dumper); 445 kmsg_dump_register(&pstore_dumper);
446 pstore_register_console(); 446
447 pstore_register_ftrace(); 447 if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
448 pstore_register_console();
449 pstore_register_ftrace();
450 }
448 451
449 if (pstore_update_ms >= 0) { 452 if (pstore_update_ms >= 0) {
450 pstore_timer.expires = jiffies + 453 pstore_timer.expires = jiffies +
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index b94f93685093..35e7d08fe629 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -609,7 +609,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
609 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; 609 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
610 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj; 610 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
611 struct sysfs_open_file *of; 611 struct sysfs_open_file *of;
612 bool has_read, has_write, has_mmap; 612 bool has_read, has_write;
613 int error = -EACCES; 613 int error = -EACCES;
614 614
615 /* need attr_sd for attr and ops, its parent for kobj */ 615 /* need attr_sd for attr and ops, its parent for kobj */
@@ -621,7 +621,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
621 621
622 has_read = battr->read || battr->mmap; 622 has_read = battr->read || battr->mmap;
623 has_write = battr->write || battr->mmap; 623 has_write = battr->write || battr->mmap;
624 has_mmap = battr->mmap;
625 } else { 624 } else {
626 const struct sysfs_ops *ops = sysfs_file_ops(attr_sd); 625 const struct sysfs_ops *ops = sysfs_file_ops(attr_sd);
627 626
@@ -633,7 +632,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
633 632
634 has_read = ops->show; 633 has_read = ops->show;
635 has_write = ops->store; 634 has_write = ops->store;
636 has_mmap = false;
637 } 635 }
638 636
639 /* check perms and supported operations */ 637 /* check perms and supported operations */
@@ -661,9 +659,9 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
661 * open file has a separate mutex, it's okay as long as those don't 659 * open file has a separate mutex, it's okay as long as those don't
662 * happen on the same file. At this point, we can't easily give 660 * happen on the same file. At this point, we can't easily give
663 * each file a separate locking class. Let's differentiate on 661 * each file a separate locking class. Let's differentiate on
664 * whether the file has mmap or not for now. 662 * whether the file is bin or not for now.
665 */ 663 */
666 if (has_mmap) 664 if (sysfs_is_bin(attr_sd))
667 mutex_init(&of->mutex); 665 mutex_init(&of->mutex);
668 else 666 else
669 mutex_init(&of->mutex); 667 mutex_init(&of->mutex);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 3ef11b22e750..3b2c14b6f0fb 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -1635,7 +1635,7 @@ xfs_bmap_last_extent(
1635 * blocks at the end of the file which do not start at the previous data block, 1635 * blocks at the end of the file which do not start at the previous data block,
1636 * we will try to align the new blocks at stripe unit boundaries. 1636 * we will try to align the new blocks at stripe unit boundaries.
1637 * 1637 *
1638 * Returns 0 in bma->aeof if the file (fork) is empty as any new write will be 1638 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
1639 * at, or past the EOF. 1639 * at, or past the EOF.
1640 */ 1640 */
1641STATIC int 1641STATIC int
@@ -1650,9 +1650,14 @@ xfs_bmap_isaeof(
1650 bma->aeof = 0; 1650 bma->aeof = 0;
1651 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, 1651 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1652 &is_empty); 1652 &is_empty);
1653 if (error || is_empty) 1653 if (error)
1654 return error; 1654 return error;
1655 1655
1656 if (is_empty) {
1657 bma->aeof = 1;
1658 return 0;
1659 }
1660
1656 /* 1661 /*
1657 * Check if we are allocation or past the last extent, or at least into 1662 * Check if we are allocation or past the last extent, or at least into
1658 * the last delayed allocated extent. 1663 * the last delayed allocated extent.
@@ -3643,10 +3648,19 @@ xfs_bmap_btalloc(
3643 int isaligned; 3648 int isaligned;
3644 int tryagain; 3649 int tryagain;
3645 int error; 3650 int error;
3651 int stripe_align;
3646 3652
3647 ASSERT(ap->length); 3653 ASSERT(ap->length);
3648 3654
3649 mp = ap->ip->i_mount; 3655 mp = ap->ip->i_mount;
3656
3657 /* stripe alignment for allocation is determined by mount parameters */
3658 stripe_align = 0;
3659 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3660 stripe_align = mp->m_swidth;
3661 else if (mp->m_dalign)
3662 stripe_align = mp->m_dalign;
3663
3650 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; 3664 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
3651 if (unlikely(align)) { 3665 if (unlikely(align)) {
3652 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 3666 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
@@ -3655,6 +3669,8 @@ xfs_bmap_btalloc(
3655 ASSERT(!error); 3669 ASSERT(!error);
3656 ASSERT(ap->length); 3670 ASSERT(ap->length);
3657 } 3671 }
3672
3673
3658 nullfb = *ap->firstblock == NULLFSBLOCK; 3674 nullfb = *ap->firstblock == NULLFSBLOCK;
3659 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); 3675 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3660 if (nullfb) { 3676 if (nullfb) {
@@ -3730,7 +3746,7 @@ xfs_bmap_btalloc(
3730 */ 3746 */
3731 if (!ap->flist->xbf_low && ap->aeof) { 3747 if (!ap->flist->xbf_low && ap->aeof) {
3732 if (!ap->offset) { 3748 if (!ap->offset) {
3733 args.alignment = mp->m_dalign; 3749 args.alignment = stripe_align;
3734 atype = args.type; 3750 atype = args.type;
3735 isaligned = 1; 3751 isaligned = 1;
3736 /* 3752 /*
@@ -3755,13 +3771,13 @@ xfs_bmap_btalloc(
3755 * of minlen+alignment+slop doesn't go up 3771 * of minlen+alignment+slop doesn't go up
3756 * between the calls. 3772 * between the calls.
3757 */ 3773 */
3758 if (blen > mp->m_dalign && blen <= args.maxlen) 3774 if (blen > stripe_align && blen <= args.maxlen)
3759 nextminlen = blen - mp->m_dalign; 3775 nextminlen = blen - stripe_align;
3760 else 3776 else
3761 nextminlen = args.minlen; 3777 nextminlen = args.minlen;
3762 if (nextminlen + mp->m_dalign > args.minlen + 1) 3778 if (nextminlen + stripe_align > args.minlen + 1)
3763 args.minalignslop = 3779 args.minalignslop =
3764 nextminlen + mp->m_dalign - 3780 nextminlen + stripe_align -
3765 args.minlen - 1; 3781 args.minlen - 1;
3766 else 3782 else
3767 args.minalignslop = 0; 3783 args.minalignslop = 0;
@@ -3783,7 +3799,7 @@ xfs_bmap_btalloc(
3783 */ 3799 */
3784 args.type = atype; 3800 args.type = atype;
3785 args.fsbno = ap->blkno; 3801 args.fsbno = ap->blkno;
3786 args.alignment = mp->m_dalign; 3802 args.alignment = stripe_align;
3787 args.minlen = nextminlen; 3803 args.minlen = nextminlen;
3788 args.minalignslop = 0; 3804 args.minalignslop = 0;
3789 isaligned = 1; 3805 isaligned = 1;
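
The new stripe_align local centralizes one decision: prefer the full stripe width when the swalloc mount option is in effect, otherwise fall back to the stripe unit, so every alignment computation below keys off a single value. A standalone C sketch of that selection (the flag value and geometry numbers are illustrative assumptions, not XFS's real constants):

	#include <stdio.h>

	#define XFS_MOUNT_SWALLOC 0x1	/* illustrative flag value */

	struct mount_geom {
		unsigned int m_swidth;	/* stripe width, in blocks */
		unsigned int m_dalign;	/* stripe unit, in blocks */
		unsigned int m_flags;
	};

	/* Mirrors the stripe_align selection added to xfs_bmap_btalloc(). */
	static unsigned int pick_stripe_align(const struct mount_geom *mp)
	{
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			return mp->m_swidth;
		if (mp->m_dalign)
			return mp->m_dalign;
		return 0;
	}

	int main(void)
	{
		struct mount_geom swalloc = { 128, 16, XFS_MOUNT_SWALLOC };
		struct mount_geom plain   = { 128, 16, 0 };

		printf("swalloc: align=%u\n", pick_stripe_align(&swalloc)); /* 128 */
		printf("default: align=%u\n", pick_stripe_align(&plain));  /* 16 */
		return 0;
	}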
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5887e41c0323..1394106ed22d 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1187,7 +1187,12 @@ xfs_zero_remaining_bytes(
1187 XFS_BUF_UNWRITE(bp); 1187 XFS_BUF_UNWRITE(bp);
1188 XFS_BUF_READ(bp); 1188 XFS_BUF_READ(bp);
1189 XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); 1189 XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
1190 xfsbdstrat(mp, bp); 1190
1191 if (XFS_FORCED_SHUTDOWN(mp)) {
1192 error = XFS_ERROR(EIO);
1193 break;
1194 }
1195 xfs_buf_iorequest(bp);
1191 error = xfs_buf_iowait(bp); 1196 error = xfs_buf_iowait(bp);
1192 if (error) { 1197 if (error) {
1193 xfs_buf_ioerror_alert(bp, 1198 xfs_buf_ioerror_alert(bp,
@@ -1200,7 +1205,12 @@ xfs_zero_remaining_bytes(
1200 XFS_BUF_UNDONE(bp); 1205 XFS_BUF_UNDONE(bp);
1201 XFS_BUF_UNREAD(bp); 1206 XFS_BUF_UNREAD(bp);
1202 XFS_BUF_WRITE(bp); 1207 XFS_BUF_WRITE(bp);
1203 xfsbdstrat(mp, bp); 1208
1209 if (XFS_FORCED_SHUTDOWN(mp)) {
1210 error = XFS_ERROR(EIO);
1211 break;
1212 }
1213 xfs_buf_iorequest(bp);
1204 error = xfs_buf_iowait(bp); 1214 error = xfs_buf_iowait(bp);
1205 if (error) { 1215 if (error) {
1206 xfs_buf_ioerror_alert(bp, 1216 xfs_buf_ioerror_alert(bp,
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index c7f0b77dcb00..afe7645e4b2b 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -698,7 +698,11 @@ xfs_buf_read_uncached(
698 bp->b_flags |= XBF_READ; 698 bp->b_flags |= XBF_READ;
699 bp->b_ops = ops; 699 bp->b_ops = ops;
700 700
701 xfsbdstrat(target->bt_mount, bp); 701 if (XFS_FORCED_SHUTDOWN(target->bt_mount)) {
702 xfs_buf_relse(bp);
703 return NULL;
704 }
705 xfs_buf_iorequest(bp);
702 xfs_buf_iowait(bp); 706 xfs_buf_iowait(bp);
703 return bp; 707 return bp;
704} 708}
@@ -1089,7 +1093,7 @@ xfs_bioerror(
1089 * This is meant for userdata errors; metadata bufs come with 1093 * This is meant for userdata errors; metadata bufs come with
1090 * iodone functions attached, so that we can track down errors. 1094 * iodone functions attached, so that we can track down errors.
1091 */ 1095 */
1092STATIC int 1096int
1093xfs_bioerror_relse( 1097xfs_bioerror_relse(
1094 struct xfs_buf *bp) 1098 struct xfs_buf *bp)
1095{ 1099{
@@ -1152,7 +1156,7 @@ xfs_bwrite(
1152 ASSERT(xfs_buf_islocked(bp)); 1156 ASSERT(xfs_buf_islocked(bp));
1153 1157
1154 bp->b_flags |= XBF_WRITE; 1158 bp->b_flags |= XBF_WRITE;
1155 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q); 1159 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL);
1156 1160
1157 xfs_bdstrat_cb(bp); 1161 xfs_bdstrat_cb(bp);
1158 1162
@@ -1164,25 +1168,6 @@ xfs_bwrite(
1164 return error; 1168 return error;
1165} 1169}
1166 1170
1167/*
1168 * Wrapper around bdstrat so that we can stop data from going to disk in case
1169 * we are shutting down the filesystem. Typically user data goes thru this
1170 * path; one of the exceptions is the superblock.
1171 */
1172void
1173xfsbdstrat(
1174 struct xfs_mount *mp,
1175 struct xfs_buf *bp)
1176{
1177 if (XFS_FORCED_SHUTDOWN(mp)) {
1178 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1179 xfs_bioerror_relse(bp);
1180 return;
1181 }
1182
1183 xfs_buf_iorequest(bp);
1184}
1185
1186STATIC void 1171STATIC void
1187_xfs_buf_ioend( 1172_xfs_buf_ioend(
1188 xfs_buf_t *bp, 1173 xfs_buf_t *bp,
@@ -1516,6 +1501,12 @@ xfs_wait_buftarg(
1516 struct xfs_buf *bp; 1501 struct xfs_buf *bp;
1517 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); 1502 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1518 list_del_init(&bp->b_lru); 1503 list_del_init(&bp->b_lru);
1504 if (bp->b_flags & XBF_WRITE_FAIL) {
1505 xfs_alert(btp->bt_mount,
1506"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
1507"Please run xfs_repair to determine the extent of the problem.",
1508 (long long)bp->b_bn);
1509 }
1519 xfs_buf_rele(bp); 1510 xfs_buf_rele(bp);
1520 } 1511 }
1521 if (loop++ != 0) 1512 if (loop++ != 0)
@@ -1799,7 +1790,7 @@ __xfs_buf_delwri_submit(
1799 1790
1800 blk_start_plug(&plug); 1791 blk_start_plug(&plug);
1801 list_for_each_entry_safe(bp, n, io_list, b_list) { 1792 list_for_each_entry_safe(bp, n, io_list, b_list) {
1802 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC); 1793 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
1803 bp->b_flags |= XBF_WRITE; 1794 bp->b_flags |= XBF_WRITE;
1804 1795
1805 if (!wait) { 1796 if (!wait) {
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index e65683361017..1cf21a4a9f22 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -45,6 +45,7 @@ typedef enum {
45#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ 45#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
46#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ 46#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
47#define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */ 47#define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */
48#define XBF_WRITE_FAIL (1 << 24)/* async writes have failed on this buffer */
48 49
49/* I/O hints for the BIO layer */ 50/* I/O hints for the BIO layer */
50#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ 51#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */
@@ -70,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t;
70 { XBF_ASYNC, "ASYNC" }, \ 71 { XBF_ASYNC, "ASYNC" }, \
71 { XBF_DONE, "DONE" }, \ 72 { XBF_DONE, "DONE" }, \
72 { XBF_STALE, "STALE" }, \ 73 { XBF_STALE, "STALE" }, \
74 { XBF_WRITE_FAIL, "WRITE_FAIL" }, \
73 { XBF_SYNCIO, "SYNCIO" }, \ 75 { XBF_SYNCIO, "SYNCIO" }, \
74 { XBF_FUA, "FUA" }, \ 76 { XBF_FUA, "FUA" }, \
75 { XBF_FLUSH, "FLUSH" }, \ 77 { XBF_FLUSH, "FLUSH" }, \
@@ -80,6 +82,7 @@ typedef unsigned int xfs_buf_flags_t;
80 { _XBF_DELWRI_Q, "DELWRI_Q" }, \ 82 { _XBF_DELWRI_Q, "DELWRI_Q" }, \
81 { _XBF_COMPOUND, "COMPOUND" } 83 { _XBF_COMPOUND, "COMPOUND" }
82 84
85
83/* 86/*
84 * Internal state flags. 87 * Internal state flags.
85 */ 88 */
@@ -269,9 +272,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
269 272
270/* Buffer Read and Write Routines */ 273/* Buffer Read and Write Routines */
271extern int xfs_bwrite(struct xfs_buf *bp); 274extern int xfs_bwrite(struct xfs_buf *bp);
272
273extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
274
275extern void xfs_buf_ioend(xfs_buf_t *, int); 275extern void xfs_buf_ioend(xfs_buf_t *, int);
276extern void xfs_buf_ioerror(xfs_buf_t *, int); 276extern void xfs_buf_ioerror(xfs_buf_t *, int);
277extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func); 277extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
@@ -282,6 +282,8 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
282#define xfs_buf_zero(bp, off, len) \ 282#define xfs_buf_zero(bp, off, len) \
283 xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) 283 xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
284 284
285extern int xfs_bioerror_relse(struct xfs_buf *);
286
285static inline int xfs_buf_geterror(xfs_buf_t *bp) 287static inline int xfs_buf_geterror(xfs_buf_t *bp)
286{ 288{
287 return bp ? bp->b_error : ENOMEM; 289 return bp ? bp->b_error : ENOMEM;
@@ -301,7 +303,8 @@ extern void xfs_buf_terminate(void);
301 303
302#define XFS_BUF_ZEROFLAGS(bp) \ 304#define XFS_BUF_ZEROFLAGS(bp) \
303 ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \ 305 ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
304 XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) 306 XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \
307 XBF_WRITE_FAIL))
305 308
306void xfs_buf_stale(struct xfs_buf *bp); 309void xfs_buf_stale(struct xfs_buf *bp);
307#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) 310#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE)
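
The hunks above thread a new XBF_WRITE_FAIL bit through the buffer flag masks: it is set when an async write is requeued after an I/O error, cleared whenever the buffer is resubmitted through xfs_bwrite() or the delwri path, and reported at unmount if it is still standing. A minimal userspace sketch of that flag lifecycle follows; the helper names are illustrative, not the kernel API.

#include <stdio.h>

#define XBF_WRITE      (1 << 1)
#define XBF_ASYNC      (1 << 4)
#define XBF_DONE       (1 << 5)
#define XBF_WRITE_FAIL (1 << 24)  /* async writes have failed on this buffer */

struct buf { unsigned int flags; };

/* Async write completion with an error: requeue and mark the buffer. */
static void fail_async_write(struct buf *bp)
{
        bp->flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE | XBF_WRITE_FAIL;
}

/* Resubmission clears the failure bit, so a clean write resets the state. */
static void resubmit_write(struct buf *bp)
{
        bp->flags &= ~(XBF_ASYNC | XBF_WRITE_FAIL);
        bp->flags |= XBF_WRITE;
}

int main(void)
{
        struct buf b = { 0 };

        fail_async_write(&b);
        printf("after failure:  WRITE_FAIL=%d\n", !!(b.flags & XBF_WRITE_FAIL));
        resubmit_write(&b);
        printf("after resubmit: WRITE_FAIL=%d\n", !!(b.flags & XBF_WRITE_FAIL));
        return 0;
}
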
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index a64f67ba25d3..2227b9b050bb 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -496,6 +496,14 @@ xfs_buf_item_unpin(
496 } 496 }
497} 497}
498 498
499/*
500 * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
501 * seconds so as to not spam logs too much on repeated detection of the same
 502 * buffer being bad.
503 */
504
505DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
506
499STATIC uint 507STATIC uint
500xfs_buf_item_push( 508xfs_buf_item_push(
501 struct xfs_log_item *lip, 509 struct xfs_log_item *lip,
@@ -524,6 +532,14 @@ xfs_buf_item_push(
524 532
525 trace_xfs_buf_item_push(bip); 533 trace_xfs_buf_item_push(bip);
526 534
535 /* has a previous flush failed due to IO errors? */
536 if ((bp->b_flags & XBF_WRITE_FAIL) &&
537 ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
538 xfs_warn(bp->b_target->bt_mount,
539"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
540 (long long)bp->b_bn);
541 }
542
527 if (!xfs_buf_delwri_queue(bp, buffer_list)) 543 if (!xfs_buf_delwri_queue(bp, buffer_list))
528 rval = XFS_ITEM_FLUSHING; 544 rval = XFS_ITEM_FLUSHING;
529 xfs_buf_unlock(bp); 545 xfs_buf_unlock(bp);
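
The DEFINE_RATELIMIT_STATE(..., 30 * HZ, 10) above allows at most ten warnings per 30-second window before suppressing further output. A minimal sketch of such a windowed rate limiter in userspace C; wall-clock seconds stand in for jiffies, and this is a simplified model, not the kernel's ___ratelimit() implementation.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ratelimit_state {
        time_t window_start;    /* start of the current interval */
        int    interval;        /* interval length in seconds */
        int    burst;           /* max events allowed per interval */
        int    printed;         /* events emitted in this interval */
};

static bool ratelimit_ok(struct ratelimit_state *rs)
{
        time_t now = time(NULL);

        if (now - rs->window_start >= rs->interval) {
                rs->window_start = now;   /* new window: reset the budget */
                rs->printed = 0;
        }
        if (rs->printed >= rs->burst)
                return false;             /* suppressed */
        rs->printed++;
        return true;
}

int main(void)
{
        struct ratelimit_state rs = { time(NULL), 30, 10, 0 };
        int i, shown = 0;

        for (i = 0; i < 100; i++)
                if (ratelimit_ok(&rs))
                        shown++;
        printf("%d of 100 warnings emitted\n", shown);   /* prints 10 */
        return 0;
}
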
@@ -1096,8 +1112,9 @@ xfs_buf_iodone_callbacks(
1096 1112
1097 xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ 1113 xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
1098 1114
1099 if (!XFS_BUF_ISSTALE(bp)) { 1115 if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
1100 bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE; 1116 bp->b_flags |= XBF_WRITE | XBF_ASYNC |
1117 XBF_DONE | XBF_WRITE_FAIL;
1101 xfs_buf_iorequest(bp); 1118 xfs_buf_iorequest(bp);
1102 } else { 1119 } else {
1103 xfs_buf_relse(bp); 1120 xfs_buf_relse(bp);
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 56369d4509d5..48c7d18f68c3 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -2067,12 +2067,12 @@ xfs_dir2_node_lookup(
2067 */ 2067 */
2068int /* error */ 2068int /* error */
2069xfs_dir2_node_removename( 2069xfs_dir2_node_removename(
2070 xfs_da_args_t *args) /* operation arguments */ 2070 struct xfs_da_args *args) /* operation arguments */
2071{ 2071{
2072 xfs_da_state_blk_t *blk; /* leaf block */ 2072 struct xfs_da_state_blk *blk; /* leaf block */
2073 int error; /* error return value */ 2073 int error; /* error return value */
2074 int rval; /* operation return value */ 2074 int rval; /* operation return value */
2075 xfs_da_state_t *state; /* btree cursor */ 2075 struct xfs_da_state *state; /* btree cursor */
2076 2076
2077 trace_xfs_dir2_node_removename(args); 2077 trace_xfs_dir2_node_removename(args);
2078 2078
@@ -2084,19 +2084,18 @@ xfs_dir2_node_removename(
2084 state->mp = args->dp->i_mount; 2084 state->mp = args->dp->i_mount;
2085 state->blocksize = state->mp->m_dirblksize; 2085 state->blocksize = state->mp->m_dirblksize;
2086 state->node_ents = state->mp->m_dir_node_ents; 2086 state->node_ents = state->mp->m_dir_node_ents;
2087 /* 2087
2088 * Look up the entry we're deleting, set up the cursor. 2088 /* Look up the entry we're deleting, set up the cursor. */
2089 */
2090 error = xfs_da3_node_lookup_int(state, &rval); 2089 error = xfs_da3_node_lookup_int(state, &rval);
2091 if (error) 2090 if (error)
2092 rval = error; 2091 goto out_free;
2093 /* 2092
2094 * Didn't find it, upper layer screwed up. 2093 /* Didn't find it, upper layer screwed up. */
2095 */
2096 if (rval != EEXIST) { 2094 if (rval != EEXIST) {
2097 xfs_da_state_free(state); 2095 error = rval;
2098 return rval; 2096 goto out_free;
2099 } 2097 }
2098
2100 blk = &state->path.blk[state->path.active - 1]; 2099 blk = &state->path.blk[state->path.active - 1];
2101 ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); 2100 ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
2102 ASSERT(state->extravalid); 2101 ASSERT(state->extravalid);
@@ -2107,7 +2106,7 @@ xfs_dir2_node_removename(
2107 error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, 2106 error = xfs_dir2_leafn_remove(args, blk->bp, blk->index,
2108 &state->extrablk, &rval); 2107 &state->extrablk, &rval);
2109 if (error) 2108 if (error)
2110 return error; 2109 goto out_free;
2111 /* 2110 /*
2112 * Fix the hash values up the btree. 2111 * Fix the hash values up the btree.
2113 */ 2112 */
@@ -2122,6 +2121,7 @@ xfs_dir2_node_removename(
2122 */ 2121 */
2123 if (!error) 2122 if (!error)
2124 error = xfs_dir2_node_to_leaf(state); 2123 error = xfs_dir2_node_to_leaf(state);
2124out_free:
2125 xfs_da_state_free(state); 2125 xfs_da_state_free(state);
2126 return error; 2126 return error;
2127} 2127}
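
The xfs_dir2_node_removename() rework above replaces three independent return paths with a single out_free label, so the xfs_da_state is freed exactly once; the original code leaked it on the xfs_dir2_leafn_remove() error path. The shape of that single-exit cleanup idiom, sketched outside the kernel with hypothetical stand-ins for the lookup and remove steps:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the lookup/remove steps. */
static int lookup(int *rval) { *rval = 1; return 0; }
static int remove_entry(void) { return 0; }

static int removename(void)
{
        int *state = malloc(sizeof(*state));  /* resource needing cleanup */
        int error, rval;

        if (!state)
                return -1;

        error = lookup(&rval);
        if (error)
                goto out_free;          /* was: bare return, leaking state */

        if (rval != 1) {                /* entry not found */
                error = -2;
                goto out_free;
        }

        error = remove_entry();
        if (error)
                goto out_free;

out_free:
        free(state);                    /* single cleanup point */
        return error;
}

int main(void)
{
        printf("removename() = %d\n", removename());
        return 0;
}
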
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 27e0e544e963..104455b8046c 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -618,7 +618,8 @@ xfs_setattr_nonsize(
618 } 618 }
619 if (!gid_eq(igid, gid)) { 619 if (!gid_eq(igid, gid)) {
620 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { 620 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
621 ASSERT(!XFS_IS_PQUOTA_ON(mp)); 621 ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) ||
622 !XFS_IS_PQUOTA_ON(mp));
622 ASSERT(mask & ATTR_GID); 623 ASSERT(mask & ATTR_GID);
623 ASSERT(gdqp); 624 ASSERT(gdqp);
624 olddquot2 = xfs_qm_vop_chown(tp, ip, 625 olddquot2 = xfs_qm_vop_chown(tp, ip,
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index b6b669df40f3..eae16920655b 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -193,7 +193,10 @@ xlog_bread_noalign(
193 bp->b_io_length = nbblks; 193 bp->b_io_length = nbblks;
194 bp->b_error = 0; 194 bp->b_error = 0;
195 195
196 xfsbdstrat(log->l_mp, bp); 196 if (XFS_FORCED_SHUTDOWN(log->l_mp))
197 return XFS_ERROR(EIO);
198
199 xfs_buf_iorequest(bp);
197 error = xfs_buf_iowait(bp); 200 error = xfs_buf_iowait(bp);
198 if (error) 201 if (error)
199 xfs_buf_ioerror_alert(bp, __func__); 202 xfs_buf_ioerror_alert(bp, __func__);
@@ -4397,7 +4400,13 @@ xlog_do_recover(
4397 XFS_BUF_READ(bp); 4400 XFS_BUF_READ(bp);
4398 XFS_BUF_UNASYNC(bp); 4401 XFS_BUF_UNASYNC(bp);
4399 bp->b_ops = &xfs_sb_buf_ops; 4402 bp->b_ops = &xfs_sb_buf_ops;
4400 xfsbdstrat(log->l_mp, bp); 4403
4404 if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
4405 xfs_buf_relse(bp);
4406 return XFS_ERROR(EIO);
4407 }
4408
4409 xfs_buf_iorequest(bp);
4401 error = xfs_buf_iowait(bp); 4410 error = xfs_buf_iowait(bp);
4402 if (error) { 4411 if (error) {
4403 xfs_buf_ioerror_alert(bp, __func__); 4412 xfs_buf_ioerror_alert(bp, __func__);
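
Both log-recovery hunks above open-code what the removed xfsbdstrat() wrapper used to do: check the forced-shutdown state before issuing I/O and fail with EIO rather than touch the device. A sketch of that guard as a reusable helper; the struct layout and names here are illustrative, not the XFS API.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct mount { bool forced_shutdown; };
struct buf   { struct mount *mp; };

static void submit_io(struct buf *bp) { (void)bp; /* queue the bio */ }

/* Fail fast if the filesystem is shutting down; submit otherwise. */
static int submit_buf(struct buf *bp)
{
        if (bp->mp->forced_shutdown)
                return -EIO;    /* never hand I/O to a dead filesystem */
        submit_io(bp);
        return 0;
}

int main(void)
{
        struct mount m = { .forced_shutdown = true };
        struct buf b = { .mp = &m };

        printf("submit_buf() = %d (expect %d)\n", submit_buf(&b), -EIO);
        return 0;
}
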
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 14a4996cfec6..dd88f0e27bd8 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -134,8 +134,6 @@ xfs_qm_dqpurge(
134{ 134{
135 struct xfs_mount *mp = dqp->q_mount; 135 struct xfs_mount *mp = dqp->q_mount;
136 struct xfs_quotainfo *qi = mp->m_quotainfo; 136 struct xfs_quotainfo *qi = mp->m_quotainfo;
137 struct xfs_dquot *gdqp = NULL;
138 struct xfs_dquot *pdqp = NULL;
139 137
140 xfs_dqlock(dqp); 138 xfs_dqlock(dqp);
141 if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { 139 if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
@@ -143,21 +141,6 @@ xfs_qm_dqpurge(
143 return EAGAIN; 141 return EAGAIN;
144 } 142 }
145 143
146 /*
147 * If this quota has a hint attached, prepare for releasing it now.
148 */
149 gdqp = dqp->q_gdquot;
150 if (gdqp) {
151 xfs_dqlock(gdqp);
152 dqp->q_gdquot = NULL;
153 }
154
155 pdqp = dqp->q_pdquot;
156 if (pdqp) {
157 xfs_dqlock(pdqp);
158 dqp->q_pdquot = NULL;
159 }
160
161 dqp->dq_flags |= XFS_DQ_FREEING; 144 dqp->dq_flags |= XFS_DQ_FREEING;
162 145
163 xfs_dqflock(dqp); 146 xfs_dqflock(dqp);
@@ -206,11 +189,47 @@ xfs_qm_dqpurge(
206 XFS_STATS_DEC(xs_qm_dquot_unused); 189 XFS_STATS_DEC(xs_qm_dquot_unused);
207 190
208 xfs_qm_dqdestroy(dqp); 191 xfs_qm_dqdestroy(dqp);
192 return 0;
193}
194
195/*
 196 * Release the group or project dquot pointers the user dquots may be carrying
 197 * around as a hint, and proceed to purge the user dquot cache if requested.
 198 */
199STATIC int
200xfs_qm_dqpurge_hints(
201 struct xfs_dquot *dqp,
202 void *data)
203{
204 struct xfs_dquot *gdqp = NULL;
205 struct xfs_dquot *pdqp = NULL;
206 uint flags = *((uint *)data);
207
208 xfs_dqlock(dqp);
209 if (dqp->dq_flags & XFS_DQ_FREEING) {
210 xfs_dqunlock(dqp);
211 return EAGAIN;
212 }
213
214 /* If this quota has a hint attached, prepare for releasing it now */
215 gdqp = dqp->q_gdquot;
216 if (gdqp)
217 dqp->q_gdquot = NULL;
218
219 pdqp = dqp->q_pdquot;
220 if (pdqp)
221 dqp->q_pdquot = NULL;
222
223 xfs_dqunlock(dqp);
209 224
210 if (gdqp) 225 if (gdqp)
211 xfs_qm_dqput(gdqp); 226 xfs_qm_dqrele(gdqp);
212 if (pdqp) 227 if (pdqp)
213 xfs_qm_dqput(pdqp); 228 xfs_qm_dqrele(pdqp);
229
230 if (flags & XFS_QMOPT_UQUOTA)
231 return xfs_qm_dqpurge(dqp, NULL);
232
214 return 0; 233 return 0;
215} 234}
216 235
@@ -222,8 +241,18 @@ xfs_qm_dqpurge_all(
222 struct xfs_mount *mp, 241 struct xfs_mount *mp,
223 uint flags) 242 uint flags)
224{ 243{
225 if (flags & XFS_QMOPT_UQUOTA) 244 /*
 226 xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL); 245 * We have to release the group/project dquot hints from the user dquots
 246 * first if they are present; otherwise we would run into an infinite
 247 * loop while walking the radix tree to purge the other dquot types,
 248 * since their refcount stays non-zero while a user dquot still refers
 249 * to them as a hint.
 250 *
 251 * Calling xfs_qm_dqpurge_hints() will also end up running the general
 252 * xfs_qm_dqpurge() against the user dquot cache if requested.
253 */
254 xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);
255
227 if (flags & XFS_QMOPT_GQUOTA) 256 if (flags & XFS_QMOPT_GQUOTA)
228 xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL); 257 xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
229 if (flags & XFS_QMOPT_PQUOTA) 258 if (flags & XFS_QMOPT_PQUOTA)
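
The ordering comment above is the crux of the fix: a group or project dquot that is referenced as a hint holds a reference, so purging it before the user dquots drop their hints can never see its refcount reach zero. A toy refcount model of why hints must be released first; this is purely illustrative and not the dquot data structure.

#include <stdbool.h>
#include <stdio.h>

struct dquot {
        int refs;
        struct dquot *hint;     /* group/project dquot held as a hint */
};

static bool purge(struct dquot *dq)
{
        if (dq->refs != 0)
                return false;   /* busy: would be retried forever */
        return true;
}

static void release_hint(struct dquot *user)
{
        if (user->hint) {
                user->hint->refs--;     /* drop the hint reference */
                user->hint = NULL;
        }
}

int main(void)
{
        struct dquot group = { .refs = 1 };             /* pinned by the hint */
        struct dquot user  = { .refs = 0, .hint = &group };

        printf("purge(group) before release: %d\n", purge(&group)); /* 0 */
        release_hint(&user);
        printf("purge(group) after release:  %d\n", purge(&group)); /* 1 */
        return 0;
}
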
@@ -2082,24 +2111,21 @@ xfs_qm_vop_create_dqattach(
2082 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 2111 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2083 ASSERT(XFS_IS_QUOTA_RUNNING(mp)); 2112 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2084 2113
2085 if (udqp) { 2114 if (udqp && XFS_IS_UQUOTA_ON(mp)) {
2086 ASSERT(ip->i_udquot == NULL); 2115 ASSERT(ip->i_udquot == NULL);
2087 ASSERT(XFS_IS_UQUOTA_ON(mp));
2088 ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); 2116 ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
2089 2117
2090 ip->i_udquot = xfs_qm_dqhold(udqp); 2118 ip->i_udquot = xfs_qm_dqhold(udqp);
2091 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); 2119 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
2092 } 2120 }
2093 if (gdqp) { 2121 if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
2094 ASSERT(ip->i_gdquot == NULL); 2122 ASSERT(ip->i_gdquot == NULL);
2095 ASSERT(XFS_IS_GQUOTA_ON(mp));
2096 ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id)); 2123 ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
2097 ip->i_gdquot = xfs_qm_dqhold(gdqp); 2124 ip->i_gdquot = xfs_qm_dqhold(gdqp);
2098 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); 2125 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
2099 } 2126 }
2100 if (pdqp) { 2127 if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
2101 ASSERT(ip->i_pdquot == NULL); 2128 ASSERT(ip->i_pdquot == NULL);
2102 ASSERT(XFS_IS_PQUOTA_ON(mp));
2103 ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id)); 2129 ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
2104 2130
2105 ip->i_pdquot = xfs_qm_dqhold(pdqp); 2131 ip->i_pdquot = xfs_qm_dqhold(pdqp);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index c035d11b7734..647b6f1d8923 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -314,7 +314,18 @@ xfs_trans_read_buf_map(
314 ASSERT(bp->b_iodone == NULL); 314 ASSERT(bp->b_iodone == NULL);
315 XFS_BUF_READ(bp); 315 XFS_BUF_READ(bp);
316 bp->b_ops = ops; 316 bp->b_ops = ops;
317 xfsbdstrat(tp->t_mountp, bp); 317
318 /*
319 * XXX(hch): clean up the error handling here to be less
320 * of a mess..
321 */
322 if (XFS_FORCED_SHUTDOWN(mp)) {
323 trace_xfs_bdstrat_shut(bp, _RET_IP_);
324 xfs_bioerror_relse(bp);
325 } else {
326 xfs_buf_iorequest(bp);
327 }
328
318 error = xfs_buf_iowait(bp); 329 error = xfs_buf_iowait(bp);
319 if (error) { 330 if (error) {
320 xfs_buf_ioerror_alert(bp, __func__); 331 xfs_buf_ioerror_alert(bp, __func__);
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f330d28e4d0e..db0923458940 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
217#endif 217#endif
218 218
219#ifndef pte_accessible 219#ifndef pte_accessible
220# define pte_accessible(pte) ((void)(pte),1) 220# define pte_accessible(mm, pte) ((void)(pte), 1)
221#endif 221#endif
222 222
223#ifndef flush_tlb_fix_spurious_fault 223#ifndef flush_tlb_fix_spurious_fault
@@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
599#ifdef CONFIG_TRANSPARENT_HUGEPAGE 599#ifdef CONFIG_TRANSPARENT_HUGEPAGE
600 barrier(); 600 barrier();
601#endif 601#endif
602 if (pmd_none(pmdval)) 602 if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
603 return 1; 603 return 1;
604 if (unlikely(pmd_bad(pmdval))) { 604 if (unlikely(pmd_bad(pmdval))) {
605 if (!pmd_trans_huge(pmdval)) 605 pmd_clear_bad(pmd);
606 pmd_clear_bad(pmd);
607 return 1; 606 return 1;
608 } 607 }
609 return 0; 608 return 0;
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index ddf2b420ac8f..1cd3f5d767a8 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -3,13 +3,11 @@
3 3
4#include <linux/thread_info.h> 4#include <linux/thread_info.h>
5 5
6/* 6#define PREEMPT_ENABLED (0)
7 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users 7
8 * that think a non-zero value indicates we cannot preempt.
9 */
10static __always_inline int preempt_count(void) 8static __always_inline int preempt_count(void)
11{ 9{
12 return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED; 10 return current_thread_info()->preempt_count;
13} 11}
14 12
15static __always_inline int *preempt_count_ptr(void) 13static __always_inline int *preempt_count_ptr(void)
@@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void)
17 return &current_thread_info()->preempt_count; 15 return &current_thread_info()->preempt_count;
18} 16}
19 17
20/*
 21 * We now lose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
 22 * alternative is losing a reschedule. Better schedule too often -- also this
23 * should be a very rare operation.
24 */
25static __always_inline void preempt_count_set(int pc) 18static __always_inline void preempt_count_set(int pc)
26{ 19{
27 *preempt_count_ptr() = pc; 20 *preempt_count_ptr() = pc;
@@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc)
41 task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ 34 task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
42} while (0) 35} while (0)
43 36
44/*
45 * We fold the NEED_RESCHED bit into the preempt count such that
46 * preempt_enable() can decrement and test for needing to reschedule with a
47 * single instruction.
48 *
49 * We invert the actual bit, so that when the decrement hits 0 we know we both
50 * need to resched (the bit is cleared) and can resched (no preempt count).
51 */
52
53static __always_inline void set_preempt_need_resched(void) 37static __always_inline void set_preempt_need_resched(void)
54{ 38{
55 *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
56} 39}
57 40
58static __always_inline void clear_preempt_need_resched(void) 41static __always_inline void clear_preempt_need_resched(void)
59{ 42{
60 *preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
61} 43}
62 44
63static __always_inline bool test_preempt_need_resched(void) 45static __always_inline bool test_preempt_need_resched(void)
64{ 46{
65 return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED); 47 return false;
66} 48}
67 49
68/* 50/*
@@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val)
81 63
82static __always_inline bool __preempt_count_dec_and_test(void) 64static __always_inline bool __preempt_count_dec_and_test(void)
83{ 65{
84 return !--*preempt_count_ptr(); 66 /*
 67 * Because load-store architectures cannot do per-cpu atomic
 68 * operations, we cannot use PREEMPT_NEED_RESCHED here: it might get
 69 * lost.
70 */
71 return !--*preempt_count_ptr() && tif_need_resched();
85} 72}
86 73
87/* 74/*
@@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
89 */ 76 */
90static __always_inline bool should_resched(void) 77static __always_inline bool should_resched(void)
91{ 78{
92 return unlikely(!*preempt_count_ptr()); 79 return unlikely(!preempt_count() && tif_need_resched());
93} 80}
94 81
95#ifdef CONFIG_PREEMPT 82#ifdef CONFIG_PREEMPT
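
After this change the generic preempt_count no longer folds PREEMPT_NEED_RESCHED into the counter; __preempt_count_dec_and_test() instead rechecks the need-resched flag explicitly after the decrement. A single-threaded sketch of the resulting logic; this is a simplified model, not the kernel header.

#include <stdbool.h>
#include <stdio.h>

static int  preempt_count;      /* depth of preempt_disable() nesting */
static bool need_resched;       /* stand-in for TIF_NEED_RESCHED */

/* Preemption may happen only when the count hits zero AND a resched
 * was requested; the flag is tested separately since it is no longer
 * folded into the counter. */
static bool preempt_count_dec_and_test(void)
{
        return !--preempt_count && need_resched;
}

int main(void)
{
        preempt_count = 2;
        need_resched = true;

        printf("%d\n", preempt_count_dec_and_test());   /* 0: still nested */
        printf("%d\n", preempt_count_dec_and_test());   /* 1: resched now */
        return 0;
}
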
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index c8929c3832db..4bfde0e99ed5 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -19,7 +19,7 @@
19 19
20#define USE_CMPXCHG_LOCKREF \ 20#define USE_CMPXCHG_LOCKREF \
21 (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \ 21 (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
22 IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS) 22 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
23 23
24struct lockref { 24struct lockref {
25 union { 25 union {
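
USE_CMPXCHG_LOCKREF now requires the spinlock to fit in 4 bytes, so the lock and the reference count share one 64-bit word and can be updated together with a single compare-and-swap. A C11-atomics sketch of that lockref-style fast path; the word layout and names are illustrative.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Lock word and reference count packed into one atomic 64-bit value:
 * low 32 bits = lock (0 == unlocked), high 32 bits = count. */
static _Atomic uint64_t lockref = (uint64_t)1 << 32;    /* count=1, unlocked */

static int lockref_get_fast(void)
{
        uint64_t old = atomic_load(&lockref);

        while ((uint32_t)old == 0) {            /* only while unlocked */
                uint64_t new = old + ((uint64_t)1 << 32);   /* count++ */
                if (atomic_compare_exchange_weak(&lockref, &old, new))
                        return 1;               /* lock never taken */
        }
        return 0;                               /* contended: fall back */
}

int main(void)
{
        printf("fast path ok=%d count=%u\n", lockref_get_fast(),
               (uint32_t)(atomic_load(&lockref) >> 32));
        return 0;
}
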
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 69ed5f5e9f6e..c45c089bfdac 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
133 return ret; 133 return ret;
134} 134}
135 135
136#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
137
138#ifndef mul_u64_u32_shr
139static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
140{
141 return (u64)(((unsigned __int128)a * mul) >> shift);
142}
143#endif /* mul_u64_u32_shr */
144
145#else
146
147#ifndef mul_u64_u32_shr
148static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
149{
150 u32 ah, al;
151 u64 ret;
152
153 al = a;
154 ah = a >> 32;
155
156 ret = ((u64)al * mul) >> shift;
157 if (ah)
158 ret += ((u64)ah * mul) << (32 - shift);
159
160 return ret;
161}
162#endif /* mul_u64_u32_shr */
163
164#endif
165
136#endif /* _LINUX_MATH64_H */ 166#endif /* _LINUX_MATH64_H */
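
The fallback mul_u64_u32_shr() above splits a into 32-bit halves so neither multiply can overflow 64 bits; note that it relies on shift lying in (0, 32], since the << (32 - shift) term is undefined for larger shifts. A quick userspace check, under that assumption, that the split path agrees with the __int128 path:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;

static u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
        u32 ah = a >> 32, al = (u32)a;
        u64 ret = ((u64)al * mul) >> shift;

        if (ah)
                ret += ((u64)ah * mul) << (32 - shift);
        return ret;
}

int main(void)
{
        u64 a = 0x123456789abcdefULL;
        u32 mul = 0xdeadbeef;
        unsigned int shift = 22;        /* __calc_delta keeps shift <= 32 */

        u64 wide  = (u64)(((unsigned __int128)a * mul) >> shift);
        u64 split = mul_u64_u32_shr(a, mul, shift);

        printf("int128=%llu split=%llu %s\n",
               (unsigned long long)wide, (unsigned long long)split,
               wide == split ? "match" : "MISMATCH");
        return 0;
}

The split is exact for shift <= 32: the high-half contribution ah * mul * 2^(32-shift) carries no truncated bits, so only the low half is floored, exactly as in the full-width product.
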
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index f5096b58b20d..f015c059e159 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
55 struct page *newpage, struct page *page); 55 struct page *newpage, struct page *page);
56extern int migrate_page_move_mapping(struct address_space *mapping, 56extern int migrate_page_move_mapping(struct address_space *mapping,
57 struct page *newpage, struct page *page, 57 struct page *newpage, struct page *page,
58 struct buffer_head *head, enum migrate_mode mode); 58 struct buffer_head *head, enum migrate_mode mode,
59 int extra_count);
59#else 60#else
60 61
61static inline void putback_lru_pages(struct list_head *l) {} 62static inline void putback_lru_pages(struct list_head *l) {}
@@ -90,10 +91,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
90#endif /* CONFIG_MIGRATION */ 91#endif /* CONFIG_MIGRATION */
91 92
92#ifdef CONFIG_NUMA_BALANCING 93#ifdef CONFIG_NUMA_BALANCING
94extern bool pmd_trans_migrating(pmd_t pmd);
95extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
93extern int migrate_misplaced_page(struct page *page, 96extern int migrate_misplaced_page(struct page *page,
94 struct vm_area_struct *vma, int node); 97 struct vm_area_struct *vma, int node);
95extern bool migrate_ratelimited(int node); 98extern bool migrate_ratelimited(int node);
96#else 99#else
100static inline bool pmd_trans_migrating(pmd_t pmd)
101{
102 return false;
103}
104static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
105{
106}
97static inline int migrate_misplaced_page(struct page *page, 107static inline int migrate_misplaced_page(struct page *page,
98 struct vm_area_struct *vma, int node) 108 struct vm_area_struct *vma, int node)
99{ 109{
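
pmd_trans_migrating() and wait_migrate_huge_page() gain !CONFIG_NUMA_BALANCING stubs so that callers need no #ifdefs of their own. The pattern, reduced to its essentials; FEATURE is a placeholder config symbol, not a real kernel option.

#include <stdbool.h>
#include <stdio.h>

/* #define FEATURE 1 */         /* flip this to enable the real code */

#ifdef FEATURE
static bool feature_active(void) { return true; /* real implementation */ }
#else
/* static inline no-op stubs: callers compile unchanged, and the
 * optimizer deletes the dead branches when the feature is configured
 * out. */
static inline bool feature_active(void) { return false; }
#endif

int main(void)
{
        if (feature_active())
                printf("feature path\n");
        else
                printf("stubbed out\n");
        return 0;
}
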
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1cedd000cf29..35527173cf50 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
1317#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ 1317#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
1318 1318
1319#if USE_SPLIT_PTE_PTLOCKS 1319#if USE_SPLIT_PTE_PTLOCKS
1320#if BLOATED_SPINLOCKS 1320#if ALLOC_SPLIT_PTLOCKS
1321extern bool ptlock_alloc(struct page *page); 1321extern bool ptlock_alloc(struct page *page);
1322extern void ptlock_free(struct page *page); 1322extern void ptlock_free(struct page *page);
1323 1323
@@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
1325{ 1325{
1326 return page->ptl; 1326 return page->ptl;
1327} 1327}
1328#else /* BLOATED_SPINLOCKS */ 1328#else /* ALLOC_SPLIT_PTLOCKS */
1329static inline bool ptlock_alloc(struct page *page) 1329static inline bool ptlock_alloc(struct page *page)
1330{ 1330{
1331 return true; 1331 return true;
@@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
1339{ 1339{
1340 return &page->ptl; 1340 return &page->ptl;
1341} 1341}
1342#endif /* BLOATED_SPINLOCKS */ 1342#endif /* ALLOC_SPLIT_PTLOCKS */
1343 1343
1344static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) 1344static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1345{ 1345{
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bd299418a934..290901a8c1de 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -26,6 +26,7 @@ struct address_space;
26#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) 26#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
27#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ 27#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
28 IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) 28 IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
29#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
29 30
30/* 31/*
31 * Each physical page in the system has a struct page associated with 32 * Each physical page in the system has a struct page associated with
@@ -155,7 +156,7 @@ struct page {
155 * system if PG_buddy is set. 156 * system if PG_buddy is set.
156 */ 157 */
157#if USE_SPLIT_PTE_PTLOCKS 158#if USE_SPLIT_PTE_PTLOCKS
158#if BLOATED_SPINLOCKS 159#if ALLOC_SPLIT_PTLOCKS
159 spinlock_t *ptl; 160 spinlock_t *ptl;
160#else 161#else
161 spinlock_t ptl; 162 spinlock_t ptl;
@@ -443,6 +444,14 @@ struct mm_struct {
443 /* numa_scan_seq prevents two threads setting pte_numa */ 444 /* numa_scan_seq prevents two threads setting pte_numa */
444 int numa_scan_seq; 445 int numa_scan_seq;
445#endif 446#endif
447#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
448 /*
449 * An operation with batched TLB flushing is going on. Anything that
450 * can move process memory needs to flush the TLB when moving a
451 * PROT_NONE or PROT_NUMA mapped page.
452 */
453 bool tlb_flush_pending;
454#endif
446 struct uprobes_state uprobes_state; 455 struct uprobes_state uprobes_state;
447}; 456};
448 457
@@ -459,4 +468,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
459 return mm->cpu_vm_mask_var; 468 return mm->cpu_vm_mask_var;
460} 469}
461 470
471#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
472/*
473 * Memory barriers to keep this state in sync are graciously provided by
474 * the page table locks, outside of which no page table modifications happen.
475 * The barriers below prevent the compiler from re-ordering the instructions
476 * around the memory barriers that are already present in the code.
477 */
478static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
479{
480 barrier();
481 return mm->tlb_flush_pending;
482}
483static inline void set_tlb_flush_pending(struct mm_struct *mm)
484{
485 mm->tlb_flush_pending = true;
486
487 /*
488 * Guarantee that the tlb_flush_pending store does not leak into the
489 * critical section updating the page tables
490 */
491 smp_mb__before_spinlock();
492}
493/* Clearing is done after a TLB flush, which also provides a barrier. */
494static inline void clear_tlb_flush_pending(struct mm_struct *mm)
495{
496 barrier();
497 mm->tlb_flush_pending = false;
498}
499#else
500static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
501{
502 return false;
503}
504static inline void set_tlb_flush_pending(struct mm_struct *mm)
505{
506}
507static inline void clear_tlb_flush_pending(struct mm_struct *mm)
508{
509}
510#endif
511
462#endif /* _LINUX_MM_TYPES_H */ 512#endif /* _LINUX_MM_TYPES_H */
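
The helpers above publish a "batched TLB flush in progress" bit: set_tlb_flush_pending() stores the flag with a barrier before the page tables are updated, and readers check it under the page table lock. A compressed userspace model of the set/test/clear protocol, using C11 acquire/release atomics in place of the kernel's barrier() and smp_mb__before_spinlock(); illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mm {
        atomic_bool tlb_flush_pending;
};

/* Publish the flag before touching the page tables, so any racing
 * reader that sees the PTE change also sees the pending flush. */
static void set_tlb_flush_pending(struct mm *mm)
{
        atomic_store_explicit(&mm->tlb_flush_pending, true,
                              memory_order_release);
}

static bool mm_tlb_flush_pending(struct mm *mm)
{
        return atomic_load_explicit(&mm->tlb_flush_pending,
                                    memory_order_acquire);
}

/* Cleared only after the TLB flush has completed. */
static void clear_tlb_flush_pending(struct mm *mm)
{
        atomic_store_explicit(&mm->tlb_flush_pending, false,
                              memory_order_release);
}

int main(void)
{
        struct mm mm = { .tlb_flush_pending = false };

        set_tlb_flush_pending(&mm);
        printf("pending=%d\n", mm_tlb_flush_pending(&mm));  /* 1 */
        clear_tlb_flush_pending(&mm);
        printf("pending=%d\n", mm_tlb_flush_pending(&mm));  /* 0 */
        return 0;
}
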
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index abd437d0a8a7..ece0c6bbfcc5 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -51,6 +51,7 @@ struct pstore_info {
51 char *buf; 51 char *buf;
52 size_t bufsize; 52 size_t bufsize;
53 struct mutex read_mutex; /* serialize open/read/close */ 53 struct mutex read_mutex; /* serialize open/read/close */
54 int flags;
54 int (*open)(struct pstore_info *psi); 55 int (*open)(struct pstore_info *psi);
55 int (*close)(struct pstore_info *psi); 56 int (*close)(struct pstore_info *psi);
56 ssize_t (*read)(u64 *id, enum pstore_type_id *type, 57 ssize_t (*read)(u64 *id, enum pstore_type_id *type,
@@ -70,6 +71,8 @@ struct pstore_info {
70 void *data; 71 void *data;
71}; 72};
72 73
74#define PSTORE_FLAGS_FRAGILE 1
75
73#ifdef CONFIG_PSTORE 76#ifdef CONFIG_PSTORE
74extern int pstore_register(struct pstore_info *); 77extern int pstore_register(struct pstore_info *);
75extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); 78extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 8e00f9f6f963..9e7db9e73cc1 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *);
43 * Architecture-specific implementations of sys_reboot commands. 43 * Architecture-specific implementations of sys_reboot commands.
44 */ 44 */
45 45
46extern void migrate_to_reboot_cpu(void);
46extern void machine_restart(char *cmd); 47extern void machine_restart(char *cmd);
47extern void machine_halt(void); 48extern void machine_halt(void);
48extern void machine_power_off(void); 49extern void machine_power_off(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 768b037dfacb..53f97eb8dbc7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -440,8 +440,6 @@ struct task_cputime {
440 .sum_exec_runtime = 0, \ 440 .sum_exec_runtime = 0, \
441 } 441 }
442 442
443#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
444
445#ifdef CONFIG_PREEMPT_COUNT 443#ifdef CONFIG_PREEMPT_COUNT
446#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) 444#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
447#else 445#else
@@ -932,7 +930,8 @@ struct pipe_inode_info;
932struct uts_namespace; 930struct uts_namespace;
933 931
934struct load_weight { 932struct load_weight {
935 unsigned long weight, inv_weight; 933 unsigned long weight;
934 u32 inv_weight;
936}; 935};
937 936
938struct sched_avg { 937struct sched_avg {
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 45412a6afa69..321301c0a643 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -517,10 +517,6 @@ struct se_node_acl {
517 u32 acl_index; 517 u32 acl_index;
518#define MAX_ACL_TAG_SIZE 64 518#define MAX_ACL_TAG_SIZE 64
519 char acl_tag[MAX_ACL_TAG_SIZE]; 519 char acl_tag[MAX_ACL_TAG_SIZE];
520 u64 num_cmds;
521 u64 read_bytes;
522 u64 write_bytes;
523 spinlock_t stats_lock;
524 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 520 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
525 atomic_t acl_pr_ref_count; 521 atomic_t acl_pr_ref_count;
526 struct se_dev_entry **device_list; 522 struct se_dev_entry **device_list;
@@ -624,6 +620,7 @@ struct se_dev_attrib {
624 u32 unmap_granularity; 620 u32 unmap_granularity;
625 u32 unmap_granularity_alignment; 621 u32 unmap_granularity_alignment;
626 u32 max_write_same_len; 622 u32 max_write_same_len;
623 u32 max_bytes_per_io;
627 struct se_device *da_dev; 624 struct se_device *da_dev;
628 struct config_group da_group; 625 struct config_group da_group;
629}; 626};
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index bcb0912afe7a..f854ca4a1372 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -75,6 +75,7 @@
75#define DRM_VMW_PARAM_FIFO_CAPS 4 75#define DRM_VMW_PARAM_FIFO_CAPS 4
76#define DRM_VMW_PARAM_MAX_FB_SIZE 5 76#define DRM_VMW_PARAM_MAX_FB_SIZE 5
77#define DRM_VMW_PARAM_FIFO_HW_VERSION 6 77#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
78#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
78 79
79/** 80/**
80 * struct drm_vmw_getparam_arg 81 * struct drm_vmw_getparam_arg
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index e1802d6153ae..959d454f76a1 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -679,6 +679,7 @@ enum perf_event_type {
679 * 679 *
680 * { u64 weight; } && PERF_SAMPLE_WEIGHT 680 * { u64 weight; } && PERF_SAMPLE_WEIGHT
681 * { u64 data_src; } && PERF_SAMPLE_DATA_SRC 681 * { u64 data_src; } && PERF_SAMPLE_DATA_SRC
682 * { u64 transaction; } && PERF_SAMPLE_TRANSACTION
682 * }; 683 * };
683 */ 684 */
684 PERF_RECORD_SAMPLE = 9, 685 PERF_RECORD_SAMPLE = 9,
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 65e12099ef89..ae665ac59c36 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -146,7 +146,7 @@ struct blkif_request_segment_aligned {
146struct blkif_request_rw { 146struct blkif_request_rw {
147 uint8_t nr_segments; /* number of segments */ 147 uint8_t nr_segments; /* number of segments */
148 blkif_vdev_t handle; /* only for read/write requests */ 148 blkif_vdev_t handle; /* only for read/write requests */
149#ifdef CONFIG_X86_64 149#ifndef CONFIG_X86_32
150 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ 150 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */
151#endif 151#endif
152 uint64_t id; /* private guest value, echoed in resp */ 152 uint64_t id; /* private guest value, echoed in resp */
@@ -163,7 +163,7 @@ struct blkif_request_discard {
163 uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */ 163 uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */
164#define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ 164#define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */
165 blkif_vdev_t _pad1; /* only for read/write requests */ 165 blkif_vdev_t _pad1; /* only for read/write requests */
166#ifdef CONFIG_X86_64 166#ifndef CONFIG_X86_32
167 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ 167 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/
168#endif 168#endif
169 uint64_t id; /* private guest value, echoed in resp */ 169 uint64_t id; /* private guest value, echoed in resp */
@@ -175,7 +175,7 @@ struct blkif_request_discard {
175struct blkif_request_other { 175struct blkif_request_other {
176 uint8_t _pad1; 176 uint8_t _pad1;
177 blkif_vdev_t _pad2; /* only for read/write requests */ 177 blkif_vdev_t _pad2; /* only for read/write requests */
178#ifdef CONFIG_X86_64 178#ifndef CONFIG_X86_32
179 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ 179 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/
180#endif 180#endif
181 uint64_t id; /* private guest value, echoed in resp */ 181 uint64_t id; /* private guest value, echoed in resp */
@@ -184,7 +184,7 @@ struct blkif_request_other {
184struct blkif_request_indirect { 184struct blkif_request_indirect {
185 uint8_t indirect_op; 185 uint8_t indirect_op;
186 uint16_t nr_segments; 186 uint16_t nr_segments;
187#ifdef CONFIG_X86_64 187#ifndef CONFIG_X86_32
188 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ 188 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */
189#endif 189#endif
190 uint64_t id; 190 uint64_t id;
@@ -192,7 +192,7 @@ struct blkif_request_indirect {
192 blkif_vdev_t handle; 192 blkif_vdev_t handle;
193 uint16_t _pad2; 193 uint16_t _pad2;
194 grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; 194 grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
195#ifdef CONFIG_X86_64 195#ifndef CONFIG_X86_32
196 uint32_t _pad3; /* make it 64 byte aligned */ 196 uint32_t _pad3; /* make it 64 byte aligned */
197#else 197#else
198 uint64_t _pad3; /* make it 64 byte aligned */ 198 uint64_t _pad3; /* make it 64 byte aligned */
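
The #ifdef CONFIG_X86_64 to #ifndef CONFIG_X86_32 flip makes the explicit padding apply to every 64-bit architecture rather than x86-64 alone. The point of the explicit _pad fields is to pin the id field to byte offset 8 regardless of the ABI's implicit alignment rules, which differ (i386 aligns u64 to only 4 bytes). A compile-time check of the layout, with the struct simplified from the header:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t blkif_vdev_t;

/* Simplified from blkif_request_rw: the explicit _pad1 pins id to byte
 * 8 on every ABI, instead of trusting implicit alignment padding. */
struct req_padded {
        uint8_t      nr_segments;      /* offset 0 */
        blkif_vdev_t handle;           /* offset 2 */
        uint32_t     _pad1;            /* offset 4 */
        uint64_t     id;               /* offset 8, everywhere */
};

int main(void)
{
        /* The shared-ring guest/host ABI requires id at byte 8. */
        static_assert(offsetof(struct req_padded, id) == 8,
                      "id must sit at offset 8");
        printf("id offset = %zu, size = %zu\n",
               offsetof(struct req_padded, id), sizeof(struct req_padded));
        return 0;
}
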
diff --git a/init/Kconfig b/init/Kconfig
index 79383d3aa5dc..4e5d96ab2034 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -809,6 +809,12 @@ config GENERIC_SCHED_CLOCK
809config ARCH_SUPPORTS_NUMA_BALANCING 809config ARCH_SUPPORTS_NUMA_BALANCING
810 bool 810 bool
811 811
812#
813# For architectures that know their GCC __int128 support is sound
814#
815config ARCH_SUPPORTS_INT128
816 bool
817
812# For architectures that (ab)use NUMA to represent different memory regions 818# For architectures that (ab)use NUMA to represent different memory regions
813# all cpu-local but of different latencies, such as SuperH. 819# all cpu-local but of different latencies, such as SuperH.
814# 820#
diff --git a/kernel/Makefile b/kernel/Makefile
index bbaf7d59c1bb..bc010ee272b6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -137,9 +137,10 @@ $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
137############################################################################### 137###############################################################################
138ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) 138ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y)
139X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509) 139X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509)
140X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509 140X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += $(objtree)/signing_key.x509
141X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \ 141X509_CERTIFICATES-raw := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
142 $(or $(realpath $(CERT)),$(CERT)))) 142 $(or $(realpath $(CERT)),$(CERT))))
143X509_CERTIFICATES := $(subst $(realpath $(objtree))/,,$(X509_CERTIFICATES-raw))
143 144
144ifeq ($(X509_CERTIFICATES),) 145ifeq ($(X509_CERTIFICATES),)
145$(warning *** No X.509 certificates found ***) 146$(warning *** No X.509 certificates found ***)
@@ -164,9 +165,9 @@ $(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list
164targets += $(obj)/.x509.list 165targets += $(obj)/.x509.list
165$(obj)/.x509.list: 166$(obj)/.x509.list:
166 @echo $(X509_CERTIFICATES) >$@ 167 @echo $(X509_CERTIFICATES) >$@
168endif
167 169
168clean-files := x509_certificate_list .x509.list 170clean-files := x509_certificate_list .x509.list
169endif
170 171
171ifeq ($(CONFIG_MODULE_SIG),y) 172ifeq ($(CONFIG_MODULE_SIG),y)
172############################################################################### 173###############################################################################
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 5253204afdca..9fd4246b04b8 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -22,6 +22,6 @@ void foo(void)
22#ifdef CONFIG_SMP 22#ifdef CONFIG_SMP
23 DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS)); 23 DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
24#endif 24#endif
25 DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int)); 25 DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
26 /* End of constants */ 26 /* End of constants */
27} 27}
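
Exporting the raw SPINLOCK_SIZE instead of a BLOATED_SPINLOCKS boolean lets mm_types.h make the actual decision itself: ALLOC_SPLIT_PTLOCKS is true only when the lock no longer fits into one machine word, in which case struct page carries a pointer to a separately allocated ptl rather than embedding it. The decision, sketched in userspace with a stand-in spinlock_t layout:

#include <stdio.h>

typedef struct { unsigned int slock; } spinlock_t;   /* stand-in layout */

#define BITS_PER_LONG   (8 * (int)sizeof(long))
#define SPINLOCK_SIZE   ((int)sizeof(spinlock_t))
/* Allocate the page-table lock separately only when it no longer fits
 * into one machine word inside struct page. */
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG / 8)

int main(void)
{
        printf("spinlock: %d bytes, word: %d bytes -> %s ptl\n",
               SPINLOCK_SIZE, BITS_PER_LONG / 8,
               ALLOC_SPLIT_PTLOCKS ? "pointer to" : "embedded");
        return 0;
}
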
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 72348dc192c1..f5744010a8d2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event,
1396 if (event->state != PERF_EVENT_STATE_ACTIVE) 1396 if (event->state != PERF_EVENT_STATE_ACTIVE)
1397 return; 1397 return;
1398 1398
1399 perf_pmu_disable(event->pmu);
1400
1399 event->state = PERF_EVENT_STATE_INACTIVE; 1401 event->state = PERF_EVENT_STATE_INACTIVE;
1400 if (event->pending_disable) { 1402 if (event->pending_disable) {
1401 event->pending_disable = 0; 1403 event->pending_disable = 0;
@@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event,
1412 ctx->nr_freq--; 1414 ctx->nr_freq--;
1413 if (event->attr.exclusive || !cpuctx->active_oncpu) 1415 if (event->attr.exclusive || !cpuctx->active_oncpu)
1414 cpuctx->exclusive = 0; 1416 cpuctx->exclusive = 0;
1417
1418 perf_pmu_enable(event->pmu);
1415} 1419}
1416 1420
1417static void 1421static void
@@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event,
1652 struct perf_event_context *ctx) 1656 struct perf_event_context *ctx)
1653{ 1657{
1654 u64 tstamp = perf_event_time(event); 1658 u64 tstamp = perf_event_time(event);
1659 int ret = 0;
1655 1660
1656 if (event->state <= PERF_EVENT_STATE_OFF) 1661 if (event->state <= PERF_EVENT_STATE_OFF)
1657 return 0; 1662 return 0;
@@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event,
1674 */ 1679 */
1675 smp_wmb(); 1680 smp_wmb();
1676 1681
1682 perf_pmu_disable(event->pmu);
1683
1677 if (event->pmu->add(event, PERF_EF_START)) { 1684 if (event->pmu->add(event, PERF_EF_START)) {
1678 event->state = PERF_EVENT_STATE_INACTIVE; 1685 event->state = PERF_EVENT_STATE_INACTIVE;
1679 event->oncpu = -1; 1686 event->oncpu = -1;
1680 return -EAGAIN; 1687 ret = -EAGAIN;
1688 goto out;
1681 } 1689 }
1682 1690
1683 event->tstamp_running += tstamp - event->tstamp_stopped; 1691 event->tstamp_running += tstamp - event->tstamp_stopped;
@@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event,
1693 if (event->attr.exclusive) 1701 if (event->attr.exclusive)
1694 cpuctx->exclusive = 1; 1702 cpuctx->exclusive = 1;
1695 1703
1696 return 0; 1704out:
1705 perf_pmu_enable(event->pmu);
1706
1707 return ret;
1697} 1708}
1698 1709
1699static int 1710static int
@@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2743 if (!event_filter_match(event)) 2754 if (!event_filter_match(event))
2744 continue; 2755 continue;
2745 2756
2757 perf_pmu_disable(event->pmu);
2758
2746 hwc = &event->hw; 2759 hwc = &event->hw;
2747 2760
2748 if (hwc->interrupts == MAX_INTERRUPTS) { 2761 if (hwc->interrupts == MAX_INTERRUPTS) {
@@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2752 } 2765 }
2753 2766
2754 if (!event->attr.freq || !event->attr.sample_freq) 2767 if (!event->attr.freq || !event->attr.sample_freq)
2755 continue; 2768 goto next;
2756 2769
2757 /* 2770 /*
2758 * stop the event and update event->count 2771 * stop the event and update event->count
@@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2774 perf_adjust_period(event, period, delta, false); 2787 perf_adjust_period(event, period, delta, false);
2775 2788
2776 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 2789 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
2790 next:
2791 perf_pmu_enable(event->pmu);
2777 } 2792 }
2778 2793
2779 perf_pmu_enable(ctx->pmu); 2794 perf_pmu_enable(ctx->pmu);
diff --git a/kernel/fork.c b/kernel/fork.c
index 728d5be9548c..5721f0e3f2da 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
537 spin_lock_init(&mm->page_table_lock); 537 spin_lock_init(&mm->page_table_lock);
538 mm_init_aio(mm); 538 mm_init_aio(mm);
539 mm_init_owner(mm, p); 539 mm_init_owner(mm, p);
540 clear_tlb_flush_pending(mm);
540 541
541 if (likely(!mm_alloc_pgd(mm))) { 542 if (likely(!mm_alloc_pgd(mm))) {
542 mm->def_flags = 0; 543 mm->def_flags = 0;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index d0d8fca54065..9c970167e402 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1680,6 +1680,7 @@ int kernel_kexec(void)
1680 { 1680 {
1681 kexec_in_progress = true; 1681 kexec_in_progress = true;
1682 kernel_restart_prepare(NULL); 1682 kernel_restart_prepare(NULL);
1683 migrate_to_reboot_cpu();
1683 printk(KERN_EMERG "Starting new kernel\n"); 1684 printk(KERN_EMERG "Starting new kernel\n");
1684 machine_shutdown(); 1685 machine_shutdown();
1685 } 1686 }
diff --git a/kernel/reboot.c b/kernel/reboot.c
index f813b3474646..662c83fc16b7 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -104,7 +104,7 @@ int unregister_reboot_notifier(struct notifier_block *nb)
104} 104}
105EXPORT_SYMBOL(unregister_reboot_notifier); 105EXPORT_SYMBOL(unregister_reboot_notifier);
106 106
107static void migrate_to_reboot_cpu(void) 107void migrate_to_reboot_cpu(void)
108{ 108{
109 /* The boot cpu is always logical cpu 0 */ 109 /* The boot cpu is always logical cpu 0 */
110 int cpu = reboot_cpu; 110 int cpu = reboot_cpu;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e85cda20ab2b..a88f4a485c5e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4902,6 +4902,7 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym);
4902static void update_top_cache_domain(int cpu) 4902static void update_top_cache_domain(int cpu)
4903{ 4903{
4904 struct sched_domain *sd; 4904 struct sched_domain *sd;
4905 struct sched_domain *busy_sd = NULL;
4905 int id = cpu; 4906 int id = cpu;
4906 int size = 1; 4907 int size = 1;
4907 4908
@@ -4909,9 +4910,9 @@ static void update_top_cache_domain(int cpu)
4909 if (sd) { 4910 if (sd) {
4910 id = cpumask_first(sched_domain_span(sd)); 4911 id = cpumask_first(sched_domain_span(sd));
4911 size = cpumask_weight(sched_domain_span(sd)); 4912 size = cpumask_weight(sched_domain_span(sd));
4912 sd = sd->parent; /* sd_busy */ 4913 busy_sd = sd->parent; /* sd_busy */
4913 } 4914 }
4914 rcu_assign_pointer(per_cpu(sd_busy, cpu), sd); 4915 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
4915 4916
4916 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 4917 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
4917 per_cpu(sd_llc_size, cpu) = size; 4918 per_cpu(sd_llc_size, cpu) = size;
@@ -5112,6 +5113,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5112 * die on a /0 trap. 5113 * die on a /0 trap.
5113 */ 5114 */
5114 sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span); 5115 sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
5116 sg->sgp->power_orig = sg->sgp->power;
5115 5117
5116 /* 5118 /*
5117 * Make sure the first group of this domain contains the 5119 * Make sure the first group of this domain contains the
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fd773ade1a31..c7395d97e4cb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -178,59 +178,61 @@ void sched_init_granularity(void)
178 update_sysctl(); 178 update_sysctl();
179} 179}
180 180
181#if BITS_PER_LONG == 32 181#define WMULT_CONST (~0U)
182# define WMULT_CONST (~0UL)
183#else
184# define WMULT_CONST (1UL << 32)
185#endif
186
187#define WMULT_SHIFT 32 182#define WMULT_SHIFT 32
188 183
189/* 184static void __update_inv_weight(struct load_weight *lw)
190 * Shift right and round: 185{
191 */ 186 unsigned long w;
192#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) 187
188 if (likely(lw->inv_weight))
189 return;
190
191 w = scale_load_down(lw->weight);
192
193 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
194 lw->inv_weight = 1;
195 else if (unlikely(!w))
196 lw->inv_weight = WMULT_CONST;
197 else
198 lw->inv_weight = WMULT_CONST / w;
199}
193 200
194/* 201/*
195 * delta *= weight / lw 202 * delta_exec * weight / lw.weight
203 * OR
204 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
205 *
206 * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
207 * we're guaranteed shift stays positive because inv_weight is guaranteed to
208 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
209 *
 210 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
211 * weight/lw.weight <= 1, and therefore our shift will also be positive.
196 */ 212 */
197static unsigned long 213static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
198calc_delta_mine(unsigned long delta_exec, unsigned long weight,
199 struct load_weight *lw)
200{ 214{
201 u64 tmp; 215 u64 fact = scale_load_down(weight);
202 216 int shift = WMULT_SHIFT;
203 /*
204 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
205 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
206 * 2^SCHED_LOAD_RESOLUTION.
207 */
208 if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
209 tmp = (u64)delta_exec * scale_load_down(weight);
210 else
211 tmp = (u64)delta_exec;
212 217
213 if (!lw->inv_weight) { 218 __update_inv_weight(lw);
214 unsigned long w = scale_load_down(lw->weight);
215 219
216 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) 220 if (unlikely(fact >> 32)) {
217 lw->inv_weight = 1; 221 while (fact >> 32) {
218 else if (unlikely(!w)) 222 fact >>= 1;
219 lw->inv_weight = WMULT_CONST; 223 shift--;
220 else 224 }
221 lw->inv_weight = WMULT_CONST / w;
222 } 225 }
223 226
224 /* 227 /* hint to use a 32x32->64 mul */
225 * Check whether we'd overflow the 64-bit multiplication: 228 fact = (u64)(u32)fact * lw->inv_weight;
226 */ 229
227 if (unlikely(tmp > WMULT_CONST)) 230 while (fact >> 32) {
228 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, 231 fact >>= 1;
229 WMULT_SHIFT/2); 232 shift--;
230 else 233 }
231 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
232 234
233 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); 235 return mul_u64_u32_shr(delta_exec, fact, shift);
234} 236}
235 237
236 238
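
The __calc_delta() rewrite above replaces the 32/64-bit special cases with one fixed-point path: precompute inv_weight = 2^32 / weight once, then every delta becomes a multiply and a shift, with fact normalized down to 32 bits so the 32x32->64 multiply cannot overflow. A userspace transcription under simplifying assumptions (scale_load_down omitted, weights assumed to fit 32 bits as they do for nice levels, and __int128 used in place of mul_u64_u32_shr):

#include <stdint.h>
#include <stdio.h>

#define WMULT_CONST (~0U)
#define WMULT_SHIFT 32

struct load_weight { unsigned long weight; uint32_t inv_weight; };

static void __update_inv_weight(struct load_weight *lw)
{
        unsigned long w = lw->weight;

        if (lw->inv_weight)
                return;
        if (sizeof(long) > 4 && w >= WMULT_CONST)
                lw->inv_weight = 1;
        else if (!w)
                lw->inv_weight = WMULT_CONST;
        else
                lw->inv_weight = WMULT_CONST / w;
}

/* delta_exec * weight / lw->weight, computed as
 * (delta_exec * (weight * inv_weight)) >> WMULT_SHIFT. */
static uint64_t __calc_delta(uint64_t delta_exec, unsigned long weight,
                             struct load_weight *lw)
{
        uint64_t fact = weight;
        int shift = WMULT_SHIFT;

        __update_inv_weight(lw);

        while (fact >> 32) {            /* keep fact within 32 bits */
                fact >>= 1;
                shift--;
        }
        fact = (uint64_t)(uint32_t)fact * lw->inv_weight;
        while (fact >> 32) {
                fact >>= 1;
                shift--;
        }
        return (uint64_t)(((unsigned __int128)delta_exec * fact) >> shift);
}

int main(void)
{
        struct load_weight lw = { .weight = 3072, .inv_weight = 0 };
        uint64_t d = 4000000;           /* 4 ms in ns */

        /* weight 1024 (NICE_0) on a queue weighing 3072: expect ~d/3. */
        printf("%llu\n", (unsigned long long)__calc_delta(d, 1024, &lw));
        return 0;
}
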
@@ -443,7 +445,7 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
443#endif /* CONFIG_FAIR_GROUP_SCHED */ 445#endif /* CONFIG_FAIR_GROUP_SCHED */
444 446
445static __always_inline 447static __always_inline
446void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec); 448void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
447 449
448/************************************************************** 450/**************************************************************
449 * Scheduling class tree data structure manipulation methods: 451 * Scheduling class tree data structure manipulation methods:
@@ -612,11 +614,10 @@ int sched_proc_update_handler(struct ctl_table *table, int write,
612/* 614/*
613 * delta /= w 615 * delta /= w
614 */ 616 */
615static inline unsigned long 617static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
616calc_delta_fair(unsigned long delta, struct sched_entity *se)
617{ 618{
618 if (unlikely(se->load.weight != NICE_0_LOAD)) 619 if (unlikely(se->load.weight != NICE_0_LOAD))
619 delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load); 620 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
620 621
621 return delta; 622 return delta;
622} 623}
@@ -665,7 +666,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
665 update_load_add(&lw, se->load.weight); 666 update_load_add(&lw, se->load.weight);
666 load = &lw; 667 load = &lw;
667 } 668 }
668 slice = calc_delta_mine(slice, se->load.weight, load); 669 slice = __calc_delta(slice, se->load.weight, load);
669 } 670 }
670 return slice; 671 return slice;
671} 672}
@@ -703,47 +704,32 @@ void init_task_runnable_average(struct task_struct *p)
703#endif 704#endif
704 705
705/* 706/*
706 * Update the current task's runtime statistics. Skip current tasks that 707 * Update the current task's runtime statistics.
707 * are not in our scheduling class.
708 */ 708 */
709static inline void
710__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
711 unsigned long delta_exec)
712{
713 unsigned long delta_exec_weighted;
714
715 schedstat_set(curr->statistics.exec_max,
716 max((u64)delta_exec, curr->statistics.exec_max));
717
718 curr->sum_exec_runtime += delta_exec;
719 schedstat_add(cfs_rq, exec_clock, delta_exec);
720 delta_exec_weighted = calc_delta_fair(delta_exec, curr);
721
722 curr->vruntime += delta_exec_weighted;
723 update_min_vruntime(cfs_rq);
724}
725
726static void update_curr(struct cfs_rq *cfs_rq) 709static void update_curr(struct cfs_rq *cfs_rq)
727{ 710{
728 struct sched_entity *curr = cfs_rq->curr; 711 struct sched_entity *curr = cfs_rq->curr;
729 u64 now = rq_clock_task(rq_of(cfs_rq)); 712 u64 now = rq_clock_task(rq_of(cfs_rq));
730 unsigned long delta_exec; 713 u64 delta_exec;
731 714
732 if (unlikely(!curr)) 715 if (unlikely(!curr))
733 return; 716 return;
734 717
735 /* 718 delta_exec = now - curr->exec_start;
736 * Get the amount of time the current task was running 719 if (unlikely((s64)delta_exec <= 0))
737 * since the last time we changed load (this cannot
738 * overflow on 32 bits):
739 */
740 delta_exec = (unsigned long)(now - curr->exec_start);
741 if (!delta_exec)
742 return; 720 return;
743 721
744 __update_curr(cfs_rq, curr, delta_exec);
745 curr->exec_start = now; 722 curr->exec_start = now;
746 723
724 schedstat_set(curr->statistics.exec_max,
725 max(delta_exec, curr->statistics.exec_max));
726
727 curr->sum_exec_runtime += delta_exec;
728 schedstat_add(cfs_rq, exec_clock, delta_exec);
729
730 curr->vruntime += calc_delta_fair(delta_exec, curr);
731 update_min_vruntime(cfs_rq);
732
747 if (entity_is_task(curr)) { 733 if (entity_is_task(curr)) {
748 struct task_struct *curtask = task_of(curr); 734 struct task_struct *curtask = task_of(curr);
749 735
@@ -1752,6 +1738,13 @@ void task_numa_work(struct callback_head *work)
1752 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) 1738 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
1753 continue; 1739 continue;
1754 1740
1741 /*
1742 * Skip inaccessible VMAs to avoid any confusion between
1743 * PROT_NONE and NUMA hinting ptes
1744 */
1745 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
1746 continue;
1747
1755 do { 1748 do {
1756 start = max(start, vma->vm_start); 1749 start = max(start, vma->vm_start);
1757 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); 1750 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
@@ -3015,8 +3008,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3015 } 3008 }
3016} 3009}
3017 3010
3018static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, 3011static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3019 unsigned long delta_exec)
3020{ 3012{
3021 /* dock delta_exec before expiring quota (as it could span periods) */ 3013 /* dock delta_exec before expiring quota (as it could span periods) */
3022 cfs_rq->runtime_remaining -= delta_exec; 3014 cfs_rq->runtime_remaining -= delta_exec;
@@ -3034,7 +3026,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
3034} 3026}
3035 3027
3036static __always_inline 3028static __always_inline
3037void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) 3029void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3038{ 3030{
3039 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) 3031 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
3040 return; 3032 return;
@@ -3574,8 +3566,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3574 return rq_clock_task(rq_of(cfs_rq)); 3566 return rq_clock_task(rq_of(cfs_rq));
3575} 3567}
3576 3568
3577static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, 3569static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
3578 unsigned long delta_exec) {}
3579static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 3570static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3580static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} 3571static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
3581static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 3572static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7d57275fc396..1c4065575fa2 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -901,6 +901,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
901{ 901{
902 struct rq *rq = rq_of_rt_rq(rt_rq); 902 struct rq *rq = rq_of_rt_rq(rt_rq);
903 903
904#ifdef CONFIG_RT_GROUP_SCHED
905 /*
906 * Change rq's cpupri only if rt_rq is the top queue.
907 */
908 if (&rq->rt != rt_rq)
909 return;
910#endif
904 if (rq->online && prio < prev_prio) 911 if (rq->online && prio < prev_prio)
905 cpupri_set(&rq->rd->cpupri, rq->cpu, prio); 912 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
906} 913}
@@ -910,6 +917,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
910{ 917{
911 struct rq *rq = rq_of_rt_rq(rt_rq); 918 struct rq *rq = rq_of_rt_rq(rt_rq);
912 919
920#ifdef CONFIG_RT_GROUP_SCHED
921 /*
922 * Change rq's cpupri only if rt_rq is the top queue.
923 */
924 if (&rq->rt != rt_rq)
925 return;
926#endif
913 if (rq->online && rt_rq->highest_prio.curr != prev_prio) 927 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
914 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); 928 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
915} 929}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0e9f9eaade2f..72a0f81dc5a8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -775,7 +775,7 @@ static int ftrace_profile_init(void)
775 int cpu; 775 int cpu;
776 int ret = 0; 776 int ret = 0;
777 777
778 for_each_online_cpu(cpu) { 778 for_each_possible_cpu(cpu) {
779 ret = ftrace_profile_init_cpu(cpu); 779 ret = ftrace_profile_init_cpu(cpu);
780 if (ret) 780 if (ret)
781 break; 781 break;
diff --git a/kernel/user.c b/kernel/user.c
index a3a0dbfda329..c006131beb77 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -51,9 +51,9 @@ struct user_namespace init_user_ns = {
51 .owner = GLOBAL_ROOT_UID, 51 .owner = GLOBAL_ROOT_UID,
52 .group = GLOBAL_ROOT_GID, 52 .group = GLOBAL_ROOT_GID,
53 .proc_inum = PROC_USER_INIT_INO, 53 .proc_inum = PROC_USER_INIT_INO,
54#ifdef CONFIG_KEYS_KERBEROS_CACHE 54#ifdef CONFIG_PERSISTENT_KEYRINGS
55 .krb_cache_register_sem = 55 .persistent_keyring_register_sem =
56 __RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem), 56 __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
57#endif 57#endif
58}; 58};
59EXPORT_SYMBOL_GPL(init_user_ns); 59EXPORT_SYMBOL_GPL(init_user_ns);
diff --git a/mm/Kconfig b/mm/Kconfig
index eb69f352401d..723bbe04a0b0 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -543,7 +543,7 @@ config ZSWAP
 
 config MEM_SOFT_DIRTY
 	bool "Track memory changes"
-	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY
+	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
 	select PROC_PAGE_MONITOR
 	help
 	  This option enables memory changes tracking by introducing a
diff --git a/mm/compaction.c b/mm/compaction.c
index 805165bcd3dd..f58bcd016f43 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc,
 			bool migrate_scanner)
 {
 	struct zone *zone = cc->zone;
+
+	if (cc->ignore_skip_hint)
+		return;
+
 	if (!page)
 		return;
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 33a5dc492810..7de1bf85f683 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -882,6 +882,10 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		ret = 0;
 		goto out_unlock;
 	}
+
+	/* mmap_sem prevents this happening but warn if that changes */
+	WARN_ON(pmd_trans_migrating(pmd));
+
 	if (unlikely(pmd_trans_splitting(pmd))) {
 		/* split huge page running from under us */
 		spin_unlock(src_ptl);
@@ -1243,6 +1247,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
 		return ERR_PTR(-EFAULT);
 
+	/* Full NUMA hinting faults to serialise migration in fault paths */
+	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+		goto out;
+
 	page = pmd_page(*pmd);
 	VM_BUG_ON(!PageHead(page));
 	if (flags & FOLL_TOUCH) {
@@ -1295,6 +1303,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(!pmd_same(pmd, *pmdp)))
 		goto out_unlock;
 
+	/*
+	 * If there are potential migrations, wait for completion and retry
+	 * without disrupting NUMA hinting information. Do not relock and
+	 * check_same as the page may no longer be mapped.
+	 */
+	if (unlikely(pmd_trans_migrating(*pmdp))) {
+		spin_unlock(ptl);
+		wait_migrate_huge_page(vma->anon_vma, pmdp);
+		goto out;
+	}
+
 	page = pmd_page(pmd);
 	BUG_ON(is_huge_zero_page(page));
 	page_nid = page_to_nid(page);
@@ -1323,23 +1342,22 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		/* If the page was locked, there are no parallel migrations */
 		if (page_locked)
 			goto clear_pmdnuma;
+	}
 
-	/*
-	 * Otherwise wait for potential migrations and retry. We do
-	 * relock and check_same as the page may no longer be mapped.
-	 * As the fault is being retried, do not account for it.
-	 */
+	/* Migration could have started since the pmd_trans_migrating check */
+	if (!page_locked) {
 		spin_unlock(ptl);
 		wait_on_page_locked(page);
 		page_nid = -1;
 		goto out;
 	}
 
-	/* Page is misplaced, serialise migrations and parallel THP splits */
+	/*
+	 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
+	 * to serialises splits
+	 */
 	get_page(page);
 	spin_unlock(ptl);
-	if (!page_locked)
-		lock_page(page);
 	anon_vma = page_lock_anon_vma_read(page);
 
 	/* Confirm the PMD did not change while page_table_lock was released */
@@ -1351,6 +1369,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_unlock;
 	}
 
+	/* Bail if we fail to protect against THP splits for any reason */
+	if (unlikely(!anon_vma)) {
+		put_page(page);
+		page_nid = -1;
+		goto clear_pmdnuma;
+	}
+
 	/*
 	 * Migrate the THP to the requested node, returns with page unlocked
 	 * and pmd_numa cleared.
@@ -1517,6 +1542,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		ret = 1;
 		if (!prot_numa) {
 			entry = pmdp_get_and_clear(mm, addr, pmd);
+			if (pmd_numa(entry))
+				entry = pmd_mknonnuma(entry);
 			entry = pmd_modify(entry, newprot);
 			ret = HPAGE_PMD_NR;
 			BUG_ON(pmd_write(entry));
@@ -1531,7 +1558,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			 */
 			if (!is_huge_zero_page(page) &&
 					!pmd_numa(*pmd)) {
-				entry = pmdp_get_and_clear(mm, addr, pmd);
+				entry = *pmd;
 				entry = pmd_mknuma(entry);
 				ret = HPAGE_PMD_NR;
 			}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index b7c171602ba1..db08af92c6fc 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1505,10 +1505,16 @@ static int soft_offline_huge_page(struct page *page, int flags)
 		if (ret > 0)
 			ret = -EIO;
 	} else {
-		set_page_hwpoison_huge_page(hpage);
-		dequeue_hwpoisoned_huge_page(hpage);
-		atomic_long_add(1 << compound_order(hpage),
-				&num_poisoned_pages);
+		/* overcommit hugetlb page will be freed to buddy */
+		if (PageHuge(page)) {
+			set_page_hwpoison_huge_page(hpage);
+			dequeue_hwpoisoned_huge_page(hpage);
+			atomic_long_add(1 << compound_order(hpage),
+					&num_poisoned_pages);
+		} else {
+			SetPageHWPoison(page);
+			atomic_long_inc(&num_poisoned_pages);
+		}
 	}
 	return ret;
 }
diff --git a/mm/memory.c b/mm/memory.c
index 5d9025f3b3e1..6768ce9e57d2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4271,7 +4271,7 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
-#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
+#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
 bool ptlock_alloc(struct page *page)
 {
 	spinlock_t *ptl;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index eca4a3129129..0cd2c4d4e270 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1197,14 +1197,16 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int *
 			break;
 		vma = vma->vm_next;
 	}
+
+	if (PageHuge(page)) {
+		if (vma)
+			return alloc_huge_page_noerr(vma, address, 1);
+		else
+			return NULL;
+	}
 	/*
-	 * queue_pages_range() confirms that @page belongs to some vma,
-	 * so vma shouldn't be NULL.
+	 * if !vma, alloc_page_vma() will use task or system default policy
 	 */
-	BUG_ON(!vma);
-
-	if (PageHuge(page))
-		return alloc_huge_page_noerr(vma, address, 1);
 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 }
 #else
@@ -1318,7 +1320,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 		if (nr_failed && (flags & MPOL_MF_STRICT))
 			err = -EIO;
 	} else
-		putback_lru_pages(&pagelist);
+		putback_movable_pages(&pagelist);
 
 	up_write(&mm->mmap_sem);
  mpol_out:
diff --git a/mm/migrate.c b/mm/migrate.c
index bb940045fe85..9194375b2307 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -36,6 +36,7 @@
 #include <linux/hugetlb_cgroup.h>
 #include <linux/gfp.h>
 #include <linux/balloon_compaction.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/tlbflush.h>
 
@@ -316,14 +317,15 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
  */
 int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page,
-		struct buffer_head *head, enum migrate_mode mode)
+		struct buffer_head *head, enum migrate_mode mode,
+		int extra_count)
 {
-	int expected_count = 0;
+	int expected_count = 1 + extra_count;
 	void **pslot;
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
-		if (page_count(page) != 1)
+		if (page_count(page) != expected_count)
 			return -EAGAIN;
 		return MIGRATEPAGE_SUCCESS;
 	}
@@ -333,7 +335,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
 					page_index(page));
 
-	expected_count = 2 + page_has_private(page);
+	expected_count += 1 + page_has_private(page);
 	if (page_count(page) != expected_count ||
 		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 		spin_unlock_irq(&mapping->tree_lock);
@@ -583,7 +585,7 @@ int migrate_page(struct address_space *mapping,
 
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
+	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
 
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
@@ -610,7 +612,7 @@ int buffer_migrate_page(struct address_space *mapping,
 
 	head = page_buffers(page);
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
+	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
 
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
@@ -1654,6 +1656,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 	return 1;
 }
 
+bool pmd_trans_migrating(pmd_t pmd)
+{
+	struct page *page = pmd_page(pmd);
+	return PageLocked(page);
+}
+
+void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+	struct page *page = pmd_page(*pmd);
+	wait_on_page_locked(page);
+}
+
 /*
  * Attempt to migrate a misplaced page to the specified destination
  * node. Caller is expected to have an elevated reference count on
@@ -1716,12 +1730,14 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 				struct page *page, int node)
 {
 	spinlock_t *ptl;
-	unsigned long haddr = address & HPAGE_PMD_MASK;
 	pg_data_t *pgdat = NODE_DATA(node);
 	int isolated = 0;
 	struct page *new_page = NULL;
 	struct mem_cgroup *memcg = NULL;
 	int page_lru = page_is_file_cache(page);
+	unsigned long mmun_start = address & HPAGE_PMD_MASK;
+	unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
+	pmd_t orig_entry;
 
 	/*
 	 * Rate-limit the amount of data that is being migrated to a node.
@@ -1744,6 +1760,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 		goto out_fail;
 	}
 
+	if (mm_tlb_flush_pending(mm))
+		flush_tlb_range(vma, mmun_start, mmun_end);
+
 	/* Prepare a page as a migration target */
 	__set_page_locked(new_page);
 	SetPageSwapBacked(new_page);
@@ -1755,9 +1774,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	WARN_ON(PageLRU(new_page));
 
 	/* Recheck the target PMD */
+	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	ptl = pmd_lock(mm, pmd);
-	if (unlikely(!pmd_same(*pmd, entry))) {
+	if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
+fail_putback:
 		spin_unlock(ptl);
+		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
 		/* Reverse changes made by migrate_page_copy() */
 		if (TestClearPageActive(new_page))
@@ -1774,7 +1796,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 		putback_lru_page(page);
 		mod_zone_page_state(page_zone(page),
 			 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
-		goto out_fail;
+
+		goto out_unlock;
 	}
 
 	/*
@@ -1786,16 +1809,35 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	 */
 	mem_cgroup_prepare_migration(page, new_page, &memcg);
 
+	orig_entry = *pmd;
 	entry = mk_pmd(new_page, vma->vm_page_prot);
-	entry = pmd_mknonnuma(entry);
-	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 	entry = pmd_mkhuge(entry);
+	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
-	pmdp_clear_flush(vma, haddr, pmd);
-	set_pmd_at(mm, haddr, pmd, entry);
-	page_add_new_anon_rmap(new_page, vma, haddr);
+	/*
+	 * Clear the old entry under pagetable lock and establish the new PTE.
+	 * Any parallel GUP will either observe the old page blocking on the
+	 * page lock, block on the page table lock or observe the new page.
+	 * The SetPageUptodate on the new page and page_add_new_anon_rmap
+	 * guarantee the copy is visible before the pagetable update.
+	 */
+	flush_cache_range(vma, mmun_start, mmun_end);
+	page_add_new_anon_rmap(new_page, vma, mmun_start);
+	pmdp_clear_flush(vma, mmun_start, pmd);
+	set_pmd_at(mm, mmun_start, pmd, entry);
+	flush_tlb_range(vma, mmun_start, mmun_end);
 	update_mmu_cache_pmd(vma, address, &entry);
+
+	if (page_count(page) != 2) {
+		set_pmd_at(mm, mmun_start, pmd, orig_entry);
+		flush_tlb_range(vma, mmun_start, mmun_end);
+		update_mmu_cache_pmd(vma, address, &entry);
+		page_remove_rmap(new_page);
+		goto fail_putback;
+	}
+
 	page_remove_rmap(page);
+
 	/*
 	 * Finish the charge transaction under the page table lock to
 	 * prevent split_huge_page() from dividing up the charge
@@ -1803,6 +1845,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	 */
 	mem_cgroup_end_migration(memcg, page, new_page, true);
 	spin_unlock(ptl);
+	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
 	unlock_page(new_page);
 	unlock_page(page);
@@ -1820,10 +1863,15 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 out_fail:
 	count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
 out_dropref:
-	entry = pmd_mknonnuma(entry);
-	set_pmd_at(mm, haddr, pmd, entry);
-	update_mmu_cache_pmd(vma, address, &entry);
+	ptl = pmd_lock(mm, pmd);
+	if (pmd_same(*pmd, entry)) {
+		entry = pmd_mknonnuma(entry);
+		set_pmd_at(mm, mmun_start, pmd, entry);
+		update_mmu_cache_pmd(vma, address, &entry);
+	}
+	spin_unlock(ptl);
 
+out_unlock:
 	unlock_page(page);
 	put_page(page);
 	return 0;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 26667971c824..bb53a6591aea 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -52,17 +52,21 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			pte_t ptent;
 			bool updated = false;
 
-			ptent = ptep_modify_prot_start(mm, addr, pte);
 			if (!prot_numa) {
+				ptent = ptep_modify_prot_start(mm, addr, pte);
+				if (pte_numa(ptent))
+					ptent = pte_mknonnuma(ptent);
 				ptent = pte_modify(ptent, newprot);
 				updated = true;
 			} else {
 				struct page *page;
 
+				ptent = *pte;
 				page = vm_normal_page(vma, addr, oldpte);
 				if (page) {
 					if (!pte_numa(oldpte)) {
 						ptent = pte_mknuma(ptent);
+						set_pte_at(mm, addr, pte, ptent);
 						updated = true;
 					}
 				}
@@ -79,7 +83,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 			if (updated)
 				pages++;
-			ptep_modify_prot_commit(mm, addr, pte, ptent);
+
+			/* Only !prot_numa always clears the pte */
+			if (!prot_numa)
+				ptep_modify_prot_commit(mm, addr, pte, ptent);
 		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
 
@@ -181,6 +188,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
 	BUG_ON(addr >= end);
 	pgd = pgd_offset(mm, addr);
 	flush_cache_range(vma, addr, end);
+	set_tlb_flush_pending(mm);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
@@ -192,6 +200,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
 	/* Only flush the TLB if we actually modified any entries: */
 	if (pages)
 		flush_tlb_range(vma, start, end);
+	clear_tlb_flush_pending(mm);
 
 	return pages;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 580a5f075ed0..5248fe070aa4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1816,7 +1816,7 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
 
 static bool zone_local(struct zone *local_zone, struct zone *zone)
 {
-	return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE;
+	return local_zone->node == zone->node;
 }
 
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
@@ -1913,18 +1913,17 @@ zonelist_scan:
 			 * page was allocated in should have no effect on the
 			 * time the page has in memory before being reclaimed.
 			 *
-			 * When zone_reclaim_mode is enabled, try to stay in
-			 * local zones in the fastpath. If that fails, the
-			 * slowpath is entered, which will do another pass
-			 * starting with the local zones, but ultimately fall
-			 * back to remote zones that do not partake in the
-			 * fairness round-robin cycle of this zonelist.
+			 * Try to stay in local zones in the fastpath. If
+			 * that fails, the slowpath is entered, which will do
+			 * another pass starting with the local zones, but
+			 * ultimately fall back to remote zones that do not
+			 * partake in the fairness round-robin cycle of this
+			 * zonelist.
 			 */
 			if (alloc_flags & ALLOC_WMARK_LOW) {
 				if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
 					continue;
-				if (zone_reclaim_mode &&
-				    !zone_local(preferred_zone, zone))
+				if (!zone_local(preferred_zone, zone))
 					continue;
 			}
 			/*
@@ -2390,7 +2389,7 @@ static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * thrash fairness information for zones that are not
 		 * actually part of this zonelist's round-robin cycle.
 		 */
-		if (zone_reclaim_mode && !zone_local(preferred_zone, zone))
+		if (!zone_local(preferred_zone, zone))
 			continue;
 		mod_zone_page_state(zone, NR_ALLOC_BATCH,
 				    high_wmark_pages(zone) -
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index cbb38545d9d6..a8b919925934 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -110,9 +110,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
 pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
 		       pte_t *ptep)
 {
+	struct mm_struct *mm = (vma)->vm_mm;
 	pte_t pte;
-	pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
-	if (pte_accessible(pte))
+	pte = ptep_get_and_clear(mm, address, ptep);
+	if (pte_accessible(mm, pte))
 		flush_tlb_page(vma, address);
 	return pte;
 }
@@ -191,6 +192,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 		     pmd_t *pmdp)
 {
+	pmd_t entry = *pmdp;
+	if (pmd_numa(entry))
+		entry = pmd_mknonnuma(entry);
 	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 55c8b8dc9ffb..068522d8502a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -600,7 +600,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
 	spinlock_t *ptl;
 
 	if (unlikely(PageHuge(page))) {
+		/* when pud is not present, pte will be NULL */
 		pte = huge_pte_offset(mm, address);
+		if (!pte)
+			return NULL;
+
 		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
 		goto check;
 	}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ca15f32821fb..36b1443f9ae4 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1161,6 +1161,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 					     neigh->parms->reachable_time :
 					     0)));
 		neigh->nud_state = new;
+		notify = 1;
 	}
 
 	if (lladdr != neigh->ha) {
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index f13bd91d9a56..a313c3fbeb46 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -423,6 +423,7 @@ static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par)
 static struct xt_target synproxy_tg4_reg __read_mostly = {
 	.name		= "SYNPROXY",
 	.family		= NFPROTO_IPV4,
+	.hooks		= (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD),
 	.target		= synproxy_tg4,
 	.targetsize	= sizeof(struct xt_synproxy_info),
 	.checkentry	= synproxy_tg4_check,
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
index fff5ba1a33b7..4a5e94ac314a 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -72,7 +72,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
 	const struct nft_reject *priv = nft_expr_priv(expr);
 
-	if (nla_put_be32(skb, NFTA_REJECT_TYPE, priv->type))
+	if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
 		goto nla_put_failure;
 
 	switch (priv->type) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 62c19fdd102d..f140048334ce 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1600,20 +1600,15 @@ static void flush_stack(struct sock **stack, unsigned int count,
 }
 
 /* For TCP sockets, sk_rx_dst is protected by socket lock
- * For UDP, we use sk_dst_lock to guard against concurrent changes.
+ * For UDP, we use xchg() to guard against concurrent changes.
  */
 static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 {
 	struct dst_entry *old;
 
-	spin_lock(&sk->sk_dst_lock);
-	old = sk->sk_rx_dst;
-	if (likely(old != dst)) {
-		dst_hold(dst);
-		sk->sk_rx_dst = dst;
-		dst_release(old);
-	}
-	spin_unlock(&sk->sk_dst_lock);
+	dst_hold(dst);
+	old = xchg(&sk->sk_rx_dst, dst);
+	dst_release(old);
 }
 
 /*
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index f78f41aca8e9..a0d17270117c 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -446,6 +446,7 @@ static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par)
 static struct xt_target synproxy_tg6_reg __read_mostly = {
 	.name		= "SYNPROXY",
 	.family		= NFPROTO_IPV6,
+	.hooks		= (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD),
 	.target		= synproxy_tg6,
 	.targetsize	= sizeof(struct xt_synproxy_info),
 	.checkentry	= synproxy_tg6_check,
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index 53c452efb40b..5e68b94ee640 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -38,6 +38,7 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
+MODULE_SOFTDEP("pre: sctp");
 MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>");
 MODULE_DESCRIPTION("SCTP snooper");
 MODULE_LICENSE("GPL");
@@ -182,6 +183,20 @@ static struct jprobe sctp_recv_probe = {
 	.entry	= jsctp_sf_eat_sack,
 };
 
+static __init int sctp_setup_jprobe(void)
+{
+	int ret = register_jprobe(&sctp_recv_probe);
+
+	if (ret) {
+		if (request_module("sctp"))
+			goto out;
+		ret = register_jprobe(&sctp_recv_probe);
+	}
+
+out:
+	return ret;
+}
+
 static __init int sctpprobe_init(void)
 {
 	int ret = -ENOMEM;
@@ -202,7 +217,7 @@ static __init int sctpprobe_init(void)
 			 &sctpprobe_fops))
 		goto free_kfifo;
 
-	ret = register_jprobe(&sctp_recv_probe);
+	ret = sctp_setup_jprobe();
 	if (ret)
 		goto remove_proc;
 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index a0ca162e5bd5..a427623ee574 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -718,7 +718,9 @@ static int unix_autobind(struct socket *sock)
 	int err;
 	unsigned int retries = 0;
 
-	mutex_lock(&u->readlock);
+	err = mutex_lock_interruptible(&u->readlock);
+	if (err)
+		return err;
 
 	err = 0;
 	if (u->addr)
@@ -877,7 +879,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		goto out;
 	addr_len = err;
 
-	mutex_lock(&u->readlock);
+	err = mutex_lock_interruptible(&u->readlock);
+	if (err)
+		goto out;
 
 	err = -EINVAL;
 	if (u->addr)
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 6e03b465e44e..a2104671f51d 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1937,6 +1937,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
 		case SNDRV_PCM_STATE_DISCONNECTED:
 			err = -EBADFD;
 			goto _endloop;
+		case SNDRV_PCM_STATE_PAUSED:
+			continue;
 		}
 		if (!tout) {
 			snd_printd("%s write error (DMA or IRQ trouble?)\n",
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 27aa14007cbd..956871d8b3d2 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -3433,6 +3433,10 @@ static void check_probe_mask(struct azx *chip, int dev)
  * white/black-list for enable_msi
  */
 static struct snd_pci_quirk msi_black_list[] = {
+	SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */
+	SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */
+	SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */
+	SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */
 	SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
 	SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
 	SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 34de5dc2fe9b..c5646941539a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4247,12 +4247,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
 	SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x0629, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS),
+	SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x0640, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 8697cedccd21..1ead3c977a51 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -648,7 +648,7 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
 
 	dma_params = ssc_p->dma_params[dir];
 
-	ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
+	ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
 	ssc_writel(ssc_p->ssc->regs, IDR, dma_params->mask->ssc_error);
 
 	pr_debug("%s enabled SSC_SR=0x%08x\n",
@@ -657,6 +657,33 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
 	return 0;
 }
 
+static int atmel_ssc_trigger(struct snd_pcm_substream *substream,
+			     int cmd, struct snd_soc_dai *dai)
+{
+	struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
+	struct atmel_pcm_dma_params *dma_params;
+	int dir;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = 0;
+	else
+		dir = 1;
+
+	dma_params = ssc_p->dma_params[dir];
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
+		break;
+	default:
+		ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
+		break;
+	}
+
+	return 0;
+}
 
 #ifdef CONFIG_PM
 static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai)
@@ -731,6 +758,7 @@ static const struct snd_soc_dai_ops atmel_ssc_dai_ops = {
 	.startup	= atmel_ssc_startup,
 	.shutdown	= atmel_ssc_shutdown,
 	.prepare	= atmel_ssc_prepare,
+	.trigger	= atmel_ssc_trigger,
 	.hw_params	= atmel_ssc_hw_params,
 	.set_fmt	= atmel_ssc_set_dai_fmt,
 	.set_clkdiv	= atmel_ssc_set_dai_clkdiv,
diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c
index 1b372283bd01..7d6a9055874b 100644
--- a/sound/soc/atmel/sam9x5_wm8731.c
+++ b/sound/soc/atmel/sam9x5_wm8731.c
@@ -109,7 +109,7 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
 	dai->stream_name = "WM8731 PCM";
 	dai->codec_dai_name = "wm8731-hifi";
 	dai->init = sam9x5_wm8731_init;
-	dai->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+	dai->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF
 		| SND_SOC_DAIFMT_CBM_CFM;
 
 	ret = snd_soc_of_parse_card_name(card, "atmel,model");
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 99b359e19d35..0ab2dc296474 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -1012,7 +1012,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
1012 { "AEC Loopback", "HPOUT3L", "OUT3L" }, 1012 { "AEC Loopback", "HPOUT3L", "OUT3L" },
1013 { "AEC Loopback", "HPOUT3R", "OUT3R" }, 1013 { "AEC Loopback", "HPOUT3R", "OUT3R" },
1014 { "HPOUT3L", NULL, "OUT3L" }, 1014 { "HPOUT3L", NULL, "OUT3L" },
1015 { "HPOUT3R", NULL, "OUT3L" }, 1015 { "HPOUT3R", NULL, "OUT3R" },
1016 1016
1017 { "AEC Loopback", "SPKOUTL", "OUT4L" }, 1017 { "AEC Loopback", "SPKOUTL", "OUT4L" },
1018 { "SPKOUTLN", NULL, "OUT4L" }, 1018 { "SPKOUTLN", NULL, "OUT4L" },
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 3938fb1c203e..53bbfac6a83a 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1444,7 +1444,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 
 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_DSP_B:
-		aif1 |= WM8904_AIF_LRCLK_INV;
+		aif1 |= 0x3 | WM8904_AIF_LRCLK_INV;
 	case SND_SOC_DAIFMT_DSP_A:
 		aif1 |= 0x3;
 		break;
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 543c5c2631b6..0f17ed3e29f4 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2439,7 +2439,20 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec)
 	snd_soc_update_bits(codec, WM8962_CLOCKING_4,
 			    WM8962_SYSCLK_RATE_MASK, clocking4);
 
+	/* DSPCLK_DIV can be only generated correctly after enabling SYSCLK.
+	 * So we here provisionally enable it and then disable it afterward
+	 * if current bias_level hasn't reached SND_SOC_BIAS_ON.
+	 */
+	if (codec->dapm.bias_level != SND_SOC_BIAS_ON)
+		snd_soc_update_bits(codec, WM8962_CLOCKING2,
+				WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA);
+
 	dspclk = snd_soc_read(codec, WM8962_CLOCKING1);
+
+	if (codec->dapm.bias_level != SND_SOC_BIAS_ON)
+		snd_soc_update_bits(codec, WM8962_CLOCKING2,
+				WM8962_SYSCLK_ENA_MASK, 0);
+
 	if (dspclk < 0) {
 		dev_err(codec->dev, "Failed to read DSPCLK: %d\n", dspclk);
 		return;
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 46ec0e9744d4..4fbcab63e61f 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1474,13 +1474,17 @@ static int wm_adsp2_ena(struct wm_adsp *dsp)
 		return ret;
 
 	/* Wait for the RAM to start, should be near instantaneous */
-	count = 0;
-	do {
+	for (count = 0; count < 10; ++count) {
 		ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1,
 				  &val);
 		if (ret != 0)
 			return ret;
-	} while (!(val & ADSP2_RAM_RDY) && ++count < 10);
+
+		if (val & ADSP2_RAM_RDY)
+			break;
+
+		msleep(1);
+	}
 
 	if (!(val & ADSP2_RAM_RDY)) {
 		adsp_err(dsp, "Failed to start DSP RAM\n");
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 61e48852b9e8..3fd76bc391de 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -130,8 +130,6 @@ static int imx_wm8962_set_bias_level(struct snd_soc_card *card,
 		break;
 	}
 
-	dapm->bias_level = level;
-
 	return 0;
 }
 
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 0b18f654b413..3920a5e8125f 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -473,17 +473,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
 	.playback = {
 		.channels_min = 1,
 		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000_192000 |
-			 SNDRV_PCM_RATE_CONTINUOUS |
-			 SNDRV_PCM_RATE_KNOT,
+		.rates = SNDRV_PCM_RATE_CONTINUOUS,
+		.rate_min = 5512,
+		.rate_max = 192000,
 		.formats = KIRKWOOD_I2S_FORMATS,
 	},
 	.capture = {
 		.channels_min = 1,
 		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000_192000 |
-			 SNDRV_PCM_RATE_CONTINUOUS |
-			 SNDRV_PCM_RATE_KNOT,
+		.rates = SNDRV_PCM_RATE_CONTINUOUS,
+		.rate_min = 5512,
+		.rate_max = 192000,
 		.formats = KIRKWOOD_I2S_FORMATS,
 	},
 	.ops = &kirkwood_i2s_dai_ops,
@@ -494,17 +494,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
 	.playback = {
 		.channels_min = 1,
 		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000_192000 |
-			 SNDRV_PCM_RATE_CONTINUOUS |
-			 SNDRV_PCM_RATE_KNOT,
+		.rates = SNDRV_PCM_RATE_CONTINUOUS,
+		.rate_min = 5512,
+		.rate_max = 192000,
 		.formats = KIRKWOOD_SPDIF_FORMATS,
 	},
 	.capture = {
 		.channels_min = 1,
 		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000_192000 |
-			 SNDRV_PCM_RATE_CONTINUOUS |
-			 SNDRV_PCM_RATE_KNOT,
+		.rates = SNDRV_PCM_RATE_CONTINUOUS,
+		.rate_min = 5512,
+		.rate_max = 192000,
 		.formats = KIRKWOOD_SPDIF_FORMATS,
 	},
 	.ops = &kirkwood_i2s_dai_ops,
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index cbc9c96ce1f4..41949af3baae 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -305,6 +305,20 @@ static void dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
 	}
 }
 
+static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
+{
+	unsigned int i;
+
+	for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE;
+	     i++) {
+		if (!pcm->chan[i])
+			continue;
+		dma_release_channel(pcm->chan[i]);
+		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
+			break;
+	}
+}
+
 /**
  * snd_dmaengine_pcm_register - Register a dmaengine based PCM device
  * @dev: The parent device for the PCM device
@@ -315,6 +329,7 @@ int snd_dmaengine_pcm_register(struct device *dev,
 	const struct snd_dmaengine_pcm_config *config, unsigned int flags)
 {
 	struct dmaengine_pcm *pcm;
+	int ret;
 
 	pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
 	if (!pcm)
@@ -326,11 +341,20 @@ int snd_dmaengine_pcm_register(struct device *dev,
 	dmaengine_pcm_request_chan_of(pcm, dev);
 
 	if (flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
-		return snd_soc_add_platform(dev, &pcm->platform,
+		ret = snd_soc_add_platform(dev, &pcm->platform,
 				&dmaengine_no_residue_pcm_platform);
 	else
-		return snd_soc_add_platform(dev, &pcm->platform,
+		ret = snd_soc_add_platform(dev, &pcm->platform,
 				&dmaengine_pcm_platform);
+	if (ret)
+		goto err_free_dma;
+
+	return 0;
+
+err_free_dma:
+	dmaengine_pcm_release_chan(pcm);
+	kfree(pcm);
+	return ret;
 }
335EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register); 359EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
336 360
@@ -345,7 +369,6 @@ void snd_dmaengine_pcm_unregister(struct device *dev)
 {
 	struct snd_soc_platform *platform;
 	struct dmaengine_pcm *pcm;
-	unsigned int i;
 
 	platform = snd_soc_lookup_platform(dev);
 	if (!platform)
@@ -353,15 +376,8 @@ void snd_dmaengine_pcm_unregister(struct device *dev)
 
 	pcm = soc_platform_to_pcm(platform);
 
-	for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
-		if (pcm->chan[i]) {
-			dma_release_channel(pcm->chan[i]);
-			if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
-				break;
-		}
-	}
-
 	snd_soc_remove_platform(platform);
+	dmaengine_pcm_release_chan(pcm);
 	kfree(pcm);
 }
 EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 11a90cd027fa..891b9a9bcbf8 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -600,12 +600,13 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
 	struct snd_soc_platform *platform = rtd->platform;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	struct snd_soc_codec *codec = rtd->codec;
+	bool playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
 	/* apply codec digital mute */
-	if (!codec->active)
+	if ((playback && codec_dai->playback_active == 1) ||
+	    (!playback && codec_dai->capture_active == 1))
 		snd_soc_dai_digital_mute(codec_dai, 1, substream->stream);
 
 	/* free any machine hw params */
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index 364bf6a907e1..8c819f811470 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -74,7 +74,7 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
 				unsigned int fmt)
 {
 	struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-	unsigned int mask, val;
+	unsigned int mask = 0, val = 0;
 
 	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
 	case SND_SOC_DAIFMT_NB_NF:
@@ -83,10 +83,10 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
 		return -EINVAL;
 	}
 
-	mask = TEGRA20_I2S_CTRL_MASTER_ENABLE;
+	mask |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
 	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 	case SND_SOC_DAIFMT_CBS_CFS:
-		val = TEGRA20_I2S_CTRL_MASTER_ENABLE;
+		val |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
 		break;
 	case SND_SOC_DAIFMT_CBM_CFM:
 		break;
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 08bc6931c7c7..8c7c1028e579 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -67,15 +67,15 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream,
 {
 	struct device *dev = dai->dev;
 	struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
-	unsigned int mask, val;
+	unsigned int mask = 0, val = 0;
 	int ret, spdifclock;
 
-	mask = TEGRA20_SPDIF_CTRL_PACK |
+	mask |= TEGRA20_SPDIF_CTRL_PACK |
 	       TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
-		val = TEGRA20_SPDIF_CTRL_PACK |
+		val |= TEGRA20_SPDIF_CTRL_PACK |
 		      TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
 		break;
 	default:
 		return -EINVAL;
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index 231a785b3921..02247fee1cf7 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -118,7 +118,7 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
 				unsigned int fmt)
 {
 	struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-	unsigned int mask, val;
+	unsigned int mask = 0, val = 0;
 
 	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
 	case SND_SOC_DAIFMT_NB_NF:
@@ -127,10 +127,10 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
 		return -EINVAL;
 	}
 
-	mask = TEGRA30_I2S_CTRL_MASTER_ENABLE;
+	mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
 	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 	case SND_SOC_DAIFMT_CBS_CFS:
-		val = TEGRA30_I2S_CTRL_MASTER_ENABLE;
+		val |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
 		break;
 	case SND_SOC_DAIFMT_CBM_CFM:
 		break;
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
index dc4de3762111..bcf1d2f0b791 100644
--- a/tools/power/cpupower/utils/cpupower-set.c
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -18,9 +18,9 @@
18#include "helpers/bitmask.h" 18#include "helpers/bitmask.h"
19 19
20static struct option set_opts[] = { 20static struct option set_opts[] = {
21 { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, 21 { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'},
22 { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, 22 { .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'},
23 { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, 23 { .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'},
24 { }, 24 { },
25}; 25};
26 26