-rw-r--r-- Documentation/DocBook/drm.tmpl | 12
-rw-r--r-- Documentation/debugging-via-ohci1394.txt | 13
-rw-r--r-- Documentation/device-mapper/thin-provisioning.txt | 5
-rw-r--r-- Documentation/email-clients.txt | 15
-rw-r--r-- Documentation/filesystems/proc.txt | 5
-rw-r--r-- Documentation/hwmon/sysfs-interface | 14
-rw-r--r-- Documentation/java.txt | 8
-rw-r--r-- Documentation/virtual/kvm/api.txt | 2
-rw-r--r-- MAINTAINERS | 11
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/arm/boot/dts/armada-380.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/armada-385.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/at91sam9260.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/exynos4412-trats2.dts | 2
-rw-r--r-- arch/arm/boot/dts/exynos5250-arndale.dts | 1
-rw-r--r-- arch/arm/boot/dts/exynos5420-arndale-octa.dts | 12
-rw-r--r-- arch/arm/boot/dts/exynos5420.dtsi | 24
-rw-r--r-- arch/arm/common/bL_switcher.c | 10
-rw-r--r-- arch/arm/configs/exynos_defconfig | 1
-rw-r--r-- arch/arm/include/asm/trusted_foundations.h | 2
-rw-r--r-- arch/arm/include/asm/uaccess.h | 3
-rw-r--r-- arch/arm/kernel/entry-header.S | 4
-rw-r--r-- arch/arm/kernel/unwind.c | 2
-rw-r--r-- arch/arm/mach-at91/at91sam9260_devices.c | 8
-rw-r--r-- arch/arm/mach-exynos/firmware.c | 15
-rw-r--r-- arch/arm/mach-imx/devices/platform-ipu-core.c | 2
-rw-r--r-- arch/arm/mach-mvebu/mvebu-soc-id.c | 13
-rw-r--r-- arch/arm/mach-omap2/board-flash.c | 2
-rw-r--r-- arch/arm/mach-omap2/cclock3xxx_data.c | 3
-rw-r--r-- arch/arm/mach-omap2/cpuidle44xx.c | 25
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_54xx_data.c | 2
-rw-r--r-- arch/arm/mm/proc-v7m.S | 8
-rw-r--r-- arch/arm/plat-omap/dma.c | 10
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 2
-rw-r--r-- arch/mips/Makefile | 2
-rw-r--r-- arch/mips/include/asm/cpu-info.h | 4
-rw-r--r-- arch/mips/include/uapi/asm/unistd.h | 6
-rw-r--r-- arch/mips/kernel/branch.c | 8
-rw-r--r-- arch/mips/kernel/ptrace.c | 14
-rw-r--r-- arch/mips/kernel/traps.c | 4
-rw-r--r-- arch/mips/loongson/common/cs5536/cs5536_mfgpt.c | 11
-rw-r--r-- arch/mips/mm/page.c | 4
-rw-r--r-- arch/mips/mti-malta/malta-memory.c | 2
-rw-r--r-- arch/mips/pci/pci-rc32434.c | 1
-rw-r--r-- arch/powerpc/Makefile | 4
-rw-r--r-- arch/powerpc/include/asm/ppc_asm.h | 7
-rw-r--r-- arch/powerpc/include/asm/sections.h | 11
-rw-r--r-- arch/powerpc/include/asm/systbl.h | 1
-rw-r--r-- arch/powerpc/include/asm/unistd.h | 2
-rw-r--r-- arch/powerpc/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/powerpc/kernel/kvm.c | 2
-rw-r--r-- arch/powerpc/kernel/machine_kexec_64.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s.c | 6
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 104
-rw-r--r-- arch/powerpc/kvm/book3s_pr.c | 6
-rw-r--r-- arch/powerpc/mm/hash_utils_64.c | 4
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 1
-rw-r--r-- arch/x86/include/asm/page_64_types.h | 2
-rw-r--r-- arch/x86/kvm/vmx.c | 7
-rw-r--r-- arch/x86/kvm/x86.c | 6
-rw-r--r-- arch/x86/vdso/vdso32-setup.c | 3
-rw-r--r-- drivers/acpi/thermal.c | 2
-rw-r--r-- drivers/ata/libata-core.c | 8
-rw-r--r-- drivers/block/virtio_blk.c | 4
-rw-r--r-- drivers/clk/clk-divider.c | 2
-rw-r--r-- drivers/clk/st/clkgen-pll.c | 4
-rw-r--r-- drivers/clk/tegra/clk-pll.c | 64
-rw-r--r-- drivers/clocksource/tcb_clksrc.c | 8
-rw-r--r-- drivers/clocksource/timer-marco.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq-cpu0.c | 16
-rw-r--r-- drivers/cpufreq/cpufreq_governor.c | 6
-rw-r--r-- drivers/dma/dmaengine.c | 2
-rw-r--r-- drivers/dma/dw/core.c | 11
-rw-r--r-- drivers/dma/mv_xor.c | 8
-rw-r--r-- drivers/dma/sa11x0-dma.c | 4
-rw-r--r-- drivers/firewire/core.h | 4
-rw-r--r-- drivers/firewire/ohci.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 30
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 365
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 9
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 130
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 11
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 12
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 21
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vm.c | 6
-rw-r--r-- drivers/hwmon/Kconfig | 2
-rw-r--r-- drivers/hwmon/ntc_thermistor.c | 15
-rw-r--r-- drivers/input/keyboard/Kconfig | 2
-rw-r--r-- drivers/input/keyboard/pxa27x_keypad.c | 7
-rw-r--r-- drivers/input/mouse/Kconfig | 2
-rw-r--r-- drivers/input/mouse/synaptics.c | 166
-rw-r--r-- drivers/input/serio/ambakmi.c | 3
-rw-r--r-- drivers/input/touchscreen/Kconfig | 2
-rw-r--r-- drivers/md/dm-cache-target.c | 2
-rw-r--r-- drivers/md/dm-mpath.c | 14
-rw-r--r-- drivers/md/dm-thin.c | 12
-rw-r--r-- drivers/md/md.c | 5
-rw-r--r-- drivers/net/can/led.c | 3
-rw-r--r-- drivers/net/ethernet/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 29
-rw-r--r-- drivers/net/ethernet/ibm/emac/mal.c | 5
-rw-r--r-- drivers/net/ethernet/ibm/emac/mal.h | 20
-rw-r--r-- drivers/net/ethernet/ibm/emac/rgmii.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/port.c | 117
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 1
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 32
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.h | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 1
-rw-r--r-- drivers/net/team/team.c | 7
-rw-r--r-- drivers/net/usb/ipheth.c | 10
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 6
-rw-r--r-- drivers/pinctrl/vt8500/pinctrl-wmt.c | 23
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 3
-rw-r--r-- drivers/staging/comedi/drivers/ni_daq_700.c | 2
-rw-r--r-- drivers/staging/rtl8192e/rtllib_tx.c | 2
-rw-r--r-- drivers/staging/speakup/main.c | 1
-rw-r--r-- drivers/staging/speakup/selection.c | 52
-rw-r--r-- drivers/staging/speakup/speakup.h | 1
-rw-r--r-- drivers/staging/speakup/speakup_acntsa.c | 8
-rw-r--r-- drivers/tty/tty_buffer.c | 2
-rw-r--r-- drivers/usb/core/driver.c | 9
-rw-r--r-- drivers/usb/core/hub.c | 15
-rw-r--r-- drivers/usb/host/pci-quirks.c | 7
-rw-r--r-- drivers/usb/host/xhci-mem.c | 20
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 5
-rw-r--r-- drivers/usb/serial/io_ti.c | 2
-rw-r--r-- drivers/usb/serial/io_usbvend.h | 2
-rw-r--r-- drivers/usb/serial/option.c | 2
-rw-r--r-- fs/afs/cmservice.c | 19
-rw-r--r-- fs/afs/internal.h | 2
-rw-r--r-- fs/afs/rxrpc.c | 86
-rw-r--r-- fs/dcache.c | 153
-rw-r--r-- fs/nfsd/nfs4acl.c | 2
-rw-r--r-- fs/nfsd/nfs4state.c | 15
-rw-r--r-- fs/splice.c | 6
-rw-r--r-- include/linux/amba/bus.h | 1
-rw-r--r-- include/linux/dmaengine.h | 1
-rw-r--r-- include/linux/if_team.h | 1
-rw-r--r-- include/linux/netlink.h | 7
-rw-r--r-- include/linux/omap-dma.h | 2
-rw-r--r-- include/net/inetpeer.h | 1
-rw-r--r-- include/uapi/linux/audit.h | 2
-rw-r--r-- include/uapi/linux/usb/Kbuild | 1
-rw-r--r-- include/uapi/linux/usb/cdc-wdm.h | 2
-rw-r--r-- kernel/cpu.c | 6
-rw-r--r-- kernel/futex.c | 52
-rw-r--r-- kernel/kexec.c | 8
-rw-r--r-- kernel/locking/rtmutex.c | 32
-rw-r--r-- kernel/sched/core.c | 55
-rw-r--r-- kernel/sched/cpudeadline.c | 33
-rw-r--r-- kernel/sched/cpudeadline.h | 6
-rw-r--r-- kernel/sched/cpupri.c | 7
-rw-r--r-- kernel/sched/cpupri.h | 2
-rw-r--r-- lib/nlattr.c | 4
-rw-r--r-- net/batman-adv/multicast.c | 6
-rw-r--r-- net/bridge/br_fdb.c | 8
-rw-r--r-- net/bridge/br_input.c | 4
-rw-r--r-- net/bridge/br_private.h | 7
-rw-r--r-- net/bridge/br_vlan.c | 28
-rw-r--r-- net/core/dev.c | 35
-rw-r--r-- net/core/filter.c | 7
-rw-r--r-- net/core/rtnetlink.c | 10
-rw-r--r-- net/ipv4/tcp_input.c | 11
-rw-r--r-- net/ipv6/output_core.c | 1
-rw-r--r-- net/netfilter/ipvs/ip_vs_core.c | 15
-rw-r--r-- net/netlink/af_netlink.c | 7
-rw-r--r-- net/xfrm/xfrm_user.c | 36
-rw-r--r-- sound/core/pcm_dmaengine.c | 6
-rw-r--r-- sound/pci/hda/hda_intel.c | 3
181 files changed, 1656 insertions, 925 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 677a02553ec0..ba60d93c1855 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -79,7 +79,7 @@
 <partintro>
 <para>
 This first part of the DRM Developer's Guide documents core DRM code,
-helper libraries for writting drivers and generic userspace interfaces
+helper libraries for writing drivers and generic userspace interfaces
 exposed by DRM drivers.
 </para>
 </partintro>
@@ -459,7 +459,7 @@ char *date;</synopsis>
 providing a solution to every graphics memory-related problems, GEM
 identified common code between drivers and created a support library to
 share it. GEM has simpler initialization and execution requirements than
-TTM, but has no video RAM management capabitilies and is thus limited to
+TTM, but has no video RAM management capabilities and is thus limited to
 UMA devices.
 </para>
 <sect2>
@@ -889,7 +889,7 @@ int (*prime_fd_to_handle)(struct drm_device *dev,
 vice versa. Drivers must use the kernel dma-buf buffer sharing framework
 to manage the PRIME file descriptors. Similar to the mode setting
 API PRIME is agnostic to the underlying buffer object manager, as
-long as handles are 32bit unsinged integers.
+long as handles are 32bit unsigned integers.
 </para>
 <para>
 While non-GEM drivers must implement the operations themselves, GEM
@@ -2356,7 +2356,7 @@ void intel_crt_init(struct drm_device *dev)
 first create properties and then create and associate individual instances
 of those properties to objects. A property can be instantiated multiple
 times and associated with different objects. Values are stored in property
-instances, and all other property information are stored in the propery
+instances, and all other property information are stored in the property
 and shared between all instances of the property.
 </para>
 <para>
@@ -2697,10 +2697,10 @@ int num_ioctls;</synopsis>
 <sect1>
 <title>Legacy Support Code</title>
 <para>
-The section very brievely covers some of the old legacy support code which
+The section very briefly covers some of the old legacy support code which
 is only used by old DRM drivers which have done a so-called shadow-attach
 to the underlying device instead of registering as a real driver. This
-also includes some of the old generic buffer mangement and command
+also includes some of the old generic buffer management and command
 submission code. Do not use any of this in new and modern drivers.
 </para>
 
diff --git a/Documentation/debugging-via-ohci1394.txt b/Documentation/debugging-via-ohci1394.txt
index fa0151a712f9..5c9a567b3fac 100644
--- a/Documentation/debugging-via-ohci1394.txt
+++ b/Documentation/debugging-via-ohci1394.txt
@@ -25,9 +25,11 @@ using data transfer rates in the order of 10MB/s or more.
 With most FireWire controllers, memory access is limited to the low 4 GB
 of physical address space. This can be a problem on IA64 machines where
 memory is located mostly above that limit, but it is rarely a problem on
-more common hardware such as x86, x86-64 and PowerPC. However, at least
-Agere/LSI FW643e and FW643e2 controllers are known to support access to
-physical addresses above 4 GB.
+more common hardware such as x86, x86-64 and PowerPC.
+
+At least LSI FW643e and FW643e2 controllers are known to support access to
+physical addresses above 4 GB, but this feature is currently not enabled by
+Linux.
 
 Together with a early initialization of the OHCI-1394 controller for debugging,
 this facility proved most useful for examining long debugs logs in the printk
@@ -101,8 +103,9 @@ Step-by-step instructions for using firescope with early OHCI initialization:
    compliant, they are based on TI PCILynx chips and require drivers for Win-
    dows operating systems.
 
-   The mentioned kernel log message contains ">4 GB phys DMA" in case of
-   OHCI-1394 controllers which support accesses above this limit.
+   The mentioned kernel log message contains the string "physUB" if the
+   controller implements a writable Physical Upper Bound register. This is
+   required for physical DMA above 4 GB (but not utilized by Linux yet).
 
 2) Establish a working FireWire cable connection:
 
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 05a27e9442bd..2f5173500bd9 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -309,7 +309,10 @@ ii) Status
     error_if_no_space|queue_if_no_space
 	If the pool runs out of data or metadata space, the pool will
 	either queue or error the IO destined to the data device. The
-	default is to queue the IO until more space is added.
+	default is to queue the IO until more space is added or the
+	'no_space_timeout' expires. The 'no_space_timeout' dm-thin-pool
+	module parameter can be used to change this timeout -- it
+	defaults to 60 seconds but may be disabled using a value of 0.
 
 iii) Messages
 
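The 'no_space_timeout' control documented above is an ordinary module parameter. As a rough sketch of how such a knob is typically wired up (illustrative only, not the actual drivers/md/dm-thin.c source; the real variable name and permission bits may differ):

    /* Illustrative sketch, not the real dm-thin-pool code. */
    #include <linux/module.h>

    /* Seconds a full pool queues IO before erroring it; 0 disables. */
    static unsigned int no_space_timeout = 60;
    module_param(no_space_timeout, uint, 0644);
    MODULE_PARM_DESC(no_space_timeout,
                     "Out of data space timeout in seconds (0 = disabled)");

With writable permissions, a parameter like this can also be changed at runtime through /sys/module/dm_thin_pool/parameters/no_space_timeout.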
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index e9f5daccbd02..4e30ebaa9e5b 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -201,20 +201,15 @@ To beat some sense out of the internal editor, do this:
 
 - Edit your Thunderbird config settings so that it won't use format=flowed.
   Go to "edit->preferences->advanced->config editor" to bring up the
-  thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to
-  "false".
+  thunderbird's registry editor.
 
-- Disable HTML Format: Set "mail.identity.id1.compose_html" to "false".
+- Set "mailnews.send_plaintext_flowed" to "false"
 
-- Enable "preformat" mode: Set "editor.quotesPreformatted" to "true".
+- Set "mailnews.wraplength" from "72" to "0"
 
-- Enable UTF8: Set "prefs.converted-to-utf8" to "true".
+- "View" > "Message Body As" > "Plain Text"
 
-- Install the "toggle wordwrap" extension. Download the file from:
-  https://addons.mozilla.org/thunderbird/addon/2351/
-  Then go to "tools->add ons", select "install" at the bottom of the screen,
-  and browse to where you saved the .xul file. This adds an "Enable
-  Wordwrap" entry under the Options menu of the message composer.
+- "View" > "Character Encoding" > "Unicode (UTF-8)"
 
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 TkRat (GUI)
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 8b9cd8eb3f91..264bcde0c51c 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1245,8 +1245,9 @@ second).  The meanings of the columns are as follows, from left to right:
 
 The "intr" line gives counts of interrupts serviced since boot time, for each
 of the possible system interrupts. The first column is the total of all
-interrupts serviced; each subsequent column is the total for that particular
-interrupt.
+interrupts serviced including unnumbered architecture specific interrupts;
+each subsequent column is the total for that particular numbered interrupt.
+Unnumbered interrupts are not shown, only summed into the total.
 
 The "ctxt" line gives the total number of context switches across all CPUs.
 
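For consumers of this field, a minimal userspace sketch (a hypothetical standalone program, not part of the patch) that reads the first "intr" column -- the grand total that now explicitly includes the unnumbered interrupts -- could look like:

    /* Sketch: print the total interrupt count from /proc/stat. */
    #include <stdio.h>

    int main(void)
    {
            char line[4096];
            unsigned long long total;
            FILE *f = fopen("/proc/stat", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f)) {
                    /* The first field after "intr" is the sum of all columns. */
                    if (sscanf(line, "intr %llu", &total) == 1)
                            printf("total interrupts: %llu\n", total);
            }
            fclose(f);
            return 0;
    }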
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index 79f8257dd790..2cc95ad46604 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -327,6 +327,13 @@ temp[1-*]_max_hyst
 		from the max value.
 		RW
 
+temp[1-*]_min_hyst
+		Temperature hysteresis value for min limit.
+		Unit: millidegree Celsius
+		Must be reported as an absolute temperature, NOT a delta
+		from the min value.
+		RW
+
 temp[1-*]_input Temperature input value.
 		Unit: millidegree Celsius
 		RO
@@ -362,6 +369,13 @@ temp[1-*]_lcrit	Temperature critical min value, typically lower than
 		Unit: millidegree Celsius
 		RW
 
+temp[1-*]_lcrit_hyst
+		Temperature hysteresis value for critical min limit.
+		Unit: millidegree Celsius
+		Must be reported as an absolute temperature, NOT a delta
+		from the critical min value.
+		RW
+
 temp[1-*]_offset
 		Temperature offset which is added to the temperature reading
 		by the chip.
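Like the existing temp attributes, both new files hold plain integers in millidegree Celsius. A short userspace sketch (the hwmon path below is hypothetical and varies per system) that reads one back as degrees:

    /* Sketch: read an hwmon millidegree attribute as degrees Celsius. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            long md;
            FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_min_hyst", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%ld", &md) != 1) {
                    fclose(f);
                    return 1;
            }
            printf("min hysteresis: %ld.%03ld C\n", md / 1000, labs(md % 1000));
            fclose(f);
            return 0;
    }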
diff --git a/Documentation/java.txt b/Documentation/java.txt
index e6a723281547..418020584ccc 100644
--- a/Documentation/java.txt
+++ b/Documentation/java.txt
@@ -188,6 +188,9 @@ shift
 #define CP_METHODREF 10
 #define CP_INTERFACEMETHODREF 11
 #define CP_NAMEANDTYPE 12
+#define CP_METHODHANDLE 15
+#define CP_METHODTYPE 16
+#define CP_INVOKEDYNAMIC 18
 
 /* Define some commonly used error messages */
 
@@ -242,14 +245,19 @@ void skip_constant(FILE *classfile, u_int16_t *cur)
 		break;
 	case CP_CLASS:
 	case CP_STRING:
+	case CP_METHODTYPE:
 		seekerr = fseek(classfile, 2, SEEK_CUR);
 		break;
+	case CP_METHODHANDLE:
+		seekerr = fseek(classfile, 3, SEEK_CUR);
+		break;
 	case CP_INTEGER:
 	case CP_FLOAT:
 	case CP_FIELDREF:
 	case CP_METHODREF:
 	case CP_INTERFACEMETHODREF:
 	case CP_NAMEANDTYPE:
+	case CP_INVOKEDYNAMIC:
 		seekerr = fseek(classfile, 4, SEEK_CUR);
 		break;
 	case CP_LONG:
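The seek distances follow directly from the class file format: after its one-byte tag, CONSTANT_MethodType carries a single two-byte descriptor index, CONSTANT_MethodHandle a one-byte reference kind plus a two-byte index (three bytes), and CONSTANT_InvokeDynamic two two-byte indices (four bytes), which is why it can share the existing four-byte fseek() case.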
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index a9380ba54c8e..b4f53653c106 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2126,7 +2126,7 @@ into the hash PTE second double word).
 4.75 KVM_IRQFD
 
 Capability: KVM_CAP_IRQFD
-Architectures: x86
+Architectures: x86 s390
 Type: vm ioctl
 Parameters: struct kvm_irqfd (in)
 Returns: 0 on success, -1 on error
diff --git a/MAINTAINERS b/MAINTAINERS
index dd33abf44766..a3287090f12c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3158,10 +3158,9 @@ S: Maintained
 F: drivers/scsi/eata_pio.*
 
 EBTABLES
-M: Bart De Schuymer <bart.de.schuymer@pandora.be>
 L: netfilter-devel@vger.kernel.org
 W: http://ebtables.sourceforge.net/
-S: Maintained
+S: Orphan
 F: include/linux/netfilter_bridge/ebt_*.h
 F: include/uapi/linux/netfilter_bridge/ebt_*.h
 F: net/bridge/netfilter/ebt*.c
@@ -7410,6 +7409,14 @@ F: drivers/rpmsg/
 F: Documentation/rpmsg.txt
 F: include/linux/rpmsg.h
 
+RESET CONTROLLER FRAMEWORK
+M: Philipp Zabel <p.zabel@pengutronix.de>
+S: Maintained
+F: drivers/reset/
+F: Documentation/devicetree/bindings/reset/
+F: include/linux/reset.h
+F: include/linux/reset-controller.h
+
 RFKILL
 M: Johannes Berg <johannes@sipsolutions.net>
 L: linux-wireless@vger.kernel.org
diff --git a/Makefile b/Makefile
index 9d993787afe0..cdaa5b6a1c4d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 15 2PATCHLEVEL = 15
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc6 4EXTRAVERSION = -rc8
5NAME = Shuffling Zombie Juror 5NAME = Shuffling Zombie Juror
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi
index 068031f0f263..6d0f03c98ee9 100644
--- a/arch/arm/boot/dts/armada-380.dtsi
+++ b/arch/arm/boot/dts/armada-380.dtsi
@@ -99,7 +99,7 @@
 			pcie@3,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
-				reg = <0x1000 0 0 0 0>;
+				reg = <0x1800 0 0 0 0>;
 				#address-cells = <3>;
 				#size-cells = <2>;
 				#interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi
index e2919f02e1d4..da801964a257 100644
--- a/arch/arm/boot/dts/armada-385.dtsi
+++ b/arch/arm/boot/dts/armada-385.dtsi
@@ -110,7 +110,7 @@
 			pcie@3,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
-				reg = <0x1000 0 0 0 0>;
+				reg = <0x1800 0 0 0 0>;
 				#address-cells = <3>;
 				#size-cells = <2>;
 				#interrupt-cells = <1>;
@@ -131,7 +131,7 @@
 			pcie@4,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
-				reg = <0x1000 0 0 0 0>;
+				reg = <0x2000 0 0 0 0>;
 				#address-cells = <3>;
 				#size-cells = <2>;
 				#interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index 366fc2cbcd64..c0e0eae16a27 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -641,7 +641,7 @@
 				trigger@3 {
 					reg = <3>;
 					trigger-name = "external";
-					trigger-value = <0x13>;
+					trigger-value = <0xd>;
 					trigger-external;
 				};
 			};
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 9583563dd0ef..8a558b7ac999 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -503,7 +503,7 @@
 	status = "okay";
 
 	ak8975@0c {
-		compatible = "ak,ak8975";
+		compatible = "asahi-kasei,ak8975";
 		reg = <0x0c>;
 		gpios = <&gpj0 7 0>;
 	};
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index 090f9830b129..cde19c818667 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -107,6 +107,7 @@
 			regulator-name = "VDD_IOPERI_1.8V";
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
+			regulator-always-on;
 			op_mode = <1>;
 		};
 
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index 80a3bf4c5986..896a2a6619e0 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -364,16 +364,4 @@
 			gpio-key,wakeup;
 		};
 	};
-
-	amba {
-		mdma1: mdma@11C10000 {
-			/*
-			 * MDMA1 can support both secure and non-secure
-			 * AXI transactions. When this is enabled in the kernel
-			 * for boards that run in secure mode, we are getting
-			 * imprecise external aborts causing the kernel to oops.
-			 */
-			status = "disabled";
-		};
-	};
 };
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index c3a9a66c5767..b69fbcb7dcb8 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -219,16 +219,6 @@
 		reg = <0x100440C0 0x20>;
 	};
 
-	mau_pd: power-domain@100440E0 {
-		compatible = "samsung,exynos4210-pd";
-		reg = <0x100440E0 0x20>;
-	};
-
-	g2d_pd: power-domain@10044100 {
-		compatible = "samsung,exynos4210-pd";
-		reg = <0x10044100 0x20>;
-	};
-
 	msc_pd: power-domain@10044120 {
 		compatible = "samsung,exynos4210-pd";
 		reg = <0x10044120 0x20>;
@@ -336,6 +326,13 @@
 		#dma-cells = <1>;
 		#dma-channels = <8>;
 		#dma-requests = <1>;
+		/*
+		 * MDMA1 can support both secure and non-secure
+		 * AXI transactions. When this is enabled in the kernel
+		 * for boards that run in secure mode, we are getting
+		 * imprecise external aborts causing the kernel to oops.
+		 */
+		status = "disabled";
 	};
 };
 
@@ -385,7 +382,7 @@
 	spi_0: spi@12d20000 {
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x12d20000 0x100>;
-		interrupts = <0 66 0>;
+		interrupts = <0 68 0>;
 		dmas = <&pdma0 5
 			&pdma0 4>;
 		dma-names = "tx", "rx";
@@ -401,7 +398,7 @@
 	spi_1: spi@12d30000 {
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x12d30000 0x100>;
-		interrupts = <0 67 0>;
+		interrupts = <0 69 0>;
 		dmas = <&pdma1 5
 			&pdma1 4>;
 		dma-names = "tx", "rx";
@@ -417,7 +414,7 @@
 	spi_2: spi@12d40000 {
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x12d40000 0x100>;
-		interrupts = <0 68 0>;
+		interrupts = <0 70 0>;
 		dmas = <&pdma0 7
 			&pdma0 6>;
 		dma-names = "tx", "rx";
@@ -730,6 +727,5 @@
 		interrupts = <0 112 0>;
 		clocks = <&clock 471>;
 		clock-names = "secss";
-		samsung,power-domain = <&g2d_pd>;
 	};
 };
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index f01c0ee0c87e..490f3dced749 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -433,8 +433,12 @@ static void bL_switcher_restore_cpus(void)
 {
 	int i;
 
-	for_each_cpu(i, &bL_switcher_removed_logical_cpus)
-		cpu_up(i);
+	for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
+		struct device *cpu_dev = get_cpu_device(i);
+		int ret = device_online(cpu_dev);
+		if (ret)
+			dev_err(cpu_dev, "switcher: unable to restore CPU\n");
+	}
 }
 
 static int bL_switcher_halve_cpus(void)
@@ -521,7 +525,7 @@ static int bL_switcher_halve_cpus(void)
 			continue;
 		}
 
-		ret = cpu_down(i);
+		ret = device_offline(get_cpu_device(i));
 		if (ret) {
 			bL_switcher_restore_cpus();
 			return ret;
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 4ce7b70ea901..e07a227ec0db 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -65,6 +65,7 @@ CONFIG_TCG_TIS_I2C_INFINEON=y
 CONFIG_I2C=y
 CONFIG_I2C_MUX=y
 CONFIG_I2C_ARB_GPIO_CHALLENGE=y
+CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_S3C2410=y
 CONFIG_DEBUG_GPIO=y
 # CONFIG_HWMON is not set
diff --git a/arch/arm/include/asm/trusted_foundations.h b/arch/arm/include/asm/trusted_foundations.h
index b5f7705abcb0..624e1d436c6c 100644
--- a/arch/arm/include/asm/trusted_foundations.h
+++ b/arch/arm/include/asm/trusted_foundations.h
@@ -54,7 +54,9 @@ static inline void register_trusted_foundations(
 	 */
 	pr_err("No support for Trusted Foundations, continuing in degraded mode.\n");
 	pr_err("Secondary processors as well as CPU PM will be disabled.\n");
+#if IS_ENABLED(CONFIG_SMP)
 	setup_max_cpus = 0;
+#endif
 	cpu_idle_poll_ctrl(true);
 }
 
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 12c3a5decc60..75d95799b6e6 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -171,8 +171,9 @@ extern int __put_user_8(void *, unsigned long long);
 #define __put_user_check(x,p) \
 	({ \
 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
+		const typeof(*(p)) __user *__tmp_p = (p); \
 		register const typeof(*(p)) __r2 asm("r2") = (x); \
-		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register unsigned long __l asm("r1") = __limit; \
 		register int __e asm("r0"); \
 		switch (sizeof(*(__p))) { \
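The new __tmp_p temporary matters because, under the ARM procedure call standard, r2 is a caller-saved register: with the old macro, if the pointer expression (p) contained a function call, that call could be emitted after __r2 had already been loaded with (x) and silently clobber it. Evaluating (p) into a plain temporary before any register bindings removes the ordering hazard. A hypothetical caller that could have triggered the old bug (get_slot() is an invented helper, shown only for illustration):

    /* Hypothetical example of the hazard fixed above. */
    extern int __user *get_slot(void);	/* invented helper */

    void store_flag(int flag)
    {
            /* With the old macro, the call to get_slot() could clobber
             * r2, which already held 'flag'; the temporary fixes this. */
            put_user(flag, get_slot());
    }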
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 1420725142ca..efb208de75ec 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -132,6 +132,10 @@
 	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
 	biceq	r5, V7M_xPSR_FRAMEPTRALIGN
 
+	@ ensure bit 0 is cleared in the PC, otherwise behaviour is
+	@ unpredictable
+	bic	r4, #1
+
 	@ write basic exception frame
 	stmdb	r2!, {r1, r3-r5}
 	ldmia	sp, {r1, r3-r5}
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 3c217694ebec..cb791ac6a003 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -285,7 +285,7 @@ static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl,
 	if (unwind_pop_register(ctrl, &vsp, reg))
 		return -URC_FAILURE;
 
-	if (insn & 0x80)
+	if (insn & 0x8)
 		if (unwind_pop_register(ctrl, &vsp, 14))
 			return -URC_FAILURE;
 
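For context: in the ARM EHABI unwind encoding, opcodes 0xa0-0xa7 mean "pop r4-r[4+nnn]" while 0xa8-0xaf mean "pop r4-r[4+nnn], r14"; the two forms differ only in bit 3 of the opcode. Every opcode in this range has bit 7 set, so the old "insn & 0x80" test was unconditionally true and r14 was popped even for the 0xa0-0xa7 forms, corrupting the unwound state; masking with 0x8 restores the intended distinction.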
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index a0282928e9c1..7cd6f19945ed 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -1308,19 +1308,19 @@ static struct platform_device at91_adc_device = {
 static struct at91_adc_trigger at91_adc_triggers[] = {
 	[0] = {
 		.name = "timer-counter-0",
-		.value = AT91_ADC_TRGSEL_TC0 | AT91_ADC_TRGEN,
+		.value = 0x1,
 	},
 	[1] = {
 		.name = "timer-counter-1",
-		.value = AT91_ADC_TRGSEL_TC1 | AT91_ADC_TRGEN,
+		.value = 0x3,
 	},
 	[2] = {
 		.name = "timer-counter-2",
-		.value = AT91_ADC_TRGSEL_TC2 | AT91_ADC_TRGEN,
+		.value = 0x5,
 	},
 	[3] = {
 		.name = "external",
-		.value = AT91_ADC_TRGSEL_EXTERNAL | AT91_ADC_TRGEN,
+		.value = 0xd,
 		.is_external = true,
 	},
 };
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index 932129ef26c6..aa01c4222b40 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -18,6 +18,8 @@
 
 #include <mach/map.h>
 
+#include <plat/cpu.h>
+
 #include "smc.h"
 
 static int exynos_do_idle(void)
@@ -28,13 +30,24 @@ static int exynos_do_idle(void)
 
 static int exynos_cpu_boot(int cpu)
 {
+	/*
+	 * The second parameter of SMC_CMD_CPU1BOOT command means CPU id.
+	 * But, Exynos4212 has only one secondary CPU so second parameter
+	 * isn't used for informing secure firmware about CPU id.
+	 */
+	if (soc_is_exynos4212())
+		cpu = 0;
+
 	exynos_smc(SMC_CMD_CPU1BOOT, cpu, 0, 0);
 	return 0;
 }
 
 static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
 {
-	void __iomem *boot_reg = S5P_VA_SYSRAM_NS + 0x1c + 4*cpu;
+	void __iomem *boot_reg = S5P_VA_SYSRAM_NS + 0x1c;
+
+	if (!soc_is_exynos4212())
+		boot_reg += 4*cpu;
 
 	__raw_writel(boot_addr, boot_reg);
 	return 0;
diff --git a/arch/arm/mach-imx/devices/platform-ipu-core.c b/arch/arm/mach-imx/devices/platform-ipu-core.c
index fc4dd7cedc11..6bd7c3f37ac0 100644
--- a/arch/arm/mach-imx/devices/platform-ipu-core.c
+++ b/arch/arm/mach-imx/devices/platform-ipu-core.c
@@ -77,7 +77,7 @@ struct platform_device *__init imx_alloc_mx3_camera(
 
 	pdev = platform_device_alloc("mx3-camera", 0);
 	if (!pdev)
-		goto err;
+		return ERR_PTR(-ENOMEM);
 
 	pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
 	if (!pdev->dev.dma_mask)
diff --git a/arch/arm/mach-mvebu/mvebu-soc-id.c b/arch/arm/mach-mvebu/mvebu-soc-id.c
index f3d4cf53f746..09520e19b78e 100644
--- a/arch/arm/mach-mvebu/mvebu-soc-id.c
+++ b/arch/arm/mach-mvebu/mvebu-soc-id.c
@@ -108,7 +108,18 @@ static int __init mvebu_soc_id_init(void)
 	iounmap(pci_base);
 
 res_ioremap:
-	clk_disable_unprepare(clk);
+	/*
+	 * If the PCIe unit is actually enabled and we have PCI
+	 * support in the kernel, we intentionally do not release the
+	 * reference to the clock. We want to keep it running since
+	 * the bootloader does some PCIe link configuration that the
+	 * kernel is for now unable to do, and gating the clock would
+	 * make us loose this precious configuration.
+	 */
+	if (!of_device_is_available(child) || !IS_ENABLED(CONFIG_PCI_MVEBU)) {
+		clk_disable_unprepare(clk);
+		clk_put(clk);
+	}
 
 clk_err:
 	of_node_put(child);
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
index ac82512b9c8c..b6885e42c0a0 100644
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -142,7 +142,7 @@ __init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs,
 	board_nand_data.nr_parts = nr_parts;
 	board_nand_data.devsize = nand_type;
 
-	board_nand_data.ecc_opt = OMAP_ECC_BCH8_CODE_HW;
+	board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_HW;
 	gpmc_nand_init(&board_nand_data, gpmc_t);
 }
 #endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
index 8f5121b89688..eb8c75ec3b1a 100644
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -456,7 +456,8 @@ static struct clk_hw_omap dpll4_m5x2_ck_hw = {
 	.clkdm_name = "dpll4_clkdm",
 };
 
-DEFINE_STRUCT_CLK(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops);
+DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
+			dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
 
 static struct clk dpll4_m5x2_ck_3630 = {
 	.name = "dpll4_m5x2_ck",
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 01fc710c8181..2498ab025fa2 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -14,6 +14,7 @@
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/export.h>
+#include <linux/clockchips.h>
 
 #include <asm/cpuidle.h>
 #include <asm/proc-fns.h>
@@ -83,6 +84,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 {
 	struct idle_statedata *cx = state_ptr + index;
 	u32 mpuss_can_lose_context = 0;
+	int cpu_id = smp_processor_id();
 
 	/*
 	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -110,6 +112,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
 				 (cx->mpu_logic_state == PWRDM_POWER_OFF);
 
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+
 	/*
 	 * Call idle CPU PM enter notifier chain so that
 	 * VFP and per CPU interrupt context is saved.
@@ -165,6 +169,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 	if (dev->cpu == 0 && mpuss_can_lose_context)
 		cpu_cluster_pm_exit();
 
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+
 fail:
 	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
 	cpu_done[dev->cpu] = false;
@@ -172,6 +178,16 @@ fail:
 	return index;
 }
 
+/*
+ * For each cpu, setup the broadcast timer because local timers
+ * stops for the states above C1.
+ */
+static void omap_setup_broadcast_timer(void *arg)
+{
+	int cpu = smp_processor_id();
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+}
+
 static struct cpuidle_driver omap4_idle_driver = {
 	.name = "omap4_idle",
 	.owner = THIS_MODULE,
@@ -189,8 +205,7 @@ static struct cpuidle_driver omap4_idle_driver = {
 		/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
 		.exit_latency = 328 + 440,
 		.target_residency = 960,
-		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
-			 CPUIDLE_FLAG_TIMER_STOP,
+		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
 		.enter = omap_enter_idle_coupled,
 		.name = "C2",
 		.desc = "CPUx OFF, MPUSS CSWR",
@@ -199,8 +214,7 @@ static struct cpuidle_driver omap4_idle_driver = {
 		/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
 		.exit_latency = 460 + 518,
 		.target_residency = 1100,
-		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
-			 CPUIDLE_FLAG_TIMER_STOP,
+		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
 		.enter = omap_enter_idle_coupled,
 		.name = "C3",
 		.desc = "CPUx OFF, MPUSS OSWR",
@@ -231,5 +245,8 @@ int __init omap4_idle_init(void)
 	if (!cpu_clkdm[0] || !cpu_clkdm[1])
 		return -ENODEV;
 
+	/* Configure the broadcast timer on each cpu */
+	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
+
 	return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
 }
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 892317294fdc..e829664e6a6c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -895,7 +895,7 @@ static struct omap_hwmod omap54xx_mcpdm_hwmod = {
 	 * current exception.
 	 */
 
-	.flags = HWMOD_EXT_OPT_MAIN_CLK,
+	.flags = HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE,
 	.main_clk = "pad_clks_ck",
 	.prcm = {
 		.omap4 = {
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 0c93588fcb91..1ca37c72f12f 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -123,6 +123,11 @@ __v7m_setup:
 	mov	pc, lr
 ENDPROC(__v7m_setup)
 
+	.align 2
+__v7m_setup_stack:
+	.space	4 * 8			@ 8 registers
+__v7m_setup_stack_top:
+
 	define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
 
 	.section ".rodata"
@@ -152,6 +157,3 @@ __v7m_proc_info:
 	.long	nop_cache_fns		@ proc_info_list.cache
 	.size	__v7m_proc_info, . - __v7m_proc_info
 
-__v7m_setup_stack:
-	.space	4 * 8			@ 8 registers
-__v7m_setup_stack_top:
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 5f5b975887fc..b5608b1f9fbd 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -70,6 +70,7 @@ static u32 errata;
 
 static struct omap_dma_global_context_registers {
 	u32 dma_irqenable_l0;
+	u32 dma_irqenable_l1;
 	u32 dma_ocp_sysconfig;
 	u32 dma_gcr;
 } omap_dma_global_context;
@@ -1973,10 +1974,17 @@ static struct irqaction omap24xx_dma_irq;
 
 /*----------------------------------------------------------------------------*/
 
+/*
+ * Note that we are currently using only IRQENABLE_L0 and L1.
+ * As the DSP may be using IRQENABLE_L2 and L3, let's not
+ * touch those for now.
+ */
 void omap_dma_global_context_save(void)
 {
 	omap_dma_global_context.dma_irqenable_l0 =
 		p->dma_read(IRQENABLE_L0, 0);
+	omap_dma_global_context.dma_irqenable_l1 =
+		p->dma_read(IRQENABLE_L1, 0);
 	omap_dma_global_context.dma_ocp_sysconfig =
 		p->dma_read(OCP_SYSCONFIG, 0);
 	omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
@@ -1991,6 +1999,8 @@ void omap_dma_global_context_restore(void)
 			OCP_SYSCONFIG, 0);
 	p->dma_write(omap_dma_global_context.dma_irqenable_l0,
 		IRQENABLE_L0, 0);
+	p->dma_write(omap_dma_global_context.dma_irqenable_l1,
+		IRQENABLE_L1, 0);
 
 	if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
 		p->dma_write(0x3 , IRQSTATUS_L0, 0);
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 90c811f05a2e..7b1c67a0b485 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -266,7 +266,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 
 #define pmd_page(pmd)	pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 
-#define set_pmd_at(mm, addr, pmdp, pmd)	set_pmd(pmdp, pmd)
+#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
 
 static inline int has_transparent_hugepage(void)
 {
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 1a5b4032cb66..60a359cfa328 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -151,7 +151,7 @@ cflags-$(CONFIG_CPU_NEVADA) += $(call cc-option,-march=rm5200,-march=r5000) \
 			-Wa,--trap
 cflags-$(CONFIG_CPU_RM7000) += $(call cc-option,-march=rm7000,-march=r5000) \
 			-Wa,--trap
-cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=r5000) \
+cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1 -mno-mdmx -mno-mips3d,-march=r5000) \
 			-Wa,--trap
 cflags-$(CONFIG_CPU_R8000) += -march=r8000 -Wa,--trap
 cflags-$(CONFIG_CPU_R10000) += $(call cc-option,-march=r10000,-march=r8000) \
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index dc2135be2a3a..ff2707ab3295 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -39,14 +39,14 @@ struct cache_desc {
 #define MIPS_CACHE_PINDEX	0x00000020	/* Physically indexed cache */
 
 struct cpuinfo_mips {
-	unsigned int		udelay_val;
-	unsigned int		asid_cache;
+	unsigned long		asid_cache;
 
 	/*
 	 * Capability and feature descriptor structure for MIPS CPU
 	 */
 	unsigned long		options;
 	unsigned long		ases;
+	unsigned int		udelay_val;
 	unsigned int		processor_id;
 	unsigned int		fpu_id;
 	unsigned int		msa_id;
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 2692abb28e36..5805414777e0 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -381,7 +381,7 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 350
+#define __NR_O32_Linux_syscalls 351
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -710,7 +710,7 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 310
+#define __NR_64_Linux_syscalls 311
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -1043,6 +1043,6 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 314
+#define __NR_N32_Linux_syscalls 315
 
 #endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 4d78bf445a9c..76122ff5cb5e 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -317,7 +317,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		if (regs->regs[insn.i_format.rs] ==
 		    regs->regs[insn.i_format.rt]) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == beql_op)
+			if (insn.i_format.opcode == beql_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
@@ -329,7 +329,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		if (regs->regs[insn.i_format.rs] !=
 		    regs->regs[insn.i_format.rt]) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == bnel_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
@@ -341,7 +341,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		/* rt field assumed to be zero */
 		if ((long)regs->regs[insn.i_format.rs] <= 0) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == blezl_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
@@ -353,7 +353,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		/* rt field assumed to be zero */
 		if ((long)regs->regs[insn.i_format.rs] > 0) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == bgtzl_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 71f85f427034..f639ccd5060c 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -163,7 +163,7 @@ int ptrace_get_watch_regs(struct task_struct *child,
163 enum pt_watch_style style; 163 enum pt_watch_style style;
164 int i; 164 int i;
165 165
166 if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0) 166 if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
167 return -EIO; 167 return -EIO;
168 if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs))) 168 if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
169 return -EIO; 169 return -EIO;
@@ -177,14 +177,14 @@ int ptrace_get_watch_regs(struct task_struct *child,
177#endif 177#endif
178 178
179 __put_user(style, &addr->style); 179 __put_user(style, &addr->style);
180 __put_user(current_cpu_data.watch_reg_use_cnt, 180 __put_user(boot_cpu_data.watch_reg_use_cnt,
181 &addr->WATCH_STYLE.num_valid); 181 &addr->WATCH_STYLE.num_valid);
182 for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { 182 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
183 __put_user(child->thread.watch.mips3264.watchlo[i], 183 __put_user(child->thread.watch.mips3264.watchlo[i],
184 &addr->WATCH_STYLE.watchlo[i]); 184 &addr->WATCH_STYLE.watchlo[i]);
185 __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff, 185 __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
186 &addr->WATCH_STYLE.watchhi[i]); 186 &addr->WATCH_STYLE.watchhi[i]);
187 __put_user(current_cpu_data.watch_reg_masks[i], 187 __put_user(boot_cpu_data.watch_reg_masks[i],
188 &addr->WATCH_STYLE.watch_masks[i]); 188 &addr->WATCH_STYLE.watch_masks[i]);
189 } 189 }
190 for (; i < 8; i++) { 190 for (; i < 8; i++) {
@@ -204,12 +204,12 @@ int ptrace_set_watch_regs(struct task_struct *child,
204 unsigned long lt[NUM_WATCH_REGS]; 204 unsigned long lt[NUM_WATCH_REGS];
205 u16 ht[NUM_WATCH_REGS]; 205 u16 ht[NUM_WATCH_REGS];
206 206
207 if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0) 207 if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
208 return -EIO; 208 return -EIO;
209 if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs))) 209 if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
210 return -EIO; 210 return -EIO;
211 /* Check the values. */ 211 /* Check the values. */
212 for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { 212 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
213 __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]); 213 __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
214#ifdef CONFIG_32BIT 214#ifdef CONFIG_32BIT
215 if (lt[i] & __UA_LIMIT) 215 if (lt[i] & __UA_LIMIT)
@@ -228,7 +228,7 @@ int ptrace_set_watch_regs(struct task_struct *child,
228 return -EINVAL; 228 return -EINVAL;
229 } 229 }
230 /* Install them. */ 230 /* Install them. */
231 for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { 231 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
232 if (lt[i] & 7) 232 if (lt[i] & 7)
233 watch_active = 1; 233 watch_active = 1;
234 child->thread.watch.mips3264.watchlo[i] = lt[i]; 234 child->thread.watch.mips3264.watchlo[i] = lt[i];
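
Note: every hunk in this file swaps current_cpu_data for boot_cpu_data. Paraphrasing arch/mips/include/asm/cpu-info.h, the two differ only in how the per-CPU array is indexed:

    #define current_cpu_data cpu_data[smp_processor_id()]   /* needs preemption disabled */
    #define boot_cpu_data    cpu_data[0]                    /* stable, always valid      */

ptrace runs in preemptible process context, where smp_processor_id() both trips CONFIG_DEBUG_PREEMPT and can change mid-function as the task migrates; CPU 0's view gives one consistent answer for the watch-register ABI being exported here.
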
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 074e857ced28..8119ac2fdfc9 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1545,7 +1545,7 @@ asmlinkage void cache_parity_error(void)
1545 reg_val & (1<<30) ? "secondary" : "primary", 1545 reg_val & (1<<30) ? "secondary" : "primary",
1546 reg_val & (1<<31) ? "data" : "insn"); 1546 reg_val & (1<<31) ? "data" : "insn");
1547 if (cpu_has_mips_r2 && 1547 if (cpu_has_mips_r2 &&
1548 ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) { 1548 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1549 pr_err("Error bits: %s%s%s%s%s%s%s%s\n", 1549 pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1550 reg_val & (1<<29) ? "ED " : "", 1550 reg_val & (1<<29) ? "ED " : "",
1551 reg_val & (1<<28) ? "ET " : "", 1551 reg_val & (1<<28) ? "ET " : "",
@@ -1585,7 +1585,7 @@ asmlinkage void do_ftlb(void)
1585 1585
1586 /* For the moment, report the problem and hang. */ 1586 /* For the moment, report the problem and hang. */
1587 if (cpu_has_mips_r2 && 1587 if (cpu_has_mips_r2 &&
1588 ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) { 1588 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1589 pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", 1589 pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1590 read_c0_ecc()); 1590 read_c0_ecc());
1591 pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); 1591 pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
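
Note: both hunks in this file fix the same slip, a logical `&&` where a bitwise `&` was meant. With `&&`, the PRID collapses to 0 or 1 before the comparison, so `(processor_id && 0xff0000) == PRID_COMP_MIPS` is false for every nonzero PRID. A self-contained demo (the PRID value is a made-up example):

    #include <assert.h>

    #define PRID_COMP_MIPS 0x010000

    int main(void)
    {
        unsigned int processor_id = 0x01a120;                 /* hypothetical PRID */
        assert((processor_id && 0xff0000) == 1);              /* logical AND: 0 or 1 */
        assert((processor_id &  0xff0000) == PRID_COMP_MIPS); /* bitwise: the company field */
        return 0;
    }
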
diff --git a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
index c639b9db0012..12c75db23420 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
@@ -27,8 +27,7 @@
27 27
28#include <cs5536/cs5536_mfgpt.h> 28#include <cs5536/cs5536_mfgpt.h>
29 29
30DEFINE_SPINLOCK(mfgpt_lock); 30static DEFINE_RAW_SPINLOCK(mfgpt_lock);
31EXPORT_SYMBOL(mfgpt_lock);
32 31
33static u32 mfgpt_base; 32static u32 mfgpt_base;
34 33
@@ -55,7 +54,7 @@ EXPORT_SYMBOL(enable_mfgpt0_counter);
55static void init_mfgpt_timer(enum clock_event_mode mode, 54static void init_mfgpt_timer(enum clock_event_mode mode,
56 struct clock_event_device *evt) 55 struct clock_event_device *evt)
57{ 56{
58 spin_lock(&mfgpt_lock); 57 raw_spin_lock(&mfgpt_lock);
59 58
60 switch (mode) { 59 switch (mode) {
61 case CLOCK_EVT_MODE_PERIODIC: 60 case CLOCK_EVT_MODE_PERIODIC:
@@ -79,7 +78,7 @@ static void init_mfgpt_timer(enum clock_event_mode mode,
79 /* Nothing to do here */ 78 /* Nothing to do here */
80 break; 79 break;
81 } 80 }
82 spin_unlock(&mfgpt_lock); 81 raw_spin_unlock(&mfgpt_lock);
83} 82}
84 83
85static struct clock_event_device mfgpt_clockevent = { 84static struct clock_event_device mfgpt_clockevent = {
@@ -157,7 +156,7 @@ static cycle_t mfgpt_read(struct clocksource *cs)
157 static int old_count; 156 static int old_count;
158 static u32 old_jifs; 157 static u32 old_jifs;
159 158
160 spin_lock_irqsave(&mfgpt_lock, flags); 159 raw_spin_lock_irqsave(&mfgpt_lock, flags);
161 /* 160 /*
162 * Although our caller may have the read side of xtime_lock, 161 * Although our caller may have the read side of xtime_lock,
163 * this is now a seqlock, and we are cheating in this routine 162 * this is now a seqlock, and we are cheating in this routine
@@ -191,7 +190,7 @@ static cycle_t mfgpt_read(struct clocksource *cs)
191 old_count = count; 190 old_count = count;
192 old_jifs = jifs; 191 old_jifs = jifs;
193 192
194 spin_unlock_irqrestore(&mfgpt_lock, flags); 193 raw_spin_unlock_irqrestore(&mfgpt_lock, flags);
195 194
196 return (cycle_t) (jifs * COMPARE) + count; 195 return (cycle_t) (jifs * COMPARE) + count;
197} 196}
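
Note: the conversion to a raw spinlock (and the dropped EXPORT_SYMBOL, now that the lock is file-local) follows the usual PREEMPT_RT rule: spinlock_t becomes a sleeping lock under RT, but the clockevent set-mode callback and the clocksource read path can run with interrupts hard-disabled, where sleeping is forbidden. raw_spinlock_t keeps true spinning semantics in both configurations. A sketch of the pattern:

    /* Locks taken from clockevent/clocksource callbacks must keep
       spinning even on PREEMPT_RT. */
    static DEFINE_RAW_SPINLOCK(mfgpt_lock);

    static void program_timer_atomic(void)
    {
        raw_spin_lock(&mfgpt_lock);     /* never sleeps, RT or not */
        /* touch the MFGPT registers */
        raw_spin_unlock(&mfgpt_lock);
    }
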
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 58033c44690d..b611102e23b5 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -273,7 +273,7 @@ void build_clear_page(void)
273 uasm_i_ori(&buf, A2, A0, off); 273 uasm_i_ori(&buf, A2, A0, off);
274 274
275 if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) 275 if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
276 uasm_i_lui(&buf, AT, 0xa000); 276 uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
277 277
278 off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size) 278 off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
279 * cache_line_size : 0; 279 * cache_line_size : 0;
@@ -424,7 +424,7 @@ void build_copy_page(void)
424 uasm_i_ori(&buf, A2, A0, off); 424 uasm_i_ori(&buf, A2, A0, off);
425 425
426 if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) 426 if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
427 uasm_i_lui(&buf, AT, 0xa000); 427 uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
428 428
429 off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) * 429 off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
430 cache_line_size : 0; 430 cache_line_size : 0;
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index 6d0f4ab3632d..f2364e419682 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -27,7 +27,7 @@ unsigned long physical_memsize = 0L;
27fw_memblock_t * __init fw_getmdesc(int eva) 27fw_memblock_t * __init fw_getmdesc(int eva)
28{ 28{
29 char *memsize_str, *ememsize_str __maybe_unused = NULL, *ptr; 29 char *memsize_str, *ememsize_str __maybe_unused = NULL, *ptr;
30 unsigned long memsize, ememsize __maybe_unused = 0; 30 unsigned long memsize = 0, ememsize __maybe_unused = 0;
31 static char cmdline[COMMAND_LINE_SIZE] __initdata; 31 static char cmdline[COMMAND_LINE_SIZE] __initdata;
32 int tmp; 32 int tmp;
33 33
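
Note: the one-character fix above closes an uninitialized-variable path: when no "memsize" string is found in the firmware environment, nothing ever writes memsize, and the later fallback logic compared garbage. The hazard in miniature (fw_getenv and the fallback are hypothetical stand-ins for the surrounding code):

    unsigned long memsize = 0;               /* was indeterminate */
    char *memsize_str = fw_getenv("memsize");

    if (memsize_str)
        kstrtoul(memsize_str, 0, &memsize);  /* only writes on success */
    if (memsize == 0)                        /* now a well-defined test */
        memsize = physical_memsize;
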
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c
index b128cb973ebe..7f6ce6d734c0 100644
--- a/arch/mips/pci/pci-rc32434.c
+++ b/arch/mips/pci/pci-rc32434.c
@@ -53,7 +53,6 @@ static struct resource rc32434_res_pci_mem1 = {
53 .start = 0x50000000, 53 .start = 0x50000000,
54 .end = 0x5FFFFFFF, 54 .end = 0x5FFFFFFF,
55 .flags = IORESOURCE_MEM, 55 .flags = IORESOURCE_MEM,
56 .parent = &rc32434_res_pci_mem1,
57 .sibling = NULL, 56 .sibling = NULL,
58 .child = &rc32434_res_pci_mem2 57 .child = &rc32434_res_pci_mem2
59}; 58};
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 4c0cedf4e2c7..ce4c68a4a823 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -150,7 +150,9 @@ endif
150 150
151CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell) 151CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
152 152
153KBUILD_CPPFLAGS += -Iarch/$(ARCH) 153asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
154
155KBUILD_CPPFLAGS += -Iarch/$(ARCH) $(asinstr)
154KBUILD_AFLAGS += -Iarch/$(ARCH) 156KBUILD_AFLAGS += -Iarch/$(ARCH)
155KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y) 157KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
156CPP = $(CC) -E $(KBUILD_CFLAGS) 158CPP = $(CC) -E $(KBUILD_CFLAGS)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 6586a40a46ce..cded7c1278ef 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -318,11 +318,16 @@ n:
318 addi reg,reg,(name - 0b)@l; 318 addi reg,reg,(name - 0b)@l;
319 319
320#ifdef __powerpc64__ 320#ifdef __powerpc64__
321#ifdef HAVE_AS_ATHIGH
322#define __AS_ATHIGH high
323#else
324#define __AS_ATHIGH h
325#endif
321#define LOAD_REG_IMMEDIATE(reg,expr) \ 326#define LOAD_REG_IMMEDIATE(reg,expr) \
322 lis reg,(expr)@highest; \ 327 lis reg,(expr)@highest; \
323 ori reg,reg,(expr)@higher; \ 328 ori reg,reg,(expr)@higher; \
324 rldicr reg,reg,32,31; \ 329 rldicr reg,reg,32,31; \
325 oris reg,reg,(expr)@h; \ 330 oris reg,reg,(expr)@__AS_ATHIGH; \
326 ori reg,reg,(expr)@l; 331 ori reg,reg,(expr)@l;
327 332
328#define LOAD_REG_ADDR(reg,name) \ 333#define LOAD_REG_ADDR(reg,name) \
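
Note: the two powerpc hunks above work together. Recent binutils grew a `@high` operator and became stricter about `@h` on 64-bit expressions, so the Makefile probes the assembler with `lis 9,foo@high` and defines HAVE_AS_ATHIGH when it is understood; LOAD_REG_IMMEDIATE then uses `@high` where available and falls back to `@h`. What the five-instruction sequence composes, re-expressed in C (operator semantics as assumed: each selects a 16-bit slice of the 64-bit constant):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t load_reg_immediate(uint64_t x)
    {
        uint64_t r;
        r  = ((x >> 48) & 0xffff) << 16;    /* lis    reg,(x)@highest       */
        r |= (x >> 32) & 0xffff;            /* ori    reg,reg,(x)@higher    */
        r <<= 32;                           /* rldicr reg,reg,32,31         */
        r |= ((x >> 16) & 0xffff) << 16;    /* oris   reg,reg,(x)@high (@h) */
        r |= x & 0xffff;                    /* ori    reg,reg,(x)@l         */
        return r;
    }

    int main(void)
    {
        assert(load_reg_immediate(0x123456789abcdef0ULL) == 0x123456789abcdef0ULL);
        return 0;
    }
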
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index d0e784e0ff48..521790330672 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -39,6 +39,17 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
39 (unsigned long)_stext < end; 39 (unsigned long)_stext < end;
40} 40}
41 41
42static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
43{
44#ifdef CONFIG_KVM_GUEST
45 extern char kvm_tmp[];
46 return start < (unsigned long)kvm_tmp &&
47 (unsigned long)&kvm_tmp[1024 * 1024] < end;
48#else
49 return 0;
50#endif
51}
52
42#undef dereference_function_descriptor 53#undef dereference_function_descriptor
43static inline void *dereference_function_descriptor(void *ptr) 54static inline void *dereference_function_descriptor(void *ptr)
44{ 55{
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 3ddf70276706..ea4dc3a89c1f 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -361,3 +361,4 @@ SYSCALL(finit_module)
361SYSCALL(ni_syscall) /* sys_kcmp */ 361SYSCALL(ni_syscall) /* sys_kcmp */
362SYSCALL_SPU(sched_setattr) 362SYSCALL_SPU(sched_setattr)
363SYSCALL_SPU(sched_getattr) 363SYSCALL_SPU(sched_getattr)
364SYSCALL_SPU(renameat2)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 4494f029b632..9b892bbd9d84 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define __NR_syscalls 357 15#define __NR_syscalls 358
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18#define NR_syscalls __NR_syscalls 18#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 881bf2e2560d..2d526f7b48da 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -379,5 +379,6 @@
379#define __NR_kcmp 354 379#define __NR_kcmp 354
380#define __NR_sched_setattr 355 380#define __NR_sched_setattr 355
381#define __NR_sched_getattr 356 381#define __NR_sched_getattr 356
382#define __NR_renameat2 357
382 383
383#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 384#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
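
Note: the three unistd/systbl hunks are the complete wiring for a new powerpc syscall: the dispatch-table entry, the NR_syscalls bump, and the uapi number must all agree. Once they do, userspace can reach the call by number even before libc grows a wrapper. A hedged example (a flags value of 0 behaves like a plain rename):

    #define _GNU_SOURCE
    #include <fcntl.h>          /* AT_FDCWD */
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_renameat2
    #define __NR_renameat2 357  /* powerpc number from the hunk above */
    #endif

    int main(void)
    {
        return syscall(__NR_renameat2, AT_FDCWD, "old", AT_FDCWD, "new", 0);
    }
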
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 6a0175297b0d..dd8695f6cb6d 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -74,7 +74,7 @@
74#define KVM_INST_MTSRIN 0x7c0001e4 74#define KVM_INST_MTSRIN 0x7c0001e4
75 75
76static bool kvm_patching_worked = true; 76static bool kvm_patching_worked = true;
77static char kvm_tmp[1024 * 1024]; 77char kvm_tmp[1024 * 1024];
78static int kvm_tmp_index; 78static int kvm_tmp_index;
79 79
80static inline void kvm_patch_ins(u32 *inst, u32 new_inst) 80static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 59d229a2a3e0..879b3aacac32 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -237,7 +237,7 @@ static void wake_offline_cpus(void)
237 if (!cpu_online(cpu)) { 237 if (!cpu_online(cpu)) {
238 printk(KERN_INFO "kexec: Waking offline cpu %d.\n", 238 printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
239 cpu); 239 cpu);
240 cpu_up(cpu); 240 WARN_ON(cpu_up(cpu));
241 } 241 }
242 } 242 }
243} 243}
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 94e597e6f15c..7af190a266b3 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -886,7 +886,7 @@ static int kvmppc_book3s_init(void)
886 r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); 886 r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
887 if (r) 887 if (r)
888 return r; 888 return r;
889#ifdef CONFIG_KVM_BOOK3S_32 889#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
890 r = kvmppc_book3s_init_pr(); 890 r = kvmppc_book3s_init_pr();
891#endif 891#endif
892 return r; 892 return r;
@@ -895,7 +895,7 @@ static int kvmppc_book3s_init(void)
895 895
896static void kvmppc_book3s_exit(void) 896static void kvmppc_book3s_exit(void)
897{ 897{
898#ifdef CONFIG_KVM_BOOK3S_32 898#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
899 kvmppc_book3s_exit_pr(); 899 kvmppc_book3s_exit_pr();
900#endif 900#endif
901 kvm_exit(); 901 kvm_exit();
@@ -905,7 +905,7 @@ module_init(kvmppc_book3s_init);
905module_exit(kvmppc_book3s_exit); 905module_exit(kvmppc_book3s_exit);
906 906
907/* On 32bit this is our one and only kernel module */ 907/* On 32bit this is our one and only kernel module */
908#ifdef CONFIG_KVM_BOOK3S_32 908#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
909MODULE_ALIAS_MISCDEV(KVM_MINOR); 909MODULE_ALIAS_MISCDEV(KVM_MINOR);
910MODULE_ALIAS("devname:kvm"); 910MODULE_ALIAS("devname:kvm");
911#endif 911#endif
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 1d6c56ad5b60..8fcc36306a02 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -234,7 +234,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
234 pte_size = psize; 234 pte_size = psize;
235 pte = lookup_linux_pte_and_update(pgdir, hva, writing, 235 pte = lookup_linux_pte_and_update(pgdir, hva, writing,
236 &pte_size); 236 &pte_size);
237 if (pte_present(pte)) { 237 if (pte_present(pte) && !pte_numa(pte)) {
238 if (writing && !pte_write(pte)) 238 if (writing && !pte_write(pte))
239 /* make the actual HPTE be read-only */ 239 /* make the actual HPTE be read-only */
240 ptel = hpte_make_readonly(ptel); 240 ptel = hpte_make_readonly(ptel);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b031f932c0cc..07c8b5b0f9d2 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1323,6 +1323,110 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1323 mr r3, r9 1323 mr r3, r9
1324 bl kvmppc_save_fp 1324 bl kvmppc_save_fp
1325 1325
1326#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1327BEGIN_FTR_SECTION
1328 b 2f
1329END_FTR_SECTION_IFCLR(CPU_FTR_TM)
1330 /* Turn on TM. */
1331 mfmsr r8
1332 li r0, 1
1333 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
1334 mtmsrd r8
1335
1336 ld r5, VCPU_MSR(r9)
1337 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
1338 beq 1f /* TM not active in guest. */
1339
1340 li r3, TM_CAUSE_KVM_RESCHED
1341
1342 /* Clear the MSR RI since r1, r13 are all going to be foobar. */
1343 li r5, 0
1344 mtmsrd r5, 1
1345
1346 /* All GPRs are volatile at this point. */
1347 TRECLAIM(R3)
1348
1349 /* Temporarily store r13 and r9 so we have some regs to play with */
1350 SET_SCRATCH0(r13)
1351 GET_PACA(r13)
1352 std r9, PACATMSCRATCH(r13)
1353 ld r9, HSTATE_KVM_VCPU(r13)
1354
1355 /* Get a few more GPRs free. */
1356 std r29, VCPU_GPRS_TM(29)(r9)
1357 std r30, VCPU_GPRS_TM(30)(r9)
1358 std r31, VCPU_GPRS_TM(31)(r9)
1359
1360	/* Save away PPR and DSCR soon so we don't run with user values. */

1361 mfspr r31, SPRN_PPR
1362 HMT_MEDIUM
1363 mfspr r30, SPRN_DSCR
1364 ld r29, HSTATE_DSCR(r13)
1365 mtspr SPRN_DSCR, r29
1366
1367 /* Save all but r9, r13 & r29-r31 */
1368 reg = 0
1369 .rept 29
1370 .if (reg != 9) && (reg != 13)
1371 std reg, VCPU_GPRS_TM(reg)(r9)
1372 .endif
1373 reg = reg + 1
1374 .endr
1375 /* ... now save r13 */
1376 GET_SCRATCH0(r4)
1377 std r4, VCPU_GPRS_TM(13)(r9)
1378 /* ... and save r9 */
1379 ld r4, PACATMSCRATCH(r13)
1380 std r4, VCPU_GPRS_TM(9)(r9)
1381
1382 /* Reload stack pointer and TOC. */
1383 ld r1, HSTATE_HOST_R1(r13)
1384 ld r2, PACATOC(r13)
1385
1386 /* Set MSR RI now we have r1 and r13 back. */
1387 li r5, MSR_RI
1388 mtmsrd r5, 1
1389
1390	/* Save away checkpointed SPRs. */
1391 std r31, VCPU_PPR_TM(r9)
1392 std r30, VCPU_DSCR_TM(r9)
1393 mflr r5
1394 mfcr r6
1395 mfctr r7
1396 mfspr r8, SPRN_AMR
1397 mfspr r10, SPRN_TAR
1398 std r5, VCPU_LR_TM(r9)
1399 stw r6, VCPU_CR_TM(r9)
1400 std r7, VCPU_CTR_TM(r9)
1401 std r8, VCPU_AMR_TM(r9)
1402 std r10, VCPU_TAR_TM(r9)
1403
1404 /* Restore r12 as trap number. */
1405 lwz r12, VCPU_TRAP(r9)
1406
1407 /* Save FP/VSX. */
1408 addi r3, r9, VCPU_FPRS_TM
1409 bl .store_fp_state
1410 addi r3, r9, VCPU_VRS_TM
1411 bl .store_vr_state
1412 mfspr r6, SPRN_VRSAVE
1413 stw r6, VCPU_VRSAVE_TM(r9)
14141:
1415 /*
1416 * We need to save these SPRs after the treclaim so that the software
1417 * error code is recorded correctly in the TEXASR. Also the user may
1418 * change these outside of a transaction, so they must always be
1419 * context switched.
1420 */
1421 mfspr r5, SPRN_TFHAR
1422 mfspr r6, SPRN_TFIAR
1423 mfspr r7, SPRN_TEXASR
1424 std r5, VCPU_TFHAR(r9)
1425 std r6, VCPU_TFIAR(r9)
1426 std r7, VCPU_TEXASR(r9)
14272:
1428#endif
1429
1326 /* Increment yield count if they have a VPA */ 1430 /* Increment yield count if they have a VPA */
1327 ld r8, VCPU_VPA(r9) /* do they have a VPA? */ 1431 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1328 cmpdi r8, 0 1432 cmpdi r8, 0
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index c5c052a9729c..02f1defd8bb9 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1153,7 +1153,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
1153 goto free_vcpu; 1153 goto free_vcpu;
1154 vcpu->arch.book3s = vcpu_book3s; 1154 vcpu->arch.book3s = vcpu_book3s;
1155 1155
1156#ifdef CONFIG_KVM_BOOK3S_32 1156#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1157 vcpu->arch.shadow_vcpu = 1157 vcpu->arch.shadow_vcpu =
1158 kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); 1158 kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
1159 if (!vcpu->arch.shadow_vcpu) 1159 if (!vcpu->arch.shadow_vcpu)
@@ -1198,7 +1198,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
1198uninit_vcpu: 1198uninit_vcpu:
1199 kvm_vcpu_uninit(vcpu); 1199 kvm_vcpu_uninit(vcpu);
1200free_shadow_vcpu: 1200free_shadow_vcpu:
1201#ifdef CONFIG_KVM_BOOK3S_32 1201#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1202 kfree(vcpu->arch.shadow_vcpu); 1202 kfree(vcpu->arch.shadow_vcpu);
1203free_vcpu3s: 1203free_vcpu3s:
1204#endif 1204#endif
@@ -1215,7 +1215,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
1215 1215
1216 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); 1216 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
1217 kvm_vcpu_uninit(vcpu); 1217 kvm_vcpu_uninit(vcpu);
1218#ifdef CONFIG_KVM_BOOK3S_32 1218#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1219 kfree(vcpu->arch.shadow_vcpu); 1219 kfree(vcpu->arch.shadow_vcpu);
1220#endif 1220#endif
1221 vfree(vcpu_book3s); 1221 vfree(vcpu_book3s);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index d766d6ee33fe..06ba83b036d3 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -207,6 +207,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
207 if (overlaps_kernel_text(vaddr, vaddr + step)) 207 if (overlaps_kernel_text(vaddr, vaddr + step))
208 tprot &= ~HPTE_R_N; 208 tprot &= ~HPTE_R_N;
209 209
210 /* Make kvm guest trampolines executable */
211 if (overlaps_kvm_tmp(vaddr, vaddr + step))
212 tprot &= ~HPTE_R_N;
213
210 /* 214 /*
211 * If relocatable, check if it overlaps interrupt vectors that 215 * If relocatable, check if it overlaps interrupt vectors that
212 * are copied down to real 0. For relocatable kernel 216 * are copied down to real 0. For relocatable kernel
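
Note: this hunk closes the loop on the two powerpc changes above: kvm_tmp (made non-static in kernel/kvm.c) holds patched-in guest trampolines, overlaps_kvm_tmp() from asm/sections.h detects linear-mapping steps that cover it, and here the no-execute bit is kept clear for those steps so the trampolines stay executable. Condensing the two adjacent checks into one sketch:

    if (overlaps_kernel_text(vaddr, vaddr + step) ||
        overlaps_kvm_tmp(vaddr, vaddr + step))
        tprot &= ~HPTE_R_N;    /* keep these HPTEs executable */
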
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index b3ecb8f5b6ce..9ae6664ff08c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -158,6 +158,7 @@ int kvm_dev_ioctl_check_extension(long ext)
158 case KVM_CAP_ONE_REG: 158 case KVM_CAP_ONE_REG:
159 case KVM_CAP_ENABLE_CAP: 159 case KVM_CAP_ENABLE_CAP:
160 case KVM_CAP_S390_CSS_SUPPORT: 160 case KVM_CAP_S390_CSS_SUPPORT:
161 case KVM_CAP_IRQFD:
161 case KVM_CAP_IOEVENTFD: 162 case KVM_CAP_IOEVENTFD:
162 case KVM_CAP_DEVICE_CTRL: 163 case KVM_CAP_DEVICE_CTRL:
163 case KVM_CAP_ENABLE_CAP_VM: 164 case KVM_CAP_ENABLE_CAP_VM:
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8de6d9cf3b95..678205195ae1 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -1,7 +1,7 @@
1#ifndef _ASM_X86_PAGE_64_DEFS_H 1#ifndef _ASM_X86_PAGE_64_DEFS_H
2#define _ASM_X86_PAGE_64_DEFS_H 2#define _ASM_X86_PAGE_64_DEFS_H
3 3
4#define THREAD_SIZE_ORDER 1 4#define THREAD_SIZE_ORDER 2
5#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 5#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
6#define CURRENT_MASK (~(THREAD_SIZE - 1)) 6#define CURRENT_MASK (~(THREAD_SIZE - 1))
7 7
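
Note: raising THREAD_SIZE_ORDER from 1 to 2 doubles the x86-64 kernel stack from two pages (8 KiB) to four (16 KiB). The arithmetic, checked at compile time (PAGE_SIZE assumed to be the usual 4 KiB):

    #include <assert.h>

    #define PAGE_SIZE         4096
    #define THREAD_SIZE_ORDER 2
    #define THREAD_SIZE       (PAGE_SIZE << THREAD_SIZE_ORDER)

    static_assert(THREAD_SIZE == 16 * 1024,
                  "order 2 gives 16 KiB stacks (order 1 gave 8 KiB)");
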
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 33e8c028842f..138ceffc6377 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7778,7 +7778,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7778 7778
7779 exec_control = vmcs12->pin_based_vm_exec_control; 7779 exec_control = vmcs12->pin_based_vm_exec_control;
7780 exec_control |= vmcs_config.pin_based_exec_ctrl; 7780 exec_control |= vmcs_config.pin_based_exec_ctrl;
7781 exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 7781 exec_control &= ~(PIN_BASED_VMX_PREEMPTION_TIMER |
7782 PIN_BASED_POSTED_INTR);
7782 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); 7783 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
7783 7784
7784 vmx->nested.preemption_timer_expired = false; 7785 vmx->nested.preemption_timer_expired = false;
@@ -7815,7 +7816,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7815 if (!vmx->rdtscp_enabled) 7816 if (!vmx->rdtscp_enabled)
7816 exec_control &= ~SECONDARY_EXEC_RDTSCP; 7817 exec_control &= ~SECONDARY_EXEC_RDTSCP;
7817 /* Take the following fields only from vmcs12 */ 7818 /* Take the following fields only from vmcs12 */
7818 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 7819 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
7820 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
7821 SECONDARY_EXEC_APIC_REGISTER_VIRT);
7819 if (nested_cpu_has(vmcs12, 7822 if (nested_cpu_has(vmcs12,
7820 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) 7823 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
7821 exec_control |= vmcs12->secondary_vm_exec_control; 7824 exec_control |= vmcs12->secondary_vm_exec_control;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b6c0bacca9bd..20316c67b824 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -106,6 +106,8 @@ EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
106static u32 tsc_tolerance_ppm = 250; 106static u32 tsc_tolerance_ppm = 250;
107module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); 107module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
108 108
109static bool backwards_tsc_observed = false;
110
109#define KVM_NR_SHARED_MSRS 16 111#define KVM_NR_SHARED_MSRS 16
110 112
111struct kvm_shared_msrs_global { 113struct kvm_shared_msrs_global {
@@ -1486,7 +1488,8 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
1486 &ka->master_kernel_ns, 1488 &ka->master_kernel_ns,
1487 &ka->master_cycle_now); 1489 &ka->master_cycle_now);
1488 1490
1489 ka->use_master_clock = host_tsc_clocksource & vcpus_matched; 1491 ka->use_master_clock = host_tsc_clocksource && vcpus_matched
1492 && !backwards_tsc_observed;
1490 1493
1491 if (ka->use_master_clock) 1494 if (ka->use_master_clock)
1492 atomic_set(&kvm_guest_has_master_clock, 1); 1495 atomic_set(&kvm_guest_has_master_clock, 1);
@@ -6945,6 +6948,7 @@ int kvm_arch_hardware_enable(void *garbage)
6945 */ 6948 */
6946 if (backwards_tsc) { 6949 if (backwards_tsc) {
6947 u64 delta_cyc = max_tsc - local_tsc; 6950 u64 delta_cyc = max_tsc - local_tsc;
6951 backwards_tsc_observed = true;
6948 list_for_each_entry(kvm, &vm_list, vm_list) { 6952 list_for_each_entry(kvm, &vm_list, vm_list) {
6949 kvm_for_each_vcpu(i, vcpu, kvm) { 6953 kvm_for_each_vcpu(i, vcpu, kvm) {
6950 vcpu->arch.tsc_offset_adjustment += delta_cyc; 6954 vcpu->arch.tsc_offset_adjustment += delta_cyc;
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index e1f220e3ca68..310c5f0dbef1 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -155,6 +155,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
155 unsigned long addr; 155 unsigned long addr;
156 int ret = 0; 156 int ret = 0;
157 struct vm_area_struct *vma; 157 struct vm_area_struct *vma;
158 static struct page *no_pages[] = {NULL};
158 159
159#ifdef CONFIG_X86_X32_ABI 160#ifdef CONFIG_X86_X32_ABI
160 if (test_thread_flag(TIF_X32)) 161 if (test_thread_flag(TIF_X32))
@@ -193,7 +194,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
193 addr - VDSO_OFFSET(VDSO_PREV_PAGES), 194 addr - VDSO_OFFSET(VDSO_PREV_PAGES),
194 VDSO_OFFSET(VDSO_PREV_PAGES), 195 VDSO_OFFSET(VDSO_PREV_PAGES),
195 VM_READ, 196 VM_READ,
196 NULL); 197 no_pages);
197 198
198 if (IS_ERR(vma)) { 199 if (IS_ERR(vma)) {
199 ret = PTR_ERR(vma); 200 ret = PTR_ERR(vma);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index c1e31a41f949..25bbc55dca89 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1278,8 +1278,8 @@ static int __init acpi_thermal_init(void)
1278 1278
1279static void __exit acpi_thermal_exit(void) 1279static void __exit acpi_thermal_exit(void)
1280{ 1280{
1281 destroy_workqueue(acpi_thermal_pm_queue);
1282 acpi_bus_unregister_driver(&acpi_thermal_driver); 1281 acpi_bus_unregister_driver(&acpi_thermal_driver);
1282 destroy_workqueue(acpi_thermal_pm_queue);
1283 1283
1284 return; 1284 return;
1285} 1285}
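
Note: the two swapped lines above restore the standard teardown order: unregister the driver first, so nothing can queue new work, and only then destroy the workqueue the driver's notify handler queues onto. A hypothetical miniature of the rule (names invented for the sketch):

    struct workqueue_struct;
    extern void unregister_producer(void);                    /* stops all queueing */
    extern void destroy_workqueue(struct workqueue_struct *);
    extern struct workqueue_struct *wq;

    static void example_exit(void)
    {
        unregister_producer();   /* no new work items after this... */
        destroy_workqueue(wq);   /* ...so the queue can safely go away */
    }
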
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index ea83828bfea9..18d97d5c7d90 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4224,10 +4224,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4224 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4224 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4225 4225
4226 /* devices that don't properly handle queued TRIM commands */ 4226 /* devices that don't properly handle queued TRIM commands */
4227 { "Micron_M500*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, }, 4227 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4228 { "Crucial_CT???M500SSD*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, }, 4228 { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4229 { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, 4229 { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4230 { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, 4230 { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4231 4231
4232 /* 4232 /*
4233 * Some WD SATA-I drives spin up and down erratically when the link 4233 * Some WD SATA-I drives spin up and down erratically when the link
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6d8a87f252de..cb9b1f8326c3 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -144,11 +144,11 @@ static void virtblk_done(struct virtqueue *vq)
144 if (unlikely(virtqueue_is_broken(vq))) 144 if (unlikely(virtqueue_is_broken(vq)))
145 break; 145 break;
146 } while (!virtqueue_enable_cb(vq)); 146 } while (!virtqueue_enable_cb(vq));
147 spin_unlock_irqrestore(&vblk->vq_lock, flags);
148 147
149 /* In case queue is stopped waiting for more buffers. */ 148 /* In case queue is stopped waiting for more buffers. */
150 if (req_done) 149 if (req_done)
151 blk_mq_start_stopped_hw_queues(vblk->disk->queue); 150 blk_mq_start_stopped_hw_queues(vblk->disk->queue);
151 spin_unlock_irqrestore(&vblk->vq_lock, flags);
152} 152}
153 153
154static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) 154static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
@@ -202,8 +202,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
202 err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num); 202 err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
203 if (err) { 203 if (err) {
204 virtqueue_kick(vblk->vq); 204 virtqueue_kick(vblk->vq);
205 spin_unlock_irqrestore(&vblk->vq_lock, flags);
206 blk_mq_stop_hw_queue(hctx); 205 blk_mq_stop_hw_queue(hctx);
206 spin_unlock_irqrestore(&vblk->vq_lock, flags);
207 /* Out of mem doesn't actually happen, since we fall back 207 /* Out of mem doesn't actually happen, since we fall back
208 * to direct descriptors */ 208 * to direct descriptors */
209 if (err == -ENOMEM || err == -ENOSPC) 209 if (err == -ENOMEM || err == -ENOSPC)
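
Note: both virtio_blk hunks widen the vq_lock scope so that stopping or restarting the hardware queue happens under the same lock as virtqueue completion processing. With the unlock hoisted above those calls, a completion racing in between could restart the queue before the stop landed, leaving it stopped with no one left to wake it. The shape of the fix:

    spin_lock_irqsave(&vblk->vq_lock, flags);
    /* ... drain completions / attempt to queue the request ... */
    blk_mq_stop_hw_queue(hctx);                   /* state change under the lock */
    spin_unlock_irqrestore(&vblk->vq_lock, flags);
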
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 4637697c139f..3fbee4540228 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -147,7 +147,7 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
147static int _round_up_table(const struct clk_div_table *table, int div) 147static int _round_up_table(const struct clk_div_table *table, int div)
148{ 148{
149 const struct clk_div_table *clkt; 149 const struct clk_div_table *clkt;
150 int up = _get_table_maxdiv(table); 150 int up = INT_MAX;
151 151
152 for (clkt = table; clkt->div; clkt++) { 152 for (clkt = table; clkt->div; clkt++) {
153 if (clkt->div == div) 153 if (clkt->div == div)
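
Note: _round_up_table() should return the smallest table divider at or above the request. Seeding `up` with the table's maximum divider meant a request larger than every entry quietly "rounded up" to a smaller value; starting from INT_MAX turns the loop into a plain minimum search. A sketch of the loop this initializer feeds, with the body shape assumed from the surrounding function:

    int up = INT_MAX;

    for (clkt = table; clkt->div; clkt++) {
        if (clkt->div == div)
            return clkt->div;        /* exact match */
        else if (clkt->div < div)
            continue;                /* too small to round up to */
        up = min(up, clkt->div);     /* track smallest divider >= div */
    }
    return up;                       /* INT_MAX when nothing fits */
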
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index bca0a0badbfa..a886702f7c8b 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -521,8 +521,10 @@ static struct clk * __init clkgen_odf_register(const char *parent_name,
521 gate->lock = odf_lock; 521 gate->lock = odf_lock;
522 522
523 div = kzalloc(sizeof(*div), GFP_KERNEL); 523 div = kzalloc(sizeof(*div), GFP_KERNEL);
524 if (!div) 524 if (!div) {
525 kfree(gate);
525 return ERR_PTR(-ENOMEM); 526 return ERR_PTR(-ENOMEM);
527 }
526 528
527 div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO; 529 div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
528 div->reg = reg + pll_data->odf[odf].offset; 530 div->reg = reg + pll_data->odf[odf].offset;
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index e1769addf435..6aad8abc69a2 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -58,9 +58,9 @@
58#define PLLDU_LFCON_SET_DIVN 600 58#define PLLDU_LFCON_SET_DIVN 600
59 59
60#define PLLE_BASE_DIVCML_SHIFT 24 60#define PLLE_BASE_DIVCML_SHIFT 24
61#define PLLE_BASE_DIVCML_WIDTH 4 61#define PLLE_BASE_DIVCML_MASK 0xf
62#define PLLE_BASE_DIVP_SHIFT 16 62#define PLLE_BASE_DIVP_SHIFT 16
63#define PLLE_BASE_DIVP_WIDTH 7 63#define PLLE_BASE_DIVP_WIDTH 6
64#define PLLE_BASE_DIVN_SHIFT 8 64#define PLLE_BASE_DIVN_SHIFT 8
65#define PLLE_BASE_DIVN_WIDTH 8 65#define PLLE_BASE_DIVN_WIDTH 8
66#define PLLE_BASE_DIVM_SHIFT 0 66#define PLLE_BASE_DIVM_SHIFT 0
@@ -183,6 +183,14 @@
183#define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\ 183#define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
184 mask(p->params->div_nmp->divp_width)) 184 mask(p->params->div_nmp->divp_width))
185 185
186#define divm_shift(p) (p)->params->div_nmp->divm_shift
187#define divn_shift(p) (p)->params->div_nmp->divn_shift
188#define divp_shift(p) (p)->params->div_nmp->divp_shift
189
190#define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p))
191#define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p))
192#define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p))
193
186#define divm_max(p) (divm_mask(p)) 194#define divm_max(p) (divm_mask(p))
187#define divn_max(p) (divn_mask(p)) 195#define divn_max(p) (divn_mask(p))
188#define divp_max(p) (1 << (divp_mask(p))) 196#define divp_max(p) (1 << (divp_mask(p)))
@@ -476,13 +484,12 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
476 } else { 484 } else {
477 val = pll_readl_base(pll); 485 val = pll_readl_base(pll);
478 486
479 val &= ~((divm_mask(pll) << div_nmp->divm_shift) | 487 val &= ~(divm_mask_shifted(pll) | divn_mask_shifted(pll) |
480 (divn_mask(pll) << div_nmp->divn_shift) | 488 divp_mask_shifted(pll));
481 (divp_mask(pll) << div_nmp->divp_shift));
482 489
483 val |= ((cfg->m << div_nmp->divm_shift) | 490 val |= (cfg->m << divm_shift(pll)) |
484 (cfg->n << div_nmp->divn_shift) | 491 (cfg->n << divn_shift(pll)) |
485 (cfg->p << div_nmp->divp_shift)); 492 (cfg->p << divp_shift(pll));
486 493
487 pll_writel_base(val, pll); 494 pll_writel_base(val, pll);
488 } 495 }
@@ -730,11 +737,12 @@ static int clk_plle_enable(struct clk_hw *hw)
730 if (pll->params->flags & TEGRA_PLLE_CONFIGURE) { 737 if (pll->params->flags & TEGRA_PLLE_CONFIGURE) {
731 /* configure dividers */ 738 /* configure dividers */
732 val = pll_readl_base(pll); 739 val = pll_readl_base(pll);
733 val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll)); 740 val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
734 val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT); 741 divm_mask_shifted(pll));
735 val |= sel.m << pll->params->div_nmp->divm_shift; 742 val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
736 val |= sel.n << pll->params->div_nmp->divn_shift; 743 val |= sel.m << divm_shift(pll);
737 val |= sel.p << pll->params->div_nmp->divp_shift; 744 val |= sel.n << divn_shift(pll);
745 val |= sel.p << divp_shift(pll);
738 val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT; 746 val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
739 pll_writel_base(val, pll); 747 pll_writel_base(val, pll);
740 } 748 }
@@ -745,10 +753,11 @@ static int clk_plle_enable(struct clk_hw *hw)
745 pll_writel_misc(val, pll); 753 pll_writel_misc(val, pll);
746 754
747 val = readl(pll->clk_base + PLLE_SS_CTRL); 755 val = readl(pll->clk_base + PLLE_SS_CTRL);
756 val &= ~PLLE_SS_COEFFICIENTS_MASK;
748 val |= PLLE_SS_DISABLE; 757 val |= PLLE_SS_DISABLE;
749 writel(val, pll->clk_base + PLLE_SS_CTRL); 758 writel(val, pll->clk_base + PLLE_SS_CTRL);
750 759
751 val |= pll_readl_base(pll); 760 val = pll_readl_base(pll);
752 val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE); 761 val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
753 pll_writel_base(val, pll); 762 pll_writel_base(val, pll);
754 763
@@ -1292,10 +1301,11 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1292 pll_writel(val, PLLE_SS_CTRL, pll); 1301 pll_writel(val, PLLE_SS_CTRL, pll);
1293 1302
1294 val = pll_readl_base(pll); 1303 val = pll_readl_base(pll);
1295 val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll)); 1304 val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
1296 val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT); 1305 divm_mask_shifted(pll));
1297 val |= sel.m << pll->params->div_nmp->divm_shift; 1306 val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
1298 val |= sel.n << pll->params->div_nmp->divn_shift; 1307 val |= sel.m << divm_shift(pll);
1308 val |= sel.n << divn_shift(pll);
1299 val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT; 1309 val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
1300 pll_writel_base(val, pll); 1310 pll_writel_base(val, pll);
1301 udelay(1); 1311 udelay(1);
@@ -1410,6 +1420,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
1410 return clk; 1420 return clk;
1411} 1421}
1412 1422
1423static struct div_nmp pll_e_nmp = {
1424 .divn_shift = PLLE_BASE_DIVN_SHIFT,
1425 .divn_width = PLLE_BASE_DIVN_WIDTH,
1426 .divm_shift = PLLE_BASE_DIVM_SHIFT,
1427 .divm_width = PLLE_BASE_DIVM_WIDTH,
1428 .divp_shift = PLLE_BASE_DIVP_SHIFT,
1429 .divp_width = PLLE_BASE_DIVP_WIDTH,
1430};
1431
1413struct clk *tegra_clk_register_plle(const char *name, const char *parent_name, 1432struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
1414 void __iomem *clk_base, void __iomem *pmc, 1433 void __iomem *clk_base, void __iomem *pmc,
1415 unsigned long flags, struct tegra_clk_pll_params *pll_params, 1434 unsigned long flags, struct tegra_clk_pll_params *pll_params,
@@ -1420,6 +1439,10 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
1420 1439
1421 pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS; 1440 pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
1422 pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE; 1441 pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
1442
1443 if (!pll_params->div_nmp)
1444 pll_params->div_nmp = &pll_e_nmp;
1445
1423 pll = _tegra_init_pll(clk_base, pmc, pll_params, lock); 1446 pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
1424 if (IS_ERR(pll)) 1447 if (IS_ERR(pll))
1425 return ERR_CAST(pll); 1448 return ERR_CAST(pll);
@@ -1557,9 +1580,8 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
1557 int m; 1580 int m;
1558 1581
1559 m = _pll_fixed_mdiv(pll_params, parent_rate); 1582 m = _pll_fixed_mdiv(pll_params, parent_rate);
1560 val = m << PLL_BASE_DIVM_SHIFT; 1583 val = m << divm_shift(pll);
1561 val |= (pll_params->vco_min / parent_rate) 1584 val |= (pll_params->vco_min / parent_rate) << divn_shift(pll);
1562 << PLL_BASE_DIVN_SHIFT;
1563 pll_writel_base(val, pll); 1585 pll_writel_base(val, pll);
1564 } 1586 }
1565 1587
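
Note: the recurring change in this file is that divm_mask() and friends are unshifted field masks, so expressions like `val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll))` cleared overlapping low-order bits instead of the M/N/P fields; the new *_mask_shifted() helpers move each mask into field position before clearing. Likewise, PLLE_BASE_DIVCML_WIDTH (a bit count) was being used where a bit mask belonged. The bug class in miniature:

    #include <assert.h>
    #include <stdint.h>

    #define DIVN_MASK  0xff    /* unshifted field mask */
    #define DIVN_SHIFT 8

    int main(void)
    {
        uint32_t val = 0xffffffff;
        assert((val & ~DIVN_MASK)                 == 0xffffff00); /* wrong bits  */
        assert((val & ~(DIVN_MASK << DIVN_SHIFT)) == 0xffff00ff); /* the field   */
        return 0;
    }
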
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 00fdd1170284..a8d7ea14f183 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
100 || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) { 100 || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
101 __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); 101 __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
102 __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); 102 __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
103 clk_disable_unprepare(tcd->clk); 103 clk_disable(tcd->clk);
104 } 104 }
105 105
106 switch (m) { 106 switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
109 * of oneshot, we get lower overhead and improved accuracy. 109 * of oneshot, we get lower overhead and improved accuracy.
110 */ 110 */
111 case CLOCK_EVT_MODE_PERIODIC: 111 case CLOCK_EVT_MODE_PERIODIC:
112 clk_prepare_enable(tcd->clk); 112 clk_enable(tcd->clk);
113 113
114 /* slow clock, count up to RC, then irq and restart */ 114 /* slow clock, count up to RC, then irq and restart */
115 __raw_writel(timer_clock 115 __raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
126 break; 126 break;
127 127
128 case CLOCK_EVT_MODE_ONESHOT: 128 case CLOCK_EVT_MODE_ONESHOT:
129 clk_prepare_enable(tcd->clk); 129 clk_enable(tcd->clk);
130 130
131 /* slow clock, count up to RC, then irq and stop */ 131 /* slow clock, count up to RC, then irq and stop */
132 __raw_writel(timer_clock | ATMEL_TC_CPCSTOP 132 __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -194,7 +194,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
194 ret = clk_prepare_enable(t2_clk); 194 ret = clk_prepare_enable(t2_clk);
195 if (ret) 195 if (ret)
196 return ret; 196 return ret;
197 clk_disable_unprepare(t2_clk); 197 clk_disable(t2_clk);
198 198
199 clkevt.regs = tc->regs; 199 clkevt.regs = tc->regs;
200 clkevt.clk = t2_clk; 200 clkevt.clk = t2_clk;
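
Note: the substitutions above lean on the clk framework's prepare/enable split: clk_prepare()/clk_unprepare() may sleep and must run in process context, while clk_enable()/clk_disable() are atomic. The clockevent set-mode callback runs in atomic context, so the timer clock is prepared once during setup and only gated or ungated afterwards. The shape of the pattern, as a sketch:

    /* setup, process context: prepare (may sleep) and leave prepared */
    ret = clk_prepare_enable(t2_clk);
    if (ret)
        return ret;
    clk_disable(t2_clk);        /* gated, but still prepared */

    /* mode callback, atomic context: only the atomic half is legal */
    clk_enable(t2_clk);
    clk_disable(t2_clk);
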
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index b52e1c078b99..7f5374dbefd9 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -199,7 +199,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
199 199
200 action->dev_id = ce; 200 action->dev_id = ce;
201 BUG_ON(setup_irq(ce->irq, action)); 201 BUG_ON(setup_irq(ce->irq, action));
202 irq_set_affinity(action->irq, cpumask_of(cpu)); 202 irq_force_affinity(action->irq, cpumask_of(cpu));
203 203
204 clockevents_register_device(ce); 204 clockevents_register_device(ce);
205 return 0; 205 return 0;
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 1bf6bbac3e03..09b9129c7bd3 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -130,7 +130,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
130 return -ENOENT; 130 return -ENOENT;
131 } 131 }
132 132
133 cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0"); 133 cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
134 if (IS_ERR(cpu_reg)) { 134 if (IS_ERR(cpu_reg)) {
135 /* 135 /*
136 * If cpu0 regulator supply node is present, but regulator is 136 * If cpu0 regulator supply node is present, but regulator is
@@ -145,23 +145,23 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
145 PTR_ERR(cpu_reg)); 145 PTR_ERR(cpu_reg));
146 } 146 }
147 147
148 cpu_clk = devm_clk_get(cpu_dev, NULL); 148 cpu_clk = clk_get(cpu_dev, NULL);
149 if (IS_ERR(cpu_clk)) { 149 if (IS_ERR(cpu_clk)) {
150 ret = PTR_ERR(cpu_clk); 150 ret = PTR_ERR(cpu_clk);
151 pr_err("failed to get cpu0 clock: %d\n", ret); 151 pr_err("failed to get cpu0 clock: %d\n", ret);
152 goto out_put_node; 152 goto out_put_reg;
153 } 153 }
154 154
155 ret = of_init_opp_table(cpu_dev); 155 ret = of_init_opp_table(cpu_dev);
156 if (ret) { 156 if (ret) {
157 pr_err("failed to init OPP table: %d\n", ret); 157 pr_err("failed to init OPP table: %d\n", ret);
158 goto out_put_node; 158 goto out_put_clk;
159 } 159 }
160 160
161 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); 161 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
162 if (ret) { 162 if (ret) {
163 pr_err("failed to init cpufreq table: %d\n", ret); 163 pr_err("failed to init cpufreq table: %d\n", ret);
164 goto out_put_node; 164 goto out_put_clk;
165 } 165 }
166 166
167 of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance); 167 of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
@@ -216,6 +216,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
216 216
217out_free_table: 217out_free_table:
218 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); 218 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
219out_put_clk:
220 if (!IS_ERR(cpu_clk))
221 clk_put(cpu_clk);
222out_put_reg:
223 if (!IS_ERR(cpu_reg))
224 regulator_put(cpu_reg);
219out_put_node: 225out_put_node:
220 of_node_put(np); 226 of_node_put(np);
221 return ret; 227 return ret;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index ba43991ba98a..e1c6433b16e0 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -366,6 +366,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
366 break; 366 break;
367 367
368 case CPUFREQ_GOV_LIMITS: 368 case CPUFREQ_GOV_LIMITS:
369 mutex_lock(&dbs_data->mutex);
370 if (!cpu_cdbs->cur_policy) {
371 mutex_unlock(&dbs_data->mutex);
372 break;
373 }
369 mutex_lock(&cpu_cdbs->timer_mutex); 374 mutex_lock(&cpu_cdbs->timer_mutex);
370 if (policy->max < cpu_cdbs->cur_policy->cur) 375 if (policy->max < cpu_cdbs->cur_policy->cur)
371 __cpufreq_driver_target(cpu_cdbs->cur_policy, 376 __cpufreq_driver_target(cpu_cdbs->cur_policy,
@@ -375,6 +380,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
375 policy->min, CPUFREQ_RELATION_L); 380 policy->min, CPUFREQ_RELATION_L);
376 dbs_check_cpu(dbs_data, cpu); 381 dbs_check_cpu(dbs_data, cpu);
377 mutex_unlock(&cpu_cdbs->timer_mutex); 382 mutex_unlock(&cpu_cdbs->timer_mutex);
383 mutex_unlock(&dbs_data->mutex);
378 break; 384 break;
379 } 385 }
380 return 0; 386 return 0;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a886713937fd..d5d30ed863ce 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
1009 dma_unmap_page(dev, unmap->addr[i], unmap->len, 1009 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1010 DMA_BIDIRECTIONAL); 1010 DMA_BIDIRECTIONAL);
1011 } 1011 }
1012 cnt = unmap->map_cnt;
1012 mempool_free(unmap, __get_unmap_pool(cnt)->pool); 1013 mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1013} 1014}
1014 1015
@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1074 memset(unmap, 0, sizeof(*unmap)); 1075 memset(unmap, 0, sizeof(*unmap));
1075 kref_init(&unmap->kref); 1076 kref_init(&unmap->kref);
1076 unmap->dev = dev; 1077 unmap->dev = dev;
1078 unmap->map_cnt = nr;
1077 1079
1078 return unmap; 1080 return unmap;
1079} 1081}
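
Note: the unmap structure is returned to one of several mempools selected by its mapping count, but `cnt` doubles as a loop counter earlier in dmaengine_unmap() and no longer holds that count when mempool_free() runs, so the object could be handed back to the wrong (too-small) pool. Recording the count at allocation time and reloading it restores the alloc/free pairing:

    unmap->map_cnt = nr;                  /* at allocation: remember the pool key   */
    /* ... */
    cnt = unmap->map_cnt;                 /* at free: reload rather than recompute  */
    mempool_free(unmap, __get_unmap_pool(cnt)->pool);
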
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cfdbb92aae1d..7a740769c2fa 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1548 /* Disable BLOCK interrupts as well */ 1548 /* Disable BLOCK interrupts as well */
1549 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 1549 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1550 1550
1551 err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
1552 IRQF_SHARED, "dw_dmac", dw);
1553 if (err)
1554 return err;
1555
1556 /* Create a pool of consistent memory blocks for hardware descriptors */ 1551 /* Create a pool of consistent memory blocks for hardware descriptors */
1557 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, 1552 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
1558 sizeof(struct dw_desc), 4, 0); 1553 sizeof(struct dw_desc), 4, 0);
@@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1563 1558
1564 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); 1559 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1565 1560
1561 err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
1562 "dw_dmac", dw);
1563 if (err)
1564 return err;
1565
1566 INIT_LIST_HEAD(&dw->dma.channels); 1566 INIT_LIST_HEAD(&dw->dma.channels);
1567 for (i = 0; i < nr_channels; i++) { 1567 for (i = 0; i < nr_channels; i++) {
1568 struct dw_dma_chan *dwc = &dw->chan[i]; 1568 struct dw_dma_chan *dwc = &dw->chan[i];
@@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip)
1667 dw_dma_off(dw); 1667 dw_dma_off(dw);
1668 dma_async_device_unregister(&dw->dma); 1668 dma_async_device_unregister(&dw->dma);
1669 1669
1670 free_irq(chip->irq, dw);
1670 tasklet_kill(&dw->tasklet); 1671 tasklet_kill(&dw->tasklet);
1671 1672
1672 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, 1673 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
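
Note: with IRQF_SHARED the handler may be invoked the moment request_irq() returns, since another device on the shared line can already be raising interrupts, and dw_dma_interrupt() schedules dw->tasklet. The probe hunk therefore requests the IRQ only after tasklet_init(); the remove hunk mirrors it, freeing the IRQ before tasklet_kill() so a late interrupt cannot re-schedule the tasklet being waited out (which the old devm-managed IRQ, released only after remove() finished, could). The mirrored ordering:

    /* probe: everything the handler touches exists before the IRQ */
    tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
    err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, "dw_dmac", dw);

    /* remove: exact reverse */
    free_irq(chip->irq, dw);      /* handler can no longer run...          */
    tasklet_kill(&dw->tasklet);   /* ...so no late interrupt can undo the
                                     kill by re-scheduling the tasklet     */
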
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 766b68ed505c..394cbc5c93e3 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
191 191
192static void mv_chan_activate(struct mv_xor_chan *chan) 192static void mv_chan_activate(struct mv_xor_chan *chan)
193{ 193{
194 u32 activation;
195
196 dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); 194 dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
197 activation = readl_relaxed(XOR_ACTIVATION(chan)); 195
198 activation |= 0x1; 196 /* writel ensures all descriptors are flushed before activation */
199 writel_relaxed(activation, XOR_ACTIVATION(chan)); 197 writel(BIT(0), XOR_ACTIVATION(chan));
200} 198}
201 199
202static char mv_chan_is_busy(struct mv_xor_chan *chan) 200static char mv_chan_is_busy(struct mv_xor_chan *chan)
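
Note: writel_relaxed() provides no ordering against normal memory accesses, so the XOR engine could observe the activation write before the DMA descriptors it is about to fetch were visible in DRAM; the read-modify-write was also unnecessary, since only bit 0 of the activation register matters. A plain writel() on ARM implies the write barrier, which is exactly what the new comment records:

    /* writel() = barrier + store on ARM: descriptor writes are
       flushed to memory before the engine is kicked. */
    writel(BIT(0), XOR_ACTIVATION(chan));
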
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index ab26d46bbe15..5ebdfbc1051e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -113,11 +113,9 @@ struct sa11x0_dma_phy {
113 struct sa11x0_dma_desc *txd_load; 113 struct sa11x0_dma_desc *txd_load;
114 unsigned sg_done; 114 unsigned sg_done;
115 struct sa11x0_dma_desc *txd_done; 115 struct sa11x0_dma_desc *txd_done;
116#ifdef CONFIG_PM_SLEEP
117 u32 dbs[2]; 116 u32 dbs[2];
118 u32 dbt[2]; 117 u32 dbt[2];
119 u32 dcsr; 118 u32 dcsr;
120#endif
121}; 119};
122 120
123struct sa11x0_dma_dev { 121struct sa11x0_dma_dev {
@@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
984 return 0; 982 return 0;
985} 983}
986 984
987#ifdef CONFIG_PM_SLEEP
988static int sa11x0_dma_suspend(struct device *dev) 985static int sa11x0_dma_suspend(struct device *dev)
989{ 986{
990 struct sa11x0_dma_dev *d = dev_get_drvdata(dev); 987 struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
@@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev)
1054 1051
1055 return 0; 1052 return 0;
1056} 1053}
1057#endif
1058 1054
1059static const struct dev_pm_ops sa11x0_dma_pm_ops = { 1055static const struct dev_pm_ops sa11x0_dma_pm_ops = {
1060 .suspend_noirq = sa11x0_dma_suspend, 1056 .suspend_noirq = sa11x0_dma_suspend,
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index c98764aeeec6..f477308b6e9c 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -237,8 +237,8 @@ static inline bool is_next_generation(int new_generation, int old_generation)
237 237
238#define LOCAL_BUS 0xffc0 238#define LOCAL_BUS 0xffc0
239 239
240/* arbitrarily chosen maximum range for physical DMA: 128 TB */ 240/* OHCI-1394's default upper bound for physical DMA: 4 GB */
241#define FW_MAX_PHYSICAL_RANGE (128ULL << 40) 241#define FW_MAX_PHYSICAL_RANGE (1ULL << 32)
242 242
243void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); 243void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
244void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); 244void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8db663219560..586f2f7f6993 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3716,7 +3716,7 @@ static int pci_probe(struct pci_dev *dev,
3716 version >> 16, version & 0xff, ohci->card.index, 3716 version >> 16, version & 0xff, ohci->card.index,
3717 ohci->n_ir, ohci->n_it, ohci->quirks, 3717 ohci->n_ir, ohci->n_it, ohci->quirks,
3718 reg_read(ohci, OHCI1394_PhyUpperBound) ? 3718 reg_read(ohci, OHCI1394_PhyUpperBound) ?
3719 ", >4 GB phys DMA" : ""); 3719 ", physUB" : "");
3720 3720
3721 return 0; 3721 return 0;
3722 3722
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 96177eec0a0e..eedb023af27d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev)
1833 flush_workqueue(dev_priv->wq); 1833 flush_workqueue(dev_priv->wq);
1834 1834
1835 mutex_lock(&dev->struct_mutex); 1835 mutex_lock(&dev->struct_mutex);
1836 i915_gem_free_all_phys_object(dev);
1837 i915_gem_cleanup_ringbuffer(dev); 1836 i915_gem_cleanup_ringbuffer(dev);
1838 i915_gem_context_fini(dev); 1837 i915_gem_context_fini(dev);
1839 WARN_ON(dev_priv->mm.aliasing_ppgtt); 1838 WARN_ON(dev_priv->mm.aliasing_ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 108e1ec2fa4b..388c028e223c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -242,18 +242,6 @@ struct intel_ddi_plls {
242#define WATCH_LISTS 0 242#define WATCH_LISTS 0
243#define WATCH_GTT 0 243#define WATCH_GTT 0
244 244
245#define I915_GEM_PHYS_CURSOR_0 1
246#define I915_GEM_PHYS_CURSOR_1 2
247#define I915_GEM_PHYS_OVERLAY_REGS 3
248#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
249
250struct drm_i915_gem_phys_object {
251 int id;
252 struct page **page_list;
253 drm_dma_handle_t *handle;
254 struct drm_i915_gem_object *cur_obj;
255};
256
257struct opregion_header; 245struct opregion_header;
258struct opregion_acpi; 246struct opregion_acpi;
259struct opregion_swsci; 247struct opregion_swsci;
@@ -1187,9 +1175,6 @@ struct i915_gem_mm {
1187 /** Bit 6 swizzling required for Y tiling */ 1175 /** Bit 6 swizzling required for Y tiling */
1188 uint32_t bit_6_swizzle_y; 1176 uint32_t bit_6_swizzle_y;
1189 1177
1190 /* storage for physical objects */
1191 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
1192
1193 /* accounting, useful for userland debugging */ 1178 /* accounting, useful for userland debugging */
1194 spinlock_t object_stat_lock; 1179 spinlock_t object_stat_lock;
1195 size_t object_memory; 1180 size_t object_memory;
@@ -1769,7 +1754,7 @@ struct drm_i915_gem_object {
1769 struct drm_file *pin_filp; 1754 struct drm_file *pin_filp;
1770 1755
1771 /** for phy allocated objects */ 1756 /** for phy allocated objects */
1772 struct drm_i915_gem_phys_object *phys_obj; 1757 drm_dma_handle_t *phys_handle;
1773}; 1758};
1774 1759
1775#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 1760#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -2204,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
2204#define PIN_MAPPABLE 0x1 2189#define PIN_MAPPABLE 0x1
2205#define PIN_NONBLOCK 0x2 2190#define PIN_NONBLOCK 0x2
2206#define PIN_GLOBAL 0x4 2191#define PIN_GLOBAL 0x4
2192#define PIN_OFFSET_BIAS 0x8
2193#define PIN_OFFSET_MASK (~4095)
2207int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 2194int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
2208 struct i915_address_space *vm, 2195 struct i915_address_space *vm,
2209 uint32_t alignment, 2196 uint32_t alignment,
2210 unsigned flags); 2197 uint64_t flags);
2211int __must_check i915_vma_unbind(struct i915_vma *vma); 2198int __must_check i915_vma_unbind(struct i915_vma *vma);
2212int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2199int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
2213void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2200void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2334,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2334 u32 alignment, 2321 u32 alignment,
2335 struct intel_ring_buffer *pipelined); 2322 struct intel_ring_buffer *pipelined);
2336void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); 2323void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
2337int i915_gem_attach_phys_object(struct drm_device *dev, 2324int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
2338 struct drm_i915_gem_object *obj,
2339 int id,
2340 int align); 2325 int align);
2341void i915_gem_detach_phys_object(struct drm_device *dev,
2342 struct drm_i915_gem_object *obj);
2343void i915_gem_free_all_phys_object(struct drm_device *dev);
2344int i915_gem_open(struct drm_device *dev, struct drm_file *file); 2326int i915_gem_open(struct drm_device *dev, struct drm_file *file);
2345void i915_gem_release(struct drm_device *dev, struct drm_file *file); 2327void i915_gem_release(struct drm_device *dev, struct drm_file *file);
2346 2328
@@ -2465,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
2465 int min_size, 2447 int min_size,
2466 unsigned alignment, 2448 unsigned alignment,
2467 unsigned cache_level, 2449 unsigned cache_level,
2450 unsigned long start,
2451 unsigned long end,
2468 unsigned flags); 2452 unsigned flags);
2469int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 2453int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
2470int i915_gem_evict_everything(struct drm_device *dev); 2454int i915_gem_evict_everything(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2871ce75f438..3326770c9ed2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
43static __must_check int 43static __must_check int
44i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, 44i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
45 bool readonly); 45 bool readonly);
46static int i915_gem_phys_pwrite(struct drm_device *dev,
47 struct drm_i915_gem_object *obj,
48 struct drm_i915_gem_pwrite *args,
49 struct drm_file *file);
50 46
51static void i915_gem_write_fence(struct drm_device *dev, int reg, 47static void i915_gem_write_fence(struct drm_device *dev, int reg,
52 struct drm_i915_gem_object *obj); 48 struct drm_i915_gem_object *obj);
@@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+	drm_dma_handle_t *phys = obj->phys_handle;
+
+	if (!phys)
+		return;
+
+	if (obj->madv == I915_MADV_WILLNEED) {
+		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+		char *vaddr = phys->vaddr;
+		int i;
+
+		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+			struct page *page = shmem_read_mapping_page(mapping, i);
+			if (!IS_ERR(page)) {
+				char *dst = kmap_atomic(page);
+				memcpy(dst, vaddr, PAGE_SIZE);
+				drm_clflush_virt_range(dst, PAGE_SIZE);
+				kunmap_atomic(dst);
+
+				set_page_dirty(page);
+				mark_page_accessed(page);
+				page_cache_release(page);
+			}
+			vaddr += PAGE_SIZE;
+		}
+		i915_gem_chipset_flush(obj->base.dev);
+	}
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+	drm_pci_free(obj->base.dev, phys);
+	obj->phys_handle = NULL;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+			    int align)
+{
+	drm_dma_handle_t *phys;
+	struct address_space *mapping;
+	char *vaddr;
+	int i;
+
+	if (obj->phys_handle) {
+		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
+			return -EBUSY;
+
+		return 0;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
+	if (obj->base.filp == NULL)
+		return -EINVAL;
+
+	/* create a new object */
+	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+	if (!phys)
+		return -ENOMEM;
+
+	vaddr = phys->vaddr;
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		char *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+			drm_pci_free(obj->base.dev, phys);
+			return PTR_ERR(page);
+		}
+
+		src = kmap_atomic(page);
+		memcpy(vaddr, src, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+
+		vaddr += PAGE_SIZE;
+	}
+
+	obj->phys_handle = phys;
+	return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	struct drm_device *dev = obj->base.dev;
+	void *vaddr = obj->phys_handle->vaddr + args->offset;
+	char __user *user_data = to_user_ptr(args->data_ptr);
+
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
+
+	i915_gem_chipset_flush(dev);
+	return 0;
+}
+
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->phys_obj) {
-		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+	if (obj->phys_handle) {
+		ret = i915_gem_phys_pwrite(obj, args, file);
 		goto out;
 	}
 
@@ -3208,12 +3326,14 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   unsigned alignment,
-			   unsigned flags)
+			   uint64_t flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	size_t gtt_max =
+	unsigned long start =
+		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+	unsigned long end =
 		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
@@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
 	 */
-	if (obj->base.size > gtt_max) {
-		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+	if (obj->base.size > end) {
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
 			  obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
-			  gtt_max);
+			  end);
 		return ERR_PTR(-E2BIG);
 	}
 
@@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 search_free:
 	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 						  size, alignment,
-						  obj->cache_level, 0, gtt_max,
+						  obj->cache_level,
+						  start, end,
 						  DRM_MM_SEARCH_DEFAULT,
 						  DRM_MM_CREATE_DEFAULT);
 	if (ret) {
 		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level, flags);
+					       obj->cache_level,
+					       start, end,
+					       flags);
 		if (ret == 0)
 			goto search_free;
 
@@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	return ret;
 }
 
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (alignment &&
+	    vma->node.start & (alignment - 1))
+		return true;
+
+	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+		return true;
+
+	if (flags & PIN_OFFSET_BIAS &&
+	    vma->node.start < (flags & PIN_OFFSET_MASK))
+		return true;
+
+	return false;
+}
+
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
 		    uint32_t alignment,
-		    unsigned flags)
+		    uint64_t flags)
 {
 	struct i915_vma *vma;
 	int ret;
@@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 		return -EBUSY;
 
-	if ((alignment &&
-	     vma->node.start & (alignment - 1)) ||
-	    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+	if (i915_vma_misplaced(vma, alignment, flags)) {
 		WARN(vma->pin_count,
 		     "bo is already pinned with incorrect alignment:"
 		     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 		     " obj->map_and_fenceable=%d\n",
 		     i915_gem_obj_offset(obj, vm), alignment,
-		     flags & PIN_MAPPABLE,
+		     !!(flags & PIN_MAPPABLE),
 		     obj->map_and_fenceable);
 		ret = i915_vma_unbind(vma);
 		if (ret)
@@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
-	if (obj->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
 		int ret;
 
@@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		}
 	}
 
+	i915_gem_object_detach_phys(obj);
+
 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
 	 * before progressing. */
 	if (obj->stolen)
@@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev)
 	register_shrinker(&dev_priv->mm.inactive_shrinker);
 }
 
-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-static int i915_gem_init_phys_object(struct drm_device *dev,
-				     int id, int size, int align)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-	int ret;
-
-	if (dev_priv->mm.phys_objs[id - 1] || !size)
-		return 0;
-
-	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
-	if (!phys_obj)
-		return -ENOMEM;
-
-	phys_obj->id = id;
-
-	phys_obj->handle = drm_pci_alloc(dev, size, align);
-	if (!phys_obj->handle) {
-		ret = -ENOMEM;
-		goto kfree_obj;
-	}
-#ifdef CONFIG_X86
-	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-
-	dev_priv->mm.phys_objs[id - 1] = phys_obj;
-
-	return 0;
-kfree_obj:
-	kfree(phys_obj);
-	return ret;
-}
-
-static void i915_gem_free_phys_object(struct drm_device *dev, int id)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-
-	if (!dev_priv->mm.phys_objs[id - 1])
-		return;
-
-	phys_obj = dev_priv->mm.phys_objs[id - 1];
-	if (phys_obj->cur_obj) {
-		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
-	}
-
-#ifdef CONFIG_X86
-	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-	drm_pci_free(dev, phys_obj->handle);
-	kfree(phys_obj);
-	dev_priv->mm.phys_objs[id - 1] = NULL;
-}
-
-void i915_gem_free_all_phys_object(struct drm_device *dev)
-{
-	int i;
-
-	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
-		i915_gem_free_phys_object(dev, i);
-}
-
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	char *vaddr;
-	int i;
-	int page_count;
-
-	if (!obj->phys_obj)
-		return;
-	vaddr = obj->phys_obj->handle->vaddr;
-
-	page_count = obj->base.size / PAGE_SIZE;
-	for (i = 0; i < page_count; i++) {
-		struct page *page = shmem_read_mapping_page(mapping, i);
-		if (!IS_ERR(page)) {
-			char *dst = kmap_atomic(page);
-			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
-			kunmap_atomic(dst);
-
-			drm_clflush_pages(&page, 1);
-
-			set_page_dirty(page);
-			mark_page_accessed(page);
-			page_cache_release(page);
-		}
-	}
-	i915_gem_chipset_flush(dev);
-
-	obj->phys_obj->cur_obj = NULL;
-	obj->phys_obj = NULL;
-}
-
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj,
-			    int id,
-			    int align)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-	int page_count;
-	int i;
-
-	if (id > I915_MAX_PHYS_OBJECT)
-		return -EINVAL;
-
-	if (obj->phys_obj) {
-		if (obj->phys_obj->id == id)
-			return 0;
-		i915_gem_detach_phys_object(dev, obj);
-	}
-
-	/* create a new object */
-	if (!dev_priv->mm.phys_objs[id - 1]) {
-		ret = i915_gem_init_phys_object(dev, id,
-						obj->base.size, align);
-		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %zu\n",
-				  id, obj->base.size);
-			return ret;
-		}
-	}
-
-	/* bind to the object */
-	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
-	obj->phys_obj->cur_obj = obj;
-
-	page_count = obj->base.size / PAGE_SIZE;
-
-	for (i = 0; i < page_count; i++) {
-		struct page *page;
-		char *dst, *src;
-
-		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-
-		src = kmap_atomic(page);
-		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(src);
-
-		mark_page_accessed(page);
-		page_cache_release(page);
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_phys_pwrite(struct drm_device *dev,
-		     struct drm_i915_gem_object *obj,
-		     struct drm_i915_gem_pwrite *args,
-		     struct drm_file *file_priv)
-{
-	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-	char __user *user_data = to_user_ptr(args->data_ptr);
-
-	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-		unsigned long unwritten;
-
-		/* The physical object once assigned is fixed for the lifetime
-		 * of the obj, so we can safely drop the lock and continue
-		 * to access vaddr.
-		 */
-		mutex_unlock(&dev->struct_mutex);
-		unwritten = copy_from_user(vaddr, user_data, args->size);
-		mutex_lock(&dev->struct_mutex);
-		if (unwritten)
-			return -EFAULT;
-	}
-
-	i915_gem_chipset_flush(dev);
-	return 0;
-}
-
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
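A note on the replacement interface: the per-device phys object table is gone, and the contiguous DMA buffer now hangs directly off the GEM object as obj->phys_handle. A minimal usage sketch in the style of the cursor code updated later in this patch (the 256-byte alignment is illustrative):

	/* Give the object a physically contiguous backing store, then
	 * fetch the bus address the display hardware should scan out. */
	ret = i915_gem_object_attach_phys(obj, 256);
	if (ret == 0)
		addr = obj->phys_handle->busaddr;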
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 75fca63dc8c1..bbf4b12d842e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 			 int min_size, unsigned alignment, unsigned cache_level,
+			 unsigned long start, unsigned long end,
 			 unsigned flags)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
 	struct i915_vma *vma;
 	int ret = 0;
@@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	 */
 
 	INIT_LIST_HEAD(&unwind_list);
-	if (flags & PIN_MAPPABLE) {
-		BUG_ON(!i915_is_ggtt(vm));
+	if (start != 0 || end != vm->total) {
 		drm_mm_init_scan_with_range(&vm->mm, min_size,
-					    alignment, cache_level, 0,
-					    dev_priv->gtt.mappable_end);
+					    alignment, cache_level,
+					    start, end);
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2c9d9cbaf653..20fef6c50267 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,9 @@
 
 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+
+#define BATCH_OFFSET_BIAS (256*1024)
 
 struct eb_vmas {
 	struct list_head vmas;
@@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence;
-	unsigned flags;
+	uint64_t flags;
 	int ret;
 
 	flags = 0;
@@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 		flags |= PIN_GLOBAL;
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 
 	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
 	if (ret)
@@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }
 
+static bool
+eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+	bool need_fence, need_mappable;
+
+	need_fence =
+		has_fenced_gpu_access &&
+		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		obj->tiling_mode != I915_TILING_NONE;
+	need_mappable = need_fence || need_reloc_mappable(vma);
+
+	WARN_ON((need_mappable || need_fence) &&
+		!i915_is_ggtt(vma->vm));
+
+	if (entry->alignment &&
+	    vma->node.start & (entry->alignment - 1))
+		return true;
+
+	if (need_mappable && !obj->map_and_fenceable)
+		return true;
+
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+	    vma->node.start < BATCH_OFFSET_BIAS)
+		return true;
+
+	return false;
+}
+
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct list_head *vmas,
@@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 
 	/* Unbind any ill-fitting objects or pin. */
 	list_for_each_entry(vma, vmas, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-		bool need_fence, need_mappable;
-
-		obj = vma->obj;
-
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
-		need_fence =
-			has_fenced_gpu_access &&
-			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-			obj->tiling_mode != I915_TILING_NONE;
-		need_mappable = need_fence || need_reloc_mappable(vma);
-
-		WARN_ON((need_mappable || need_fence) &&
-			!i915_is_ggtt(vma->vm));
-
-		if ((entry->alignment &&
-		     vma->node.start & (entry->alignment - 1)) ||
-		    (need_mappable && !obj->map_and_fenceable))
+		if (eb_vma_misplaced(vma, has_fenced_gpu_access))
 			ret = i915_vma_unbind(vma);
 		else
 			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		 * relocations were valid.
 		 */
 		for (j = 0; j < exec[i].relocation_count; j++) {
-			if (copy_to_user(&user_relocs[j].presumed_offset,
-					 &invalid_offset,
-					 sizeof(invalid_offset))) {
+			if (__copy_to_user(&user_relocs[j].presumed_offset,
+					   &invalid_offset,
+					   sizeof(invalid_offset))) {
 				ret = -EFAULT;
 				mutex_lock(&dev->struct_mutex);
 				goto err;
@@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 	return 0;
 }
 
+static struct drm_i915_gem_object *
+eb_get_batch(struct eb_vmas *eb)
+{
+	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+	return vma->obj;
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_file *file,
@@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 
 	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
+	batch_obj = eb_get_batch(eb);
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
 	if (!ret) {
+		struct drm_i915_gem_exec_object __user *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+
 		/* Copy the new buffer offsets back to the user's exec list. */
-		for (i = 0; i < args->buffer_count; i++)
-			exec_list[i].offset = exec2_list[i].offset;
-		/* ... and back out to userspace */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user (%d)\n",
+					  args->buffer_count, ret);
+				break;
+			}
 		}
 	}
 
@@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
 	if (!ret) {
 		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec2_list,
-				   sizeof(*exec2_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		struct drm_i915_gem_exec_object2 *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+		int i;
+
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user\n",
+					  args->buffer_count);
+				break;
+			}
 		}
 	}
 
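For reference, the bias plumbing above packs a minimum GTT offset into the now 64-bit pin flags: the offset bits travel alongside the PIN_OFFSET_BIAS bit and are recovered with PIN_OFFSET_MASK, whose definition lives outside this patch. A sketch of the round trip, assuming PIN_OFFSET_MASK covers the offset bits:

	uint64_t flags = 0;

	/* Request that the batch land at or above 256 KiB. */
	flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

	/* i915_vma_misplaced() then rejects placements below the bias: */
	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;	/* misplaced: unbind and rebind higher */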
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 154b0f8bb88d..5deb22864c52 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1089,7 +1089,9 @@ alloc:
 	if (ret == -ENOSPC && !retried) {
 		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
 					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
-					       I915_CACHE_NONE, 0);
+					       I915_CACHE_NONE,
+					       0, dev_priv->gtt.base.total,
+					       0);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 48aa516a1ac0..5b60e25baa32 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 		addr = i915_gem_obj_ggtt_offset(obj);
 	} else {
 		int align = IS_I830(dev) ? 16 * 1024 : 256;
-		ret = i915_gem_attach_phys_object(dev, obj,
-						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
-						  align);
+		ret = i915_gem_object_attach_phys(obj, align);
 		if (ret) {
 			DRM_DEBUG_KMS("failed to attach phys object\n");
 			goto fail_locked;
 		}
-		addr = obj->phys_obj->handle->busaddr;
+		addr = obj->phys_handle->busaddr;
 	}
 
 	if (IS_GEN2(dev))
@@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
  finish:
 	if (intel_crtc->cursor_bo) {
-		if (INTEL_INFO(dev)->cursor_needs_physical) {
-			if (intel_crtc->cursor_bo != obj)
-				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
-		} else
+		if (!INTEL_INFO(dev)->cursor_needs_physical)
 			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
 		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
 	}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d8adc9104dca..129db0c7d835 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
 	struct overlay_registers __iomem *regs;
 
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
+		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
 	else
 		regs = io_mapping_map_wc(dev_priv->gtt.mappable,
 					 i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev)
 	overlay->reg_bo = reg_bo;
 
 	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
-		ret = i915_gem_attach_phys_object(dev, reg_bo,
-						  I915_GEM_PHYS_OVERLAY_REGS,
-						  PAGE_SIZE);
+		ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
 		if (ret) {
 			DRM_ERROR("failed to attach phys overlay regs\n");
 			goto out_free_bo;
 		}
-		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
+		overlay->flip_addr = reg_bo->phys_handle->busaddr;
 	} else {
 		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
 		if (ret) {
@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 		/* Cast to make sparse happy, but it's wc memory anyway, so
 		 * equivalent to the wc io mapping on X86. */
 		regs = (struct overlay_registers __iomem *)
-			overlay->reg_bo->phys_obj->handle->vaddr;
+			overlay->reg_bo->phys_handle->vaddr;
 	else
 		regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
 						i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
 	error->dovsta = I915_READ(DOVSTA);
 	error->isr = I915_READ(ISR);
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
+		error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
 	else
 		error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
 
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 2b6e0ebcc13a..41ecf8a60611 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		uint32_t domain = r->write_domain ?
 			r->write_domain : r->read_domains;
 
+		if (domain & RADEON_GEM_DOMAIN_CPU) {
+			DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
+				  "for command submission\n");
+			return -EINVAL;
+		}
+
 		p->relocs[i].domain = domain;
 		if (domain == RADEON_GEM_DOMAIN_VRAM)
 			domain |= RADEON_GEM_DOMAIN_GTT;
@@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 			return -EINVAL;
 
 		/* we only support VM on some SI+ rings */
-		if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
-		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
-			DRM_ERROR("Ring %d requires VM!\n", p->ring);
-			return -EINVAL;
+		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
+			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
+				DRM_ERROR("Ring %d requires VM!\n", p->ring);
+				return -EINVAL;
+			}
+		} else {
+			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
+				DRM_ERROR("VM not supported on ring %d!\n",
+					  p->ring);
+				return -EINVAL;
+			}
 		}
 	}
 
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0e770bbf7e29..14671406212f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1533,11 +1533,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 
 	radeon_restore_bios_scratch_regs(rdev);
 
-	if (fbcon) {
-		radeon_fbdev_set_suspend(rdev, 0);
-		console_unlock();
-	}
-
 	/* init dig PHYs, disp eng pll */
 	if (rdev->is_atom_bios) {
 		radeon_atom_encoder_init(rdev);
@@ -1562,6 +1557,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	}
 
 	drm_kms_helper_poll_enable(dev);
+
+	if (fbcon) {
+		radeon_fbdev_set_suspend(rdev, 0);
+		console_unlock();
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index f00dbbf4d806..356b733caafe 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -862,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
 			       unsigned *fb_div, unsigned *ref_div)
 {
 	/* limit reference * post divider to a maximum */
-	ref_div_max = min(128 / post_div, ref_div_max);
+	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
 
 	/* get matching reference and feedback divider */
 	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
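Worked example of the clamp: the old min(128 / post_div, ref_div_max) could never drop below 1 for any post_div up to 128, but the tightened 100 / post_div can. With post_div = 128, 100 / 128 = 0 in integer arithmetic, and a zero ref_div_max would then clamp *ref_div on the next line to 0, an invalid reference divider. The outer max(..., 1u) keeps it at least 1.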
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index d9ab99f47612..1f426696de36 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
 					  struct list_head *head)
 {
 	struct radeon_cs_reloc *list;
-	unsigned i, idx, size;
+	unsigned i, idx;
 
-	size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc);
-	list = kmalloc(size, GFP_KERNEL);
+	list = kmalloc_array(vm->max_pde_used + 1,
+			     sizeof(struct radeon_cs_reloc), GFP_KERNEL);
 	if (!list)
 		return NULL;
 
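Two things are going on in that change: the allocation is sized by vm->max_pde_used + 1 (the page directory entries actually in use) rather than the theoretical maximum from radeon_vm_num_pdes(), and kmalloc_array() replaces the open-coded multiplication. kmalloc_array(n, size, flags) is the overflow-checked counterpart of kmalloc(n * size, flags): it returns NULL instead of silently wrapping the product. The general pattern, as a sketch:

	/* n items of *list each; fails cleanly if n * sizeof(*list) overflows */
	list = kmalloc_array(n, sizeof(*list), GFP_KERNEL);
	if (!list)
		return NULL;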
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index bc196f49ec53..4af0da96c2e2 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1053,7 +1053,7 @@ config SENSORS_PC87427
 
 config SENSORS_NTC_THERMISTOR
 	tristate "NTC thermistor support"
-	depends on (!OF && !IIO) || (OF && IIO)
+	depends on !OF || IIO=n || IIO
 	help
 	  This driver supports NTC thermistors sensor reading and its
 	  interpretation. The driver can also monitor the temperature and
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8a17f01e8672..e76feb86a1d4 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -44,6 +44,7 @@ struct ntc_compensation {
 	unsigned int ohm;
 };
 
+/* Order matters, ntc_match references the entries by index */
 static const struct platform_device_id ntc_thermistor_id[] = {
 	{ "ncp15wb473", TYPE_NCPXXWB473 },
 	{ "ncp18wb473", TYPE_NCPXXWB473 },
@@ -141,7 +142,7 @@ struct ntc_data {
 	char name[PLATFORM_NAME_SIZE];
 };
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
 static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 {
 	struct iio_channel *channel = pdata->chan;
@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 
 static const struct of_device_id ntc_match[] = {
 	{ .compatible = "ntc,ncp15wb473",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+		.data = &ntc_thermistor_id[0] },
 	{ .compatible = "ntc,ncp18wb473",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+		.data = &ntc_thermistor_id[1] },
 	{ .compatible = "ntc,ncp21wb473",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+		.data = &ntc_thermistor_id[2] },
 	{ .compatible = "ntc,ncp03wb473",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+		.data = &ntc_thermistor_id[3] },
 	{ .compatible = "ntc,ncp15wl333",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWL333] },
+		.data = &ntc_thermistor_id[4] },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, ntc_match);
@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
 	return NULL;
 }
 
+#define ntc_match	NULL
+
 static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
 { }
 #endif
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 76842d7dc2e3..ffc7ad3a2c88 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -71,7 +71,7 @@ config KEYBOARD_ATKBD
 	default y
 	select SERIO
 	select SERIO_LIBPS2
-	select SERIO_I8042 if X86
+	select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
 	select SERIO_GSCPS2 if GSC
 	help
 	  Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index d8241ba0afa0..a15063bea700 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -111,6 +111,8 @@ struct pxa27x_keypad {
 	unsigned short keycodes[MAX_KEYPAD_KEYS];
 	int rotary_rel_code[2];
 
+	unsigned int row_shift;
+
 	/* state row bits of each column scan */
 	uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
 	uint32_t direct_key_state;
@@ -467,7 +469,8 @@ scan:
 			if ((bits_changed & (1 << row)) == 0)
 				continue;
 
-			code = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT);
+			code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
+
 			input_event(input_dev, EV_MSC, MSC_SCAN, code);
 			input_report_key(input_dev, keypad->keycodes[code],
 					 new_state[col] & (1 << row));
@@ -802,6 +805,8 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
 		goto failed_put_clk;
 	}
 
+	keypad->row_shift = get_count_order(pdata->matrix_key_cols);
+
 	if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) ||
 	    (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) {
 		input_dev->evbit[0] |= BIT_MASK(EV_REL);
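The scan-code packing here is (row << row_shift) + col, with row_shift = get_count_order(columns), i.e. the number of bits needed to hold any column index, so the shift now tracks the platform's actual column count instead of the fixed compile-time MATRIX_ROW_SHIFT. A worked example, assuming an 8-column matrix:

	keypad->row_shift = get_count_order(8);			/* ceil(log2(8)) = 3 */
	code = MATRIX_SCAN_CODE(2, 5, keypad->row_shift);	/* (2 << 3) + 5 = 21 */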
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index effa9c5f2c5c..6b8441f7bc32 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -17,7 +17,7 @@ config MOUSE_PS2
 	default y
 	select SERIO
 	select SERIO_LIBPS2
-	select SERIO_I8042 if X86
+	select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
 	select SERIO_GSCPS2 if GSC
 	help
 	  Say Y here if you have a PS/2 mouse connected to your system. This
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index d68d33fb5ac2..c5ec703c727e 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -117,6 +117,31 @@ void synaptics_reset(struct psmouse *psmouse)
 }
 
 #ifdef CONFIG_MOUSE_PS2_SYNAPTICS
+struct min_max_quirk {
+	const char * const *pnp_ids;
+	int x_min, x_max, y_min, y_max;
+};
+
+static const struct min_max_quirk min_max_pnpid_table[] = {
+	{
+		(const char * const []){"LEN0033", NULL},
+		1024, 5052, 2258, 4832
+	},
+	{
+		(const char * const []){"LEN0035", "LEN0042", NULL},
+		1232, 5710, 1156, 4696
+	},
+	{
+		(const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
+		1024, 5112, 2024, 4832
+	},
+	{
+		(const char * const []){"LEN2001", NULL},
+		1024, 5022, 2508, 4832
+	},
+	{ }
+};
+
 /* This list has been kindly provided by Synaptics. */
 static const char * const topbuttonpad_pnp_ids[] = {
 	"LEN0017",
@@ -129,7 +154,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
 	"LEN002D",
 	"LEN002E",
 	"LEN0033", /* Helix */
-	"LEN0034", /* T431s, T540, X1 Carbon 2nd */
+	"LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
 	"LEN0035", /* X240 */
 	"LEN0036", /* T440 */
 	"LEN0037",
@@ -142,7 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
 	"LEN0048",
 	"LEN0049",
 	"LEN2000",
-	"LEN2001",
+	"LEN2001", /* Edge E431 */
 	"LEN2002",
 	"LEN2003",
 	"LEN2004", /* L440 */
@@ -156,6 +181,18 @@ static const char * const topbuttonpad_pnp_ids[] = {
 	NULL
 };
 
+static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
+{
+	int i;
+
+	if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
+		for (i = 0; ids[i]; i++)
+			if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
+				return true;
+
+	return false;
+}
+
 /*****************************************************************************
  *	Synaptics communications functions
  ****************************************************************************/
@@ -304,20 +341,20 @@ static int synaptics_identify(struct psmouse *psmouse)
  * Resolution is left zero if touchpad does not support the query
  */
 
-static const int *quirk_min_max;
-
 static int synaptics_resolution(struct psmouse *psmouse)
 {
 	struct synaptics_data *priv = psmouse->private;
 	unsigned char resp[3];
+	int i;
 
-	if (quirk_min_max) {
-		priv->x_min = quirk_min_max[0];
-		priv->x_max = quirk_min_max[1];
-		priv->y_min = quirk_min_max[2];
-		priv->y_max = quirk_min_max[3];
-		return 0;
-	}
+	for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
+		if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
+			priv->x_min = min_max_pnpid_table[i].x_min;
+			priv->x_max = min_max_pnpid_table[i].x_max;
+			priv->y_min = min_max_pnpid_table[i].y_min;
+			priv->y_max = min_max_pnpid_table[i].y_max;
+			return 0;
+		}
 
 	if (SYN_ID_MAJOR(priv->identity) < 4)
 		return 0;
@@ -1365,17 +1402,8 @@ static void set_input_params(struct psmouse *psmouse,
 
 	if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
 		__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
-		/* See if this buttonpad has a top button area */
-		if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) {
-			for (i = 0; topbuttonpad_pnp_ids[i]; i++) {
-				if (strstr(psmouse->ps2dev.serio->firmware_id,
-					   topbuttonpad_pnp_ids[i])) {
-					__set_bit(INPUT_PROP_TOPBUTTONPAD,
-						  dev->propbit);
-					break;
-				}
-			}
-		}
+		if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
+			__set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
 		/* Clickpads report only left button */
 		__clear_bit(BTN_RIGHT, dev->keybit);
 		__clear_bit(BTN_MIDDLE, dev->keybit);
@@ -1547,104 +1575,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
 	{ }
 };
 
-static const struct dmi_system_id min_max_dmi_table[] __initconst = {
-#if defined(CONFIG_DMI)
-	{
-		/* Lenovo ThinkPad Helix */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
-		},
-		.driver_data = (int []){1024, 5052, 2258, 4832},
-	},
-	{
-		/* Lenovo ThinkPad X240 */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
-		},
-		.driver_data = (int []){1232, 5710, 1156, 4696},
-	},
-	{
-		/* Lenovo ThinkPad Edge E431 */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
-		},
-		.driver_data = (int []){1024, 5022, 2508, 4832},
-	},
-	{
-		/* Lenovo ThinkPad T431s */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
-		},
-		.driver_data = (int []){1024, 5112, 2024, 4832},
-	},
-	{
-		/* Lenovo ThinkPad T440s */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
-		},
-		.driver_data = (int []){1024, 5112, 2024, 4832},
-	},
-	{
-		/* Lenovo ThinkPad L440 */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
-		},
-		.driver_data = (int []){1024, 5112, 2024, 4832},
-	},
-	{
-		/* Lenovo ThinkPad T540p */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
-		},
-		.driver_data = (int []){1024, 5056, 2058, 4832},
-	},
-	{
-		/* Lenovo ThinkPad L540 */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
-		},
-		.driver_data = (int []){1024, 5112, 2024, 4832},
-	},
-	{
-		/* Lenovo Yoga S1 */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
-					"ThinkPad S1 Yoga"),
-		},
-		.driver_data = (int []){1232, 5710, 1156, 4696},
-	},
-	{
-		/* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-			DMI_MATCH(DMI_PRODUCT_VERSION,
-				  "ThinkPad X1 Carbon 2nd"),
-		},
-		.driver_data = (int []){1024, 5112, 2024, 4832},
-	},
-#endif
-	{ }
-};
-
 void __init synaptics_module_init(void)
 {
-	const struct dmi_system_id *min_max_dmi;
-
 	impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
 	broken_olpc_ec = dmi_check_system(olpc_dmi_table);
-
-	min_max_dmi = dmi_first_match(min_max_dmi_table);
-	if (min_max_dmi)
-		quirk_min_max = min_max_dmi->driver_data;
 }
 
 static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 762b08432de0..8b748d99b934 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -79,7 +79,8 @@ static int amba_kmi_open(struct serio *io)
 	writeb(divisor, KMICLKDIV);
 	writeb(KMICR_EN, KMICR);
 
-	ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi);
+	ret = request_irq(kmi->irq, amba_kmi_int, IRQF_SHARED, "kmi-pl050",
+			  kmi);
 	if (ret) {
 		printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq);
 		writeb(0, KMICR);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 68edc9db2c64..b845e9370871 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -640,7 +640,7 @@ config TOUCHSCREEN_WM9713
 
 config TOUCHSCREEN_WM97XX_ATMEL
 	tristate "WM97xx Atmel accelerated touch"
-	depends on TOUCHSCREEN_WM97XX && (AVR32 || ARCH_AT91)
+	depends on TOUCHSCREEN_WM97XX && AVR32
 	help
 	  Say Y here for support for streaming mode with WM97xx touchscreens
 	  on Atmel AT91 or AVR32 systems with an AC97C module.
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9380be7b1895..5f054c44b485 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	ti->num_discard_bios = 1;
 	ti->discards_supported = true;
 	ti->discard_zeroes_data_unsupported = true;
+	/* Discard bios must be split on a block boundary */
+	ti->split_discard_bios = true;
 
 	cache->features = ca->features;
 	ti->per_bio_data_size = get_per_bio_data_size(cache);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index fa0f6cbd6a41..ebfa411d1a7d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
 	else
 		m->saved_queue_if_no_path = queue_if_no_path;
 	m->queue_if_no_path = queue_if_no_path;
-	if (!m->queue_if_no_path)
-		dm_table_run_md_queue_async(m->ti->table);
-
 	spin_unlock_irqrestore(&m->lock, flags);
 
+	if (!queue_if_no_path)
+		dm_table_run_md_queue_async(m->ti->table);
+
 	return 0;
 }
 
@@ -954,7 +954,7 @@ out:
  */
 static int reinstate_path(struct pgpath *pgpath)
 {
-	int r = 0;
+	int r = 0, run_queue = 0;
 	unsigned long flags;
 	struct multipath *m = pgpath->pg->m;
 
@@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath)
 
 	if (!m->nr_valid_paths++) {
 		m->current_pgpath = NULL;
-		dm_table_run_md_queue_async(m->ti->table);
+		run_queue = 1;
 	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
 		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
 			m->pg_init_in_progress++;
@@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)
 
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
+	if (run_queue)
+		dm_table_run_md_queue_async(m->ti->table);
 
 	return r;
 }
@@ -1566,8 +1568,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
 		}
 		if (m->pg_init_required)
 			__pg_init_all_paths(m);
-		dm_table_run_md_queue_async(m->ti->table);
 		spin_unlock_irqrestore(&m->lock, flags);
+		dm_table_run_md_queue_async(m->ti->table);
 	}
 
 	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2e71de8e0048..242ac2ea5f29 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -27,7 +27,9 @@
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
 #define COMMIT_PERIOD HZ
-#define NO_SPACE_TIMEOUT (HZ * 60)
+#define NO_SPACE_TIMEOUT_SECS 60
+
+static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
 
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
 		"A percentage of time allocated for copy on write");
@@ -1670,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 	struct pool_c *pt = pool->ti->private;
 	bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
 	enum pool_mode old_mode = get_pool_mode(pool);
+	unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
 
 	/*
 	 * Never allow the pool to transition to PM_WRITE mode if user
@@ -1732,8 +1735,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		pool->process_prepared_mapping = process_prepared_mapping;
 		pool->process_prepared_discard = process_prepared_discard_passdown;
 
-		if (!pool->pf.error_if_no_space)
-			queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT);
+		if (!pool->pf.error_if_no_space && no_space_timeout)
+			queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
 		break;
 
 	case PM_WRITE:
@@ -3508,6 +3511,9 @@ static void dm_thin_exit(void)
 module_init(dm_thin_init);
 module_exit(dm_thin_exit);
 
+module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
+
 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
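Since set_pool_mode() re-reads the value with ACCESS_ONCE() on every mode transition, the timeout can be adjusted at runtime; with S_IRUGO | S_IWUSR it should appear root-writable under sysfs, e.g. /sys/module/dm_thin_pool/parameters/no_space_timeout (path assuming the dm-thin-pool module name). Setting it to 0 disables the timer entirely, as the queue_delayed_work() call above is skipped when no_space_timeout is zero.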
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 237b7e0ddc7a..2382cfc9bb3f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7381,8 +7381,10 @@ void md_do_sync(struct md_thread *thread)
7381 /* just in case thread restarts... */ 7381 /* just in case thread restarts... */
7382 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 7382 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7383 return; 7383 return;
7384 if (mddev->ro) /* never try to sync a read-only array */ 7384 if (mddev->ro) {/* never try to sync a read-only array */
7385 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7385 return; 7386 return;
7387 }
7386 7388
7387 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7389 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7388 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { 7390 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
@@ -7824,6 +7826,7 @@ void md_check_recovery(struct mddev *mddev)
7824 /* There is no thread, but we need to call 7826 /* There is no thread, but we need to call
7825 * ->spare_active and clear saved_raid_disk 7827 * ->spare_active and clear saved_raid_disk
7826 */ 7828 */
7829 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7827 md_reap_sync_thread(mddev); 7830 md_reap_sync_thread(mddev);
7828 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7831 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7829 goto unlock; 7832 goto unlock;
diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
index a3d99a8fd2d1..ab7f1b01be49 100644
--- a/drivers/net/can/led.c
+++ b/drivers/net/can/led.c
@@ -97,6 +97,9 @@ static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
97 if (!priv) 97 if (!priv)
98 return NOTIFY_DONE; 98 return NOTIFY_DONE;
99 99
100 if (!priv->tx_led_trig || !priv->rx_led_trig)
101 return NOTIFY_DONE;
102
100 if (msg == NETDEV_CHANGENAME) { 103 if (msg == NETDEV_CHANGENAME) {
101 snprintf(name, sizeof(name), "%s-tx", netdev->name); 104 snprintf(name, sizeof(name), "%s-tx", netdev->name);
102 led_trigger_rename_static(name, priv->tx_led_trig); 105 led_trigger_rename_static(name, priv->tx_led_trig);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index f0e2a4d4f621..edb718661850 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -39,6 +39,7 @@ source "drivers/net/ethernet/cisco/Kconfig"
39config CX_ECAT 39config CX_ECAT
40 tristate "Beckhoff CX5020 EtherCAT master support" 40 tristate "Beckhoff CX5020 EtherCAT master support"
41 depends on PCI 41 depends on PCI
42 depends on X86 || COMPILE_TEST
42 ---help--- 43 ---help---
43 Driver for EtherCAT master module located on CCAT FPGA 44 Driver for EtherCAT master module located on CCAT FPGA
43 that can be found on Beckhoff CX5020, and possibly others of the CX 44 that can be found on Beckhoff CX5020, and possibly others of the CX
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index d18441ebe944..23da47925fa3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -906,6 +906,18 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
906 bd_prod = RX_BD(bd_prod); 906 bd_prod = RX_BD(bd_prod);
907 bd_cons = RX_BD(bd_cons); 907 bd_cons = RX_BD(bd_cons);
908 908
909 /* A rmb() is required to ensure that the CQE is not read
910 * before it is written by the adapter DMA. PCI ordering
911 * rules will make sure the other fields are written before
912 * the marker at the end of struct eth_fast_path_rx_cqe
913 * but without rmb() a weakly ordered processor can process
914 * stale data. Without the barrier the TPA state machine might
915 * enter an inconsistent state and the kernel stack might be
916 * provided with an incorrect packet description - these lead
917 * to various kernel crashes.
918 */
919 rmb();
920
909 cqe_fp_flags = cqe_fp->type_error_flags; 921 cqe_fp_flags = cqe_fp->type_error_flags;
910 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 922 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
911 923
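
The added comment is the standard producer/consumer DMA-ordering argument: the adapter writes the CQE payload first and the marker last, so the CPU must not let the payload loads be reordered ahead of the load that observed the marker. A generic sketch of the idiom; the descriptor layout and DESC_DONE bit here are hypothetical, not bnx2x's real ones:

	#define DESC_DONE 0x80000000	/* hypothetical "valid" bit */

	struct hyp_cqe {
		u32 data;	/* written first by the device */
		u32 marker;	/* written last; DESC_DONE set when valid */
	};

	static bool hyp_cqe_ready(struct hyp_cqe *cqe, u32 *out)
	{
		if (!(ACCESS_ONCE(cqe->marker) & DESC_DONE))
			return false;
		rmb();	/* marker load must complete before data loads */
		*out = cqe->data;
		return true;
	}
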
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ff2bdd80f0aa..cf14218697e4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13283,8 +13283,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13283 netdev_reset_tc(bp->dev); 13283 netdev_reset_tc(bp->dev);
13284 13284
13285 del_timer_sync(&bp->timer); 13285 del_timer_sync(&bp->timer);
13286 cancel_delayed_work(&bp->sp_task); 13286 cancel_delayed_work_sync(&bp->sp_task);
13287 cancel_delayed_work(&bp->period_task); 13287 cancel_delayed_work_sync(&bp->period_task);
13288 13288
13289 spin_lock_bh(&bp->stats_lock); 13289 spin_lock_bh(&bp->stats_lock);
13290 bp->stats_state = STATS_STATE_DISABLED; 13290 bp->stats_state = STATS_STATE_DISABLED;
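
The switch to cancel_delayed_work_sync() matters because the plain variant only dequeues a pending item: a handler that is already executing keeps running and may requeue itself or touch state the unload path is about to free. The _sync variant waits for any in-flight execution, so after the calls below nothing from sp_task or period_task can still be on a CPU. A sketch of the teardown ordering:

	del_timer_sync(&bp->timer);	/* the timer cannot rearm the works */
	cancel_delayed_work_sync(&bp->sp_task);
	cancel_delayed_work_sync(&bp->period_task);
	/* only now is it safe to tear down state those handlers use */
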
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 09f3fefcbf9c..a4b25bc7113a 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -608,6 +608,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
608 pr_err("%s: Bad type %d\n", __func__, ulp_type); 608 pr_err("%s: Bad type %d\n", __func__, ulp_type);
609 return -EINVAL; 609 return -EINVAL;
610 } 610 }
611
612 if (ulp_type == CNIC_ULP_ISCSI)
613 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
614
611 mutex_lock(&cnic_lock); 615 mutex_lock(&cnic_lock);
612 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 616 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
613 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); 617 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
@@ -620,9 +624,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
620 } 624 }
621 mutex_unlock(&cnic_lock); 625 mutex_unlock(&cnic_lock);
622 626
623 if (ulp_type == CNIC_ULP_ISCSI) 627 if (ulp_type == CNIC_ULP_FCOE)
624 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
625 else if (ulp_type == CNIC_ULP_FCOE)
626 dev->fcoe_cap = NULL; 628 dev->fcoe_cap = NULL;
627 629
628 synchronize_rcu(); 630 synchronize_rcu();
@@ -1039,21 +1041,17 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1039 struct cnic_local *cp = dev->cnic_priv; 1041 struct cnic_local *cp = dev->cnic_priv;
1040 struct cnic_uio_dev *udev; 1042 struct cnic_uio_dev *udev;
1041 1043
1042 read_lock(&cnic_dev_lock);
1043 list_for_each_entry(udev, &cnic_udev_list, list) { 1044 list_for_each_entry(udev, &cnic_udev_list, list) {
1044 if (udev->pdev == dev->pcidev) { 1045 if (udev->pdev == dev->pcidev) {
1045 udev->dev = dev; 1046 udev->dev = dev;
1046 if (__cnic_alloc_uio_rings(udev, pages)) { 1047 if (__cnic_alloc_uio_rings(udev, pages)) {
1047 udev->dev = NULL; 1048 udev->dev = NULL;
1048 read_unlock(&cnic_dev_lock);
1049 return -ENOMEM; 1049 return -ENOMEM;
1050 } 1050 }
1051 cp->udev = udev; 1051 cp->udev = udev;
1052 read_unlock(&cnic_dev_lock);
1053 return 0; 1052 return 0;
1054 } 1053 }
1055 } 1054 }
1056 read_unlock(&cnic_dev_lock);
1057 1055
1058 udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC); 1056 udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
1059 if (!udev) 1057 if (!udev)
@@ -1067,9 +1065,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1067 if (__cnic_alloc_uio_rings(udev, pages)) 1065 if (__cnic_alloc_uio_rings(udev, pages))
1068 goto err_udev; 1066 goto err_udev;
1069 1067
1070 write_lock(&cnic_dev_lock);
1071 list_add(&udev->list, &cnic_udev_list); 1068 list_add(&udev->list, &cnic_udev_list);
1072 write_unlock(&cnic_dev_lock);
1073 1069
1074 pci_dev_get(udev->pdev); 1070 pci_dev_get(udev->pdev);
1075 1071
@@ -5624,20 +5620,27 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5624{ 5620{
5625 int if_type; 5621 int if_type;
5626 5622
5627 rcu_read_lock();
5628 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 5623 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5629 struct cnic_ulp_ops *ulp_ops; 5624 struct cnic_ulp_ops *ulp_ops;
5630 void *ctx; 5625 void *ctx;
5631 5626
5632 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); 5627 mutex_lock(&cnic_lock);
5633 if (!ulp_ops || !ulp_ops->indicate_netevent) 5628 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
5629 lockdep_is_held(&cnic_lock));
5630 if (!ulp_ops || !ulp_ops->indicate_netevent) {
5631 mutex_unlock(&cnic_lock);
5634 continue; 5632 continue;
5633 }
5635 5634
5636 ctx = cp->ulp_handle[if_type]; 5635 ctx = cp->ulp_handle[if_type];
5637 5636
5637 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5638 mutex_unlock(&cnic_lock);
5639
5638 ulp_ops->indicate_netevent(ctx, event, vlan_id); 5640 ulp_ops->indicate_netevent(ctx, event, vlan_id);
5641
5642 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5639 } 5643 }
5640 rcu_read_unlock();
5641} 5644}
5642 5645
5643/* netdev event handler */ 5646/* netdev event handler */
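
The loop can no longer run under rcu_read_lock() because indicate_netevent() handlers may sleep. The rework takes cnic_lock just long enough to fetch the ops pointer, where rcu_dereference_protected() with lockdep_is_held() documents that the mutex, not RCU, guards the access, marks the call pending, drops the lock, and only then calls out. Condensed shape of one iteration:

	mutex_lock(&cnic_lock);
	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
					    lockdep_is_held(&cnic_lock));
	if (!ulp_ops || !ulp_ops->indicate_netevent) {
		mutex_unlock(&cnic_lock);
		continue;
	}
	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	mutex_unlock(&cnic_lock);

	ulp_ops->indicate_netevent(ctx, event, vlan_id);	/* may sleep */

	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);

The ULP_F_CALL_PENDING bit gives the unregister path something to poll so it does not tear a ULP down while one of its callbacks is still in flight.
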
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 9d75fef6396f..63eb959a28aa 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -682,10 +682,7 @@ static int mal_probe(struct platform_device *ofdev)
682 goto fail6; 682 goto fail6;
683 683
684 /* Enable all MAL SERR interrupt sources */ 684 /* Enable all MAL SERR interrupt sources */
685 if (mal->version == 2) 685 set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);
686 set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
687 else
688 set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);
689 686
690 /* Enable EOB interrupt */ 687 /* Enable EOB interrupt */
691 mal_enable_eob_irq(mal); 688 mal_enable_eob_irq(mal);
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
index e431a32e3d69..eeade2ea8334 100644
--- a/drivers/net/ethernet/ibm/emac/mal.h
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -95,24 +95,20 @@
95 95
96 96
97#define MAL_IER 0x02 97#define MAL_IER 0x02
98/* MAL IER bits */
98#define MAL_IER_DE 0x00000010 99#define MAL_IER_DE 0x00000010
99#define MAL_IER_OTE 0x00000004 100#define MAL_IER_OTE 0x00000004
100#define MAL_IER_OE 0x00000002 101#define MAL_IER_OE 0x00000002
101#define MAL_IER_PE 0x00000001 102#define MAL_IER_PE 0x00000001
102/* MAL V1 IER bits */
103#define MAL1_IER_NWE 0x00000008
104#define MAL1_IER_SOC_EVENTS MAL1_IER_NWE
105#define MAL1_IER_EVENTS (MAL1_IER_SOC_EVENTS | MAL_IER_DE | \
106 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
107 103
108/* MAL V2 IER bits */ 104/* PLB read/write/timeout errors */
109#define MAL2_IER_PT 0x00000080 105#define MAL_IER_PTE 0x00000080
110#define MAL2_IER_PRE 0x00000040 106#define MAL_IER_PRE 0x00000040
111#define MAL2_IER_PWE 0x00000020 107#define MAL_IER_PWE 0x00000020
112#define MAL2_IER_SOC_EVENTS (MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE)
113#define MAL2_IER_EVENTS (MAL2_IER_SOC_EVENTS | MAL_IER_DE | \
114 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
115 108
109#define MAL_IER_SOC_EVENTS (MAL_IER_PTE | MAL_IER_PRE | MAL_IER_PWE)
110#define MAL_IER_EVENTS (MAL_IER_SOC_EVENTS | MAL_IER_DE | \
111 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
116 112
117#define MAL_TXCASR 0x04 113#define MAL_TXCASR 0x04
118#define MAL_TXCARR 0x05 114#define MAL_TXCARR 0x05
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index 4fb2f96da23b..a01182cce965 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -45,6 +45,7 @@
45 45
46/* RGMIIx_SSR */ 46/* RGMIIx_SSR */
47#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8)) 47#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8))
48#define RGMII_SSR_10(idx) (0x1 << ((idx) * 8))
48#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8)) 49#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8))
49#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8)) 50#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8))
50 51
@@ -139,6 +140,8 @@ void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
139 ssr |= RGMII_SSR_1000(input); 140 ssr |= RGMII_SSR_1000(input);
140 else if (speed == SPEED_100) 141 else if (speed == SPEED_100)
141 ssr |= RGMII_SSR_100(input); 142 ssr |= RGMII_SSR_100(input);
143 else if (speed == SPEED_10)
144 ssr |= RGMII_SSR_10(input);
142 145
143 out_be32(&p->ssr, ssr); 146 out_be32(&p->ssr, ssr);
144 147
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 38e9a4c9099c..19606a44672b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2012,6 +2012,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2012 if (!mlx4_is_slave(dev)) { 2012 if (!mlx4_is_slave(dev)) {
2013 mlx4_init_mac_table(dev, &info->mac_table); 2013 mlx4_init_mac_table(dev, &info->mac_table);
2014 mlx4_init_vlan_table(dev, &info->vlan_table); 2014 mlx4_init_vlan_table(dev, &info->vlan_table);
2015 mlx4_init_roce_gid_table(dev, &info->gid_table);
2015 info->base_qpn = mlx4_get_base_qpn(dev, port); 2016 info->base_qpn = mlx4_get_base_qpn(dev, port);
2016 } 2017 }
2017 2018
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 9dd1b30ea757..4b416edb4c1e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -696,6 +696,17 @@ struct mlx4_mac_table {
696 int max; 696 int max;
697}; 697};
698 698
699#define MLX4_ROCE_GID_ENTRY_SIZE 16
700
701struct mlx4_roce_gid_entry {
702 u8 raw[MLX4_ROCE_GID_ENTRY_SIZE];
703};
704
705struct mlx4_roce_gid_table {
706 struct mlx4_roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS];
707 struct mutex mutex;
708};
709
699#define MLX4_MAX_VLAN_NUM 128 710#define MLX4_MAX_VLAN_NUM 128
700#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2) 711#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2)
701 712
@@ -759,6 +770,7 @@ struct mlx4_port_info {
759 struct device_attribute port_mtu_attr; 770 struct device_attribute port_mtu_attr;
760 struct mlx4_mac_table mac_table; 771 struct mlx4_mac_table mac_table;
761 struct mlx4_vlan_table vlan_table; 772 struct mlx4_vlan_table vlan_table;
773 struct mlx4_roce_gid_table gid_table;
762 int base_qpn; 774 int base_qpn;
763}; 775};
764 776
@@ -789,10 +801,6 @@ enum {
789 MLX4_USE_RR = 1, 801 MLX4_USE_RR = 1,
790}; 802};
791 803
792struct mlx4_roce_gid_entry {
793 u8 raw[16];
794};
795
796struct mlx4_priv { 804struct mlx4_priv {
797 struct mlx4_dev dev; 805 struct mlx4_dev dev;
798 806
@@ -840,7 +848,6 @@ struct mlx4_priv {
840 int fs_hash_mode; 848 int fs_hash_mode;
841 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; 849 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
842 __be64 slave_node_guids[MLX4_MFUNC_MAX]; 850 __be64 slave_node_guids[MLX4_MFUNC_MAX];
843 struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][MLX4_ROCE_MAX_GIDS];
844 851
845 atomic_t opreq_count; 852 atomic_t opreq_count;
846 struct work_struct opreq_task; 853 struct work_struct opreq_task;
@@ -1141,6 +1148,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
1141 1148
1142void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); 1149void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
1143void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); 1150void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
1151void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
1152 struct mlx4_roce_gid_table *table);
1144void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); 1153void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
1145int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 1154int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
1146 1155
@@ -1150,6 +1159,7 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
1150 enum mlx4_resource resource_type, 1159 enum mlx4_resource resource_type,
1151 u64 resource_id, int *slave); 1160 u64 resource_id, int *slave);
1152void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); 1161void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
1162void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave);
1153int mlx4_init_resource_tracker(struct mlx4_dev *dev); 1163int mlx4_init_resource_tracker(struct mlx4_dev *dev);
1154 1164
1155void mlx4_free_resource_tracker(struct mlx4_dev *dev, 1165void mlx4_free_resource_tracker(struct mlx4_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 376f2f1d445e..7ab97174886d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -75,6 +75,16 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
75 table->total = 0; 75 table->total = 0;
76} 76}
77 77
78void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
79 struct mlx4_roce_gid_table *table)
80{
81 int i;
82
83 mutex_init(&table->mutex);
84 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
85 memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
86}
87
78static int validate_index(struct mlx4_dev *dev, 88static int validate_index(struct mlx4_dev *dev,
79 struct mlx4_mac_table *table, int index) 89 struct mlx4_mac_table *table, int index)
80{ 90{
@@ -583,6 +593,84 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
583} 593}
584EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix); 594EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
585 595
596static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
597 int port, struct mlx4_cmd_mailbox *mailbox)
598{
599 struct mlx4_roce_gid_entry *gid_entry_mbox;
600 struct mlx4_priv *priv = mlx4_priv(dev);
601 int num_gids, base, offset;
602 int i, err;
603
604 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
605 base = mlx4_get_base_gid_ix(dev, slave, port);
606
607 memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
608
609 mutex_lock(&(priv->port[port].gid_table.mutex));
610 /* Zero-out gids belonging to that slave in the port GID table */
611 for (i = 0, offset = base; i < num_gids; offset++, i++)
612 memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
613 zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);
614
615 /* Now, copy roce port gids table to mailbox for passing to FW */
616 gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
617 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
618 memcpy(gid_entry_mbox->raw,
619 priv->port[port].gid_table.roce_gids[i].raw,
620 MLX4_ROCE_GID_ENTRY_SIZE);
621
622 err = mlx4_cmd(dev, mailbox->dma,
623 ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1,
624 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
625 MLX4_CMD_NATIVE);
626 mutex_unlock(&(priv->port[port].gid_table.mutex));
627 return err;
628}
629
630
631void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
632{
633 struct mlx4_active_ports actv_ports;
634 struct mlx4_cmd_mailbox *mailbox;
635 int num_eth_ports, err;
636 int i;
637
638 if (slave < 0 || slave > dev->num_vfs)
639 return;
640
641 actv_ports = mlx4_get_active_ports(dev, slave);
642
643 for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
644 if (test_bit(i, actv_ports.ports)) {
645 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
646 continue;
647 num_eth_ports++;
648 }
649 }
650
651 if (!num_eth_ports)
652 return;
653
654 /* have ETH ports. Alloc mailbox for SET_PORT command */
655 mailbox = mlx4_alloc_cmd_mailbox(dev);
656 if (IS_ERR(mailbox))
657 return;
658
659 for (i = 0; i < dev->caps.num_ports; i++) {
660 if (test_bit(i, actv_ports.ports)) {
661 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
662 continue;
663 err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
664 if (err)
665 mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
666 slave, i + 1, err);
667 }
668 }
669
670 mlx4_free_cmd_mailbox(dev, mailbox);
671 return;
672}
673
586static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, 674static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
587 u8 op_mod, struct mlx4_cmd_mailbox *inbox) 675 u8 op_mod, struct mlx4_cmd_mailbox *inbox)
588{ 676{
@@ -691,10 +779,12 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
691 /* 2. Check that we do not have duplicates in OTHER 779 /* 2. Check that we do not have duplicates in OTHER
692 * entries in the port GID table 780 * entries in the port GID table
693 */ 781 */
782
783 mutex_lock(&(priv->port[port].gid_table.mutex));
694 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { 784 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
695 if (i >= base && i < base + num_gids) 785 if (i >= base && i < base + num_gids)
696 continue; /* don't compare to slave's current gids */ 786 continue; /* don't compare to slave's current gids */
697 gid_entry_tbl = &priv->roce_gids[port - 1][i]; 787 gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
698 if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry))) 788 if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
699 continue; 789 continue;
700 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 790 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
@@ -707,6 +797,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
707 /* found duplicate */ 797 /* found duplicate */
708 mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n", 798 mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
709 slave, i); 799 slave, i);
800 mutex_unlock(&(priv->port[port].gid_table.mutex));
710 return -EINVAL; 801 return -EINVAL;
711 } 802 }
712 } 803 }
@@ -715,16 +806,24 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
715 /* insert slave GIDs with memcpy, starting at slave's base index */ 806 /* insert slave GIDs with memcpy, starting at slave's base index */
716 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 807 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
717 for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++) 808 for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
718 memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16); 809 memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
810 gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
719 811
720 /* Now, copy roce port gids table to current mailbox for passing to FW */ 812 /* Now, copy roce port gids table to current mailbox for passing to FW */
721 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 813 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
722 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) 814 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
723 memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16); 815 memcpy(gid_entry_mbox->raw,
724 816 priv->port[port].gid_table.roce_gids[i].raw,
725 break; 817 MLX4_ROCE_GID_ENTRY_SIZE);
818
819 err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
820 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
821 MLX4_CMD_NATIVE);
822 mutex_unlock(&(priv->port[port].gid_table.mutex));
823 return err;
726 } 824 }
727 return mlx4_cmd(dev, inbox->dma, in_mod, op_mod, 825
826 return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
728 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 827 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
729 MLX4_CMD_NATIVE); 828 MLX4_CMD_NATIVE);
730 } 829 }
@@ -1097,7 +1196,8 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1097 num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; 1196 num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
1098 1197
1099 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { 1198 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1100 if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) { 1199 if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
1200 MLX4_ROCE_GID_ENTRY_SIZE)) {
1101 found_ix = i; 1201 found_ix = i;
1102 break; 1202 break;
1103 } 1203 }
@@ -1185,7 +1285,8 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
1185 if (!mlx4_is_master(dev)) 1285 if (!mlx4_is_master(dev))
1186 return -EINVAL; 1286 return -EINVAL;
1187 1287
1188 memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16); 1288 memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
1289 MLX4_ROCE_GID_ENTRY_SIZE);
1189 return 0; 1290 return 0;
1190} 1291}
1191EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); 1292EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
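
With the table moved into mlx4_port_info, each port's mutex serializes the whole read-modify-write cycle against firmware: touch only the slave's window [base, base + num_gids) in the host copy, then mirror all MLX4_ROCE_MAX_GIDS entries into the mailbox and issue SET_PORT. A sketch of the windowed zeroing step, using the helpers from the patch (error handling elided):

	struct mlx4_roce_gid_table *t = &priv->port[port].gid_table;
	int base = mlx4_get_base_gid_ix(dev, slave, port);
	int num  = mlx4_get_slave_num_gids(dev, slave, port);
	int i;

	mutex_lock(&t->mutex);
	for (i = 0; i < num; i++)	/* only this slave's window */
		memcpy(t->roce_gids[base + i].raw, zgid_entry.raw,
		       MLX4_ROCE_GID_ENTRY_SIZE);
	/* ... copy the full table into the mailbox and run SET_PORT ... */
	mutex_unlock(&t->mutex);
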
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index dd821b363686..b6cddef24391 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -586,6 +586,7 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
586 } 586 }
587 /* free master's vlans */ 587 /* free master's vlans */
588 i = dev->caps.function; 588 i = dev->caps.function;
589 mlx4_reset_roce_gids(dev, i);
589 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); 590 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
590 rem_slave_vlans(dev, i); 591 rem_slave_vlans(dev, i);
591 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); 592 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
@@ -4672,7 +4673,7 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4672void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) 4673void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4673{ 4674{
4674 struct mlx4_priv *priv = mlx4_priv(dev); 4675 struct mlx4_priv *priv = mlx4_priv(dev);
4675 4676 mlx4_reset_roce_gids(dev, slave);
4676 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 4677 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4677 rem_slave_vlans(dev, slave); 4678 rem_slave_vlans(dev, slave);
4678 rem_slave_macs(dev, slave); 4679 rem_slave_macs(dev, slave);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index a51fe18f09a8..561cb11ca58c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -1020,6 +1020,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
1020 struct qlcnic_dcb_cee *peer; 1020 struct qlcnic_dcb_cee *peer;
1021 int i; 1021 int i;
1022 1022
1023 memset(info, 0, sizeof(*info));
1023 *app_count = 0; 1024 *app_count = 0;
1024 1025
1025 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) 1026 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6a94ede699b4..b19a323c8647 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -307,6 +307,27 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
307}; 307};
308 308
309static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { 309static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
310 [EDMR] = 0x0000,
311 [EDTRR] = 0x0004,
312 [EDRRR] = 0x0008,
313 [TDLAR] = 0x000c,
314 [RDLAR] = 0x0010,
315 [EESR] = 0x0014,
316 [EESIPR] = 0x0018,
317 [TRSCER] = 0x001c,
318 [RMFCR] = 0x0020,
319 [TFTR] = 0x0024,
320 [FDR] = 0x0028,
321 [RMCR] = 0x002c,
322 [EDOCR] = 0x0030,
323 [FCFTR] = 0x0034,
324 [RPADIR] = 0x0038,
325 [TRIMD] = 0x003c,
326 [RBWAR] = 0x0040,
327 [RDFAR] = 0x0044,
328 [TBRAR] = 0x004c,
329 [TDFAR] = 0x0050,
330
310 [ECMR] = 0x0160, 331 [ECMR] = 0x0160,
311 [ECSR] = 0x0164, 332 [ECSR] = 0x0164,
312 [ECSIPR] = 0x0168, 333 [ECSIPR] = 0x0168,
@@ -546,7 +567,6 @@ static struct sh_eth_cpu_data sh7757_data = {
546 .register_type = SH_ETH_REG_FAST_SH4, 567 .register_type = SH_ETH_REG_FAST_SH4,
547 568
548 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 569 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
549 .rmcr_value = RMCR_RNC,
550 570
551 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 571 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
552 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 572 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -624,7 +644,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
624 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 644 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
625 EESR_TDE | EESR_ECI, 645 EESR_TDE | EESR_ECI,
626 .fdr_value = 0x0000072f, 646 .fdr_value = 0x0000072f,
627 .rmcr_value = RMCR_RNC,
628 647
629 .irq_flags = IRQF_SHARED, 648 .irq_flags = IRQF_SHARED,
630 .apr = 1, 649 .apr = 1,
@@ -752,7 +771,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
752 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 771 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
753 EESR_TDE | EESR_ECI, 772 EESR_TDE | EESR_ECI,
754 .fdr_value = 0x0000070f, 773 .fdr_value = 0x0000070f,
755 .rmcr_value = RMCR_RNC,
756 774
757 .apr = 1, 775 .apr = 1,
758 .mpr = 1, 776 .mpr = 1,
@@ -784,7 +802,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
784 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 802 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
785 EESR_TDE | EESR_ECI, 803 EESR_TDE | EESR_ECI,
786 .fdr_value = 0x0000070f, 804 .fdr_value = 0x0000070f,
787 .rmcr_value = RMCR_RNC,
788 805
789 .no_psr = 1, 806 .no_psr = 1,
790 .apr = 1, 807 .apr = 1,
@@ -833,9 +850,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
833 if (!cd->fdr_value) 850 if (!cd->fdr_value)
834 cd->fdr_value = DEFAULT_FDR_INIT; 851 cd->fdr_value = DEFAULT_FDR_INIT;
835 852
836 if (!cd->rmcr_value)
837 cd->rmcr_value = DEFAULT_RMCR_VALUE;
838
839 if (!cd->tx_check) 853 if (!cd->tx_check)
840 cd->tx_check = DEFAULT_TX_CHECK; 854 cd->tx_check = DEFAULT_TX_CHECK;
841 855
@@ -1287,8 +1301,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
1287 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); 1301 sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1288 sh_eth_write(ndev, 0, TFTR); 1302 sh_eth_write(ndev, 0, TFTR);
1289 1303
1290 /* Frame recv control */ 1304 /* Frame recv control (enable multiple-packets per rx irq) */
1291 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR); 1305 sh_eth_write(ndev, RMCR_RNC, RMCR);
1292 1306
1293 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER); 1307 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
1294 1308
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index d55e37cd5fec..b37c427144ee 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -319,7 +319,6 @@ enum TD_STS_BIT {
319enum RMCR_BIT { 319enum RMCR_BIT {
320 RMCR_RNC = 0x00000001, 320 RMCR_RNC = 0x00000001,
321}; 321};
322#define DEFAULT_RMCR_VALUE 0x00000000
323 322
324/* ECMR */ 323/* ECMR */
325enum FELIC_MODE_BIT { 324enum FELIC_MODE_BIT {
@@ -466,7 +465,6 @@ struct sh_eth_cpu_data {
466 unsigned long fdr_value; 465 unsigned long fdr_value;
467 unsigned long fcftr_value; 466 unsigned long fcftr_value;
468 unsigned long rpadir_value; 467 unsigned long rpadir_value;
469 unsigned long rmcr_value;
470 468
471 /* interrupt checking mask */ 469 /* interrupt checking mask */
472 unsigned long tx_check; 470 unsigned long tx_check;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a0fc151da40d..5e13fa5524ae 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2477,6 +2477,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2477 goto out_disable_resources; 2477 goto out_disable_resources;
2478 } 2478 }
2479 2479
2480 netif_carrier_off(dev);
2481
2480 retval = register_netdev(dev); 2482 retval = register_netdev(dev);
2481 if (retval) { 2483 if (retval) {
2482 SMSC_WARN(pdata, probe, "Error %i registering device", retval); 2484 SMSC_WARN(pdata, probe, "Error %i registering device", retval);
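
Calling netif_carrier_off() before register_netdev() closes a small window in which userspace could observe the freshly registered interface with carrier asserted before the first PHY poll has run; starting with carrier off lets operstate transition cleanly once the link really comes up. The ordering is the whole fix:

	netif_carrier_off(dev);		/* declare "no link yet" ... */
	retval = register_netdev(dev);	/* ... before userspace can look */
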
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index f32d730f55cc..35a139e9a833 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1567,7 +1567,6 @@ static int emac_dev_open(struct net_device *ndev)
1567 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, 1567 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
1568 res_num))) { 1568 res_num))) {
1569 for (irq_num = res->start; irq_num <= res->end; irq_num++) { 1569 for (irq_num = res->start; irq_num <= res->end; irq_num++) {
1570 dev_err(emac_dev, "Request IRQ %d\n", irq_num);
1571 if (request_irq(irq_num, emac_irq, 0, ndev->name, 1570 if (request_irq(irq_num, emac_irq, 0, ndev->name,
1572 ndev)) { 1571 ndev)) {
1573 dev_err(emac_dev, 1572 dev_err(emac_dev,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 9a9ce8debefa..b4958c7ffa84 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1724,6 +1724,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
1724 * to traverse list in reverse under rcu_read_lock 1724 * to traverse list in reverse under rcu_read_lock
1725 */ 1725 */
1726 mutex_lock(&team->lock); 1726 mutex_lock(&team->lock);
1727 team->port_mtu_change_allowed = true;
1727 list_for_each_entry(port, &team->port_list, list) { 1728 list_for_each_entry(port, &team->port_list, list) {
1728 err = dev_set_mtu(port->dev, new_mtu); 1729 err = dev_set_mtu(port->dev, new_mtu);
1729 if (err) { 1730 if (err) {
@@ -1732,6 +1733,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
1732 goto unwind; 1733 goto unwind;
1733 } 1734 }
1734 } 1735 }
1736 team->port_mtu_change_allowed = false;
1735 mutex_unlock(&team->lock); 1737 mutex_unlock(&team->lock);
1736 1738
1737 dev->mtu = new_mtu; 1739 dev->mtu = new_mtu;
@@ -1741,6 +1743,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
1741unwind: 1743unwind:
1742 list_for_each_entry_continue_reverse(port, &team->port_list, list) 1744 list_for_each_entry_continue_reverse(port, &team->port_list, list)
1743 dev_set_mtu(port->dev, dev->mtu); 1745 dev_set_mtu(port->dev, dev->mtu);
1746 team->port_mtu_change_allowed = false;
1744 mutex_unlock(&team->lock); 1747 mutex_unlock(&team->lock);
1745 1748
1746 return err; 1749 return err;
@@ -2851,7 +2854,9 @@ static int team_device_event(struct notifier_block *unused,
2851 break; 2854 break;
2852 case NETDEV_PRECHANGEMTU: 2855 case NETDEV_PRECHANGEMTU:
2853 /* Forbid changing the mtu of the underlying device */ 2856 /* Forbid changing the mtu of the underlying device */
2854 return NOTIFY_BAD; 2857 if (!port->team->port_mtu_change_allowed)
2858 return NOTIFY_BAD;
2859 break;
2855 case NETDEV_PRE_TYPE_CHANGE: 2860 case NETDEV_PRE_TYPE_CHANGE:
2856 /* Forbid changing the type of the underlying device */ 2861 /* Forbid changing the type of the underlying device */
2857 return NOTIFY_BAD; 2862 return NOTIFY_BAD;
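
port_mtu_change_allowed acts as a simple reentrancy token: team_change_mtu() sets it under team->lock, propagates the MTU to each port (which re-fires NETDEV_PRECHANGEMTU on those ports), and clears it again on both the success and unwind paths, so an MTU change attempted directly on a port is still refused. The shape of the pattern, with error unwind elided:

	mutex_lock(&team->lock);
	team->port_mtu_change_allowed = true;	/* we initiated this change */
	list_for_each_entry(port, &team->port_list, list)
		dev_set_mtu(port->dev, new_mtu);	/* notifier fires here */
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);
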
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index f72570708edb..76465b117b72 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,8 @@
59#define USB_PRODUCT_IPHONE_3GS 0x1294 59#define USB_PRODUCT_IPHONE_3GS 0x1294
60#define USB_PRODUCT_IPHONE_4 0x1297 60#define USB_PRODUCT_IPHONE_4 0x1297
61#define USB_PRODUCT_IPAD 0x129a 61#define USB_PRODUCT_IPAD 0x129a
62#define USB_PRODUCT_IPAD_2 0x12a2
63#define USB_PRODUCT_IPAD_3 0x12a6
62#define USB_PRODUCT_IPAD_MINI 0x12ab 64#define USB_PRODUCT_IPAD_MINI 0x12ab
63#define USB_PRODUCT_IPHONE_4_VZW 0x129c 65#define USB_PRODUCT_IPHONE_4_VZW 0x129c
64#define USB_PRODUCT_IPHONE_4S 0x12a0 66#define USB_PRODUCT_IPHONE_4S 0x12a0
@@ -107,6 +109,14 @@ static struct usb_device_id ipheth_table[] = {
107 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 109 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
108 IPHETH_USBINTF_PROTO) }, 110 IPHETH_USBINTF_PROTO) },
109 { USB_DEVICE_AND_INTERFACE_INFO( 111 { USB_DEVICE_AND_INTERFACE_INFO(
112 USB_VENDOR_APPLE, USB_PRODUCT_IPAD_2,
113 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
114 IPHETH_USBINTF_PROTO) },
115 { USB_DEVICE_AND_INTERFACE_INFO(
116 USB_VENDOR_APPLE, USB_PRODUCT_IPAD_3,
117 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
118 IPHETH_USBINTF_PROTO) },
119 { USB_DEVICE_AND_INTERFACE_INFO(
110 USB_VENDOR_APPLE, USB_PRODUCT_IPAD_MINI, 120 USB_VENDOR_APPLE, USB_PRODUCT_IPAD_MINI,
111 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 121 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
112 IPHETH_USBINTF_PROTO) }, 122 IPHETH_USBINTF_PROTO) },
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 83208d4fdc59..dc4bf06948c7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -748,11 +748,15 @@ static const struct usb_device_id products[] = {
748 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ 748 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
749 {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */ 749 {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */
750 {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */ 750 {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */
751 {QMI_FIXED_INTF(0x1199, 0x68c0, 11)}, /* Sierra Wireless MC73xx */
752 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 751 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
753 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ 752 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
754 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ 753 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
755 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ 754 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
755 {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */
756 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
757 {QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */
758 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
759 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
756 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 760 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
757 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 761 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
758 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 762 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 9802b67040cc..2c61281bebd7 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -523,17 +523,6 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
523 return GPIOF_DIR_IN; 523 return GPIOF_DIR_IN;
524} 524}
525 525
526static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
527{
528 return pinctrl_gpio_direction_input(chip->base + offset);
529}
530
531static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
532 int value)
533{
534 return pinctrl_gpio_direction_output(chip->base + offset);
535}
536
537static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset) 526static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
538{ 527{
539 struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev); 528 struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
@@ -568,6 +557,18 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
568 wmt_clearbits(data, reg_data_out, BIT(bit)); 557 wmt_clearbits(data, reg_data_out, BIT(bit));
569} 558}
570 559
560static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
561{
562 return pinctrl_gpio_direction_input(chip->base + offset);
563}
564
565static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
566 int value)
567{
568 wmt_gpio_set_value(chip, offset, value);
569 return pinctrl_gpio_direction_output(chip->base + offset);
570}
571
571static struct gpio_chip wmt_gpio_chip = { 572static struct gpio_chip wmt_gpio_chip = {
572 .label = "gpio-wmt", 573 .label = "gpio-wmt",
573 .owner = THIS_MODULE, 574 .owner = THIS_MODULE,
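
Writing the output latch before flipping the pin to output avoids driving whatever stale value was left in the latch for a few cycles, a visible glitch on things like LEDs or chip selects; that is also why the function had to move below wmt_gpio_set_value() in the file. The resulting shape, with names as in the driver:

	static int wmt_gpio_direction_output(struct gpio_chip *chip,
					     unsigned offset, int value)
	{
		wmt_gpio_set_value(chip, offset, value);	/* latch first */
		return pinctrl_gpio_direction_output(chip->base + offset);
	}
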
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 1b681427dde0..c341f855fadc 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
1621 list_del(&rphy->list); 1621 list_del(&rphy->list);
1622 mutex_unlock(&sas_host->lock); 1622 mutex_unlock(&sas_host->lock);
1623 1623
1624 sas_bsg_remove(shost, rphy);
1625
1626 transport_destroy_device(dev); 1624 transport_destroy_device(dev);
1627 1625
1628 put_device(dev); 1626 put_device(dev);
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
1681 } 1679 }
1682 1680
1683 sas_rphy_unlink(rphy); 1681 sas_rphy_unlink(rphy);
1682 sas_bsg_remove(NULL, rphy);
1684 transport_remove_device(dev); 1683 transport_remove_device(dev);
1685 device_del(dev); 1684 device_del(dev);
1686} 1685}
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 171a71d20c88..728bf7f14f7b 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -139,6 +139,8 @@ static int daq700_ai_rinsn(struct comedi_device *dev,
139 /* write channel to multiplexer */ 139 /* write channel to multiplexer */
140 /* set mask scan bit high to disable scanning */ 140 /* set mask scan bit high to disable scanning */
141 outb(chan | 0x80, dev->iobase + CMD_R1); 141 outb(chan | 0x80, dev->iobase + CMD_R1);
142 /* mux needs 2us to really settle [Fred Brooks]. */
143 udelay(2);
142 144
143 /* convert n samples */ 145 /* convert n samples */
144 for (n = 0; n < insn->n; n++) { 146 for (n = 0; n < insn->n; n++) {
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 11d0a9d8ee59..b7dd1539bbc4 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -171,7 +171,7 @@ inline int rtllib_put_snap(u8 *data, u16 h_proto)
171 snap->oui[1] = oui[1]; 171 snap->oui[1] = oui[1];
172 snap->oui[2] = oui[2]; 172 snap->oui[2] = oui[2];
173 173
174 *(u16 *)(data + SNAP_SIZE) = h_proto; 174 *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
175 175
176 return SNAP_SIZE + sizeof(u16); 176 return SNAP_SIZE + sizeof(u16);
177} 177}
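
The EtherType after the SNAP header is big-endian on the wire, so storing the host-order u16 only happened to work on big-endian CPUs. Typing the store as __be16 and converting with htons() makes it correct on any host and lets sparse flag future misuse. For illustration, with ETH_P_IP chosen arbitrarily:

	u8 frame[SNAP_SIZE + sizeof(u16)];

	/* 0x0800 ends up as the bytes 08 00 in the frame on every host */
	*(__be16 *)(frame + SNAP_SIZE) = htons(ETH_P_IP);
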
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 3b6e5358c723..7de79d59a4cd 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2218,6 +2218,7 @@ static void __exit speakup_exit(void)
2218 unregister_keyboard_notifier(&keyboard_notifier_block); 2218 unregister_keyboard_notifier(&keyboard_notifier_block);
2219 unregister_vt_notifier(&vt_notifier_block); 2219 unregister_vt_notifier(&vt_notifier_block);
2220 speakup_unregister_devsynth(); 2220 speakup_unregister_devsynth();
2221 speakup_cancel_paste();
2221 del_timer(&cursor_timer); 2222 del_timer(&cursor_timer);
2222 kthread_stop(speakup_task); 2223 kthread_stop(speakup_task);
2223 speakup_task = NULL; 2224 speakup_task = NULL;
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index f0fb00392d6b..ca04d3669acc 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -4,6 +4,10 @@
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/device.h> /* for dev_warn */ 5#include <linux/device.h> /* for dev_warn */
6#include <linux/selection.h> 6#include <linux/selection.h>
7#include <linux/workqueue.h>
8#include <linux/tty.h>
9#include <linux/tty_flip.h>
10#include <asm/cmpxchg.h>
7 11
8#include "speakup.h" 12#include "speakup.h"
9 13
@@ -121,31 +125,61 @@ int speakup_set_selection(struct tty_struct *tty)
121 return 0; 125 return 0;
122} 126}
123 127
124/* TODO: move to some helper thread, probably. That'd fix having to check for 128struct speakup_paste_work {
125 * in_atomic(). */ 129 struct work_struct work;
126int speakup_paste_selection(struct tty_struct *tty) 130 struct tty_struct *tty;
131};
132
133static void __speakup_paste_selection(struct work_struct *work)
127{ 134{
135 struct speakup_paste_work *spw =
136 container_of(work, struct speakup_paste_work, work);
137 struct tty_struct *tty = xchg(&spw->tty, NULL);
128 struct vc_data *vc = (struct vc_data *) tty->driver_data; 138 struct vc_data *vc = (struct vc_data *) tty->driver_data;
129 int pasted = 0, count; 139 int pasted = 0, count;
140 struct tty_ldisc *ld;
130 DECLARE_WAITQUEUE(wait, current); 141 DECLARE_WAITQUEUE(wait, current);
142
143 ld = tty_ldisc_ref_wait(tty);
144 tty_buffer_lock_exclusive(&vc->port);
145
131 add_wait_queue(&vc->paste_wait, &wait); 146 add_wait_queue(&vc->paste_wait, &wait);
132 while (sel_buffer && sel_buffer_lth > pasted) { 147 while (sel_buffer && sel_buffer_lth > pasted) {
133 set_current_state(TASK_INTERRUPTIBLE); 148 set_current_state(TASK_INTERRUPTIBLE);
134 if (test_bit(TTY_THROTTLED, &tty->flags)) { 149 if (test_bit(TTY_THROTTLED, &tty->flags)) {
135 if (in_atomic())
136 /* if we are in an interrupt handler, abort */
137 break;
138 schedule(); 150 schedule();
139 continue; 151 continue;
140 } 152 }
141 count = sel_buffer_lth - pasted; 153 count = sel_buffer_lth - pasted;
142 count = min_t(int, count, tty->receive_room); 154 count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
143 tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted, 155 count);
144 NULL, count);
145 pasted += count; 156 pasted += count;
146 } 157 }
147 remove_wait_queue(&vc->paste_wait, &wait); 158 remove_wait_queue(&vc->paste_wait, &wait);
148 current->state = TASK_RUNNING; 159 current->state = TASK_RUNNING;
160
161 tty_buffer_unlock_exclusive(&vc->port);
162 tty_ldisc_deref(ld);
163 tty_kref_put(tty);
164}
165
166static struct speakup_paste_work speakup_paste_work = {
167 .work = __WORK_INITIALIZER(speakup_paste_work.work,
168 __speakup_paste_selection)
169};
170
171int speakup_paste_selection(struct tty_struct *tty)
172{
173 if (cmpxchg(&speakup_paste_work.tty, NULL, tty) != NULL)
174 return -EBUSY;
175
176 tty_kref_get(tty);
177 schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);
149 return 0; 178 return 0;
150} 179}
151 180
181void speakup_cancel_paste(void)
182{
183 cancel_work_sync(&speakup_paste_work.work);
184 tty_kref_put(speakup_paste_work.tty);
185}
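
Moving the paste into a workqueue lets it sleep legitimately (tty_ldisc_ref_wait(), throttling waits) instead of having to bail out when called in atomic context. The tty pointer doubles as the busy flag: cmpxchg() claims the single slot on submit, xchg() in the worker consumes it, and speakup_cancel_paste() on module exit flushes the work and drops the leftover reference. The claim/consume idiom in isolation, with "work" as a generic stand-in for speakup_paste_work:

	/* submit side: claim the one slot or report busy */
	if (cmpxchg(&work->tty, NULL, tty) != NULL)
		return -EBUSY;
	tty_kref_get(tty);
	schedule_work_on(WORK_CPU_UNBOUND, &work->work);

	/* worker side: consume the slot so the next paste can be queued */
	tty = xchg(&work->tty, NULL);
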
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index a7bcceec436a..898dce5e1243 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -75,6 +75,7 @@ extern void synth_buffer_clear(void);
75extern void speakup_clear_selection(void); 75extern void speakup_clear_selection(void);
76extern int speakup_set_selection(struct tty_struct *tty); 76extern int speakup_set_selection(struct tty_struct *tty);
77extern int speakup_paste_selection(struct tty_struct *tty); 77extern int speakup_paste_selection(struct tty_struct *tty);
78extern void speakup_cancel_paste(void);
78extern void speakup_register_devsynth(void); 79extern void speakup_register_devsynth(void);
79extern void speakup_unregister_devsynth(void); 80extern void speakup_unregister_devsynth(void);
80extern void synth_write(const char *buf, size_t count); 81extern void synth_write(const char *buf, size_t count);
diff --git a/drivers/staging/speakup/speakup_acntsa.c b/drivers/staging/speakup/speakup_acntsa.c
index c7f014ed9628..5079dbd5d7ad 100644
--- a/drivers/staging/speakup/speakup_acntsa.c
+++ b/drivers/staging/speakup/speakup_acntsa.c
@@ -60,15 +60,15 @@ static struct kobj_attribute vol_attribute =
60 __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store); 60 __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
61 61
62static struct kobj_attribute delay_time_attribute = 62static struct kobj_attribute delay_time_attribute =
63 __ATTR(delay_time, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store); 63 __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
64static struct kobj_attribute direct_attribute = 64static struct kobj_attribute direct_attribute =
65 __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store); 65 __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
66static struct kobj_attribute full_time_attribute = 66static struct kobj_attribute full_time_attribute =
67 __ATTR(full_time, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store); 67 __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
68static struct kobj_attribute jiffy_delta_attribute = 68static struct kobj_attribute jiffy_delta_attribute =
69 __ATTR(jiffy_delta, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store); 69 __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
70static struct kobj_attribute trigger_time_attribute = 70static struct kobj_attribute trigger_time_attribute =
71 __ATTR(trigger_time, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store); 71 __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
72 72
73/* 73/*
74 * Create a group of attributes so that we can create and destroy them all 74 * Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index cf78d1985cd8..143deb62467d 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -60,6 +60,7 @@ void tty_buffer_lock_exclusive(struct tty_port *port)
60 atomic_inc(&buf->priority); 60 atomic_inc(&buf->priority);
61 mutex_lock(&buf->lock); 61 mutex_lock(&buf->lock);
62} 62}
63EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);
63 64
64void tty_buffer_unlock_exclusive(struct tty_port *port) 65void tty_buffer_unlock_exclusive(struct tty_port *port)
65{ 66{
@@ -73,6 +74,7 @@ void tty_buffer_unlock_exclusive(struct tty_port *port)
73 if (restart) 74 if (restart)
74 queue_work(system_unbound_wq, &buf->work); 75 queue_work(system_unbound_wq, &buf->work);
75} 76}
77EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);
76 78
77/** 79/**
78 * tty_buffer_space_avail - return unused buffer space 80 * tty_buffer_space_avail - return unused buffer space
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 888881e5f292..4aeb10034de7 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1822,10 +1822,13 @@ int usb_runtime_suspend(struct device *dev)
1822 if (status == -EAGAIN || status == -EBUSY) 1822 if (status == -EAGAIN || status == -EBUSY)
1823 usb_mark_last_busy(udev); 1823 usb_mark_last_busy(udev);
1824 1824
1825 /* The PM core reacts badly unless the return code is 0, 1825 /*
1826 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error. 1826 * The PM core reacts badly unless the return code is 0,
1827 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error
1828 * (except for root hubs, because they don't suspend through
1829 * an upstream port like other USB devices).
1827 */ 1830 */
1828 if (status != 0) 1831 if (status != 0 && udev->parent)
1829 return -EBUSY; 1832 return -EBUSY;
1830 return status; 1833 return status;
1831} 1834}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 090469ebfcff..229a73f64304 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1691,8 +1691,19 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
1691 */ 1691 */
1692 pm_runtime_set_autosuspend_delay(&hdev->dev, 0); 1692 pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
1693 1693
1694 /* Hubs have proper suspend/resume support. */ 1694 /*
1695 usb_enable_autosuspend(hdev); 1695 * Hubs have proper suspend/resume support, except for root hubs
1696 * where the controller driver doesn't have bus_suspend and
1697 * bus_resume methods.
1698 */
1699 if (hdev->parent) { /* normal device */
1700 usb_enable_autosuspend(hdev);
1701 } else { /* root hub */
1702 const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver;
1703
1704 if (drv->bus_suspend && drv->bus_resume)
1705 usb_enable_autosuspend(hdev);
1706 }
1696 1707
1697 if (hdev->level == MAX_TOPO_LEVEL) { 1708 if (hdev->level == MAX_TOPO_LEVEL) {
1698 dev_err(&intf->dev, 1709 dev_err(&intf->dev,
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 00661d305143..4a6d3dd68572 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -847,6 +847,13 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
847 bool ehci_found = false; 847 bool ehci_found = false;
848 struct pci_dev *companion = NULL; 848 struct pci_dev *companion = NULL;
849 849
850 /* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
851 * switching ports from EHCI to xHCI
852 */
853 if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
854 xhci_pdev->subsystem_device == 0x90a8)
855 return;
856
850 /* make sure an intel EHCI controller exists */ 857 /* make sure an intel EHCI controller exists */
851 for_each_pci_dev(companion) { 858 for_each_pci_dev(companion) {
852 if (companion->class == PCI_CLASS_SERIAL_USB_EHCI && 859 if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index c089668308ad..b1a8a5f4bbb8 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1822,6 +1822,16 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1822 kfree(cur_cd); 1822 kfree(cur_cd);
1823 } 1823 }
1824 1824
1825 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1826 for (i = 0; i < num_ports; i++) {
1827 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1828 for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1829 struct list_head *ep = &bwt->interval_bw[j].endpoints;
1830 while (!list_empty(ep))
1831 list_del_init(ep->next);
1832 }
1833 }
1834
1825 for (i = 1; i < MAX_HC_SLOTS; ++i) 1835 for (i = 1; i < MAX_HC_SLOTS; ++i)
1826 xhci_free_virt_device(xhci, i); 1836 xhci_free_virt_device(xhci, i);
1827 1837
@@ -1857,16 +1867,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1857 if (!xhci->rh_bw) 1867 if (!xhci->rh_bw)
1858 goto no_bw; 1868 goto no_bw;
1859 1869
1860 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1861 for (i = 0; i < num_ports; i++) {
1862 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1863 for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1864 struct list_head *ep = &bwt->interval_bw[j].endpoints;
1865 while (!list_empty(ep))
1866 list_del_init(ep->next);
1867 }
1868 }
1869
1870 for (i = 0; i < num_ports; i++) { 1870 for (i = 0; i < num_ports; i++) {
1871 struct xhci_tt_bw_info *tt, *n; 1871 struct xhci_tt_bw_info *tt, *n;
1872 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) { 1872 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7c6e1dedeb06..edf3b124583c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -580,6 +580,8 @@ static const struct usb_device_id id_table_combined[] = {
580 { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) }, 580 { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
581 { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID), 581 { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID),
582 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 582 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
583 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
584 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
583 /* 585 /*
584 * ELV devices: 586 * ELV devices:
585 */ 587 */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 993c93df6874..500474c48f4b 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -538,6 +538,11 @@
538 */ 538 */
539#define FTDI_TIAO_UMPA_PID 0x8a98 /* TIAO/DIYGADGET USB Multi-Protocol Adapter */ 539#define FTDI_TIAO_UMPA_PID 0x8a98 /* TIAO/DIYGADGET USB Multi-Protocol Adapter */
540 540
541/*
542 * NovaTech product ids (FTDI_VID)
543 */
544#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
545
541 546
542/********************************/ 547/********************************/
543/** third-party VID/PID combos **/ 548/** third-party VID/PID combos **/
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index df90dae53eb9..c0a42e9e6777 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -821,7 +821,7 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
 	firmware_rec =  (struct ti_i2c_firmware_rec*)i2c_header->Data;
 
 	i2c_header->Type	= I2C_DESC_TYPE_FIRMWARE_BLANK;
-	i2c_header->Size	= (__u16)buffer_size;
+	i2c_header->Size	= cpu_to_le16(buffer_size);
 	i2c_header->CheckSum	= cs;
 	firmware_rec->Ver_Major	= OperationalMajorVersion;
 	firmware_rec->Ver_Minor	= OperationalMinorVersion;
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index 51f83fbb73bb..6f6a856bc37c 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -594,7 +594,7 @@ struct edge_boot_descriptor {
 
 struct ti_i2c_desc {
 	__u8	Type;			// Type of descriptor
-	__u16	Size;			// Size of data only not including header
+	__le16	Size;			// Size of data only not including header
 	__u8	CheckSum;		// Checksum (8 bit sum of data only)
 	__u8	Data[0];		// Data starts here
 } __attribute__((packed));
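The io_ti.c/io_usbvend.h pair fixes an endianness bug: the on-wire ti_i2c_desc.Size field is little-endian, so it must be built with cpu_to_le16() and typed __le16; the old bare __u16 cast silently stored a byte-swapped size on big-endian hosts. A small userspace sketch of the same discipline, writing the field byte by byte so the result is correct on any host:

    #include <stdio.h>
    #include <stdint.h>

    /* Store a 16-bit value little-endian regardless of host byte order,
     * which is what cpu_to_le16() guarantees in the kernel. */
    static void put_le16(uint8_t *p, uint16_t v)
    {
        p[0] = (uint8_t)(v & 0xff);
        p[1] = (uint8_t)(v >> 8);
    }

    static uint16_t get_le16(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
        uint8_t hdr[4] = { 0 };
        uint16_t size = 0x1234;

        /* hdr[1..2] plays the role of the Size field in the descriptor. */
        put_le16(&hdr[1], size);
        printf("round trip: 0x%04x\n", get_le16(&hdr[1]));
        return 0;
    }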
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index f213ee978516..948a19f0cdf7 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED	0x9000
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED	0x9001
 #define NOVATELWIRELESS_PRODUCT_E362		0x9010
+#define NOVATELWIRELESS_PRODUCT_E371		0x9011
 #define NOVATELWIRELESS_PRODUCT_G2		0xA010
 #define NOVATELWIRELESS_PRODUCT_MC551		0xB001
 
@@ -1012,6 +1013,7 @@ static const struct usb_device_id option_ids[] = {
 	/* Novatel Ovation MC551 a.k.a. Verizon USB551L */
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
 
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 1c8c6cc6de30..4b0eff6da674 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -130,6 +130,15 @@ static void afs_cm_destructor(struct afs_call *call)
 {
 	_enter("");
 
+	/* Break the callbacks here so that we do it after the final ACK is
+	 * received.  The step number here must match the final number in
+	 * afs_deliver_cb_callback().
+	 */
+	if (call->unmarshall == 6) {
+		ASSERT(call->server && call->count && call->request);
+		afs_break_callbacks(call->server, call->count, call->request);
+	}
+
 	afs_put_server(call->server);
 	call->server = NULL;
 	kfree(call->buffer);
@@ -272,6 +281,16 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
 		_debug("trailer");
 		if (skb->len != 0)
 			return -EBADMSG;
+
+		/* Record that the message was unmarshalled successfully so
+		 * that the call destructor can know to do the callback
+		 * breaking work, even if the final ACK isn't received.
+		 *
+		 * If the step number changes, then afs_cm_destructor() must be
+		 * updated also.
+		 */
+		call->unmarshall++;
+	case 6:
 		break;
 	}
 
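The cmservice.c change defers the callback break to the call destructor and keys it on the unmarshalling step having reached its final value (6), so the work runs after the final ACK yet is not lost if that ACK never arrives. A toy sketch of coupling a destructor to a step counter; the names and the step constant are illustrative, not the AFS code:

    #include <stdio.h>

    #define FINAL_STEP 6

    struct call {
        int unmarshall;         /* current decode step */
        int callbacks_pending;  /* work the destructor may need to do */
    };

    /* Runs one decode step; returns 1 while more input is needed, 0 when
     * the message has been fully unmarshalled. */
    static int deliver(struct call *c)
    {
        if (c->unmarshall < FINAL_STEP - 1) {
            c->unmarshall++;
            return 1;
        }
        /* Record completion so the destructor knows to do the deferred
         * work; this must stay in sync with FINAL_STEP. */
        c->unmarshall++;
        return 0;
    }

    static void destructor(struct call *c)
    {
        if (c->unmarshall == FINAL_STEP && c->callbacks_pending)
            printf("breaking %d callbacks in destructor\n",
                   c->callbacks_pending);
    }

    int main(void)
    {
        struct call c = { .unmarshall = 0, .callbacks_pending = 3 };

        while (deliver(&c))
            ;
        destructor(&c);
        return 0;
    }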
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index be75b500005d..590b55f46d61 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -75,7 +75,7 @@ struct afs_call {
 	const struct afs_call_type *type;	/* type of call */
 	const struct afs_wait_mode *wait_mode;	/* completion wait mode */
 	wait_queue_head_t	waitq;		/* processes awaiting completion */
-	work_func_t		async_workfn;
+	void (*async_workfn)(struct afs_call *call); /* asynchronous work function */
 	struct work_struct	async_work;	/* asynchronous work processor */
 	struct work_struct	work;		/* actual work processor */
 	struct sk_buff_head	rx_queue;	/* received packets */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index ef943df73b8c..03a3beb17004 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -25,7 +25,7 @@ static void afs_wake_up_call_waiter(struct afs_call *);
 static int afs_wait_for_call_to_complete(struct afs_call *);
 static void afs_wake_up_async_call(struct afs_call *);
 static int afs_dont_wait_for_call_to_complete(struct afs_call *);
-static void afs_process_async_call(struct work_struct *);
+static void afs_process_async_call(struct afs_call *);
 static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *);
 static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool);
 
@@ -58,6 +58,13 @@ static void afs_collect_incoming_call(struct work_struct *);
 static struct sk_buff_head afs_incoming_calls;
 static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);
 
+static void afs_async_workfn(struct work_struct *work)
+{
+	struct afs_call *call = container_of(work, struct afs_call, async_work);
+
+	call->async_workfn(call);
+}
+
 /*
  * open an RxRPC socket and bind it to be a server for callback notifications
  * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
@@ -184,6 +191,28 @@ static void afs_free_call(struct afs_call *call)
 }
 
 /*
+ * End a call but do not free it
+ */
+static void afs_end_call_nofree(struct afs_call *call)
+{
+	if (call->rxcall) {
+		rxrpc_kernel_end_call(call->rxcall);
+		call->rxcall = NULL;
+	}
+	if (call->type->destructor)
+		call->type->destructor(call);
+}
+
+/*
+ * End a call and free it
+ */
+static void afs_end_call(struct afs_call *call)
+{
+	afs_end_call_nofree(call);
+	afs_free_call(call);
+}
+
+/*
  * allocate a call with flat request and reply buffers
  */
 struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
@@ -326,7 +355,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 	       atomic_read(&afs_outstanding_calls));
 
 	call->wait_mode = wait_mode;
-	INIT_WORK(&call->async_work, afs_process_async_call);
+	call->async_workfn = afs_process_async_call;
+	INIT_WORK(&call->async_work, afs_async_workfn);
 
 	memset(&srx, 0, sizeof(srx));
 	srx.srx_family = AF_RXRPC;
@@ -383,11 +413,8 @@ error_do_abort:
 	rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
 	while ((skb = skb_dequeue(&call->rx_queue)))
 		afs_free_skb(skb);
-	rxrpc_kernel_end_call(rxcall);
-	call->rxcall = NULL;
 error_kill_call:
-	call->type->destructor(call);
-	afs_free_call(call);
+	afs_end_call(call);
 	_leave(" = %d", ret);
 	return ret;
 }
@@ -509,12 +536,8 @@ static void afs_deliver_to_call(struct afs_call *call)
 	if (call->state >= AFS_CALL_COMPLETE) {
 		while ((skb = skb_dequeue(&call->rx_queue)))
 			afs_free_skb(skb);
-		if (call->incoming) {
-			rxrpc_kernel_end_call(call->rxcall);
-			call->rxcall = NULL;
-			call->type->destructor(call);
-			afs_free_call(call);
-		}
+		if (call->incoming)
+			afs_end_call(call);
 	}
 
 	_leave("");
@@ -564,10 +587,7 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
 	}
 
 	_debug("call complete");
-	rxrpc_kernel_end_call(call->rxcall);
-	call->rxcall = NULL;
-	call->type->destructor(call);
-	afs_free_call(call);
+	afs_end_call(call);
 	_leave(" = %d", ret);
 	return ret;
 }
@@ -603,11 +623,8 @@ static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
 /*
  * delete an asynchronous call
  */
-static void afs_delete_async_call(struct work_struct *work)
+static void afs_delete_async_call(struct afs_call *call)
 {
-	struct afs_call *call =
-		container_of(work, struct afs_call, async_work);
-
 	_enter("");
 
 	afs_free_call(call);
@@ -620,11 +637,8 @@ static void afs_delete_async_call(struct work_struct *work)
  * - on a multiple-thread workqueue this work item may try to run on several
  *   CPUs at the same time
  */
-static void afs_process_async_call(struct work_struct *work)
+static void afs_process_async_call(struct afs_call *call)
 {
-	struct afs_call *call =
-		container_of(work, struct afs_call, async_work);
-
 	_enter("");
 
 	if (!skb_queue_empty(&call->rx_queue))
@@ -637,10 +651,7 @@ static void afs_process_async_call(struct work_struct *work)
 		call->reply = NULL;
 
 		/* kill the call */
-		rxrpc_kernel_end_call(call->rxcall);
-		call->rxcall = NULL;
-		if (call->type->destructor)
-			call->type->destructor(call);
+		afs_end_call_nofree(call);
 
 		/* we can't just delete the call because the work item may be
 		 * queued */
@@ -663,13 +674,6 @@ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
 	call->reply_size += len;
 }
 
-static void afs_async_workfn(struct work_struct *work)
-{
-	struct afs_call *call = container_of(work, struct afs_call, async_work);
-
-	call->async_workfn(work);
-}
-
 /*
  * accept the backlog of incoming calls
  */
@@ -790,10 +794,7 @@ void afs_send_empty_reply(struct afs_call *call)
 		_debug("oom");
 		rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
 	default:
-		rxrpc_kernel_end_call(call->rxcall);
-		call->rxcall = NULL;
-		call->type->destructor(call);
-		afs_free_call(call);
+		afs_end_call(call);
 		_leave(" [error]");
 		return;
 	}
@@ -823,17 +824,16 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
 	call->state = AFS_CALL_AWAIT_ACK;
 	n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
 	if (n >= 0) {
+		/* Success */
 		_leave(" [replied]");
 		return;
 	}
+
 	if (n == -ENOMEM) {
 		_debug("oom");
 		rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
 	}
-	rxrpc_kernel_end_call(call->rxcall);
-	call->rxcall = NULL;
-	call->type->destructor(call);
-	afs_free_call(call);
+	afs_end_call(call);
 	_leave(" [error]");
 }
 
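The rxrpc.c series does two things: it folds the repeated four-line teardown into afs_end_call()/afs_end_call_nofree(), and it retypes async_workfn to take the afs_call directly, with a single work_struct trampoline (afs_async_workfn) doing the container_of() step once. A plain-C sketch of that trampoline pattern; the struct names are illustrative stand-ins, not the kernel's workqueue API:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct {
        void (*func)(struct work_struct *work);
    };

    struct afs_call_like {
        int id;
        void (*async_workfn)(struct afs_call_like *call); /* typed callback */
        struct work_struct async_work;                    /* queued item */
    };

    /* Single trampoline with the signature the work queue expects; it
     * recovers the containing object and dispatches the typed callback. */
    static void async_trampoline(struct work_struct *work)
    {
        struct afs_call_like *call =
            container_of(work, struct afs_call_like, async_work);

        call->async_workfn(call);
    }

    static void process_call(struct afs_call_like *call)
    {
        printf("processing call %d\n", call->id);
    }

    int main(void)
    {
        struct afs_call_like call = {
            .id = 42,
            .async_workfn = process_call,
            .async_work = { .func = async_trampoline },
        };

        /* Stand-in for the workqueue invoking the queued function. */
        call.async_work.func(&call.async_work);
        return 0;
    }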
diff --git a/fs/dcache.c b/fs/dcache.c
index 42ae01eefc07..be2bea834bf4 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -441,42 +441,12 @@ void d_drop(struct dentry *dentry)
 }
 EXPORT_SYMBOL(d_drop);
 
-/*
- * Finish off a dentry we've decided to kill.
- * dentry->d_lock must be held, returns with it unlocked.
- * If ref is non-zero, then decrement the refcount too.
- * Returns dentry requiring refcount drop, or NULL if we're done.
- */
-static struct dentry *
-dentry_kill(struct dentry *dentry, int unlock_on_failure)
-	__releases(dentry->d_lock)
+static void __dentry_kill(struct dentry *dentry)
 {
-	struct inode *inode;
 	struct dentry *parent = NULL;
 	bool can_free = true;
-
-	if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
-		can_free = dentry->d_flags & DCACHE_MAY_FREE;
-		spin_unlock(&dentry->d_lock);
-		goto out;
-	}
-
-	inode = dentry->d_inode;
-	if (inode && !spin_trylock(&inode->i_lock)) {
-relock:
-		if (unlock_on_failure) {
-			spin_unlock(&dentry->d_lock);
-			cpu_relax();
-		}
-		return dentry; /* try again with same dentry */
-	}
 	if (!IS_ROOT(dentry))
 		parent = dentry->d_parent;
-	if (parent && !spin_trylock(&parent->d_lock)) {
-		if (inode)
-			spin_unlock(&inode->i_lock);
-		goto relock;
-	}
 
 	/*
 	 * The dentry is now unrecoverably dead to the world.
@@ -520,9 +490,72 @@ relock:
 		can_free = false;
 	}
 	spin_unlock(&dentry->d_lock);
-out:
 	if (likely(can_free))
 		dentry_free(dentry);
+}
+
+/*
+ * Finish off a dentry we've decided to kill.
+ * dentry->d_lock must be held, returns with it unlocked.
+ * If ref is non-zero, then decrement the refcount too.
+ * Returns dentry requiring refcount drop, or NULL if we're done.
+ */
+static struct dentry *dentry_kill(struct dentry *dentry)
+	__releases(dentry->d_lock)
+{
+	struct inode *inode = dentry->d_inode;
+	struct dentry *parent = NULL;
+
+	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
+		goto failed;
+
+	if (!IS_ROOT(dentry)) {
+		parent = dentry->d_parent;
+		if (unlikely(!spin_trylock(&parent->d_lock))) {
+			if (inode)
+				spin_unlock(&inode->i_lock);
+			goto failed;
+		}
+	}
+
+	__dentry_kill(dentry);
+	return parent;
+
+failed:
+	spin_unlock(&dentry->d_lock);
+	cpu_relax();
+	return dentry; /* try again with same dentry */
+}
+
+static inline struct dentry *lock_parent(struct dentry *dentry)
+{
+	struct dentry *parent = dentry->d_parent;
+	if (IS_ROOT(dentry))
+		return NULL;
+	if (likely(spin_trylock(&parent->d_lock)))
+		return parent;
+	spin_unlock(&dentry->d_lock);
+	rcu_read_lock();
+again:
+	parent = ACCESS_ONCE(dentry->d_parent);
+	spin_lock(&parent->d_lock);
+	/*
+	 * We can't blindly lock dentry until we are sure
+	 * that we won't violate the locking order.
+	 * Any changes of dentry->d_parent must have
+	 * been done with parent->d_lock held, so
+	 * spin_lock() above is enough of a barrier
+	 * for checking if it's still our child.
+	 */
+	if (unlikely(parent != dentry->d_parent)) {
+		spin_unlock(&parent->d_lock);
+		goto again;
+	}
+	rcu_read_unlock();
+	if (parent != dentry)
+		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+	else
+		parent = NULL;
 	return parent;
 }
 
@@ -579,7 +612,7 @@ repeat:
 		return;
 
 kill_it:
-	dentry = dentry_kill(dentry, 1);
+	dentry = dentry_kill(dentry);
 	if (dentry)
 		goto repeat;
 }
@@ -797,8 +830,11 @@ static void shrink_dentry_list(struct list_head *list)
 	struct dentry *dentry, *parent;
 
 	while (!list_empty(list)) {
+		struct inode *inode;
 		dentry = list_entry(list->prev, struct dentry, d_lru);
 		spin_lock(&dentry->d_lock);
+		parent = lock_parent(dentry);
+
 		/*
 		 * The dispose list is isolated and dentries are not accounted
 		 * to the LRU here, so we can simply remove it from the list
@@ -812,26 +848,33 @@ static void shrink_dentry_list(struct list_head *list)
 		 */
 		if ((int)dentry->d_lockref.count > 0) {
 			spin_unlock(&dentry->d_lock);
+			if (parent)
+				spin_unlock(&parent->d_lock);
 			continue;
 		}
 
-		parent = dentry_kill(dentry, 0);
-		/*
-		 * If dentry_kill returns NULL, we have nothing more to do.
-		 */
-		if (!parent)
+
+		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
+			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
+			spin_unlock(&dentry->d_lock);
+			if (parent)
+				spin_unlock(&parent->d_lock);
+			if (can_free)
+				dentry_free(dentry);
 			continue;
+		}
 
-		if (unlikely(parent == dentry)) {
-			/*
-			 * trylocks have failed and d_lock has been held the
-			 * whole time, so it could not have been added to any
-			 * other lists. Just add it back to the shrink list.
-			 */
-			d_shrink_add(dentry, list);
+		inode = dentry->d_inode;
+		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
+			d_shrink_add(dentry, list);
 			spin_unlock(&dentry->d_lock);
+			if (parent)
+				spin_unlock(&parent->d_lock);
 			continue;
 		}
+
+		__dentry_kill(dentry);
+
 		/*
 		 * We need to prune ancestors too. This is necessary to prevent
 		 * quadratic behavior of shrink_dcache_parent(), but is also
@@ -839,8 +882,26 @@ static void shrink_dentry_list(struct list_head *list)
 		 * fragmentation.
 		 */
 		dentry = parent;
-		while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
-			dentry = dentry_kill(dentry, 1);
+		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
+			parent = lock_parent(dentry);
+			if (dentry->d_lockref.count != 1) {
+				dentry->d_lockref.count--;
+				spin_unlock(&dentry->d_lock);
+				if (parent)
+					spin_unlock(&parent->d_lock);
+				break;
+			}
+			inode = dentry->d_inode;	/* can't be NULL */
+			if (unlikely(!spin_trylock(&inode->i_lock))) {
+				spin_unlock(&dentry->d_lock);
+				if (parent)
+					spin_unlock(&parent->d_lock);
+				cpu_relax();
+				continue;
+			}
+			__dentry_kill(dentry);
+			dentry = parent;
+		}
 	}
 }
 
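The dcache rework splits dentry_kill() into the unconditional __dentry_kill() and a trylock front end: take the child's lock first, then trylock the inode and parent locks; on any contention drop everything, cpu_relax(), and retry, which respects the parent-before-child lock order without ever blocking while holding the child. A pthread sketch of that trylock-and-retry discipline (the node type and helper are illustrative, not the VFS code):

    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    struct node {
        pthread_mutex_t lock;
        struct node *parent;
    };

    /* Child lock is held on entry.  Try to take the parent without
     * violating the parent-before-child order: on contention, drop the
     * child lock, yield, and tell the caller to retry, like the
     * "failed:" path of the new dentry_kill() above. */
    static int lock_parent_try(struct node *n, struct node **parentp)
    {
        struct node *parent = n->parent;

        if (!parent) {
            *parentp = NULL;
            return 0;
        }
        if (pthread_mutex_trylock(&parent->lock) == 0) {
            *parentp = parent;
            return 0;
        }
        pthread_mutex_unlock(&n->lock);
        sched_yield();          /* userspace stand-in for cpu_relax() */
        return -1;              /* caller must relock n and retry */
    }

    int main(void)
    {
        struct node parent = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct node child  = { PTHREAD_MUTEX_INITIALIZER, &parent };
        struct node *locked_parent;

        for (;;) {
            pthread_mutex_lock(&child.lock);
            if (lock_parent_try(&child, &locked_parent) == 0)
                break;
        }
        printf("both locks held (parent=%p)\n", (void *)locked_parent);
        if (locked_parent)
            pthread_mutex_unlock(&locked_parent->lock);
        pthread_mutex_unlock(&child.lock);
        return 0;
    }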
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index b6f46013dddf..f66c66b9f182 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -590,7 +590,7 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
 			add_to_mask(state, &state->groups->aces[i].perms);
 	}
 
-	if (!state->users->n && !state->groups->n) {
+	if (state->users->n || state->groups->n) {
 		pace++;
 		pace->e_tag = ACL_MASK;
 		low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 32b699bebb9c..9a77a5a21557 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3717,9 +3717,16 @@ out:
 static __be32
 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
 {
-	if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
+	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
+
+	if (check_for_locks(stp->st_file, lo))
 		return nfserr_locks_held;
-	release_lock_stateid(stp);
+	/*
+	 * Currently there's a 1-1 lock stateid<->lockowner
+	 * correspondence, and we have to delete the lockowner when we
+	 * delete the lock stateid:
+	 */
+	unhash_lockowner(lo);
 	return nfs_ok;
 }
 
@@ -4159,6 +4166,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c
 
 	if (!same_owner_str(&lo->lo_owner, owner, clid))
 		return false;
+	if (list_empty(&lo->lo_owner.so_stateids)) {
+		WARN_ON_ONCE(1);
+		return false;
+	}
 	lst = list_first_entry(&lo->lo_owner.so_stateids,
 			       struct nfs4_ol_stateid, st_perstateowner);
 	return lst->st_file->fi_inode == inode;
diff --git a/fs/splice.c b/fs/splice.c
index 9bc07d2b53cf..e246954ea48c 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1537,7 +1537,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
 	struct iovec iovstack[UIO_FASTIOV];
 	struct iovec *iov = iovstack;
 	struct iov_iter iter;
-	ssize_t count = 0;
+	ssize_t count;
 
 	pipe = get_pipe_info(file);
 	if (!pipe)
@@ -1546,8 +1546,9 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
 	ret = rw_copy_check_uvector(READ, uiov, nr_segs,
 				    ARRAY_SIZE(iovstack), iovstack, &iov);
 	if (ret <= 0)
-		return ret;
+		goto out;
 
+	count = ret;
 	iov_iter_init(&iter, iov, nr_segs, count, 0);
 
 	sd.len = 0;
@@ -1560,6 +1561,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
 	ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
 	pipe_unlock(pipe);
 
+out:
 	if (iov != iovstack)
 		kfree(iov);
 
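The splice fix is the classic single-exit cleanup: once iov may point at heap memory (when the request overflows the on-stack iovstack), the early `return ret;` leaked it, so every exit now funnels through the `out:` label that frees a non-stack buffer. A minimal userspace restatement of that pattern (the function and sizes are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define FASTBUF 8

    /* Same shape as the vmsplice_to_user() fix: once buf may point at
     * heap memory, every exit path must go through the cleanup label. */
    static long copy_segments(size_t nr_segs)
    {
        char stackbuf[FASTBUF];
        char *buf = stackbuf;
        long ret;

        if (nr_segs > FASTBUF) {
            buf = malloc(nr_segs);
            if (!buf)
                return -1;
        }

        ret = (nr_segs == 0) ? -1 : (long)nr_segs;  /* pretend to work */
        if (ret <= 0)
            goto out;        /* was "return ret;" -- that leaked buf */

        memset(buf, 0, nr_segs);
    out:
        if (buf != stackbuf)
            free(buf);
        return ret;
    }

    int main(void)
    {
        printf("%ld %ld\n", copy_segments(4), copy_segments(0));
        return 0;
    }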
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 63b5eff0a80f..fdd7e1b61f60 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -47,6 +47,7 @@ struct amba_driver {
 enum amba_vendor {
 	AMBA_VENDOR_ARM = 0x41,
 	AMBA_VENDOR_ST = 0x80,
+	AMBA_VENDOR_QCOM = 0x51,
 };
 
 extern struct bus_type amba_bustype;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8300fb87b84a..72cb0ddb9678 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -429,6 +429,7 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
 
 struct dmaengine_unmap_data {
+	u8 map_cnt;
 	u8 to_cnt;
 	u8 from_cnt;
 	u8 bidi_cnt;
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index a899dc24be15..a6aa970758a2 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -194,6 +194,7 @@ struct team {
 	bool user_carrier_enabled;
 	bool queue_override_enabled;
 	struct list_head *qom_lists; /* array of queue override mapping lists */
+	bool port_mtu_change_allowed;
 	struct {
 		unsigned int count;
 		unsigned int interval; /* in ms */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 7a28115dd396..9e572daa15d5 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -16,9 +16,10 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
 }
 
 enum netlink_skb_flags {
 	NETLINK_SKB_MMAPED	= 0x1,	/* Packet data is mmaped */
 	NETLINK_SKB_TX		= 0x2,	/* Packet was sent by userspace */
 	NETLINK_SKB_DELIVERED	= 0x4,	/* Packet was delivered */
+	NETLINK_SKB_DST		= 0x8,	/* Dst set in sendto or sendmsg */
 };
 
 struct netlink_skb_parms {
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index 41a13e70f41f..7944cdc27bed 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -10,7 +10,7 @@
 
 struct dma_chan;
 
-#if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE)
+#if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE))
 bool omap_dma_filter_fn(struct dma_chan *, void *);
 #else
 static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 823ec7bb9c67..01d590ee5e7e 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -171,5 +171,4 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
 {
 	WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
 }
-
 #endif /* _NET_INETPEER_H */
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index b21ea454bd33..cf6714752b69 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -357,7 +357,7 @@ enum {
 #define AUDIT_ARCH_MIPS64N32	(EM_MIPS|__AUDIT_ARCH_64BIT|\
 				 __AUDIT_ARCH_CONVENTION_MIPS64_N32)
 #define AUDIT_ARCH_MIPSEL64	(EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
-#define AUDIT_ARCH_MIPSEL64N32	(EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE\
+#define AUDIT_ARCH_MIPSEL64N32	(EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE|\
 				 __AUDIT_ARCH_CONVENTION_MIPS64_N32)
 #define AUDIT_ARCH_OPENRISC	(EM_OPENRISC)
 #define AUDIT_ARCH_PARISC	(EM_PARISC)
diff --git a/include/uapi/linux/usb/Kbuild b/include/uapi/linux/usb/Kbuild
index 6cb4ea826834..4cc4d6e7e523 100644
--- a/include/uapi/linux/usb/Kbuild
+++ b/include/uapi/linux/usb/Kbuild
@@ -1,6 +1,7 @@
 # UAPI Header export list
 header-y += audio.h
 header-y += cdc.h
+header-y += cdc-wdm.h
 header-y += ch11.h
 header-y += ch9.h
 header-y += functionfs.h
diff --git a/include/uapi/linux/usb/cdc-wdm.h b/include/uapi/linux/usb/cdc-wdm.h
index f03134feebd6..0dc132e75030 100644
--- a/include/uapi/linux/usb/cdc-wdm.h
+++ b/include/uapi/linux/usb/cdc-wdm.h
@@ -9,6 +9,8 @@
 #ifndef _UAPI__LINUX_USB_CDC_WDM_H
 #define _UAPI__LINUX_USB_CDC_WDM_H
 
+#include <linux/types.h>
+
 /*
  * This IOCTL is used to retrieve the wMaxCommand for the device,
  * defining the message limit for both reading and writing.
diff --git a/kernel/cpu.c b/kernel/cpu.c
index a9e710eef0e2..247979a1b815 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -726,10 +726,12 @@ void set_cpu_present(unsigned int cpu, bool present)
 
 void set_cpu_online(unsigned int cpu, bool online)
 {
-	if (online)
+	if (online) {
 		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
-	else
+		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
+	} else {
 		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
+	}
 }
 
 void set_cpu_active(unsigned int cpu, bool active)
diff --git a/kernel/futex.c b/kernel/futex.c
index 5f589279e462..81dbe773ce4c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -745,7 +745,8 @@ void exit_pi_state_list(struct task_struct *curr)
 
 static int
 lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
-		union futex_key *key, struct futex_pi_state **ps)
+		union futex_key *key, struct futex_pi_state **ps,
+		struct task_struct *task)
 {
 	struct futex_pi_state *pi_state = NULL;
 	struct futex_q *this, *next;
@@ -786,6 +787,16 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 				return -EINVAL;
 		}
 
+		/*
+		 * Protect against a corrupted uval. If uval
+		 * is 0x80000000 then pid is 0 and the waiter
+		 * bit is set. So the deadlock check in the
+		 * calling code has failed and we did not fall
+		 * into the check above due to !pid.
+		 */
+		if (task && pi_state->owner == task)
+			return -EDEADLK;
+
 		atomic_inc(&pi_state->refcount);
 		*ps = pi_state;
 
@@ -803,6 +814,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 	if (!p)
 		return -ESRCH;
 
+	if (!p->mm) {
+		put_task_struct(p);
+		return -EPERM;
+	}
+
 	/*
 	 * We need to look at the task state flags to figure out,
 	 * whether the task is exiting. To protect against the do_exit
@@ -935,7 +951,7 @@ retry:
 	 * We dont have the lock. Look up the PI state (or create it if
 	 * we are the first waiter):
 	 */
-	ret = lookup_pi_state(uval, hb, key, ps);
+	ret = lookup_pi_state(uval, hb, key, ps, task);
 
 	if (unlikely(ret)) {
 		switch (ret) {
@@ -1347,7 +1363,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
  *
  * Return:
  *  0 - failed to acquire the lock atomically;
- *  1 - acquired the lock;
+ * >0 - acquired the lock, return value is vpid of the top_waiter
  * <0 - error
  */
 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
@@ -1358,7 +1374,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
 {
 	struct futex_q *top_waiter = NULL;
 	u32 curval;
-	int ret;
+	int ret, vpid;
 
 	if (get_futex_value_locked(&curval, pifutex))
 		return -EFAULT;
@@ -1386,11 +1402,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
 	 * the contended case or if set_waiters is 1. The pi_state is returned
 	 * in ps in contended cases.
 	 */
+	vpid = task_pid_vnr(top_waiter->task);
 	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
 				   set_waiters);
-	if (ret == 1)
+	if (ret == 1) {
 		requeue_pi_wake_futex(top_waiter, key2, hb2);
-
+		return vpid;
+	}
 	return ret;
 }
 
@@ -1421,7 +1439,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
 	struct futex_pi_state *pi_state = NULL;
 	struct futex_hash_bucket *hb1, *hb2;
 	struct futex_q *this, *next;
-	u32 curval2;
 
 	if (requeue_pi) {
 		/*
@@ -1509,16 +1526,25 @@ retry_private:
 		 * At this point the top_waiter has either taken uaddr2 or is
 		 * waiting on it. If the former, then the pi_state will not
 		 * exist yet, look it up one more time to ensure we have a
-		 * reference to it.
+		 * reference to it. If the lock was taken, ret contains the
+		 * vpid of the top waiter task.
 		 */
-		if (ret == 1) {
+		if (ret > 0) {
 			WARN_ON(pi_state);
 			drop_count++;
 			task_count++;
-			ret = get_futex_value_locked(&curval2, uaddr2);
-			if (!ret)
-				ret = lookup_pi_state(curval2, hb2, &key2,
-						      &pi_state);
+			/*
+			 * If we acquired the lock, then the user
+			 * space value of uaddr2 should be vpid. It
+			 * cannot be changed by the top waiter as it
+			 * is blocked on hb2 lock if it tries to do
+			 * so. If something fiddled with it behind our
+			 * back the pi state lookup might unearth
+			 * it. So we rather use the known value than
+			 * rereading and handing potential crap to
+			 * lookup_pi_state.
+			 */
+			ret = lookup_pi_state(ret, hb2, &key2, &pi_state, NULL);
 		}
 
 		switch (ret) {
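The futex change makes futex_proxy_trylock_atomic() report success through its return value: negative is an error, 0 means the lock was not taken, and a positive value carries the vpid of the acquiring task, so the requeue path can feed a known-good value to lookup_pi_state() instead of rereading user memory. A tiny sketch of that tri-state return convention (the function is illustrative, not the futex API):

    #include <stdio.h>
    #include <errno.h>

    /* Negative: error.  0: lock not taken.  Positive: acquired, and the
     * value is the vpid to hand to the next lookup. */
    static int trylock_report_vpid(int free, int vpid)
    {
        if (vpid <= 0)
            return -EINVAL;
        if (!free)
            return 0;       /* contended: caller keeps waiting */
        return vpid;        /* acquired: return a known-good value */
    }

    int main(void)
    {
        int ret = trylock_report_vpid(1, 1234);

        if (ret < 0)
            printf("error %d\n", ret);
        else if (ret == 0)
            printf("not acquired\n");
        else
            printf("acquired by vpid %d\n", ret);
        return 0;
    }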
diff --git a/kernel/kexec.c b/kernel/kexec.c
index c8380ad203bc..28c57069ef68 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1683,6 +1683,14 @@ int kernel_kexec(void)
 		kexec_in_progress = true;
 		kernel_restart_prepare(NULL);
 		migrate_to_reboot_cpu();
+
+		/*
+		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
+		 * no further code needs to use CPU hotplug (which is true in
+		 * the reboot case). However, the kexec path depends on using
+		 * CPU hotplug again; so re-enable it here.
+		 */
+		cpu_hotplug_enable();
 		printk(KERN_EMERG "Starting new kernel\n");
 		machine_shutdown();
 	}
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index aa4dff04b594..a620d4d08ca6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -343,9 +343,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * top_waiter can be NULL, when we are in the deboosting
 	 * mode!
 	 */
-	if (top_waiter && (!task_has_pi_waiters(task) ||
-			   top_waiter != task_top_pi_waiter(task)))
+	if (top_waiter) {
+		if (!task_has_pi_waiters(task))
 			goto out_unlock_pi;
+		/*
+		 * If deadlock detection is off, we stop here if we
+		 * are not the top pi waiter of the task.
+		 */
+		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
+			goto out_unlock_pi;
+	}
 
 	/*
 	 * When deadlock detection is off then we check, if further
@@ -361,7 +368,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		goto retry;
 	}
 
-	/* Deadlock detection */
+	/*
+	 * Deadlock detection. If the lock is the same as the original
+	 * lock which caused us to walk the lock chain or if the
+	 * current lock is owned by the task which initiated the chain
+	 * walk, we detected a deadlock.
+	 */
 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
 		raw_spin_unlock(&lock->wait_lock);
@@ -527,6 +539,18 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	unsigned long flags;
 	int chain_walk = 0, res;
 
+	/*
+	 * Early deadlock detection. We really don't want the task to
+	 * enqueue on itself just to untangle the mess later. It's not
+	 * only an optimization. We drop the locks, so another waiter
+	 * can come in before the chain walk detects the deadlock. So
+	 * the other will detect the deadlock and return -EDEADLOCK,
+	 * which is wrong, as the other waiter is not in a deadlock
+	 * situation.
+	 */
+	if (detect_deadlock && owner == task)
+		return -EDEADLK;
+
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 	__rt_mutex_adjust_prio(task);
 	waiter->task = task;
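The key rtmutex hunk refuses to enqueue a task as a waiter on a lock it already owns: catching the cycle before enqueueing means no innocent third waiter can be blamed for a deadlock it did not create. A toy single-threaded sketch of that early self-deadlock check (the mutex type is illustrative, not the rtmutex API):

    #include <stdio.h>
    #include <errno.h>

    struct toy_mutex {
        int owner;              /* owning task id, 0 when free */
    };

    /* Refuse to block a task behind itself instead of letting a later
     * chain walk discover the cycle. */
    static int block_on(struct toy_mutex *m, int task)
    {
        if (m->owner == task)
            return -EDEADLK;    /* early detection, before enqueueing */
        if (m->owner == 0) {
            m->owner = task;
            return 0;
        }
        return 1;               /* would block behind another owner */
    }

    int main(void)
    {
        struct toy_mutex m = { 0 };

        printf("%d\n", block_on(&m, 1));   /* 0: acquired */
        printf("%d\n", block_on(&m, 1));   /* -EDEADLK: self deadlock */
        printf("%d\n", block_on(&m, 2));   /* 1: would block */
        return 0;
    }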
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 204d3d281809..0a7251678982 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3195,17 +3195,40 @@ __getparam_dl(struct task_struct *p, struct sched_attr *attr)
  * We ask for the deadline not being zero, and greater or equal
  * than the runtime, as well as the period of being zero or
  * greater than deadline. Furthermore, we have to be sure that
- * user parameters are above the internal resolution (1us); we
- * check sched_runtime only since it is always the smaller one.
+ * user parameters are above the internal resolution of 1us (we
+ * check sched_runtime only since it is always the smaller one) and
+ * below 2^63 ns (we have to check both sched_deadline and
+ * sched_period, as the latter can be zero).
  */
 static bool
 __checkparam_dl(const struct sched_attr *attr)
 {
-	return attr && attr->sched_deadline != 0 &&
-		(attr->sched_period == 0 ||
-		(s64)(attr->sched_period - attr->sched_deadline) >= 0) &&
-		(s64)(attr->sched_deadline - attr->sched_runtime ) >= 0 &&
-		attr->sched_runtime >= (2 << (DL_SCALE - 1));
+	/* deadline != 0 */
+	if (attr->sched_deadline == 0)
+		return false;
+
+	/*
+	 * Since we truncate DL_SCALE bits, make sure we're at least
+	 * that big.
+	 */
+	if (attr->sched_runtime < (1ULL << DL_SCALE))
+		return false;
+
+	/*
+	 * Since we use the MSB for wrap-around and sign issues, make
+	 * sure it's not set (mind that period can be equal to zero).
+	 */
+	if (attr->sched_deadline & (1ULL << 63) ||
+	    attr->sched_period & (1ULL << 63))
+		return false;
+
+	/* runtime <= deadline <= period (if period != 0) */
+	if ((attr->sched_period != 0 &&
+	     attr->sched_period < attr->sched_deadline) ||
+	    attr->sched_deadline < attr->sched_runtime)
+		return false;
+
+	return true;
 }
 
 /*
@@ -3658,8 +3681,12 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
 	if (!uattr || pid < 0 || flags)
 		return -EINVAL;
 
-	if (sched_copy_attr(uattr, &attr))
-		return -EFAULT;
+	retval = sched_copy_attr(uattr, &attr);
+	if (retval)
+		return retval;
+
+	if (attr.sched_policy < 0)
+		return -EINVAL;
 
 	rcu_read_lock();
 	retval = -ESRCH;
@@ -3709,7 +3736,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
  */
 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
-	struct sched_param lp;
+	struct sched_param lp = { .sched_priority = 0 };
 	struct task_struct *p;
 	int retval;
 
@@ -3726,11 +3753,8 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 	if (retval)
 		goto out_unlock;
 
-	if (task_has_dl_policy(p)) {
-		retval = -EINVAL;
-		goto out_unlock;
-	}
-	lp.sched_priority = p->rt_priority;
+	if (task_has_rt_policy(p))
+		lp.sched_priority = p->rt_priority;
 	rcu_read_unlock();
 
 	/*
@@ -5052,7 +5076,6 @@ static int sched_cpu_active(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
 	case CPU_DOWN_FAILED:
 		set_cpu_active((long)hcpu, true);
 		return NOTIFY_OK;
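The __checkparam_dl() rewrite above replaces one dense signed-subtraction expression with explicit unsigned checks: the old form accepted parameters with the MSB set, which later wrapped arithmetic on runtimes and deadlines. A userspace restatement of the same checks, assuming DL_SCALE is 10 as in this era of the scheduler (the struct name is a stand-in for sched_attr):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define DL_SCALE 10   /* assumed value of the kernel's DL_SCALE */

    struct sched_attr_like {
        uint64_t sched_runtime, sched_deadline, sched_period;
    };

    /* runtime >= 2^DL_SCALE, MSB clear everywhere, and
     * runtime <= deadline <= period (when period != 0). */
    static bool checkparam_dl(const struct sched_attr_like *a)
    {
        if (a->sched_deadline == 0)
            return false;
        if (a->sched_runtime < (1ULL << DL_SCALE))
            return false;
        if ((a->sched_deadline & (1ULL << 63)) ||
            (a->sched_period & (1ULL << 63)))
            return false;
        if ((a->sched_period != 0 && a->sched_period < a->sched_deadline) ||
            a->sched_deadline < a->sched_runtime)
            return false;
        return true;
    }

    int main(void)
    {
        struct sched_attr_like ok  = { 1 << 20, 1 << 21, 1 << 22 };
        struct sched_attr_like bad = { 1 << 20, 1ULL << 63, 0 };

        printf("%d %d\n", checkparam_dl(&ok), checkparam_dl(&bad));
        return 0;
    }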
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index ab001b5d5048..bd95963dae80 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -13,6 +13,7 @@
 
 #include <linux/gfp.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include "cpudeadline.h"
 
 static inline int parent(int i)
@@ -39,8 +40,10 @@ static void cpudl_exchange(struct cpudl *cp, int a, int b)
 {
 	int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu;
 
-	swap(cp->elements[a], cp->elements[b]);
-	swap(cp->cpu_to_idx[cpu_a], cp->cpu_to_idx[cpu_b]);
+	swap(cp->elements[a].cpu, cp->elements[b].cpu);
+	swap(cp->elements[a].dl , cp->elements[b].dl );
+
+	swap(cp->elements[cpu_a].idx, cp->elements[cpu_b].idx);
 }
 
 static void cpudl_heapify(struct cpudl *cp, int idx)
@@ -140,7 +143,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 	WARN_ON(!cpu_present(cpu));
 
 	raw_spin_lock_irqsave(&cp->lock, flags);
-	old_idx = cp->cpu_to_idx[cpu];
+	old_idx = cp->elements[cpu].idx;
 	if (!is_valid) {
 		/* remove item */
 		if (old_idx == IDX_INVALID) {
@@ -155,8 +158,8 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
 		cp->elements[old_idx].cpu = new_cpu;
 		cp->size--;
-		cp->cpu_to_idx[new_cpu] = old_idx;
-		cp->cpu_to_idx[cpu] = IDX_INVALID;
+		cp->elements[new_cpu].idx = old_idx;
+		cp->elements[cpu].idx = IDX_INVALID;
 		while (old_idx > 0 && dl_time_before(
 				cp->elements[parent(old_idx)].dl,
 				cp->elements[old_idx].dl)) {
@@ -173,7 +176,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 		cp->size++;
 		cp->elements[cp->size - 1].dl = 0;
 		cp->elements[cp->size - 1].cpu = cpu;
-		cp->cpu_to_idx[cpu] = cp->size - 1;
+		cp->elements[cpu].idx = cp->size - 1;
 		cpudl_change_key(cp, cp->size - 1, dl);
 		cpumask_clear_cpu(cpu, cp->free_cpus);
 	} else {
@@ -195,10 +198,21 @@ int cpudl_init(struct cpudl *cp)
 	memset(cp, 0, sizeof(*cp));
 	raw_spin_lock_init(&cp->lock);
 	cp->size = 0;
-	for (i = 0; i < NR_CPUS; i++)
-		cp->cpu_to_idx[i] = IDX_INVALID;
-	if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL))
+
+	cp->elements = kcalloc(nr_cpu_ids,
+			       sizeof(struct cpudl_item),
+			       GFP_KERNEL);
+	if (!cp->elements)
+		return -ENOMEM;
+
+	if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
+		kfree(cp->elements);
 		return -ENOMEM;
+	}
+
+	for_each_possible_cpu(i)
+		cp->elements[i].idx = IDX_INVALID;
+
 	cpumask_setall(cp->free_cpus);
 
 	return 0;
@@ -211,4 +225,5 @@ int cpudl_init(struct cpudl *cp)
 void cpudl_cleanup(struct cpudl *cp)
 {
 	free_cpumask_var(cp->free_cpus);
+	kfree(cp->elements);
 }
diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h
index a202789a412c..538c9796ad4a 100644
--- a/kernel/sched/cpudeadline.h
+++ b/kernel/sched/cpudeadline.h
@@ -5,17 +5,17 @@
 
 #define IDX_INVALID     -1
 
-struct array_item {
+struct cpudl_item {
 	u64 dl;
 	int cpu;
+	int idx;
 };
 
 struct cpudl {
 	raw_spinlock_t lock;
 	int size;
-	int cpu_to_idx[NR_CPUS];
-	struct array_item elements[NR_CPUS];
 	cpumask_var_t free_cpus;
+	struct cpudl_item *elements;
 };
 
 
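The cpudeadline (and the cpupri changes that follow) replace static [NR_CPUS] arrays with kcalloc'd storage sized by nr_cpu_ids, folding the separate cpu_to_idx map into a per-element idx field and unwinding the first allocation if a later one fails. A userspace sketch of the same shape, with calloc standing in for kcalloc (names suffixed _like to mark them as illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define IDX_INVALID -1

    struct cpudl_item_like {
        unsigned long long dl;
        int cpu;
        int idx;
    };

    struct cpudl_like {
        int size;
        struct cpudl_item_like *elements;   /* was a [NR_CPUS] array */
    };

    /* Size the array from the runtime CPU count instead of the
     * compile-time maximum, and initialize every idx slot. */
    static int cpudl_init_like(struct cpudl_like *cp, int nr_cpu_ids)
    {
        int i;

        cp->size = 0;
        cp->elements = calloc(nr_cpu_ids, sizeof(*cp->elements));
        if (!cp->elements)
            return -1;

        for (i = 0; i < nr_cpu_ids; i++)
            cp->elements[i].idx = IDX_INVALID;
        return 0;
    }

    static void cpudl_cleanup_like(struct cpudl_like *cp)
    {
        free(cp->elements);
    }

    int main(void)
    {
        struct cpudl_like cp;

        if (cpudl_init_like(&cp, 8) == 0) {
            printf("elements[7].idx = %d\n", cp.elements[7].idx);
            cpudl_cleanup_like(&cp);
        }
        return 0;
    }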
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 3031bac8aa3e..8834243abee2 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -30,6 +30,7 @@
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
+#include <linux/slab.h>
 #include "cpupri.h"
 
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
@@ -218,8 +219,13 @@ int cpupri_init(struct cpupri *cp)
 			goto cleanup;
 	}
 
+	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
+	if (!cp->cpu_to_pri)
+		goto cleanup;
+
 	for_each_possible_cpu(i)
 		cp->cpu_to_pri[i] = CPUPRI_INVALID;
+
 	return 0;
 
 cleanup:
@@ -236,6 +242,7 @@ void cpupri_cleanup(struct cpupri *cp)
 {
 	int i;
 
+	kfree(cp->cpu_to_pri);
 	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 }
diff --git a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h
index f6d756173491..6b033347fdfd 100644
--- a/kernel/sched/cpupri.h
+++ b/kernel/sched/cpupri.h
@@ -17,7 +17,7 @@ struct cpupri_vec {
 
 struct cpupri {
 	struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
-	int cpu_to_pri[NR_CPUS];
+	int *cpu_to_pri;
 };
 
 #ifdef CONFIG_SMP
diff --git a/lib/nlattr.c b/lib/nlattr.c
index fc6754720ced..10ad042d01be 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -201,8 +201,8 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
 	}
 
 	if (unlikely(rem > 0))
-		printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
-		       "attributes.\n", rem);
+		pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
+				    rem, current->comm);
 
 	err = 0;
 errout:
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 8c7ca811de6e..96b66fd30f96 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -415,7 +415,7 @@ batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
 	hlist_for_each_entry_rcu(tmp_orig_node,
 				 &bat_priv->mcast.want_all_ipv4_list,
 				 mcast_want_all_ipv4_node) {
-		if (!atomic_inc_not_zero(&orig_node->refcount))
+		if (!atomic_inc_not_zero(&tmp_orig_node->refcount))
 			continue;
 
 		orig_node = tmp_orig_node;
@@ -442,7 +442,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
 	hlist_for_each_entry_rcu(tmp_orig_node,
 				 &bat_priv->mcast.want_all_ipv6_list,
 				 mcast_want_all_ipv6_node) {
-		if (!atomic_inc_not_zero(&orig_node->refcount))
+		if (!atomic_inc_not_zero(&tmp_orig_node->refcount))
 			continue;
 
 		orig_node = tmp_orig_node;
@@ -493,7 +493,7 @@ batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
 	hlist_for_each_entry_rcu(tmp_orig_node,
 				 &bat_priv->mcast.want_all_unsnoopables_list,
 				 mcast_want_all_unsnoopables_node) {
-		if (!atomic_inc_not_zero(&orig_node->refcount))
+		if (!atomic_inc_not_zero(&tmp_orig_node->refcount))
 			continue;
 
 		orig_node = tmp_orig_node;
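All three batman-adv hunks fix the same shadowing slip: the refcount must be taken on the node being examined (tmp_orig_node, the loop iterator), not on orig_node, the result holder, which is still NULL on the first pass. A single-threaded sketch of the take-reference-on-the-iterator pattern (the node type and helper mimic atomic_inc_not_zero() only loosely):

    #include <stdio.h>

    struct orig_node_like {
        int refcount;
    };

    /* Take a reference only if the count is still non-zero, as
     * atomic_inc_not_zero() does (non-atomic stand-in). */
    static int inc_not_zero(int *refcount)
    {
        if (*refcount == 0)
            return 0;
        (*refcount)++;
        return 1;
    }

    int main(void)
    {
        struct orig_node_like nodes[3] = { {0}, {0}, {2} };
        struct orig_node_like *orig_node = NULL;
        int i;

        /* The fixed loop pins the node under inspection; the buggy
         * version tested the result holder, still NULL here. */
        for (i = 0; i < 3; i++) {
            if (!inc_not_zero(&nodes[i].refcount))
                continue;
            orig_node = &nodes[i];
            break;
        }

        printf("picked node %ld, refcount %d\n",
               orig_node ? (long)(orig_node - nodes) : -1L,
               orig_node ? orig_node->refcount : 0);
        return 0;
    }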
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 2c45c069ea1a..b524c36c1273 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -538,6 +538,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 {
 	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
 	struct net_bridge_fdb_entry *fdb;
+	bool fdb_modified = false;
 
 	/* some users want to always flood. */
 	if (hold_time(br) == 0)
@@ -558,10 +559,15 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 					source->dev->name);
 		} else {
 			/* fastpath: update of existing entry */
-			fdb->dst = source;
+			if (unlikely(source != fdb->dst)) {
+				fdb->dst = source;
+				fdb_modified = true;
+			}
 			fdb->updated = jiffies;
 			if (unlikely(added_by_user))
 				fdb->added_by_user = 1;
+			if (unlikely(fdb_modified))
+				fdb_notify(br, fdb, RTM_NEWNEIGH);
 		}
 	} else {
 		spin_lock(&br->hash_lock);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 7985deaff52f..04d6348fd530 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -147,8 +147,8 @@ static int br_handle_local_finish(struct sk_buff *skb)
 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
 	u16 vid = 0;
 
-	br_vlan_get_tag(skb, &vid);
-	if (p->flags & BR_LEARNING)
+	/* check if vlan is allowed, to avoid spoofing */
+	if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
 		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
 	return 0;	 /* process further */
 }
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 53d6e32965fc..bc17210d4c52 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -589,6 +589,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
 			struct sk_buff *skb, u16 *vid);
 bool br_allowed_egress(struct net_bridge *br, const struct net_port_vlans *v,
 		       const struct sk_buff *skb);
+bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid);
 struct sk_buff *br_handle_vlan(struct net_bridge *br,
 			       const struct net_port_vlans *v,
 			       struct sk_buff *skb);
@@ -660,6 +661,12 @@ static inline bool br_allowed_egress(struct net_bridge *br,
 	return true;
 }
 
+static inline bool br_should_learn(struct net_bridge_port *p,
+				   struct sk_buff *skb, u16 *vid)
+{
+	return true;
+}
+
 static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
 					     const struct net_port_vlans *v,
 					     struct sk_buff *skb)
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 24c5cc55589f..fcc95390f862 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -241,6 +241,34 @@ bool br_allowed_egress(struct net_bridge *br,
 	return false;
 }
 
+/* Called under RCU */
+bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
+{
+	struct net_bridge *br = p->br;
+	struct net_port_vlans *v;
+
+	if (!br->vlan_enabled)
+		return true;
+
+	v = rcu_dereference(p->vlan_info);
+	if (!v)
+		return false;
+
+	br_vlan_get_tag(skb, vid);
+	if (!*vid) {
+		*vid = br_get_pvid(v);
+		if (*vid == VLAN_N_VID)
+			return false;
+
+		return true;
+	}
+
+	if (test_bit(*vid, v->vlan_bitmap))
+		return true;
+
+	return false;
+}
+
 /* Must be protected by RTNL.
  * Must be called with vid in range from 1 to 4094 inclusive.
  */
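The new br_should_learn() above decides whether a source address may be learned: with VLAN filtering off everything is learned, untagged frames map to the port's pvid (and are rejected when no pvid is set), and tagged frames must hit the per-port VLAN bitmap. A standalone sketch of that decision order, skipping the vlan_enabled and RCU handling (toy bitmap, illustrative names):

#include <stdbool.h>
#include <stdio.h>

#define VID_MAX  4096
#define VID_NONE VID_MAX    /* stands in for VLAN_N_VID: "no pvid set" */

struct port_vlans {
	unsigned int pvid;                  /* VLAN for untagged frames */
	unsigned char allowed[VID_MAX / 8]; /* stands in for v->vlan_bitmap */
};

static bool vid_allowed(const struct port_vlans *v, unsigned int vid)
{
	return v->allowed[vid / 8] & (1u << (vid % 8));
}

/* Untagged traffic maps to the pvid (rejected when none is set);
 * tagged traffic must be in the per-port bitmap.
 */
static bool should_learn(const struct port_vlans *v, unsigned int *vid)
{
	if (!*vid) {
		*vid = v->pvid;
		return *vid != VID_NONE;
	}
	return vid_allowed(v, *vid);
}

int main(void)
{
	struct port_vlans v = { .pvid = VID_NONE };
	unsigned int vid;

	v.allowed[10 / 8] |= 1u << (10 % 8);  /* permit VLAN 10 only */

	vid = 0;
	printf("untagged, no pvid: %d\n", should_learn(&v, &vid));
	vid = 10;
	printf("tagged vid 10:     %d\n", should_learn(&v, &vid));
	vid = 20;
	printf("tagged vid 20:     %d\n", should_learn(&v, &vid));
	return 0;
}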
diff --git a/net/core/dev.c b/net/core/dev.c
index 1ba2cfe3f8e8..5367bfba0947 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2289,8 +2289,8 @@ EXPORT_SYMBOL(skb_checksum_help);
 
 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
 {
+	unsigned int vlan_depth = skb->mac_len;
 	__be16 type = skb->protocol;
-	int vlan_depth = skb->mac_len;
 
 	/* Tunnel gso handlers can set protocol to ethernet. */
 	if (type == htons(ETH_P_TEB)) {
@@ -2303,15 +2303,30 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
 		type = eth->h_proto;
 	}
 
-	while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
-		struct vlan_hdr *vh;
-
-		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
-			return 0;
-
-		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
-		type = vh->h_vlan_encapsulated_proto;
-		vlan_depth += VLAN_HLEN;
+	/* if skb->protocol is 802.1Q/AD then the header should already be
+	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
+	 * ETH_HLEN otherwise
+	 */
+	if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+		if (vlan_depth) {
+			if (unlikely(WARN_ON(vlan_depth < VLAN_HLEN)))
+				return 0;
+			vlan_depth -= VLAN_HLEN;
+		} else {
+			vlan_depth = ETH_HLEN;
+		}
+		do {
+			struct vlan_hdr *vh;
+
+			if (unlikely(!pskb_may_pull(skb,
+						    vlan_depth + VLAN_HLEN)))
+				return 0;
+
+			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+			type = vh->h_vlan_encapsulated_proto;
+			vlan_depth += VLAN_HLEN;
+		} while (type == htons(ETH_P_8021Q) ||
+			 type == htons(ETH_P_8021AD));
 	}
 
 	*depth = vlan_depth;
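The rewritten loop above anchors the VLAN walk at a known offset (mac_len - VLAN_HLEN when mac_len is set, ETH_HLEN otherwise) and then steps one 4-byte 802.1Q/802.1AD header at a time, bounds-checking each step. A toy flat-buffer version of the same walk, not the skb-based kernel code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>   /* htons/ntohs */

#define ETH_HLEN     14
#define VLAN_HLEN    4
#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

static uint16_t network_protocol(const uint8_t *frame, size_t len,
				 size_t *depth)
{
	size_t off = ETH_HLEN - 2;   /* ethertype field of the MAC header */
	uint16_t type;

	if (off + 2 > len)
		return 0;
	memcpy(&type, frame + off, sizeof(type));
	off += 2;                    /* off now == ETH_HLEN */
	while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
		if (off + VLAN_HLEN > len)   /* mirrors pskb_may_pull() */
			return 0;
		/* encapsulated proto sits 2 bytes into each VLAN header */
		memcpy(&type, frame + off + 2, sizeof(type));
		off += VLAN_HLEN;
	}
	*depth = off;                /* where the network header starts */
	return ntohs(type);
}

int main(void)
{
	uint8_t frame[64] = {0};     /* dst/src MACs left zeroed */
	size_t depth = 0;
	uint16_t proto;

	frame[12] = 0x81; frame[13] = 0x00;   /* outer tag: 802.1Q */
	frame[16] = 0x08; frame[17] = 0x00;   /* inner proto: IPv4 */
	proto = network_protocol(frame, sizeof(frame), &depth);
	printf("proto=0x%04x depth=%zu\n", proto, depth); /* 0x0800, 18 */
	return 0;
}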
diff --git a/net/core/filter.c b/net/core/filter.c
index 842f8393121d..9de0c25323b4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1492,8 +1492,13 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
 	fp->jited = 0;
 
 	err = sk_chk_filter(fp->insns, fp->len);
-	if (err)
+	if (err) {
+		if (sk != NULL)
+			sk_filter_uncharge(sk, fp);
+		else
+			kfree(fp);
 		return ERR_PTR(err);
+	}
 
 	/* Probe if we can JIT compile the filter and if so, do
 	 * the compilation of the filter.
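The added error path above releases the filter by the same route it was acquired: memory charged against a socket is returned through the matching uncharge, while an unattached filter is plainly freed. A compressed sketch of that ownership-aware cleanup (illustrative names only):

#include <stdio.h>
#include <stdlib.h>

struct sock { long omem; };          /* accounted socket memory */
struct filter { size_t size; };

static void filter_uncharge(struct sock *sk, struct filter *fp)
{
	sk->omem -= fp->size;        /* give the accounted memory back */
	free(fp);
}

static struct filter *prepare_filter(struct sock *sk, struct filter *fp,
				     int check_result)
{
	if (check_result != 0) {
		if (sk != NULL)
			filter_uncharge(sk, fp); /* charged: uncharge+free */
		else
			free(fp);                /* unattached: plain free */
		return NULL;
	}
	return fp;
}

int main(void)
{
	struct sock sk = { .omem = 0 };
	struct filter *fp = malloc(sizeof(*fp));

	fp->size = 128;
	sk.omem += fp->size;               /* charged on allocation */
	if (!prepare_filter(&sk, fp, -1))  /* validation failed */
		printf("freed, omem now %ld\n", sk.omem);
	return 0;
}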
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f31268dbc0d1..741b22c62acf 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2045,11 +2045,15 @@ replay:
 	if (ops->newlink) {
 		err = ops->newlink(net, dev, tb, data);
 		/* Drivers should call free_netdev() in ->destructor
-		 * and unregister it on failure so that device could be
-		 * finally freed in rtnl_unlock.
+		 * and unregister it on failure after registration
+		 * so that device could be finally freed in rtnl_unlock.
 		 */
-		if (err < 0)
+		if (err < 0) {
+			/* If device is not registered at all, free it now */
+			if (dev->reg_state == NETREG_UNINITIALIZED)
+				free_netdev(dev);
 			goto out;
+		}
 	} else {
 		err = register_netdevice(dev);
 		if (err < 0) {
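The added branch frees the net_device directly only while dev->reg_state is still NETREG_UNINITIALIZED; once registration has happened, the unregister/destructor path owns the free. A minimal sketch of that state-based cleanup rule (illustrative names, not the rtnetlink API):

#include <stdio.h>
#include <stdlib.h>

enum reg_state { UNINITIALIZED, REGISTERED };

struct device { enum reg_state reg_state; };

/* Called when ->newlink() fails: free only what was never handed
 * to the registration machinery.
 */
static void newlink_failed(struct device *dev)
{
	if (dev->reg_state == UNINITIALIZED) {
		free(dev);               /* nothing else owns it yet */
		puts("freed unregistered device");
	} else {
		puts("leave the free to the unregister path");
	}
}

int main(void)
{
	struct device *dev = calloc(1, sizeof(*dev));

	newlink_failed(dev);             /* reg_state == UNINITIALIZED */
	return 0;
}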
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 350b2072f0ab..931529d5daa2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2684,13 +2684,12 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
 	bool recovered = !before(tp->snd_una, tp->high_seq);
 
 	if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
-		if (flag & FLAG_ORIG_SACK_ACKED) {
-			/* Step 3.b. A timeout is spurious if not all data are
-			 * lost, i.e., never-retransmitted data are (s)acked.
-			 */
-			tcp_try_undo_loss(sk, true);
+		/* Step 3.b. A timeout is spurious if not all data are
+		 * lost, i.e., never-retransmitted data are (s)acked.
+		 */
+		if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED))
 			return;
-		}
+
 		if (after(tp->snd_nxt, tp->high_seq) &&
 		    (flag & FLAG_DATA_SACKED || is_dupack)) {
 			tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
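The refactor above lets tcp_try_undo_loss() report whether the undo actually happened, so the early return fires only on a genuine undo instead of whenever FLAG_ORIG_SACK_ACKED was set. A control-flow sketch of that change, not the TCP state machine:

#include <stdbool.h>
#include <stdio.h>

/* Stands in for tcp_try_undo_loss(): it may refuse the undo (e.g. no
 * saved state) and reports whether it actually did it.
 */
static bool try_undo_loss(bool undo_allowed, bool have_saved_state)
{
	return undo_allowed && have_saved_state;
}

static const char *process_loss(bool orig_sack_acked, bool have_saved_state)
{
	/* After the patch, control falls through to the rest of the
	 * loss processing unless the undo really happened.
	 */
	if (try_undo_loss(orig_sack_acked, have_saved_state))
		return "undone: stop";
	return "continue loss recovery";
}

int main(void)
{
	printf("%s\n", process_loss(true, false));  /* undo refused */
	printf("%s\n", process_loss(true, true));   /* undo succeeded */
	return 0;
}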
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 6179ac186ab9..ffa029305a09 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -8,7 +8,6 @@
 #include <net/addrconf.h>
 #include <net/secure_seq.h>
 
-
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
 	u16 offset = sizeof(struct ipv6hdr);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index d9da8c448c76..e6836755c45d 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1392,15 +1392,19 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 
 	if (ipip) {
 		__be32 info = ic->un.gateway;
+		__u8 type = ic->type;
+		__u8 code = ic->code;
 
 		/* Update the MTU */
 		if (ic->type == ICMP_DEST_UNREACH &&
 		    ic->code == ICMP_FRAG_NEEDED) {
 			struct ip_vs_dest *dest = cp->dest;
 			u32 mtu = ntohs(ic->un.frag.mtu);
+			__be16 frag_off = cih->frag_off;
 
 			/* Strip outer IP and ICMP, go to IPIP header */
-			__skb_pull(skb, ihl + sizeof(_icmph));
+			if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
+				goto ignore_ipip;
 			offset2 -= ihl + sizeof(_icmph);
 			skb_reset_network_header(skb);
 			IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
@@ -1408,7 +1412,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 			ipv4_update_pmtu(skb, dev_net(skb->dev),
 					 mtu, 0, 0, 0, 0);
 			/* Client uses PMTUD? */
-			if (!(cih->frag_off & htons(IP_DF)))
+			if (!(frag_off & htons(IP_DF)))
 				goto ignore_ipip;
 			/* Prefer the resulting PMTU */
 			if (dest) {
@@ -1427,12 +1431,13 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 		/* Strip outer IP, ICMP and IPIP, go to IP header of
 		 * original request.
 		 */
-		__skb_pull(skb, offset2);
+		if (pskb_pull(skb, offset2) == NULL)
+			goto ignore_ipip;
 		skb_reset_network_header(skb);
 		IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
 			  &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
-			  ic->type, ic->code, ntohl(info));
-		icmp_send(skb, ic->type, ic->code, info);
+			  type, code, ntohl(info));
+		icmp_send(skb, type, code, info);
 		/* ICMP can be shorter but anyways, account it */
 		ip_vs_out_stats(cp, skb);
 
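Two things change in the IPVS hunks above: __skb_pull() becomes a checked pskb_pull() that can fail on non-linear skbs, and ic->type, ic->code, and cih->frag_off are cached before the pulls, since pulling may relocate the data those pointers referred to. A toy buffer version of the save-then-pull pattern (plain malloc/memcpy, not the skb API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { uint8_t *data; size_t len; };

/* A pull that can fail and may move the data (simulated by realloc
 * into a fresh allocation).
 */
static uint8_t *checked_pull(struct buf *b, size_t n)
{
	uint8_t *moved;

	if (n > b->len)
		return NULL;
	moved = malloc(b->len - n);
	if (!moved)
		return NULL;
	memcpy(moved, b->data + n, b->len - n);
	free(b->data);
	b->data = moved;
	b->len -= n;
	return b->data;
}

int main(void)
{
	struct buf b = { .data = malloc(8), .len = 8 };
	uint8_t type, code;

	memcpy(b.data, "\x03\x04reply!", 8);
	type = b.data[0];           /* cache fields before the pull ... */
	code = b.data[1];
	if (!checked_pull(&b, 2))   /* ... which may move b.data */
		return 1;
	printf("type=%u code=%u payload=%.6s\n",
	       type, code, (const char *)b.data);
	free(b.data);
	return 0;
}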
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index e0ccd84d4d67..15c731f03fa6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1377,7 +1377,9 @@ retry:
 bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
 			  struct user_namespace *user_ns, int cap)
 {
-	return sk_ns_capable(nsp->sk, user_ns, cap);
+	return ((nsp->flags & NETLINK_SKB_DST) ||
+		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
+	       ns_capable(user_ns, cap);
 }
 EXPORT_SYMBOL(__netlink_ns_capable);
 
@@ -2323,6 +2325,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	struct sk_buff *skb;
 	int err;
 	struct scm_cookie scm;
+	u32 netlink_skb_flags = 0;
 
 	if (msg->msg_flags&MSG_OOB)
 		return -EOPNOTSUPP;
@@ -2344,6 +2347,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		if ((dst_group || dst_portid) &&
 		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
 			goto out;
+		netlink_skb_flags |= NETLINK_SKB_DST;
 	} else {
 		dst_portid = nlk->dst_portid;
 		dst_group = nlk->dst_group;
@@ -2373,6 +2377,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	NETLINK_CB(skb).portid = nlk->portid;
 	NETLINK_CB(skb).dst_group = dst_group;
 	NETLINK_CB(skb).creds = siocb->scm->creds;
+	NETLINK_CB(skb).flags = netlink_skb_flags;
 
 	err = -EFAULT;
 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
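Taken together, these hunks tag messages sent to an explicit destination with NETLINK_SKB_DST, and __netlink_ns_capable() then requires the socket opener's file credentials only for implicitly addressed messages, while the namespace capability check applies either way. A boolean sketch of the resulting predicate (stand-in flags and booleans, not the kernel's credential machinery):

#include <stdbool.h>
#include <stdio.h>

#define SKB_DST 0x1   /* message carried an explicit destination */

/* Implicit destinations must also vouch for whoever opened the
 * socket; explicit ones rest on the capability check alone.
 */
static bool msg_capable(unsigned int flags, bool opener_file_capable,
			bool ns_cap)
{
	return ((flags & SKB_DST) || opener_file_capable) && ns_cap;
}

int main(void)
{
	/* capable in the namespace, but unprivileged socket opener */
	printf("implicit dst: %d\n", msg_capable(0, false, true));
	printf("explicit dst: %d\n", msg_capable(SKB_DST, false, true));
	return 0;
}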
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index fd9a16a6d1de..412d9dc3a873 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -947,6 +947,20 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
 	return skb;
 }
 
+/* A wrapper for nlmsg_multicast() checking that nlsk is still available.
+ * Must be called with RCU read lock.
+ */
+static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
+				       u32 pid, unsigned int group)
+{
+	struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
+
+	if (nlsk)
+		return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
+	else
+		return -1;
+}
+
 static inline size_t xfrm_spdinfo_msgsize(void)
 {
 	return NLMSG_ALIGN(4)
@@ -2228,7 +2242,7 @@ static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
 	if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
 		BUG();
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE);
 }
 #else
 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
@@ -2419,7 +2433,7 @@ static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
 		return -EMSGSIZE;
 	}
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
 }
 
 static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
@@ -2434,7 +2448,7 @@ static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event
 	if (build_aevent(skb, x, c) < 0)
 		BUG();
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS);
 }
 
 static int xfrm_notify_sa_flush(const struct km_event *c)
2440static int xfrm_notify_sa_flush(const struct km_event *c) 2454static int xfrm_notify_sa_flush(const struct km_event *c)
@@ -2460,7 +2474,7 @@ static int xfrm_notify_sa_flush(const struct km_event *c)
 
 	nlmsg_end(skb, nlh);
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
 }
 
 static inline size_t xfrm_sa_len(struct xfrm_state *x)
@@ -2547,7 +2561,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
 
 	nlmsg_end(skb, nlh);
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
 
 out_free_skb:
 	kfree_skb(skb);
@@ -2638,7 +2652,7 @@ static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
 	if (build_acquire(skb, x, xt, xp) < 0)
 		BUG();
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE);
 }
 
 /* User gives us xfrm_user_policy_info followed by an array of 0
@@ -2752,7 +2766,7 @@ static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct
 	if (build_polexpire(skb, xp, dir, c) < 0)
 		BUG();
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
 }
 
 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
@@ -2814,7 +2828,7 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
 
 	nlmsg_end(skb, nlh);
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
 
 out_free_skb:
 	kfree_skb(skb);
@@ -2842,7 +2856,7 @@ static int xfrm_notify_policy_flush(const struct km_event *c)
 
 	nlmsg_end(skb, nlh);
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
 
 out_free_skb:
 	kfree_skb(skb);
@@ -2911,7 +2925,7 @@ static int xfrm_send_report(struct net *net, u8 proto,
 	if (build_report(skb, proto, sel, addr) < 0)
 		BUG();
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT);
 }
 
 static inline size_t xfrm_mapping_msgsize(void)
@@ -2963,7 +2977,7 @@ static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
 	if (build_mapping(skb, x, ipaddr, sport) < 0)
 		BUG();
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING);
 }
 
 static bool xfrm_is_alive(const struct km_event *c)
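All of the xfrm notification paths above now go through xfrm_nlmsg_multicast(), which re-reads net->xfrm.nlsk under RCU and fails gracefully instead of dereferencing a socket a dying namespace has already torn down. A userspace sketch of that null-checked wrapper, with plain C11 atomics standing in for RCU (illustrative names):

#include <stdatomic.h>
#include <stdio.h>

struct sock { int id; };

static _Atomic(struct sock *) nlsk;   /* may be reset to NULL on exit */

static int do_multicast(struct sock *sk, const char *msg)
{
	printf("sock %d <- %s\n", sk->id, msg);
	return 0;
}

static int safe_multicast(const char *msg)
{
	struct sock *sk = atomic_load(&nlsk);   /* ~ rcu_dereference() */

	if (sk)
		return do_multicast(sk, msg);
	return -1;                               /* namespace going away */
}

int main(void)
{
	struct sock s = { .id = 7 };

	atomic_store(&nlsk, &s);
	safe_multicast("notify");     /* delivered */
	atomic_store(&nlsk, NULL);    /* simulated teardown */
	safe_multicast("notify");     /* safely dropped */
	return 0;
}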
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
index 94d08733cb38..76cbb9ec953a 100644
--- a/sound/core/pcm_dmaengine.c
+++ b/sound/core/pcm_dmaengine.c
@@ -182,6 +182,7 @@ static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
 int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
 	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
 	int ret;
 
 	switch (cmd) {
@@ -196,6 +197,11 @@ int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 		dmaengine_resume(prtd->dma_chan);
 		break;
 	case SNDRV_PCM_TRIGGER_SUSPEND:
+		if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+			dmaengine_pause(prtd->dma_chan);
+		else
+			dmaengine_terminate_all(prtd->dma_chan);
+		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 		dmaengine_pause(prtd->dma_chan);
 		break;
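The SNDRV_PCM_TRIGGER_SUSPEND case above no longer falls through to the pause handling unconditionally: the DMA channel is paused only when the runtime advertises SNDRV_PCM_INFO_PAUSE, and otherwise the transfer is terminated so the stream restarts cleanly. A minimal sketch of that capability-based branch (illustrative names, not the ALSA API):

#include <stdio.h>

enum { INFO_PAUSE = 0x1 };   /* stands in for SNDRV_PCM_INFO_PAUSE */

static void dma_pause(void)         { puts("dma: paused"); }
static void dma_terminate_all(void) { puts("dma: terminated"); }

static void trigger_suspend(unsigned int runtime_info)
{
	if (runtime_info & INFO_PAUSE)
		dma_pause();            /* hardware can resume later */
	else
		dma_terminate_all();    /* must restart from scratch */
}

int main(void)
{
	trigger_suspend(INFO_PAUSE);
	trigger_suspend(0);
	return 0;
}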
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 2c54629d62d1..6cc3cf285558 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1743,6 +1743,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
 	/* Lynx Point */
 	{ PCI_DEVICE(0x8086, 0x8c20),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+	/* 9 Series */
+	{ PCI_DEVICE(0x8086, 0x8ca0),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
 	/* Wellsburg */
 	{ PCI_DEVICE(0x8086, 0x8d20),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },