-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt (renamed from Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt)8
-rw-r--r--Documentation/networking/packet_mmap.txt2
-rw-r--r--Documentation/networking/timestamping.txt52
-rw-r--r--MAINTAINERS3
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/bcm11351.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-gta04.dts2
-rw-r--r--arch/arm/boot/dts/omap3-igep0020.dts2
-rw-r--r--arch/arm/boot/dts/omap3-igep0030.dts2
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi2
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi2
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi2
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi12
-rw-r--r--arch/arm/configs/tegra_defconfig3
-rw-r--r--arch/arm/mach-omap2/cclock3xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c8
-rw-r--r--arch/arm/mach-omap2/dpll3xxx.c92
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c20
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c9
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c21
-rw-r--r--arch/arm/mach-omap2/prminst44xx.c4
-rw-r--r--arch/cris/include/asm/bitops.h2
-rw-r--r--arch/ia64/kernel/uncached.c2
-rw-r--r--arch/mips/Kconfig12
-rw-r--r--arch/mips/alchemy/board-gpr.c4
-rw-r--r--arch/mips/alchemy/board-mtx1.c4
-rw-r--r--arch/mips/bcm47xx/board.c1
-rw-r--r--arch/mips/include/asm/asmmacro.h4
-rw-r--r--arch/mips/include/asm/fpu.h2
-rw-r--r--arch/mips/include/asm/syscall.h10
-rw-r--r--arch/mips/include/uapi/asm/inst.h4
-rw-r--r--arch/mips/kernel/ftrace.c5
-rw-r--r--arch/mips/kernel/r4k_fpu.S16
-rw-r--r--arch/mips/kernel/rtlx-cmp.c3
-rw-r--r--arch/mips/kernel/rtlx-mt.c3
-rw-r--r--arch/mips/math-emu/cp1emu.c6
-rw-r--r--arch/mips/mti-malta/malta-amon.c2
-rw-r--r--arch/mips/mti-malta/malta-int.c4
-rw-r--r--arch/mips/pci/msi-octeon.c1
-rw-r--r--arch/powerpc/platforms/cell/ras.c3
-rw-r--r--arch/x86/Kconfig.cpu4
-rw-r--r--arch/x86/include/asm/barrier.h8
-rw-r--r--arch/x86/include/asm/io.h2
-rw-r--r--arch/x86/include/asm/spinlock.h5
-rw-r--r--arch/x86/kernel/cpu/centaur.c272
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c3
-rw-r--r--arch/x86/kernel/i387.c15
-rw-r--r--arch/x86/kernel/quirks.c2
-rw-r--r--arch/x86/kvm/svm.c6
-rw-r--r--arch/x86/net/bpf_jit.S2
-rw-r--r--arch/x86/um/asm/barrier.h4
-rw-r--r--drivers/acpi/sleep.c37
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/cpufreq/cpufreq.c4
-rw-r--r--drivers/firewire/core-device.c22
-rw-r--r--drivers/firewire/sbp2.c17
-rw-r--r--drivers/gpu/drm/radeon/cik.c5
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c18
-rw-r--r--drivers/hid/hid-lg4ff.c2
-rw-r--r--drivers/hid/hid-sony.c27
-rw-r--r--drivers/hid/hidraw.c4
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c180
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h7
-rw-r--r--drivers/md/dm-cache-target.c11
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c2
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_options.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c37
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h5
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.c16
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c14
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c25
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c14
-rw-r--r--drivers/net/ethernet/realtek/r8169.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c91
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/hyperv/rndis_filter.c21
-rw-r--r--drivers/net/ieee802154/at86rf230.c11
-rw-r--r--drivers/net/phy/phy.c11
-rw-r--r--drivers/net/usb/Makefile2
-rw-r--r--drivers/net/usb/ax88179_178a.c8
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/r8152.c15
-rw-r--r--drivers/net/usb/r815x.c248
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c19
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c7
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c5
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n.c3
-rw-r--r--drivers/net/wireless/mwifiex/scan.c8
-rw-r--r--drivers/net/wireless/ti/wl1251/rx.c2
-rw-r--r--drivers/net/xen-netback/interface.c3
-rw-r--r--drivers/net/xen-netback/netback.c39
-rw-r--r--drivers/pci/bus.c2
-rw-r--r--drivers/pci/pci.c3
-rw-r--r--drivers/pinctrl/pinctrl-capri.c2
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c15
-rw-r--r--drivers/scsi/be2iscsi/be_main.c2
-rw-r--r--drivers/scsi/isci/host.h5
-rw-r--r--drivers/scsi/isci/port_config.c7
-rw-r--r--drivers/scsi/isci/task.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c46
-rw-r--r--drivers/scsi/storvsc_drv.c3
-rw-r--r--drivers/spi/spi-ath79.c4
-rw-r--r--drivers/spi/spi-atmel.c17
-rw-r--r--drivers/spi/spi-coldfire-qspi.c6
-rw-r--r--drivers/spi/spi-fsl-dspi.c6
-rw-r--r--drivers/spi/spi-imx.c4
-rw-r--r--drivers/spi/spi-topcliff-pch.c15
-rw-r--r--drivers/staging/cxt1e1/linux.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c10
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c16
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c2
-rw-r--r--drivers/target/target_core_sbc.c38
-rw-r--r--drivers/thermal/Kconfig13
-rw-r--r--drivers/thermal/thermal_core.c27
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c11
-rw-r--r--drivers/usb/core/config.c4
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/host/xhci.c14
-rw-r--r--fs/cifs/cifsglob.h2
-rw-r--r--fs/cifs/file.c24
-rw-r--r--fs/cifs/transport.c29
-rw-r--r--fs/file.c56
-rw-r--r--fs/file_table.c1
-rw-r--r--fs/hfsplus/catalog.c41
-rw-r--r--fs/hfsplus/hfsplus_fs.h1
-rw-r--r--fs/hfsplus/hfsplus_raw.h6
-rw-r--r--fs/hfsplus/inode.c9
-rw-r--r--fs/namei.c2
-rw-r--r--fs/nfs/delegation.c11
-rw-r--r--fs/nfs/nfs4filelayout.c10
-rw-r--r--fs/nfs/nfs4proc.c24
-rw-r--r--fs/nfs/nfs4state.c14
-rw-r--r--fs/ocfs2/file.c8
-rw-r--r--fs/open.c4
-rw-r--r--fs/proc/base.c1
-rw-r--r--fs/read_write.c40
-rw-r--r--include/kvm/arm_vgic.h5
-rw-r--r--include/linux/audit.h3
-rw-r--r--include/linux/clk/ti.h4
-rw-r--r--include/linux/file.h27
-rw-r--r--include/linux/firewire.h1
-rw-r--r--include/linux/fs.h8
-rw-r--r--include/linux/gfp.h4
-rw-r--r--include/linux/mmzone.h4
-rw-r--r--include/linux/nfs_xdr.h5
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/net/sock.h6
-rw-r--r--include/target/iscsi/iscsi_transport.h1
-rw-r--r--include/trace/events/sunrpc.h4
-rw-r--r--init/main.c2
-rw-r--r--ipc/msg.c2
-rw-r--r--kernel/audit.c31
-rw-r--r--kernel/audit.h2
-rw-r--r--kernel/auditfilter.c10
-rw-r--r--kernel/cpuset.c10
-rw-r--r--kernel/profile.c4
-rw-r--r--kernel/sched/clock.c4
-rw-r--r--kernel/sched/core.c9
-rw-r--r--kernel/stop_machine.c2
-rw-r--r--mm/Kconfig4
-rw-r--r--mm/compaction.c20
-rw-r--r--mm/migrate.c11
-rw-r--r--net/8021q/vlan_dev.c3
-rw-r--r--net/bridge/br_multicast.c33
-rw-r--r--net/core/skbuff.c100
-rw-r--r--net/core/sock.c5
-rw-r--r--net/ipv4/inet_fragment.c5
-rw-r--r--net/ipv4/tcp_output.c11
-rw-r--r--net/ipv6/addrconf.c5
-rw-r--r--net/ipv6/exthdrs_offload.c4
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/l2tp/l2tp_core.c4
-rw-r--r--net/l2tp/l2tp_core.h1
-rw-r--r--net/l2tp/l2tp_netlink.c4
-rw-r--r--net/l2tp/l2tp_ppp.c13
-rw-r--r--net/mac80211/chan.c6
-rw-r--r--net/mac80211/mesh_ps.c1
-rw-r--r--net/mac80211/sta_info.c1
-rw-r--r--net/sched/sch_api.c7
-rw-r--r--net/sched/sch_fq.c21
-rw-r--r--net/sctp/sm_make_chunk.c4
-rw-r--r--net/sctp/sm_statefuns.c5
-rw-r--r--net/socket.c17
-rw-r--r--net/tipc/config.c9
-rw-r--r--net/tipc/handler.c1
-rw-r--r--net/tipc/name_table.c37
-rw-r--r--net/tipc/server.c14
-rw-r--r--net/tipc/socket.c4
-rw-r--r--net/tipc/subscr.c19
-rw-r--r--net/unix/af_unix.c3
-rw-r--r--net/wireless/core.c2
-rw-r--r--scripts/kallsyms.c3
-rw-r--r--security/keys/keyring.c6
-rw-r--r--sound/pci/hda/patch_realtek.c19
-rw-r--r--sound/soc/codecs/88pm860x-codec.c3
-rw-r--r--sound/soc/codecs/si476x.c2
-rw-r--r--sound/soc/omap/n810.c4
-rw-r--r--sound/soc/soc-pcm.c3
-rw-r--r--tools/net/Makefile2
-rw-r--r--tools/perf/builtin-trace.c10
-rw-r--r--tools/perf/util/machine.c2
-rw-r--r--tools/perf/util/symbol-elf.c6
-rw-r--r--tools/testing/selftests/ipc/msgque.c1
219 files changed, 1511 insertions, 1370 deletions
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
index 9e9e9ef9f852..c119debe6bab 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
@@ -1,4 +1,4 @@
-Broadcom Capri Pin Controller
+Broadcom BCM281xx Pin Controller
 
 This is a pin controller for the Broadcom BCM281xx SoC family, which includes
 BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs.
@@ -7,14 +7,14 @@ BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs.
 
 Required Properties:
 
-- compatible:	Must be "brcm,capri-pinctrl".
+- compatible:	Must be "brcm,bcm11351-pinctrl"
- reg:		Base address of the PAD Controller register block and the size
 		of the block.
 
 For example, the following is the bare minimum node:
 
 	pinctrl@35004800 {
-		compatible = "brcm,capri-pinctrl";
+		compatible = "brcm,bcm11351-pinctrl";
 		reg = <0x35004800 0x430>;
 	};
 
@@ -119,7 +119,7 @@ Optional Properties (for HDMI pins):
 Example:
 // pin controller node
 pinctrl@35004800 {
-	compatible = "brcm,capri-pinctrl";
+	compatible = "brcm,bcm11351-pinctrl";
 	reg = <0x35004800 0x430>;
 
 	// pin configuration node
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 1404674c0a02..6fea79efb4cb 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -453,7 +453,7 @@ TP_STATUS_COPY : This flag indicates that the frame (and associated
 		enabled previously with setsockopt() and
 		the PACKET_COPY_THRESH option.
 
-		The number of frames than can be buffered to
+		The number of frames that can be buffered to
 		be read with recvfrom is limited like a normal socket.
 		See the SO_RCVBUF option in the socket (7) man page.
 
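As an aside (not part of the patch), a minimal userspace sketch of the SO_RCVBUF limit mentioned above might look like the following; the AF_PACKET socket and the 4 MiB figure are purely illustrative, and the program needs CAP_NET_RAW:

#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int rcvbuf = 4 * 1024 * 1024;		/* illustrative 4 MiB request */
	socklen_t len = sizeof(rcvbuf);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf)))
		perror("SO_RCVBUF");

	/* read back the buffer size the kernel actually granted */
	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, &len);
	printf("effective SO_RCVBUF: %d bytes\n", rcvbuf);
	return 0;
}

The kernel doubles the requested value to leave room for bookkeeping overhead, so the value read back is larger than the value passed in.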
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 661d3c316a17..048c92b487f6 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -21,26 +21,38 @@ has such a feature).
 
 SO_TIMESTAMPING:
 
-Instructs the socket layer which kind of information is wanted. The
-parameter is an integer with some of the following bits set. Setting
-other bits is an error and doesn't change the current state.
-
-SOF_TIMESTAMPING_TX_HARDWARE: try to obtain send time stamp in hardware
-SOF_TIMESTAMPING_TX_SOFTWARE: if SOF_TIMESTAMPING_TX_HARDWARE is off or
-                              fails, then do it in software
-SOF_TIMESTAMPING_RX_HARDWARE: return the original, unmodified time stamp
-                              as generated by the hardware
-SOF_TIMESTAMPING_RX_SOFTWARE: if SOF_TIMESTAMPING_RX_HARDWARE is off or
-                              fails, then do it in software
-SOF_TIMESTAMPING_RAW_HARDWARE: return original raw hardware time stamp
-SOF_TIMESTAMPING_SYS_HARDWARE: return hardware time stamp transformed to
-                               the system time base
-SOF_TIMESTAMPING_SOFTWARE: return system time stamp generated in
-                           software
-
-SOF_TIMESTAMPING_TX/RX determine how time stamps are generated.
-SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the
-following control message:
+Instructs the socket layer which kind of information should be collected
+and/or reported. The parameter is an integer with some of the following
+bits set. Setting other bits is an error and doesn't change the current
+state.
+
+Four of the bits are requests to the stack to try to generate
+timestamps. Any combination of them is valid.
+
+SOF_TIMESTAMPING_TX_HARDWARE:  try to obtain send time stamps in hardware
+SOF_TIMESTAMPING_TX_SOFTWARE:  try to obtain send time stamps in software
+SOF_TIMESTAMPING_RX_HARDWARE:  try to obtain receive time stamps in hardware
+SOF_TIMESTAMPING_RX_SOFTWARE:  try to obtain receive time stamps in software
+
+The other three bits control which timestamps will be reported in a
+generated control message. If none of these bits are set or if none of
+the set bits correspond to data that is available, then the control
+message will not be generated:
+
+SOF_TIMESTAMPING_SOFTWARE:     report systime if available
+SOF_TIMESTAMPING_SYS_HARDWARE: report hwtimetrans if available
+SOF_TIMESTAMPING_RAW_HARDWARE: report hwtimeraw if available
+
+It is worth noting that timestamps may be collected for reasons other
+than being requested by a particular socket with
+SOF_TIMESTAMPING_[TR]X_(HARD|SOFT)WARE. For example, most drivers that
+can generate hardware receive timestamps ignore
+SOF_TIMESTAMPING_RX_HARDWARE. It is still a good idea to set that flag
+in case future drivers pay attention.
+
+If timestamps are reported, they will appear in a control message with
+cmsg_level==SOL_SOCKET, cmsg_type==SO_TIMESTAMPING, and a payload like
+this:
 
 struct scm_timestamping {
diff --git a/MAINTAINERS b/MAINTAINERS
index b7befe758429..b3fdb0f004ba 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1738,6 +1738,7 @@ F: include/uapi/linux/bfs_fs.h
 BLACKFIN ARCHITECTURE
 M:	Steven Miao <realmz6@gmail.com>
 L:	adi-buildroot-devel@lists.sourceforge.net
+T:	git git://git.code.sf.net/p/adi-linux/code
 W:	http://blackfin.uclinux.org
 S:	Supported
 F:	arch/blackfin/
@@ -6002,6 +6003,8 @@ F: include/linux/netdevice.h
 F:	include/uapi/linux/in.h
 F:	include/uapi/linux/net.h
 F:	include/uapi/linux/netdevice.h
+F:	tools/net/
+F:	tools/testing/selftests/net/
 
 NETWORKING [IPv4/IPv6]
 M:	"David S. Miller" <davem@davemloft.net>
diff --git a/Makefile b/Makefile
index 78209ee1f981..ef779ec26f62 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi
index e491b82f8d67..792fde1b7f75 100644
--- a/arch/arm/boot/dts/bcm11351.dtsi
+++ b/arch/arm/boot/dts/bcm11351.dtsi
@@ -147,7 +147,7 @@
 	};
 
 	pinctrl@35004800 {
-		compatible = "brcm,capri-pinctrl";
+		compatible = "brcm,bcm11351-pinctrl";
 		reg = <0x35004800 0x430>;
 	};
 
diff --git a/arch/arm/boot/dts/omap3-gta04.dts b/arch/arm/boot/dts/omap3-gta04.dts
index c551e4af4d83..d3b253bbc885 100644
--- a/arch/arm/boot/dts/omap3-gta04.dts
+++ b/arch/arm/boot/dts/omap3-gta04.dts
@@ -13,7 +13,7 @@
 
 / {
 	model = "OMAP3 GTA04";
-	compatible = "ti,omap3-gta04", "ti,omap3";
+	compatible = "ti,omap3-gta04", "ti,omap36xx", "ti,omap3";
 
 	cpus {
 		cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts
index 25a2b5f652fd..f2779ac75872 100644
--- a/arch/arm/boot/dts/omap3-igep0020.dts
+++ b/arch/arm/boot/dts/omap3-igep0020.dts
@@ -14,7 +14,7 @@
 
 / {
 	model = "IGEPv2 (TI OMAP AM/DM37x)";
-	compatible = "isee,omap3-igep0020", "ti,omap3";
+	compatible = "isee,omap3-igep0020", "ti,omap36xx", "ti,omap3";
 
 	leds {
 		pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts
index 145c58cfc8ac..2793749eb1ba 100644
--- a/arch/arm/boot/dts/omap3-igep0030.dts
+++ b/arch/arm/boot/dts/omap3-igep0030.dts
@@ -13,7 +13,7 @@
 
 / {
 	model = "IGEP COM MODULE (TI OMAP AM/DM37x)";
-	compatible = "isee,omap3-igep0030", "ti,omap3";
+	compatible = "isee,omap3-igep0030", "ti,omap36xx", "ti,omap3";
 
 	leds {
 		pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 10666ca8aee1..d4d2763f4794 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -426,7 +426,7 @@
 		};
 
 		rtp: rtp@01c25000 {
-			compatible = "allwinner,sun4i-ts";
+			compatible = "allwinner,sun4i-a10-ts";
 			reg = <0x01c25000 0x100>;
 			interrupts = <29>;
 		};
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 64961595e8d6..79fd412005b0 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -383,7 +383,7 @@
 		};
 
 		rtp: rtp@01c25000 {
-			compatible = "allwinner,sun4i-ts";
+			compatible = "allwinner,sun4i-a10-ts";
 			reg = <0x01c25000 0x100>;
 			interrupts = <29>;
 		};
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 320335abfccd..c463fd730c91 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -346,7 +346,7 @@
 		};
 
 		rtp: rtp@01c25000 {
-			compatible = "allwinner,sun4i-ts";
+			compatible = "allwinner,sun4i-a10-ts";
 			reg = <0x01c25000 0x100>;
 			interrupts = <29>;
 		};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 9ff09484847b..6f25cf559ad0 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -454,7 +454,7 @@
 		rtc: rtc@01c20d00 {
 			compatible = "allwinner,sun7i-a20-rtc";
 			reg = <0x01c20d00 0x20>;
-			interrupts = <0 24 1>;
+			interrupts = <0 24 4>;
 		};
 
 		sid: eeprom@01c23800 {
@@ -463,7 +463,7 @@
 		};
 
 		rtp: rtp@01c25000 {
-			compatible = "allwinner,sun4i-ts";
+			compatible = "allwinner,sun4i-a10-ts";
 			reg = <0x01c25000 0x100>;
 			interrupts = <0 29 4>;
 		};
@@ -596,10 +596,10 @@
 		hstimer@01c60000 {
 			compatible = "allwinner,sun7i-a20-hstimer";
 			reg = <0x01c60000 0x1000>;
-			interrupts = <0 81 1>,
-				     <0 82 1>,
-				     <0 83 1>,
-				     <0 84 1>;
+			interrupts = <0 81 4>,
+				     <0 82 4>,
+				     <0 83 4>,
+				     <0 84 4>;
 			clocks = <&ahb_gates 28>;
 		};
 
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 00fe9e9710fd..27d69b558c5d 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -204,7 +204,10 @@ CONFIG_MMC_BLOCK_MINORS=16
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_LEDS_TRIGGER_ONESHOT=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
index 3b05aea56d1f..11ed9152e665 100644
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -433,7 +433,9 @@ static const struct clk_ops dpll4_m5x2_ck_ops = {
 	.enable		= &omap2_dflt_clk_enable,
 	.disable	= &omap2_dflt_clk_disable,
 	.is_enabled	= &omap2_dflt_clk_is_enabled,
+	.set_rate	= &omap3_clkoutx2_set_rate,
 	.recalc_rate	= &omap3_clkoutx2_recalc,
+	.round_rate	= &omap3_clkoutx2_round_rate,
 };
 
 static const struct clk_ops dpll4_m5x2_ck_3630_ops = {
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 4c158c838d40..01fc710c8181 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -23,6 +23,8 @@
 #include "prm.h"
 #include "clockdomain.h"
 
+#define MAX_CPUS	2
+
 /* Machine specific information */
 struct idle_statedata {
 	u32 cpu_state;
@@ -48,11 +50,11 @@ static struct idle_statedata omap4_idle_data[] = {
 	},
 };
 
-static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
-static struct clockdomain *cpu_clkdm[NR_CPUS];
+static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
+static struct clockdomain *cpu_clkdm[MAX_CPUS];
 
 static atomic_t abort_barrier;
-static bool cpu_done[NR_CPUS];
+static bool cpu_done[MAX_CPUS];
 static struct idle_statedata *state_ptr = &omap4_idle_data[0];
 
 /* Private functions */
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index 3185ced807c9..3c418ea54bbe 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -623,6 +623,32 @@ void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
623 623
624/* Clock control for DPLL outputs */ 624/* Clock control for DPLL outputs */
625 625
626/* Find the parent DPLL for the given clkoutx2 clock */
627static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
628{
629 struct clk_hw_omap *pclk = NULL;
630 struct clk *parent;
631
632 /* Walk up the parents of clk, looking for a DPLL */
633 do {
634 do {
635 parent = __clk_get_parent(hw->clk);
636 hw = __clk_get_hw(parent);
637 } while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
638 if (!hw)
639 break;
640 pclk = to_clk_hw_omap(hw);
641 } while (pclk && !pclk->dpll_data);
642
643 /* clk does not have a DPLL as a parent? error in the clock data */
644 if (!pclk) {
645 WARN_ON(1);
646 return NULL;
647 }
648
649 return pclk;
650}
651
626/** 652/**
627 * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate 653 * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
628 * @clk: DPLL output struct clk 654 * @clk: DPLL output struct clk
@@ -637,27 +663,14 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
637 unsigned long rate; 663 unsigned long rate;
638 u32 v; 664 u32 v;
639 struct clk_hw_omap *pclk = NULL; 665 struct clk_hw_omap *pclk = NULL;
640 struct clk *parent;
641 666
642 if (!parent_rate) 667 if (!parent_rate)
643 return 0; 668 return 0;
644 669
645 /* Walk up the parents of clk, looking for a DPLL */ 670 pclk = omap3_find_clkoutx2_dpll(hw);
646 do {
647 do {
648 parent = __clk_get_parent(hw->clk);
649 hw = __clk_get_hw(parent);
650 } while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
651 if (!hw)
652 break;
653 pclk = to_clk_hw_omap(hw);
654 } while (pclk && !pclk->dpll_data);
655 671
656 /* clk does not have a DPLL as a parent? error in the clock data */ 672 if (!pclk)
657 if (!pclk) {
658 WARN_ON(1);
659 return 0; 673 return 0;
660 }
661 674
662 dd = pclk->dpll_data; 675 dd = pclk->dpll_data;
663 676
@@ -672,6 +685,55 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
672 return rate; 685 return rate;
673} 686}
674 687
688int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
689 unsigned long parent_rate)
690{
691 return 0;
692}
693
694long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
695 unsigned long *prate)
696{
697 const struct dpll_data *dd;
698 u32 v;
699 struct clk_hw_omap *pclk = NULL;
700
701 if (!*prate)
702 return 0;
703
704 pclk = omap3_find_clkoutx2_dpll(hw);
705
706 if (!pclk)
707 return 0;
708
709 dd = pclk->dpll_data;
710
711 /* TYPE J does not have a clkoutx2 */
712 if (dd->flags & DPLL_J_TYPE) {
713 *prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate);
714 return *prate;
715 }
716
717 WARN_ON(!dd->enable_mask);
718
719 v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask;
720 v >>= __ffs(dd->enable_mask);
721
722 /* If in bypass, the rate is fixed to the bypass rate*/
723 if (v != OMAP3XXX_EN_DPLL_LOCKED)
724 return *prate;
725
726 if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
727 unsigned long best_parent;
728
729 best_parent = (rate / 2);
730 *prate = __clk_round_rate(__clk_get_parent(hw->clk),
731 best_parent);
732 }
733
734 return *prate * 2;
735}
736
675/* OMAP3/4 non-CORE DPLL clkops */ 737/* OMAP3/4 non-CORE DPLL clkops */
676const struct clk_hw_omap_ops clkhwops_omap3_dpll = { 738const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
677 .allow_idle = omap3_dpll_allow_idle, 739 .allow_idle = omap3_dpll_allow_idle,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 42d81885c700..1f33f5db10d5 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1947,29 +1947,31 @@ static int _ocp_softreset(struct omap_hwmod *oh)
1947 goto dis_opt_clks; 1947 goto dis_opt_clks;
1948 1948
1949 _write_sysconfig(v, oh); 1949 _write_sysconfig(v, oh);
1950 ret = _clear_softreset(oh, &v);
1951 if (ret)
1952 goto dis_opt_clks;
1953
1954 _write_sysconfig(v, oh);
1955 1950
1956 if (oh->class->sysc->srst_udelay) 1951 if (oh->class->sysc->srst_udelay)
1957 udelay(oh->class->sysc->srst_udelay); 1952 udelay(oh->class->sysc->srst_udelay);
1958 1953
1959 c = _wait_softreset_complete(oh); 1954 c = _wait_softreset_complete(oh);
1960 if (c == MAX_MODULE_SOFTRESET_WAIT) 1955 if (c == MAX_MODULE_SOFTRESET_WAIT) {
1961 pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n", 1956 pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
1962 oh->name, MAX_MODULE_SOFTRESET_WAIT); 1957 oh->name, MAX_MODULE_SOFTRESET_WAIT);
1963 else 1958 ret = -ETIMEDOUT;
1959 goto dis_opt_clks;
1960 } else {
1964 pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c); 1961 pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c);
1962 }
1963
1964 ret = _clear_softreset(oh, &v);
1965 if (ret)
1966 goto dis_opt_clks;
1967
1968 _write_sysconfig(v, oh);
1965 1969
1966 /* 1970 /*
1967 * XXX add _HWMOD_STATE_WEDGED for modules that don't come back from 1971 * XXX add _HWMOD_STATE_WEDGED for modules that don't come back from
1968 * _wait_target_ready() or _reset() 1972 * _wait_target_ready() or _reset()
1969 */ 1973 */
1970 1974
1971 ret = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
1972
1973dis_opt_clks: 1975dis_opt_clks:
1974 if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET) 1976 if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
1975 _disable_optional_clocks(oh); 1977 _disable_optional_clocks(oh);
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 18f333c440db..810c205d668b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1365,11 +1365,10 @@ static struct omap_hwmod_class_sysconfig dra7xx_spinlock_sysc = {
1365 .rev_offs = 0x0000, 1365 .rev_offs = 0x0000,
1366 .sysc_offs = 0x0010, 1366 .sysc_offs = 0x0010,
1367 .syss_offs = 0x0014, 1367 .syss_offs = 0x0014,
1368 .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | 1368 .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
1369 SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | 1369 SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
1370 SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), 1370 SYSS_HAS_RESET_STATUS),
1371 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 1371 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
1372 SIDLE_SMART_WKUP),
1373 .sysc_fields = &omap_hwmod_sysc_type1, 1372 .sysc_fields = &omap_hwmod_sysc_type1,
1374}; 1373};
1375 1374
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 3d5b24dcd9a4..c33e07e2f0d4 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -22,6 +22,8 @@
22#include "common-board-devices.h" 22#include "common-board-devices.h"
23#include "dss-common.h" 23#include "dss-common.h"
24#include "control.h" 24#include "control.h"
25#include "omap-secure.h"
26#include "soc.h"
25 27
26struct pdata_init { 28struct pdata_init {
27 const char *compatible; 29 const char *compatible;
@@ -169,6 +171,22 @@ static void __init am3517_evm_legacy_init(void)
169 omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET); 171 omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET);
170 omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */ 172 omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */
171} 173}
174
175static void __init nokia_n900_legacy_init(void)
176{
177 hsmmc2_internal_input_clk();
178
179 if (omap_type() == OMAP2_DEVICE_TYPE_SEC) {
180 if (IS_ENABLED(CONFIG_ARM_ERRATA_430973)) {
181 pr_info("RX-51: Enabling ARM errata 430973 workaround\n");
182 /* set IBE to 1 */
183 rx51_secure_update_aux_cr(BIT(6), 0);
184 } else {
185 pr_warning("RX-51: Not enabling ARM errata 430973 workaround\n");
186 pr_warning("Thumb binaries may crash randomly without this workaround\n");
187 }
188 }
189}
172#endif /* CONFIG_ARCH_OMAP3 */ 190#endif /* CONFIG_ARCH_OMAP3 */
173 191
174#ifdef CONFIG_ARCH_OMAP4 192#ifdef CONFIG_ARCH_OMAP4
@@ -239,6 +257,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
239#endif 257#endif
240#ifdef CONFIG_ARCH_OMAP3 258#ifdef CONFIG_ARCH_OMAP3
241 OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002030, "48002030.pinmux", &pcs_pdata), 259 OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002030, "48002030.pinmux", &pcs_pdata),
260 OF_DEV_AUXDATA("ti,omap3-padconf", 0x480025a0, "480025a0.pinmux", &pcs_pdata),
242 OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002a00, "48002a00.pinmux", &pcs_pdata), 261 OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002a00, "48002a00.pinmux", &pcs_pdata),
243 /* Only on am3517 */ 262 /* Only on am3517 */
244 OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL), 263 OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL),
@@ -259,7 +278,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
259static struct pdata_init pdata_quirks[] __initdata = { 278static struct pdata_init pdata_quirks[] __initdata = {
260#ifdef CONFIG_ARCH_OMAP3 279#ifdef CONFIG_ARCH_OMAP3
261 { "compulab,omap3-sbc-t3730", omap3_sbc_t3730_legacy_init, }, 280 { "compulab,omap3-sbc-t3730", omap3_sbc_t3730_legacy_init, },
262 { "nokia,omap3-n900", hsmmc2_internal_input_clk, }, 281 { "nokia,omap3-n900", nokia_n900_legacy_init, },
263 { "nokia,omap3-n9", hsmmc2_internal_input_clk, }, 282 { "nokia,omap3-n9", hsmmc2_internal_input_clk, },
264 { "nokia,omap3-n950", hsmmc2_internal_input_clk, }, 283 { "nokia,omap3-n950", hsmmc2_internal_input_clk, },
265 { "isee,omap3-igep0020", omap3_igep0020_legacy_init, }, 284 { "isee,omap3-igep0020", omap3_igep0020_legacy_init, },
diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c
index 6334b96b4097..280f3c58abe5 100644
--- a/arch/arm/mach-omap2/prminst44xx.c
+++ b/arch/arm/mach-omap2/prminst44xx.c
@@ -183,11 +183,11 @@ void omap4_prminst_global_warm_sw_reset(void)
183 OMAP4_PRM_RSTCTRL_OFFSET); 183 OMAP4_PRM_RSTCTRL_OFFSET);
184 v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK; 184 v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK;
185 omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION, 185 omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION,
186 OMAP4430_PRM_DEVICE_INST, 186 dev_inst,
187 OMAP4_PRM_RSTCTRL_OFFSET); 187 OMAP4_PRM_RSTCTRL_OFFSET);
188 188
189 /* OCP barrier */ 189 /* OCP barrier */
190 v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, 190 v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
191 OMAP4430_PRM_DEVICE_INST, 191 dev_inst,
192 OMAP4_PRM_RSTCTRL_OFFSET); 192 OMAP4_PRM_RSTCTRL_OFFSET);
193} 193}
diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h
index 184066ceb1f6..053c17b36559 100644
--- a/arch/cris/include/asm/bitops.h
+++ b/arch/cris/include/asm/bitops.h
@@ -144,7 +144,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
144 * definition, which doesn't have the same semantics. We don't want to 144 * definition, which doesn't have the same semantics. We don't want to
145 * use -fno-builtin, so just hide the name ffs. 145 * use -fno-builtin, so just hide the name ffs.
146 */ 146 */
147#define ffs kernel_ffs 147#define ffs(x) kernel_ffs(x)
148 148
149#include <asm-generic/bitops/fls.h> 149#include <asm-generic/bitops/fls.h>
150#include <asm-generic/bitops/__fls.h> 150#include <asm-generic/bitops/__fls.h>
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index a96bcf83a735..20e8a9b21d75 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -98,7 +98,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
98 /* attempt to allocate a granule's worth of cached memory pages */ 98 /* attempt to allocate a granule's worth of cached memory pages */
99 99
100 page = alloc_pages_exact_node(nid, 100 page = alloc_pages_exact_node(nid,
101 GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 101 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
102 IA64_GRANULE_SHIFT-PAGE_SHIFT); 102 IA64_GRANULE_SHIFT-PAGE_SHIFT);
103 if (!page) { 103 if (!page) {
104 mutex_unlock(&uc_pool->add_chunk_mutex); 104 mutex_unlock(&uc_pool->add_chunk_mutex);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index dcae3a7035db..153447452d5e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2353,9 +2353,8 @@ config SECCOMP
2353 If unsure, say Y. Only embedded should say N here. 2353 If unsure, say Y. Only embedded should say N here.
2354 2354
2355config MIPS_O32_FP64_SUPPORT 2355config MIPS_O32_FP64_SUPPORT
2356 bool "Support for O32 binaries using 64-bit FP" 2356 bool "Support for O32 binaries using 64-bit FP (EXPERIMENTAL)"
2357 depends on 32BIT || MIPS32_O32 2357 depends on 32BIT || MIPS32_O32
2358 default y
2359 help 2358 help
2360 When this is enabled, the kernel will support use of 64-bit floating 2359 When this is enabled, the kernel will support use of 64-bit floating
2361 point registers with binaries using the O32 ABI along with the 2360 point registers with binaries using the O32 ABI along with the
@@ -2367,7 +2366,14 @@ config MIPS_O32_FP64_SUPPORT
2367 of your kernel & potentially improve FP emulation performance by 2366 of your kernel & potentially improve FP emulation performance by
2368 saying N here. 2367 saying N here.
2369 2368
2370 If unsure, say Y. 2369 Although binutils currently supports use of this flag the details
2370 concerning its effect upon the O32 ABI in userland are still being
2371 worked on. In order to avoid userland becoming dependant upon current
2372 behaviour before the details have been finalised, this option should
2373 be considered experimental and only enabled by those working upon
2374 said details.
2375
2376 If unsure, say N.
2371 2377
2372config USE_OF 2378config USE_OF
2373 bool 2379 bool
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index 9edc35ff8cf1..acf9a2a37f5a 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -53,10 +53,8 @@ void __init prom_init(void)
53 prom_init_cmdline(); 53 prom_init_cmdline();
54 54
55 memsize_str = prom_getenv("memsize"); 55 memsize_str = prom_getenv("memsize");
56 if (!memsize_str) 56 if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
57 memsize = 0x04000000; 57 memsize = 0x04000000;
58 else
59 strict_strtoul(memsize_str, 0, &memsize);
60 add_memory_region(0, memsize, BOOT_MEM_RAM); 58 add_memory_region(0, memsize, BOOT_MEM_RAM);
61} 59}
62 60
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 9969dbab19e3..25a59a23547e 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -52,10 +52,8 @@ void __init prom_init(void)
52 prom_init_cmdline(); 52 prom_init_cmdline();
53 53
54 memsize_str = prom_getenv("memsize"); 54 memsize_str = prom_getenv("memsize");
55 if (!memsize_str) 55 if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
56 memsize = 0x04000000; 56 memsize = 0x04000000;
57 else
58 strict_strtoul(memsize_str, 0, &memsize);
59 add_memory_region(0, memsize, BOOT_MEM_RAM); 57 add_memory_region(0, memsize, BOOT_MEM_RAM);
60} 58}
61 59
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
index 6d612e2b949b..cdd8246f92b3 100644
--- a/arch/mips/bcm47xx/board.c
+++ b/arch/mips/bcm47xx/board.c
@@ -1,3 +1,4 @@
1#include <linux/errno.h>
1#include <linux/export.h> 2#include <linux/export.h>
2#include <linux/string.h> 3#include <linux/string.h>
3#include <bcm47xx_board.h> 4#include <bcm47xx_board.h>
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 3220c93ea981..69a9a22d014a 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -106,7 +106,7 @@
106 .endm 106 .endm
107 107
108 .macro fpu_save_double thread status tmp 108 .macro fpu_save_double thread status tmp
109#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2) 109#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
110 sll \tmp, \status, 5 110 sll \tmp, \status, 5
111 bgez \tmp, 10f 111 bgez \tmp, 10f
112 fpu_save_16odd \thread 112 fpu_save_16odd \thread
@@ -159,7 +159,7 @@
159 .endm 159 .endm
160 160
161 .macro fpu_restore_double thread status tmp 161 .macro fpu_restore_double thread status tmp
162#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2) 162#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
163 sll \tmp, \status, 5 163 sll \tmp, \status, 5
164 bgez \tmp, 10f # 16 register mode? 164 bgez \tmp, 10f # 16 register mode?
165 165
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 6b9749540edf..58e50cbdb1a6 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -57,7 +57,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
57 return 0; 57 return 0;
58 58
59 case FPU_64BIT: 59 case FPU_64BIT:
60#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_MIPS64)) 60#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
61 /* we only have a 32-bit FPU */ 61 /* we only have a 32-bit FPU */
62 return SIGFPE; 62 return SIGFPE;
63#endif 63#endif
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 33e8dbfc1b63..f35b131977e6 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -13,6 +13,7 @@
13#ifndef __ASM_MIPS_SYSCALL_H 13#ifndef __ASM_MIPS_SYSCALL_H
14#define __ASM_MIPS_SYSCALL_H 14#define __ASM_MIPS_SYSCALL_H
15 15
16#include <linux/compiler.h>
16#include <linux/audit.h> 17#include <linux/audit.h>
17#include <linux/elf-em.h> 18#include <linux/elf-em.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
@@ -39,14 +40,14 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
39 40
40#ifdef CONFIG_32BIT 41#ifdef CONFIG_32BIT
41 case 4: case 5: case 6: case 7: 42 case 4: case 5: case 6: case 7:
42 return get_user(*arg, (int *)usp + 4 * n); 43 return get_user(*arg, (int *)usp + n);
43#endif 44#endif
44 45
45#ifdef CONFIG_64BIT 46#ifdef CONFIG_64BIT
46 case 4: case 5: case 6: case 7: 47 case 4: case 5: case 6: case 7:
47#ifdef CONFIG_MIPS32_O32 48#ifdef CONFIG_MIPS32_O32
48 if (test_thread_flag(TIF_32BIT_REGS)) 49 if (test_thread_flag(TIF_32BIT_REGS))
49 return get_user(*arg, (int *)usp + 4 * n); 50 return get_user(*arg, (int *)usp + n);
50 else 51 else
51#endif 52#endif
52 *arg = regs->regs[4 + n]; 53 *arg = regs->regs[4 + n];
@@ -57,6 +58,8 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
57 default: 58 default:
58 BUG(); 59 BUG();
59 } 60 }
61
62 unreachable();
60} 63}
61 64
62static inline long syscall_get_return_value(struct task_struct *task, 65static inline long syscall_get_return_value(struct task_struct *task,
@@ -83,11 +86,10 @@ static inline void syscall_get_arguments(struct task_struct *task,
83 unsigned int i, unsigned int n, 86 unsigned int i, unsigned int n,
84 unsigned long *args) 87 unsigned long *args)
85{ 88{
86 unsigned long arg;
87 int ret; 89 int ret;
88 90
89 while (n--) 91 while (n--)
90 ret |= mips_get_syscall_arg(&arg, task, regs, i++); 92 ret |= mips_get_syscall_arg(args++, task, regs, i++);
91 93
92 /* 94 /*
93 * No way to communicate an error because this is a void function. 95 * No way to communicate an error because this is a void function.
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index b39ba25b41cc..f25181b19941 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -163,8 +163,8 @@ enum cop1_sdw_func {
163 */ 163 */
164enum cop1x_func { 164enum cop1x_func {
165 lwxc1_op = 0x00, ldxc1_op = 0x01, 165 lwxc1_op = 0x00, ldxc1_op = 0x01,
166 pfetch_op = 0x07, swxc1_op = 0x08, 166 swxc1_op = 0x08, sdxc1_op = 0x09,
167 sdxc1_op = 0x09, madd_s_op = 0x20, 167 pfetch_op = 0x0f, madd_s_op = 0x20,
168 madd_d_op = 0x21, madd_e_op = 0x22, 168 madd_d_op = 0x21, madd_e_op = 0x22,
169 msub_s_op = 0x28, msub_d_op = 0x29, 169 msub_s_op = 0x28, msub_d_op = 0x29,
170 msub_e_op = 0x2a, nmadd_s_op = 0x30, 170 msub_e_op = 0x2a, nmadd_s_op = 0x30,
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 185ba258361b..374ed74cd516 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -111,11 +111,10 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
111 safe_store_code(new_code1, ip, faulted); 111 safe_store_code(new_code1, ip, faulted);
112 if (unlikely(faulted)) 112 if (unlikely(faulted))
113 return -EFAULT; 113 return -EFAULT;
114 ip += 4; 114 safe_store_code(new_code2, ip + 4, faulted);
115 safe_store_code(new_code2, ip, faulted);
116 if (unlikely(faulted)) 115 if (unlikely(faulted))
117 return -EFAULT; 116 return -EFAULT;
118 flush_icache_range(ip, ip + 8); /* original ip + 12 */ 117 flush_icache_range(ip, ip + 8);
119 return 0; 118 return 0;
120} 119}
121#endif 120#endif
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 253b2fb52026..73b0ddf910d4 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -35,9 +35,9 @@
35LEAF(_save_fp_context) 35LEAF(_save_fp_context)
36 cfc1 t1, fcr31 36 cfc1 t1, fcr31
37 37
38#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2) 38#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
39 .set push 39 .set push
40#ifdef CONFIG_MIPS32_R2 40#ifdef CONFIG_CPU_MIPS32_R2
41 .set mips64r2 41 .set mips64r2
42 mfc0 t0, CP0_STATUS 42 mfc0 t0, CP0_STATUS
43 sll t0, t0, 5 43 sll t0, t0, 5
@@ -146,11 +146,11 @@ LEAF(_save_fp_context32)
146 * - cp1 status/control register 146 * - cp1 status/control register
147 */ 147 */
148LEAF(_restore_fp_context) 148LEAF(_restore_fp_context)
149 EX lw t0, SC_FPC_CSR(a0) 149 EX lw t1, SC_FPC_CSR(a0)
150 150
151#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2) 151#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
152 .set push 152 .set push
153#ifdef CONFIG_MIPS32_R2 153#ifdef CONFIG_CPU_MIPS32_R2
154 .set mips64r2 154 .set mips64r2
155 mfc0 t0, CP0_STATUS 155 mfc0 t0, CP0_STATUS
156 sll t0, t0, 5 156 sll t0, t0, 5
@@ -191,7 +191,7 @@ LEAF(_restore_fp_context)
191 EX ldc1 $f26, SC_FPREGS+208(a0) 191 EX ldc1 $f26, SC_FPREGS+208(a0)
192 EX ldc1 $f28, SC_FPREGS+224(a0) 192 EX ldc1 $f28, SC_FPREGS+224(a0)
193 EX ldc1 $f30, SC_FPREGS+240(a0) 193 EX ldc1 $f30, SC_FPREGS+240(a0)
194 ctc1 t0, fcr31 194 ctc1 t1, fcr31
195 jr ra 195 jr ra
196 li v0, 0 # success 196 li v0, 0 # success
197 END(_restore_fp_context) 197 END(_restore_fp_context)
@@ -199,7 +199,7 @@ LEAF(_restore_fp_context)
199#ifdef CONFIG_MIPS32_COMPAT 199#ifdef CONFIG_MIPS32_COMPAT
200LEAF(_restore_fp_context32) 200LEAF(_restore_fp_context32)
201 /* Restore an o32 sigcontext. */ 201 /* Restore an o32 sigcontext. */
202 EX lw t0, SC32_FPC_CSR(a0) 202 EX lw t1, SC32_FPC_CSR(a0)
203 203
204 mfc0 t0, CP0_STATUS 204 mfc0 t0, CP0_STATUS
205 sll t0, t0, 5 205 sll t0, t0, 5
@@ -239,7 +239,7 @@ LEAF(_restore_fp_context32)
239 EX ldc1 $f26, SC32_FPREGS+208(a0) 239 EX ldc1 $f26, SC32_FPREGS+208(a0)
240 EX ldc1 $f28, SC32_FPREGS+224(a0) 240 EX ldc1 $f28, SC32_FPREGS+224(a0)
241 EX ldc1 $f30, SC32_FPREGS+240(a0) 241 EX ldc1 $f30, SC32_FPREGS+240(a0)
242 ctc1 t0, fcr31 242 ctc1 t1, fcr31
243 jr ra 243 jr ra
244 li v0, 0 # success 244 li v0, 0 # success
245 END(_restore_fp_context32) 245 END(_restore_fp_context32)
diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c
index 56dc69635153..758fb3cd2326 100644
--- a/arch/mips/kernel/rtlx-cmp.c
+++ b/arch/mips/kernel/rtlx-cmp.c
@@ -112,5 +112,8 @@ void __exit rtlx_module_exit(void)
112 112
113 for (i = 0; i < RTLX_CHANNELS; i++) 113 for (i = 0; i < RTLX_CHANNELS; i++)
114 device_destroy(mt_class, MKDEV(major, i)); 114 device_destroy(mt_class, MKDEV(major, i));
115
115 unregister_chrdev(major, RTLX_MODULE_NAME); 116 unregister_chrdev(major, RTLX_MODULE_NAME);
117
118 aprp_hook = NULL;
116} 119}
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
index 91d61ba422b4..9c1aca00fd54 100644
--- a/arch/mips/kernel/rtlx-mt.c
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -144,5 +144,8 @@ void __exit rtlx_module_exit(void)
144 144
145 for (i = 0; i < RTLX_CHANNELS; i++) 145 for (i = 0; i < RTLX_CHANNELS; i++)
146 device_destroy(mt_class, MKDEV(major, i)); 146 device_destroy(mt_class, MKDEV(major, i));
147
147 unregister_chrdev(major, RTLX_MODULE_NAME); 148 unregister_chrdev(major, RTLX_MODULE_NAME);
149
150 aprp_hook = NULL;
148} 151}
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 506925b2c3f3..0b4e2e38294b 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1538,10 +1538,10 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1538 break; 1538 break;
1539 } 1539 }
1540 1540
1541 case 0x7: /* 7 */ 1541 case 0x3:
1542 if (MIPSInst_FUNC(ir) != pfetch_op) { 1542 if (MIPSInst_FUNC(ir) != pfetch_op)
1543 return SIGILL; 1543 return SIGILL;
1544 } 1544
1545 /* ignore prefx operation */ 1545 /* ignore prefx operation */
1546 break; 1546 break;
1547 1547
diff --git a/arch/mips/mti-malta/malta-amon.c b/arch/mips/mti-malta/malta-amon.c
index 592ac0427426..84ac523b0ce0 100644
--- a/arch/mips/mti-malta/malta-amon.c
+++ b/arch/mips/mti-malta/malta-amon.c
@@ -72,7 +72,7 @@ int amon_cpu_start(int cpu,
72 return 0; 72 return 0;
73} 73}
74 74
75#ifdef CONFIG_MIPS_VPE_LOADER 75#ifdef CONFIG_MIPS_VPE_LOADER_CMP
76int vpe_run(struct vpe *v) 76int vpe_run(struct vpe *v)
77{ 77{
78 struct vpe_notifications *n; 78 struct vpe_notifications *n;
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index ca3e3a46a42f..2242181a6284 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -119,7 +119,7 @@ static void malta_hw0_irqdispatch(void)
119 119
120 do_IRQ(MALTA_INT_BASE + irq); 120 do_IRQ(MALTA_INT_BASE + irq);
121 121
122#ifdef MIPS_VPE_APSP_API 122#ifdef CONFIG_MIPS_VPE_APSP_API_MT
123 if (aprp_hook) 123 if (aprp_hook)
124 aprp_hook(); 124 aprp_hook();
125#endif 125#endif
@@ -310,7 +310,7 @@ static void ipi_call_dispatch(void)
310 310
311static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) 311static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
312{ 312{
313#ifdef MIPS_VPE_APSP_API 313#ifdef CONFIG_MIPS_VPE_APSP_API_CMP
314 if (aprp_hook) 314 if (aprp_hook)
315 aprp_hook(); 315 aprp_hook();
316#endif 316#endif
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index d37be36dc659..2b91b0e61566 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -150,6 +150,7 @@ msi_irq_allocated:
150 msg.address_lo = 150 msg.address_lo =
151 ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff; 151 ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff;
152 msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32; 152 msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32;
153 break;
153 case OCTEON_DMA_BAR_TYPE_BIG: 154 case OCTEON_DMA_BAR_TYPE_BIG:
154 /* When using big bar, Bar 0 is based at 0 */ 155 /* When using big bar, Bar 0 is based at 0 */
155 msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff; 156 msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff;
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 5ec1e47a0d77..e865d748179b 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -123,7 +123,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
123 123
124 area->nid = nid; 124 area->nid = nid;
125 area->order = order; 125 area->order = order;
126 area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE, 126 area->pages = alloc_pages_exact_node(area->nid,
127 GFP_KERNEL|__GFP_THISNODE,
127 area->order); 128 area->order);
128 129
129 if (!area->pages) { 130 if (!area->pages) {
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index c026cca5602c..f3aaf231b4e5 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -341,10 +341,6 @@ config X86_USE_3DNOW
341 def_bool y 341 def_bool y
342 depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML 342 depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
343 343
344config X86_OOSTORE
345 def_bool y
346 depends on (MWINCHIP3D || MWINCHIPC6) && MTRR
347
348# 344#
349# P6_NOPs are a relatively minor optimization that require a family >= 345# P6_NOPs are a relatively minor optimization that require a family >=
350# 6 processor, except that it is broken on certain VIA chips. 346# 6 processor, except that it is broken on certain VIA chips.
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 04a48903b2eb..69bbb4845020 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -85,11 +85,7 @@
85#else 85#else
86# define smp_rmb() barrier() 86# define smp_rmb() barrier()
87#endif 87#endif
88#ifdef CONFIG_X86_OOSTORE 88#define smp_wmb() barrier()
89# define smp_wmb() wmb()
90#else
91# define smp_wmb() barrier()
92#endif
93#define smp_read_barrier_depends() read_barrier_depends() 89#define smp_read_barrier_depends() read_barrier_depends()
94#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) 90#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
95#else /* !SMP */ 91#else /* !SMP */
@@ -100,7 +96,7 @@
100#define set_mb(var, value) do { var = value; barrier(); } while (0) 96#define set_mb(var, value) do { var = value; barrier(); } while (0)
101#endif /* SMP */ 97#endif /* SMP */
102 98
103#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) 99#if defined(CONFIG_X86_PPRO_FENCE)
104 100
105/* 101/*
106 * For either of these options x86 doesn't have a strong TSO memory 102 * For either of these options x86 doesn't have a strong TSO memory
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 34f69cb9350a..91d9c69a629e 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -237,7 +237,7 @@ memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
237 237
238static inline void flush_write_buffers(void) 238static inline void flush_write_buffers(void)
239{ 239{
240#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) 240#if defined(CONFIG_X86_PPRO_FENCE)
241 asm volatile("lock; addl $0,0(%%esp)": : :"memory"); 241 asm volatile("lock; addl $0,0(%%esp)": : :"memory");
242#endif 242#endif
243} 243}
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index bf156ded74b5..0f62f5482d91 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -26,10 +26,9 @@
26# define LOCK_PTR_REG "D" 26# define LOCK_PTR_REG "D"
27#endif 27#endif
28 28
29#if defined(CONFIG_X86_32) && \ 29#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE))
30 (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
31/* 30/*
32 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock 31 * On PPro SMP, we use a locked operation to unlock
33 * (PPro errata 66, 92) 32 * (PPro errata 66, 92)
34 */ 33 */
35# define UNLOCK_LOCK_PREFIX LOCK_PREFIX 34# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 8779edab684e..d8fba5c15fbd 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -8,236 +8,6 @@
8 8
9#include "cpu.h" 9#include "cpu.h"
10 10
11#ifdef CONFIG_X86_OOSTORE
12
13static u32 power2(u32 x)
14{
15 u32 s = 1;
16
17 while (s <= x)
18 s <<= 1;
19
20 return s >>= 1;
21}
22
23
24/*
25 * Set up an actual MCR
26 */
27static void centaur_mcr_insert(int reg, u32 base, u32 size, int key)
28{
29 u32 lo, hi;
30
31 hi = base & ~0xFFF;
32 lo = ~(size-1); /* Size is a power of 2 so this makes a mask */
33 lo &= ~0xFFF; /* Remove the ctrl value bits */
34 lo |= key; /* Attribute we wish to set */
35 wrmsr(reg+MSR_IDT_MCR0, lo, hi);
36 mtrr_centaur_report_mcr(reg, lo, hi); /* Tell the mtrr driver */
37}
38
39/*
40 * Figure what we can cover with MCR's
41 *
42 * Shortcut: We know you can't put 4Gig of RAM on a winchip
43 */
44static u32 ramtop(void)
45{
46 u32 clip = 0xFFFFFFFFUL;
47 u32 top = 0;
48 int i;
49
50 for (i = 0; i < e820.nr_map; i++) {
51 unsigned long start, end;
52
53 if (e820.map[i].addr > 0xFFFFFFFFUL)
54 continue;
55 /*
56 * Don't MCR over reserved space. Ignore the ISA hole
57 * we frob around that catastrophe already
58 */
59 if (e820.map[i].type == E820_RESERVED) {
60 if (e820.map[i].addr >= 0x100000UL &&
61 e820.map[i].addr < clip)
62 clip = e820.map[i].addr;
63 continue;
64 }
65 start = e820.map[i].addr;
66 end = e820.map[i].addr + e820.map[i].size;
67 if (start >= end)
68 continue;
69 if (end > top)
70 top = end;
71 }
72 /*
73 * Everything below 'top' should be RAM except for the ISA hole.
74 * Because of the limited MCR's we want to map NV/ACPI into our
75 * MCR range for gunk in RAM
76 *
77 * Clip might cause us to MCR insufficient RAM but that is an
78 * acceptable failure mode and should only bite obscure boxes with
79 * a VESA hole at 15Mb
80 *
81 * The second case Clip sometimes kicks in is when the EBDA is marked
82 * as reserved. Again we fail safe with reasonable results
83 */
84 if (top > clip)
85 top = clip;
86
87 return top;
88}
89
90/*
91 * Compute a set of MCR's to give maximum coverage
92 */
93static int centaur_mcr_compute(int nr, int key)
94{
95 u32 mem = ramtop();
96 u32 root = power2(mem);
97 u32 base = root;
98 u32 top = root;
99 u32 floor = 0;
100 int ct = 0;
101
102 while (ct < nr) {
103 u32 fspace = 0;
104 u32 high;
105 u32 low;
106
107 /*
108 * Find the largest block we will fill going upwards
109 */
110 high = power2(mem-top);
111
112 /*
113 * Find the largest block we will fill going downwards
114 */
115 low = base/2;
116
117 /*
118 * Don't fill below 1Mb going downwards as there
119 * is an ISA hole in the way.
120 */
121 if (base <= 1024*1024)
122 low = 0;
123
124 /*
125 * See how much space we could cover by filling below
126 * the ISA hole
127 */
128
129 if (floor == 0)
130 fspace = 512*1024;
131 else if (floor == 512*1024)
132 fspace = 128*1024;
133
134 /* And forget ROM space */
135
136 /*
137 * Now install the largest coverage we get
138 */
139 if (fspace > high && fspace > low) {
140 centaur_mcr_insert(ct, floor, fspace, key);
141 floor += fspace;
142 } else if (high > low) {
143 centaur_mcr_insert(ct, top, high, key);
144 top += high;
145 } else if (low > 0) {
146 base -= low;
147 centaur_mcr_insert(ct, base, low, key);
148 } else
149 break;
150 ct++;
151 }
152 /*
153 * We loaded ct values. We now need to set the mask. The caller
154 * must do this bit.
155 */
156 return ct;
157}
158
159static void centaur_create_optimal_mcr(void)
160{
161 int used;
162 int i;
163
164 /*
165 * Allocate up to 6 mcrs to mark as much of ram as possible
166 * as write combining and weak write ordered.
167 *
168 * To experiment with: Linux never uses stack operations for
169 * mmio spaces so we could globally enable stack operation wc
170 *
171 * Load the registers with type 31 - full write combining, all
172 * writes weakly ordered.
173 */
174 used = centaur_mcr_compute(6, 31);
175
176 /*
177 * Wipe unused MCRs
178 */
179 for (i = used; i < 8; i++)
180 wrmsr(MSR_IDT_MCR0+i, 0, 0);
181}
182
183static void winchip2_create_optimal_mcr(void)
184{
185 u32 lo, hi;
186 int used;
187 int i;
188
189 /*
190 * Allocate up to 6 mcrs to mark as much of ram as possible
191 * as write combining, weak store ordered.
192 *
193 * Load the registers with type 25
194 * 8 - weak write ordering
195 * 16 - weak read ordering
196 * 1 - write combining
197 */
198 used = centaur_mcr_compute(6, 25);
199
200 /*
201 * Mark the registers we are using.
202 */
203 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
204 for (i = 0; i < used; i++)
205 lo |= 1<<(9+i);
206 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
207
208 /*
209 * Wipe unused MCRs
210 */
211
212 for (i = used; i < 8; i++)
213 wrmsr(MSR_IDT_MCR0+i, 0, 0);
214}
215
216/*
217 * Handle the MCR key on the Winchip 2.
218 */
219static void winchip2_unprotect_mcr(void)
220{
221 u32 lo, hi;
222 u32 key;
223
224 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
225 lo &= ~0x1C0; /* blank bits 8-6 */
226 key = (lo>>17) & 7;
227 lo |= key<<6; /* replace with unlock key */
228 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
229}
230
231static void winchip2_protect_mcr(void)
232{
233 u32 lo, hi;
234
235 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
236 lo &= ~0x1C0; /* blank bits 8-6 */
237 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
238}
239#endif /* CONFIG_X86_OOSTORE */
240
241#define ACE_PRESENT (1 << 6) 11#define ACE_PRESENT (1 << 6)
242#define ACE_ENABLED (1 << 7) 12#define ACE_ENABLED (1 << 7)
243#define ACE_FCR (1 << 28) /* MSR_VIA_FCR */ 13#define ACE_FCR (1 << 28) /* MSR_VIA_FCR */
@@ -362,20 +132,6 @@ static void init_centaur(struct cpuinfo_x86 *c)
362 fcr_clr = DPDC; 132 fcr_clr = DPDC;
363 printk(KERN_NOTICE "Disabling bugged TSC.\n"); 133 printk(KERN_NOTICE "Disabling bugged TSC.\n");
364 clear_cpu_cap(c, X86_FEATURE_TSC); 134 clear_cpu_cap(c, X86_FEATURE_TSC);
365#ifdef CONFIG_X86_OOSTORE
366 centaur_create_optimal_mcr();
367 /*
368 * Enable:
369 * write combining on non-stack, non-string
370 * write combining on string, all types
371 * weak write ordering
372 *
373 * The C6 original lacks weak read order
374 *
375 * Note 0x120 is write only on Winchip 1
376 */
377 wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
378#endif
379 break; 135 break;
380 case 8: 136 case 8:
381 switch (c->x86_mask) { 137 switch (c->x86_mask) {
@@ -392,40 +148,12 @@ static void init_centaur(struct cpuinfo_x86 *c)
392 fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| 148 fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
393 E2MMX|EAMD3D; 149 E2MMX|EAMD3D;
394 fcr_clr = DPDC; 150 fcr_clr = DPDC;
395#ifdef CONFIG_X86_OOSTORE
396 winchip2_unprotect_mcr();
397 winchip2_create_optimal_mcr();
398 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
399 /*
400 * Enable:
401 * write combining on non-stack, non-string
402 * write combining on string, all types
403 * weak write ordering
404 */
405 lo |= 31;
406 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
407 winchip2_protect_mcr();
408#endif
409 break; 151 break;
410 case 9: 152 case 9:
411 name = "3"; 153 name = "3";
412 fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| 154 fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
413 E2MMX|EAMD3D; 155 E2MMX|EAMD3D;
414 fcr_clr = DPDC; 156 fcr_clr = DPDC;
415#ifdef CONFIG_X86_OOSTORE
416 winchip2_unprotect_mcr();
417 winchip2_create_optimal_mcr();
418 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
419 /*
420 * Enable:
421 * write combining on non-stack, non-string
422 * write combining on string, all types
423 * weak write ordering
424 */
425 lo |= 31;
426 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
427 winchip2_protect_mcr();
428#endif
429 break; 157 break;
430 default: 158 default:
431 name = "??"; 159 name = "??";
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index c88f7f4b03ee..047f540cf3f7 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -3334,6 +3334,8 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
3334 if (!pmus) 3334 if (!pmus)
3335 return -ENOMEM; 3335 return -ENOMEM;
3336 3336
3337 type->pmus = pmus;
3338
3337 type->unconstrainted = (struct event_constraint) 3339 type->unconstrainted = (struct event_constraint)
3338 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, 3340 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
3339 0, type->num_counters, 0, 0); 3341 0, type->num_counters, 0, 0);
@@ -3369,7 +3371,6 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
3369 } 3371 }
3370 3372
3371 type->pmu_group = &uncore_pmu_attr_group; 3373 type->pmu_group = &uncore_pmu_attr_group;
3372 type->pmus = pmus;
3373 return 0; 3374 return 0;
3374fail: 3375fail:
3375 uncore_type_exit(type); 3376 uncore_type_exit(type);
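
The uncore hunk above moves the type->pmus assignment to just after the allocation so the shared "fail" path can reach the buffer through its owner. A hedged plain-C sketch of that pattern, with invented names, is below.

/*
 * Sketch (not the perf code): publish an allocation into its owning
 * structure as soon as it succeeds, so one common error path can free it
 * through the owner instead of leaking it when a later step fails.
 */
#include <stdio.h>
#include <stdlib.h>

struct uncore_like {
	int *pmus;
	int  n;
};

static void type_exit(struct uncore_like *t)
{
	free(t->pmus);          /* NULL-safe; frees the real buffer otherwise */
	t->pmus = NULL;
}

static int type_init(struct uncore_like *t, int n)
{
	int *pmus = calloc(n, sizeof(*pmus));
	if (!pmus)
		return -1;
	t->pmus = pmus;         /* the early assignment the fix adds */
	t->n = n;

	if (n > 64)             /* stand-in for a later step that can fail */
		goto fail;
	return 0;
fail:
	type_exit(t);           /* safe: t->pmus already points at the buffer */
	return -1;
}

int main(void)
{
	struct uncore_like t = { 0 };
	printf("init(8)  -> %d\n", type_init(&t, 8));
	type_exit(&t);
	printf("init(99) -> %d\n", type_init(&t, 99));
	return 0;
}
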
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index e8368c6dd2a2..d5dd80814419 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -86,10 +86,19 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
86 86
87void __kernel_fpu_end(void) 87void __kernel_fpu_end(void)
88{ 88{
89 if (use_eager_fpu()) 89 if (use_eager_fpu()) {
90 math_state_restore(); 90 /*
91 else 91 * For eager fpu, most the time, tsk_used_math() is true.
92 * Restore the user math as we are done with the kernel usage.
93 * At few instances during thread exit, signal handling etc,
94 * tsk_used_math() is false. Those few places will take proper
95 * actions, so we don't need to restore the math here.
96 */
97 if (likely(tsk_used_math(current)))
98 math_state_restore();
99 } else {
92 stts(); 100 stts();
101 }
93} 102}
94EXPORT_SYMBOL(__kernel_fpu_end); 103EXPORT_SYMBOL(__kernel_fpu_end);
95 104
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 7c6acd4b8995..ff898bbf579d 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -529,7 +529,7 @@ static void quirk_amd_nb_node(struct pci_dev *dev)
529 return; 529 return;
530 530
531 pci_read_config_dword(nb_ht, 0x60, &val); 531 pci_read_config_dword(nb_ht, 0x60, &val);
532 node = val & 7; 532 node = pcibus_to_node(dev->bus) | (val & 7);
533 /* 533 /*
534 * Some hardware may return an invalid node ID, 534 * Some hardware may return an invalid node ID,
535 * so check it first: 535 * so check it first:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e81df8fce027..2de1bc09a8d4 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3002,10 +3002,8 @@ static int cr8_write_interception(struct vcpu_svm *svm)
3002 u8 cr8_prev = kvm_get_cr8(&svm->vcpu); 3002 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
3003 /* instruction emulation calls kvm_set_cr8() */ 3003 /* instruction emulation calls kvm_set_cr8() */
3004 r = cr_interception(svm); 3004 r = cr_interception(svm);
3005 if (irqchip_in_kernel(svm->vcpu.kvm)) { 3005 if (irqchip_in_kernel(svm->vcpu.kvm))
3006 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3007 return r; 3006 return r;
3008 }
3009 if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) 3007 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
3010 return r; 3008 return r;
3011 kvm_run->exit_reason = KVM_EXIT_SET_TPR; 3009 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
@@ -3567,6 +3565,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3567 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) 3565 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3568 return; 3566 return;
3569 3567
3568 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3569
3570 if (irr == -1) 3570 if (irr == -1)
3571 return; 3571 return;
3572 3572
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 877b9a1b2152..01495755701b 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -140,7 +140,7 @@ bpf_slow_path_byte_msh:
140 push %r9; \ 140 push %r9; \
141 push SKBDATA; \ 141 push SKBDATA; \
142/* rsi already has offset */ \ 142/* rsi already has offset */ \
143 mov $SIZE,%ecx; /* size */ \ 143 mov $SIZE,%edx; /* size */ \
144 call bpf_internal_load_pointer_neg_helper; \ 144 call bpf_internal_load_pointer_neg_helper; \
145 test %rax,%rax; \ 145 test %rax,%rax; \
146 pop SKBDATA; \ 146 pop SKBDATA; \
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 7d01b8c56c00..cc04e67bfd05 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -40,11 +40,7 @@
40#define smp_rmb() barrier() 40#define smp_rmb() barrier()
41#endif /* CONFIG_X86_PPRO_FENCE */ 41#endif /* CONFIG_X86_PPRO_FENCE */
42 42
43#ifdef CONFIG_X86_OOSTORE
44#define smp_wmb() wmb()
45#else /* CONFIG_X86_OOSTORE */
46#define smp_wmb() barrier() 43#define smp_wmb() barrier()
47#endif /* CONFIG_X86_OOSTORE */
48 44
49#define smp_read_barrier_depends() read_barrier_depends() 45#define smp_read_barrier_depends() read_barrier_depends()
50#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) 46#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index b0f6c4a2a119..c40fb2e81bbc 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -71,6 +71,17 @@ static int acpi_sleep_prepare(u32 acpi_state)
71 return 0; 71 return 0;
72} 72}
73 73
74static bool acpi_sleep_state_supported(u8 sleep_state)
75{
76 acpi_status status;
77 u8 type_a, type_b;
78
79 status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
80 return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
81 || (acpi_gbl_FADT.sleep_control.address
82 && acpi_gbl_FADT.sleep_status.address));
83}
84
74#ifdef CONFIG_ACPI_SLEEP 85#ifdef CONFIG_ACPI_SLEEP
75static u32 acpi_target_sleep_state = ACPI_STATE_S0; 86static u32 acpi_target_sleep_state = ACPI_STATE_S0;
76 87
@@ -604,15 +615,9 @@ static void acpi_sleep_suspend_setup(void)
604{ 615{
605 int i; 616 int i;
606 617
607 for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) { 618 for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
608 acpi_status status; 619 if (acpi_sleep_state_supported(i))
609 u8 type_a, type_b;
610
611 status = acpi_get_sleep_type_data(i, &type_a, &type_b);
612 if (ACPI_SUCCESS(status)) {
613 sleep_states[i] = 1; 620 sleep_states[i] = 1;
614 }
615 }
616 621
617 suspend_set_ops(old_suspend_ordering ? 622 suspend_set_ops(old_suspend_ordering ?
618 &acpi_suspend_ops_old : &acpi_suspend_ops); 623 &acpi_suspend_ops_old : &acpi_suspend_ops);
@@ -740,11 +745,7 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
740 745
741static void acpi_sleep_hibernate_setup(void) 746static void acpi_sleep_hibernate_setup(void)
742{ 747{
743 acpi_status status; 748 if (!acpi_sleep_state_supported(ACPI_STATE_S4))
744 u8 type_a, type_b;
745
746 status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
747 if (ACPI_FAILURE(status))
748 return; 749 return;
749 750
750 hibernation_set_ops(old_suspend_ordering ? 751 hibernation_set_ops(old_suspend_ordering ?
@@ -793,8 +794,6 @@ static void acpi_power_off(void)
793 794
794int __init acpi_sleep_init(void) 795int __init acpi_sleep_init(void)
795{ 796{
796 acpi_status status;
797 u8 type_a, type_b;
798 char supported[ACPI_S_STATE_COUNT * 3 + 1]; 797 char supported[ACPI_S_STATE_COUNT * 3 + 1];
799 char *pos = supported; 798 char *pos = supported;
800 int i; 799 int i;
@@ -806,13 +805,7 @@ int __init acpi_sleep_init(void)
806 acpi_sleep_suspend_setup(); 805 acpi_sleep_suspend_setup();
807 acpi_sleep_hibernate_setup(); 806 acpi_sleep_hibernate_setup();
808 807
809 status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); 808 if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
810 /*
811 * Check both ACPI S5 object and ACPI sleep registers to
812 * install pm_power_off_prepare/pm_power_off hook
813 */
814 if (ACPI_SUCCESS(status) && acpi_gbl_FADT.sleep_control.address
815 && acpi_gbl_FADT.sleep_status.address) {
816 sleep_states[ACPI_STATE_S5] = 1; 809 sleep_states[ACPI_STATE_S5] = 1;
817 pm_power_off_prepare = acpi_power_off_prepare; 810 pm_power_off_prepare = acpi_power_off_prepare;
818 pm_power_off = acpi_power_off; 811 pm_power_off = acpi_power_off;
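
The sleep.c changes fold a repeated "query the sleep type, then test status plus the reduced-hardware registers" sequence into one predicate, acpi_sleep_state_supported(), used by all three call sites. A minimal userspace sketch of the same refactor, with invented names standing in for the ACPI calls and globals, follows.

/* Sketch only: a repeated status-plus-conditions check becomes one helper. */
#include <stdbool.h>
#include <stdio.h>

enum { STATE_S1 = 1, STATE_S4 = 4, STATE_MAX = 6 };

static int query_sleep_data(int state)       /* stand-in for the firmware query */
{
	return (state == STATE_S4) ? -1 : 0; /* pretend S4 is unsupported */
}

static bool reduced_hw;                      /* stand-in for acpi_gbl_reduced_hardware */
static bool have_sleep_regs = true;          /* stand-in for the FADT register check */

static bool sleep_state_supported(int state)
{
	if (query_sleep_data(state) != 0)
		return false;
	return !reduced_hw || have_sleep_regs;
}

int main(void)
{
	for (int s = STATE_S1; s < STATE_MAX; s++)
		printf("S%d supported: %s\n", s,
		       sleep_state_supported(s) ? "yes" : "no");
	return 0;
}
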
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1a3dbd1b196e..8cb2522d592a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4175,6 +4175,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4175 4175
4176 /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ 4176 /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
4177 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4177 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4178 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4178 4179
4179 /* Blacklist entries taken from Silicon Image 3124/3132 4180 /* Blacklist entries taken from Silicon Image 3124/3132
4180 Windows driver .inf file - also several Linux problem reports */ 4181 Windows driver .inf file - also several Linux problem reports */
@@ -4224,7 +4225,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4224 4225
4225 /* devices that don't properly handle queued TRIM commands */ 4226 /* devices that don't properly handle queued TRIM commands */
4226 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, 4227 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4227 { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, 4228 { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4228 4229
4229 /* 4230 /*
4230 * Some WD SATA-I drives spin up and down erratically when the link 4231 * Some WD SATA-I drives spin up and down erratically when the link
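
The blacklist tweak above widens "Crucial_CT???M500SSD1" to "Crucial_CT???M500SSD*" so every capacity and interface suffix is caught by one entry. The kernel matches these with its own glob helper; the userspace sketch below uses fnmatch(3), which behaves the same way for these patterns, and the model strings are illustrative.

#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
	const char *models[] = {
		"Crucial_CT120M500SSD1",
		"Crucial_CT240M500SSD3",   /* missed by the old ...SSD1 pattern */
	};
	const char *old_pat = "Crucial_CT???M500SSD1";
	const char *new_pat = "Crucial_CT???M500SSD*";

	for (int i = 0; i < 2; i++)
		printf("%-24s old:%-5s new:%s\n", models[i],
		       fnmatch(old_pat, models[i], 0) == 0 ? "match" : "miss",
		       fnmatch(new_pat, models[i], 0) == 0 ? "match" : "miss");
	return 0;
}
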
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index cf485d928903..199b52b7c3e1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1129,7 +1129,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
1129 per_cpu(cpufreq_cpu_data, j) = policy; 1129 per_cpu(cpufreq_cpu_data, j) = policy;
1130 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1130 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1131 1131
1132 if (cpufreq_driver->get) { 1132 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1133 policy->cur = cpufreq_driver->get(policy->cpu); 1133 policy->cur = cpufreq_driver->get(policy->cpu);
1134 if (!policy->cur) { 1134 if (!policy->cur) {
1135 pr_err("%s: ->get() failed\n", __func__); 1135 pr_err("%s: ->get() failed\n", __func__);
@@ -2143,7 +2143,7 @@ int cpufreq_update_policy(unsigned int cpu)
2143 * BIOS might change freq behind our back 2143 * BIOS might change freq behind our back
2144 * -> ask driver for current freq and notify governors about a change 2144 * -> ask driver for current freq and notify governors about a change
2145 */ 2145 */
2146 if (cpufreq_driver->get) { 2146 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2147 new_policy.cur = cpufreq_driver->get(cpu); 2147 new_policy.cur = cpufreq_driver->get(cpu);
2148 if (!policy->cur) { 2148 if (!policy->cur) {
2149 pr_debug("Driver did not initialize current freq"); 2149 pr_debug("Driver did not initialize current freq");
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index de4aa409abe2..2c6d5e118ac1 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -916,7 +916,7 @@ static int lookup_existing_device(struct device *dev, void *data)
916 old->config_rom_retries = 0; 916 old->config_rom_retries = 0;
917 fw_notice(card, "rediscovered device %s\n", dev_name(dev)); 917 fw_notice(card, "rediscovered device %s\n", dev_name(dev));
918 918
919 PREPARE_DELAYED_WORK(&old->work, fw_device_update); 919 old->workfn = fw_device_update;
920 fw_schedule_device_work(old, 0); 920 fw_schedule_device_work(old, 0);
921 921
922 if (current_node == card->root_node) 922 if (current_node == card->root_node)
@@ -1075,7 +1075,7 @@ static void fw_device_init(struct work_struct *work)
1075 if (atomic_cmpxchg(&device->state, 1075 if (atomic_cmpxchg(&device->state,
1076 FW_DEVICE_INITIALIZING, 1076 FW_DEVICE_INITIALIZING,
1077 FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { 1077 FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
1078 PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); 1078 device->workfn = fw_device_shutdown;
1079 fw_schedule_device_work(device, SHUTDOWN_DELAY); 1079 fw_schedule_device_work(device, SHUTDOWN_DELAY);
1080 } else { 1080 } else {
1081 fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n", 1081 fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
@@ -1196,13 +1196,20 @@ static void fw_device_refresh(struct work_struct *work)
1196 dev_name(&device->device), fw_rcode_string(ret)); 1196 dev_name(&device->device), fw_rcode_string(ret));
1197 gone: 1197 gone:
1198 atomic_set(&device->state, FW_DEVICE_GONE); 1198 atomic_set(&device->state, FW_DEVICE_GONE);
1199 PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); 1199 device->workfn = fw_device_shutdown;
1200 fw_schedule_device_work(device, SHUTDOWN_DELAY); 1200 fw_schedule_device_work(device, SHUTDOWN_DELAY);
1201 out: 1201 out:
1202 if (node_id == card->root_node->node_id) 1202 if (node_id == card->root_node->node_id)
1203 fw_schedule_bm_work(card, 0); 1203 fw_schedule_bm_work(card, 0);
1204} 1204}
1205 1205
1206static void fw_device_workfn(struct work_struct *work)
1207{
1208 struct fw_device *device = container_of(to_delayed_work(work),
1209 struct fw_device, work);
1210 device->workfn(work);
1211}
1212
1206void fw_node_event(struct fw_card *card, struct fw_node *node, int event) 1213void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
1207{ 1214{
1208 struct fw_device *device; 1215 struct fw_device *device;
@@ -1252,7 +1259,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
1252 * power-up after getting plugged in. We schedule the 1259 * power-up after getting plugged in. We schedule the
1253 * first config rom scan half a second after bus reset. 1260 * first config rom scan half a second after bus reset.
1254 */ 1261 */
1255 INIT_DELAYED_WORK(&device->work, fw_device_init); 1262 device->workfn = fw_device_init;
1263 INIT_DELAYED_WORK(&device->work, fw_device_workfn);
1256 fw_schedule_device_work(device, INITIAL_DELAY); 1264 fw_schedule_device_work(device, INITIAL_DELAY);
1257 break; 1265 break;
1258 1266
@@ -1268,7 +1276,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
1268 if (atomic_cmpxchg(&device->state, 1276 if (atomic_cmpxchg(&device->state,
1269 FW_DEVICE_RUNNING, 1277 FW_DEVICE_RUNNING,
1270 FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) { 1278 FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
1271 PREPARE_DELAYED_WORK(&device->work, fw_device_refresh); 1279 device->workfn = fw_device_refresh;
1272 fw_schedule_device_work(device, 1280 fw_schedule_device_work(device,
1273 device->is_local ? 0 : INITIAL_DELAY); 1281 device->is_local ? 0 : INITIAL_DELAY);
1274 } 1282 }
@@ -1283,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
1283 smp_wmb(); /* update node_id before generation */ 1291 smp_wmb(); /* update node_id before generation */
1284 device->generation = card->generation; 1292 device->generation = card->generation;
1285 if (atomic_read(&device->state) == FW_DEVICE_RUNNING) { 1293 if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
1286 PREPARE_DELAYED_WORK(&device->work, fw_device_update); 1294 device->workfn = fw_device_update;
1287 fw_schedule_device_work(device, 0); 1295 fw_schedule_device_work(device, 0);
1288 } 1296 }
1289 break; 1297 break;
@@ -1308,7 +1316,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
1308 device = node->data; 1316 device = node->data;
1309 if (atomic_xchg(&device->state, 1317 if (atomic_xchg(&device->state,
1310 FW_DEVICE_GONE) == FW_DEVICE_RUNNING) { 1318 FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
1311 PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); 1319 device->workfn = fw_device_shutdown;
1312 fw_schedule_device_work(device, 1320 fw_schedule_device_work(device,
1313 list_empty(&card->link) ? 0 : SHUTDOWN_DELAY); 1321 list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
1314 } 1322 }
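
The core-device.c hunks replace PREPARE_DELAYED_WORK() with a workfn pointer stored in the device plus a single fw_device_workfn() trampoline; the sbp2.c hunks that follow apply the same conversion. A userspace sketch of the indirection, with invented names and no real workqueue, is below: retargeting the work becomes an ordinary field assignment, and only one handler is ever registered.

/* Sketch of the workfn-trampoline pattern; no kernel APIs involved. */
#include <stdio.h>

struct device;
typedef void (*work_func_t)(struct device *);

struct device {
	const char *name;
	work_func_t workfn;              /* which phase runs next */
};

static void device_init(struct device *d)     { printf("%s: init\n", d->name); }
static void device_update(struct device *d)   { printf("%s: update\n", d->name); }
static void device_shutdown(struct device *d) { printf("%s: shutdown\n", d->name); }

/* the one handler the (imaginary) work queue ever sees */
static void device_workfn(struct device *d)
{
	d->workfn(d);
}

int main(void)
{
	struct device dev = { .name = "fw0", .workfn = device_init };

	device_workfn(&dev);             /* runs init */
	dev.workfn = device_update;      /* retarget by plain assignment */
	device_workfn(&dev);             /* runs update */
	dev.workfn = device_shutdown;
	device_workfn(&dev);             /* runs shutdown */
	return 0;
}
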
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 281029daf98c..7aef911fdc71 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -146,6 +146,7 @@ struct sbp2_logical_unit {
146 */ 146 */
147 int generation; 147 int generation;
148 int retries; 148 int retries;
149 work_func_t workfn;
149 struct delayed_work work; 150 struct delayed_work work;
150 bool has_sdev; 151 bool has_sdev;
151 bool blocked; 152 bool blocked;
@@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work)
864 /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ 865 /* set appropriate retry limit(s) in BUSY_TIMEOUT register */
865 sbp2_set_busy_timeout(lu); 866 sbp2_set_busy_timeout(lu);
866 867
867 PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); 868 lu->workfn = sbp2_reconnect;
868 sbp2_agent_reset(lu); 869 sbp2_agent_reset(lu);
869 870
870 /* This was a re-login. */ 871 /* This was a re-login. */
@@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work)
918 * If a bus reset happened, sbp2_update will have requeued 919 * If a bus reset happened, sbp2_update will have requeued
919 * lu->work already. Reset the work from reconnect to login. 920 * lu->work already. Reset the work from reconnect to login.
920 */ 921 */
921 PREPARE_DELAYED_WORK(&lu->work, sbp2_login); 922 lu->workfn = sbp2_login;
922} 923}
923 924
924static void sbp2_reconnect(struct work_struct *work) 925static void sbp2_reconnect(struct work_struct *work)
@@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work)
952 lu->retries++ >= 5) { 953 lu->retries++ >= 5) {
953 dev_err(tgt_dev(tgt), "failed to reconnect\n"); 954 dev_err(tgt_dev(tgt), "failed to reconnect\n");
954 lu->retries = 0; 955 lu->retries = 0;
955 PREPARE_DELAYED_WORK(&lu->work, sbp2_login); 956 lu->workfn = sbp2_login;
956 } 957 }
957 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); 958 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
958 959
@@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work)
972 sbp2_conditionally_unblock(lu); 973 sbp2_conditionally_unblock(lu);
973} 974}
974 975
976static void sbp2_lu_workfn(struct work_struct *work)
977{
978 struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
979 struct sbp2_logical_unit, work);
980 lu->workfn(work);
981}
982
975static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) 983static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
976{ 984{
977 struct sbp2_logical_unit *lu; 985 struct sbp2_logical_unit *lu;
@@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
998 lu->blocked = false; 1006 lu->blocked = false;
999 ++tgt->dont_block; 1007 ++tgt->dont_block;
1000 INIT_LIST_HEAD(&lu->orb_list); 1008 INIT_LIST_HEAD(&lu->orb_list);
1001 INIT_DELAYED_WORK(&lu->work, sbp2_login); 1009 lu->workfn = sbp2_login;
1010 INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
1002 1011
1003 list_add_tail(&lu->link, &tgt->lu_list); 1012 list_add_tail(&lu->link, &tgt->lu_list);
1004 return 0; 1013 return 0;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e22be8458d92..bbb17841a9e5 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4134,8 +4134,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
4134{ 4134{
4135 if (enable) 4135 if (enable)
4136 WREG32(CP_MEC_CNTL, 0); 4136 WREG32(CP_MEC_CNTL, 0);
4137 else 4137 else {
4138 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT)); 4138 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
4139 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
4140 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
4141 }
4139 udelay(50); 4142 udelay(50);
4140} 4143}
4141 4144
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 1ecb3f1070e3..94626ea90fa5 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -264,6 +264,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
264 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl); 264 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
265 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0); 265 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
266 } 266 }
267 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
268 rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
267} 269}
268 270
269/** 271/**
@@ -291,6 +293,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
291 u32 me_cntl, reg_offset; 293 u32 me_cntl, reg_offset;
292 int i; 294 int i;
293 295
296 if (enable == false) {
297 cik_sdma_gfx_stop(rdev);
298 cik_sdma_rlc_stop(rdev);
299 }
300
294 for (i = 0; i < 2; i++) { 301 for (i = 0; i < 2; i++) {
295 if (i == 0) 302 if (i == 0)
296 reg_offset = SDMA0_REGISTER_OFFSET; 303 reg_offset = SDMA0_REGISTER_OFFSET;
@@ -420,10 +427,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
420 if (!rdev->sdma_fw) 427 if (!rdev->sdma_fw)
421 return -EINVAL; 428 return -EINVAL;
422 429
423 /* stop the gfx rings and rlc compute queues */
424 cik_sdma_gfx_stop(rdev);
425 cik_sdma_rlc_stop(rdev);
426
427 /* halt the MEs */ 430 /* halt the MEs */
428 cik_sdma_enable(rdev, false); 431 cik_sdma_enable(rdev, false);
429 432
@@ -492,9 +495,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
492 */ 495 */
493void cik_sdma_fini(struct radeon_device *rdev) 496void cik_sdma_fini(struct radeon_device *rdev)
494{ 497{
495 /* stop the gfx rings and rlc compute queues */
496 cik_sdma_gfx_stop(rdev);
497 cik_sdma_rlc_stop(rdev);
498 /* halt the MEs */ 498 /* halt the MEs */
499 cik_sdma_enable(rdev, false); 499 cik_sdma_enable(rdev, false);
500 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); 500 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
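
The cik_sdma.c hunks move the gfx/rlc stop calls into the disable branch of cik_sdma_enable(), so load_microcode() and fini() no longer repeat them. A small sketch of that "centralise teardown in the disable path" restructuring, with invented names, follows.

#include <stdbool.h>
#include <stdio.h>

static void gfx_stop(void) { puts("gfx ring stopped"); }
static void rlc_stop(void) { puts("rlc queues stopped"); }

static void sdma_enable(bool enable)
{
	if (!enable) {          /* every disable now implies a stop */
		gfx_stop();
		rlc_stop();
	}
	printf("engines %s\n", enable ? "running" : "halted");
}

int main(void)
{
	sdma_enable(false);     /* callers just disable; stopping is implied */
	sdma_enable(true);
	return 0;
}
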
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 2aecd6dc2610..66ed3ea71440 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -33,6 +33,13 @@
33#include <linux/vga_switcheroo.h> 33#include <linux/vga_switcheroo.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
36
37#if defined(CONFIG_VGA_SWITCHEROO)
38bool radeon_is_px(void);
39#else
40static inline bool radeon_is_px(void) { return false; }
41#endif
42
36/** 43/**
37 * radeon_driver_unload_kms - Main unload function for KMS. 44 * radeon_driver_unload_kms - Main unload function for KMS.
38 * 45 *
@@ -130,7 +137,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
130 "Error during ACPI methods call\n"); 137 "Error during ACPI methods call\n");
131 } 138 }
132 139
133 if (radeon_runtime_pm != 0) { 140 if ((radeon_runtime_pm == 1) ||
141 ((radeon_runtime_pm == -1) && radeon_is_px())) {
134 pm_runtime_use_autosuspend(dev->dev); 142 pm_runtime_use_autosuspend(dev->dev);
135 pm_runtime_set_autosuspend_delay(dev->dev, 5000); 143 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
136 pm_runtime_set_active(dev->dev); 144 pm_runtime_set_active(dev->dev);
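
The radeon_kms.c change turns radeon_runtime_pm into a real tristate: 1 forces runtime PM on, 0 keeps it off, and -1 enables it only on PowerXpress (PX) hardware. A tiny sketch of that parameter logic, with invented names, is below.

#include <stdbool.h>
#include <stdio.h>

static int runtime_pm = -1;          /* module-parameter stand-in: -1/0/1 */

static bool is_px_platform(void)     /* stand-in for radeon_is_px() */
{
	return false;
}

static bool want_runtime_pm(void)
{
	return runtime_pm == 1 ||
	       (runtime_pm == -1 && is_px_platform());
}

int main(void)
{
	for (runtime_pm = -1; runtime_pm <= 1; runtime_pm++)
		printf("runtime_pm=%d -> %s\n", runtime_pm,
		       want_runtime_pm() ? "enabled" : "disabled");
	return 0;
}
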
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a06651309388..214b7992a3aa 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
351 351
352moved: 352moved:
353 if (bo->evicted) { 353 if (bo->evicted) {
354 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); 354 if (bdev->driver->invalidate_caches) {
355 if (ret) 355 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
356 pr_err("Can not flush read caches\n"); 356 if (ret)
357 pr_err("Can not flush read caches\n");
358 }
357 bo->evicted = false; 359 bo->evicted = false;
358 } 360 }
359 361
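
The ttm_bo.c fix treats the driver's invalidate_caches hook as optional and tests the pointer before calling it. A short sketch of that defensive pattern, with invented names, follows.

#include <stdio.h>

struct driver_ops {
	int (*invalidate_caches)(int placement);   /* may legitimately be NULL */
};

static void handle_move(const struct driver_ops *ops, int placement)
{
	if (ops->invalidate_caches) {
		if (ops->invalidate_caches(placement))
			fprintf(stderr, "Can not flush read caches\n");
	}
}

static int noisy_invalidate(int placement)
{
	printf("flushing for placement %d\n", placement);
	return 0;
}

int main(void)
{
	struct driver_ops with    = { .invalidate_caches = noisy_invalidate };
	struct driver_ops without = { 0 };

	handle_move(&with, 3);
	handle_move(&without, 3);   /* no crash: the hook is optional */
	return 0;
}
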
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 801231c9ae48..0ce48e5a9cb4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -339,11 +339,13 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
339 vma->vm_private_data = bo; 339 vma->vm_private_data = bo;
340 340
341 /* 341 /*
342 * PFNMAP is faster than MIXEDMAP due to reduced page 342 * We'd like to use VM_PFNMAP on shared mappings, where
343 * administration. So use MIXEDMAP only if private VMA, where 343 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
344 * we need to support COW. 344 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
345 * bad for performance. Until that has been sorted out, use
346 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
345 */ 347 */
346 vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; 348 vma->vm_flags |= VM_MIXEDMAP;
347 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 349 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
348 return 0; 350 return 0;
349out_unref: 351out_unref:
@@ -359,7 +361,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
359 361
360 vma->vm_ops = &ttm_bo_vm_ops; 362 vma->vm_ops = &ttm_bo_vm_ops;
361 vma->vm_private_data = ttm_bo_reference(bo); 363 vma->vm_private_data = ttm_bo_reference(bo);
362 vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; 364 vma->vm_flags |= VM_MIXEDMAP;
363 vma->vm_flags |= VM_IO | VM_DONTEXPAND; 365 vma->vm_flags |= VM_IO | VM_DONTEXPAND;
364 return 0; 366 return 0;
365} 367}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 82468d902915..e7af580ab977 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -830,6 +830,24 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
830 if (unlikely(ret != 0)) 830 if (unlikely(ret != 0))
831 goto out_unlock; 831 goto out_unlock;
832 832
833 /*
834 * A gb-aware client referencing a shared surface will
835 * expect a backup buffer to be present.
836 */
837 if (dev_priv->has_mob && req->shareable) {
838 uint32_t backup_handle;
839
840 ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
841 res->backup_size,
842 true,
843 &backup_handle,
844 &res->backup);
845 if (unlikely(ret != 0)) {
846 vmw_resource_unreference(&res);
847 goto out_unlock;
848 }
849 }
850
833 tmp = vmw_resource_reference(&srf->res); 851 tmp = vmw_resource_reference(&srf->res);
834 ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, 852 ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
835 req->shareable, VMW_RES_SURFACE, 853 req->shareable, VMW_RES_SURFACE,
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index befe0e336471..24883b4d1a49 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -43,6 +43,7 @@
43#define G25_REV_MIN 0x22 43#define G25_REV_MIN 0x22
44#define G27_REV_MAJ 0x12 44#define G27_REV_MAJ 0x12
45#define G27_REV_MIN 0x38 45#define G27_REV_MIN 0x38
46#define G27_2_REV_MIN 0x39
46 47
47#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev) 48#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
48 49
@@ -130,6 +131,7 @@ static const struct lg4ff_usb_revision lg4ff_revs[] = {
130 {DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */ 131 {DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */
131 {G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */ 132 {G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */
132 {G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */ 133 {G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */
134 {G27_REV_MAJ, G27_2_REV_MIN, &native_g27}, /* G27 v2 */
133}; 135};
134 136
135/* Recalculates X axis value accordingly to currently selected range */ 137/* Recalculates X axis value accordingly to currently selected range */
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 12354055d474..2f19b15f47f2 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -42,6 +42,7 @@
42#define DUALSHOCK4_CONTROLLER_BT BIT(6) 42#define DUALSHOCK4_CONTROLLER_BT BIT(6)
43 43
44#define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB) 44#define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB)
45#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER_USB | DUALSHOCK4_CONTROLLER_USB)
45 46
46#define MAX_LEDS 4 47#define MAX_LEDS 4
47 48
@@ -499,6 +500,7 @@ struct sony_sc {
499 __u8 right; 500 __u8 right;
500#endif 501#endif
501 502
503 __u8 worker_initialized;
502 __u8 led_state[MAX_LEDS]; 504 __u8 led_state[MAX_LEDS];
503 __u8 led_count; 505 __u8 led_count;
504}; 506};
@@ -993,22 +995,11 @@ static int sony_init_ff(struct hid_device *hdev)
993 return input_ff_create_memless(input_dev, NULL, sony_play_effect); 995 return input_ff_create_memless(input_dev, NULL, sony_play_effect);
994} 996}
995 997
996static void sony_destroy_ff(struct hid_device *hdev)
997{
998 struct sony_sc *sc = hid_get_drvdata(hdev);
999
1000 cancel_work_sync(&sc->state_worker);
1001}
1002
1003#else 998#else
1004static int sony_init_ff(struct hid_device *hdev) 999static int sony_init_ff(struct hid_device *hdev)
1005{ 1000{
1006 return 0; 1001 return 0;
1007} 1002}
1008
1009static void sony_destroy_ff(struct hid_device *hdev)
1010{
1011}
1012#endif 1003#endif
1013 1004
1014static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size) 1005static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size)
@@ -1077,6 +1068,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
1077 if (sc->quirks & SIXAXIS_CONTROLLER_USB) { 1068 if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
1078 hdev->hid_output_raw_report = sixaxis_usb_output_raw_report; 1069 hdev->hid_output_raw_report = sixaxis_usb_output_raw_report;
1079 ret = sixaxis_set_operational_usb(hdev); 1070 ret = sixaxis_set_operational_usb(hdev);
1071
1072 sc->worker_initialized = 1;
1080 INIT_WORK(&sc->state_worker, sixaxis_state_worker); 1073 INIT_WORK(&sc->state_worker, sixaxis_state_worker);
1081 } 1074 }
1082 else if (sc->quirks & SIXAXIS_CONTROLLER_BT) 1075 else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
@@ -1087,6 +1080,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
1087 if (ret < 0) 1080 if (ret < 0)
1088 goto err_stop; 1081 goto err_stop;
1089 1082
1083 sc->worker_initialized = 1;
1090 INIT_WORK(&sc->state_worker, dualshock4_state_worker); 1084 INIT_WORK(&sc->state_worker, dualshock4_state_worker);
1091 } else { 1085 } else {
1092 ret = 0; 1086 ret = 0;
@@ -1101,9 +1095,11 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
1101 goto err_stop; 1095 goto err_stop;
1102 } 1096 }
1103 1097
1104 ret = sony_init_ff(hdev); 1098 if (sc->quirks & SONY_FF_SUPPORT) {
1105 if (ret < 0) 1099 ret = sony_init_ff(hdev);
1106 goto err_stop; 1100 if (ret < 0)
1101 goto err_stop;
1102 }
1107 1103
1108 return 0; 1104 return 0;
1109err_stop: 1105err_stop:
@@ -1120,7 +1116,8 @@ static void sony_remove(struct hid_device *hdev)
1120 if (sc->quirks & SONY_LED_SUPPORT) 1116 if (sc->quirks & SONY_LED_SUPPORT)
1121 sony_leds_remove(hdev); 1117 sony_leds_remove(hdev);
1122 1118
1123 sony_destroy_ff(hdev); 1119 if (sc->worker_initialized)
1120 cancel_work_sync(&sc->state_worker);
1124 1121
1125 hid_hw_stop(hdev); 1122 hid_hw_stop(hdev);
1126} 1123}
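
The hid-sony.c hunks drop sony_destroy_ff() in favour of a worker_initialized flag set wherever the state worker is armed, with a single conditional cancel_work_sync() in sony_remove(). A userspace sketch of that flag-and-cancel pattern, with invented names and a printf standing in for the real cancel, is below.

#include <stdbool.h>
#include <stdio.h>

struct sony_like {
	bool worker_initialized;
};

static void init_worker(struct sony_like *sc)
{
	sc->worker_initialized = true;
	printf("state worker armed\n");
}

static void cancel_worker_sync(struct sony_like *sc)
{
	(void)sc;
	printf("state worker cancelled\n");
}

static void demo_probe(struct sony_like *sc, bool needs_worker)
{
	if (needs_worker)                /* only some device types arm a worker */
		init_worker(sc);
}

static void demo_remove(struct sony_like *sc)
{
	if (sc->worker_initialized)      /* safe for devices that never armed one */
		cancel_worker_sync(sc);
}

int main(void)
{
	struct sony_like sixaxis = { 0 }, buzz = { 0 };

	demo_probe(&sixaxis, true);
	demo_probe(&buzz, false);
	demo_remove(&sixaxis);
	demo_remove(&buzz);
	return 0;
}
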
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index cb0137b3718d..ab24ce2eb28f 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -320,13 +320,13 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
320 hid_hw_close(hidraw->hid); 320 hid_hw_close(hidraw->hid);
321 wake_up_interruptible(&hidraw->wait); 321 wake_up_interruptible(&hidraw->wait);
322 } 322 }
323 device_destroy(hidraw_class,
324 MKDEV(hidraw_major, hidraw->minor));
323 } else { 325 } else {
324 --hidraw->open; 326 --hidraw->open;
325 } 327 }
326 if (!hidraw->open) { 328 if (!hidraw->open) {
327 if (!hidraw->exist) { 329 if (!hidraw->exist) {
328 device_destroy(hidraw_class,
329 MKDEV(hidraw_major, hidraw->minor));
330 hidraw_table[hidraw->minor] = NULL; 330 hidraw_table[hidraw->minor] = NULL;
331 kfree(hidraw); 331 kfree(hidraw);
332 } else { 332 } else {
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f5ed03164d86..de17c5593d97 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -387,7 +387,7 @@ config I2C_CBUS_GPIO
387 387
388config I2C_CPM 388config I2C_CPM
389 tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)" 389 tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
390 depends on (CPM1 || CPM2) && OF_I2C 390 depends on CPM1 || CPM2
391 help 391 help
392 This supports the use of the I2C interface on Freescale 392 This supports the use of the I2C interface on Freescale
393 processors with CPM1 or CPM2. 393 processors with CPM1 or CPM2.
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index d18d08a076e8..8ee228e9ab5a 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -492,12 +492,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
492 isert_conn->state = ISER_CONN_INIT; 492 isert_conn->state = ISER_CONN_INIT;
493 INIT_LIST_HEAD(&isert_conn->conn_accept_node); 493 INIT_LIST_HEAD(&isert_conn->conn_accept_node);
494 init_completion(&isert_conn->conn_login_comp); 494 init_completion(&isert_conn->conn_login_comp);
495 init_waitqueue_head(&isert_conn->conn_wait); 495 init_completion(&isert_conn->conn_wait);
496 init_waitqueue_head(&isert_conn->conn_wait_comp_err); 496 init_completion(&isert_conn->conn_wait_comp_err);
497 kref_init(&isert_conn->conn_kref); 497 kref_init(&isert_conn->conn_kref);
498 kref_get(&isert_conn->conn_kref); 498 kref_get(&isert_conn->conn_kref);
499 mutex_init(&isert_conn->conn_mutex); 499 mutex_init(&isert_conn->conn_mutex);
500 mutex_init(&isert_conn->conn_comp_mutex);
501 spin_lock_init(&isert_conn->conn_lock); 500 spin_lock_init(&isert_conn->conn_lock);
502 501
503 cma_id->context = isert_conn; 502 cma_id->context = isert_conn;
@@ -688,11 +687,11 @@ isert_disconnect_work(struct work_struct *work)
688 687
689 pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 688 pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
690 mutex_lock(&isert_conn->conn_mutex); 689 mutex_lock(&isert_conn->conn_mutex);
691 isert_conn->state = ISER_CONN_DOWN; 690 if (isert_conn->state == ISER_CONN_UP)
691 isert_conn->state = ISER_CONN_TERMINATING;
692 692
693 if (isert_conn->post_recv_buf_count == 0 && 693 if (isert_conn->post_recv_buf_count == 0 &&
694 atomic_read(&isert_conn->post_send_buf_count) == 0) { 694 atomic_read(&isert_conn->post_send_buf_count) == 0) {
695 pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
696 mutex_unlock(&isert_conn->conn_mutex); 695 mutex_unlock(&isert_conn->conn_mutex);
697 goto wake_up; 696 goto wake_up;
698 } 697 }
@@ -712,7 +711,7 @@ isert_disconnect_work(struct work_struct *work)
712 mutex_unlock(&isert_conn->conn_mutex); 711 mutex_unlock(&isert_conn->conn_mutex);
713 712
714wake_up: 713wake_up:
715 wake_up(&isert_conn->conn_wait); 714 complete(&isert_conn->conn_wait);
716 isert_put_conn(isert_conn); 715 isert_put_conn(isert_conn);
717} 716}
718 717
@@ -888,16 +887,17 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
888 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED 887 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
889 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. 888 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
890 */ 889 */
891 mutex_lock(&isert_conn->conn_comp_mutex); 890 mutex_lock(&isert_conn->conn_mutex);
892 if (coalesce && 891 if (coalesce && isert_conn->state == ISER_CONN_UP &&
893 ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { 892 ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
893 tx_desc->llnode_active = true;
894 llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); 894 llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
895 mutex_unlock(&isert_conn->conn_comp_mutex); 895 mutex_unlock(&isert_conn->conn_mutex);
896 return; 896 return;
897 } 897 }
898 isert_conn->conn_comp_batch = 0; 898 isert_conn->conn_comp_batch = 0;
899 tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); 899 tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
900 mutex_unlock(&isert_conn->conn_comp_mutex); 900 mutex_unlock(&isert_conn->conn_mutex);
901 901
902 send_wr->send_flags = IB_SEND_SIGNALED; 902 send_wr->send_flags = IB_SEND_SIGNALED;
903} 903}
@@ -1464,7 +1464,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1464 case ISCSI_OP_SCSI_CMD: 1464 case ISCSI_OP_SCSI_CMD:
1465 spin_lock_bh(&conn->cmd_lock); 1465 spin_lock_bh(&conn->cmd_lock);
1466 if (!list_empty(&cmd->i_conn_node)) 1466 if (!list_empty(&cmd->i_conn_node))
1467 list_del(&cmd->i_conn_node); 1467 list_del_init(&cmd->i_conn_node);
1468 spin_unlock_bh(&conn->cmd_lock); 1468 spin_unlock_bh(&conn->cmd_lock);
1469 1469
1470 if (cmd->data_direction == DMA_TO_DEVICE) 1470 if (cmd->data_direction == DMA_TO_DEVICE)
@@ -1476,7 +1476,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1476 case ISCSI_OP_SCSI_TMFUNC: 1476 case ISCSI_OP_SCSI_TMFUNC:
1477 spin_lock_bh(&conn->cmd_lock); 1477 spin_lock_bh(&conn->cmd_lock);
1478 if (!list_empty(&cmd->i_conn_node)) 1478 if (!list_empty(&cmd->i_conn_node))
1479 list_del(&cmd->i_conn_node); 1479 list_del_init(&cmd->i_conn_node);
1480 spin_unlock_bh(&conn->cmd_lock); 1480 spin_unlock_bh(&conn->cmd_lock);
1481 1481
1482 transport_generic_free_cmd(&cmd->se_cmd, 0); 1482 transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1486,7 +1486,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1486 case ISCSI_OP_TEXT: 1486 case ISCSI_OP_TEXT:
1487 spin_lock_bh(&conn->cmd_lock); 1487 spin_lock_bh(&conn->cmd_lock);
1488 if (!list_empty(&cmd->i_conn_node)) 1488 if (!list_empty(&cmd->i_conn_node))
1489 list_del(&cmd->i_conn_node); 1489 list_del_init(&cmd->i_conn_node);
1490 spin_unlock_bh(&conn->cmd_lock); 1490 spin_unlock_bh(&conn->cmd_lock);
1491 1491
1492 /* 1492 /*
@@ -1549,6 +1549,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1549 iscsit_stop_dataout_timer(cmd); 1549 iscsit_stop_dataout_timer(cmd);
1550 device->unreg_rdma_mem(isert_cmd, isert_conn); 1550 device->unreg_rdma_mem(isert_cmd, isert_conn);
1551 cmd->write_data_done = wr->cur_rdma_length; 1551 cmd->write_data_done = wr->cur_rdma_length;
1552 wr->send_wr_num = 0;
1552 1553
1553 pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1554 pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1554 spin_lock_bh(&cmd->istate_lock); 1555 spin_lock_bh(&cmd->istate_lock);
@@ -1589,7 +1590,7 @@ isert_do_control_comp(struct work_struct *work)
1589 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); 1590 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1590 /* 1591 /*
1591 * Call atomic_dec(&isert_conn->post_send_buf_count) 1592 * Call atomic_dec(&isert_conn->post_send_buf_count)
1592 * from isert_free_conn() 1593 * from isert_wait_conn()
1593 */ 1594 */
1594 isert_conn->logout_posted = true; 1595 isert_conn->logout_posted = true;
1595 iscsit_logout_post_handler(cmd, cmd->conn); 1596 iscsit_logout_post_handler(cmd, cmd->conn);
@@ -1613,6 +1614,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
1613 struct ib_device *ib_dev) 1614 struct ib_device *ib_dev)
1614{ 1615{
1615 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1616 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1617 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1616 1618
1617 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || 1619 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1618 cmd->i_state == ISTATE_SEND_LOGOUTRSP || 1620 cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1624,7 +1626,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
1624 queue_work(isert_comp_wq, &isert_cmd->comp_work); 1626 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1625 return; 1627 return;
1626 } 1628 }
1627 atomic_dec(&isert_conn->post_send_buf_count); 1629 atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
1628 1630
1629 cmd->i_state = ISTATE_SENT_STATUS; 1631 cmd->i_state = ISTATE_SENT_STATUS;
1630 isert_completion_put(tx_desc, isert_cmd, ib_dev); 1632 isert_completion_put(tx_desc, isert_cmd, ib_dev);
@@ -1662,7 +1664,7 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
1662 case ISER_IB_RDMA_READ: 1664 case ISER_IB_RDMA_READ:
1663 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); 1665 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
1664 1666
1665 atomic_dec(&isert_conn->post_send_buf_count); 1667 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1666 isert_completion_rdma_read(tx_desc, isert_cmd); 1668 isert_completion_rdma_read(tx_desc, isert_cmd);
1667 break; 1669 break;
1668 default: 1670 default:
@@ -1691,31 +1693,76 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
1691} 1693}
1692 1694
1693static void 1695static void
1694isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) 1696isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
1697{
1698 struct llist_node *llnode;
1699 struct isert_rdma_wr *wr;
1700 struct iser_tx_desc *t;
1701
1702 mutex_lock(&isert_conn->conn_mutex);
1703 llnode = llist_del_all(&isert_conn->conn_comp_llist);
1704 isert_conn->conn_comp_batch = 0;
1705 mutex_unlock(&isert_conn->conn_mutex);
1706
1707 while (llnode) {
1708 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
1709 llnode = llist_next(llnode);
1710 wr = &t->isert_cmd->rdma_wr;
1711
1712 atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
1713 isert_completion_put(t, t->isert_cmd, ib_dev);
1714 }
1715}
1716
1717static void
1718isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1695{ 1719{
1696 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1720 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1721 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1722 struct llist_node *llnode = tx_desc->comp_llnode_batch;
1723 struct isert_rdma_wr *wr;
1724 struct iser_tx_desc *t;
1697 1725
1698 if (tx_desc) { 1726 while (llnode) {
1699 struct isert_cmd *isert_cmd = tx_desc->isert_cmd; 1727 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
1728 llnode = llist_next(llnode);
1729 wr = &t->isert_cmd->rdma_wr;
1700 1730
1701 if (!isert_cmd) 1731 atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
1702 isert_unmap_tx_desc(tx_desc, ib_dev); 1732 isert_completion_put(t, t->isert_cmd, ib_dev);
1703 else
1704 isert_completion_put(tx_desc, isert_cmd, ib_dev);
1705 } 1733 }
1734 tx_desc->comp_llnode_batch = NULL;
1706 1735
1707 if (isert_conn->post_recv_buf_count == 0 && 1736 if (!isert_cmd)
1708 atomic_read(&isert_conn->post_send_buf_count) == 0) { 1737 isert_unmap_tx_desc(tx_desc, ib_dev);
1709 pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 1738 else
1710 pr_debug("Calling wake_up from isert_cq_comp_err\n"); 1739 isert_completion_put(tx_desc, isert_cmd, ib_dev);
1740}
1711 1741
1712 mutex_lock(&isert_conn->conn_mutex); 1742static void
1713 if (isert_conn->state != ISER_CONN_DOWN) 1743isert_cq_rx_comp_err(struct isert_conn *isert_conn)
1714 isert_conn->state = ISER_CONN_TERMINATING; 1744{
1715 mutex_unlock(&isert_conn->conn_mutex); 1745 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1746 struct iscsi_conn *conn = isert_conn->conn;
1716 1747
1717 wake_up(&isert_conn->conn_wait_comp_err); 1748 if (isert_conn->post_recv_buf_count)
1749 return;
1750
1751 isert_cq_drain_comp_llist(isert_conn, ib_dev);
1752
1753 if (conn->sess) {
1754 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
1755 target_wait_for_sess_cmds(conn->sess->se_sess);
1718 } 1756 }
1757
1758 while (atomic_read(&isert_conn->post_send_buf_count))
1759 msleep(3000);
1760
1761 mutex_lock(&isert_conn->conn_mutex);
1762 isert_conn->state = ISER_CONN_DOWN;
1763 mutex_unlock(&isert_conn->conn_mutex);
1764
1765 complete(&isert_conn->conn_wait_comp_err);
1719} 1766}
1720 1767
1721static void 1768static void
@@ -1740,8 +1787,14 @@ isert_cq_tx_work(struct work_struct *work)
1740 pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); 1787 pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1741 pr_debug("TX wc.status: 0x%08x\n", wc.status); 1788 pr_debug("TX wc.status: 0x%08x\n", wc.status);
1742 pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); 1789 pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
1743 atomic_dec(&isert_conn->post_send_buf_count); 1790
1744 isert_cq_comp_err(tx_desc, isert_conn); 1791 if (wc.wr_id != ISER_FASTREG_LI_WRID) {
1792 if (tx_desc->llnode_active)
1793 continue;
1794
1795 atomic_dec(&isert_conn->post_send_buf_count);
1796 isert_cq_tx_comp_err(tx_desc, isert_conn);
1797 }
1745 } 1798 }
1746 } 1799 }
1747 1800
@@ -1784,7 +1837,7 @@ isert_cq_rx_work(struct work_struct *work)
1784 wc.vendor_err); 1837 wc.vendor_err);
1785 } 1838 }
1786 isert_conn->post_recv_buf_count--; 1839 isert_conn->post_recv_buf_count--;
1787 isert_cq_comp_err(NULL, isert_conn); 1840 isert_cq_rx_comp_err(isert_conn);
1788 } 1841 }
1789 } 1842 }
1790 1843
@@ -2202,6 +2255,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
2202 2255
2203 if (!fr_desc->valid) { 2256 if (!fr_desc->valid) {
2204 memset(&inv_wr, 0, sizeof(inv_wr)); 2257 memset(&inv_wr, 0, sizeof(inv_wr));
2258 inv_wr.wr_id = ISER_FASTREG_LI_WRID;
2205 inv_wr.opcode = IB_WR_LOCAL_INV; 2259 inv_wr.opcode = IB_WR_LOCAL_INV;
2206 inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey; 2260 inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
2207 wr = &inv_wr; 2261 wr = &inv_wr;
@@ -2212,6 +2266,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
2212 2266
2213 /* Prepare FASTREG WR */ 2267 /* Prepare FASTREG WR */
2214 memset(&fr_wr, 0, sizeof(fr_wr)); 2268 memset(&fr_wr, 0, sizeof(fr_wr));
2269 fr_wr.wr_id = ISER_FASTREG_LI_WRID;
2215 fr_wr.opcode = IB_WR_FAST_REG_MR; 2270 fr_wr.opcode = IB_WR_FAST_REG_MR;
2216 fr_wr.wr.fast_reg.iova_start = 2271 fr_wr.wr.fast_reg.iova_start =
2217 fr_desc->data_frpl->page_list[0] + page_off; 2272 fr_desc->data_frpl->page_list[0] + page_off;
@@ -2377,12 +2432,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2377 isert_init_send_wr(isert_conn, isert_cmd, 2432 isert_init_send_wr(isert_conn, isert_cmd,
2378 &isert_cmd->tx_desc.send_wr, true); 2433 &isert_cmd->tx_desc.send_wr, true);
2379 2434
2380 atomic_inc(&isert_conn->post_send_buf_count); 2435 atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
2381 2436
2382 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); 2437 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2383 if (rc) { 2438 if (rc) {
2384 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); 2439 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
2385 atomic_dec(&isert_conn->post_send_buf_count); 2440 atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
2386 } 2441 }
2387 pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n", 2442 pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
2388 isert_cmd); 2443 isert_cmd);
@@ -2410,12 +2465,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2410 return rc; 2465 return rc;
2411 } 2466 }
2412 2467
2413 atomic_inc(&isert_conn->post_send_buf_count); 2468 atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
2414 2469
2415 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); 2470 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2416 if (rc) { 2471 if (rc) {
2417 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); 2472 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
2418 atomic_dec(&isert_conn->post_send_buf_count); 2473 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
2419 } 2474 }
2420 pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", 2475 pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
2421 isert_cmd); 2476 isert_cmd);
@@ -2702,22 +2757,11 @@ isert_free_np(struct iscsi_np *np)
2702 kfree(isert_np); 2757 kfree(isert_np);
2703} 2758}
2704 2759
2705static int isert_check_state(struct isert_conn *isert_conn, int state) 2760static void isert_wait_conn(struct iscsi_conn *conn)
2706{
2707 int ret;
2708
2709 mutex_lock(&isert_conn->conn_mutex);
2710 ret = (isert_conn->state == state);
2711 mutex_unlock(&isert_conn->conn_mutex);
2712
2713 return ret;
2714}
2715
2716static void isert_free_conn(struct iscsi_conn *conn)
2717{ 2761{
2718 struct isert_conn *isert_conn = conn->context; 2762 struct isert_conn *isert_conn = conn->context;
2719 2763
2720 pr_debug("isert_free_conn: Starting \n"); 2764 pr_debug("isert_wait_conn: Starting \n");
2721 /* 2765 /*
2722 * Decrement post_send_buf_count for special case when called 2766 * Decrement post_send_buf_count for special case when called
2723 * from isert_do_control_comp() -> iscsit_logout_post_handler() 2767 * from isert_do_control_comp() -> iscsit_logout_post_handler()
@@ -2727,38 +2771,29 @@ static void isert_free_conn(struct iscsi_conn *conn)
2727 atomic_dec(&isert_conn->post_send_buf_count); 2771 atomic_dec(&isert_conn->post_send_buf_count);
2728 2772
2729 if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) { 2773 if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
2730 pr_debug("Calling rdma_disconnect from isert_free_conn\n"); 2774 pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
2731 rdma_disconnect(isert_conn->conn_cm_id); 2775 rdma_disconnect(isert_conn->conn_cm_id);
2732 } 2776 }
2733 /* 2777 /*
2734 * Only wait for conn_wait_comp_err if the isert_conn made it 2778 * Only wait for conn_wait_comp_err if the isert_conn made it
2735 * into full feature phase.. 2779 * into full feature phase..
2736 */ 2780 */
2737 if (isert_conn->state == ISER_CONN_UP) {
2738 pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
2739 isert_conn->state);
2740 mutex_unlock(&isert_conn->conn_mutex);
2741
2742 wait_event(isert_conn->conn_wait_comp_err,
2743 (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
2744
2745 wait_event(isert_conn->conn_wait,
2746 (isert_check_state(isert_conn, ISER_CONN_DOWN)));
2747
2748 isert_put_conn(isert_conn);
2749 return;
2750 }
2751 if (isert_conn->state == ISER_CONN_INIT) { 2781 if (isert_conn->state == ISER_CONN_INIT) {
2752 mutex_unlock(&isert_conn->conn_mutex); 2782 mutex_unlock(&isert_conn->conn_mutex);
2753 isert_put_conn(isert_conn);
2754 return; 2783 return;
2755 } 2784 }
2756 pr_debug("isert_free_conn: wait_event conn_wait %d\n", 2785 if (isert_conn->state == ISER_CONN_UP)
2757 isert_conn->state); 2786 isert_conn->state = ISER_CONN_TERMINATING;
2758 mutex_unlock(&isert_conn->conn_mutex); 2787 mutex_unlock(&isert_conn->conn_mutex);
2759 2788
2760 wait_event(isert_conn->conn_wait, 2789 wait_for_completion(&isert_conn->conn_wait_comp_err);
2761 (isert_check_state(isert_conn, ISER_CONN_DOWN))); 2790
2791 wait_for_completion(&isert_conn->conn_wait);
2792}
2793
2794static void isert_free_conn(struct iscsi_conn *conn)
2795{
2796 struct isert_conn *isert_conn = conn->context;
2762 2797
2763 isert_put_conn(isert_conn); 2798 isert_put_conn(isert_conn);
2764} 2799}
@@ -2771,6 +2806,7 @@ static struct iscsit_transport iser_target_transport = {
2771 .iscsit_setup_np = isert_setup_np, 2806 .iscsit_setup_np = isert_setup_np,
2772 .iscsit_accept_np = isert_accept_np, 2807 .iscsit_accept_np = isert_accept_np,
2773 .iscsit_free_np = isert_free_np, 2808 .iscsit_free_np = isert_free_np,
2809 .iscsit_wait_conn = isert_wait_conn,
2774 .iscsit_free_conn = isert_free_conn, 2810 .iscsit_free_conn = isert_free_conn,
2775 .iscsit_get_login_rx = isert_get_login_rx, 2811 .iscsit_get_login_rx = isert_get_login_rx,
2776 .iscsit_put_login_tx = isert_put_login_tx, 2812 .iscsit_put_login_tx = isert_put_login_tx,
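
The isert change above replaces wait_event() polling on the connection state (the removed isert_check_state() helper) with completion objects signalled once from the teardown paths, and splits the old free path into wait + free. A minimal sketch of that pattern, using hypothetical demo_* names rather than the driver's own:

#include <linux/completion.h>

struct demo_conn {
	struct completion	comp_err_done;	/* was: wait_queue_head_t */
	struct completion	conn_done;	/* was: wait_queue_head_t */
};

static void demo_conn_init(struct demo_conn *c)
{
	init_completion(&c->comp_err_done);
	init_completion(&c->conn_done);
}

/* signalled once from the error / disconnect paths */
static void demo_conn_terminated(struct demo_conn *c)
{
	complete(&c->comp_err_done);
	complete(&c->conn_done);
}

/* waiter side: replaces the wait_event() + state-polling helper */
static void demo_wait_conn(struct demo_conn *c)
{
	wait_for_completion(&c->comp_err_done);
	wait_for_completion(&c->conn_done);
}

A completion carries its own "already happened" state, so the waiter cannot miss a wake-up the way a racy state check around wait_event() can.
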
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 708a069002f3..f6ae7f5dd408 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -6,6 +6,7 @@
6 6
7#define ISERT_RDMA_LISTEN_BACKLOG 10 7#define ISERT_RDMA_LISTEN_BACKLOG 10
8#define ISCSI_ISER_SG_TABLESIZE 256 8#define ISCSI_ISER_SG_TABLESIZE 256
9#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
9 10
10enum isert_desc_type { 11enum isert_desc_type {
11 ISCSI_TX_CONTROL, 12 ISCSI_TX_CONTROL,
@@ -45,6 +46,7 @@ struct iser_tx_desc {
45 struct isert_cmd *isert_cmd; 46 struct isert_cmd *isert_cmd;
46 struct llist_node *comp_llnode_batch; 47 struct llist_node *comp_llnode_batch;
47 struct llist_node comp_llnode; 48 struct llist_node comp_llnode;
49 bool llnode_active;
48 struct ib_send_wr send_wr; 50 struct ib_send_wr send_wr;
49} __packed; 51} __packed;
50 52
@@ -116,8 +118,8 @@ struct isert_conn {
116 struct isert_device *conn_device; 118 struct isert_device *conn_device;
117 struct work_struct conn_logout_work; 119 struct work_struct conn_logout_work;
118 struct mutex conn_mutex; 120 struct mutex conn_mutex;
119 wait_queue_head_t conn_wait; 121 struct completion conn_wait;
120 wait_queue_head_t conn_wait_comp_err; 122 struct completion conn_wait_comp_err;
121 struct kref conn_kref; 123 struct kref conn_kref;
122 struct list_head conn_fr_pool; 124 struct list_head conn_fr_pool;
123 int conn_fr_pool_size; 125 int conn_fr_pool_size;
@@ -126,7 +128,6 @@ struct isert_conn {
126#define ISERT_COMP_BATCH_COUNT 8 128#define ISERT_COMP_BATCH_COUNT 8
127 int conn_comp_batch; 129 int conn_comp_batch;
128 struct llist_head conn_comp_llist; 130 struct llist_head conn_comp_llist;
129 struct mutex conn_comp_mutex;
130}; 131};
131 132
132#define ISERT_MAX_CQ 64 133#define ISERT_MAX_CQ 64
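
The new ISER_FASTREG_LI_WRID constant is a reserved work-request id, so the completion handler can recognize registration/invalidate completions without dereferencing a cookie. A tiny sketch of the idea with hypothetical names:

#include <linux/types.h>

#define DEMO_LI_WRID	0xffffffffffffffffULL

static bool demo_is_li_completion(u64 wr_id)
{
	/* sentinel wr_id: no payload context to look up */
	return wr_id == DEMO_LI_WRID;
}
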
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1af70145fab9..074b9c8e4cf0 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -979,12 +979,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)
979 int r; 979 int r;
980 struct dm_io_region o_region, c_region; 980 struct dm_io_region o_region, c_region;
981 struct cache *cache = mg->cache; 981 struct cache *cache = mg->cache;
982 sector_t cblock = from_cblock(mg->cblock);
982 983
983 o_region.bdev = cache->origin_dev->bdev; 984 o_region.bdev = cache->origin_dev->bdev;
984 o_region.count = cache->sectors_per_block; 985 o_region.count = cache->sectors_per_block;
985 986
986 c_region.bdev = cache->cache_dev->bdev; 987 c_region.bdev = cache->cache_dev->bdev;
987 c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block; 988 c_region.sector = cblock * cache->sectors_per_block;
988 c_region.count = cache->sectors_per_block; 989 c_region.count = cache->sectors_per_block;
989 990
990 if (mg->writeback || mg->demote) { 991 if (mg->writeback || mg->demote) {
@@ -2464,20 +2465,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
2464 bool discarded_block; 2465 bool discarded_block;
2465 struct dm_bio_prison_cell *cell; 2466 struct dm_bio_prison_cell *cell;
2466 struct policy_result lookup_result; 2467 struct policy_result lookup_result;
2467 struct per_bio_data *pb; 2468 struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
2468 2469
2469 if (from_oblock(block) > from_oblock(cache->origin_blocks)) { 2470 if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
2470 /* 2471 /*
2471 * This can only occur if the io goes to a partial block at 2472 * This can only occur if the io goes to a partial block at
2472 * the end of the origin device. We don't cache these. 2473 * the end of the origin device. We don't cache these.
2473 * Just remap to the origin and carry on. 2474 * Just remap to the origin and carry on.
2474 */ 2475 */
2475 remap_to_origin_clear_discard(cache, bio, block); 2476 remap_to_origin(cache, bio);
2476 return DM_MAPIO_REMAPPED; 2477 return DM_MAPIO_REMAPPED;
2477 } 2478 }
2478 2479
2479 pb = init_per_bio_data(bio, pb_data_size);
2480
2481 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { 2480 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
2482 defer_bio(cache, bio); 2481 defer_bio(cache, bio);
2483 return DM_MAPIO_SUBMITTED; 2482 return DM_MAPIO_SUBMITTED;
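
The dm-cache hunk tightens the bounds check from '>' to '>=': origin_blocks is a count, so the last valid block index is count - 1 and index == count must also be remapped to the origin. A minimal sketch of that off-by-one, with hypothetical names:

#include <linux/types.h>

static bool block_is_past_end(u64 block, u64 origin_blocks)
{
	/* was: block > origin_blocks, which accepted block == origin_blocks */
	return block >= origin_blocks;
}
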
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index b9e2000969f0..95c894482fdd 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -240,7 +240,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
240 240
241 nid = cpu_to_node(cpu); 241 nid = cpu_to_node(cpu);
242 page = alloc_pages_exact_node(nid, 242 page = alloc_pages_exact_node(nid,
243 GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 243 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
244 pg_order); 244 pg_order);
245 if (page == NULL) { 245 if (page == NULL) {
246 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " 246 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index a2c47476804d..e8f133e926aa 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -730,7 +730,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
730 client_info->ntt = 0; 730 client_info->ntt = 0;
731 } 731 }
732 732
733 if (!vlan_get_tag(skb, &client_info->vlan_id)) 733 if (vlan_get_tag(skb, &client_info->vlan_id))
734 client_info->vlan_id = 0; 734 client_info->vlan_id = 0;
735 735
736 if (!client_info->assigned) { 736 if (!client_info->assigned) {
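
The bond_alb fix drops the negation on vlan_get_tag(): helpers of this kind return 0 on success and non-zero when no tag is present, so the fallback value belongs in the failure branch. A sketch with a hypothetical demo_get_tag():

#include <linux/errno.h>
#include <linux/types.h>

static int demo_get_tag(bool tagged, u16 *tag)
{
	if (!tagged)
		return -EINVAL;
	*tag = 100;			/* pretend VLAN id */
	return 0;
}

static u16 demo_tag_or_zero(bool tagged)
{
	u16 id;

	if (demo_get_tag(tagged, &id))	/* non-zero means "no tag" */
		id = 0;
	return id;
}
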
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index c37878432717..298c26509095 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -121,6 +121,7 @@ static struct bond_opt_value bond_resend_igmp_tbl[] = {
121static struct bond_opt_value bond_lp_interval_tbl[] = { 121static struct bond_opt_value bond_lp_interval_tbl[] = {
122 { "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT}, 122 { "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
123 { "maxval", INT_MAX, BOND_VALFLAG_MAX}, 123 { "maxval", INT_MAX, BOND_VALFLAG_MAX},
124 { NULL, -1, 0},
124}; 125};
125 126
126static struct bond_option bond_opts[] = { 127static struct bond_option bond_opts[] = {
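
The bond_options change adds the { NULL, -1, 0 } terminator because lookups walk these tables until they hit an entry whose string is NULL; without the sentinel the walk runs off the end of the array. A sketch with hypothetical names:

#include <linux/string.h>

struct demo_opt_value {
	const char	*string;
	int		value;
	int		flags;
};

static const struct demo_opt_value demo_tbl[] = {
	{ "minval", 1,  0 },
	{ "maxval", 64, 0 },
	{ NULL,     -1, 0 },		/* sentinel: terminates the walk */
};

static const struct demo_opt_value *demo_find_value(const char *s)
{
	const struct demo_opt_value *v;

	for (v = demo_tbl; v->string; v++)
		if (!strcmp(v->string, s))
			return v;
	return NULL;
}
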
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index cda25ac45b47..6c9e1c9bdeb8 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2507,6 +2507,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2507 2507
2508 bp->fw_wr_seq++; 2508 bp->fw_wr_seq++;
2509 msg_data |= bp->fw_wr_seq; 2509 msg_data |= bp->fw_wr_seq;
2510 bp->fw_last_msg = msg_data;
2510 2511
2511 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); 2512 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2512 2513
@@ -4000,8 +4001,23 @@ bnx2_setup_wol(struct bnx2 *bp)
4000 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; 4001 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4001 } 4002 }
4002 4003
4003 if (!(bp->flags & BNX2_FLAG_NO_WOL)) 4004 if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4004 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0); 4005 u32 val;
4006
4007 wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4008 if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4009 bnx2_fw_sync(bp, wol_msg, 1, 0);
4010 return;
4011 }
4012 /* Tell firmware not to power down the PHY yet, otherwise
4013 * the chip will take a long time to respond to MMIO reads.
4014 */
4015 val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4016 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4017 val | BNX2_PORT_FEATURE_ASF_ENABLED);
4018 bnx2_fw_sync(bp, wol_msg, 1, 0);
4019 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4020 }
4005 4021
4006} 4022}
4007 4023
@@ -4033,9 +4049,22 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4033 4049
4034 if (bp->wol) 4050 if (bp->wol)
4035 pci_set_power_state(bp->pdev, PCI_D3hot); 4051 pci_set_power_state(bp->pdev, PCI_D3hot);
4036 } else { 4052 break;
4037 pci_set_power_state(bp->pdev, PCI_D3hot); 4053
4054 }
4055 if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4056 u32 val;
4057
4058 /* Tell firmware not to power down the PHY yet,
4059 * otherwise the other port may not respond to
4060 * MMIO reads.
4061 */
4062 val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4063 val &= ~BNX2_CONDITION_PM_STATE_MASK;
4064 val |= BNX2_CONDITION_PM_STATE_UNPREP;
4065 bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4038 } 4066 }
4067 pci_set_power_state(bp->pdev, PCI_D3hot);
4039 4068
4040 /* No more memory access after this point until 4069 /* No more memory access after this point until
4041 * device is brought back to D0. 4070 * device is brought back to D0.
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index f1cf2c44e7ed..e341bc366fa5 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6900,6 +6900,7 @@ struct bnx2 {
6900 6900
6901 u16 fw_wr_seq; 6901 u16 fw_wr_seq;
6902 u16 fw_drv_pulse_wr_seq; 6902 u16 fw_drv_pulse_wr_seq;
6903 u32 fw_last_msg;
6903 6904
6904 int rx_max_ring; 6905 int rx_max_ring;
6905 int rx_ring_size; 6906 int rx_ring_size;
@@ -7406,6 +7407,10 @@ struct bnx2_rv2p_fw_file {
7406#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000 7407#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
7407#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000 7408#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
7408#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000 7409#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
7410#define BNX2_CONDITION_PM_STATE_MASK 0x00030000
7411#define BNX2_CONDITION_PM_STATE_FULL 0x00030000
7412#define BNX2_CONDITION_PM_STATE_PREP 0x00020000
7413#define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000
7409 7414
7410#define BNX2_BC_STATE_DEBUG_CMD 0x1dc 7415#define BNX2_BC_STATE_DEBUG_CMD 0x1dc
7411#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000 7416#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 1803c3959044..354ae9792bad 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1704,7 +1704,7 @@ bfa_flash_sem_get(void __iomem *bar)
1704 while (!bfa_raw_sem_get(bar)) { 1704 while (!bfa_raw_sem_get(bar)) {
1705 if (--n <= 0) 1705 if (--n <= 0)
1706 return BFA_STATUS_BADFLASH; 1706 return BFA_STATUS_BADFLASH;
1707 udelay(10000); 1707 mdelay(10);
1708 } 1708 }
1709 return BFA_STATUS_OK; 1709 return BFA_STATUS_OK;
1710} 1710}
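
The bfa_ioc change swaps udelay(10000) for mdelay(10): udelay() is meant for microsecond-scale busy waits, so a 10 ms pause is better written as mdelay(10) in atomic context (or msleep(10) where sleeping is allowed). A sketch of the poll loop, with a hypothetical demo_sem_get callback:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int demo_wait_for_sem(bool (*demo_sem_get)(void), int tries)
{
	while (!demo_sem_get()) {
		if (--tries <= 0)
			return -EBUSY;
		mdelay(10);		/* was: udelay(10000) */
	}
	return 0;
}
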
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 3190d38e16fb..d0c38e01e99f 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -632,11 +632,16 @@ static void gem_rx_refill(struct macb *bp)
632 "Unable to allocate sk_buff\n"); 632 "Unable to allocate sk_buff\n");
633 break; 633 break;
634 } 634 }
635 bp->rx_skbuff[entry] = skb;
636 635
637 /* now fill corresponding descriptor entry */ 636 /* now fill corresponding descriptor entry */
638 paddr = dma_map_single(&bp->pdev->dev, skb->data, 637 paddr = dma_map_single(&bp->pdev->dev, skb->data,
639 bp->rx_buffer_size, DMA_FROM_DEVICE); 638 bp->rx_buffer_size, DMA_FROM_DEVICE);
639 if (dma_mapping_error(&bp->pdev->dev, paddr)) {
640 dev_kfree_skb(skb);
641 break;
642 }
643
644 bp->rx_skbuff[entry] = skb;
640 645
641 if (entry == RX_RING_SIZE - 1) 646 if (entry == RX_RING_SIZE - 1)
642 paddr |= MACB_BIT(RX_WRAP); 647 paddr |= MACB_BIT(RX_WRAP);
@@ -725,7 +730,7 @@ static int gem_rx(struct macb *bp, int budget)
725 skb_put(skb, len); 730 skb_put(skb, len);
726 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)); 731 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
727 dma_unmap_single(&bp->pdev->dev, addr, 732 dma_unmap_single(&bp->pdev->dev, addr,
728 len, DMA_FROM_DEVICE); 733 bp->rx_buffer_size, DMA_FROM_DEVICE);
729 734
730 skb->protocol = eth_type_trans(skb, bp->dev); 735 skb->protocol = eth_type_trans(skb, bp->dev);
731 skb_checksum_none_assert(skb); 736 skb_checksum_none_assert(skb);
@@ -1036,11 +1041,15 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1036 } 1041 }
1037 1042
1038 entry = macb_tx_ring_wrap(bp->tx_head); 1043 entry = macb_tx_ring_wrap(bp->tx_head);
1039 bp->tx_head++;
1040 netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry); 1044 netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
1041 mapping = dma_map_single(&bp->pdev->dev, skb->data, 1045 mapping = dma_map_single(&bp->pdev->dev, skb->data,
1042 len, DMA_TO_DEVICE); 1046 len, DMA_TO_DEVICE);
1047 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
1048 kfree_skb(skb);
1049 goto unlock;
1050 }
1043 1051
1052 bp->tx_head++;
1044 tx_skb = &bp->tx_skb[entry]; 1053 tx_skb = &bp->tx_skb[entry];
1045 tx_skb->skb = skb; 1054 tx_skb->skb = skb;
1046 tx_skb->mapping = mapping; 1055 tx_skb->mapping = mapping;
@@ -1066,6 +1075,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1066 if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) 1075 if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
1067 netif_stop_queue(dev); 1076 netif_stop_queue(dev);
1068 1077
1078unlock:
1069 spin_unlock_irqrestore(&bp->lock, flags); 1079 spin_unlock_irqrestore(&bp->lock, flags);
1070 1080
1071 return NETDEV_TX_OK; 1081 return NETDEV_TX_OK;
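
The macb hunks add dma_mapping_error() checks and only commit the skb to the ring (and advance tx_head) after the mapping succeeds, so a failed mapping never reaches a descriptor. A minimal sketch with hypothetical demo_* names and ring layout:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int demo_map_rx_skb(struct device *dev, struct sk_buff *skb,
			   size_t len, struct sk_buff **slot,
			   dma_addr_t *paddr)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		dev_kfree_skb(skb);	/* do not publish the skb */
		return -ENOMEM;
	}

	*slot = skb;			/* only now commit to the ring */
	*paddr = addr;
	return 0;
}
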
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 479a7cba45c0..03a351300013 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -528,13 +528,6 @@ fec_restart(struct net_device *ndev, int duplex)
528 /* Clear any outstanding interrupt. */ 528 /* Clear any outstanding interrupt. */
529 writel(0xffc00000, fep->hwp + FEC_IEVENT); 529 writel(0xffc00000, fep->hwp + FEC_IEVENT);
530 530
531 /* Setup multicast filter. */
532 set_multicast_list(ndev);
533#ifndef CONFIG_M5272
534 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
535 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
536#endif
537
538 /* Set maximum receive buffer size. */ 531 /* Set maximum receive buffer size. */
539 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); 532 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
540 533
@@ -655,6 +648,13 @@ fec_restart(struct net_device *ndev, int duplex)
655 648
656 writel(rcntl, fep->hwp + FEC_R_CNTRL); 649 writel(rcntl, fep->hwp + FEC_R_CNTRL);
657 650
651 /* Setup multicast filter. */
652 set_multicast_list(ndev);
653#ifndef CONFIG_M5272
654 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
655 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
656#endif
657
658 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { 658 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
659 /* enable ENET endian swap */ 659 /* enable ENET endian swap */
660 ecntl |= (1 << 8); 660 ecntl |= (1 << 8);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 4be971590461..1fc8334fc181 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -522,10 +522,21 @@ retry:
522 return rc; 522 return rc;
523} 523}
524 524
525static u64 ibmveth_encode_mac_addr(u8 *mac)
526{
527 int i;
528 u64 encoded = 0;
529
530 for (i = 0; i < ETH_ALEN; i++)
531 encoded = (encoded << 8) | mac[i];
532
533 return encoded;
534}
535
525static int ibmveth_open(struct net_device *netdev) 536static int ibmveth_open(struct net_device *netdev)
526{ 537{
527 struct ibmveth_adapter *adapter = netdev_priv(netdev); 538 struct ibmveth_adapter *adapter = netdev_priv(netdev);
528 u64 mac_address = 0; 539 u64 mac_address;
529 int rxq_entries = 1; 540 int rxq_entries = 1;
530 unsigned long lpar_rc; 541 unsigned long lpar_rc;
531 int rc; 542 int rc;
@@ -579,8 +590,7 @@ static int ibmveth_open(struct net_device *netdev)
579 adapter->rx_queue.num_slots = rxq_entries; 590 adapter->rx_queue.num_slots = rxq_entries;
580 adapter->rx_queue.toggle = 1; 591 adapter->rx_queue.toggle = 1;
581 592
582 memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); 593 mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
583 mac_address = mac_address >> 16;
584 594
585 rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | 595 rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
586 adapter->rx_queue.queue_len; 596 adapter->rx_queue.queue_len;
@@ -1183,8 +1193,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1183 /* add the addresses to the filter table */ 1193 /* add the addresses to the filter table */
1184 netdev_for_each_mc_addr(ha, netdev) { 1194 netdev_for_each_mc_addr(ha, netdev) {
1185 /* add the multicast address to the filter table */ 1195 /* add the multicast address to the filter table */
1186 unsigned long mcast_addr = 0; 1196 u64 mcast_addr;
1187 memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN); 1197 mcast_addr = ibmveth_encode_mac_addr(ha->addr);
1188 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 1198 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1189 IbmVethMcastAddFilter, 1199 IbmVethMcastAddFilter,
1190 mcast_addr); 1200 mcast_addr);
@@ -1372,9 +1382,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1372 1382
1373 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); 1383 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
1374 1384
1375 adapter->mac_addr = 0;
1376 memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);
1377
1378 netdev->irq = dev->irq; 1385 netdev->irq = dev->irq;
1379 netdev->netdev_ops = &ibmveth_netdev_ops; 1386 netdev->netdev_ops = &ibmveth_netdev_ops;
1380 netdev->ethtool_ops = &netdev_ethtool_ops; 1387 netdev->ethtool_ops = &netdev_ethtool_ops;
@@ -1383,7 +1390,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1383 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1390 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1384 netdev->features |= netdev->hw_features; 1391 netdev->features |= netdev->hw_features;
1385 1392
1386 memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); 1393 memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
1387 1394
1388 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 1395 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1389 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; 1396 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 451ba7949e15..1f37499d4398 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -138,7 +138,6 @@ struct ibmveth_adapter {
138 struct napi_struct napi; 138 struct napi_struct napi;
139 struct net_device_stats stats; 139 struct net_device_stats stats;
140 unsigned int mcastFilterSize; 140 unsigned int mcastFilterSize;
141 unsigned long mac_addr;
142 void * buffer_list_addr; 141 void * buffer_list_addr;
143 void * filter_list_addr; 142 void * filter_list_addr;
144 dma_addr_t buffer_list_dma; 143 dma_addr_t buffer_list_dma;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fad45316200a..84a96f70dfb5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -742,6 +742,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
742 err = mlx4_en_uc_steer_add(priv, new_mac, 742 err = mlx4_en_uc_steer_add(priv, new_mac,
743 &qpn, 743 &qpn,
744 &entry->reg_id); 744 &entry->reg_id);
745 if (err)
746 return err;
747 if (priv->tunnel_reg_id) {
748 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
749 priv->tunnel_reg_id = 0;
750 }
751 err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
752 &priv->tunnel_reg_id);
745 return err; 753 return err;
746 } 754 }
747 } 755 }
@@ -1792,6 +1800,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
1792 mc_list[5] = priv->port; 1800 mc_list[5] = priv->port;
1793 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, 1801 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
1794 mc_list, MLX4_PROT_ETH, mclist->reg_id); 1802 mc_list, MLX4_PROT_ETH, mclist->reg_id);
1803 if (mclist->tunnel_reg_id)
1804 mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
1795 } 1805 }
1796 mlx4_en_clear_list(dev); 1806 mlx4_en_clear_list(dev);
1797 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 1807 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
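
The mlx4_en change detaches the old tunnel steering rule and clears its id before attaching one for the new MAC, so no stale handle is left behind. A generic sketch of that replace-a-handle pattern, with hypothetical callbacks:

#include <linux/types.h>

static int demo_replace_rule(u64 *reg_id,
			     int (*detach)(u64 id),
			     int (*attach)(u64 *id))
{
	if (*reg_id) {
		detach(*reg_id);	/* drop the old rule first */
		*reg_id = 0;
	}
	return attach(reg_id);		/* install the replacement */
}
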
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 91b69ff4b4a2..7e2995ecea6f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -129,13 +129,14 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
129 [0] = "RSS support", 129 [0] = "RSS support",
130 [1] = "RSS Toeplitz Hash Function support", 130 [1] = "RSS Toeplitz Hash Function support",
131 [2] = "RSS XOR Hash Function support", 131 [2] = "RSS XOR Hash Function support",
132 [3] = "Device manage flow steering support", 132 [3] = "Device managed flow steering support",
133 [4] = "Automatic MAC reassignment support", 133 [4] = "Automatic MAC reassignment support",
134 [5] = "Time stamping support", 134 [5] = "Time stamping support",
135 [6] = "VST (control vlan insertion/stripping) support", 135 [6] = "VST (control vlan insertion/stripping) support",
136 [7] = "FSM (MAC anti-spoofing) support", 136 [7] = "FSM (MAC anti-spoofing) support",
137 [8] = "Dynamic QP updates support", 137 [8] = "Dynamic QP updates support",
138 [9] = "TCP/IP offloads/flow-steering for VXLAN support" 138 [9] = "Device managed flow steering IPoIB support",
139 [10] = "TCP/IP offloads/flow-steering for VXLAN support"
139 }; 140 };
140 int i; 141 int i;
141 142
@@ -859,7 +860,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
859 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); 860 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
860 861
861 /* For guests, disable vxlan tunneling */ 862 /* For guests, disable vxlan tunneling */
862 MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN); 863 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
863 field &= 0xf7; 864 field &= 0xf7;
864 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); 865 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
865 866
@@ -869,7 +870,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
869 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); 870 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
870 871
871 /* For guests, disable mw type 2 */ 872 /* For guests, disable mw type 2 */
872 MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 873 MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
873 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; 874 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
874 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 875 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
875 876
@@ -883,7 +884,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
883 } 884 }
884 885
885 /* turn off ipoib managed steering for guests */ 886 /* turn off ipoib managed steering for guests */
886 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); 887 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
887 field &= ~0x80; 888 field &= ~0x80;
888 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); 889 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
889 890
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d711158b0d4b..936c15364739 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -150,6 +150,8 @@ struct mlx4_port_config {
150 struct pci_dev *pdev; 150 struct pci_dev *pdev;
151}; 151};
152 152
153static atomic_t pf_loading = ATOMIC_INIT(0);
154
153int mlx4_check_port_params(struct mlx4_dev *dev, 155int mlx4_check_port_params(struct mlx4_dev *dev,
154 enum mlx4_port_type *port_type) 156 enum mlx4_port_type *port_type)
155{ 157{
@@ -749,7 +751,7 @@ static void mlx4_request_modules(struct mlx4_dev *dev)
749 has_eth_port = true; 751 has_eth_port = true;
750 } 752 }
751 753
752 if (has_ib_port) 754 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
753 request_module_nowait(IB_DRV_NAME); 755 request_module_nowait(IB_DRV_NAME);
754 if (has_eth_port) 756 if (has_eth_port)
755 request_module_nowait(EN_DRV_NAME); 757 request_module_nowait(EN_DRV_NAME);
@@ -1407,6 +1409,11 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1407 u32 slave_read; 1409 u32 slave_read;
1408 u32 cmd_channel_ver; 1410 u32 cmd_channel_ver;
1409 1411
1412 if (atomic_read(&pf_loading)) {
1413 mlx4_warn(dev, "PF is not ready. Deferring probe\n");
1414 return -EPROBE_DEFER;
1415 }
1416
1410 mutex_lock(&priv->cmd.slave_cmd_mutex); 1417 mutex_lock(&priv->cmd.slave_cmd_mutex);
1411 priv->cmd.max_cmds = 1; 1418 priv->cmd.max_cmds = 1;
1412 mlx4_warn(dev, "Sending reset\n"); 1419 mlx4_warn(dev, "Sending reset\n");
@@ -2319,7 +2326,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2319 2326
2320 if (num_vfs) { 2327 if (num_vfs) {
2321 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs); 2328 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
2329
2330 atomic_inc(&pf_loading);
2322 err = pci_enable_sriov(pdev, num_vfs); 2331 err = pci_enable_sriov(pdev, num_vfs);
2332 atomic_dec(&pf_loading);
2333
2323 if (err) { 2334 if (err) {
2324 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", 2335 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
2325 err); 2336 err);
@@ -2684,6 +2695,7 @@ static struct pci_driver mlx4_driver = {
2684 .name = DRV_NAME, 2695 .name = DRV_NAME,
2685 .id_table = mlx4_pci_table, 2696 .id_table = mlx4_pci_table,
2686 .probe = mlx4_init_one, 2697 .probe = mlx4_init_one,
2698 .shutdown = mlx4_remove_one,
2687 .remove = mlx4_remove_one, 2699 .remove = mlx4_remove_one,
2688 .err_handler = &mlx4_err_handler, 2700 .err_handler = &mlx4_err_handler,
2689}; 2701};
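
The mlx4 core change introduces a pf_loading flag raised around pci_enable_sriov(); a VF probe that runs in that window returns -EPROBE_DEFER so the driver core retries it later instead of failing. A sketch of the deferral pattern with hypothetical demo_* names:

#include <linux/atomic.h>
#include <linux/errno.h>

static atomic_t demo_pf_loading = ATOMIC_INIT(0);

static int demo_vf_probe_step(void)
{
	if (atomic_read(&demo_pf_loading))
		return -EPROBE_DEFER;	/* retried after the PF settles */
	return 0;
}

static int demo_pf_enable_sriov(int (*enable)(void))
{
	int err;

	atomic_inc(&demo_pf_loading);
	err = enable();			/* e.g. pci_enable_sriov() */
	atomic_dec(&demo_pf_loading);
	return err;
}
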
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e9779653cd4c..3ff7bc3e7a23 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -209,7 +209,7 @@ static const struct {
209 [RTL_GIGA_MAC_VER_16] = 209 [RTL_GIGA_MAC_VER_16] =
210 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), 210 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
211 [RTL_GIGA_MAC_VER_17] = 211 [RTL_GIGA_MAC_VER_17] =
212 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false), 212 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
213 [RTL_GIGA_MAC_VER_18] = 213 [RTL_GIGA_MAC_VER_18] =
214 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), 214 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
215 [RTL_GIGA_MAC_VER_19] = 215 [RTL_GIGA_MAC_VER_19] =
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 72d282bf33a5..c553f6b5a913 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -151,7 +151,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
151 sizeof(struct dma_desc))); 151 sizeof(struct dma_desc)));
152} 152}
153 153
154const struct stmmac_chain_mode_ops chain_mode_ops = { 154const struct stmmac_mode_ops chain_mode_ops = {
155 .init = stmmac_init_dma_chain, 155 .init = stmmac_init_dma_chain,
156 .is_jumbo_frm = stmmac_is_jumbo_frm, 156 .is_jumbo_frm = stmmac_is_jumbo_frm,
157 .jumbo_frm = stmmac_jumbo_frm, 157 .jumbo_frm = stmmac_jumbo_frm,
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7834a3993946..74610f3aca9e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -419,20 +419,13 @@ struct mii_regs {
419 unsigned int data; /* MII Data */ 419 unsigned int data; /* MII Data */
420}; 420};
421 421
422struct stmmac_ring_mode_ops { 422struct stmmac_mode_ops {
423 unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
424 unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
425 void (*refill_desc3) (void *priv, struct dma_desc *p);
426 void (*init_desc3) (struct dma_desc *p);
427 void (*clean_desc3) (void *priv, struct dma_desc *p);
428 int (*set_16kib_bfsize) (int mtu);
429};
430
431struct stmmac_chain_mode_ops {
432 void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, 423 void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
433 unsigned int extend_desc); 424 unsigned int extend_desc);
434 unsigned int (*is_jumbo_frm) (int len, int ehn_desc); 425 unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
435 unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); 426 unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
427 int (*set_16kib_bfsize)(int mtu);
428 void (*init_desc3)(struct dma_desc *p);
436 void (*refill_desc3) (void *priv, struct dma_desc *p); 429 void (*refill_desc3) (void *priv, struct dma_desc *p);
437 void (*clean_desc3) (void *priv, struct dma_desc *p); 430 void (*clean_desc3) (void *priv, struct dma_desc *p);
438}; 431};
@@ -441,8 +434,7 @@ struct mac_device_info {
441 const struct stmmac_ops *mac; 434 const struct stmmac_ops *mac;
442 const struct stmmac_desc_ops *desc; 435 const struct stmmac_desc_ops *desc;
443 const struct stmmac_dma_ops *dma; 436 const struct stmmac_dma_ops *dma;
444 const struct stmmac_ring_mode_ops *ring; 437 const struct stmmac_mode_ops *mode;
445 const struct stmmac_chain_mode_ops *chain;
446 const struct stmmac_hwtimestamp *ptp; 438 const struct stmmac_hwtimestamp *ptp;
447 struct mii_regs mii; /* MII register Addresses */ 439 struct mii_regs mii; /* MII register Addresses */
448 struct mac_link link; 440 struct mac_link link;
@@ -460,7 +452,7 @@ void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
460void stmmac_set_mac(void __iomem *ioaddr, bool enable); 452void stmmac_set_mac(void __iomem *ioaddr, bool enable);
461 453
462void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); 454void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
463extern const struct stmmac_ring_mode_ops ring_mode_ops; 455extern const struct stmmac_mode_ops ring_mode_ops;
464extern const struct stmmac_chain_mode_ops chain_mode_ops; 456extern const struct stmmac_mode_ops chain_mode_ops;
465 457
466#endif /* __COMMON_H__ */ 458#endif /* __COMMON_H__ */
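
The stmmac header change merges the ring and chain ops into one stmmac_mode_ops vtable; callbacks a mode does not implement stay NULL and callers test them before use. A sketch of that optional-callback style with hypothetical names:

struct demo_mode_ops {
	void (*init)(void *desc, unsigned long phys, unsigned int size);
	int  (*set_16kib_bfsize)(int mtu);	/* ring mode only */
	void (*refill_desc3)(void *priv, void *desc);
};

static int demo_pick_bfsize(const struct demo_mode_ops *ops, int mtu,
			    int fallback)
{
	/* optional hook: chain mode leaves it NULL */
	if (ops->set_16kib_bfsize)
		return ops->set_16kib_bfsize(mtu);
	return fallback;
}

One vtable pointer in mac_device_info then serves both modes, which is what the stmmac_main.c hunks later in this patch rely on.
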
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index a96c7c2f5f3f..650a4be6bce5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -100,10 +100,9 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
100{ 100{
101 struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; 101 struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
102 102
103 if (unlikely(priv->plat->has_gmac)) 103 /* Fill DES3 in case of RING mode */
104 /* Fill DES3 in case of RING mode */ 104 if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
105 if (priv->dma_buf_sz >= BUF_SIZE_8KiB) 105 p->des3 = p->des2 + BUF_SIZE_8KiB;
106 p->des3 = p->des2 + BUF_SIZE_8KiB;
107} 106}
108 107
109/* In ring mode we need to fill the desc3 because it is used as buffer */ 108/* In ring mode we need to fill the desc3 because it is used as buffer */
@@ -126,7 +125,7 @@ static int stmmac_set_16kib_bfsize(int mtu)
126 return ret; 125 return ret;
127} 126}
128 127
129const struct stmmac_ring_mode_ops ring_mode_ops = { 128const struct stmmac_mode_ops ring_mode_ops = {
130 .is_jumbo_frm = stmmac_is_jumbo_frm, 129 .is_jumbo_frm = stmmac_is_jumbo_frm,
131 .jumbo_frm = stmmac_jumbo_frm, 130 .jumbo_frm = stmmac_jumbo_frm,
132 .refill_desc3 = stmmac_refill_desc3, 131 .refill_desc3 = stmmac_refill_desc3,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 078ad0ec8593..8543e1cfd55e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -92,8 +92,8 @@ static int tc = TC_DEFAULT;
92module_param(tc, int, S_IRUGO | S_IWUSR); 92module_param(tc, int, S_IRUGO | S_IWUSR);
93MODULE_PARM_DESC(tc, "DMA threshold control value"); 93MODULE_PARM_DESC(tc, "DMA threshold control value");
94 94
95#define DMA_BUFFER_SIZE BUF_SIZE_4KiB 95#define DEFAULT_BUFSIZE 1536
96static int buf_sz = DMA_BUFFER_SIZE; 96static int buf_sz = DEFAULT_BUFSIZE;
97module_param(buf_sz, int, S_IRUGO | S_IWUSR); 97module_param(buf_sz, int, S_IRUGO | S_IWUSR);
98MODULE_PARM_DESC(buf_sz, "DMA buffer size"); 98MODULE_PARM_DESC(buf_sz, "DMA buffer size");
99 99
@@ -136,8 +136,8 @@ static void stmmac_verify_args(void)
136 dma_rxsize = DMA_RX_SIZE; 136 dma_rxsize = DMA_RX_SIZE;
137 if (unlikely(dma_txsize < 0)) 137 if (unlikely(dma_txsize < 0))
138 dma_txsize = DMA_TX_SIZE; 138 dma_txsize = DMA_TX_SIZE;
139 if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB))) 139 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
140 buf_sz = DMA_BUFFER_SIZE; 140 buf_sz = DEFAULT_BUFSIZE;
141 if (unlikely(flow_ctrl > 1)) 141 if (unlikely(flow_ctrl > 1))
142 flow_ctrl = FLOW_AUTO; 142 flow_ctrl = FLOW_AUTO;
143 else if (likely(flow_ctrl < 0)) 143 else if (likely(flow_ctrl < 0))
@@ -286,10 +286,25 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
286 286
287 /* MAC core supports the EEE feature. */ 287 /* MAC core supports the EEE feature. */
288 if (priv->dma_cap.eee) { 288 if (priv->dma_cap.eee) {
289 int tx_lpi_timer = priv->tx_lpi_timer;
290
289 /* Check if the PHY supports EEE */ 291 /* Check if the PHY supports EEE */
290 if (phy_init_eee(priv->phydev, 1)) 292 if (phy_init_eee(priv->phydev, 1)) {
293 /* To manage at run-time if the EEE cannot be supported
294 * anymore (for example because the lp caps have been
295 * changed).
296 * In that case the driver disable own timers.
297 */
298 if (priv->eee_active) {
299 pr_debug("stmmac: disable EEE\n");
300 del_timer_sync(&priv->eee_ctrl_timer);
301 priv->hw->mac->set_eee_timer(priv->ioaddr, 0,
302 tx_lpi_timer);
303 }
304 priv->eee_active = 0;
291 goto out; 305 goto out;
292 306 }
307 /* Activate the EEE and start timers */
293 if (!priv->eee_active) { 308 if (!priv->eee_active) {
294 priv->eee_active = 1; 309 priv->eee_active = 1;
295 init_timer(&priv->eee_ctrl_timer); 310 init_timer(&priv->eee_ctrl_timer);
@@ -300,13 +315,13 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
300 315
301 priv->hw->mac->set_eee_timer(priv->ioaddr, 316 priv->hw->mac->set_eee_timer(priv->ioaddr,
302 STMMAC_DEFAULT_LIT_LS, 317 STMMAC_DEFAULT_LIT_LS,
303 priv->tx_lpi_timer); 318 tx_lpi_timer);
304 } else 319 } else
305 /* Set HW EEE according to the speed */ 320 /* Set HW EEE according to the speed */
306 priv->hw->mac->set_eee_pls(priv->ioaddr, 321 priv->hw->mac->set_eee_pls(priv->ioaddr,
307 priv->phydev->link); 322 priv->phydev->link);
308 323
309 pr_info("stmmac: Energy-Efficient Ethernet initialized\n"); 324 pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
310 325
311 ret = true; 326 ret = true;
312 } 327 }
@@ -886,10 +901,10 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
886 ret = BUF_SIZE_8KiB; 901 ret = BUF_SIZE_8KiB;
887 else if (mtu >= BUF_SIZE_2KiB) 902 else if (mtu >= BUF_SIZE_2KiB)
888 ret = BUF_SIZE_4KiB; 903 ret = BUF_SIZE_4KiB;
889 else if (mtu >= DMA_BUFFER_SIZE) 904 else if (mtu > DEFAULT_BUFSIZE)
890 ret = BUF_SIZE_2KiB; 905 ret = BUF_SIZE_2KiB;
891 else 906 else
892 ret = DMA_BUFFER_SIZE; 907 ret = DEFAULT_BUFSIZE;
893 908
894 return ret; 909 return ret;
895} 910}
@@ -951,9 +966,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
951 966
952 p->des2 = priv->rx_skbuff_dma[i]; 967 p->des2 = priv->rx_skbuff_dma[i];
953 968
954 if ((priv->mode == STMMAC_RING_MODE) && 969 if ((priv->hw->mode->init_desc3) &&
955 (priv->dma_buf_sz == BUF_SIZE_16KiB)) 970 (priv->dma_buf_sz == BUF_SIZE_16KiB))
956 priv->hw->ring->init_desc3(p); 971 priv->hw->mode->init_desc3(p);
957 972
958 return 0; 973 return 0;
959} 974}
@@ -984,11 +999,8 @@ static int init_dma_desc_rings(struct net_device *dev)
984 unsigned int bfsize = 0; 999 unsigned int bfsize = 0;
985 int ret = -ENOMEM; 1000 int ret = -ENOMEM;
986 1001
987 /* Set the max buffer size according to the DESC mode 1002 if (priv->hw->mode->set_16kib_bfsize)
988 * and the MTU. Note that RING mode allows 16KiB bsize. 1003 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
989 */
990 if (priv->mode == STMMAC_RING_MODE)
991 bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
992 1004
993 if (bfsize < BUF_SIZE_16KiB) 1005 if (bfsize < BUF_SIZE_16KiB)
994 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); 1006 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
@@ -1029,15 +1041,15 @@ static int init_dma_desc_rings(struct net_device *dev)
1029 /* Setup the chained descriptor addresses */ 1041 /* Setup the chained descriptor addresses */
1030 if (priv->mode == STMMAC_CHAIN_MODE) { 1042 if (priv->mode == STMMAC_CHAIN_MODE) {
1031 if (priv->extend_desc) { 1043 if (priv->extend_desc) {
1032 priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy, 1044 priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
1033 rxsize, 1); 1045 rxsize, 1);
1034 priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy, 1046 priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
1035 txsize, 1); 1047 txsize, 1);
1036 } else { 1048 } else {
1037 priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy, 1049 priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
1038 rxsize, 0); 1050 rxsize, 0);
1039 priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy, 1051 priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
1040 txsize, 0); 1052 txsize, 0);
1041 } 1053 }
1042 } 1054 }
1043 1055
@@ -1288,7 +1300,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1288 DMA_TO_DEVICE); 1300 DMA_TO_DEVICE);
1289 priv->tx_skbuff_dma[entry] = 0; 1301 priv->tx_skbuff_dma[entry] = 0;
1290 } 1302 }
1291 priv->hw->ring->clean_desc3(priv, p); 1303 priv->hw->mode->clean_desc3(priv, p);
1292 1304
1293 if (likely(skb != NULL)) { 1305 if (likely(skb != NULL)) {
1294 dev_kfree_skb(skb); 1306 dev_kfree_skb(skb);
@@ -1844,6 +1856,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1844 int nfrags = skb_shinfo(skb)->nr_frags; 1856 int nfrags = skb_shinfo(skb)->nr_frags;
1845 struct dma_desc *desc, *first; 1857 struct dma_desc *desc, *first;
1846 unsigned int nopaged_len = skb_headlen(skb); 1858 unsigned int nopaged_len = skb_headlen(skb);
1859 unsigned int enh_desc = priv->plat->enh_desc;
1847 1860
1848 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { 1861 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
1849 if (!netif_queue_stopped(dev)) { 1862 if (!netif_queue_stopped(dev)) {
@@ -1871,27 +1884,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1871 first = desc; 1884 first = desc;
1872 1885
1873 /* To program the descriptors according to the size of the frame */ 1886 /* To program the descriptors according to the size of the frame */
1874 if (priv->mode == STMMAC_RING_MODE) { 1887 if (enh_desc)
1875 is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len, 1888 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
1876 priv->plat->enh_desc); 1889
1877 if (unlikely(is_jumbo))
1878 entry = priv->hw->ring->jumbo_frm(priv, skb,
1879 csum_insertion);
1880 } else {
1881 is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
1882 priv->plat->enh_desc);
1883 if (unlikely(is_jumbo))
1884 entry = priv->hw->chain->jumbo_frm(priv, skb,
1885 csum_insertion);
1886 }
1887 if (likely(!is_jumbo)) { 1890 if (likely(!is_jumbo)) {
1888 desc->des2 = dma_map_single(priv->device, skb->data, 1891 desc->des2 = dma_map_single(priv->device, skb->data,
1889 nopaged_len, DMA_TO_DEVICE); 1892 nopaged_len, DMA_TO_DEVICE);
1890 priv->tx_skbuff_dma[entry] = desc->des2; 1893 priv->tx_skbuff_dma[entry] = desc->des2;
1891 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, 1894 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1892 csum_insertion, priv->mode); 1895 csum_insertion, priv->mode);
1893 } else 1896 } else {
1894 desc = first; 1897 desc = first;
1898 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
1899 }
1895 1900
1896 for (i = 0; i < nfrags; i++) { 1901 for (i = 0; i < nfrags; i++) {
1897 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1902 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2029,7 +2034,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2029 2034
2030 p->des2 = priv->rx_skbuff_dma[entry]; 2035 p->des2 = priv->rx_skbuff_dma[entry];
2031 2036
2032 priv->hw->ring->refill_desc3(priv, p); 2037 priv->hw->mode->refill_desc3(priv, p);
2033 2038
2034 if (netif_msg_rx_status(priv)) 2039 if (netif_msg_rx_status(priv))
2035 pr_debug("\trefill entry #%d\n", entry); 2040 pr_debug("\trefill entry #%d\n", entry);
@@ -2633,11 +2638,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2633 2638
2634 /* To use the chained or ring mode */ 2639 /* To use the chained or ring mode */
2635 if (chain_mode) { 2640 if (chain_mode) {
2636 priv->hw->chain = &chain_mode_ops; 2641 priv->hw->mode = &chain_mode_ops;
2637 pr_info(" Chain mode enabled\n"); 2642 pr_info(" Chain mode enabled\n");
2638 priv->mode = STMMAC_CHAIN_MODE; 2643 priv->mode = STMMAC_CHAIN_MODE;
2639 } else { 2644 } else {
2640 priv->hw->ring = &ring_mode_ops; 2645 priv->hw->mode = &ring_mode_ops;
2641 pr_info(" Ring mode enabled\n"); 2646 pr_info(" Ring mode enabled\n");
2642 priv->mode = STMMAC_RING_MODE; 2647 priv->mode = STMMAC_RING_MODE;
2643 } 2648 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index c61bc72b8e90..8fb32a80f1c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -36,7 +36,7 @@ static const struct of_device_id stmmac_dt_ids[] = {
36#ifdef CONFIG_DWMAC_STI 36#ifdef CONFIG_DWMAC_STI
37 { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, 37 { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data},
38 { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, 38 { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
39 { .compatible = "st,stih127-dwmac", .data = &sti_gmac_data}, 39 { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data},
40#endif 40#endif
41 /* SoC specific glue layers should come before generic bindings */ 41 /* SoC specific glue layers should come before generic bindings */
42 { .compatible = "st,spear600-gmac"}, 42 { .compatible = "st,spear600-gmac"},
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7141a1937360..d6fce9750b95 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -442,6 +442,8 @@ static int netvsc_probe(struct hv_device *dev,
442 if (!net) 442 if (!net)
443 return -ENOMEM; 443 return -ENOMEM;
444 444
445 netif_carrier_off(net);
446
445 net_device_ctx = netdev_priv(net); 447 net_device_ctx = netdev_priv(net);
446 net_device_ctx->device_ctx = dev; 448 net_device_ctx->device_ctx = dev;
447 hv_set_drvdata(dev, net); 449 hv_set_drvdata(dev, net);
@@ -473,6 +475,8 @@ static int netvsc_probe(struct hv_device *dev,
473 pr_err("Unable to register netdev.\n"); 475 pr_err("Unable to register netdev.\n");
474 rndis_filter_device_remove(dev); 476 rndis_filter_device_remove(dev);
475 free_netdev(net); 477 free_netdev(net);
478 } else {
479 schedule_delayed_work(&net_device_ctx->dwork, 0);
476 } 480 }
477 481
478 return ret; 482 return ret;
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 1084e5de3ceb..b54fd257652b 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -243,6 +243,22 @@ static int rndis_filter_send_request(struct rndis_device *dev,
243 return ret; 243 return ret;
244} 244}
245 245
246static void rndis_set_link_state(struct rndis_device *rdev,
247 struct rndis_request *request)
248{
249 u32 link_status;
250 struct rndis_query_complete *query_complete;
251
252 query_complete = &request->response_msg.msg.query_complete;
253
254 if (query_complete->status == RNDIS_STATUS_SUCCESS &&
255 query_complete->info_buflen == sizeof(u32)) {
256 memcpy(&link_status, (void *)((unsigned long)query_complete +
257 query_complete->info_buf_offset), sizeof(u32));
258 rdev->link_state = link_status != 0;
259 }
260}
261
246static void rndis_filter_receive_response(struct rndis_device *dev, 262static void rndis_filter_receive_response(struct rndis_device *dev,
247 struct rndis_message *resp) 263 struct rndis_message *resp)
248{ 264{
@@ -272,6 +288,10 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
272 sizeof(struct rndis_message) + RNDIS_EXT_LEN) { 288 sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
273 memcpy(&request->response_msg, resp, 289 memcpy(&request->response_msg, resp,
274 resp->msg_len); 290 resp->msg_len);
291 if (request->request_msg.ndis_msg_type ==
292 RNDIS_MSG_QUERY && request->request_msg.msg.
293 query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
294 rndis_set_link_state(dev, request);
275 } else { 295 } else {
276 netdev_err(ndev, 296 netdev_err(ndev,
277 "rndis response buffer overflow " 297 "rndis response buffer overflow "
@@ -620,7 +640,6 @@ static int rndis_filter_query_device_link_status(struct rndis_device *dev)
620 ret = rndis_filter_query_device(dev, 640 ret = rndis_filter_query_device(dev,
621 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, 641 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
622 &link_status, &size); 642 &link_status, &size);
623 dev->link_state = (link_status != 0) ? true : false;
624 643
625 return ret; 644 return ret;
626} 645}
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index ab31544bc254..a30258aad139 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -546,12 +546,12 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
546 int rc; 546 int rc;
547 unsigned long flags; 547 unsigned long flags;
548 548
549 spin_lock(&lp->lock); 549 spin_lock_irqsave(&lp->lock, flags);
550 if (lp->irq_busy) { 550 if (lp->irq_busy) {
551 spin_unlock(&lp->lock); 551 spin_unlock_irqrestore(&lp->lock, flags);
552 return -EBUSY; 552 return -EBUSY;
553 } 553 }
554 spin_unlock(&lp->lock); 554 spin_unlock_irqrestore(&lp->lock, flags);
555 555
556 might_sleep(); 556 might_sleep();
557 557
@@ -725,10 +725,11 @@ static void at86rf230_irqwork_level(struct work_struct *work)
725static irqreturn_t at86rf230_isr(int irq, void *data) 725static irqreturn_t at86rf230_isr(int irq, void *data)
726{ 726{
727 struct at86rf230_local *lp = data; 727 struct at86rf230_local *lp = data;
728 unsigned long flags;
728 729
729 spin_lock(&lp->lock); 730 spin_lock_irqsave(&lp->lock, flags);
730 lp->irq_busy = 1; 731 lp->irq_busy = 1;
731 spin_unlock(&lp->lock); 732 spin_unlock_irqrestore(&lp->lock, flags);
732 733
733 schedule_work(&lp->irqwork); 734 schedule_work(&lp->irqwork);
734 735
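
The at86rf230 change switches to spin_lock_irqsave() because the same lock is taken in the hard-IRQ handler; taking it with interrupts enabled in process context can deadlock if the IRQ fires on the same CPU while the lock is held. A sketch with hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t	lock;
	int		irq_busy;
};

static int demo_xmit_check(struct demo_dev *d)
{
	unsigned long flags;
	int busy;

	spin_lock_irqsave(&d->lock, flags);	/* was: spin_lock() */
	busy = d->irq_busy;
	spin_unlock_irqrestore(&d->lock, flags);

	return busy ? -EBUSY : 0;
}
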
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 19c9eca0ef26..76d96b9ebcdb 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -164,9 +164,9 @@ static const struct phy_setting settings[] = {
164 * of that setting. Returns the index of the last setting if 164 * of that setting. Returns the index of the last setting if
165 * none of the others match. 165 * none of the others match.
166 */ 166 */
167static inline int phy_find_setting(int speed, int duplex) 167static inline unsigned int phy_find_setting(int speed, int duplex)
168{ 168{
169 int idx = 0; 169 unsigned int idx = 0;
170 170
171 while (idx < ARRAY_SIZE(settings) && 171 while (idx < ARRAY_SIZE(settings) &&
172 (settings[idx].speed != speed || settings[idx].duplex != duplex)) 172 (settings[idx].speed != speed || settings[idx].duplex != duplex))
@@ -185,7 +185,7 @@ static inline int phy_find_setting(int speed, int duplex)
185 * the mask in features. Returns the index of the last setting 185 * the mask in features. Returns the index of the last setting
186 * if nothing else matches. 186 * if nothing else matches.
187 */ 187 */
188static inline int phy_find_valid(int idx, u32 features) 188static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
189{ 189{
190 while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features)) 190 while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
191 idx++; 191 idx++;
@@ -204,7 +204,7 @@ static inline int phy_find_valid(int idx, u32 features)
204static void phy_sanitize_settings(struct phy_device *phydev) 204static void phy_sanitize_settings(struct phy_device *phydev)
205{ 205{
206 u32 features = phydev->supported; 206 u32 features = phydev->supported;
207 int idx; 207 unsigned int idx;
208 208
209 /* Sanitize settings based on PHY capabilities */ 209 /* Sanitize settings based on PHY capabilities */
210 if ((features & SUPPORTED_Autoneg) == 0) 210 if ((features & SUPPORTED_Autoneg) == 0)
@@ -954,7 +954,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
954 (phydev->interface == PHY_INTERFACE_MODE_RGMII))) { 954 (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
955 int eee_lp, eee_cap, eee_adv; 955 int eee_lp, eee_cap, eee_adv;
956 u32 lp, cap, adv; 956 u32 lp, cap, adv;
957 int idx, status; 957 int status;
958 unsigned int idx;
958 959
959 /* Read phy status to properly get the right settings */ 960 /* Read phy status to properly get the right settings */
960 status = phy_read_status(phydev); 961 status = phy_read_status(phydev);
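
The phy.c change turns the setting indices into unsigned int, since they are compared against ARRAY_SIZE() and should not mix signed and unsigned operands. A sketch of the lookup style with a hypothetical table:

#include <linux/kernel.h>

static const int demo_settings[] = { 10, 100, 1000 };

static unsigned int demo_find_setting(int speed)
{
	unsigned int idx = 0;

	while (idx < ARRAY_SIZE(demo_settings) - 1 &&
	       demo_settings[idx] != speed)
		idx++;

	return idx;	/* index of the last entry if nothing matched */
}
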
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 433f0a00c683..e2797f1e1b31 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_USB_HSO) += hso.o
11obj-$(CONFIG_USB_NET_AX8817X) += asix.o 11obj-$(CONFIG_USB_NET_AX8817X) += asix.o
12asix-y := asix_devices.o asix_common.o ax88172a.o 12asix-y := asix_devices.o asix_common.o ax88172a.o
13obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o 13obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
14obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o 14obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
15obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o 15obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
16obj-$(CONFIG_USB_NET_DM9601) += dm9601.o 16obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
17obj-$(CONFIG_USB_NET_SR9700) += sr9700.o 17obj-$(CONFIG_USB_NET_SR9700) += sr9700.o
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index d2e6fdb25e28..054e59ca6946 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1029,20 +1029,12 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
1029 dev->mii.phy_id = 0x03; 1029 dev->mii.phy_id = 0x03;
1030 dev->mii.supports_gmii = 1; 1030 dev->mii.supports_gmii = 1;
1031 1031
1032 if (usb_device_no_sg_constraint(dev->udev))
1033 dev->can_dma_sg = 1;
1034
1035 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1032 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1036 NETIF_F_RXCSUM; 1033 NETIF_F_RXCSUM;
1037 1034
1038 dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1035 dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1039 NETIF_F_RXCSUM; 1036 NETIF_F_RXCSUM;
1040 1037
1041 if (dev->can_dma_sg) {
1042 dev->net->features |= NETIF_F_SG | NETIF_F_TSO;
1043 dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO;
1044 }
1045
1046 /* Enable checksum offload */ 1038 /* Enable checksum offload */
1047 *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | 1039 *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
1048 AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6; 1040 AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 42e176912c8e..bd363b27e854 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -652,6 +652,13 @@ static const struct usb_device_id products[] = {
652 .driver_info = 0, 652 .driver_info = 0,
653}, 653},
654 654
655/* Samsung USB Ethernet Adapters */
656{
657 USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, 0xa101, USB_CLASS_COMM,
658 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
659 .driver_info = 0,
660},
661
655/* WHITELIST!!! 662/* WHITELIST!!!
656 * 663 *
657 * CDC Ether uses two interfaces, not necessarily consecutive. 664 * CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d89dbe395ad2..adb12f349a61 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -449,9 +449,6 @@ enum rtl8152_flags {
449#define MCU_TYPE_PLA 0x0100 449#define MCU_TYPE_PLA 0x0100
450#define MCU_TYPE_USB 0x0000 450#define MCU_TYPE_USB 0x0000
451 451
452#define REALTEK_USB_DEVICE(vend, prod) \
453 USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC)
454
455struct rx_desc { 452struct rx_desc {
456 __le32 opts1; 453 __le32 opts1;
457#define RX_LEN_MASK 0x7fff 454#define RX_LEN_MASK 0x7fff
@@ -2739,6 +2736,12 @@ static int rtl8152_probe(struct usb_interface *intf,
2739 struct net_device *netdev; 2736 struct net_device *netdev;
2740 int ret; 2737 int ret;
2741 2738
2739 if (udev->actconfig->desc.bConfigurationValue != 1) {
2740 usb_driver_set_configuration(udev, 1);
2741 return -ENODEV;
2742 }
2743
2744 usb_reset_device(udev);
2742 netdev = alloc_etherdev(sizeof(struct r8152)); 2745 netdev = alloc_etherdev(sizeof(struct r8152));
2743 if (!netdev) { 2746 if (!netdev) {
2744 dev_err(&intf->dev, "Out of memory\n"); 2747 dev_err(&intf->dev, "Out of memory\n");
@@ -2819,9 +2822,9 @@ static void rtl8152_disconnect(struct usb_interface *intf)
2819 2822
2820/* table of devices that work with this driver */ 2823/* table of devices that work with this driver */
2821static struct usb_device_id rtl8152_table[] = { 2824static struct usb_device_id rtl8152_table[] = {
2822 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)}, 2825 {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
2823 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)}, 2826 {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)},
2824 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)}, 2827 {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)},
2825 {} 2828 {}
2826}; 2829};
2827 2830
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c
deleted file mode 100644
index f0a8791b7636..000000000000
--- a/drivers/net/usb/r815x.c
+++ /dev/null
@@ -1,248 +0,0 @@
1#include <linux/module.h>
2#include <linux/netdevice.h>
3#include <linux/mii.h>
4#include <linux/usb.h>
5#include <linux/usb/cdc.h>
6#include <linux/usb/usbnet.h>
7
8#define RTL815x_REQT_READ 0xc0
9#define RTL815x_REQT_WRITE 0x40
10#define RTL815x_REQ_GET_REGS 0x05
11#define RTL815x_REQ_SET_REGS 0x05
12
13#define MCU_TYPE_PLA 0x0100
14#define OCP_BASE 0xe86c
15#define BASE_MII 0xa400
16
17#define BYTE_EN_DWORD 0xff
18#define BYTE_EN_WORD 0x33
19#define BYTE_EN_BYTE 0x11
20
21#define R815x_PHY_ID 32
22#define REALTEK_VENDOR_ID 0x0bda
23
24
25static int pla_read_word(struct usb_device *udev, u16 index)
26{
27 int ret;
28 u8 shift = index & 2;
29 __le32 *tmp;
30
31 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
32 if (!tmp)
33 return -ENOMEM;
34
35 index &= ~3;
36
37 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
38 RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
39 index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
40 if (ret < 0)
41 goto out2;
42
43 ret = __le32_to_cpu(*tmp);
44 ret >>= (shift * 8);
45 ret &= 0xffff;
46
47out2:
48 kfree(tmp);
49 return ret;
50}
51
52static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
53{
54 __le32 *tmp;
55 u32 mask = 0xffff;
56 u16 byen = BYTE_EN_WORD;
57 u8 shift = index & 2;
58 int ret;
59
60 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
61 if (!tmp)
62 return -ENOMEM;
63
64 data &= mask;
65
66 if (shift) {
67 byen <<= shift;
68 mask <<= (shift * 8);
69 data <<= (shift * 8);
70 index &= ~3;
71 }
72
73 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
74 RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
75 index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
76 if (ret < 0)
77 goto out3;
78
79 data |= __le32_to_cpu(*tmp) & ~mask;
80 *tmp = __cpu_to_le32(data);
81
82 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
83 RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE,
84 index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp),
85 500);
86
87out3:
88 kfree(tmp);
89 return ret;
90}
91
92static int ocp_reg_read(struct usbnet *dev, u16 addr)
93{
94 u16 ocp_base, ocp_index;
95 int ret;
96
97 ocp_base = addr & 0xf000;
98 ret = pla_write_word(dev->udev, OCP_BASE, ocp_base);
99 if (ret < 0)
100 goto out;
101
102 ocp_index = (addr & 0x0fff) | 0xb000;
103 ret = pla_read_word(dev->udev, ocp_index);
104
105out:
106 return ret;
107}
108
109static int ocp_reg_write(struct usbnet *dev, u16 addr, u16 data)
110{
111 u16 ocp_base, ocp_index;
112 int ret;
113
114 ocp_base = addr & 0xf000;
115 ret = pla_write_word(dev->udev, OCP_BASE, ocp_base);
116 if (ret < 0)
117 goto out1;
118
119 ocp_index = (addr & 0x0fff) | 0xb000;
120 ret = pla_write_word(dev->udev, ocp_index, data);
121
122out1:
123 return ret;
124}
125
126static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg)
127{
128 struct usbnet *dev = netdev_priv(netdev);
129 int ret;
130
131 if (phy_id != R815x_PHY_ID)
132 return -EINVAL;
133
134 if (usb_autopm_get_interface(dev->intf) < 0)
135 return -ENODEV;
136
137 ret = ocp_reg_read(dev, BASE_MII + reg * 2);
138
139 usb_autopm_put_interface(dev->intf);
140 return ret;
141}
142
143static
144void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
145{
146 struct usbnet *dev = netdev_priv(netdev);
147
148 if (phy_id != R815x_PHY_ID)
149 return;
150
151 if (usb_autopm_get_interface(dev->intf) < 0)
152 return;
153
154 ocp_reg_write(dev, BASE_MII + reg * 2, val);
155
156 usb_autopm_put_interface(dev->intf);
157}
158
159static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
160{
161 int status;
162
163 status = usbnet_cdc_bind(dev, intf);
164 if (status < 0)
165 return status;
166
167 dev->mii.dev = dev->net;
168 dev->mii.mdio_read = r815x_mdio_read;
169 dev->mii.mdio_write = r815x_mdio_write;
170 dev->mii.phy_id_mask = 0x3f;
171 dev->mii.reg_num_mask = 0x1f;
172 dev->mii.phy_id = R815x_PHY_ID;
173 dev->mii.supports_gmii = 1;
174
175 return status;
176}
177
178static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
179{
180 int status;
181
182 status = usbnet_cdc_bind(dev, intf);
183 if (status < 0)
184 return status;
185
186 dev->mii.dev = dev->net;
187 dev->mii.mdio_read = r815x_mdio_read;
188 dev->mii.mdio_write = r815x_mdio_write;
189 dev->mii.phy_id_mask = 0x3f;
190 dev->mii.reg_num_mask = 0x1f;
191 dev->mii.phy_id = R815x_PHY_ID;
192 dev->mii.supports_gmii = 0;
193
194 return status;
195}
196
197static const struct driver_info r8152_info = {
198 .description = "RTL8152 ECM Device",
199 .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
200 .bind = r8152_bind,
201 .unbind = usbnet_cdc_unbind,
202 .status = usbnet_cdc_status,
203 .manage_power = usbnet_manage_power,
204};
205
206static const struct driver_info r8153_info = {
207 .description = "RTL8153 ECM Device",
208 .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
209 .bind = r8153_bind,
210 .unbind = usbnet_cdc_unbind,
211 .status = usbnet_cdc_status,
212 .manage_power = usbnet_manage_power,
213};
214
215static const struct usb_device_id products[] = {
216{
217 USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
218 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
219 .driver_info = (unsigned long) &r8152_info,
220},
221
222{
223 USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM,
224 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
225 .driver_info = (unsigned long) &r8153_info,
226},
227
228 { }, /* END */
229};
230MODULE_DEVICE_TABLE(usb, products);
231
232static struct usb_driver r815x_driver = {
233 .name = "r815x",
234 .id_table = products,
235 .probe = usbnet_probe,
236 .disconnect = usbnet_disconnect,
237 .suspend = usbnet_suspend,
238 .resume = usbnet_resume,
239 .reset_resume = usbnet_resume,
240 .supports_autosuspend = 1,
241 .disable_hub_initiated_lpm = 1,
242};
243
244module_usb_driver(r815x_driver);
245
246MODULE_AUTHOR("Hayes Wang");
247MODULE_DESCRIPTION("Realtek USB ECM device");
248MODULE_LICENSE("GPL");
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3be786faaaec..0fa3b44f7342 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1762,11 +1762,20 @@ vmxnet3_netpoll(struct net_device *netdev)
1762{ 1762{
1763 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1763 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1764 1764
1765 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) 1765 switch (adapter->intr.type) {
1766 vmxnet3_disable_all_intrs(adapter); 1766#ifdef CONFIG_PCI_MSI
1767 1767 case VMXNET3_IT_MSIX: {
1768 vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size); 1768 int i;
1769 vmxnet3_enable_all_intrs(adapter); 1769 for (i = 0; i < adapter->num_rx_queues; i++)
1770 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
1771 break;
1772 }
1773#endif
1774 case VMXNET3_IT_MSI:
1775 default:
1776 vmxnet3_intr(0, adapter->netdev);
1777 break;
1778 }
1770 1779
1771} 1780}
1772#endif /* CONFIG_NET_POLL_CONTROLLER */ 1781#endif /* CONFIG_NET_POLL_CONTROLLER */
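The netpoll rework above dispatches on the interrupt mode instead of masking and unmasking interrupts: with MSI-X every RX queue owns a vector handler that can be called directly, while MSI/INTx falls back to the single device handler. The sketch below only illustrates the shape of that dispatch; all example_ types and handlers are hypothetical stand-ins, and just netdev_priv() and the irqreturn_t plumbing are real kernel interfaces.

    #include <linux/netdevice.h>
    #include <linux/interrupt.h>

    struct example_rx_queue { int id; };

    struct example_adapter {
    	int			intr_type;	/* EXAMPLE_IT_MSIX or other */
    	int			num_rx_queues;
    	struct example_rx_queue	rx_queue[4];
    };

    #define EXAMPLE_IT_MSIX	1

    static irqreturn_t example_msix_rx(int irq, void *data) { return IRQ_HANDLED; }
    static irqreturn_t example_intr(int irq, void *data) { return IRQ_HANDLED; }

    #ifdef CONFIG_NET_POLL_CONTROLLER
    static void example_netpoll(struct net_device *netdev)
    {
    	struct example_adapter *adapter = netdev_priv(netdev);
    	int i;

    	if (adapter->intr_type == EXAMPLE_IT_MSIX) {
    		/* one vector per RX queue: poll each queue's handler */
    		for (i = 0; i < adapter->num_rx_queues; i++)
    			example_msix_rx(0, &adapter->rx_queue[i]);
    	} else {
    		/* MSI or INTx: one handler services the whole device */
    		example_intr(0, netdev);
    	}
    }
    #endif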
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 76cde6ce6551..18a895a949d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -872,8 +872,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
872 872
873 lockdep_assert_held(&mvm->mutex); 873 lockdep_assert_held(&mvm->mutex);
874 874
875 /* Rssi update while not associated ?! */ 875 /*
876 if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) 876 * Rssi update while not associated - can happen since the statistics
877 * are handled asynchronously
878 */
879 if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
877 return; 880 return;
878 881
879 /* No BT - reports should be disabled */ 882 /* No BT - reports should be disabled */
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index f47bcbe2945a..3872ead75488 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -359,13 +359,12 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
359/* 7265 Series */ 359/* 7265 Series */
360 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, 360 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
361 {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, 361 {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
362 {IWL_PCI_DEVICE(0x095A, 0x5112, iwl7265_2ac_cfg)},
363 {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, 362 {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
364 {IWL_PCI_DEVICE(0x095A, 0x510A, iwl7265_2ac_cfg)},
365 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, 363 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
366 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, 364 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
367 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, 365 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
368 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, 366 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
367 {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
369 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, 368 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, 370 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index 5e0eec4d71c7..5d9a8084665d 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -189,8 +189,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
189 vht_cap->header.len = 189 vht_cap->header.len =
190 cpu_to_le16(sizeof(struct ieee80211_vht_cap)); 190 cpu_to_le16(sizeof(struct ieee80211_vht_cap));
191 memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header), 191 memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header),
192 (u8 *)bss_desc->bcn_vht_cap + 192 (u8 *)bss_desc->bcn_vht_cap,
193 sizeof(struct ieee_types_header),
194 le16_to_cpu(vht_cap->header.len)); 193 le16_to_cpu(vht_cap->header.len));
195 194
196 mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band); 195 mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 6261f8c53d44..7db1a89fdd95 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -308,8 +308,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
308 ht_cap->header.len = 308 ht_cap->header.len =
309 cpu_to_le16(sizeof(struct ieee80211_ht_cap)); 309 cpu_to_le16(sizeof(struct ieee80211_ht_cap));
310 memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header), 310 memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header),
311 (u8 *) bss_desc->bcn_ht_cap + 311 (u8 *)bss_desc->bcn_ht_cap,
312 sizeof(struct ieee_types_header),
313 le16_to_cpu(ht_cap->header.len)); 312 le16_to_cpu(ht_cap->header.len));
314 313
315 mwifiex_fill_cap_info(priv, radio_type, ht_cap); 314 mwifiex_fill_cap_info(priv, radio_type, ht_cap);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 0a8a26e10f01..668547c2de84 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -2101,12 +2101,12 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
2101 curr_bss->ht_info_offset); 2101 curr_bss->ht_info_offset);
2102 2102
2103 if (curr_bss->bcn_vht_cap) 2103 if (curr_bss->bcn_vht_cap)
2104 curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf + 2104 curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf +
2105 curr_bss->vht_cap_offset); 2105 curr_bss->vht_cap_offset);
2106 2106
2107 if (curr_bss->bcn_vht_oper) 2107 if (curr_bss->bcn_vht_oper)
2108 curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf + 2108 curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf +
2109 curr_bss->vht_info_offset); 2109 curr_bss->vht_info_offset);
2110 2110
2111 if (curr_bss->bcn_bss_co_2040) 2111 if (curr_bss->bcn_bss_co_2040)
2112 curr_bss->bcn_bss_co_2040 = 2112 curr_bss->bcn_bss_co_2040 =
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index 123c4bb50e0a..cde0eaf99714 100644
--- a/drivers/net/wireless/ti/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
@@ -180,7 +180,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
180 wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length); 180 wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length);
181 181
182 /* The actual length doesn't include the target's alignment */ 182 /* The actual length doesn't include the target's alignment */
183 skb->len = desc->length - PLCP_HEADER_LENGTH; 183 skb_trim(skb, desc->length - PLCP_HEADER_LENGTH);
184 184
185 fc = (u16 *)skb->data; 185 fc = (u16 *)skb->data;
186 186
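Replacing the bare skb->len assignment with skb_trim() is the point of the wl1251 hunk: skb_trim() shortens the buffer by moving skb->tail and skb->len together, so later users of skb_tail_pointer() or skb_put() see a consistent buffer. A tiny illustrative helper, not taken from the driver:

    #include <linux/skbuff.h>

    /* Shorten a received frame to its real payload length. */
    static void example_trim_rx(struct sk_buff *skb, unsigned int payload_len)
    {
    	/* skb_trim() keeps skb->len and skb->tail in sync, unlike a
    	 * direct assignment to skb->len.
    	 */
    	if (payload_len < skb->len)
    		skb_trim(skb, payload_len);
    }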
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 7669d49a67e2..301cc037fda8 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -132,8 +132,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
132 /* If the skb is GSO then we'll also need an extra slot for the 132 /* If the skb is GSO then we'll also need an extra slot for the
133 * metadata. 133 * metadata.
134 */ 134 */
135 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || 135 if (skb_is_gso(skb))
136 skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
137 min_slots_needed++; 136 min_slots_needed++;
138 137
139 /* If the skb can't possibly fit in the remaining slots 138 /* If the skb can't possibly fit in the remaining slots
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e5284bca2d90..438d0c09b7e6 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -240,7 +240,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
240 struct gnttab_copy *copy_gop; 240 struct gnttab_copy *copy_gop;
241 struct xenvif_rx_meta *meta; 241 struct xenvif_rx_meta *meta;
242 unsigned long bytes; 242 unsigned long bytes;
243 int gso_type; 243 int gso_type = XEN_NETIF_GSO_TYPE_NONE;
244 244
245 /* Data must not cross a page boundary. */ 245 /* Data must not cross a page boundary. */
246 BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); 246 BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -299,12 +299,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
299 } 299 }
300 300
301 /* Leave a gap for the GSO descriptor. */ 301 /* Leave a gap for the GSO descriptor. */
302 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 302 if (skb_is_gso(skb)) {
303 gso_type = XEN_NETIF_GSO_TYPE_TCPV4; 303 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
304 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 304 gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
305 gso_type = XEN_NETIF_GSO_TYPE_TCPV6; 305 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
306 else 306 gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
307 gso_type = XEN_NETIF_GSO_TYPE_NONE; 307 }
308 308
309 if (*head && ((1 << gso_type) & vif->gso_mask)) 309 if (*head && ((1 << gso_type) & vif->gso_mask))
310 vif->rx.req_cons++; 310 vif->rx.req_cons++;
@@ -338,19 +338,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
338 int head = 1; 338 int head = 1;
339 int old_meta_prod; 339 int old_meta_prod;
340 int gso_type; 340 int gso_type;
341 int gso_size;
342 341
343 old_meta_prod = npo->meta_prod; 342 old_meta_prod = npo->meta_prod;
344 343
345 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 344 gso_type = XEN_NETIF_GSO_TYPE_NONE;
346 gso_type = XEN_NETIF_GSO_TYPE_TCPV4; 345 if (skb_is_gso(skb)) {
347 gso_size = skb_shinfo(skb)->gso_size; 346 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
348 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { 347 gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
349 gso_type = XEN_NETIF_GSO_TYPE_TCPV6; 348 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
350 gso_size = skb_shinfo(skb)->gso_size; 349 gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
351 } else {
352 gso_type = XEN_NETIF_GSO_TYPE_NONE;
353 gso_size = 0;
354 } 350 }
355 351
356 /* Set up a GSO prefix descriptor, if necessary */ 352 /* Set up a GSO prefix descriptor, if necessary */
@@ -358,7 +354,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
358 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 354 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
359 meta = npo->meta + npo->meta_prod++; 355 meta = npo->meta + npo->meta_prod++;
360 meta->gso_type = gso_type; 356 meta->gso_type = gso_type;
361 meta->gso_size = gso_size; 357 meta->gso_size = skb_shinfo(skb)->gso_size;
362 meta->size = 0; 358 meta->size = 0;
363 meta->id = req->id; 359 meta->id = req->id;
364 } 360 }
@@ -368,7 +364,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
368 364
369 if ((1 << gso_type) & vif->gso_mask) { 365 if ((1 << gso_type) & vif->gso_mask) {
370 meta->gso_type = gso_type; 366 meta->gso_type = gso_type;
371 meta->gso_size = gso_size; 367 meta->gso_size = skb_shinfo(skb)->gso_size;
372 } else { 368 } else {
373 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; 369 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
374 meta->gso_size = 0; 370 meta->gso_size = 0;
@@ -500,8 +496,9 @@ static void xenvif_rx_action(struct xenvif *vif)
500 size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 496 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
501 max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE); 497 max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
502 } 498 }
503 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || 499 if (skb_is_gso(skb) &&
504 skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 500 (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
501 skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
505 max_slots_needed++; 502 max_slots_needed++;
506 503
507 /* If the skb may not fit then bail out now */ 504 /* If the skb may not fit then bail out now */
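Across the two xen-netback files the open-coded SKB_GSO_TCPV4/TCPV6 tests are funneled through skb_is_gso(), and the Xen-visible GSO type now defaults to NONE unless the skb is actually GSO (gso_size is likewise read straight from skb_shinfo() where it is needed). A compact sketch of that mapping follows; the helper name is hypothetical, while the skb accessors and XEN_NETIF_GSO_TYPE_* constants are the interfaces used by the hunks above.

    #include <linux/skbuff.h>
    #include <xen/interface/io/netif.h>

    /* Map an skb's GSO type to the value advertised on the Xen ring. */
    static u8 example_xen_gso_type(const struct sk_buff *skb)
    {
    	if (!skb_is_gso(skb))
    		return XEN_NETIF_GSO_TYPE_NONE;

    	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
    		return XEN_NETIF_GSO_TYPE_TCPV4;
    	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
    		return XEN_NETIF_GSO_TYPE_TCPV6;

    	return XEN_NETIF_GSO_TYPE_NONE;
    }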
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 00660cc502c5..38901665c770 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -162,8 +162,6 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
162 162
163 avail = *r; 163 avail = *r;
164 pci_clip_resource_to_region(bus, &avail, region); 164 pci_clip_resource_to_region(bus, &avail, region);
165 if (!resource_size(&avail))
166 continue;
167 165
168 /* 166 /*
169 * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to 167 * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6b05f6134b68..fdbc294821e6 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1192,6 +1192,9 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
1192 return err; 1192 return err;
1193 pci_fixup_device(pci_fixup_enable, dev); 1193 pci_fixup_device(pci_fixup_enable, dev);
1194 1194
1195 if (dev->msi_enabled || dev->msix_enabled)
1196 return 0;
1197
1195 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); 1198 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1196 if (pin) { 1199 if (pin) {
1197 pci_read_config_word(dev, PCI_COMMAND, &cmd); 1200 pci_read_config_word(dev, PCI_COMMAND, &cmd);
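The early return added to do_pci_enable_device() means legacy INTx setup is skipped once MSI or MSI-X is enabled and the interrupt pin is unused. The sketch below isolates that check; example_ is a placeholder, the config-space accessors and flag bits are standard PCI core interfaces.

    #include <linux/pci.h>

    static void example_enable_intx_if_needed(struct pci_dev *dev)
    {
    	u8 pin;
    	u16 cmd;

    	/* With MSI/MSI-X active the legacy interrupt pin is not used. */
    	if (dev->msi_enabled || dev->msix_enabled)
    		return;

    	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
    	if (!pin)
    		return;

    	pci_read_config_word(dev, PCI_COMMAND, &cmd);
    	if (cmd & PCI_COMMAND_INTX_DISABLE)
    		pci_write_config_word(dev, PCI_COMMAND,
    				      cmd & ~PCI_COMMAND_INTX_DISABLE);
    }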
diff --git a/drivers/pinctrl/pinctrl-capri.c b/drivers/pinctrl/pinctrl-capri.c
index 4669c53f99b0..eb2500212147 100644
--- a/drivers/pinctrl/pinctrl-capri.c
+++ b/drivers/pinctrl/pinctrl-capri.c
@@ -1435,7 +1435,7 @@ int __init capri_pinctrl_probe(struct platform_device *pdev)
1435} 1435}
1436 1436
1437static struct of_device_id capri_pinctrl_of_match[] = { 1437static struct of_device_id capri_pinctrl_of_match[] = {
1438 { .compatible = "brcm,capri-pinctrl", }, 1438 { .compatible = "brcm,bcm11351-pinctrl", },
1439 { }, 1439 { },
1440}; 1440};
1441 1441
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 167f3d00c916..66977ebf13b3 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -183,9 +183,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
183 struct resource r = {0}; 183 struct resource r = {0};
184 int i, flags; 184 int i, flags;
185 185
186 if (acpi_dev_resource_memory(res, &r) 186 if (acpi_dev_resource_address_space(res, &r)
187 || acpi_dev_resource_io(res, &r)
188 || acpi_dev_resource_address_space(res, &r)
189 || acpi_dev_resource_ext_address_space(res, &r)) { 187 || acpi_dev_resource_ext_address_space(res, &r)) {
190 pnp_add_resource(dev, &r); 188 pnp_add_resource(dev, &r);
191 return AE_OK; 189 return AE_OK;
@@ -217,6 +215,17 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
217 } 215 }
218 216
219 switch (res->type) { 217 switch (res->type) {
218 case ACPI_RESOURCE_TYPE_MEMORY24:
219 case ACPI_RESOURCE_TYPE_MEMORY32:
220 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
221 if (acpi_dev_resource_memory(res, &r))
222 pnp_add_resource(dev, &r);
223 break;
224 case ACPI_RESOURCE_TYPE_IO:
225 case ACPI_RESOURCE_TYPE_FIXED_IO:
226 if (acpi_dev_resource_io(res, &r))
227 pnp_add_resource(dev, &r);
228 break;
220 case ACPI_RESOURCE_TYPE_DMA: 229 case ACPI_RESOURCE_TYPE_DMA:
221 dma = &res->data.dma; 230 dma = &res->data.dma;
222 if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) 231 if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
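The rsparser change stops running memory and I/O descriptors through the generic helpers up front and instead dispatches on the ACPI resource type, so MEMORY24/32 and (FIXED_)IO descriptors are handled explicitly. A reduced sketch of that dispatch is shown below; the surrounding loop, the other resource types, and error handling are omitted, and example_parse_resource() is a made-up name.

    #include <linux/acpi.h>
    #include <linux/pnp.h>

    static void example_parse_resource(struct pnp_dev *dev,
    				   struct acpi_resource *res)
    {
    	struct resource r = {0};

    	switch (res->type) {
    	case ACPI_RESOURCE_TYPE_MEMORY24:
    	case ACPI_RESOURCE_TYPE_MEMORY32:
    	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
    		if (acpi_dev_resource_memory(res, &r))
    			pnp_add_resource(dev, &r);
    		break;
    	case ACPI_RESOURCE_TYPE_IO:
    	case ACPI_RESOURCE_TYPE_FIXED_IO:
    		if (acpi_dev_resource_io(res, &r))
    			pnp_add_resource(dev, &r);
    		break;
    	default:
    		break;
    	}
    }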
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 1f375051483a..5642a9b250c2 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -325,7 +325,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
325 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) 325 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
326 continue; 326 continue;
327 327
328 if (abrt_task->sc->device->lun != abrt_task->sc->device->lun) 328 if (sc->device->lun != abrt_task->sc->device->lun)
329 continue; 329 continue;
330 330
331 /* Invalidate WRB Posted for this Task */ 331 /* Invalidate WRB Posted for this Task */
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 4911310a38f5..22a9bb1abae1 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -311,9 +311,8 @@ static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
311} 311}
312 312
313#define for_each_isci_host(id, ihost, pdev) \ 313#define for_each_isci_host(id, ihost, pdev) \
314 for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \ 314 for (id = 0; id < SCI_MAX_CONTROLLERS && \
315 id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ 315 (ihost = to_pci_info(pdev)->hosts[id]); id++)
316 ihost = to_pci_info(pdev)->hosts[++id])
317 316
318static inline void wait_for_start(struct isci_host *ihost) 317static inline void wait_for_start(struct isci_host *ihost)
319{ 318{
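The rewritten for_each_isci_host() checks the index bound before dereferencing hosts[id] and stops at the first NULL slot, closing the out-of-range read the old three-clause form allowed. Below is the same bounded-iteration idiom written against a hypothetical container (all example_ names are made up):

    #define EXAMPLE_MAX_HOSTS	2

    struct example_host;			/* opaque for this sketch */

    struct example_pci_info {
    	struct example_host *hosts[EXAMPLE_MAX_HOSTS];
    };

    /* The bound is tested before hosts[id] is read, and iteration stops
     * at the first empty slot.
     */
    #define for_each_example_host(id, host, info)			\
    	for ((id) = 0; (id) < EXAMPLE_MAX_HOSTS &&		\
    	     ((host) = (info)->hosts[id]); (id)++)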
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 85c77f6b802b..ac879745ef80 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -615,13 +615,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
615 SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); 615 SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
616 } else { 616 } else {
617 /* the phy is already the part of the port */ 617 /* the phy is already the part of the port */
618 u32 port_state = iport->sm.current_state_id;
619
620 /* if the PORT'S state is resetting then the link up is from
621 * port hard reset in this case, we need to tell the port
622 * that link up is recieved
623 */
624 BUG_ON(port_state != SCI_PORT_RESETTING);
625 port_agent->phy_ready_mask |= 1 << phy_index; 618 port_agent->phy_ready_mask |= 1 << phy_index;
626 sci_port_link_up(iport, iphy); 619 sci_port_link_up(iport, iphy);
627 } 620 }
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 0d30ca849e8f..5d6fda72d659 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -801,7 +801,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
801 /* XXX: need to cleanup any ireqs targeting this 801 /* XXX: need to cleanup any ireqs targeting this
802 * domain_device 802 * domain_device
803 */ 803 */
804 ret = TMF_RESP_FUNC_COMPLETE; 804 ret = -ENODEV;
805 goto out; 805 goto out;
806 } 806 }
807 807
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e1fe95ef23e1..266724b6b899 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2996,8 +2996,7 @@ struct qla_hw_data {
2996 IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ 2996 IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
2997 IS_QLA8044(ha)) 2997 IS_QLA8044(ha))
2998#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) 2998#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
2999#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ 2999#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
3000 IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
3001#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) 3000#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3002#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) 3001#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3003#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) 3002#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 9bc86b9e86b1..0a1dcb43d18b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2880,6 +2880,7 @@ static int
2880qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 2880qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2881{ 2881{
2882#define MIN_MSIX_COUNT 2 2882#define MIN_MSIX_COUNT 2
2883#define ATIO_VECTOR 2
2883 int i, ret; 2884 int i, ret;
2884 struct msix_entry *entries; 2885 struct msix_entry *entries;
2885 struct qla_msix_entry *qentry; 2886 struct qla_msix_entry *qentry;
@@ -2936,34 +2937,47 @@ msix_failed:
2936 } 2937 }
2937 2938
2938 /* Enable MSI-X vectors for the base queue */ 2939 /* Enable MSI-X vectors for the base queue */
2939 for (i = 0; i < ha->msix_count; i++) { 2940 for (i = 0; i < 2; i++) {
2940 qentry = &ha->msix_entries[i]; 2941 qentry = &ha->msix_entries[i];
2941 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { 2942 if (IS_P3P_TYPE(ha))
2942 ret = request_irq(qentry->vector,
2943 qla83xx_msix_entries[i].handler,
2944 0, qla83xx_msix_entries[i].name, rsp);
2945 } else if (IS_P3P_TYPE(ha)) {
2946 ret = request_irq(qentry->vector, 2943 ret = request_irq(qentry->vector,
2947 qla82xx_msix_entries[i].handler, 2944 qla82xx_msix_entries[i].handler,
2948 0, qla82xx_msix_entries[i].name, rsp); 2945 0, qla82xx_msix_entries[i].name, rsp);
2949 } else { 2946 else
2950 ret = request_irq(qentry->vector, 2947 ret = request_irq(qentry->vector,
2951 msix_entries[i].handler, 2948 msix_entries[i].handler,
2952 0, msix_entries[i].name, rsp); 2949 0, msix_entries[i].name, rsp);
2953 } 2950 if (ret)
2954 if (ret) { 2951 goto msix_register_fail;
2955 ql_log(ql_log_fatal, vha, 0x00cb,
2956 "MSI-X: unable to register handler -- %x/%d.\n",
2957 qentry->vector, ret);
2958 qla24xx_disable_msix(ha);
2959 ha->mqenable = 0;
2960 goto msix_out;
2961 }
2962 qentry->have_irq = 1; 2952 qentry->have_irq = 1;
2963 qentry->rsp = rsp; 2953 qentry->rsp = rsp;
2964 rsp->msix = qentry; 2954 rsp->msix = qentry;
2965 } 2955 }
2966 2956
2957 /*
2958 * If target mode is enable, also request the vector for the ATIO
2959 * queue.
2960 */
2961 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
2962 qentry = &ha->msix_entries[ATIO_VECTOR];
2963 ret = request_irq(qentry->vector,
2964 qla83xx_msix_entries[ATIO_VECTOR].handler,
2965 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
2966 qentry->have_irq = 1;
2967 qentry->rsp = rsp;
2968 rsp->msix = qentry;
2969 }
2970
2971msix_register_fail:
2972 if (ret) {
2973 ql_log(ql_log_fatal, vha, 0x00cb,
2974 "MSI-X: unable to register handler -- %x/%d.\n",
2975 qentry->vector, ret);
2976 qla24xx_disable_msix(ha);
2977 ha->mqenable = 0;
2978 goto msix_out;
2979 }
2980
2967 /* Enable MSI-X vector for response queue update for queue 0 */ 2981 /* Enable MSI-X vector for response queue update for queue 0 */
2968 if (IS_QLA83XX(ha)) { 2982 if (IS_QLA83XX(ha)) {
2969 if (ha->msixbase && ha->mqiobase && 2983 if (ha->msixbase && ha->mqiobase &&
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 17d740427240..9969fa1ef7c4 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1419,6 +1419,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice)
1419{ 1419{
1420 struct stor_mem_pools *memp = sdevice->hostdata; 1420 struct stor_mem_pools *memp = sdevice->hostdata;
1421 1421
1422 if (!memp)
1423 return;
1424
1422 mempool_destroy(memp->request_mempool); 1425 mempool_destroy(memp->request_mempool);
1423 kmem_cache_destroy(memp->request_pool); 1426 kmem_cache_destroy(memp->request_pool);
1424 kfree(memp); 1427 kfree(memp);
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 31534b51715a..c3b2fb9b6713 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -132,9 +132,9 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
132 132
133 flags = GPIOF_DIR_OUT; 133 flags = GPIOF_DIR_OUT;
134 if (spi->mode & SPI_CS_HIGH) 134 if (spi->mode & SPI_CS_HIGH)
135 flags |= GPIOF_INIT_HIGH;
136 else
137 flags |= GPIOF_INIT_LOW; 135 flags |= GPIOF_INIT_LOW;
136 else
137 flags |= GPIOF_INIT_HIGH;
138 138
139 status = gpio_request_one(cdata->gpio, flags, 139 status = gpio_request_one(cdata->gpio, flags,
140 dev_name(&spi->dev)); 140 dev_name(&spi->dev));
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index b0842f751016..5d7b07f08326 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1455,6 +1455,14 @@ static int atmel_spi_suspend(struct device *dev)
1455{ 1455{
1456 struct spi_master *master = dev_get_drvdata(dev); 1456 struct spi_master *master = dev_get_drvdata(dev);
1457 struct atmel_spi *as = spi_master_get_devdata(master); 1457 struct atmel_spi *as = spi_master_get_devdata(master);
1458 int ret;
1459
1460 /* Stop the queue running */
1461 ret = spi_master_suspend(master);
1462 if (ret) {
1463 dev_warn(dev, "cannot suspend master\n");
1464 return ret;
1465 }
1458 1466
1459 clk_disable_unprepare(as->clk); 1467 clk_disable_unprepare(as->clk);
1460 return 0; 1468 return 0;
@@ -1464,9 +1472,16 @@ static int atmel_spi_resume(struct device *dev)
1464{ 1472{
1465 struct spi_master *master = dev_get_drvdata(dev); 1473 struct spi_master *master = dev_get_drvdata(dev);
1466 struct atmel_spi *as = spi_master_get_devdata(master); 1474 struct atmel_spi *as = spi_master_get_devdata(master);
1475 int ret;
1467 1476
1468 clk_prepare_enable(as->clk); 1477 clk_prepare_enable(as->clk);
1469 return 0; 1478
1479 /* Start the queue running */
1480 ret = spi_master_resume(master);
1481 if (ret)
1482 dev_err(dev, "problem starting queue (%d)\n", ret);
1483
1484 return ret;
1470} 1485}
1471 1486
1472static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume); 1487static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume);
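The atmel-spi hunks wrap the clock gating with spi_master_suspend()/spi_master_resume() so the message queue is drained before the controller loses its clock and restarted only after it is running again. Here is a sketch of the resulting PM callbacks; the example_spi structure is hypothetical, while the SPI core and clk calls are the same APIs used above.

    #include <linux/clk.h>
    #include <linux/spi/spi.h>

    struct example_spi {
    	struct clk *clk;
    };

    static int example_spi_suspend(struct device *dev)
    {
    	struct spi_master *master = dev_get_drvdata(dev);
    	struct example_spi *hw = spi_master_get_devdata(master);
    	int ret;

    	/* Stop the queue first so no transfer races the clock gating. */
    	ret = spi_master_suspend(master);
    	if (ret)
    		return ret;

    	clk_disable_unprepare(hw->clk);
    	return 0;
    }

    static int example_spi_resume(struct device *dev)
    {
    	struct spi_master *master = dev_get_drvdata(dev);
    	struct example_spi *hw = spi_master_get_devdata(master);

    	clk_prepare_enable(hw->clk);
    	/* Restart the queue only after the hardware is clocked again. */
    	return spi_master_resume(master);
    }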
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index cabed8f9119e..28ae470397a9 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -514,7 +514,8 @@ static int mcfqspi_resume(struct device *dev)
514#ifdef CONFIG_PM_RUNTIME 514#ifdef CONFIG_PM_RUNTIME
515static int mcfqspi_runtime_suspend(struct device *dev) 515static int mcfqspi_runtime_suspend(struct device *dev)
516{ 516{
517 struct mcfqspi *mcfqspi = dev_get_drvdata(dev); 517 struct spi_master *master = dev_get_drvdata(dev);
518 struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
518 519
519 clk_disable(mcfqspi->clk); 520 clk_disable(mcfqspi->clk);
520 521
@@ -523,7 +524,8 @@ static int mcfqspi_runtime_suspend(struct device *dev)
523 524
524static int mcfqspi_runtime_resume(struct device *dev) 525static int mcfqspi_runtime_resume(struct device *dev)
525{ 526{
526 struct mcfqspi *mcfqspi = dev_get_drvdata(dev); 527 struct spi_master *master = dev_get_drvdata(dev);
528 struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
527 529
528 clk_enable(mcfqspi->clk); 530 clk_enable(mcfqspi->clk);
529 531
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index ec79f726672a..a25392065d9b 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -420,7 +420,6 @@ static int dspi_suspend(struct device *dev)
420 420
421static int dspi_resume(struct device *dev) 421static int dspi_resume(struct device *dev)
422{ 422{
423
424 struct spi_master *master = dev_get_drvdata(dev); 423 struct spi_master *master = dev_get_drvdata(dev);
425 struct fsl_dspi *dspi = spi_master_get_devdata(master); 424 struct fsl_dspi *dspi = spi_master_get_devdata(master);
426 425
@@ -504,7 +503,7 @@ static int dspi_probe(struct platform_device *pdev)
504 clk_prepare_enable(dspi->clk); 503 clk_prepare_enable(dspi->clk);
505 504
506 init_waitqueue_head(&dspi->waitq); 505 init_waitqueue_head(&dspi->waitq);
507 platform_set_drvdata(pdev, dspi); 506 platform_set_drvdata(pdev, master);
508 507
509 ret = spi_bitbang_start(&dspi->bitbang); 508 ret = spi_bitbang_start(&dspi->bitbang);
510 if (ret != 0) { 509 if (ret != 0) {
@@ -525,7 +524,8 @@ out_master_put:
525 524
526static int dspi_remove(struct platform_device *pdev) 525static int dspi_remove(struct platform_device *pdev)
527{ 526{
528 struct fsl_dspi *dspi = platform_get_drvdata(pdev); 527 struct spi_master *master = platform_get_drvdata(pdev);
528 struct fsl_dspi *dspi = spi_master_get_devdata(master);
529 529
530 /* Disconnect from the SPI framework */ 530 /* Disconnect from the SPI framework */
531 spi_bitbang_stop(&dspi->bitbang); 531 spi_bitbang_stop(&dspi->bitbang);
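The coldfire-qspi and fsl-dspi fixes are both about what drvdata points to: probe stores the spi_master, so remove() and the runtime-PM hooks must fetch the master first and reach the private state through spi_master_get_devdata(). A minimal sketch of the matched probe/remove pair follows, with a hypothetical example_dspi private struct.

    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    struct example_dspi {
    	int dummy;
    };

    static int example_probe(struct platform_device *pdev)
    {
    	struct spi_master *master;

    	master = spi_alloc_master(&pdev->dev, sizeof(struct example_dspi));
    	if (!master)
    		return -ENOMEM;

    	/* Store the master, not the private struct: remove() and the PM
    	 * hooks all expect drvdata to be the spi_master.
    	 */
    	platform_set_drvdata(pdev, master);
    	return 0;
    }

    static int example_remove(struct platform_device *pdev)
    {
    	struct spi_master *master = platform_get_drvdata(pdev);
    	struct example_dspi *dspi = spi_master_get_devdata(master);

    	(void)dspi;	/* teardown of the private state would go here */
    	spi_master_put(master);
    	return 0;
    }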
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index a5474ef9d2a0..47f15d97e7fa 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -948,8 +948,8 @@ static int spi_imx_remove(struct platform_device *pdev)
948 spi_bitbang_stop(&spi_imx->bitbang); 948 spi_bitbang_stop(&spi_imx->bitbang);
949 949
950 writel(0, spi_imx->base + MXC_CSPICTRL); 950 writel(0, spi_imx->base + MXC_CSPICTRL);
951 clk_disable_unprepare(spi_imx->clk_ipg); 951 clk_unprepare(spi_imx->clk_ipg);
952 clk_disable_unprepare(spi_imx->clk_per); 952 clk_unprepare(spi_imx->clk_per);
953 spi_master_put(master); 953 spi_master_put(master);
954 954
955 return 0; 955 return 0;
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 2e7f38c7a961..88eb57e858b3 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -915,7 +915,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
915 /* Set Tx DMA */ 915 /* Set Tx DMA */
916 param = &dma->param_tx; 916 param = &dma->param_tx;
917 param->dma_dev = &dma_dev->dev; 917 param->dma_dev = &dma_dev->dev;
918 param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */ 918 param->chan_id = data->ch * 2; /* Tx = 0, 2 */;
919 param->tx_reg = data->io_base_addr + PCH_SPDWR; 919 param->tx_reg = data->io_base_addr + PCH_SPDWR;
920 param->width = width; 920 param->width = width;
921 chan = dma_request_channel(mask, pch_spi_filter, param); 921 chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -930,7 +930,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
930 /* Set Rx DMA */ 930 /* Set Rx DMA */
931 param = &dma->param_rx; 931 param = &dma->param_rx;
932 param->dma_dev = &dma_dev->dev; 932 param->dma_dev = &dma_dev->dev;
933 param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */ 933 param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */;
934 param->rx_reg = data->io_base_addr + PCH_SPDRR; 934 param->rx_reg = data->io_base_addr + PCH_SPDRR;
935 param->width = width; 935 param->width = width;
936 chan = dma_request_channel(mask, pch_spi_filter, param); 936 chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -1452,6 +1452,11 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
1452 1452
1453 pch_spi_set_master_mode(master); 1453 pch_spi_set_master_mode(master);
1454 1454
1455 if (use_dma) {
1456 dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
1457 pch_alloc_dma_buf(board_dat, data);
1458 }
1459
1455 ret = spi_register_master(master); 1460 ret = spi_register_master(master);
1456 if (ret != 0) { 1461 if (ret != 0) {
1457 dev_err(&plat_dev->dev, 1462 dev_err(&plat_dev->dev,
@@ -1459,14 +1464,10 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
1459 goto err_spi_register_master; 1464 goto err_spi_register_master;
1460 } 1465 }
1461 1466
1462 if (use_dma) {
1463 dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
1464 pch_alloc_dma_buf(board_dat, data);
1465 }
1466
1467 return 0; 1467 return 0;
1468 1468
1469err_spi_register_master: 1469err_spi_register_master:
1470 pch_free_dma_buf(board_dat, data);
1470 free_irq(board_dat->pdev->irq, data); 1471 free_irq(board_dat->pdev->irq, data);
1471err_request_irq: 1472err_request_irq:
1472 pch_spi_free_resources(board_dat, data); 1473 pch_spi_free_resources(board_dat, data);
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index 4a08e16e42f7..79206cb3fb94 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -866,6 +866,8 @@ c4_ioctl (struct net_device *ndev, struct ifreq *ifr, int cmd)
866 _IOC_SIZE (iocmd)); 866 _IOC_SIZE (iocmd));
867#endif 867#endif
868 iolen = _IOC_SIZE (iocmd); 868 iolen = _IOC_SIZE (iocmd);
869 if (iolen > sizeof(arg))
870 return -EFAULT;
869 data = ifr->ifr_data + sizeof (iocmd); 871 data = ifr->ifr_data + sizeof (iocmd);
870 if (copy_from_user (&arg, data, iolen)) 872 if (copy_from_user (&arg, data, iolen))
871 return -EFAULT; 873 return -EFAULT;
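The two lines added to c4_ioctl() are a classic bounds check: _IOC_SIZE() comes straight from the user-supplied ioctl number, so it must be validated against the kernel buffer before copy_from_user(), or a crafted ioctl overflows the stack buffer. A generic sketch of the pattern (the helper name is made up):

    #include <linux/uaccess.h>
    #include <linux/ioctl.h>

    /* Copy an ioctl argument of caller-declared size safely. */
    static int example_copy_ioctl_arg(void *kbuf, size_t kbuf_size,
    				  unsigned int iocmd,
    				  const void __user *udata)
    {
    	size_t iolen = _IOC_SIZE(iocmd);

    	/* Never trust a user-controlled length against a fixed buffer. */
    	if (iolen > kbuf_size)
    		return -EFAULT;

    	if (copy_from_user(kbuf, udata, iolen))
    		return -EFAULT;

    	return 0;
    }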
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 7f1a7ce4b771..b83ec378d04f 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -785,7 +785,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
785 spin_unlock_bh(&conn->cmd_lock); 785 spin_unlock_bh(&conn->cmd_lock);
786 786
787 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) { 787 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
788 list_del(&cmd->i_conn_node); 788 list_del_init(&cmd->i_conn_node);
789 iscsit_free_cmd(cmd, false); 789 iscsit_free_cmd(cmd, false);
790 } 790 }
791} 791}
@@ -3708,7 +3708,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
3708 break; 3708 break;
3709 case ISTATE_REMOVE: 3709 case ISTATE_REMOVE:
3710 spin_lock_bh(&conn->cmd_lock); 3710 spin_lock_bh(&conn->cmd_lock);
3711 list_del(&cmd->i_conn_node); 3711 list_del_init(&cmd->i_conn_node);
3712 spin_unlock_bh(&conn->cmd_lock); 3712 spin_unlock_bh(&conn->cmd_lock);
3713 3713
3714 iscsit_free_cmd(cmd, false); 3714 iscsit_free_cmd(cmd, false);
@@ -4151,7 +4151,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
4151 spin_lock_bh(&conn->cmd_lock); 4151 spin_lock_bh(&conn->cmd_lock);
4152 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { 4152 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
4153 4153
4154 list_del(&cmd->i_conn_node); 4154 list_del_init(&cmd->i_conn_node);
4155 spin_unlock_bh(&conn->cmd_lock); 4155 spin_unlock_bh(&conn->cmd_lock);
4156 4156
4157 iscsit_increment_maxcmdsn(cmd, sess); 4157 iscsit_increment_maxcmdsn(cmd, sess);
@@ -4196,6 +4196,10 @@ int iscsit_close_connection(
4196 iscsit_stop_timers_for_cmds(conn); 4196 iscsit_stop_timers_for_cmds(conn);
4197 iscsit_stop_nopin_response_timer(conn); 4197 iscsit_stop_nopin_response_timer(conn);
4198 iscsit_stop_nopin_timer(conn); 4198 iscsit_stop_nopin_timer(conn);
4199
4200 if (conn->conn_transport->iscsit_wait_conn)
4201 conn->conn_transport->iscsit_wait_conn(conn);
4202
4199 iscsit_free_queue_reqs_for_conn(conn); 4203 iscsit_free_queue_reqs_for_conn(conn);
4200 4204
4201 /* 4205 /*
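The many list_del() to list_del_init() conversions in the iSCSI target share one rationale: the command may still be inspected, or removed again, after it leaves the per-connection list, and list_del_init() leaves the node as a valid empty list instead of poisoned pointers. A tiny sketch of the difference, with hypothetical names:

    #include <linux/list.h>
    #include <linux/types.h>

    struct example_cmd {
    	struct list_head node;
    	int payload;
    };

    static void example_remove_from_conn(struct example_cmd *cmd)
    {
    	/* list_del_init() re-initializes cmd->node, so later code may call
    	 * list_empty(&cmd->node) or remove it again without touching the
    	 * poisoned pointers that plain list_del() leaves behind.
    	 */
    	list_del_init(&cmd->node);
    }

    static bool example_still_queued(const struct example_cmd *cmd)
    {
    	return !list_empty(&cmd->node);
    }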
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 33be1fb1df32..4ca8fd2a70db 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
138 list_for_each_entry_safe(cmd, cmd_tmp, 138 list_for_each_entry_safe(cmd, cmd_tmp,
139 &cr->conn_recovery_cmd_list, i_conn_node) { 139 &cr->conn_recovery_cmd_list, i_conn_node) {
140 140
141 list_del(&cmd->i_conn_node); 141 list_del_init(&cmd->i_conn_node);
142 cmd->conn = NULL; 142 cmd->conn = NULL;
143 spin_unlock(&cr->conn_recovery_cmd_lock); 143 spin_unlock(&cr->conn_recovery_cmd_lock);
144 iscsit_free_cmd(cmd, true); 144 iscsit_free_cmd(cmd, true);
@@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
160 list_for_each_entry_safe(cmd, cmd_tmp, 160 list_for_each_entry_safe(cmd, cmd_tmp,
161 &cr->conn_recovery_cmd_list, i_conn_node) { 161 &cr->conn_recovery_cmd_list, i_conn_node) {
162 162
163 list_del(&cmd->i_conn_node); 163 list_del_init(&cmd->i_conn_node);
164 cmd->conn = NULL; 164 cmd->conn = NULL;
165 spin_unlock(&cr->conn_recovery_cmd_lock); 165 spin_unlock(&cr->conn_recovery_cmd_lock);
166 iscsit_free_cmd(cmd, true); 166 iscsit_free_cmd(cmd, true);
@@ -216,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery(
216 } 216 }
217 cr = cmd->cr; 217 cr = cmd->cr;
218 218
219 list_del(&cmd->i_conn_node); 219 list_del_init(&cmd->i_conn_node);
220 return --cr->cmd_count; 220 return --cr->cmd_count;
221} 221}
222 222
@@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
297 if (!(cmd->cmd_flags & ICF_OOO_CMDSN)) 297 if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
298 continue; 298 continue;
299 299
300 list_del(&cmd->i_conn_node); 300 list_del_init(&cmd->i_conn_node);
301 301
302 spin_unlock_bh(&conn->cmd_lock); 302 spin_unlock_bh(&conn->cmd_lock);
303 iscsit_free_cmd(cmd, true); 303 iscsit_free_cmd(cmd, true);
@@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
335 /* 335 /*
336 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or 336 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
337 * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call 337 * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
338 * list_del(&cmd->i_conn_node); to release the command to the 338 * list_del_init(&cmd->i_conn_node); to release the command to the
339 * session pool and remove it from the connection's list. 339 * session pool and remove it from the connection's list.
340 * 340 *
341 * Also stop the DataOUT timer, which will be restarted after 341 * Also stop the DataOUT timer, which will be restarted after
@@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
351 " CID: %hu\n", cmd->iscsi_opcode, 351 " CID: %hu\n", cmd->iscsi_opcode,
352 cmd->init_task_tag, cmd->cmd_sn, conn->cid); 352 cmd->init_task_tag, cmd->cmd_sn, conn->cid);
353 353
354 list_del(&cmd->i_conn_node); 354 list_del_init(&cmd->i_conn_node);
355 spin_unlock_bh(&conn->cmd_lock); 355 spin_unlock_bh(&conn->cmd_lock);
356 iscsit_free_cmd(cmd, true); 356 iscsit_free_cmd(cmd, true);
357 spin_lock_bh(&conn->cmd_lock); 357 spin_lock_bh(&conn->cmd_lock);
@@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
371 */ 371 */
372 if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && 372 if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
373 iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { 373 iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
374 list_del(&cmd->i_conn_node); 374 list_del_init(&cmd->i_conn_node);
375 spin_unlock_bh(&conn->cmd_lock); 375 spin_unlock_bh(&conn->cmd_lock);
376 iscsit_free_cmd(cmd, true); 376 iscsit_free_cmd(cmd, true);
377 spin_lock_bh(&conn->cmd_lock); 377 spin_lock_bh(&conn->cmd_lock);
@@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
393 393
394 cmd->sess = conn->sess; 394 cmd->sess = conn->sess;
395 395
396 list_del(&cmd->i_conn_node); 396 list_del_init(&cmd->i_conn_node);
397 spin_unlock_bh(&conn->cmd_lock); 397 spin_unlock_bh(&conn->cmd_lock);
398 398
399 iscsit_free_all_datain_reqs(cmd); 399 iscsit_free_all_datain_reqs(cmd);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 39761837608d..44a5471de00f 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
137 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { 137 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
138 138
139 spin_lock(&tpg->tpg_state_lock); 139 spin_lock(&tpg->tpg_state_lock);
140 if (tpg->tpg_state == TPG_STATE_FREE) { 140 if (tpg->tpg_state != TPG_STATE_ACTIVE) {
141 spin_unlock(&tpg->tpg_state_lock); 141 spin_unlock(&tpg->tpg_state_lock);
142 continue; 142 continue;
143 } 143 }
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 42f18fc1067b..77e6531fb0a1 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1079,25 +1079,31 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
1079 left = sectors * dev->prot_length; 1079 left = sectors * dev->prot_length;
1080 1080
1081 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { 1081 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
1082 1082 unsigned int psg_len, copied = 0;
1083 len = min(psg->length, left);
1084 if (offset >= sg->length) {
1085 sg = sg_next(sg);
1086 offset = 0;
1087 }
1088 1083
1089 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1084 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1090 addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; 1085 psg_len = min(left, psg->length);
1091 1086 while (psg_len) {
1092 if (read) 1087 len = min(psg_len, sg->length - offset);
1093 memcpy(paddr, addr, len); 1088 addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
1094 else 1089
1095 memcpy(addr, paddr, len); 1090 if (read)
1096 1091 memcpy(paddr + copied, addr, len);
1097 left -= len; 1092 else
1098 offset += len; 1093 memcpy(addr, paddr + copied, len);
1094
1095 left -= len;
1096 offset += len;
1097 copied += len;
1098 psg_len -= len;
1099
1100 if (offset >= sg->length) {
1101 sg = sg_next(sg);
1102 offset = 0;
1103 }
1104 kunmap_atomic(addr);
1105 }
1099 kunmap_atomic(paddr); 1106 kunmap_atomic(paddr);
1100 kunmap_atomic(addr);
1101 } 1107 }
1102} 1108}
1103 1109
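The rewritten sbc_dif_copy_prot() walks two scatterlists whose segment boundaries do not line up: for each protection segment it copies in chunks limited by what is left in the current data segment, advancing and re-mapping the data side as needed. The sketch below shows the same nested walk for a one-direction copy between two scatterlists; it is a simplification under the assumption that both lists cover at least 'left' bytes, and the function name is hypothetical.

    #include <linux/scatterlist.h>
    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy 'left' bytes from src_sg into dst_sg, tolerating different
     * segment boundaries on the two lists.
     */
    static void example_sg_copy(struct scatterlist *dst_sg,
    			    struct scatterlist *src_sg, unsigned int left)
    {
    	struct scatterlist *sg = src_sg;
    	unsigned int offset = 0;

    	while (left && dst_sg) {
    		unsigned int dst_len = min(left, dst_sg->length);
    		unsigned int copied = 0;
    		void *daddr = kmap_atomic(sg_page(dst_sg)) + dst_sg->offset;

    		while (dst_len && sg) {
    			unsigned int len = min(dst_len, sg->length - offset);
    			void *saddr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

    			memcpy(daddr + copied, saddr, len);
    			kunmap_atomic(saddr);

    			copied += len;
    			offset += len;
    			dst_len -= len;
    			left -= len;

    			/* data segment exhausted: move to the next one */
    			if (offset >= sg->length) {
    				sg = sg_next(sg);
    				offset = 0;
    			}
    		}
    		kunmap_atomic(daddr);
    		dst_sg = sg_next(dst_sg);
    	}
    }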
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 35c066489a19..5f88d767671e 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -136,6 +136,7 @@ config SPEAR_THERMAL
136config RCAR_THERMAL 136config RCAR_THERMAL
137 tristate "Renesas R-Car thermal driver" 137 tristate "Renesas R-Car thermal driver"
138 depends on ARCH_SHMOBILE || COMPILE_TEST 138 depends on ARCH_SHMOBILE || COMPILE_TEST
139 depends on HAS_IOMEM
139 help 140 help
140 Enable this to plug the R-Car thermal sensor driver into the Linux 141 Enable this to plug the R-Car thermal sensor driver into the Linux
141 thermal framework. 142 thermal framework.
@@ -210,8 +211,16 @@ config ACPI_INT3403_THERMAL
210 tristate "ACPI INT3403 thermal driver" 211 tristate "ACPI INT3403 thermal driver"
211 depends on X86 && ACPI 212 depends on X86 && ACPI
212 help 213 help
213 This driver uses ACPI INT3403 device objects. If present, it will 214 Newer laptops and tablets that use ACPI may have thermal sensors
214 register each INT3403 thermal sensor as a thermal zone. 215 outside the core CPU/SOC for thermal safety reasons. These
216 temperature sensors are also exposed for the OS to use via the so
217 called INT3403 ACPI object. This driver will, on devices that have
218 such sensors, expose the temperature information from these sensors
219 to userspace via the normal thermal framework. This means that a wide
220 range of applications and GUI widgets can show this information to
221 the user or use this information for making decisions. For example,
222 the Intel Thermal Daemon can use this information to allow the user
223 to select his laptop to run without turning on the fans.
215 224
216menu "Texas Instruments thermal drivers" 225menu "Texas Instruments thermal drivers"
217source "drivers/thermal/ti-soc-thermal/Kconfig" 226source "drivers/thermal/ti-soc-thermal/Kconfig"
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 338a88bf6662..71b0ec0c370d 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -56,10 +56,15 @@ static LIST_HEAD(thermal_governor_list);
56static DEFINE_MUTEX(thermal_list_lock); 56static DEFINE_MUTEX(thermal_list_lock);
57static DEFINE_MUTEX(thermal_governor_lock); 57static DEFINE_MUTEX(thermal_governor_lock);
58 58
59static struct thermal_governor *def_governor;
60
59static struct thermal_governor *__find_governor(const char *name) 61static struct thermal_governor *__find_governor(const char *name)
60{ 62{
61 struct thermal_governor *pos; 63 struct thermal_governor *pos;
62 64
65 if (!name || !name[0])
66 return def_governor;
67
63 list_for_each_entry(pos, &thermal_governor_list, governor_list) 68 list_for_each_entry(pos, &thermal_governor_list, governor_list)
64 if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH)) 69 if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH))
65 return pos; 70 return pos;
@@ -82,17 +87,23 @@ int thermal_register_governor(struct thermal_governor *governor)
82 if (__find_governor(governor->name) == NULL) { 87 if (__find_governor(governor->name) == NULL) {
83 err = 0; 88 err = 0;
84 list_add(&governor->governor_list, &thermal_governor_list); 89 list_add(&governor->governor_list, &thermal_governor_list);
90 if (!def_governor && !strncmp(governor->name,
91 DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH))
92 def_governor = governor;
85 } 93 }
86 94
87 mutex_lock(&thermal_list_lock); 95 mutex_lock(&thermal_list_lock);
88 96
89 list_for_each_entry(pos, &thermal_tz_list, node) { 97 list_for_each_entry(pos, &thermal_tz_list, node) {
98 /*
99 * only thermal zones with specified tz->tzp->governor_name
100 * may run with tz->govenor unset
101 */
90 if (pos->governor) 102 if (pos->governor)
91 continue; 103 continue;
92 if (pos->tzp) 104
93 name = pos->tzp->governor_name; 105 name = pos->tzp->governor_name;
94 else 106
95 name = DEFAULT_THERMAL_GOVERNOR;
96 if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH)) 107 if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH))
97 pos->governor = governor; 108 pos->governor = governor;
98 } 109 }
@@ -342,8 +353,8 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz)
342static void handle_non_critical_trips(struct thermal_zone_device *tz, 353static void handle_non_critical_trips(struct thermal_zone_device *tz,
343 int trip, enum thermal_trip_type trip_type) 354 int trip, enum thermal_trip_type trip_type)
344{ 355{
345 if (tz->governor) 356 tz->governor ? tz->governor->throttle(tz, trip) :
346 tz->governor->throttle(tz, trip); 357 def_governor->throttle(tz, trip);
347} 358}
348 359
349static void handle_critical_trips(struct thermal_zone_device *tz, 360static void handle_critical_trips(struct thermal_zone_device *tz,
@@ -1107,7 +1118,7 @@ __thermal_cooling_device_register(struct device_node *np,
1107 INIT_LIST_HEAD(&cdev->thermal_instances); 1118 INIT_LIST_HEAD(&cdev->thermal_instances);
1108 cdev->np = np; 1119 cdev->np = np;
1109 cdev->ops = ops; 1120 cdev->ops = ops;
1110 cdev->updated = true; 1121 cdev->updated = false;
1111 cdev->device.class = &thermal_class; 1122 cdev->device.class = &thermal_class;
1112 cdev->devdata = devdata; 1123 cdev->devdata = devdata;
1113 dev_set_name(&cdev->device, "cooling_device%d", cdev->id); 1124 dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
@@ -1533,7 +1544,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
1533 if (tz->tzp) 1544 if (tz->tzp)
1534 tz->governor = __find_governor(tz->tzp->governor_name); 1545 tz->governor = __find_governor(tz->tzp->governor_name);
1535 else 1546 else
1536 tz->governor = __find_governor(DEFAULT_THERMAL_GOVERNOR); 1547 tz->governor = def_governor;
1537 1548
1538 mutex_unlock(&thermal_governor_lock); 1549 mutex_unlock(&thermal_governor_lock);
1539 1550
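The thermal core now caches the governor registered under DEFAULT_THERMAL_GOVERNOR in def_governor, and __find_governor() falls back to it when no name is given, so zones without platform parameters always end up with a usable governor and handle_non_critical_trips() can throttle even when tz->governor is unset. A compact sketch of the lookup-with-fallback idiom, using hypothetical example_ names and the strnicmp() helper this file already uses:

    #include <linux/list.h>
    #include <linux/string.h>

    #define EXAMPLE_NAME_LEN	20

    struct example_governor {
    	char name[EXAMPLE_NAME_LEN];
    	struct list_head governor_list;
    };

    static LIST_HEAD(example_governor_list);
    static struct example_governor *example_def_governor;

    /* An empty or missing name selects the cached default governor. */
    static struct example_governor *example_find_governor(const char *name)
    {
    	struct example_governor *pos;

    	if (!name || !name[0])
    		return example_def_governor;

    	list_for_each_entry(pos, &example_governor_list, governor_list)
    		if (!strnicmp(name, pos->name, EXAMPLE_NAME_LEN))
    			return pos;

    	return NULL;
    }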
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 972e1c73722a..081fd7e6a9f0 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -68,6 +68,10 @@ struct phy_dev_entry {
68 struct thermal_zone_device *tzone; 68 struct thermal_zone_device *tzone;
69}; 69};
70 70
71static const struct thermal_zone_params pkg_temp_tz_params = {
72 .no_hwmon = true,
73};
74
71/* List maintaining number of package instances */ 75/* List maintaining number of package instances */
72static LIST_HEAD(phy_dev_list); 76static LIST_HEAD(phy_dev_list);
73static DEFINE_MUTEX(phy_dev_list_mutex); 77static DEFINE_MUTEX(phy_dev_list_mutex);
@@ -394,7 +398,6 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
394 int err; 398 int err;
395 u32 tj_max; 399 u32 tj_max;
396 struct phy_dev_entry *phy_dev_entry; 400 struct phy_dev_entry *phy_dev_entry;
397 char buffer[30];
398 int thres_count; 401 int thres_count;
399 u32 eax, ebx, ecx, edx; 402 u32 eax, ebx, ecx, edx;
400 u8 *temp; 403 u8 *temp;
@@ -440,13 +443,11 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
440 phy_dev_entry->first_cpu = cpu; 443 phy_dev_entry->first_cpu = cpu;
441 phy_dev_entry->tj_max = tj_max; 444 phy_dev_entry->tj_max = tj_max;
442 phy_dev_entry->ref_cnt = 1; 445 phy_dev_entry->ref_cnt = 1;
443 snprintf(buffer, sizeof(buffer), "pkg-temp-%d\n", 446 phy_dev_entry->tzone = thermal_zone_device_register("x86_pkg_temp",
444 phy_dev_entry->phys_proc_id);
445 phy_dev_entry->tzone = thermal_zone_device_register(buffer,
446 thres_count, 447 thres_count,
447 (thres_count == MAX_NUMBER_OF_TRIPS) ? 448 (thres_count == MAX_NUMBER_OF_TRIPS) ?
448 0x03 : 0x01, 449 0x03 : 0x01,
449 phy_dev_entry, &tzone_ops, NULL, 0, 0); 450 phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0);
450 if (IS_ERR(phy_dev_entry->tzone)) { 451 if (IS_ERR(phy_dev_entry->tzone)) {
451 err = PTR_ERR(phy_dev_entry->tzone); 452 err = PTR_ERR(phy_dev_entry->tzone);
452 goto err_ret_free; 453 goto err_ret_free;
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 8d72f0c65937..062967c90b2a 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -717,6 +717,10 @@ int usb_get_configuration(struct usb_device *dev)
717 result = -ENOMEM; 717 result = -ENOMEM;
718 goto err; 718 goto err;
719 } 719 }
720
721 if (dev->quirks & USB_QUIRK_DELAY_INIT)
722 msleep(100);
723
720 result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, 724 result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
721 bigbuffer, length); 725 bigbuffer, length);
722 if (result < 0) { 726 if (result < 0) {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 8f37063c0a49..739ee8e8bdfd 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -47,6 +47,10 @@ static const struct usb_device_id usb_quirk_list[] = {
47 /* Microsoft LifeCam-VX700 v2.0 */ 47 /* Microsoft LifeCam-VX700 v2.0 */
48 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, 48 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
49 49
50 /* Logitech HD Pro Webcams C920 and C930e */
51 { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
52 { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
53
50 /* Logitech Quickcam Fusion */ 54 /* Logitech Quickcam Fusion */
51 { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, 55 { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
52 56
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6fe577d46fa2..924a6ccdb622 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4733,6 +4733,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4733 /* Accept arbitrarily long scatter-gather lists */ 4733 /* Accept arbitrarily long scatter-gather lists */
4734 hcd->self.sg_tablesize = ~0; 4734 hcd->self.sg_tablesize = ~0;
4735 4735
4736 /* support to build packet from discontinuous buffers */
4737 hcd->self.no_sg_constraint = 1;
4738
4736 /* XHCI controllers don't stop the ep queue on short packets :| */ 4739 /* XHCI controllers don't stop the ep queue on short packets :| */
4737 hcd->self.no_stop_on_short = 1; 4740 hcd->self.no_stop_on_short = 1;
4738 4741
@@ -4757,14 +4760,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4757 /* xHCI private pointer was set in xhci_pci_probe for the second 4760 /* xHCI private pointer was set in xhci_pci_probe for the second
4758 * registered roothub. 4761 * registered roothub.
4759 */ 4762 */
4760 xhci = hcd_to_xhci(hcd);
4761 /*
4762 * Support arbitrarily aligned sg-list entries on hosts without
4763 * TD fragment rules (which are currently unsupported).
4764 */
4765 if (xhci->hci_version < 0x100)
4766 hcd->self.no_sg_constraint = 1;
4767
4768 return 0; 4763 return 0;
4769 } 4764 }
4770 4765
@@ -4793,9 +4788,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4793 if (xhci->hci_version > 0x96) 4788 if (xhci->hci_version > 0x96)
4794 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; 4789 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
4795 4790
4796 if (xhci->hci_version < 0x100)
4797 hcd->self.no_sg_constraint = 1;
4798
4799 /* Make sure the HC is halted. */ 4791 /* Make sure the HC is halted. */
4800 retval = xhci_halt(xhci); 4792 retval = xhci_halt(xhci);
4801 if (retval) 4793 if (retval)
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index cf32f0393369..c0f3718b77a8 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -513,7 +513,7 @@ struct cifs_mnt_data {
513static inline unsigned int 513static inline unsigned int
514get_rfc1002_length(void *buf) 514get_rfc1002_length(void *buf)
515{ 515{
516 return be32_to_cpu(*((__be32 *)buf)); 516 return be32_to_cpu(*((__be32 *)buf)) & 0xffffff;
517} 517}
518 518
519static inline void 519static inline void
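The & 0xffffff added to get_rfc1002_length() keeps only the low 24 bits of the transport header: the 4-byte RFC 1002 session header carries a type byte in the top octet and the payload length in the remaining three, so the type byte must not leak into the length. A minimal userspace sketch of the same extraction, with ntohl standing in for be32_to_cpu:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Split a 4-byte RFC 1002 session header into type and 24-bit length. */
static unsigned int rfc1002_length(const void *buf)
{
        uint32_t hdr;

        memcpy(&hdr, buf, sizeof(hdr));  /* header is big-endian on the wire */
        return ntohl(hdr) & 0xffffff;
}

int main(void)
{
        /* type 0x00 (session message), length 0x000123 = 291 bytes */
        const uint8_t hdr[4] = { 0x00, 0x00, 0x01, 0x23 };

        printf("type=%u len=%u\n", hdr[0], rfc1002_length(hdr));
        return 0;
}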
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 53c15074bb36..834fce759d80 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2579,31 +2579,19 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2579 struct cifsInodeInfo *cinode = CIFS_I(inode); 2579 struct cifsInodeInfo *cinode = CIFS_I(inode);
2580 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; 2580 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2581 ssize_t rc = -EACCES; 2581 ssize_t rc = -EACCES;
2582 loff_t lock_pos = pos;
2582 2583
2583 BUG_ON(iocb->ki_pos != pos); 2584 if (file->f_flags & O_APPEND)
2584 2585 lock_pos = i_size_read(inode);
2585 /* 2586 /*
2586 * We need to hold the sem to be sure nobody modifies lock list 2587 * We need to hold the sem to be sure nobody modifies lock list
2587 * with a brlock that prevents writing. 2588 * with a brlock that prevents writing.
2588 */ 2589 */
2589 down_read(&cinode->lock_sem); 2590 down_read(&cinode->lock_sem);
2590 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs), 2591 if (!cifs_find_lock_conflict(cfile, lock_pos, iov_length(iov, nr_segs),
2591 server->vals->exclusive_lock_type, NULL, 2592 server->vals->exclusive_lock_type, NULL,
2592 CIFS_WRITE_OP)) { 2593 CIFS_WRITE_OP))
2593 mutex_lock(&inode->i_mutex); 2594 rc = generic_file_aio_write(iocb, iov, nr_segs, pos);
2594 rc = __generic_file_aio_write(iocb, iov, nr_segs,
2595 &iocb->ki_pos);
2596 mutex_unlock(&inode->i_mutex);
2597 }
2598
2599 if (rc > 0) {
2600 ssize_t err;
2601
2602 err = generic_write_sync(file, iocb->ki_pos - rc, rc);
2603 if (err < 0)
2604 rc = err;
2605 }
2606
2607 up_read(&cinode->lock_sem); 2595 up_read(&cinode->lock_sem);
2608 return rc; 2596 return rc;
2609} 2597}
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index b37570952846..18cd5650a5fc 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -270,6 +270,26 @@ cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
270 iov->iov_len = rqst->rq_pagesz; 270 iov->iov_len = rqst->rq_pagesz;
271} 271}
272 272
273static unsigned long
274rqst_len(struct smb_rqst *rqst)
275{
276 unsigned int i;
277 struct kvec *iov = rqst->rq_iov;
278 unsigned long buflen = 0;
279
280 /* total up iov array first */
281 for (i = 0; i < rqst->rq_nvec; i++)
282 buflen += iov[i].iov_len;
283
284 /* add in the page array if there is one */
285 if (rqst->rq_npages) {
286 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
287 buflen += rqst->rq_tailsz;
288 }
289
290 return buflen;
291}
292
273static int 293static int
274smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) 294smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
275{ 295{
@@ -277,6 +297,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
277 struct kvec *iov = rqst->rq_iov; 297 struct kvec *iov = rqst->rq_iov;
278 int n_vec = rqst->rq_nvec; 298 int n_vec = rqst->rq_nvec;
279 unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base); 299 unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
300 unsigned long send_length;
280 unsigned int i; 301 unsigned int i;
281 size_t total_len = 0, sent; 302 size_t total_len = 0, sent;
282 struct socket *ssocket = server->ssocket; 303 struct socket *ssocket = server->ssocket;
@@ -285,6 +306,14 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
285 if (ssocket == NULL) 306 if (ssocket == NULL)
286 return -ENOTSOCK; 307 return -ENOTSOCK;
287 308
309 /* sanity check send length */
310 send_length = rqst_len(rqst);
311 if (send_length != smb_buf_length + 4) {
312 WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
313 send_length, smb_buf_length);
314 return -EIO;
315 }
316
288 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length); 317 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
289 dump_smb(iov[0].iov_base, iov[0].iov_len); 318 dump_smb(iov[0].iov_base, iov[0].iov_len);
290 319
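The new rqst_len()/WARN path enforces a simple invariant before anything hits the socket: the bytes described by the kvec array plus the page array (full pages and the partial tail page) must equal the RFC 1002 length from the first kvec plus the 4 bytes of that header itself, otherwise the send is refused with -EIO. A worked example of the arithmetic with made-up sizes:

#include <stdio.h>

int main(void)
{
        /* hypothetical request: one 68-byte header kvec (4-byte RFC 1002
         * header + 64 bytes of SMB), one 12-byte kvec, and 3 data pages of
         * which the last holds only 100 bytes */
        unsigned long iov_len[2] = { 4 + 64, 12 };
        unsigned int npages = 3, pagesz = 4096, tailsz = 100;
        unsigned int smb_buf_length = 64 + 12 + 2 * 4096 + 100;

        unsigned long send_length = iov_len[0] + iov_len[1]
                                  + pagesz * (npages - 1) + tailsz;

        printf("send_length=%lu expected=%u -> %s\n",
               send_length, smb_buf_length + 4,
               send_length == smb_buf_length + 4 ? "ok" : "mismatch, -EIO");
        return 0;
}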
diff --git a/fs/file.c b/fs/file.c
index db25c2bdfe46..60a45e9f5323 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -683,35 +683,65 @@ EXPORT_SYMBOL(fget_raw);
683 * The fput_needed flag returned by fget_light should be passed to the 683 * The fput_needed flag returned by fget_light should be passed to the
684 * corresponding fput_light. 684 * corresponding fput_light.
685 */ 685 */
686struct file *__fget_light(unsigned int fd, fmode_t mask, int *fput_needed) 686static unsigned long __fget_light(unsigned int fd, fmode_t mask)
687{ 687{
688 struct files_struct *files = current->files; 688 struct files_struct *files = current->files;
689 struct file *file; 689 struct file *file;
690 690
691 *fput_needed = 0;
692 if (atomic_read(&files->count) == 1) { 691 if (atomic_read(&files->count) == 1) {
693 file = __fcheck_files(files, fd); 692 file = __fcheck_files(files, fd);
694 if (file && (file->f_mode & mask)) 693 if (!file || unlikely(file->f_mode & mask))
695 file = NULL; 694 return 0;
695 return (unsigned long)file;
696 } else { 696 } else {
697 file = __fget(fd, mask); 697 file = __fget(fd, mask);
698 if (file) 698 if (!file)
699 *fput_needed = 1; 699 return 0;
700 return FDPUT_FPUT | (unsigned long)file;
700 } 701 }
701
702 return file;
703} 702}
704struct file *fget_light(unsigned int fd, int *fput_needed) 703unsigned long __fdget(unsigned int fd)
705{ 704{
706 return __fget_light(fd, FMODE_PATH, fput_needed); 705 return __fget_light(fd, FMODE_PATH);
707} 706}
708EXPORT_SYMBOL(fget_light); 707EXPORT_SYMBOL(__fdget);
709 708
710struct file *fget_raw_light(unsigned int fd, int *fput_needed) 709unsigned long __fdget_raw(unsigned int fd)
711{ 710{
712 return __fget_light(fd, 0, fput_needed); 711 return __fget_light(fd, 0);
712}
713
714unsigned long __fdget_pos(unsigned int fd)
715{
716 struct files_struct *files = current->files;
717 struct file *file;
718 unsigned long v;
719
720 if (atomic_read(&files->count) == 1) {
721 file = __fcheck_files(files, fd);
722 v = 0;
723 } else {
724 file = __fget(fd, 0);
725 v = FDPUT_FPUT;
726 }
727 if (!file)
728 return 0;
729
730 if (file->f_mode & FMODE_ATOMIC_POS) {
731 if (file_count(file) > 1) {
732 v |= FDPUT_POS_UNLOCK;
733 mutex_lock(&file->f_pos_lock);
734 }
735 }
736 return v | (unsigned long)file;
713} 737}
714 738
739/*
740 * We only lock f_pos if we have threads or if the file might be
741 * shared with another process. In both cases we'll have an elevated
742 * file count (done either by fdget() or by fork()).
743 */
744
715void set_close_on_exec(unsigned int fd, int flag) 745void set_close_on_exec(unsigned int fd, int flag)
716{ 746{
717 struct files_struct *files = current->files; 747 struct files_struct *files = current->files;
diff --git a/fs/file_table.c b/fs/file_table.c
index 5fff9030be34..5b24008ea4f6 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -135,6 +135,7 @@ struct file *get_empty_filp(void)
135 atomic_long_set(&f->f_count, 1); 135 atomic_long_set(&f->f_count, 1);
136 rwlock_init(&f->f_owner.lock); 136 rwlock_init(&f->f_owner.lock);
137 spin_lock_init(&f->f_lock); 137 spin_lock_init(&f->f_lock);
138 mutex_init(&f->f_pos_lock);
138 eventpoll_init_file(f); 139 eventpoll_init_file(f);
139 /* f->f_version: 0 */ 140 /* f->f_version: 0 */
140 return f; 141 return f;
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 968ce411db53..32602c667b4a 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -103,6 +103,8 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry,
103 folder = &entry->folder; 103 folder = &entry->folder;
104 memset(folder, 0, sizeof(*folder)); 104 memset(folder, 0, sizeof(*folder));
105 folder->type = cpu_to_be16(HFSPLUS_FOLDER); 105 folder->type = cpu_to_be16(HFSPLUS_FOLDER);
106 if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags))
107 folder->flags |= cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT);
106 folder->id = cpu_to_be32(inode->i_ino); 108 folder->id = cpu_to_be32(inode->i_ino);
107 HFSPLUS_I(inode)->create_date = 109 HFSPLUS_I(inode)->create_date =
108 folder->create_date = 110 folder->create_date =
@@ -203,6 +205,36 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
203 return hfs_brec_find(fd, hfs_find_rec_by_key); 205 return hfs_brec_find(fd, hfs_find_rec_by_key);
204} 206}
205 207
208static void hfsplus_subfolders_inc(struct inode *dir)
209{
210 struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
211
212 if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
213 /*
214 * Increment subfolder count. Note, the value is only meaningful
215 * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set.
216 */
217 HFSPLUS_I(dir)->subfolders++;
218 }
219}
220
221static void hfsplus_subfolders_dec(struct inode *dir)
222{
223 struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
224
225 if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
226 /*
227 * Decrement subfolder count. Note, the value is only meaningful
228 * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set.
229 *
230 * Check for zero. Some subfolders may have been created
231 * by an implementation ignorant of this counter.
232 */
233 if (HFSPLUS_I(dir)->subfolders)
234 HFSPLUS_I(dir)->subfolders--;
235 }
236}
237
206int hfsplus_create_cat(u32 cnid, struct inode *dir, 238int hfsplus_create_cat(u32 cnid, struct inode *dir,
207 struct qstr *str, struct inode *inode) 239 struct qstr *str, struct inode *inode)
208{ 240{
@@ -247,6 +279,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
247 goto err1; 279 goto err1;
248 280
249 dir->i_size++; 281 dir->i_size++;
282 if (S_ISDIR(inode->i_mode))
283 hfsplus_subfolders_inc(dir);
250 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; 284 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
251 hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY); 285 hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
252 286
@@ -336,6 +370,8 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
336 goto out; 370 goto out;
337 371
338 dir->i_size--; 372 dir->i_size--;
373 if (type == HFSPLUS_FOLDER)
374 hfsplus_subfolders_dec(dir);
339 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; 375 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
340 hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY); 376 hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
341 377
@@ -380,6 +416,7 @@ int hfsplus_rename_cat(u32 cnid,
380 416
381 hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, 417 hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
382 src_fd.entrylength); 418 src_fd.entrylength);
419 type = be16_to_cpu(entry.type);
383 420
384 /* create new dir entry with the data from the old entry */ 421 /* create new dir entry with the data from the old entry */
385 hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name); 422 hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name);
@@ -394,6 +431,8 @@ int hfsplus_rename_cat(u32 cnid,
394 if (err) 431 if (err)
395 goto out; 432 goto out;
396 dst_dir->i_size++; 433 dst_dir->i_size++;
434 if (type == HFSPLUS_FOLDER)
435 hfsplus_subfolders_inc(dst_dir);
397 dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC; 436 dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC;
398 437
399 /* finally remove the old entry */ 438 /* finally remove the old entry */
@@ -405,6 +444,8 @@ int hfsplus_rename_cat(u32 cnid,
405 if (err) 444 if (err)
406 goto out; 445 goto out;
407 src_dir->i_size--; 446 src_dir->i_size--;
447 if (type == HFSPLUS_FOLDER)
448 hfsplus_subfolders_dec(src_dir);
408 src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC; 449 src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC;
409 450
410 /* remove old thread entry */ 451 /* remove old thread entry */
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 08846425b67f..62d571eb69ba 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -242,6 +242,7 @@ struct hfsplus_inode_info {
242 */ 242 */
243 sector_t fs_blocks; 243 sector_t fs_blocks;
244 u8 userflags; /* BSD user file flags */ 244 u8 userflags; /* BSD user file flags */
245 u32 subfolders; /* Subfolder count (HFSX only) */
245 struct list_head open_dir_list; 246 struct list_head open_dir_list;
246 loff_t phys_size; 247 loff_t phys_size;
247 248
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index 8ffb3a8ffe75..5a126828d85e 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -261,7 +261,7 @@ struct hfsplus_cat_folder {
261 struct DInfo user_info; 261 struct DInfo user_info;
262 struct DXInfo finder_info; 262 struct DXInfo finder_info;
263 __be32 text_encoding; 263 __be32 text_encoding;
264 u32 reserved; 264 __be32 subfolders; /* Subfolder count in HFSX. Reserved in HFS+. */
265} __packed; 265} __packed;
266 266
267/* HFS file info (stolen from hfs.h) */ 267/* HFS file info (stolen from hfs.h) */
@@ -301,11 +301,13 @@ struct hfsplus_cat_file {
301 struct hfsplus_fork_raw rsrc_fork; 301 struct hfsplus_fork_raw rsrc_fork;
302} __packed; 302} __packed;
303 303
304/* File attribute bits */ 304/* File and folder flag bits */
305#define HFSPLUS_FILE_LOCKED 0x0001 305#define HFSPLUS_FILE_LOCKED 0x0001
306#define HFSPLUS_FILE_THREAD_EXISTS 0x0002 306#define HFSPLUS_FILE_THREAD_EXISTS 0x0002
307#define HFSPLUS_XATTR_EXISTS 0x0004 307#define HFSPLUS_XATTR_EXISTS 0x0004
308#define HFSPLUS_ACL_EXISTS 0x0008 308#define HFSPLUS_ACL_EXISTS 0x0008
309#define HFSPLUS_HAS_FOLDER_COUNT 0x0010 /* Folder has subfolder count
310 * (HFSX only) */
309 311
310/* HFS+ catalog thread (part of a cat_entry) */ 312/* HFS+ catalog thread (part of a cat_entry) */
311struct hfsplus_cat_thread { 313struct hfsplus_cat_thread {
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index fa929f325f87..a4f45bd88a63 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -375,6 +375,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode)
375 hip->extent_state = 0; 375 hip->extent_state = 0;
376 hip->flags = 0; 376 hip->flags = 0;
377 hip->userflags = 0; 377 hip->userflags = 0;
378 hip->subfolders = 0;
378 memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec)); 379 memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
379 memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec)); 380 memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
380 hip->alloc_blocks = 0; 381 hip->alloc_blocks = 0;
@@ -494,6 +495,10 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
494 inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date); 495 inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
495 HFSPLUS_I(inode)->create_date = folder->create_date; 496 HFSPLUS_I(inode)->create_date = folder->create_date;
496 HFSPLUS_I(inode)->fs_blocks = 0; 497 HFSPLUS_I(inode)->fs_blocks = 0;
498 if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
499 HFSPLUS_I(inode)->subfolders =
500 be32_to_cpu(folder->subfolders);
501 }
497 inode->i_op = &hfsplus_dir_inode_operations; 502 inode->i_op = &hfsplus_dir_inode_operations;
498 inode->i_fop = &hfsplus_dir_operations; 503 inode->i_fop = &hfsplus_dir_operations;
499 } else if (type == HFSPLUS_FILE) { 504 } else if (type == HFSPLUS_FILE) {
@@ -566,6 +571,10 @@ int hfsplus_cat_write_inode(struct inode *inode)
566 folder->content_mod_date = hfsp_ut2mt(inode->i_mtime); 571 folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
567 folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime); 572 folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
568 folder->valence = cpu_to_be32(inode->i_size - 2); 573 folder->valence = cpu_to_be32(inode->i_size - 2);
574 if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
575 folder->subfolders =
576 cpu_to_be32(HFSPLUS_I(inode)->subfolders);
577 }
569 hfs_bnode_write(fd.bnode, &entry, fd.entryoffset, 578 hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
570 sizeof(struct hfsplus_cat_folder)); 579 sizeof(struct hfsplus_cat_folder));
571 } else if (HFSPLUS_IS_RSRC(inode)) { 580 } else if (HFSPLUS_IS_RSRC(inode)) {
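Throughout the hfsplus hunks the on-disk flags word is never byte-swapped: HFSPLUS_HAS_FOLDER_COUNT is tested and set by converting the constant with cpu_to_be16() instead, so the field can stay in its big-endian disk format. A small userspace sketch of that idiom, with htons standing in for cpu_to_be16 and the flag value copied from the header above:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define HAS_FOLDER_COUNT 0x0010         /* HFSPLUS_HAS_FOLDER_COUNT */

int main(void)
{
        uint16_t flags = 0;                     /* big-endian, as stored on disk */

        flags |= htons(HAS_FOLDER_COUNT);       /* set without swapping 'flags' */

        if (flags & htons(HAS_FOLDER_COUNT))
                printf("folder carries a subfolder count\n");
        return 0;
}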
diff --git a/fs/namei.c b/fs/namei.c
index 385f7817bfcc..2f730ef9b4b3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1884,7 +1884,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
1884 1884
1885 nd->path = f.file->f_path; 1885 nd->path = f.file->f_path;
1886 if (flags & LOOKUP_RCU) { 1886 if (flags & LOOKUP_RCU) {
1887 if (f.need_put) 1887 if (f.flags & FDPUT_FPUT)
1888 *fp = f.file; 1888 *fp = f.file;
1889 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); 1889 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1890 rcu_read_lock(); 1890 rcu_read_lock();
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index ef792f29f831..5d8ccecf5f5c 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -659,16 +659,19 @@ int nfs_async_inode_return_delegation(struct inode *inode,
659 659
660 rcu_read_lock(); 660 rcu_read_lock();
661 delegation = rcu_dereference(NFS_I(inode)->delegation); 661 delegation = rcu_dereference(NFS_I(inode)->delegation);
662 if (delegation == NULL)
663 goto out_enoent;
662 664
663 if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) { 665 if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
664 rcu_read_unlock(); 666 goto out_enoent;
665 return -ENOENT;
666 }
667 nfs_mark_return_delegation(server, delegation); 667 nfs_mark_return_delegation(server, delegation);
668 rcu_read_unlock(); 668 rcu_read_unlock();
669 669
670 nfs_delegation_run_state_manager(clp); 670 nfs_delegation_run_state_manager(clp);
671 return 0; 671 return 0;
672out_enoent:
673 rcu_read_unlock();
674 return -ENOENT;
672} 675}
673 676
674static struct inode * 677static struct inode *
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 12c8132ad408..b9a35c05b60f 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -324,8 +324,9 @@ static void filelayout_read_prepare(struct rpc_task *task, void *data)
324 &rdata->res.seq_res, 324 &rdata->res.seq_res,
325 task)) 325 task))
326 return; 326 return;
327 nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context, 327 if (nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context,
328 rdata->args.lock_context, FMODE_READ); 328 rdata->args.lock_context, FMODE_READ) == -EIO)
329 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
329} 330}
330 331
331static void filelayout_read_call_done(struct rpc_task *task, void *data) 332static void filelayout_read_call_done(struct rpc_task *task, void *data)
@@ -435,8 +436,9 @@ static void filelayout_write_prepare(struct rpc_task *task, void *data)
435 &wdata->res.seq_res, 436 &wdata->res.seq_res,
436 task)) 437 task))
437 return; 438 return;
438 nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context, 439 if (nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context,
439 wdata->args.lock_context, FMODE_WRITE); 440 wdata->args.lock_context, FMODE_WRITE) == -EIO)
441 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
440} 442}
441 443
442static void filelayout_write_call_done(struct rpc_task *task, void *data) 444static void filelayout_write_call_done(struct rpc_task *task, void *data)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 2da6a698b8f7..450bfedbe2f4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2398,13 +2398,16 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2398 2398
2399 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) { 2399 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
2400 /* Use that stateid */ 2400 /* Use that stateid */
2401 } else if (truncate && state != NULL && nfs4_valid_open_stateid(state)) { 2401 } else if (truncate && state != NULL) {
2402 struct nfs_lockowner lockowner = { 2402 struct nfs_lockowner lockowner = {
2403 .l_owner = current->files, 2403 .l_owner = current->files,
2404 .l_pid = current->tgid, 2404 .l_pid = current->tgid,
2405 }; 2405 };
2406 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, 2406 if (!nfs4_valid_open_stateid(state))
2407 &lockowner); 2407 return -EBADF;
2408 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2409 &lockowner) == -EIO)
2410 return -EBADF;
2408 } else 2411 } else
2409 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 2412 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2410 2413
@@ -4011,8 +4014,9 @@ static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4011{ 4014{
4012 nfs4_stateid current_stateid; 4015 nfs4_stateid current_stateid;
4013 4016
4014 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode)) 4017 /* If the current stateid represents a lost lock, then exit */
4015 return false; 4018 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4019 return true;
4016 return nfs4_stateid_match(stateid, &current_stateid); 4020 return nfs4_stateid_match(stateid, &current_stateid);
4017} 4021}
4018 4022
@@ -5828,8 +5832,7 @@ struct nfs_release_lockowner_data {
5828 struct nfs4_lock_state *lsp; 5832 struct nfs4_lock_state *lsp;
5829 struct nfs_server *server; 5833 struct nfs_server *server;
5830 struct nfs_release_lockowner_args args; 5834 struct nfs_release_lockowner_args args;
5831 struct nfs4_sequence_args seq_args; 5835 struct nfs_release_lockowner_res res;
5832 struct nfs4_sequence_res seq_res;
5833 unsigned long timestamp; 5836 unsigned long timestamp;
5834}; 5837};
5835 5838
@@ -5837,7 +5840,7 @@ static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata
5837{ 5840{
5838 struct nfs_release_lockowner_data *data = calldata; 5841 struct nfs_release_lockowner_data *data = calldata;
5839 nfs40_setup_sequence(data->server, 5842 nfs40_setup_sequence(data->server,
5840 &data->seq_args, &data->seq_res, task); 5843 &data->args.seq_args, &data->res.seq_res, task);
5841 data->timestamp = jiffies; 5844 data->timestamp = jiffies;
5842} 5845}
5843 5846
@@ -5846,7 +5849,7 @@ static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
5846 struct nfs_release_lockowner_data *data = calldata; 5849 struct nfs_release_lockowner_data *data = calldata;
5847 struct nfs_server *server = data->server; 5850 struct nfs_server *server = data->server;
5848 5851
5849 nfs40_sequence_done(task, &data->seq_res); 5852 nfs40_sequence_done(task, &data->res.seq_res);
5850 5853
5851 switch (task->tk_status) { 5854 switch (task->tk_status) {
5852 case 0: 5855 case 0:
@@ -5887,7 +5890,6 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st
5887 data = kmalloc(sizeof(*data), GFP_NOFS); 5890 data = kmalloc(sizeof(*data), GFP_NOFS);
5888 if (!data) 5891 if (!data)
5889 return -ENOMEM; 5892 return -ENOMEM;
5890 nfs4_init_sequence(&data->seq_args, &data->seq_res, 0);
5891 data->lsp = lsp; 5893 data->lsp = lsp;
5892 data->server = server; 5894 data->server = server;
5893 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 5895 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
@@ -5895,6 +5897,8 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st
5895 data->args.lock_owner.s_dev = server->s_dev; 5897 data->args.lock_owner.s_dev = server->s_dev;
5896 5898
5897 msg.rpc_argp = &data->args; 5899 msg.rpc_argp = &data->args;
5900 msg.rpc_resp = &data->res;
5901 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
5898 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 5902 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
5899 return 0; 5903 return 0;
5900} 5904}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e1a47217c05e..0deb32105ccf 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -974,9 +974,6 @@ static int nfs4_copy_lock_stateid(nfs4_stateid *dst,
974 else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) { 974 else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) {
975 nfs4_stateid_copy(dst, &lsp->ls_stateid); 975 nfs4_stateid_copy(dst, &lsp->ls_stateid);
976 ret = 0; 976 ret = 0;
977 smp_rmb();
978 if (!list_empty(&lsp->ls_seqid.list))
979 ret = -EWOULDBLOCK;
980 } 977 }
981 spin_unlock(&state->state_lock); 978 spin_unlock(&state->state_lock);
982 nfs4_put_lock_state(lsp); 979 nfs4_put_lock_state(lsp);
@@ -984,10 +981,9 @@ out:
984 return ret; 981 return ret;
985} 982}
986 983
987static int nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state) 984static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
988{ 985{
989 const nfs4_stateid *src; 986 const nfs4_stateid *src;
990 int ret;
991 int seq; 987 int seq;
992 988
993 do { 989 do {
@@ -996,12 +992,7 @@ static int nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
996 if (test_bit(NFS_OPEN_STATE, &state->flags)) 992 if (test_bit(NFS_OPEN_STATE, &state->flags))
997 src = &state->open_stateid; 993 src = &state->open_stateid;
998 nfs4_stateid_copy(dst, src); 994 nfs4_stateid_copy(dst, src);
999 ret = 0;
1000 smp_rmb();
1001 if (!list_empty(&state->owner->so_seqid.list))
1002 ret = -EWOULDBLOCK;
1003 } while (read_seqretry(&state->seqlock, seq)); 995 } while (read_seqretry(&state->seqlock, seq));
1004 return ret;
1005} 996}
1006 997
1007/* 998/*
@@ -1026,7 +1017,8 @@ int nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
1026 * choose to use. 1017 * choose to use.
1027 */ 1018 */
1028 goto out; 1019 goto out;
1029 ret = nfs4_copy_open_stateid(dst, state); 1020 nfs4_copy_open_stateid(dst, state);
1021 ret = 0;
1030out: 1022out:
1031 if (nfs_server_capable(state->inode, NFS_CAP_STATEID_NFSV41)) 1023 if (nfs_server_capable(state->inode, NFS_CAP_STATEID_NFSV41))
1032 dst->seqid = 0; 1024 dst->seqid = 0;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 8450262bcf2a..51632c40e896 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2393,8 +2393,8 @@ out_dio:
2393 2393
2394 if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || 2394 if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
2395 ((file->f_flags & O_DIRECT) && !direct_io)) { 2395 ((file->f_flags & O_DIRECT) && !direct_io)) {
2396 ret = filemap_fdatawrite_range(file->f_mapping, pos, 2396 ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
2397 pos + count - 1); 2397 *ppos + count - 1);
2398 if (ret < 0) 2398 if (ret < 0)
2399 written = ret; 2399 written = ret;
2400 2400
@@ -2407,8 +2407,8 @@ out_dio:
2407 } 2407 }
2408 2408
2409 if (!ret) 2409 if (!ret)
2410 ret = filemap_fdatawait_range(file->f_mapping, pos, 2410 ret = filemap_fdatawait_range(file->f_mapping, *ppos,
2411 pos + count - 1); 2411 *ppos + count - 1);
2412 } 2412 }
2413 2413
2414 /* 2414 /*
diff --git a/fs/open.c b/fs/open.c
index 4b3e1edf2fe4..b9ed8b25c108 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -705,6 +705,10 @@ static int do_dentry_open(struct file *f,
705 return 0; 705 return 0;
706 } 706 }
707 707
708 /* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */
709 if (S_ISREG(inode->i_mode))
710 f->f_mode |= FMODE_ATOMIC_POS;
711
708 f->f_op = fops_get(inode->i_fop); 712 f->f_op = fops_get(inode->i_fop);
709 if (unlikely(WARN_ON(!f->f_op))) { 713 if (unlikely(WARN_ON(!f->f_op))) {
710 error = -ENODEV; 714 error = -ENODEV;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 51507065263b..b9760628e1fd 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1824,6 +1824,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
1824 if (rc) 1824 if (rc)
1825 goto out_mmput; 1825 goto out_mmput;
1826 1826
1827 rc = -ENOENT;
1827 down_read(&mm->mmap_sem); 1828 down_read(&mm->mmap_sem);
1828 vma = find_exact_vma(mm, vm_start, vm_end); 1829 vma = find_exact_vma(mm, vm_start, vm_end);
1829 if (vma && vma->vm_file) { 1830 if (vma && vma->vm_file) {
diff --git a/fs/read_write.c b/fs/read_write.c
index edc5746a902a..54e19b9392dc 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -264,10 +264,22 @@ loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
264} 264}
265EXPORT_SYMBOL(vfs_llseek); 265EXPORT_SYMBOL(vfs_llseek);
266 266
267static inline struct fd fdget_pos(int fd)
268{
269 return __to_fd(__fdget_pos(fd));
270}
271
272static inline void fdput_pos(struct fd f)
273{
274 if (f.flags & FDPUT_POS_UNLOCK)
275 mutex_unlock(&f.file->f_pos_lock);
276 fdput(f);
277}
278
267SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence) 279SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
268{ 280{
269 off_t retval; 281 off_t retval;
270 struct fd f = fdget(fd); 282 struct fd f = fdget_pos(fd);
271 if (!f.file) 283 if (!f.file)
272 return -EBADF; 284 return -EBADF;
273 285
@@ -278,7 +290,7 @@ SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
278 if (res != (loff_t)retval) 290 if (res != (loff_t)retval)
279 retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */ 291 retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */
280 } 292 }
281 fdput(f); 293 fdput_pos(f);
282 return retval; 294 return retval;
283} 295}
284 296
@@ -498,7 +510,7 @@ static inline void file_pos_write(struct file *file, loff_t pos)
498 510
499SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) 511SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
500{ 512{
501 struct fd f = fdget(fd); 513 struct fd f = fdget_pos(fd);
502 ssize_t ret = -EBADF; 514 ssize_t ret = -EBADF;
503 515
504 if (f.file) { 516 if (f.file) {
@@ -506,7 +518,7 @@ SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
506 ret = vfs_read(f.file, buf, count, &pos); 518 ret = vfs_read(f.file, buf, count, &pos);
507 if (ret >= 0) 519 if (ret >= 0)
508 file_pos_write(f.file, pos); 520 file_pos_write(f.file, pos);
509 fdput(f); 521 fdput_pos(f);
510 } 522 }
511 return ret; 523 return ret;
512} 524}
@@ -514,7 +526,7 @@ SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
514SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf, 526SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
515 size_t, count) 527 size_t, count)
516{ 528{
517 struct fd f = fdget(fd); 529 struct fd f = fdget_pos(fd);
518 ssize_t ret = -EBADF; 530 ssize_t ret = -EBADF;
519 531
520 if (f.file) { 532 if (f.file) {
@@ -522,7 +534,7 @@ SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
522 ret = vfs_write(f.file, buf, count, &pos); 534 ret = vfs_write(f.file, buf, count, &pos);
523 if (ret >= 0) 535 if (ret >= 0)
524 file_pos_write(f.file, pos); 536 file_pos_write(f.file, pos);
525 fdput(f); 537 fdput_pos(f);
526 } 538 }
527 539
528 return ret; 540 return ret;
@@ -797,7 +809,7 @@ EXPORT_SYMBOL(vfs_writev);
797SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec, 809SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
798 unsigned long, vlen) 810 unsigned long, vlen)
799{ 811{
800 struct fd f = fdget(fd); 812 struct fd f = fdget_pos(fd);
801 ssize_t ret = -EBADF; 813 ssize_t ret = -EBADF;
802 814
803 if (f.file) { 815 if (f.file) {
@@ -805,7 +817,7 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
805 ret = vfs_readv(f.file, vec, vlen, &pos); 817 ret = vfs_readv(f.file, vec, vlen, &pos);
806 if (ret >= 0) 818 if (ret >= 0)
807 file_pos_write(f.file, pos); 819 file_pos_write(f.file, pos);
808 fdput(f); 820 fdput_pos(f);
809 } 821 }
810 822
811 if (ret > 0) 823 if (ret > 0)
@@ -817,7 +829,7 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
817SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec, 829SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
818 unsigned long, vlen) 830 unsigned long, vlen)
819{ 831{
820 struct fd f = fdget(fd); 832 struct fd f = fdget_pos(fd);
821 ssize_t ret = -EBADF; 833 ssize_t ret = -EBADF;
822 834
823 if (f.file) { 835 if (f.file) {
@@ -825,7 +837,7 @@ SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
825 ret = vfs_writev(f.file, vec, vlen, &pos); 837 ret = vfs_writev(f.file, vec, vlen, &pos);
826 if (ret >= 0) 838 if (ret >= 0)
827 file_pos_write(f.file, pos); 839 file_pos_write(f.file, pos);
828 fdput(f); 840 fdput_pos(f);
829 } 841 }
830 842
831 if (ret > 0) 843 if (ret > 0)
@@ -968,7 +980,7 @@ COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
968 const struct compat_iovec __user *,vec, 980 const struct compat_iovec __user *,vec,
969 compat_ulong_t, vlen) 981 compat_ulong_t, vlen)
970{ 982{
971 struct fd f = fdget(fd); 983 struct fd f = fdget_pos(fd);
972 ssize_t ret; 984 ssize_t ret;
973 loff_t pos; 985 loff_t pos;
974 986
@@ -978,7 +990,7 @@ COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
978 ret = compat_readv(f.file, vec, vlen, &pos); 990 ret = compat_readv(f.file, vec, vlen, &pos);
979 if (ret >= 0) 991 if (ret >= 0)
980 f.file->f_pos = pos; 992 f.file->f_pos = pos;
981 fdput(f); 993 fdput_pos(f);
982 return ret; 994 return ret;
983} 995}
984 996
@@ -1035,7 +1047,7 @@ COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
1035 const struct compat_iovec __user *, vec, 1047 const struct compat_iovec __user *, vec,
1036 compat_ulong_t, vlen) 1048 compat_ulong_t, vlen)
1037{ 1049{
1038 struct fd f = fdget(fd); 1050 struct fd f = fdget_pos(fd);
1039 ssize_t ret; 1051 ssize_t ret;
1040 loff_t pos; 1052 loff_t pos;
1041 1053
@@ -1045,7 +1057,7 @@ COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
1045 ret = compat_writev(f.file, vec, vlen, &pos); 1057 ret = compat_writev(f.file, vec, vlen, &pos);
1046 if (ret >= 0) 1058 if (ret >= 0)
1047 f.file->f_pos = pos; 1059 f.file->f_pos = pos;
1048 fdput(f); 1060 fdput_pos(f);
1049 return ret; 1061 return ret;
1050} 1062}
1051 1063
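The read/write/lseek/readv/writev entry points now bracket the position update with fdget_pos()/fdput_pos(), so two threads sharing a file description can no longer interleave the read-modify-write of f_pos. A userspace analogue of the race being closed, with a pthread mutex standing in for f_pos_lock (a sketch of the idea, not the kernel path):

#include <stdio.h>
#include <pthread.h>

static long long pos;           /* stands in for file->f_pos */
static pthread_mutex_t pos_lock = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < 100000; i++) {
                pthread_mutex_lock(&pos_lock);          /* fdget_pos() */
                long long p = pos;                      /* file_pos_read() */
                pos = p + 1;                            /* file_pos_write() */
                pthread_mutex_unlock(&pos_lock);        /* fdput_pos() */
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("final pos = %lld (expected 200000)\n", pos);
        return 0;       /* drop the locking and updates start getting lost */
}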
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index be85127bfed3..f27000f55a83 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -171,6 +171,11 @@ static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 add
171 return 0; 171 return 0;
172} 172}
173 173
174static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
175{
176 return -ENXIO;
177}
178
174static inline int kvm_vgic_init(struct kvm *kvm) 179static inline int kvm_vgic_init(struct kvm *kvm)
175{ 180{
176 return 0; 181 return 0;
diff --git a/include/linux/audit.h b/include/linux/audit.h
index aa865a9a4c4f..ec1464df4c60 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -43,6 +43,7 @@ struct mq_attr;
43struct mqstat; 43struct mqstat;
44struct audit_watch; 44struct audit_watch;
45struct audit_tree; 45struct audit_tree;
46struct sk_buff;
46 47
47struct audit_krule { 48struct audit_krule {
48 int vers_ops; 49 int vers_ops;
@@ -463,7 +464,7 @@ extern int audit_filter_user(int type);
463extern int audit_filter_type(int type); 464extern int audit_filter_type(int type);
464extern int audit_rule_change(int type, __u32 portid, int seq, 465extern int audit_rule_change(int type, __u32 portid, int seq,
465 void *data, size_t datasz); 466 void *data, size_t datasz);
466extern int audit_list_rules_send(__u32 portid, int seq); 467extern int audit_list_rules_send(struct sk_buff *request_skb, int seq);
467 468
468extern u32 audit_enabled; 469extern u32 audit_enabled;
469#else /* CONFIG_AUDIT */ 470#else /* CONFIG_AUDIT */
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 092b64168d7f..4a21a872dbbd 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -245,6 +245,10 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
245void omap2_init_clk_clkdm(struct clk_hw *clk); 245void omap2_init_clk_clkdm(struct clk_hw *clk);
246unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, 246unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
247 unsigned long parent_rate); 247 unsigned long parent_rate);
248int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
249 unsigned long parent_rate);
250long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
251 unsigned long *prate);
248int omap2_clkops_enable_clkdm(struct clk_hw *hw); 252int omap2_clkops_enable_clkdm(struct clk_hw *hw);
249void omap2_clkops_disable_clkdm(struct clk_hw *hw); 253void omap2_clkops_disable_clkdm(struct clk_hw *hw);
250int omap2_clk_disable_autoidle_all(void); 254int omap2_clk_disable_autoidle_all(void);
diff --git a/include/linux/file.h b/include/linux/file.h
index cbacf4faf447..4d69123377a2 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -28,33 +28,36 @@ static inline void fput_light(struct file *file, int fput_needed)
28 28
29struct fd { 29struct fd {
30 struct file *file; 30 struct file *file;
31 int need_put; 31 unsigned int flags;
32}; 32};
33#define FDPUT_FPUT 1
34#define FDPUT_POS_UNLOCK 2
33 35
34static inline void fdput(struct fd fd) 36static inline void fdput(struct fd fd)
35{ 37{
36 if (fd.need_put) 38 if (fd.flags & FDPUT_FPUT)
37 fput(fd.file); 39 fput(fd.file);
38} 40}
39 41
40extern struct file *fget(unsigned int fd); 42extern struct file *fget(unsigned int fd);
41extern struct file *fget_light(unsigned int fd, int *fput_needed); 43extern struct file *fget_raw(unsigned int fd);
44extern unsigned long __fdget(unsigned int fd);
45extern unsigned long __fdget_raw(unsigned int fd);
46extern unsigned long __fdget_pos(unsigned int fd);
42 47
43static inline struct fd fdget(unsigned int fd) 48static inline struct fd __to_fd(unsigned long v)
44{ 49{
45 int b; 50 return (struct fd){(struct file *)(v & ~3),v & 3};
46 struct file *f = fget_light(fd, &b);
47 return (struct fd){f,b};
48} 51}
49 52
50extern struct file *fget_raw(unsigned int fd); 53static inline struct fd fdget(unsigned int fd)
51extern struct file *fget_raw_light(unsigned int fd, int *fput_needed); 54{
55 return __to_fd(__fdget(fd));
56}
52 57
53static inline struct fd fdget_raw(unsigned int fd) 58static inline struct fd fdget_raw(unsigned int fd)
54{ 59{
55 int b; 60 return __to_fd(__fdget_raw(fd));
56 struct file *f = fget_raw_light(fd, &b);
57 return (struct fd){f,b};
58} 61}
59 62
60extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); 63extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
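struct fd now carries its flags in the value returned by __fdget*(): because struct file is at least 4-byte aligned (see the aligned(4) attribute added to struct file further down), the two low bits of the pointer are free for FDPUT_FPUT and FDPUT_POS_UNLOCK, and __to_fd() splits them back apart. A userspace sketch of the packing, with a dummy struct in place of struct file:

#include <stdio.h>
#include <stdint.h>

#define FPUT            1u
#define POS_UNLOCK      2u

struct dummy_file { int payload; } __attribute__((aligned(4)));

int main(void)
{
        static struct dummy_file f = { .payload = 42 };

        /* pack, as __fdget_pos() does: pointer | flag bits */
        uintptr_t v = (uintptr_t)&f | FPUT | POS_UNLOCK;

        /* unpack, as __to_fd() does */
        struct dummy_file *file = (struct dummy_file *)(v & ~(uintptr_t)3);
        unsigned int flags = v & 3;

        printf("payload=%d need_fput=%d need_pos_unlock=%d\n",
               file->payload, !!(flags & FPUT), !!(flags & POS_UNLOCK));
        return 0;
}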
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 5d7782e42b8f..c3683bdf28fe 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -200,6 +200,7 @@ struct fw_device {
200 unsigned irmc:1; 200 unsigned irmc:1;
201 unsigned bc_implemented:2; 201 unsigned bc_implemented:2;
202 202
203 work_func_t workfn;
203 struct delayed_work work; 204 struct delayed_work work;
204 struct fw_attribute_group attribute_group; 205 struct fw_attribute_group attribute_group;
205}; 206};
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 60829565e552..23b2a35d712e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -123,6 +123,9 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
123/* File is opened with O_PATH; almost nothing can be done with it */ 123/* File is opened with O_PATH; almost nothing can be done with it */
124#define FMODE_PATH ((__force fmode_t)0x4000) 124#define FMODE_PATH ((__force fmode_t)0x4000)
125 125
126/* File needs atomic accesses to f_pos */
127#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000)
128
126/* File was opened by fanotify and shouldn't generate fanotify events */ 129/* File was opened by fanotify and shouldn't generate fanotify events */
127#define FMODE_NONOTIFY ((__force fmode_t)0x1000000) 130#define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
128 131
@@ -780,13 +783,14 @@ struct file {
780 const struct file_operations *f_op; 783 const struct file_operations *f_op;
781 784
782 /* 785 /*
783 * Protects f_ep_links, f_flags, f_pos vs i_size in lseek SEEK_CUR. 786 * Protects f_ep_links, f_flags.
784 * Must not be taken from IRQ context. 787 * Must not be taken from IRQ context.
785 */ 788 */
786 spinlock_t f_lock; 789 spinlock_t f_lock;
787 atomic_long_t f_count; 790 atomic_long_t f_count;
788 unsigned int f_flags; 791 unsigned int f_flags;
789 fmode_t f_mode; 792 fmode_t f_mode;
793 struct mutex f_pos_lock;
790 loff_t f_pos; 794 loff_t f_pos;
791 struct fown_struct f_owner; 795 struct fown_struct f_owner;
792 const struct cred *f_cred; 796 const struct cred *f_cred;
@@ -808,7 +812,7 @@ struct file {
808#ifdef CONFIG_DEBUG_WRITECOUNT 812#ifdef CONFIG_DEBUG_WRITECOUNT
809 unsigned long f_mnt_write_state; 813 unsigned long f_mnt_write_state;
810#endif 814#endif
811}; 815} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
812 816
813struct file_handle { 817struct file_handle {
814 __u32 handle_bytes; 818 __u32 handle_bytes;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0437439bc047..39b81dc7d01a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -123,6 +123,10 @@ struct vm_area_struct;
123 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ 123 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
124 __GFP_NO_KSWAPD) 124 __GFP_NO_KSWAPD)
125 125
126/*
127 * GFP_THISNODE does not perform any reclaim, you most likely want to
128 * use __GFP_THISNODE to allocate from a given node without fallback!
129 */
126#ifdef CONFIG_NUMA 130#ifdef CONFIG_NUMA
127#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) 131#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
128#else 132#else
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5f2052c83154..9b61b9bf81ac 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -590,10 +590,10 @@ static inline bool zone_is_empty(struct zone *zone)
590 590
591/* 591/*
592 * The NUMA zonelists are doubled because we need zonelists that restrict the 592 * The NUMA zonelists are doubled because we need zonelists that restrict the
593 * allocations to a single node for GFP_THISNODE. 593 * allocations to a single node for __GFP_THISNODE.
594 * 594 *
595 * [0] : Zonelist with fallback 595 * [0] : Zonelist with fallback
596 * [1] : No fallback (GFP_THISNODE) 596 * [1] : No fallback (__GFP_THISNODE)
597 */ 597 */
598#define MAX_ZONELISTS 2 598#define MAX_ZONELISTS 2
599 599
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index b2fb167b2e6d..5624e4e2763c 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -467,9 +467,14 @@ struct nfs_lockt_res {
467}; 467};
468 468
469struct nfs_release_lockowner_args { 469struct nfs_release_lockowner_args {
470 struct nfs4_sequence_args seq_args;
470 struct nfs_lowner lock_owner; 471 struct nfs_lowner lock_owner;
471}; 472};
472 473
474struct nfs_release_lockowner_res {
475 struct nfs4_sequence_res seq_res;
476};
477
473struct nfs4_delegreturnargs { 478struct nfs4_delegreturnargs {
474 struct nfs4_sequence_args seq_args; 479 struct nfs4_sequence_args seq_args;
475 const struct nfs_fh *fhandle; 480 const struct nfs_fh *fhandle;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9260abdd67df..b5b2df60299e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -410,7 +410,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
410 * 410 *
411 * %GFP_NOWAIT - Allocation will not sleep. 411 * %GFP_NOWAIT - Allocation will not sleep.
412 * 412 *
413 * %GFP_THISNODE - Allocate node-local memory only. 413 * %__GFP_THISNODE - Allocate node-local memory only.
414 * 414 *
415 * %GFP_DMA - Allocation suitable for DMA. 415 * %GFP_DMA - Allocation suitable for DMA.
416 * Should only be used for kmalloc() caches. Otherwise, use a 416 * Should only be used for kmalloc() caches. Otherwise, use a
diff --git a/include/net/sock.h b/include/net/sock.h
index 5c3f7c3624aa..b9586a137cad 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1488,6 +1488,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
1488 */ 1488 */
1489#define sock_owned_by_user(sk) ((sk)->sk_lock.owned) 1489#define sock_owned_by_user(sk) ((sk)->sk_lock.owned)
1490 1490
1491static inline void sock_release_ownership(struct sock *sk)
1492{
1493 sk->sk_lock.owned = 0;
1494}
1495
1491/* 1496/*
1492 * Macro so as to not evaluate some arguments when 1497 * Macro so as to not evaluate some arguments when
1493 * lockdep is not enabled. 1498 * lockdep is not enabled.
@@ -2186,7 +2191,6 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2186{ 2191{
2187#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \ 2192#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \
2188 (1UL << SOCK_RCVTSTAMP) | \ 2193 (1UL << SOCK_RCVTSTAMP) | \
2189 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
2190 (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ 2194 (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \
2191 (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ 2195 (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \
2192 (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) 2196 (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index ae5a17111968..4483fadfa68d 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -12,6 +12,7 @@ struct iscsit_transport {
12 int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *); 12 int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
13 int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *); 13 int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
14 void (*iscsit_free_np)(struct iscsi_np *); 14 void (*iscsit_free_np)(struct iscsi_np *);
15 void (*iscsit_wait_conn)(struct iscsi_conn *);
15 void (*iscsit_free_conn)(struct iscsi_conn *); 16 void (*iscsit_free_conn)(struct iscsi_conn *);
16 int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *); 17 int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
17 int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32); 18 int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index ddc179b7a105..1fef3e6e9436 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -83,7 +83,7 @@ DECLARE_EVENT_CLASS(rpc_task_running,
83 ), 83 ),
84 84
85 TP_fast_assign( 85 TP_fast_assign(
86 __entry->client_id = clnt->cl_clid; 86 __entry->client_id = clnt ? clnt->cl_clid : -1;
87 __entry->task_id = task->tk_pid; 87 __entry->task_id = task->tk_pid;
88 __entry->action = action; 88 __entry->action = action;
89 __entry->runstate = task->tk_runstate; 89 __entry->runstate = task->tk_runstate;
@@ -91,7 +91,7 @@ DECLARE_EVENT_CLASS(rpc_task_running,
91 __entry->flags = task->tk_flags; 91 __entry->flags = task->tk_flags;
92 ), 92 ),
93 93
94 TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d action=%pf", 94 TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf",
95 __entry->task_id, __entry->client_id, 95 __entry->task_id, __entry->client_id,
96 __entry->flags, 96 __entry->flags,
97 __entry->runstate, 97 __entry->runstate,
diff --git a/init/main.c b/init/main.c
index eb03090cdced..9c7fd4c9249f 100644
--- a/init/main.c
+++ b/init/main.c
@@ -561,7 +561,6 @@ asmlinkage void __init start_kernel(void)
561 init_timers(); 561 init_timers();
562 hrtimers_init(); 562 hrtimers_init();
563 softirq_init(); 563 softirq_init();
564 acpi_early_init();
565 timekeeping_init(); 564 timekeeping_init();
566 time_init(); 565 time_init();
567 sched_clock_postinit(); 566 sched_clock_postinit();
@@ -613,6 +612,7 @@ asmlinkage void __init start_kernel(void)
613 calibrate_delay(); 612 calibrate_delay();
614 pidmap_init(); 613 pidmap_init();
615 anon_vma_init(); 614 anon_vma_init();
615 acpi_early_init();
616#ifdef CONFIG_X86 616#ifdef CONFIG_X86
617 if (efi_enabled(EFI_RUNTIME_SERVICES)) 617 if (efi_enabled(EFI_RUNTIME_SERVICES))
618 efi_enter_virtual_mode(); 618 efi_enter_virtual_mode();
diff --git a/ipc/msg.c b/ipc/msg.c
index 245db1140ad6..649853105a5d 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -901,6 +901,8 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
901 return -EINVAL; 901 return -EINVAL;
902 902
903 if (msgflg & MSG_COPY) { 903 if (msgflg & MSG_COPY) {
904 if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
905 return -EINVAL;
904 copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax)); 906 copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
905 if (IS_ERR(copy)) 907 if (IS_ERR(copy))
906 return PTR_ERR(copy); 908 return PTR_ERR(copy);
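The added check means MSG_COPY is only accepted together with IPC_NOWAIT and without MSG_EXCEPT; any other combination now fails early with -EINVAL instead of reaching the copy path. A minimal usage sketch of the accepted combination (MSG_COPY needs a kernel built with CONFIG_CHECKPOINT_RESTORE, and older libc headers may not define it):

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

#ifndef MSG_COPY
#define MSG_COPY 040000         /* from <linux/msg.h> */
#endif

struct mbuf { long mtype; char mtext[64]; };

int main(void)
{
        int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
        struct mbuf m = { .mtype = 1 }, copy;

        strcpy(m.mtext, "hello");
        msgsnd(id, &m, sizeof(m.mtext), 0);

        /* with MSG_COPY the fourth argument is a queue position, not a type */
        if (msgrcv(id, &copy, sizeof(copy.mtext), 0, MSG_COPY | IPC_NOWAIT) < 0)
                perror("msgrcv(MSG_COPY)");
        else
                printf("peeked \"%s\"; the message is still queued\n", copy.mtext);

        msgctl(id, IPC_RMID, NULL);
        return 0;
}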
diff --git a/kernel/audit.c b/kernel/audit.c
index 34c5a2310fbf..3392d3e0254a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -182,7 +182,7 @@ struct audit_buffer {
182 182
183struct audit_reply { 183struct audit_reply {
184 __u32 portid; 184 __u32 portid;
185 pid_t pid; 185 struct net *net;
186 struct sk_buff *skb; 186 struct sk_buff *skb;
187}; 187};
188 188
@@ -500,7 +500,7 @@ int audit_send_list(void *_dest)
500{ 500{
501 struct audit_netlink_list *dest = _dest; 501 struct audit_netlink_list *dest = _dest;
502 struct sk_buff *skb; 502 struct sk_buff *skb;
503 struct net *net = get_net_ns_by_pid(dest->pid); 503 struct net *net = dest->net;
504 struct audit_net *aunet = net_generic(net, audit_net_id); 504 struct audit_net *aunet = net_generic(net, audit_net_id);
505 505
506 /* wait for parent to finish and send an ACK */ 506 /* wait for parent to finish and send an ACK */
@@ -510,6 +510,7 @@ int audit_send_list(void *_dest)
510 while ((skb = __skb_dequeue(&dest->q)) != NULL) 510 while ((skb = __skb_dequeue(&dest->q)) != NULL)
511 netlink_unicast(aunet->nlsk, skb, dest->portid, 0); 511 netlink_unicast(aunet->nlsk, skb, dest->portid, 0);
512 512
513 put_net(net);
513 kfree(dest); 514 kfree(dest);
514 515
515 return 0; 516 return 0;
@@ -543,7 +544,7 @@ out_kfree_skb:
543static int audit_send_reply_thread(void *arg) 544static int audit_send_reply_thread(void *arg)
544{ 545{
545 struct audit_reply *reply = (struct audit_reply *)arg; 546 struct audit_reply *reply = (struct audit_reply *)arg;
546 struct net *net = get_net_ns_by_pid(reply->pid); 547 struct net *net = reply->net;
547 struct audit_net *aunet = net_generic(net, audit_net_id); 548 struct audit_net *aunet = net_generic(net, audit_net_id);
548 549
549 mutex_lock(&audit_cmd_mutex); 550 mutex_lock(&audit_cmd_mutex);
@@ -552,12 +553,13 @@ static int audit_send_reply_thread(void *arg)
552 /* Ignore failure. It'll only happen if the sender goes away, 553 /* Ignore failure. It'll only happen if the sender goes away,
553 because our timeout is set to infinite. */ 554 because our timeout is set to infinite. */
554 netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); 555 netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0);
556 put_net(net);
555 kfree(reply); 557 kfree(reply);
556 return 0; 558 return 0;
557} 559}
558/** 560/**
559 * audit_send_reply - send an audit reply message via netlink 561 * audit_send_reply - send an audit reply message via netlink
560 * @portid: netlink port to which to send reply 562 * @request_skb: skb of request we are replying to (used to target the reply)
561 * @seq: sequence number 563 * @seq: sequence number
562 * @type: audit message type 564 * @type: audit message type
563 * @done: done (last) flag 565 * @done: done (last) flag
@@ -568,9 +570,11 @@ static int audit_send_reply_thread(void *arg)
568 * Allocates an skb, builds the netlink message, and sends it to the port id. 570 * Allocates an skb, builds the netlink message, and sends it to the port id.
569 * No failure notifications. 571 * No failure notifications.
570 */ 572 */
571static void audit_send_reply(__u32 portid, int seq, int type, int done, 573static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done,
572 int multi, const void *payload, int size) 574 int multi, const void *payload, int size)
573{ 575{
576 u32 portid = NETLINK_CB(request_skb).portid;
577 struct net *net = sock_net(NETLINK_CB(request_skb).sk);
574 struct sk_buff *skb; 578 struct sk_buff *skb;
575 struct task_struct *tsk; 579 struct task_struct *tsk;
576 struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), 580 struct audit_reply *reply = kmalloc(sizeof(struct audit_reply),
@@ -583,8 +587,8 @@ static void audit_send_reply(__u32 portid, int seq, int type, int done,
583 if (!skb) 587 if (!skb)
584 goto out; 588 goto out;
585 589
590 reply->net = get_net(net);
586 reply->portid = portid; 591 reply->portid = portid;
587 reply->pid = task_pid_vnr(current);
588 reply->skb = skb; 592 reply->skb = skb;
589 593
590 tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); 594 tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
@@ -673,8 +677,7 @@ static int audit_get_feature(struct sk_buff *skb)
673 677
674 seq = nlmsg_hdr(skb)->nlmsg_seq; 678 seq = nlmsg_hdr(skb)->nlmsg_seq;
675 679
676 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0, 680 audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &af, sizeof(af));
677 &af, sizeof(af));
678 681
679 return 0; 682 return 0;
680} 683}
@@ -794,8 +797,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
794 s.backlog = skb_queue_len(&audit_skb_queue); 797 s.backlog = skb_queue_len(&audit_skb_queue);
795 s.version = AUDIT_VERSION_LATEST; 798 s.version = AUDIT_VERSION_LATEST;
796 s.backlog_wait_time = audit_backlog_wait_time; 799 s.backlog_wait_time = audit_backlog_wait_time;
797 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0, 800 audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
798 &s, sizeof(s));
799 break; 801 break;
800 } 802 }
801 case AUDIT_SET: { 803 case AUDIT_SET: {
@@ -905,7 +907,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
905 seq, data, nlmsg_len(nlh)); 907 seq, data, nlmsg_len(nlh));
906 break; 908 break;
907 case AUDIT_LIST_RULES: 909 case AUDIT_LIST_RULES:
908 err = audit_list_rules_send(NETLINK_CB(skb).portid, seq); 910 err = audit_list_rules_send(skb, seq);
909 break; 911 break;
910 case AUDIT_TRIM: 912 case AUDIT_TRIM:
911 audit_trim_trees(); 913 audit_trim_trees();
@@ -970,8 +972,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
970 memcpy(sig_data->ctx, ctx, len); 972 memcpy(sig_data->ctx, ctx, len);
971 security_release_secctx(ctx, len); 973 security_release_secctx(ctx, len);
972 } 974 }
973 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO, 975 audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0,
974 0, 0, sig_data, sizeof(*sig_data) + len); 976 sig_data, sizeof(*sig_data) + len);
975 kfree(sig_data); 977 kfree(sig_data);
976 break; 978 break;
977 case AUDIT_TTY_GET: { 979 case AUDIT_TTY_GET: {
@@ -983,8 +985,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
983 s.log_passwd = tsk->signal->audit_tty_log_passwd; 985 s.log_passwd = tsk->signal->audit_tty_log_passwd;
984 spin_unlock(&tsk->sighand->siglock); 986 spin_unlock(&tsk->sighand->siglock);
985 987
986 audit_send_reply(NETLINK_CB(skb).portid, seq, 988 audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
987 AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
988 break; 989 break;
989 } 990 }
990 case AUDIT_TTY_SET: { 991 case AUDIT_TTY_SET: {
diff --git a/kernel/audit.h b/kernel/audit.h
index 57cc64d67718..8df132214606 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -247,7 +247,7 @@ extern void audit_panic(const char *message);
247 247
248struct audit_netlink_list { 248struct audit_netlink_list {
249 __u32 portid; 249 __u32 portid;
250 pid_t pid; 250 struct net *net;
251 struct sk_buff_head q; 251 struct sk_buff_head q;
252}; 252};
253 253
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 14a78cca384e..92062fd6cc8c 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -29,6 +29,8 @@
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/security.h> 31#include <linux/security.h>
32#include <net/net_namespace.h>
33#include <net/sock.h>
32#include "audit.h" 34#include "audit.h"
33 35
34/* 36/*
@@ -1065,11 +1067,13 @@ int audit_rule_change(int type, __u32 portid, int seq, void *data,
1065 1067
1066/** 1068/**
1067 * audit_list_rules_send - list the audit rules 1069 * audit_list_rules_send - list the audit rules
1068 * @portid: target portid for netlink audit messages 1070 * @request_skb: skb of request we are replying to (used to target the reply)
1069 * @seq: netlink audit message sequence (serial) number 1071 * @seq: netlink audit message sequence (serial) number
1070 */ 1072 */
1071int audit_list_rules_send(__u32 portid, int seq) 1073int audit_list_rules_send(struct sk_buff *request_skb, int seq)
1072{ 1074{
1075 u32 portid = NETLINK_CB(request_skb).portid;
1076 struct net *net = sock_net(NETLINK_CB(request_skb).sk);
1073 struct task_struct *tsk; 1077 struct task_struct *tsk;
1074 struct audit_netlink_list *dest; 1078 struct audit_netlink_list *dest;
1075 int err = 0; 1079 int err = 0;
@@ -1083,8 +1087,8 @@ int audit_list_rules_send(__u32 portid, int seq)
1083 dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); 1087 dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL);
1084 if (!dest) 1088 if (!dest)
1085 return -ENOMEM; 1089 return -ENOMEM;
1090 dest->net = get_net(net);
1086 dest->portid = portid; 1091 dest->portid = portid;
1087 dest->pid = task_pid_vnr(current);
1088 skb_queue_head_init(&dest->q); 1092 skb_queue_head_init(&dest->q);
1089 1093
1090 mutex_lock(&audit_filter_mutex); 1094 mutex_lock(&audit_filter_mutex);
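[Editor's note] The audit hunks above change audit_send_reply() and audit_list_rules_send() to take the request skb and derive the reply target (portid plus owning network namespace) from it, instead of carrying a bare portid and a pid. A minimal userspace-style sketch of that refactoring idea follows; struct request and struct net here are hypothetical stand-ins for the kernel's skb and netns objects, not kernel API.

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's skb/netns objects. */
    struct net { int id; };
    struct request { unsigned int portid; struct net *net; };

    /* Before: callers passed a bare portid, so the reply path had no
     * namespace information to route the answer correctly. */
    static void send_reply_old(unsigned int portid) {
        printf("reply to portid %u (namespace unknown)\n", portid);
    }

    /* After: the handler receives the request itself and derives both
     * the portid and the owning namespace from it. */
    static void send_reply_new(const struct request *req) {
        printf("reply to portid %u in netns %d\n", req->portid, req->net->id);
    }

    int main(void) {
        struct net ns = { .id = 7 };
        struct request req = { .portid = 4242, .net = &ns };
        send_reply_old(req.portid);
        send_reply_new(&req);
        return 0;
    }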
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4410ac6a55f1..e6b1b66afe52 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
974 * Temporarilly set tasks mems_allowed to target nodes of migration, 974 * Temporarilly set tasks mems_allowed to target nodes of migration,
975 * so that the migration code can allocate pages on these nodes. 975 * so that the migration code can allocate pages on these nodes.
976 * 976 *
977 * Call holding cpuset_mutex, so current's cpuset won't change
978 * during this call, as manage_mutex holds off any cpuset_attach()
979 * calls. Therefore we don't need to take task_lock around the
980 * call to guarantee_online_mems(), as we know no one is changing
981 * our task's cpuset.
982 *
983 * While the mm_struct we are migrating is typically from some 977 * While the mm_struct we are migrating is typically from some
984 * other task, the task_struct mems_allowed that we are hacking 978 * other task, the task_struct mems_allowed that we are hacking
985 * is for our current task, which must allocate new pages for that 979 * is for our current task, which must allocate new pages for that
@@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
996 990
997 do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); 991 do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
998 992
993 rcu_read_lock();
999 mems_cs = effective_nodemask_cpuset(task_cs(tsk)); 994 mems_cs = effective_nodemask_cpuset(task_cs(tsk));
1000 guarantee_online_mems(mems_cs, &tsk->mems_allowed); 995 guarantee_online_mems(mems_cs, &tsk->mems_allowed);
996 rcu_read_unlock();
1001} 997}
1002 998
1003/* 999/*
@@ -2486,9 +2482,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
2486 2482
2487 task_lock(current); 2483 task_lock(current);
2488 cs = nearest_hardwall_ancestor(task_cs(current)); 2484 cs = nearest_hardwall_ancestor(task_cs(current));
2485 allowed = node_isset(node, cs->mems_allowed);
2489 task_unlock(current); 2486 task_unlock(current);
2490 2487
2491 allowed = node_isset(node, cs->mems_allowed);
2492 mutex_unlock(&callback_mutex); 2488 mutex_unlock(&callback_mutex);
2493 return allowed; 2489 return allowed;
2494} 2490}
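[Editor's note] The cpuset hunks above move the node_isset() read inside task_lock() and wrap the task_cs() dereference in rcu_read_lock(), so the protected structure is never touched after its lock is dropped. A generic sketch of the same copy-while-locked pattern, using a pthread mutex in place of task_lock() (build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    /* Shared state protected by a mutex, standing in for cs->mems_allowed
     * protected by task_lock()/RCU in the hunks above. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long mems_allowed = 0x3;

    static int node_allowed(int node) {
        int allowed;

        pthread_mutex_lock(&lock);
        /* Read the protected value while the lock is still held ... */
        allowed = !!(mems_allowed & (1UL << node));
        pthread_mutex_unlock(&lock);
        /* ... rather than dereferencing shared state here, where a
         * concurrent update (or a freed cpuset) could race with us. */
        return allowed;
    }

    int main(void) {
        printf("node 1 allowed: %d\n", node_allowed(1));
        printf("node 5 allowed: %d\n", node_allowed(5));
        return 0;
    }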
diff --git a/kernel/profile.c b/kernel/profile.c
index 6631e1ef55ab..ebdd9c1a86b4 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -549,14 +549,14 @@ static int create_hash_tables(void)
549 struct page *page; 549 struct page *page;
550 550
551 page = alloc_pages_exact_node(node, 551 page = alloc_pages_exact_node(node,
552 GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 552 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
553 0); 553 0);
554 if (!page) 554 if (!page)
555 goto out_cleanup; 555 goto out_cleanup;
556 per_cpu(cpu_profile_hits, cpu)[1] 556 per_cpu(cpu_profile_hits, cpu)[1]
557 = (struct profile_hit *)page_address(page); 557 = (struct profile_hit *)page_address(page);
558 page = alloc_pages_exact_node(node, 558 page = alloc_pages_exact_node(node,
559 GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 559 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
560 0); 560 0);
561 if (!page) 561 if (!page)
562 goto out_cleanup; 562 goto out_cleanup;
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 43c2bcc35761..b30a2924ef14 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -301,14 +301,14 @@ u64 sched_clock_cpu(int cpu)
301 if (unlikely(!sched_clock_running)) 301 if (unlikely(!sched_clock_running))
302 return 0ull; 302 return 0ull;
303 303
304 preempt_disable(); 304 preempt_disable_notrace();
305 scd = cpu_sdc(cpu); 305 scd = cpu_sdc(cpu);
306 306
307 if (cpu != smp_processor_id()) 307 if (cpu != smp_processor_id())
308 clock = sched_clock_remote(scd); 308 clock = sched_clock_remote(scd);
309 else 309 else
310 clock = sched_clock_local(scd); 310 clock = sched_clock_local(scd);
311 preempt_enable(); 311 preempt_enable_notrace();
312 312
313 return clock; 313 return clock;
314} 314}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6edbef296ece..f5c6635b806c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3338,6 +3338,15 @@ recheck:
3338 return -EPERM; 3338 return -EPERM;
3339 } 3339 }
3340 3340
3341 /*
3342 * Can't set/change SCHED_DEADLINE policy at all for now
3343 * (safest behavior); in the future we would like to allow
3344 * unprivileged DL tasks to increase their relative deadline
3345 * or reduce their runtime (both ways reducing utilization)
3346 */
3347 if (dl_policy(policy))
3348 return -EPERM;
3349
3341 /* 3350 /*
3342 * Treat SCHED_IDLE as nice 20. Only allow a switch to 3351 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3343 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 3352 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
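[Editor's note] With the check added above, an unprivileged caller asking for SCHED_DEADLINE is refused with -EPERM. A rough userspace probe is sketched below; it assumes kernel headers new enough to define SYS_sched_setattr, and it hand-copies the sched_attr ABI layout because no uapi header exported it at the time, so treat both as assumptions rather than a documented interface.

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef SCHED_DEADLINE
    #define SCHED_DEADLINE 6
    #endif

    /* Hand-rolled copy of the kernel's struct sched_attr layout. */
    struct sched_attr {
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;
        uint32_t sched_priority;
        uint64_t sched_runtime;
        uint64_t sched_deadline;
        uint64_t sched_period;
    };

    int main(void) {
        struct sched_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.sched_policy   = SCHED_DEADLINE;
        attr.sched_runtime  = 10 * 1000 * 1000;   /* 10 ms */
        attr.sched_deadline = 30 * 1000 * 1000;   /* 30 ms */
        attr.sched_period   = 30 * 1000 * 1000;

        /* With the hunk above, an unprivileged caller gets EPERM here. */
        if (syscall(SYS_sched_setattr, 0, &attr, 0) < 0)
            perror("sched_setattr");
        else
            puts("SCHED_DEADLINE granted (privileged caller)");
        return 0;
    }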
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 84571e09c907..01fbae5b97b7 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -293,7 +293,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
293 */ 293 */
294 smp_call_function_single(min(cpu1, cpu2), 294 smp_call_function_single(min(cpu1, cpu2),
295 &irq_cpu_stop_queue_work, 295 &irq_cpu_stop_queue_work,
296 &call_args, 0); 296 &call_args, 1);
297 lg_local_unlock(&stop_cpus_lock); 297 lg_local_unlock(&stop_cpus_lock);
298 preempt_enable(); 298 preempt_enable();
299 299
diff --git a/mm/Kconfig b/mm/Kconfig
index 2d9f1504d75e..2888024e0b0a 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -575,5 +575,5 @@ config PGTABLE_MAPPING
575 then you should select this. This causes zsmalloc to use page table 575 then you should select this. This causes zsmalloc to use page table
576 mapping rather than copying for object mapping. 576 mapping rather than copying for object mapping.
577 577
578 You can check speed with zsmalloc benchmark[1]. 578 You can check speed with zsmalloc benchmark:
579 [1] https://github.com/spartacus06/zsmalloc 579 https://github.com/spartacus06/zsmapbench
diff --git a/mm/compaction.c b/mm/compaction.c
index b48c5259ea33..918577595ea8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -251,7 +251,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
251{ 251{
252 int nr_scanned = 0, total_isolated = 0; 252 int nr_scanned = 0, total_isolated = 0;
253 struct page *cursor, *valid_page = NULL; 253 struct page *cursor, *valid_page = NULL;
254 unsigned long nr_strict_required = end_pfn - blockpfn;
255 unsigned long flags; 254 unsigned long flags;
256 bool locked = false; 255 bool locked = false;
257 256
@@ -264,11 +263,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
264 263
265 nr_scanned++; 264 nr_scanned++;
266 if (!pfn_valid_within(blockpfn)) 265 if (!pfn_valid_within(blockpfn))
267 continue; 266 goto isolate_fail;
267
268 if (!valid_page) 268 if (!valid_page)
269 valid_page = page; 269 valid_page = page;
270 if (!PageBuddy(page)) 270 if (!PageBuddy(page))
271 continue; 271 goto isolate_fail;
272 272
273 /* 273 /*
274 * The zone lock must be held to isolate freepages. 274 * The zone lock must be held to isolate freepages.
@@ -289,12 +289,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
289 289
290 /* Recheck this is a buddy page under lock */ 290 /* Recheck this is a buddy page under lock */
291 if (!PageBuddy(page)) 291 if (!PageBuddy(page))
292 continue; 292 goto isolate_fail;
293 293
294 /* Found a free page, break it into order-0 pages */ 294 /* Found a free page, break it into order-0 pages */
295 isolated = split_free_page(page); 295 isolated = split_free_page(page);
296 if (!isolated && strict)
297 break;
298 total_isolated += isolated; 296 total_isolated += isolated;
299 for (i = 0; i < isolated; i++) { 297 for (i = 0; i < isolated; i++) {
300 list_add(&page->lru, freelist); 298 list_add(&page->lru, freelist);
@@ -305,7 +303,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
305 if (isolated) { 303 if (isolated) {
306 blockpfn += isolated - 1; 304 blockpfn += isolated - 1;
307 cursor += isolated - 1; 305 cursor += isolated - 1;
306 continue;
308 } 307 }
308
309isolate_fail:
310 if (strict)
311 break;
312 else
313 continue;
314
309 } 315 }
310 316
311 trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated); 317 trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
@@ -315,7 +321,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
315 * pages requested were isolated. If there were any failures, 0 is 321 * pages requested were isolated. If there were any failures, 0 is
316 * returned and CMA will fail. 322 * returned and CMA will fail.
317 */ 323 */
318 if (strict && nr_strict_required > total_isolated) 324 if (strict && blockpfn < end_pfn)
319 total_isolated = 0; 325 total_isolated = 0;
320 326
321 if (locked) 327 if (locked)
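[Editor's note] The compaction rewrite above funnels every per-page failure through a shared isolate_fail label, so a strict (CMA) scan aborts on the first failure while best-effort compaction keeps going, and the final "blockpfn < end_pfn" test replaces the old nr_strict_required counter. A small control-flow sketch of that pattern, detached from any mm data structures:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-in: "isolate" every even-numbered pfn, fail on odd ones. */
    static int try_isolate(unsigned long pfn) {
        return (pfn % 2) == 0;
    }

    static unsigned long scan_block(unsigned long start, unsigned long end,
                                    bool strict) {
        unsigned long pfn, isolated = 0;

        for (pfn = start; pfn < end; pfn++) {
            if (!try_isolate(pfn))
                goto isolate_fail;
            isolated++;
            continue;

    isolate_fail:
            if (strict)     /* CMA-style caller: any failure ends the scan */
                break;
            /* best-effort caller: just move on to the next pfn */
        }

        /* Mirror the new "blockpfn < end_pfn" test: a strict scan that did
         * not reach the end of the block reports nothing isolated. */
        if (strict && pfn < end)
            isolated = 0;
        return isolated;
    }

    int main(void) {
        printf("best effort: %lu\n", scan_block(0, 10, false)); /* 5 */
        printf("strict:      %lu\n", scan_block(0, 10, true));  /* 0 */
        return 0;
    }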
diff --git a/mm/migrate.c b/mm/migrate.c
index 482a33d89134..b494fdb9a636 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1158,7 +1158,7 @@ static struct page *new_page_node(struct page *p, unsigned long private,
1158 pm->node); 1158 pm->node);
1159 else 1159 else
1160 return alloc_pages_exact_node(pm->node, 1160 return alloc_pages_exact_node(pm->node,
1161 GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0); 1161 GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
1162} 1162}
1163 1163
1164/* 1164/*
@@ -1544,9 +1544,9 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
1544 struct page *newpage; 1544 struct page *newpage;
1545 1545
1546 newpage = alloc_pages_exact_node(nid, 1546 newpage = alloc_pages_exact_node(nid,
1547 (GFP_HIGHUSER_MOVABLE | GFP_THISNODE | 1547 (GFP_HIGHUSER_MOVABLE |
1548 __GFP_NOMEMALLOC | __GFP_NORETRY | 1548 __GFP_THISNODE | __GFP_NOMEMALLOC |
1549 __GFP_NOWARN) & 1549 __GFP_NORETRY | __GFP_NOWARN) &
1550 ~GFP_IOFS, 0); 1550 ~GFP_IOFS, 0);
1551 1551
1552 return newpage; 1552 return newpage;
@@ -1747,7 +1747,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1747 goto out_dropref; 1747 goto out_dropref;
1748 1748
1749 new_page = alloc_pages_node(node, 1749 new_page = alloc_pages_node(node,
1750 (GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER); 1750 (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,
1751 HPAGE_PMD_ORDER);
1751 if (!new_page) 1752 if (!new_page)
1752 goto out_fail; 1753 goto out_fail;
1753 1754
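[Editor's note] The profile.c and migrate.c hunks above swap the composite GFP_THISNODE, which in NUMA builds of this era's gfp.h bundles __GFP_THISNODE with __GFP_NORETRY and __GFP_NOWARN, for the single __GFP_THISNODE bit, because the extra no-retry/no-warn semantics were not intended at these call sites. A tiny bitmask illustration with made-up flag values (the real GFP constants live in gfp.h and differ):

    #include <stdio.h>

    /* Made-up values; only the composite-vs-single-bit distinction matters. */
    #define FLAG_THISNODE  0x01u
    #define FLAG_NORETRY   0x02u
    #define FLAG_NOWARN    0x04u
    #define FLAG_COMPOSITE (FLAG_THISNODE | FLAG_NORETRY | FLAG_NOWARN)

    int main(void) {
        unsigned base = 0x100;  /* pretend this is GFP_HIGHUSER_MOVABLE */

        unsigned before = base | FLAG_COMPOSITE;  /* silently adds NORETRY+NOWARN */
        unsigned after  = base | FLAG_THISNODE;   /* only the node restriction */

        printf("composite: %#x  single bit: %#x\n", before, after);
        printf("composite implies NORETRY: %s\n",
               (before & FLAG_NORETRY) ? "yes" : "no");
        return 0;
    }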
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index de51c48c4393..4b65aa492fb6 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -538,6 +538,9 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev
538 struct vlan_dev_priv *vlan = vlan_dev_priv(dev); 538 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
539 struct net_device *real_dev = vlan->real_dev; 539 struct net_device *real_dev = vlan->real_dev;
540 540
541 if (saddr == NULL)
542 saddr = dev->dev_addr;
543
541 return dev_hard_header(skb, real_dev, type, daddr, saddr, len); 544 return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
542} 545}
543 546
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index ef66365b7354..93067ecdb9a2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1127,9 +1127,10 @@ static void br_multicast_query_received(struct net_bridge *br,
1127 struct net_bridge_port *port, 1127 struct net_bridge_port *port,
1128 struct bridge_mcast_querier *querier, 1128 struct bridge_mcast_querier *querier,
1129 int saddr, 1129 int saddr,
1130 bool is_general_query,
1130 unsigned long max_delay) 1131 unsigned long max_delay)
1131{ 1132{
1132 if (saddr) 1133 if (saddr && is_general_query)
1133 br_multicast_update_querier_timer(br, querier, max_delay); 1134 br_multicast_update_querier_timer(br, querier, max_delay);
1134 else if (timer_pending(&querier->timer)) 1135 else if (timer_pending(&querier->timer))
1135 return; 1136 return;
@@ -1181,8 +1182,16 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1181 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; 1182 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
1182 } 1183 }
1183 1184
1185 /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
1186 * all-systems destination addresses (224.0.0.1) for general queries
1187 */
1188 if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) {
1189 err = -EINVAL;
1190 goto out;
1191 }
1192
1184 br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr, 1193 br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
1185 max_delay); 1194 !group, max_delay);
1186 1195
1187 if (!group) 1196 if (!group)
1188 goto out; 1197 goto out;
@@ -1228,6 +1237,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1228 unsigned long max_delay; 1237 unsigned long max_delay;
1229 unsigned long now = jiffies; 1238 unsigned long now = jiffies;
1230 const struct in6_addr *group = NULL; 1239 const struct in6_addr *group = NULL;
1240 bool is_general_query;
1231 int err = 0; 1241 int err = 0;
1232 1242
1233 spin_lock(&br->multicast_lock); 1243 spin_lock(&br->multicast_lock);
@@ -1235,6 +1245,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1235 (port && port->state == BR_STATE_DISABLED)) 1245 (port && port->state == BR_STATE_DISABLED))
1236 goto out; 1246 goto out;
1237 1247
1248 /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
1249 if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
1250 err = -EINVAL;
1251 goto out;
1252 }
1253
1238 if (skb->len == sizeof(*mld)) { 1254 if (skb->len == sizeof(*mld)) {
1239 if (!pskb_may_pull(skb, sizeof(*mld))) { 1255 if (!pskb_may_pull(skb, sizeof(*mld))) {
1240 err = -EINVAL; 1256 err = -EINVAL;
@@ -1256,8 +1272,19 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1256 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL); 1272 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
1257 } 1273 }
1258 1274
1275 is_general_query = group && ipv6_addr_any(group);
1276
1277 /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
1278 * all-nodes destination address (ff02::1) for general queries
1279 */
1280 if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) {
1281 err = -EINVAL;
1282 goto out;
1283 }
1284
1259 br_multicast_query_received(br, port, &br->ip6_querier, 1285 br_multicast_query_received(br, port, &br->ip6_querier,
1260 !ipv6_addr_any(&ip6h->saddr), max_delay); 1286 !ipv6_addr_any(&ip6h->saddr),
1287 is_general_query, max_delay);
1261 1288
1262 if (!group) 1289 if (!group)
1263 goto out; 1290 goto out;
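[Editor's note] The bridge hunks above drop IGMP/MLD general queries whose destination is not the all-hosts group 224.0.0.1 or the all-nodes group ff02::1, and only let general queries refresh the querier timer. A standalone sketch of the two destination checks, assuming ordinary POSIX socket headers rather than the kernel's helpers:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>

    #ifndef INADDR_ALLHOSTS_GROUP
    #define INADDR_ALLHOSTS_GROUP 0xe0000001u   /* 224.0.0.1 */
    #endif

    /* 224.0.0.1 is the only valid destination for an IGMP general query. */
    static int ipv4_general_query_dst_ok(struct in_addr daddr) {
        return daddr.s_addr == htonl(INADDR_ALLHOSTS_GROUP);
    }

    /* ff02::1 is the only valid destination for an MLD general query. */
    static int ipv6_general_query_dst_ok(const struct in6_addr *daddr) {
        struct in6_addr all_nodes;
        inet_pton(AF_INET6, "ff02::1", &all_nodes);
        return memcmp(daddr, &all_nodes, sizeof(all_nodes)) == 0;
    }

    int main(void) {
        struct in_addr v4ok, v4bad;
        struct in6_addr v6ok, v6bad;

        inet_pton(AF_INET, "224.0.0.1", &v4ok);
        inet_pton(AF_INET, "224.0.0.2", &v4bad);
        inet_pton(AF_INET6, "ff02::1", &v6ok);
        inet_pton(AF_INET6, "ff02::2", &v6bad);

        printf("224.0.0.1 ok: %d, 224.0.0.2 ok: %d\n",
               ipv4_general_query_dst_ok(v4ok), ipv4_general_query_dst_ok(v4bad));
        printf("ff02::1 ok: %d, ff02::2 ok: %d\n",
               ipv6_general_query_dst_ok(&v6ok), ipv6_general_query_dst_ok(&v6bad));
        return 0;
    }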
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5d6236d9fdce..869c7afe3b07 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2838,81 +2838,84 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
2838 2838
2839/** 2839/**
2840 * skb_segment - Perform protocol segmentation on skb. 2840 * skb_segment - Perform protocol segmentation on skb.
2841 * @skb: buffer to segment 2841 * @head_skb: buffer to segment
2842 * @features: features for the output path (see dev->features) 2842 * @features: features for the output path (see dev->features)
2843 * 2843 *
2844 * This function performs segmentation on the given skb. It returns 2844 * This function performs segmentation on the given skb. It returns
2845 * a pointer to the first in a list of new skbs for the segments. 2845 * a pointer to the first in a list of new skbs for the segments.
2846 * In case of error it returns ERR_PTR(err). 2846 * In case of error it returns ERR_PTR(err).
2847 */ 2847 */
2848struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) 2848struct sk_buff *skb_segment(struct sk_buff *head_skb,
2849 netdev_features_t features)
2849{ 2850{
2850 struct sk_buff *segs = NULL; 2851 struct sk_buff *segs = NULL;
2851 struct sk_buff *tail = NULL; 2852 struct sk_buff *tail = NULL;
2852 struct sk_buff *fskb = skb_shinfo(skb)->frag_list; 2853 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
2853 skb_frag_t *skb_frag = skb_shinfo(skb)->frags; 2854 skb_frag_t *frag = skb_shinfo(head_skb)->frags;
2854 unsigned int mss = skb_shinfo(skb)->gso_size; 2855 unsigned int mss = skb_shinfo(head_skb)->gso_size;
2855 unsigned int doffset = skb->data - skb_mac_header(skb); 2856 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
2857 struct sk_buff *frag_skb = head_skb;
2856 unsigned int offset = doffset; 2858 unsigned int offset = doffset;
2857 unsigned int tnl_hlen = skb_tnl_header_len(skb); 2859 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
2858 unsigned int headroom; 2860 unsigned int headroom;
2859 unsigned int len; 2861 unsigned int len;
2860 __be16 proto; 2862 __be16 proto;
2861 bool csum; 2863 bool csum;
2862 int sg = !!(features & NETIF_F_SG); 2864 int sg = !!(features & NETIF_F_SG);
2863 int nfrags = skb_shinfo(skb)->nr_frags; 2865 int nfrags = skb_shinfo(head_skb)->nr_frags;
2864 int err = -ENOMEM; 2866 int err = -ENOMEM;
2865 int i = 0; 2867 int i = 0;
2866 int pos; 2868 int pos;
2867 2869
2868 proto = skb_network_protocol(skb); 2870 proto = skb_network_protocol(head_skb);
2869 if (unlikely(!proto)) 2871 if (unlikely(!proto))
2870 return ERR_PTR(-EINVAL); 2872 return ERR_PTR(-EINVAL);
2871 2873
2872 csum = !!can_checksum_protocol(features, proto); 2874 csum = !!can_checksum_protocol(features, proto);
2873 __skb_push(skb, doffset); 2875 __skb_push(head_skb, doffset);
2874 headroom = skb_headroom(skb); 2876 headroom = skb_headroom(head_skb);
2875 pos = skb_headlen(skb); 2877 pos = skb_headlen(head_skb);
2876 2878
2877 do { 2879 do {
2878 struct sk_buff *nskb; 2880 struct sk_buff *nskb;
2879 skb_frag_t *frag; 2881 skb_frag_t *nskb_frag;
2880 int hsize; 2882 int hsize;
2881 int size; 2883 int size;
2882 2884
2883 len = skb->len - offset; 2885 len = head_skb->len - offset;
2884 if (len > mss) 2886 if (len > mss)
2885 len = mss; 2887 len = mss;
2886 2888
2887 hsize = skb_headlen(skb) - offset; 2889 hsize = skb_headlen(head_skb) - offset;
2888 if (hsize < 0) 2890 if (hsize < 0)
2889 hsize = 0; 2891 hsize = 0;
2890 if (hsize > len || !sg) 2892 if (hsize > len || !sg)
2891 hsize = len; 2893 hsize = len;
2892 2894
2893 if (!hsize && i >= nfrags && skb_headlen(fskb) && 2895 if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
2894 (skb_headlen(fskb) == len || sg)) { 2896 (skb_headlen(list_skb) == len || sg)) {
2895 BUG_ON(skb_headlen(fskb) > len); 2897 BUG_ON(skb_headlen(list_skb) > len);
2896 2898
2897 i = 0; 2899 i = 0;
2898 nfrags = skb_shinfo(fskb)->nr_frags; 2900 nfrags = skb_shinfo(list_skb)->nr_frags;
2899 skb_frag = skb_shinfo(fskb)->frags; 2901 frag = skb_shinfo(list_skb)->frags;
2900 pos += skb_headlen(fskb); 2902 frag_skb = list_skb;
2903 pos += skb_headlen(list_skb);
2901 2904
2902 while (pos < offset + len) { 2905 while (pos < offset + len) {
2903 BUG_ON(i >= nfrags); 2906 BUG_ON(i >= nfrags);
2904 2907
2905 size = skb_frag_size(skb_frag); 2908 size = skb_frag_size(frag);
2906 if (pos + size > offset + len) 2909 if (pos + size > offset + len)
2907 break; 2910 break;
2908 2911
2909 i++; 2912 i++;
2910 pos += size; 2913 pos += size;
2911 skb_frag++; 2914 frag++;
2912 } 2915 }
2913 2916
2914 nskb = skb_clone(fskb, GFP_ATOMIC); 2917 nskb = skb_clone(list_skb, GFP_ATOMIC);
2915 fskb = fskb->next; 2918 list_skb = list_skb->next;
2916 2919
2917 if (unlikely(!nskb)) 2920 if (unlikely(!nskb))
2918 goto err; 2921 goto err;
@@ -2933,7 +2936,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2933 __skb_push(nskb, doffset); 2936 __skb_push(nskb, doffset);
2934 } else { 2937 } else {
2935 nskb = __alloc_skb(hsize + doffset + headroom, 2938 nskb = __alloc_skb(hsize + doffset + headroom,
2936 GFP_ATOMIC, skb_alloc_rx_flag(skb), 2939 GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
2937 NUMA_NO_NODE); 2940 NUMA_NO_NODE);
2938 2941
2939 if (unlikely(!nskb)) 2942 if (unlikely(!nskb))
@@ -2949,12 +2952,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2949 segs = nskb; 2952 segs = nskb;
2950 tail = nskb; 2953 tail = nskb;
2951 2954
2952 __copy_skb_header(nskb, skb); 2955 __copy_skb_header(nskb, head_skb);
2953 nskb->mac_len = skb->mac_len; 2956 nskb->mac_len = head_skb->mac_len;
2954 2957
2955 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 2958 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
2956 2959
2957 skb_copy_from_linear_data_offset(skb, -tnl_hlen, 2960 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
2958 nskb->data - tnl_hlen, 2961 nskb->data - tnl_hlen,
2959 doffset + tnl_hlen); 2962 doffset + tnl_hlen);
2960 2963
@@ -2963,30 +2966,32 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2963 2966
2964 if (!sg) { 2967 if (!sg) {
2965 nskb->ip_summed = CHECKSUM_NONE; 2968 nskb->ip_summed = CHECKSUM_NONE;
2966 nskb->csum = skb_copy_and_csum_bits(skb, offset, 2969 nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
2967 skb_put(nskb, len), 2970 skb_put(nskb, len),
2968 len, 0); 2971 len, 0);
2969 continue; 2972 continue;
2970 } 2973 }
2971 2974
2972 frag = skb_shinfo(nskb)->frags; 2975 nskb_frag = skb_shinfo(nskb)->frags;
2973 2976
2974 skb_copy_from_linear_data_offset(skb, offset, 2977 skb_copy_from_linear_data_offset(head_skb, offset,
2975 skb_put(nskb, hsize), hsize); 2978 skb_put(nskb, hsize), hsize);
2976 2979
2977 skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; 2980 skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
2981 SKBTX_SHARED_FRAG;
2978 2982
2979 while (pos < offset + len) { 2983 while (pos < offset + len) {
2980 if (i >= nfrags) { 2984 if (i >= nfrags) {
2981 BUG_ON(skb_headlen(fskb)); 2985 BUG_ON(skb_headlen(list_skb));
2982 2986
2983 i = 0; 2987 i = 0;
2984 nfrags = skb_shinfo(fskb)->nr_frags; 2988 nfrags = skb_shinfo(list_skb)->nr_frags;
2985 skb_frag = skb_shinfo(fskb)->frags; 2989 frag = skb_shinfo(list_skb)->frags;
2990 frag_skb = list_skb;
2986 2991
2987 BUG_ON(!nfrags); 2992 BUG_ON(!nfrags);
2988 2993
2989 fskb = fskb->next; 2994 list_skb = list_skb->next;
2990 } 2995 }
2991 2996
2992 if (unlikely(skb_shinfo(nskb)->nr_frags >= 2997 if (unlikely(skb_shinfo(nskb)->nr_frags >=
@@ -2997,27 +3002,30 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2997 goto err; 3002 goto err;
2998 } 3003 }
2999 3004
3000 *frag = *skb_frag; 3005 if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
3001 __skb_frag_ref(frag); 3006 goto err;
3002 size = skb_frag_size(frag); 3007
3008 *nskb_frag = *frag;
3009 __skb_frag_ref(nskb_frag);
3010 size = skb_frag_size(nskb_frag);
3003 3011
3004 if (pos < offset) { 3012 if (pos < offset) {
3005 frag->page_offset += offset - pos; 3013 nskb_frag->page_offset += offset - pos;
3006 skb_frag_size_sub(frag, offset - pos); 3014 skb_frag_size_sub(nskb_frag, offset - pos);
3007 } 3015 }
3008 3016
3009 skb_shinfo(nskb)->nr_frags++; 3017 skb_shinfo(nskb)->nr_frags++;
3010 3018
3011 if (pos + size <= offset + len) { 3019 if (pos + size <= offset + len) {
3012 i++; 3020 i++;
3013 skb_frag++; 3021 frag++;
3014 pos += size; 3022 pos += size;
3015 } else { 3023 } else {
3016 skb_frag_size_sub(frag, pos + size - (offset + len)); 3024 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
3017 goto skip_fraglist; 3025 goto skip_fraglist;
3018 } 3026 }
3019 3027
3020 frag++; 3028 nskb_frag++;
3021 } 3029 }
3022 3030
3023skip_fraglist: 3031skip_fraglist:
@@ -3031,7 +3039,7 @@ perform_csum_check:
3031 nskb->len - doffset, 0); 3039 nskb->len - doffset, 0);
3032 nskb->ip_summed = CHECKSUM_NONE; 3040 nskb->ip_summed = CHECKSUM_NONE;
3033 } 3041 }
3034 } while ((offset += len) < skb->len); 3042 } while ((offset += len) < head_skb->len);
3035 3043
3036 return segs; 3044 return segs;
3037 3045
diff --git a/net/core/sock.c b/net/core/sock.c
index 5b6a9431b017..c0fc6bdad1e3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2357,10 +2357,13 @@ void release_sock(struct sock *sk)
2357 if (sk->sk_backlog.tail) 2357 if (sk->sk_backlog.tail)
2358 __release_sock(sk); 2358 __release_sock(sk);
2359 2359
2360 /* Warning : release_cb() might need to release sk ownership,
2361 * ie call sock_release_ownership(sk) before us.
2362 */
2360 if (sk->sk_prot->release_cb) 2363 if (sk->sk_prot->release_cb)
2361 sk->sk_prot->release_cb(sk); 2364 sk->sk_prot->release_cb(sk);
2362 2365
2363 sk->sk_lock.owned = 0; 2366 sock_release_ownership(sk);
2364 if (waitqueue_active(&sk->sk_lock.wq)) 2367 if (waitqueue_active(&sk->sk_lock.wq))
2365 wake_up(&sk->sk_lock.wq); 2368 wake_up(&sk->sk_lock.wq);
2366 spin_unlock_bh(&sk->sk_lock.slock); 2369 spin_unlock_bh(&sk->sk_lock.slock);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index bb075fc9a14f..3b01959bf4bb 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -208,7 +208,7 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
208 } 208 }
209 209
210 work = frag_mem_limit(nf) - nf->low_thresh; 210 work = frag_mem_limit(nf) - nf->low_thresh;
211 while (work > 0) { 211 while (work > 0 || force) {
212 spin_lock(&nf->lru_lock); 212 spin_lock(&nf->lru_lock);
213 213
214 if (list_empty(&nf->lru_list)) { 214 if (list_empty(&nf->lru_list)) {
@@ -278,9 +278,10 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
278 278
279 atomic_inc(&qp->refcnt); 279 atomic_inc(&qp->refcnt);
280 hlist_add_head(&qp->list, &hb->chain); 280 hlist_add_head(&qp->list, &hb->chain);
281 inet_frag_lru_add(nf, qp);
281 spin_unlock(&hb->chain_lock); 282 spin_unlock(&hb->chain_lock);
282 read_unlock(&f->lock); 283 read_unlock(&f->lock);
283 inet_frag_lru_add(nf, qp); 284
284 return qp; 285 return qp;
285} 286}
286 287
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f0eb4e337ec8..17a11e65e57f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -767,6 +767,17 @@ void tcp_release_cb(struct sock *sk)
767 if (flags & (1UL << TCP_TSQ_DEFERRED)) 767 if (flags & (1UL << TCP_TSQ_DEFERRED))
768 tcp_tsq_handler(sk); 768 tcp_tsq_handler(sk);
769 769
770 /* Here begins the tricky part :
771 * We are called from release_sock() with :
772 * 1) BH disabled
773 * 2) sk_lock.slock spinlock held
774 * 3) socket owned by us (sk->sk_lock.owned == 1)
775 *
776 * But following code is meant to be called from BH handlers,
777 * so we should keep BH disabled, but early release socket ownership
778 */
779 sock_release_ownership(sk);
780
770 if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) { 781 if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
771 tcp_write_timer_handler(sk); 782 tcp_write_timer_handler(sk);
772 __sock_put(sk); 783 __sock_put(sk);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index fdbfeca36d63..344e972426df 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1103,8 +1103,11 @@ retry:
1103 * Lifetime is greater than REGEN_ADVANCE time units. In particular, 1103 * Lifetime is greater than REGEN_ADVANCE time units. In particular,
1104 * an implementation must not create a temporary address with a zero 1104 * an implementation must not create a temporary address with a zero
1105 * Preferred Lifetime. 1105 * Preferred Lifetime.
1106 * Use age calculation as in addrconf_verify to avoid unnecessary
1107 * temporary addresses being generated.
1106 */ 1108 */
1107 if (tmp_prefered_lft <= regen_advance) { 1109 age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1110 if (tmp_prefered_lft <= regen_advance + age) {
1108 in6_ifa_put(ifp); 1111 in6_ifa_put(ifp);
1109 in6_dev_put(idev); 1112 in6_dev_put(idev);
1110 ret = -1; 1113 ret = -1;
diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
index cf77f3abfd06..447a7fbd1bb6 100644
--- a/net/ipv6/exthdrs_offload.c
+++ b/net/ipv6/exthdrs_offload.c
@@ -25,11 +25,11 @@ int __init ipv6_exthdrs_offload_init(void)
25 int ret; 25 int ret;
26 26
27 ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING); 27 ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING);
28 if (!ret) 28 if (ret)
29 goto out; 29 goto out;
30 30
31 ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS); 31 ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS);
32 if (!ret) 32 if (ret)
33 goto out_rt; 33 goto out_rt;
34 34
35out: 35out:
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 11dac21e6586..fba54a407bb2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1513,7 +1513,7 @@ int ip6_route_add(struct fib6_config *cfg)
1513 if (!table) 1513 if (!table)
1514 goto out; 1514 goto out;
1515 1515
1516 rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table); 1516 rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
1517 1517
1518 if (!rt) { 1518 if (!rt) {
1519 err = -ENOMEM; 1519 err = -ENOMEM;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 735d0f60c83a..85d9d94c0a3c 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -112,7 +112,6 @@ struct l2tp_net {
112 spinlock_t l2tp_session_hlist_lock; 112 spinlock_t l2tp_session_hlist_lock;
113}; 113};
114 114
115static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
116static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); 115static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
117 116
118static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) 117static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
@@ -1863,7 +1862,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_delete);
1863/* We come here whenever a session's send_seq, cookie_len or 1862/* We come here whenever a session's send_seq, cookie_len or
1864 * l2specific_len parameters are set. 1863 * l2specific_len parameters are set.
1865 */ 1864 */
1866static void l2tp_session_set_header_len(struct l2tp_session *session, int version) 1865void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1867{ 1866{
1868 if (version == L2TP_HDR_VER_2) { 1867 if (version == L2TP_HDR_VER_2) {
1869 session->hdr_len = 6; 1868 session->hdr_len = 6;
@@ -1876,6 +1875,7 @@ static void l2tp_session_set_header_len(struct l2tp_session *session, int versio
1876 } 1875 }
1877 1876
1878} 1877}
1878EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1879 1879
1880struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) 1880struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
1881{ 1881{
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 1f01ba3435bc..3f93ccd6ba97 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -263,6 +263,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
263 int length, int (*payload_hook)(struct sk_buff *skb)); 263 int length, int (*payload_hook)(struct sk_buff *skb));
264int l2tp_session_queue_purge(struct l2tp_session *session); 264int l2tp_session_queue_purge(struct l2tp_session *session);
265int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); 265int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
266void l2tp_session_set_header_len(struct l2tp_session *session, int version);
266 267
267int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, 268int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
268 int hdr_len); 269 int hdr_len);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 4cfd722e9153..bd7387adea9e 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -578,8 +578,10 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
578 if (info->attrs[L2TP_ATTR_RECV_SEQ]) 578 if (info->attrs[L2TP_ATTR_RECV_SEQ])
579 session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); 579 session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
580 580
581 if (info->attrs[L2TP_ATTR_SEND_SEQ]) 581 if (info->attrs[L2TP_ATTR_SEND_SEQ]) {
582 session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); 582 session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
583 l2tp_session_set_header_len(session, session->tunnel->version);
584 }
583 585
584 if (info->attrs[L2TP_ATTR_LNS_MODE]) 586 if (info->attrs[L2TP_ATTR_LNS_MODE])
585 session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); 587 session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index be5fadf34739..5990919356a5 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -254,12 +254,14 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
254 po = pppox_sk(sk); 254 po = pppox_sk(sk);
255 ppp_input(&po->chan, skb); 255 ppp_input(&po->chan, skb);
256 } else { 256 } else {
257 l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n", 257 l2tp_dbg(session, PPPOL2TP_MSG_DATA,
258 session->name); 258 "%s: recv %d byte data frame, passing to L2TP socket\n",
259 session->name, data_len);
259 260
260 /* Not bound. Nothing we can do, so discard. */ 261 if (sock_queue_rcv_skb(sk, skb) < 0) {
261 atomic_long_inc(&session->stats.rx_errors); 262 atomic_long_inc(&session->stats.rx_errors);
262 kfree_skb(skb); 263 kfree_skb(skb);
264 }
263 } 265 }
264 266
265 return; 267 return;
@@ -1312,6 +1314,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
1312 po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : 1314 po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
1313 PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; 1315 PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
1314 } 1316 }
1317 l2tp_session_set_header_len(session, session->tunnel->version);
1315 l2tp_info(session, PPPOL2TP_MSG_CONTROL, 1318 l2tp_info(session, PPPOL2TP_MSG_CONTROL,
1316 "%s: set send_seq=%d\n", 1319 "%s: set send_seq=%d\n",
1317 session->name, session->send_seq); 1320 session->name, session->send_seq);
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index f43613a97dd6..0c1ecfdf9a12 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -100,6 +100,12 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
100 } 100 }
101 max_bw = max(max_bw, width); 101 max_bw = max(max_bw, width);
102 } 102 }
103
104 /* use the configured bandwidth in case of monitor interface */
105 sdata = rcu_dereference(local->monitor_sdata);
106 if (sdata && rcu_access_pointer(sdata->vif.chanctx_conf) == conf)
107 max_bw = max(max_bw, conf->def.width);
108
103 rcu_read_unlock(); 109 rcu_read_unlock();
104 110
105 return max_bw; 111 return max_bw;
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index 2802f9d9279d..ad8b377b4b9f 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -36,6 +36,7 @@ static struct sk_buff *mps_qos_null_get(struct sta_info *sta)
36 sdata->vif.addr); 36 sdata->vif.addr);
37 nullfunc->frame_control = fc; 37 nullfunc->frame_control = fc;
38 nullfunc->duration_id = 0; 38 nullfunc->duration_id = 0;
39 nullfunc->seq_ctrl = 0;
39 /* no address resolution for this frame -> set addr 1 immediately */ 40 /* no address resolution for this frame -> set addr 1 immediately */
40 memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); 41 memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
41 memset(skb_put(skb, 2), 0, 2); /* append QoS control field */ 42 memset(skb_put(skb, 2), 0, 2); /* append QoS control field */
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index a023b432143b..137a192e64bc 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1206,6 +1206,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
1206 memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); 1206 memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
1207 memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); 1207 memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
1208 memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); 1208 memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
1209 nullfunc->seq_ctrl = 0;
1209 1210
1210 skb->priority = tid; 1211 skb->priority = tid;
1211 skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); 1212 skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 1313145e3b86..a07d55e75698 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -273,11 +273,12 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
273 273
274void qdisc_list_add(struct Qdisc *q) 274void qdisc_list_add(struct Qdisc *q)
275{ 275{
276 struct Qdisc *root = qdisc_dev(q)->qdisc; 276 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
277 struct Qdisc *root = qdisc_dev(q)->qdisc;
277 278
278 WARN_ON_ONCE(root == &noop_qdisc); 279 WARN_ON_ONCE(root == &noop_qdisc);
279 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
280 list_add_tail(&q->list, &root->list); 280 list_add_tail(&q->list, &root->list);
281 }
281} 282}
282EXPORT_SYMBOL(qdisc_list_add); 283EXPORT_SYMBOL(qdisc_list_add);
283 284
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 08ef7a42c0e4..21e251766eb1 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -601,6 +601,7 @@ static int fq_resize(struct Qdisc *sch, u32 log)
601{ 601{
602 struct fq_sched_data *q = qdisc_priv(sch); 602 struct fq_sched_data *q = qdisc_priv(sch);
603 struct rb_root *array; 603 struct rb_root *array;
604 void *old_fq_root;
604 u32 idx; 605 u32 idx;
605 606
606 if (q->fq_root && log == q->fq_trees_log) 607 if (q->fq_root && log == q->fq_trees_log)
@@ -615,13 +616,19 @@ static int fq_resize(struct Qdisc *sch, u32 log)
615 for (idx = 0; idx < (1U << log); idx++) 616 for (idx = 0; idx < (1U << log); idx++)
616 array[idx] = RB_ROOT; 617 array[idx] = RB_ROOT;
617 618
618 if (q->fq_root) { 619 sch_tree_lock(sch);
619 fq_rehash(q, q->fq_root, q->fq_trees_log, array, log); 620
620 fq_free(q->fq_root); 621 old_fq_root = q->fq_root;
621 } 622 if (old_fq_root)
623 fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
624
622 q->fq_root = array; 625 q->fq_root = array;
623 q->fq_trees_log = log; 626 q->fq_trees_log = log;
624 627
628 sch_tree_unlock(sch);
629
630 fq_free(old_fq_root);
631
625 return 0; 632 return 0;
626} 633}
627 634
@@ -697,9 +704,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
697 q->flow_refill_delay = usecs_to_jiffies(usecs_delay); 704 q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
698 } 705 }
699 706
700 if (!err) 707 if (!err) {
708 sch_tree_unlock(sch);
701 err = fq_resize(sch, fq_log); 709 err = fq_resize(sch, fq_log);
702 710 sch_tree_lock(sch);
711 }
703 while (sch->q.qlen > sch->limit) { 712 while (sch->q.qlen > sch->limit) {
704 struct sk_buff *skb = fq_dequeue(sch); 713 struct sk_buff *skb = fq_dequeue(sch);
705 714
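[Editor's note] fq_resize() now allocates the new hash array before taking the qdisc tree lock, swaps the root pointer and rehashes under sch_tree_lock(), and frees the old array only after unlocking; fq_change() drops the lock around the resize for the same reason. A generic sketch of that allocate/swap/free ordering, with a pthread mutex standing in for sch_tree_lock() and memcpy standing in for fq_rehash():

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
    static int *table;
    static size_t table_len;

    static int resize(size_t new_len) {
        /* 1. Allocate (and, in the kernel, possibly sleep) outside the lock. */
        int *new_table = calloc(new_len, sizeof(*new_table));
        int *old_table;
        if (!new_table)
            return -1;

        /* 2. Migrate entries and publish the new table under the lock. */
        pthread_mutex_lock(&tree_lock);
        old_table = table;
        if (old_table)
            memcpy(new_table, old_table,
                   (table_len < new_len ? table_len : new_len) * sizeof(*new_table));
        table = new_table;
        table_len = new_len;
        pthread_mutex_unlock(&tree_lock);

        /* 3. Free the old table only after dropping the lock. */
        free(old_table);
        return 0;
    }

    int main(void) {
        resize(8);
        resize(16);
        printf("table_len = %zu\n", table_len);
        free(table);
        return 0;
    }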
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 632090b961c3..3a1767ef3201 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1421,8 +1421,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk)
1421 BUG_ON(!list_empty(&chunk->list)); 1421 BUG_ON(!list_empty(&chunk->list));
1422 list_del_init(&chunk->transmitted_list); 1422 list_del_init(&chunk->transmitted_list);
1423 1423
1424 /* Free the chunk skb data and the SCTP_chunk stub itself. */ 1424 consume_skb(chunk->skb);
1425 dev_kfree_skb(chunk->skb); 1425 consume_skb(chunk->auth_chunk);
1426 1426
1427 SCTP_DBG_OBJCNT_DEC(chunk); 1427 SCTP_DBG_OBJCNT_DEC(chunk);
1428 kmem_cache_free(sctp_chunk_cachep, chunk); 1428 kmem_cache_free(sctp_chunk_cachep, chunk);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index ae65b6b5973a..01e002430c85 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -760,7 +760,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
760 760
761 /* Make sure that we and the peer are AUTH capable */ 761 /* Make sure that we and the peer are AUTH capable */
762 if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) { 762 if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
763 kfree_skb(chunk->auth_chunk);
764 sctp_association_free(new_asoc); 763 sctp_association_free(new_asoc);
765 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 764 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
766 } 765 }
@@ -775,10 +774,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
775 auth.transport = chunk->transport; 774 auth.transport = chunk->transport;
776 775
777 ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth); 776 ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
778
779 /* We can now safely free the auth_chunk clone */
780 kfree_skb(chunk->auth_chunk);
781
782 if (ret != SCTP_IERROR_NO_ERROR) { 777 if (ret != SCTP_IERROR_NO_ERROR) {
783 sctp_association_free(new_asoc); 778 sctp_association_free(new_asoc);
784 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 779 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
diff --git a/net/socket.c b/net/socket.c
index 879933aaed4c..a19ae1968d37 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -450,16 +450,17 @@ EXPORT_SYMBOL(sockfd_lookup);
450 450
451static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) 451static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
452{ 452{
453 struct file *file; 453 struct fd f = fdget(fd);
454 struct socket *sock; 454 struct socket *sock;
455 455
456 *err = -EBADF; 456 *err = -EBADF;
457 file = fget_light(fd, fput_needed); 457 if (f.file) {
458 if (file) { 458 sock = sock_from_file(f.file, err);
459 sock = sock_from_file(file, err); 459 if (likely(sock)) {
460 if (sock) 460 *fput_needed = f.flags;
461 return sock; 461 return sock;
462 fput_light(file, *fput_needed); 462 }
463 fdput(f);
463 } 464 }
464 return NULL; 465 return NULL;
465} 466}
@@ -1985,6 +1986,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
1985{ 1986{
1986 if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) 1987 if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
1987 return -EFAULT; 1988 return -EFAULT;
1989
1990 if (kmsg->msg_namelen < 0)
1991 return -EINVAL;
1992
1988 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) 1993 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
1989 kmsg->msg_namelen = sizeof(struct sockaddr_storage); 1994 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
1990 return 0; 1995 return 0;
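[Editor's note] copy_msghdr_from_user() now rejects a negative msg_namelen outright instead of only clamping oversized values; msg_namelen is a signed int, and a negative length that survives validation becomes an enormous size the moment it reaches unsigned arithmetic. A standalone illustration of the ordering of the two checks; the struct below is a stand-in, not the kernel's msghdr:

    #include <stdio.h>

    /* Stand-in for the kernel's msghdr; msg_namelen is a signed int there too. */
    struct fake_msghdr { int msg_namelen; };

    static const char *validate(const struct fake_msghdr *m, size_t max) {
        if (m->msg_namelen < 0)
            return "-EINVAL: negative name length";        /* new check */
        if ((size_t)m->msg_namelen > max)
            return "clamped to sockaddr_storage size";     /* existing clamp */
        return "accepted as-is";
    }

    int main(void) {
        struct fake_msghdr bad = { .msg_namelen = -1 };

        /* A negative signed length is a malformed request, and viewed
         * through unsigned arithmetic it is enormous: */
        printf("-1 viewed as size_t: %zu\n", (size_t)bad.msg_namelen);

        /* Without the < 0 test the header would only be silently clamped;
         * with it, the caller gets an explicit error. */
        printf("verdict: %s\n", validate(&bad, 128));
        return 0;
    }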
diff --git a/net/tipc/config.c b/net/tipc/config.c
index e74eef2e7490..e6d721692ae0 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -376,7 +376,6 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr,
376 struct tipc_cfg_msg_hdr *req_hdr; 376 struct tipc_cfg_msg_hdr *req_hdr;
377 struct tipc_cfg_msg_hdr *rep_hdr; 377 struct tipc_cfg_msg_hdr *rep_hdr;
378 struct sk_buff *rep_buf; 378 struct sk_buff *rep_buf;
379 int ret;
380 379
381 /* Validate configuration message header (ignore invalid message) */ 380 /* Validate configuration message header (ignore invalid message) */
382 req_hdr = (struct tipc_cfg_msg_hdr *)buf; 381 req_hdr = (struct tipc_cfg_msg_hdr *)buf;
@@ -398,12 +397,8 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr,
398 memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr)); 397 memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
399 rep_hdr->tcm_len = htonl(rep_buf->len); 398 rep_hdr->tcm_len = htonl(rep_buf->len);
400 rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST); 399 rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
401 400 tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
402 ret = tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data, 401 rep_buf->len);
403 rep_buf->len);
404 if (ret < 0)
405 pr_err("Sending cfg reply message failed, no memory\n");
406
407 kfree_skb(rep_buf); 402 kfree_skb(rep_buf);
408 } 403 }
409} 404}
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index e4bc8a296744..1fabf160501f 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -58,7 +58,6 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument)
58 58
59 spin_lock_bh(&qitem_lock); 59 spin_lock_bh(&qitem_lock);
60 if (!handler_enabled) { 60 if (!handler_enabled) {
61 pr_err("Signal request ignored by handler\n");
62 spin_unlock_bh(&qitem_lock); 61 spin_unlock_bh(&qitem_lock);
63 return -ENOPROTOOPT; 62 return -ENOPROTOOPT;
64 } 63 }
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 48302be175ce..042e8e3cabc0 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -941,17 +941,48 @@ int tipc_nametbl_init(void)
941 return 0; 941 return 0;
942} 942}
943 943
944/**
945 * tipc_purge_publications - remove all publications for a given type
946 *
947 * tipc_nametbl_lock must be held when calling this function
948 */
949static void tipc_purge_publications(struct name_seq *seq)
950{
951 struct publication *publ, *safe;
952 struct sub_seq *sseq;
953 struct name_info *info;
954
955 if (!seq->sseqs) {
956 nameseq_delete_empty(seq);
957 return;
958 }
959 sseq = seq->sseqs;
960 info = sseq->info;
961 list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
962 tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
963 publ->ref, publ->key);
964 }
965}
966
944void tipc_nametbl_stop(void) 967void tipc_nametbl_stop(void)
945{ 968{
946 u32 i; 969 u32 i;
970 struct name_seq *seq;
971 struct hlist_head *seq_head;
972 struct hlist_node *safe;
947 973
948 /* Verify name table is empty, then release it */ 974 /* Verify name table is empty and purge any lingering
975 * publications, then release the name table
976 */
949 write_lock_bh(&tipc_nametbl_lock); 977 write_lock_bh(&tipc_nametbl_lock);
950 for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { 978 for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
951 if (hlist_empty(&table.types[i])) 979 if (hlist_empty(&table.types[i]))
952 continue; 980 continue;
953 pr_err("nametbl_stop(): orphaned hash chain detected\n"); 981 seq_head = &table.types[i];
954 break; 982 hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
983 tipc_purge_publications(seq);
984 }
985 continue;
955 } 986 }
956 kfree(table.types); 987 kfree(table.types);
957 table.types = NULL; 988 table.types = NULL;
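[Editor's note] tipc_nametbl_stop() now walks each hash chain with hlist_for_each_entry_safe() and purges any lingering publications instead of just warning about orphaned chains; the _safe iterator is needed because entries are unlinked while the walk is in progress. A plain singly-linked-list version of that delete-while-iterating pattern:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int key; struct node *next; };

    static struct node *push(struct node *head, int key) {
        struct node *n = malloc(sizeof(*n));
        n->key = key;
        n->next = head;
        return n;
    }

    /* Remove every entry; "safe" means grabbing ->next before freeing,
     * which is exactly what the kernel's *_safe iterators do for you. */
    static void purge_all(struct node **head) {
        struct node *cur = *head, *next;

        while (cur) {
            next = cur->next;   /* remember the successor first */
            free(cur);          /* now the current entry may go away */
            cur = next;
        }
        *head = NULL;
    }

    int main(void) {
        struct node *list = NULL;
        for (int i = 0; i < 5; i++)
            list = push(list, i);
        purge_all(&list);
        printf("list empty: %s\n", list == NULL ? "yes" : "no");
        return 0;
    }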
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 373979789a73..646a930eefbf 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -87,7 +87,6 @@ static void tipc_clean_outqueues(struct tipc_conn *con);
87static void tipc_conn_kref_release(struct kref *kref) 87static void tipc_conn_kref_release(struct kref *kref)
88{ 88{
89 struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); 89 struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
90 struct tipc_server *s = con->server;
91 90
92 if (con->sock) { 91 if (con->sock) {
93 tipc_sock_release_local(con->sock); 92 tipc_sock_release_local(con->sock);
@@ -95,10 +94,6 @@ static void tipc_conn_kref_release(struct kref *kref)
95 } 94 }
96 95
97 tipc_clean_outqueues(con); 96 tipc_clean_outqueues(con);
98
99 if (con->conid)
100 s->tipc_conn_shutdown(con->conid, con->usr_data);
101
102 kfree(con); 97 kfree(con);
103} 98}
104 99
@@ -181,6 +176,9 @@ static void tipc_close_conn(struct tipc_conn *con)
181 struct tipc_server *s = con->server; 176 struct tipc_server *s = con->server;
182 177
183 if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { 178 if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
179 if (con->conid)
180 s->tipc_conn_shutdown(con->conid, con->usr_data);
181
184 spin_lock_bh(&s->idr_lock); 182 spin_lock_bh(&s->idr_lock);
185 idr_remove(&s->conn_idr, con->conid); 183 idr_remove(&s->conn_idr, con->conid);
186 s->idr_in_use--; 184 s->idr_in_use--;
@@ -429,10 +427,12 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
429 list_add_tail(&e->list, &con->outqueue); 427 list_add_tail(&e->list, &con->outqueue);
430 spin_unlock_bh(&con->outqueue_lock); 428 spin_unlock_bh(&con->outqueue_lock);
431 429
432 if (test_bit(CF_CONNECTED, &con->flags)) 430 if (test_bit(CF_CONNECTED, &con->flags)) {
433 if (!queue_work(s->send_wq, &con->swork)) 431 if (!queue_work(s->send_wq, &con->swork))
434 conn_put(con); 432 conn_put(con);
435 433 } else {
434 conn_put(con);
435 }
436 return 0; 436 return 0;
437} 437}
438 438
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index a4cf274455aa..0ed0eaa62f29 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -997,7 +997,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
997 997
998 for (;;) { 998 for (;;) {
999 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 999 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1000 if (skb_queue_empty(&sk->sk_receive_queue)) { 1000 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1001 if (sock->state == SS_DISCONNECTING) { 1001 if (sock->state == SS_DISCONNECTING) {
1002 err = -ENOTCONN; 1002 err = -ENOTCONN;
1003 break; 1003 break;
@@ -1623,7 +1623,7 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
1623 for (;;) { 1623 for (;;) {
1624 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 1624 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
1625 TASK_INTERRUPTIBLE); 1625 TASK_INTERRUPTIBLE);
1626 if (skb_queue_empty(&sk->sk_receive_queue)) { 1626 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1627 release_sock(sk); 1627 release_sock(sk);
1628 timeo = schedule_timeout(timeo); 1628 timeo = schedule_timeout(timeo);
1629 lock_sock(sk); 1629 lock_sock(sk);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 7cb0bd5b1176..11c9ae00837d 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -96,20 +96,16 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
96{ 96{
97 struct tipc_subscriber *subscriber = sub->subscriber; 97 struct tipc_subscriber *subscriber = sub->subscriber;
98 struct kvec msg_sect; 98 struct kvec msg_sect;
99 int ret;
100 99
101 msg_sect.iov_base = (void *)&sub->evt; 100 msg_sect.iov_base = (void *)&sub->evt;
102 msg_sect.iov_len = sizeof(struct tipc_event); 101 msg_sect.iov_len = sizeof(struct tipc_event);
103
104 sub->evt.event = htohl(event, sub->swap); 102 sub->evt.event = htohl(event, sub->swap);
105 sub->evt.found_lower = htohl(found_lower, sub->swap); 103 sub->evt.found_lower = htohl(found_lower, sub->swap);
106 sub->evt.found_upper = htohl(found_upper, sub->swap); 104 sub->evt.found_upper = htohl(found_upper, sub->swap);
107 sub->evt.port.ref = htohl(port_ref, sub->swap); 105 sub->evt.port.ref = htohl(port_ref, sub->swap);
108 sub->evt.port.node = htohl(node, sub->swap); 106 sub->evt.port.node = htohl(node, sub->swap);
109 ret = tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, 107 tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base,
110 msg_sect.iov_base, msg_sect.iov_len); 108 msg_sect.iov_len);
111 if (ret < 0)
112 pr_err("Sending subscription event failed, no memory\n");
113} 109}
114 110
115/** 111/**
@@ -153,14 +149,6 @@ static void subscr_timeout(struct tipc_subscription *sub)
153 /* The spin lock per subscriber is used to protect its members */ 149 /* The spin lock per subscriber is used to protect its members */
154 spin_lock_bh(&subscriber->lock); 150 spin_lock_bh(&subscriber->lock);
155 151
156 /* Validate if the connection related to the subscriber is
157 * closed (in case subscriber is terminating)
158 */
159 if (subscriber->conid == 0) {
160 spin_unlock_bh(&subscriber->lock);
161 return;
162 }
163
164 /* Validate timeout (in case subscription is being cancelled) */ 152 /* Validate timeout (in case subscription is being cancelled) */
165 if (sub->timeout == TIPC_WAIT_FOREVER) { 153 if (sub->timeout == TIPC_WAIT_FOREVER) {
166 spin_unlock_bh(&subscriber->lock); 154 spin_unlock_bh(&subscriber->lock);
@@ -215,9 +203,6 @@ static void subscr_release(struct tipc_subscriber *subscriber)
215 203
216 spin_lock_bh(&subscriber->lock); 204 spin_lock_bh(&subscriber->lock);
217 205
218 /* Invalidate subscriber reference */
219 subscriber->conid = 0;
220
221 /* Destroy any existing subscriptions for subscriber */ 206 /* Destroy any existing subscriptions for subscriber */
222 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, 207 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
223 subscription_list) { 208 subscription_list) {
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 29fc8bee9702..ce6ec6c2f4de 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -163,9 +163,8 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
163 163
164static inline unsigned int unix_hash_fold(__wsum n) 164static inline unsigned int unix_hash_fold(__wsum n)
165{ 165{
166 unsigned int hash = (__force unsigned int)n; 166 unsigned int hash = (__force unsigned int)csum_fold(n);
167 167
168 hash ^= hash>>16;
169 hash ^= hash>>8; 168 hash ^= hash>>8;
170 return hash&(UNIX_HASH_SIZE-1); 169 return hash&(UNIX_HASH_SIZE-1);
171} 170}
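[Editor's note] unix_hash_fold() now folds the 32-bit partial checksum down to 16 bits via csum_fold() before hashing, since only the folded value is stable across architectures. The sketch below shows the same end-around-carry fold in plain C; it mirrors the idea of csum_fold() rather than reproducing the kernel function (which also complements its result), and the bucket mask is a stand-in for UNIX_HASH_SIZE-1.

    #include <stdint.h>
    #include <stdio.h>

    /* End-around-carry fold of a 32-bit partial checksum into 16 bits. */
    static uint16_t fold32(uint32_t sum) {
        sum = (sum >> 16) + (sum & 0xffff);  /* add the two halves */
        sum += sum >> 16;                    /* absorb any carry */
        return (uint16_t)sum;
    }

    int main(void) {
        uint32_t partial = 0x1234f00du;      /* pretend csum_partial() output */
        uint16_t folded = fold32(partial);

        /* Hash over the folded 16-bit value, as the new unix_hash_fold() does. */
        unsigned int hash = folded;
        hash ^= hash >> 8;
        printf("partial=%#x folded=%#x bucket=%#x\n",
               partial, folded, hash & 0xff);
        return 0;
    }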
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 010892b81a06..a3bf18d11609 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -788,8 +788,6 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
788 default: 788 default:
789 break; 789 break;
790 } 790 }
791
792 wdev->beacon_interval = 0;
793} 791}
794 792
795static int cfg80211_netdev_notifier_call(struct notifier_block *nb, 793static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 276e84b8a8e5..10085de886fe 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -330,7 +330,8 @@ static void write_src(void)
330 printf("\tPTR\t_text + %#llx\n", 330 printf("\tPTR\t_text + %#llx\n",
331 table[i].addr - _text); 331 table[i].addr - _text);
332 else 332 else
333 printf("\tPTR\t%#llx\n", table[i].addr); 333 printf("\tPTR\t_text - %#llx\n",
334 _text - table[i].addr);
334 } else { 335 } else {
335 printf("\tPTR\t%#llx\n", table[i].addr); 336 printf("\tPTR\t%#llx\n", table[i].addr);
336 } 337 }
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index d46cbc5e335e..2fb2576dc644 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1000,7 +1000,11 @@ static int keyring_detect_cycle_iterator(const void *object,
1000 1000
1001 kenter("{%d}", key->serial); 1001 kenter("{%d}", key->serial);
1002 1002
1003 BUG_ON(key != ctx->match_data); 1003 /* We might get a keyring with matching index-key that is nonetheless a
1004 * different keyring. */
1005 if (key != ctx->match_data)
1006 return 0;
1007
1004 ctx->result = ERR_PTR(-EDEADLK); 1008 ctx->result = ERR_PTR(-EDEADLK);
1005 return 1; 1009 return 1;
1006} 1010}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 850296a1e0ff..8d0a84436674 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3616,6 +3616,19 @@ static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec,
3616 } 3616 }
3617} 3617}
3618 3618
3619static void alc_no_shutup(struct hda_codec *codec)
3620{
3621}
3622
3623static void alc_fixup_no_shutup(struct hda_codec *codec,
3624 const struct hda_fixup *fix, int action)
3625{
3626 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
3627 struct alc_spec *spec = codec->spec;
3628 spec->shutup = alc_no_shutup;
3629 }
3630}
3631
3619static void alc_fixup_headset_mode_alc668(struct hda_codec *codec, 3632static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
3620 const struct hda_fixup *fix, int action) 3633 const struct hda_fixup *fix, int action)
3621{ 3634{
@@ -3844,6 +3857,7 @@ enum {
 	ALC269_FIXUP_HP_GPIO_LED,
 	ALC269_FIXUP_INV_DMIC,
 	ALC269_FIXUP_LENOVO_DOCK,
+	ALC269_FIXUP_NO_SHUTUP,
 	ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
 	ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
 	ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -4020,6 +4034,10 @@ static const struct hda_fixup alc269_fixups[] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_inv_dmic_0x12,
 	},
+	[ALC269_FIXUP_NO_SHUTUP] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc_fixup_no_shutup,
+	},
 	[ALC269_FIXUP_LENOVO_DOCK] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -4405,6 +4423,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
 	SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 75d0ad5d2dcb..647a72cda005 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -1328,6 +1328,9 @@ static int pm860x_probe(struct snd_soc_codec *codec)
 	pm860x->codec = codec;
 
 	codec->control_data = pm860x->regmap;
+	ret = snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
+	if (ret)
+		return ret;
 
 	for (i = 0; i < 4; i++) {
 		ret = request_threaded_irq(pm860x->irq[i], NULL,
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index 52e7cb08434b..fa2b8e07f420 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -210,7 +210,7 @@ out:
 static int si476x_codec_probe(struct snd_soc_codec *codec)
 {
 	codec->control_data = dev_get_regmap(codec->dev->parent, NULL);
-	return 0;
+	return snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
 }
 
 static struct snd_soc_dai_ops si476x_dai_ops = {
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index 3fde9e402710..d163e18d85d4 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -305,7 +305,9 @@ static int __init n810_soc_init(void)
 	int err;
 	struct device *dev;
 
-	if (!(machine_is_nokia_n810() || machine_is_nokia_n810_wimax()))
+	if (!of_have_populated_dt() ||
+	    (!of_machine_is_compatible("nokia,n810") &&
+	     !of_machine_is_compatible("nokia,n810-wimax")))
 		return -ENODEV;
 
 	n810_snd_device = platform_device_alloc("soc-audio", -1);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 47e1ce771e65..28522bd03b8e 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1989,6 +1989,7 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
 
 	paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list);
 	if (paths < 0) {
+		dpcm_path_put(&list);
 		dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
 			 fe->dai_link->name, "playback");
 		mutex_unlock(&card->mutex);
@@ -2018,6 +2019,7 @@ capture:
 
 	paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list);
 	if (paths < 0) {
+		dpcm_path_put(&list);
 		dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
 			 fe->dai_link->name, "capture");
 		mutex_unlock(&card->mutex);
@@ -2082,6 +2084,7 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
 	fe->dpcm[stream].runtime = fe_substream->runtime;
 
 	if (dpcm_path_get(fe, stream, &list) <= 0) {
+		dpcm_path_put(&list);
 		dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
 			fe->dai_link->name, stream ? "capture" : "playback");
 	}
diff --git a/tools/net/Makefile b/tools/net/Makefile
index 004cd74734b6..ee577ea03ba5 100644
--- a/tools/net/Makefile
+++ b/tools/net/Makefile
@@ -12,7 +12,7 @@ YACC = bison
 
 all : bpf_jit_disasm bpf_dbg bpf_asm
 
-bpf_jit_disasm : CFLAGS = -Wall -O2
+bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm'
 bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
 bpf_jit_disasm : bpf_jit_disasm.o
 
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 6aa6fb6f7bd9..f954c26de231 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -825,7 +825,6 @@ static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscal
 	P_SIGNUM(PIPE);
 	P_SIGNUM(ALRM);
 	P_SIGNUM(TERM);
-	P_SIGNUM(STKFLT);
 	P_SIGNUM(CHLD);
 	P_SIGNUM(CONT);
 	P_SIGNUM(STOP);
@@ -841,6 +840,15 @@ static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscal
 	P_SIGNUM(IO);
 	P_SIGNUM(PWR);
 	P_SIGNUM(SYS);
+#ifdef SIGEMT
+	P_SIGNUM(EMT);
+#endif
+#ifdef SIGSTKFLT
+	P_SIGNUM(STKFLT);
+#endif
+#ifdef SIGSWI
+	P_SIGNUM(SWI);
+#endif
 	default: break;
 	}
 
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index c872991e0f65..620a1983b76b 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1213,7 +1213,7 @@ static void ip__resolve_ams(struct machine *machine, struct thread *thread,
 		 */
 		thread__find_addr_location(thread, machine, m, MAP__FUNCTION,
 				ip, &al);
-		if (al.sym)
+		if (al.map)
 			goto found;
 	}
 found:
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 3e9f336740fa..516d19fb999b 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -151,15 +151,15 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
 
 		gelf_getshdr(sec, shp);
 		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
-		if (!strcmp(name, str)) {
+		if (str && !strcmp(name, str)) {
 			if (idx)
 				*idx = cnt;
-			break;
+			return sec;
 		}
 		++cnt;
 	}
 
-	return sec;
+	return NULL;
 }
 
 #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
index d66418237d21..aa290c0de6f5 100644
--- a/tools/testing/selftests/ipc/msgque.c
+++ b/tools/testing/selftests/ipc/msgque.c
@@ -201,6 +201,7 @@ int main(int argc, char **argv)
 
 	msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666);
 	if (msgque.msq_id == -1) {
+		err = -errno;
 		printf("Can't create queue\n");
 		goto err_out;
 	}