-rw-r--r-- Documentation/DocBook/device-drivers.tmpl | 2
-rw-r--r-- Documentation/scsi/LICENSE.qla2xxx | 2
-rw-r--r-- MAINTAINERS | 7
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/arc/include/asm/irqflags.h | 12
-rw-r--r-- arch/arm/boot/dts/imx28-m28evk.dts | 1
-rw-r--r-- arch/arm/boot/dts/imx28-sps1.dts | 1
-rw-r--r-- arch/arm/boot/dts/imx6qdl.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts | 14
-rw-r--r-- arch/arm/mach-imx/clk-imx35.c | 2
-rw-r--r-- arch/arm/mach-imx/clk-imx6q.c | 3
-rw-r--r-- arch/arm/mach-kirkwood/board-iomega_ix2_200.c | 7
-rw-r--r-- arch/arm/mach-mvebu/irq-armada-370-xp.c | 16
-rw-r--r-- arch/arm/mach-s3c24xx/include/mach/irqs.h | 4
-rw-r--r-- arch/arm/mach-s3c24xx/irq.c | 2
-rw-r--r-- arch/c6x/include/asm/irqflags.h | 2
-rw-r--r-- arch/ia64/kernel/palinfo.c | 77
-rw-r--r-- arch/m68k/include/asm/gpio.h | 20
-rw-r--r-- arch/powerpc/platforms/pseries/lpar.c | 8
-rw-r--r-- arch/tile/include/asm/irqflags.h | 10
-rw-r--r-- arch/x86/include/asm/paravirt.h | 5
-rw-r--r-- arch/x86/include/asm/paravirt_types.h | 2
-rw-r--r-- arch/x86/include/asm/tlb.h | 2
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_ds.c | 3
-rw-r--r-- arch/x86/kernel/paravirt.c | 25
-rw-r--r-- arch/x86/lguest/boot.c | 1
-rw-r--r-- arch/x86/mm/fault.c | 6
-rw-r--r-- arch/x86/mm/pageattr-test.c | 2
-rw-r--r-- arch/x86/mm/pageattr.c | 12
-rw-r--r-- arch/x86/mm/pgtable.c | 7
-rw-r--r-- arch/x86/xen/mmu.c | 13
-rw-r--r-- block/blk-sysfs.c | 2
-rw-r--r-- block/partition-generic.c | 1
-rw-r--r-- crypto/gcm.c | 17
-rw-r--r-- drivers/ata/ata_piix.c | 14
-rw-r--r-- drivers/ata/libata-core.c | 6
-rw-r--r-- drivers/ata/libata-scsi.c | 8
-rw-r--r-- drivers/base/regmap/regmap.c | 3
-rw-r--r-- drivers/block/loop.c | 21
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 327
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.h | 18
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 1
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_core.c | 2
-rw-r--r-- drivers/dma/omap-dma.c | 20
-rw-r--r-- drivers/dma/pl330.c | 38
-rw-r--r-- drivers/gpio/gpio-pca953x.c | 2
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 8
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_mode.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.c | 2
-rw-r--r-- drivers/gpu/drm/udl/udl_connector.c | 4
-rw-r--r-- drivers/hwspinlock/hwspinlock_core.c | 2
-rw-r--r-- drivers/misc/vmw_vmci/Kconfig | 2
-rw-r--r-- drivers/net/bonding/bond_main.c | 11
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 18
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 3
-rw-r--r-- drivers/net/ethernet/intel/e100.c | 36
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 9
-rw-r--r-- drivers/net/hyperv/netvsc.c | 17
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 2
-rw-r--r-- drivers/net/hyperv/rndis_filter.c | 14
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 4
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 6
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 46
-rw-r--r-- drivers/net/wireless/mwifiex/scan.c | 11
-rw-r--r-- drivers/net/wireless/rt2x00/Kconfig | 7
-rw-r--r-- drivers/net/wireless/rt2x00/Makefile | 1
-rw-r--r-- drivers/net/wireless/rt2x00/rt2400pci.c | 1
-rw-r--r-- drivers/net/wireless/rt2x00/rt2500pci.c | 1
-rw-r--r-- drivers/net/wireless/rt2x00/rt2800pci.c | 1
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00mmio.c | 216
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00mmio.h | 119
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00pci.c | 176
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00pci.h | 88
-rw-r--r-- drivers/net/wireless/rt2x00/rt61pci.c | 1
-rw-r--r-- drivers/remoteproc/Kconfig | 2
-rw-r--r-- drivers/remoteproc/remoteproc_core.c | 6
-rw-r--r-- drivers/remoteproc/ste_modem_rproc.c | 7
-rw-r--r-- drivers/s390/net/qeth_core.h | 3
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 19
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 2
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 2
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.c | 2
-rw-r--r-- drivers/scsi/ipr.c | 13
-rw-r--r-- drivers/scsi/libsas/sas_expander.c | 14
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 5
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.c | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 58
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r-- drivers/scsi/st.c | 8
-rw-r--r-- drivers/target/target_core_alua.c | 3
-rw-r--r-- drivers/tty/mxser.c | 8
-rw-r--r-- drivers/tty/serial/8250/8250_pnp.c | 12
-rw-r--r-- drivers/tty/serial/omap-serial.c | 11
-rw-r--r-- drivers/vfio/pci/vfio_pci.c | 3
-rw-r--r-- drivers/vhost/tcm_vhost.c | 198
-rw-r--r-- drivers/watchdog/Kconfig | 2
-rw-r--r-- drivers/xen/events.c | 19
-rw-r--r-- fs/btrfs/tree-log.c | 48
-rw-r--r-- fs/cifs/connect.c | 16
-rw-r--r-- fs/ecryptfs/miscdev.c | 14
-rw-r--r-- fs/inode.c | 2
-rw-r--r-- fs/namespace.c | 2
-rw-r--r-- fs/nfs/nfs4client.c | 45
-rw-r--r-- fs/nfs/nfs4proc.c | 1
-rw-r--r-- fs/nfs/nfs4state.c | 8
-rw-r--r-- fs/proc/generic.c | 119
-rw-r--r-- include/asm-generic/tlb.h | 7
-rw-r--r-- include/linux/ata.h | 2
-rw-r--r-- include/linux/capability.h | 2
-rw-r--r-- include/linux/ftrace.h | 5
-rw-r--r-- include/linux/libata.h | 1
-rw-r--r-- include/linux/preempt.h | 22
-rw-r--r-- include/linux/proc_fs.h | 2
-rw-r--r-- include/linux/security.h | 12
-rw-r--r-- include/linux/spinlock_up.h | 29
-rw-r--r-- include/net/iucv/af_iucv.h | 8
-rw-r--r-- include/sound/dmaengine_pcm.h | 65
-rw-r--r-- include/sound/soc.h | 4
-rw-r--r-- kernel/capability.c | 24
-rw-r--r-- kernel/events/core.c | 4
-rw-r--r-- kernel/events/internal.h | 2
-rw-r--r-- kernel/events/ring_buffer.c | 22
-rw-r--r-- kernel/sched/clock.c | 26
-rw-r--r-- kernel/sched/core.c | 8
-rw-r--r-- kernel/sched/cputime.c | 2
-rw-r--r-- kernel/sys.c | 3
-rw-r--r-- kernel/trace/ftrace.c | 54
-rw-r--r-- kernel/trace/trace.c | 9
-rw-r--r-- kernel/trace/trace_stack.c | 2
-rw-r--r-- lib/kobject.c | 9
-rw-r--r-- mm/memory.c | 1
-rw-r--r-- net/atm/common.c | 2
-rw-r--r-- net/ax25/af_ax25.c | 1
-rw-r--r-- net/bluetooth/af_bluetooth.c | 4
-rw-r--r-- net/bluetooth/rfcomm/sock.c | 1
-rw-r--r-- net/bluetooth/sco.c | 1
-rw-r--r-- net/caif/caif_socket.c | 2
-rw-r--r-- net/can/gw.c | 6
-rw-r--r-- net/core/rtnetlink.c | 4
-rw-r--r-- net/ipv4/devinet.c | 60
-rw-r--r-- net/ipv4/tcp_output.c | 1
-rw-r--r-- net/ipv6/tcp_ipv6.c | 1
-rw-r--r-- net/irda/af_irda.c | 2
-rw-r--r-- net/iucv/af_iucv.c | 36
-rw-r--r-- net/l2tp/l2tp_ip6.c | 1
-rw-r--r-- net/llc/af_llc.c | 2
-rw-r--r-- net/netrom/af_netrom.c | 1
-rw-r--r-- net/nfc/llcp/sock.c | 3
-rw-r--r-- net/rose/af_rose.c | 1
-rw-r--r-- net/sunrpc/clnt.c | 11
-rw-r--r-- net/tipc/socket.c | 7
-rw-r--r-- net/vmw_vsock/af_vsock.c | 2
-rw-r--r-- net/vmw_vsock/vmci_transport.c | 3
-rw-r--r-- net/wireless/sme.c | 2
-rw-r--r-- security/capability.c | 6
-rw-r--r-- security/security.c | 5
-rw-r--r-- security/selinux/hooks.c | 7
-rw-r--r-- sound/soc/Kconfig | 4
-rw-r--r-- sound/soc/Makefile | 4
-rw-r--r-- sound/soc/atmel/atmel-pcm-dma.c | 6
-rw-r--r-- sound/soc/cirrus/ep93xx-pcm.c | 5
-rw-r--r-- sound/soc/codecs/max98088.c | 2
-rw-r--r-- sound/soc/fsl/Kconfig | 2
-rw-r--r-- sound/soc/fsl/fsl_ssi.c | 19
-rw-r--r-- sound/soc/fsl/fsl_ssi.h | 8
-rw-r--r-- sound/soc/fsl/imx-pcm-dma.c | 76
-rw-r--r-- sound/soc/fsl/imx-pcm.c | 6
-rw-r--r-- sound/soc/fsl/imx-pcm.h | 5
-rw-r--r-- sound/soc/fsl/imx-ssi.c | 22
-rw-r--r-- sound/soc/mxs/mxs-pcm.c | 4
-rw-r--r-- sound/soc/omap/omap-pcm.c | 7
-rw-r--r-- sound/soc/pxa/mmp-pcm.c | 5
-rw-r--r-- sound/soc/soc-core.c | 85
-rw-r--r-- sound/soc/soc-dmaengine-pcm.c | 82
-rw-r--r-- sound/soc/soc-generic-dmaengine-pcm.c | 284
-rw-r--r-- sound/soc/soc-utils.c | 25
-rw-r--r-- sound/soc/spear/spear_pcm.c | 5
-rw-r--r-- sound/soc/tegra/Kconfig | 2
-rw-r--r-- sound/soc/tegra/tegra_pcm.c | 171
-rw-r--r-- sound/soc/ux500/Kconfig | 2
-rw-r--r-- sound/soc/ux500/ux500_pcm.c | 159
-rw-r--r-- sound/usb/mixer_quirks.c | 4
-rw-r--r-- sound/usb/quirks.c | 2
187 files changed, 2288 insertions, 1443 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 7514dbf0a679..c36892c072da 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -227,7 +227,7 @@ X!Isound/sound_firmware.c
227 <chapter id="uart16x50"> 227 <chapter id="uart16x50">
228 <title>16x50 UART Driver</title> 228 <title>16x50 UART Driver</title>
229!Edrivers/tty/serial/serial_core.c 229!Edrivers/tty/serial/serial_core.c
230!Edrivers/tty/serial/8250/8250.c 230!Edrivers/tty/serial/8250/8250_core.c
231 </chapter> 231 </chapter>
232 232
233 <chapter id="fbdev"> 233 <chapter id="fbdev">
diff --git a/Documentation/scsi/LICENSE.qla2xxx b/Documentation/scsi/LICENSE.qla2xxx
index 27a91cf43d6d..5020b7b5a244 100644
--- a/Documentation/scsi/LICENSE.qla2xxx
+++ b/Documentation/scsi/LICENSE.qla2xxx
@@ -1,4 +1,4 @@
1Copyright (c) 2003-2012 QLogic Corporation 1Copyright (c) 2003-2013 QLogic Corporation
2QLogic Linux FC-FCoE Driver 2QLogic Linux FC-FCoE Driver
3 3
4This program includes a device driver for Linux 3.x. 4This program includes a device driver for Linux 3.x.
diff --git a/MAINTAINERS b/MAINTAINERS
index c6108c319db9..8bdd7a7ef2f4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4941,6 +4941,12 @@ W: logfs.org
4941S: Maintained 4941S: Maintained
4942F: fs/logfs/ 4942F: fs/logfs/
4943 4943
4944LPC32XX MACHINE SUPPORT
4945M: Roland Stigge <stigge@antcom.de>
4946L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
4947S: Maintained
4948F: arch/arm/mach-lpc32xx/
4949
4944LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI) 4950LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
4945M: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com> 4951M: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
4946M: Sreekanth Reddy <Sreekanth.Reddy@lsi.com> 4952M: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
@@ -6951,7 +6957,6 @@ F: drivers/scsi/st*
6951 6957
6952SCTP PROTOCOL 6958SCTP PROTOCOL
6953M: Vlad Yasevich <vyasevich@gmail.com> 6959M: Vlad Yasevich <vyasevich@gmail.com>
6954M: Sridhar Samudrala <sri@us.ibm.com>
6955M: Neil Horman <nhorman@tuxdriver.com> 6960M: Neil Horman <nhorman@tuxdriver.com>
6956L: linux-sctp@vger.kernel.org 6961L: linux-sctp@vger.kernel.org
6957W: http://lksctp.sourceforge.net 6962W: http://lksctp.sourceforge.net
diff --git a/Makefile b/Makefile
index 6db672b15bda..9cf6783c2ec3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 9 2PATCHLEVEL = 9
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc6 4EXTRAVERSION = -rc7
5NAME = Unicycling Gorilla 5NAME = Unicycling Gorilla
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index ccd84806b62f..eac071668201 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void)
39 " flag.nz %0 \n" 39 " flag.nz %0 \n"
40 : "=r"(temp), "=r"(flags) 40 : "=r"(temp), "=r"(flags)
41 : "n"((STATUS_E1_MASK | STATUS_E2_MASK)) 41 : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
42 : "cc"); 42 : "memory", "cc");
43 43
44 return flags; 44 return flags;
45} 45}
@@ -53,7 +53,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
53 __asm__ __volatile__( 53 __asm__ __volatile__(
54 " flag %0 \n" 54 " flag %0 \n"
55 : 55 :
56 : "r"(flags)); 56 : "r"(flags)
57 : "memory");
57} 58}
58 59
59/* 60/*
@@ -73,7 +74,8 @@ static inline void arch_local_irq_disable(void)
73 " and %0, %0, %1 \n" 74 " and %0, %0, %1 \n"
74 " flag %0 \n" 75 " flag %0 \n"
75 : "=&r"(temp) 76 : "=&r"(temp)
76 : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))); 77 : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
78 : "memory");
77} 79}
78 80
79/* 81/*
@@ -85,7 +87,9 @@ static inline long arch_local_save_flags(void)
85 87
86 __asm__ __volatile__( 88 __asm__ __volatile__(
87 " lr %0, [status32] \n" 89 " lr %0, [status32] \n"
88 : "=&r"(temp)); 90 : "=&r"(temp)
91 :
92 : "memory");
89 93
90 return temp; 94 return temp;
91} 95}
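
The hunks above add "memory" clobbers to the ARC IRQ-state asm so that each statement also acts as a compiler barrier. A minimal sketch of the difference on a generic GCC inline-asm template (the IRQ_TOGGLE_* macro names are made up for illustration and are not kernel code):

/*
 * Without the clobber the compiler may keep values cached in registers
 * and move loads/stores across the statement, so a store meant to stay
 * inside the IRQ-disabled region can leak outside it.
 */
#define IRQ_TOGGLE_UNSAFE()	asm volatile("/* touch IRQ state */")

/*
 * With a "memory" clobber the asm doubles as a compiler barrier, the
 * property arch_local_irq_save()/arch_local_irq_restore() must provide.
 */
#define IRQ_TOGGLE_SAFE()	asm volatile("/* touch IRQ state */" ::: "memory")
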
diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts
index 6ce3d17c3a29..fd36e1cca104 100644
--- a/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/arch/arm/boot/dts/imx28-m28evk.dts
@@ -152,7 +152,6 @@
152 i2c0: i2c@80058000 { 152 i2c0: i2c@80058000 {
153 pinctrl-names = "default"; 153 pinctrl-names = "default";
154 pinctrl-0 = <&i2c0_pins_a>; 154 pinctrl-0 = <&i2c0_pins_a>;
155 clock-frequency = <400000>;
156 status = "okay"; 155 status = "okay";
157 156
158 sgtl5000: codec@0a { 157 sgtl5000: codec@0a {
diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts
index e6cde8aa7fff..6c6a5442800a 100644
--- a/arch/arm/boot/dts/imx28-sps1.dts
+++ b/arch/arm/boot/dts/imx28-sps1.dts
@@ -70,7 +70,6 @@
70 i2c0: i2c@80058000 { 70 i2c0: i2c@80058000 {
71 pinctrl-names = "default"; 71 pinctrl-names = "default";
72 pinctrl-0 = <&i2c0_pins_a>; 72 pinctrl-0 = <&i2c0_pins_a>;
73 clock-frequency = <400000>;
74 status = "okay"; 73 status = "okay";
75 74
76 rtc: rtc@51 { 75 rtc: rtc@51 {
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 06ec460b4581..281a223591ff 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -91,6 +91,7 @@
91 compatible = "arm,cortex-a9-twd-timer"; 91 compatible = "arm,cortex-a9-twd-timer";
92 reg = <0x00a00600 0x20>; 92 reg = <0x00a00600 0x20>;
93 interrupts = <1 13 0xf01>; 93 interrupts = <1 13 0xf01>;
94 clocks = <&clks 15>;
94 }; 95 };
95 96
96 L2: l2-cache@00a02000 { 97 L2: l2-cache@00a02000 {
diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
index 93c3afbef9ee..3694e94f6e99 100644
--- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
+++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
@@ -96,11 +96,11 @@
96 marvell,function = "gpio"; 96 marvell,function = "gpio";
97 }; 97 };
98 pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 { 98 pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 {
99 marvell,pins = "mpp44"; 99 marvell,pins = "mpp46";
100 marvell,function = "gpio"; 100 marvell,function = "gpio";
101 }; 101 };
102 pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 { 102 pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 {
103 marvell,pins = "mpp45"; 103 marvell,pins = "mpp47";
104 marvell,function = "gpio"; 104 marvell,function = "gpio";
105 }; 105 };
106 106
@@ -157,14 +157,14 @@
157 gpios = <&gpio0 16 0>; 157 gpios = <&gpio0 16 0>;
158 linux,default-trigger = "default-on"; 158 linux,default-trigger = "default-on";
159 }; 159 };
160 health_led1 { 160 rebuild_led {
161 label = "status:white:rebuild_led";
162 gpios = <&gpio1 4 0>;
163 };
164 health_led {
161 label = "status:red:health_led"; 165 label = "status:red:health_led";
162 gpios = <&gpio1 5 0>; 166 gpios = <&gpio1 5 0>;
163 }; 167 };
164 health_led2 {
165 label = "status:white:health_led";
166 gpios = <&gpio1 4 0>;
167 };
168 backup_led { 168 backup_led {
169 label = "status:blue:backup_led"; 169 label = "status:blue:backup_led";
170 gpios = <&gpio0 15 0>; 170 gpios = <&gpio0 15 0>;
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index e13a8fa5e62c..2193c834f55c 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -257,6 +257,7 @@ int __init mx35_clocks_init(void)
257 clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); 257 clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
258 clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); 258 clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
259 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); 259 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
260 clk_register_clkdev(clk[admux_gate], "audmux", NULL);
260 261
261 clk_prepare_enable(clk[spba_gate]); 262 clk_prepare_enable(clk[spba_gate]);
262 clk_prepare_enable(clk[gpio1_gate]); 263 clk_prepare_enable(clk[gpio1_gate]);
@@ -265,6 +266,7 @@ int __init mx35_clocks_init(void)
265 clk_prepare_enable(clk[iim_gate]); 266 clk_prepare_enable(clk[iim_gate]);
266 clk_prepare_enable(clk[emi_gate]); 267 clk_prepare_enable(clk[emi_gate]);
267 clk_prepare_enable(clk[max_gate]); 268 clk_prepare_enable(clk[max_gate]);
269 clk_prepare_enable(clk[iomuxc_gate]);
268 270
269 /* 271 /*
270 * SCC is needed to boot via mmc after a watchdog reset. The clock code 272 * SCC is needed to boot via mmc after a watchdog reset. The clock code
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 2f9ff93a4e61..d38e54f5b6d7 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m"
115static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; 115static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
116static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; 116static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
117static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; 117static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
118static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", }; 118static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
119static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; 119static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
120static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; 120static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
121static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; 121static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
@@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void)
443 443
444 clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); 444 clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
445 clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); 445 clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
446 clk_register_clkdev(clk[twd], NULL, "smp_twd");
447 clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); 446 clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
448 clk_register_clkdev(clk[ahb], "ahb", NULL); 447 clk_register_clkdev(clk[ahb], "ahb", NULL);
449 clk_register_clkdev(clk[cko1], "cko1", NULL); 448 clk_register_clkdev(clk[cko1], "cko1", NULL);
diff --git a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
index f655b2637b0e..e5f70415905a 100644
--- a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
+++ b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
@@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = {
20 .duplex = DUPLEX_FULL, 20 .duplex = DUPLEX_FULL,
21}; 21};
22 22
23static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = {
24 .phy_addr = MV643XX_ETH_PHY_ADDR(11),
25};
26
23void __init iomega_ix2_200_init(void) 27void __init iomega_ix2_200_init(void)
24{ 28{
25 /* 29 /*
26 * Basic setup. Needs to be called early. 30 * Basic setup. Needs to be called early.
27 */ 31 */
28 kirkwood_ge01_init(&iomega_ix2_200_ge00_data); 32 kirkwood_ge00_init(&iomega_ix2_200_ge00_data);
33 kirkwood_ge01_init(&iomega_ix2_200_ge01_data);
29} 34}
diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c
index 6a9195e10579..d5970f5a1e8d 100644
--- a/arch/arm/mach-mvebu/irq-armada-370-xp.c
+++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c
@@ -61,7 +61,6 @@ static struct irq_domain *armada_370_xp_mpic_domain;
61 */ 61 */
62static void armada_370_xp_irq_mask(struct irq_data *d) 62static void armada_370_xp_irq_mask(struct irq_data *d)
63{ 63{
64#ifdef CONFIG_SMP
65 irq_hw_number_t hwirq = irqd_to_hwirq(d); 64 irq_hw_number_t hwirq = irqd_to_hwirq(d);
66 65
67 if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) 66 if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
@@ -70,15 +69,10 @@ static void armada_370_xp_irq_mask(struct irq_data *d)
70 else 69 else
71 writel(hwirq, per_cpu_int_base + 70 writel(hwirq, per_cpu_int_base +
72 ARMADA_370_XP_INT_SET_MASK_OFFS); 71 ARMADA_370_XP_INT_SET_MASK_OFFS);
73#else
74 writel(irqd_to_hwirq(d),
75 per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
76#endif
77} 72}
78 73
79static void armada_370_xp_irq_unmask(struct irq_data *d) 74static void armada_370_xp_irq_unmask(struct irq_data *d)
80{ 75{
81#ifdef CONFIG_SMP
82 irq_hw_number_t hwirq = irqd_to_hwirq(d); 76 irq_hw_number_t hwirq = irqd_to_hwirq(d);
83 77
84 if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) 78 if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
@@ -87,10 +81,6 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
87 else 81 else
88 writel(hwirq, per_cpu_int_base + 82 writel(hwirq, per_cpu_int_base +
89 ARMADA_370_XP_INT_CLEAR_MASK_OFFS); 83 ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
90#else
91 writel(irqd_to_hwirq(d),
92 per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
93#endif
94} 84}
95 85
96#ifdef CONFIG_SMP 86#ifdef CONFIG_SMP
@@ -146,7 +136,11 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
146 unsigned int virq, irq_hw_number_t hw) 136 unsigned int virq, irq_hw_number_t hw)
147{ 137{
148 armada_370_xp_irq_mask(irq_get_irq_data(virq)); 138 armada_370_xp_irq_mask(irq_get_irq_data(virq));
149 writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); 139 if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
140 writel(hw, per_cpu_int_base +
141 ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
142 else
143 writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
150 irq_set_status_flags(virq, IRQ_LEVEL); 144 irq_set_status_flags(virq, IRQ_LEVEL);
151 145
152 if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) { 146 if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h
index b7a9f4d469e8..1e73f5fa8659 100644
--- a/arch/arm/mach-s3c24xx/include/mach/irqs.h
+++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h
@@ -188,10 +188,8 @@
188 188
189#if defined(CONFIG_CPU_S3C2416) 189#if defined(CONFIG_CPU_S3C2416)
190#define NR_IRQS (IRQ_S3C2416_I2S1 + 1) 190#define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
191#elif defined(CONFIG_CPU_S3C2443)
192#define NR_IRQS (IRQ_S3C2443_AC97+1)
193#else 191#else
194#define NR_IRQS (IRQ_S3C2440_AC97+1) 192#define NR_IRQS (IRQ_S3C2443_AC97 + 1)
195#endif 193#endif
196 194
197/* compatibility define. */ 195/* compatibility define. */
diff --git a/arch/arm/mach-s3c24xx/irq.c b/arch/arm/mach-s3c24xx/irq.c
index cb9f5e011e73..d8ba9bee4c7e 100644
--- a/arch/arm/mach-s3c24xx/irq.c
+++ b/arch/arm/mach-s3c24xx/irq.c
@@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np,
500 base = (void *)0xfd000000; 500 base = (void *)0xfd000000;
501 501
502 intc->reg_mask = base + 0xa4; 502 intc->reg_mask = base + 0xa4;
503 intc->reg_pending = base + 0x08; 503 intc->reg_pending = base + 0xa8;
504 irq_num = 20; 504 irq_num = 20;
505 irq_start = S3C2410_IRQ(32); 505 irq_start = S3C2410_IRQ(32);
506 irq_offset = 4; 506 irq_offset = 4;
diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h
index cf78e09e18c3..2c71d5634ec2 100644
--- a/arch/c6x/include/asm/irqflags.h
+++ b/arch/c6x/include/asm/irqflags.h
@@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void)
27/* set interrupt enabled status */ 27/* set interrupt enabled status */
28static inline void arch_local_irq_restore(unsigned long flags) 28static inline void arch_local_irq_restore(unsigned long flags)
29{ 29{
30 asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags)); 30 asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory");
31} 31}
32 32
33/* unconditionally enable interrupts */ 33/* unconditionally enable interrupts */
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 77597e5ea60a..79521d5499f9 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -849,17 +849,6 @@ static palinfo_entry_t palinfo_entries[]={
849 849
850#define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) 850#define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries)
851 851
852/*
853 * this array is used to keep track of the proc entries we create. This is
854 * required in the module mode when we need to remove all entries. The procfs code
855 * does not do recursion of deletion
856 *
857 * Notes:
858 * - +1 accounts for the cpuN directory entry in /proc/pal
859 */
860#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1))
861
862static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];
863static struct proc_dir_entry *palinfo_dir; 852static struct proc_dir_entry *palinfo_dir;
864 853
865/* 854/*
@@ -971,60 +960,32 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi
971static void __cpuinit 960static void __cpuinit
972create_palinfo_proc_entries(unsigned int cpu) 961create_palinfo_proc_entries(unsigned int cpu)
973{ 962{
974# define CPUSTR "cpu%d"
975
976 pal_func_cpu_u_t f; 963 pal_func_cpu_u_t f;
977 struct proc_dir_entry **pdir;
978 struct proc_dir_entry *cpu_dir; 964 struct proc_dir_entry *cpu_dir;
979 int j; 965 int j;
980 char cpustr[sizeof(CPUSTR)]; 966 char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
981 967 sprintf(cpustr, "cpu%d", cpu);
982
983 /*
984 * we keep track of created entries in a depth-first order for
985 * cleanup purposes. Each entry is stored into palinfo_proc_entries
986 */
987 sprintf(cpustr,CPUSTR, cpu);
988 968
989 cpu_dir = proc_mkdir(cpustr, palinfo_dir); 969 cpu_dir = proc_mkdir(cpustr, palinfo_dir);
970 if (!cpu_dir)
971 return;
990 972
991 f.req_cpu = cpu; 973 f.req_cpu = cpu;
992 974
993 /*
994 * Compute the location to store per cpu entries
995 * We dont store the top level entry in this list, but
996 * remove it finally after removing all cpu entries.
997 */
998 pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)];
999 *pdir++ = cpu_dir;
1000 for (j=0; j < NR_PALINFO_ENTRIES; j++) { 975 for (j=0; j < NR_PALINFO_ENTRIES; j++) {
1001 f.func_id = j; 976 f.func_id = j;
1002 *pdir = create_proc_read_entry( 977 create_proc_read_entry(
1003 palinfo_entries[j].name, 0, cpu_dir, 978 palinfo_entries[j].name, 0, cpu_dir,
1004 palinfo_read_entry, (void *)f.value); 979 palinfo_read_entry, (void *)f.value);
1005 pdir++;
1006 } 980 }
1007} 981}
1008 982
1009static void 983static void
1010remove_palinfo_proc_entries(unsigned int hcpu) 984remove_palinfo_proc_entries(unsigned int hcpu)
1011{ 985{
1012 int j; 986 char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
1013 struct proc_dir_entry *cpu_dir, **pdir; 987 sprintf(cpustr, "cpu%d", hcpu);
1014 988 remove_proc_subtree(cpustr, palinfo_dir);
1015 pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)];
1016 cpu_dir = *pdir;
1017 *pdir++=NULL;
1018 for (j=0; j < (NR_PALINFO_ENTRIES); j++) {
1019 if ((*pdir)) {
1020 remove_proc_entry ((*pdir)->name, cpu_dir);
1021 *pdir ++= NULL;
1022 }
1023 }
1024
1025 if (cpu_dir) {
1026 remove_proc_entry(cpu_dir->name, palinfo_dir);
1027 }
1028} 989}
1029 990
1030static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, 991static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
@@ -1058,6 +1019,8 @@ palinfo_init(void)
1058 1019
1059 printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); 1020 printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
1060 palinfo_dir = proc_mkdir("pal", NULL); 1021 palinfo_dir = proc_mkdir("pal", NULL);
1022 if (!palinfo_dir)
1023 return -ENOMEM;
1061 1024
1062 /* Create palinfo dirs in /proc for all online cpus */ 1025 /* Create palinfo dirs in /proc for all online cpus */
1063 for_each_online_cpu(i) { 1026 for_each_online_cpu(i) {
@@ -1073,22 +1036,8 @@ palinfo_init(void)
1073static void __exit 1036static void __exit
1074palinfo_exit(void) 1037palinfo_exit(void)
1075{ 1038{
1076 int i = 0;
1077
1078 /* remove all nodes: depth first pass. Could optimize this */
1079 for_each_online_cpu(i) {
1080 remove_palinfo_proc_entries(i);
1081 }
1082
1083 /*
1084 * Remove the top level entry finally
1085 */
1086 remove_proc_entry(palinfo_dir->name, NULL);
1087
1088 /*
1089 * Unregister from cpu notifier callbacks
1090 */
1091 unregister_hotcpu_notifier(&palinfo_cpu_notifier); 1039 unregister_hotcpu_notifier(&palinfo_cpu_notifier);
1040 remove_proc_subtree("pal", NULL);
1092} 1041}
1093 1042
1094module_init(palinfo_init); 1043module_init(palinfo_init);
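
The rewrite above drops the hand-maintained palinfo_proc_entries[] bookkeeping and instead lets remove_proc_subtree() delete a whole /proc directory recursively. A minimal sketch of that pattern, assuming nothing beyond proc_mkdir() and remove_proc_subtree() (the "demo" names are illustrative only):

#include <linux/module.h>
#include <linux/proc_fs.h>

static struct proc_dir_entry *demo_dir;

static int __init demo_init(void)
{
	demo_dir = proc_mkdir("demo", NULL);		/* /proc/demo */
	if (!demo_dir)
		return -ENOMEM;
	proc_mkdir("cpu0", demo_dir);			/* /proc/demo/cpu0 */
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_subtree("demo", NULL);	/* also removes cpu0 underneath */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
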
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
index 4395ffc51fdb..8cc83431805b 100644
--- a/arch/m68k/include/asm/gpio.h
+++ b/arch/m68k/include/asm/gpio.h
@@ -86,4 +86,24 @@ static inline int gpio_cansleep(unsigned gpio)
86 return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio); 86 return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
87} 87}
88 88
89static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
90{
91 int err;
92
93 err = gpio_request(gpio, label);
94 if (err)
95 return err;
96
97 if (flags & GPIOF_DIR_IN)
98 err = gpio_direction_input(gpio);
99 else
100 err = gpio_direction_output(gpio,
101 (flags & GPIOF_INIT_HIGH) ? 1 : 0);
102
103 if (err)
104 gpio_free(gpio);
105
106 return err;
107}
108
89#endif 109#endif
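
The gpio_request_one() helper added above mirrors the generic gpiolib version: request the line, configure its direction, and release it again if configuration fails. A hedged usage sketch (GPIO number 42 and the "demo-led" label are made up for illustration):

#include <linux/gpio.h>

static int demo_claim_led(void)
{
	int err;

	/* claim GPIO 42 and configure it as an output driven low */
	err = gpio_request_one(42, GPIOF_OUT_INIT_LOW, "demo-led");
	if (err)
		return err;

	gpio_set_value(42, 1);		/* drive the line high */
	gpio_free(42);			/* release when done */
	return 0;
}
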
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 0da39fed355a..299731e9036b 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
186 (0x1UL << 4), &dummy1, &dummy2); 186 (0x1UL << 4), &dummy1, &dummy2);
187 if (lpar_rc == H_SUCCESS) 187 if (lpar_rc == H_SUCCESS)
188 return i; 188 return i;
189 BUG_ON(lpar_rc != H_NOT_FOUND); 189
190 /*
191 * The test for adjunct partition is performed before the
192 * ANDCOND test. H_RESOURCE may be returned, so we need to
193 * check for that as well.
194 */
195 BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
190 196
191 slot_offset++; 197 slot_offset++;
192 slot_offset &= 0x7; 198 slot_offset &= 0x7;
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 241c0bb60b12..c96f9bbb760d 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -40,7 +40,15 @@
40#include <asm/percpu.h> 40#include <asm/percpu.h>
41#include <arch/spr_def.h> 41#include <arch/spr_def.h>
42 42
43/* Set and clear kernel interrupt masks. */ 43/*
44 * Set and clear kernel interrupt masks.
45 *
46 * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
47 * clobber. We rely on it being equivalent to a compiler barrier in
48 * this code since arch_local_irq_save() and friends must act as
49 * compiler barriers. This compiler semantic is baked into enough
50 * places that the compiler will maintain it going forward.
51 */
44#if CHIP_HAS_SPLIT_INTR_MASK() 52#if CHIP_HAS_SPLIT_INTR_MASK()
45#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 53#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
46# error Fix assumptions about which word various interrupts are in 54# error Fix assumptions about which word various interrupts are in
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 5edd1742cfd0..7361e47db79f 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
703 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); 703 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
704} 704}
705 705
706void arch_flush_lazy_mmu_mode(void); 706static inline void arch_flush_lazy_mmu_mode(void)
707{
708 PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
709}
707 710
708static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, 711static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
709 phys_addr_t phys, pgprot_t flags) 712 phys_addr_t phys, pgprot_t flags)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 142236ed83af..b3b0ec1dac86 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -91,6 +91,7 @@ struct pv_lazy_ops {
91 /* Set deferred update mode, used for batching operations. */ 91 /* Set deferred update mode, used for batching operations. */
92 void (*enter)(void); 92 void (*enter)(void);
93 void (*leave)(void); 93 void (*leave)(void);
94 void (*flush)(void);
94}; 95};
95 96
96struct pv_time_ops { 97struct pv_time_ops {
@@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next);
679 680
680void paravirt_enter_lazy_mmu(void); 681void paravirt_enter_lazy_mmu(void);
681void paravirt_leave_lazy_mmu(void); 682void paravirt_leave_lazy_mmu(void);
683void paravirt_flush_lazy_mmu(void);
682 684
683void _paravirt_nop(void); 685void _paravirt_nop(void);
684u32 _paravirt_ident_32(u32); 686u32 _paravirt_ident_32(u32);
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 4fef20773b8f..c7797307fc2b 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -7,7 +7,7 @@
7 7
8#define tlb_flush(tlb) \ 8#define tlb_flush(tlb) \
9{ \ 9{ \
10 if (tlb->fullmm == 0) \ 10 if (!tlb->fullmm && !tlb->need_flush_all) \
11 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \ 11 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
12 else \ 12 else \
13 flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \ 13 flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index b05a575d56f4..26830f3af0df 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -314,10 +314,11 @@ int intel_pmu_drain_bts_buffer(void)
314 if (top <= at) 314 if (top <= at)
315 return 0; 315 return 0;
316 316
317 memset(&regs, 0, sizeof(regs));
318
317 ds->bts_index = ds->bts_buffer_base; 319 ds->bts_index = ds->bts_buffer_base;
318 320
319 perf_sample_data_init(&data, 0, event->hw.last_period); 321 perf_sample_data_init(&data, 0, event->hw.last_period);
320 regs.ip = 0;
321 322
322 /* 323 /*
323 * Prepare a generic sample, i.e. fill in the invariant fields. 324 * Prepare a generic sample, i.e. fill in the invariant fields.
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 17fff18a1031..8bfb335f74bb 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)
263 leave_lazy(PARAVIRT_LAZY_MMU); 263 leave_lazy(PARAVIRT_LAZY_MMU);
264} 264}
265 265
266void paravirt_flush_lazy_mmu(void)
267{
268 preempt_disable();
269
270 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
271 arch_leave_lazy_mmu_mode();
272 arch_enter_lazy_mmu_mode();
273 }
274
275 preempt_enable();
276}
277
266void paravirt_start_context_switch(struct task_struct *prev) 278void paravirt_start_context_switch(struct task_struct *prev)
267{ 279{
268 BUG_ON(preemptible()); 280 BUG_ON(preemptible());
@@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
292 return this_cpu_read(paravirt_lazy_mode); 304 return this_cpu_read(paravirt_lazy_mode);
293} 305}
294 306
295void arch_flush_lazy_mmu_mode(void)
296{
297 preempt_disable();
298
299 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
300 arch_leave_lazy_mmu_mode();
301 arch_enter_lazy_mmu_mode();
302 }
303
304 preempt_enable();
305}
306
307struct pv_info pv_info = { 307struct pv_info pv_info = {
308 .name = "bare hardware", 308 .name = "bare hardware",
309 .paravirt_enabled = 0, 309 .paravirt_enabled = 0,
@@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {
475 .lazy_mode = { 475 .lazy_mode = {
476 .enter = paravirt_nop, 476 .enter = paravirt_nop,
477 .leave = paravirt_nop, 477 .leave = paravirt_nop,
478 .flush = paravirt_nop,
478 }, 479 },
479 480
480 .set_fixmap = native_set_fixmap, 481 .set_fixmap = native_set_fixmap,
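
With the new flush hook, arch_flush_lazy_mmu_mode() becomes a real paravirt operation instead of an unconditional leave/enter pair. A hedged sketch of the call pattern it serves (generic lazy-MMU batching; demo_update_pte is a made-up helper, not code from this commit):

#include <linux/mm.h>
#include <asm/pgtable.h>

static void demo_update_pte(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte)
{
	arch_enter_lazy_mmu_mode();	/* a pv backend may queue the update */
	set_pte_at(mm, addr, ptep, pte);
	arch_flush_lazy_mmu_mode();	/* force any queued updates out now */
	arch_leave_lazy_mmu_mode();
}
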
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 1cbd89ca5569..7114c63f047d 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1334,6 +1334,7 @@ __init void lguest_init(void)
1334 pv_mmu_ops.read_cr3 = lguest_read_cr3; 1334 pv_mmu_ops.read_cr3 = lguest_read_cr3;
1335 pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; 1335 pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
1336 pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; 1336 pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
1337 pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
1337 pv_mmu_ops.pte_update = lguest_pte_update; 1338 pv_mmu_ops.pte_update = lguest_pte_update;
1338 pv_mmu_ops.pte_update_defer = lguest_pte_update; 1339 pv_mmu_ops.pte_update_defer = lguest_pte_update;
1339 1340
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2b97525246d4..0e883364abb5 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
378 if (pgd_none(*pgd_ref)) 378 if (pgd_none(*pgd_ref))
379 return -1; 379 return -1;
380 380
381 if (pgd_none(*pgd)) 381 if (pgd_none(*pgd)) {
382 set_pgd(pgd, *pgd_ref); 382 set_pgd(pgd, *pgd_ref);
383 else 383 arch_flush_lazy_mmu_mode();
384 } else {
384 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); 385 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
386 }
385 387
386 /* 388 /*
387 * Below here mismatches are bugs because these lower tables 389 * Below here mismatches are bugs because these lower tables
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index b0086567271c..0e38951e65eb 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -68,7 +68,7 @@ static int print_split(struct split_state *s)
68 s->gpg++; 68 s->gpg++;
69 i += GPS/PAGE_SIZE; 69 i += GPS/PAGE_SIZE;
70 } else if (level == PG_LEVEL_2M) { 70 } else if (level == PG_LEVEL_2M) {
71 if (!(pte_val(*pte) & _PAGE_PSE)) { 71 if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
72 printk(KERN_ERR 72 printk(KERN_ERR
73 "%lx level %d but not PSE %Lx\n", 73 "%lx level %d but not PSE %Lx\n",
74 addr, level, (u64)pte_val(*pte)); 74 addr, level, (u64)pte_val(*pte));
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 091934e1d0d9..fb4e73ec24d8 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
467 * We are safe now. Check whether the new pgprot is the same: 467 * We are safe now. Check whether the new pgprot is the same:
468 */ 468 */
469 old_pte = *kpte; 469 old_pte = *kpte;
470 old_prot = new_prot = req_prot = pte_pgprot(old_pte); 470 old_prot = req_prot = pte_pgprot(old_pte);
471 471
472 pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); 472 pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
473 pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); 473 pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
@@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
478 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL 478 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
479 * for the ancient hardware that doesn't support it. 479 * for the ancient hardware that doesn't support it.
480 */ 480 */
481 if (pgprot_val(new_prot) & _PAGE_PRESENT) 481 if (pgprot_val(req_prot) & _PAGE_PRESENT)
482 pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL; 482 pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
483 else 483 else
484 pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); 484 pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
485 485
486 new_prot = canon_pgprot(new_prot); 486 req_prot = canon_pgprot(req_prot);
487 487
488 /* 488 /*
489 * old_pte points to the large page base address. So we need 489 * old_pte points to the large page base address. So we need
@@ -1413,6 +1413,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
1413 * but that can deadlock->flush only current cpu: 1413 * but that can deadlock->flush only current cpu:
1414 */ 1414 */
1415 __flush_tlb_all(); 1415 __flush_tlb_all();
1416
1417 arch_flush_lazy_mmu_mode();
1416} 1418}
1417 1419
1418#ifdef CONFIG_HIBERNATION 1420#ifdef CONFIG_HIBERNATION
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 193350b51f90..17fda6a8b3c2 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
58void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) 58void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
59{ 59{
60 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); 60 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
61 /*
62 * NOTE! For PAE, any changes to the top page-directory-pointer-table
63 * entries need a full cr3 reload to flush.
64 */
65#ifdef CONFIG_X86_PAE
66 tlb->need_flush_all = 1;
67#endif
61 tlb_remove_page(tlb, virt_to_page(pmd)); 68 tlb_remove_page(tlb, virt_to_page(pmd));
62} 69}
63 70
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 6afbb2ca9a0a..e006c18d288a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1748,14 +1748,18 @@ static void *m2v(phys_addr_t maddr)
1748} 1748}
1749 1749
1750/* Set the page permissions on an identity-mapped pages */ 1750/* Set the page permissions on an identity-mapped pages */
1751static void set_page_prot(void *addr, pgprot_t prot) 1751static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
1752{ 1752{
1753 unsigned long pfn = __pa(addr) >> PAGE_SHIFT; 1753 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1754 pte_t pte = pfn_pte(pfn, prot); 1754 pte_t pte = pfn_pte(pfn, prot);
1755 1755
1756 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0)) 1756 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1757 BUG(); 1757 BUG();
1758} 1758}
1759static void set_page_prot(void *addr, pgprot_t prot)
1760{
1761 return set_page_prot_flags(addr, prot, UVMF_NONE);
1762}
1759#ifdef CONFIG_X86_32 1763#ifdef CONFIG_X86_32
1760static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) 1764static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1761{ 1765{
@@ -1839,12 +1843,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1839 unsigned long addr) 1843 unsigned long addr)
1840{ 1844{
1841 if (*pt_base == PFN_DOWN(__pa(addr))) { 1845 if (*pt_base == PFN_DOWN(__pa(addr))) {
1842 set_page_prot((void *)addr, PAGE_KERNEL); 1846 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1843 clear_page((void *)addr); 1847 clear_page((void *)addr);
1844 (*pt_base)++; 1848 (*pt_base)++;
1845 } 1849 }
1846 if (*pt_end == PFN_DOWN(__pa(addr))) { 1850 if (*pt_end == PFN_DOWN(__pa(addr))) {
1847 set_page_prot((void *)addr, PAGE_KERNEL); 1851 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1848 clear_page((void *)addr); 1852 clear_page((void *)addr);
1849 (*pt_end)--; 1853 (*pt_end)--;
1850 } 1854 }
@@ -2196,6 +2200,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2196 .lazy_mode = { 2200 .lazy_mode = {
2197 .enter = paravirt_enter_lazy_mmu, 2201 .enter = paravirt_enter_lazy_mmu,
2198 .leave = xen_leave_lazy_mmu, 2202 .leave = xen_leave_lazy_mmu,
2203 .flush = paravirt_flush_lazy_mmu,
2199 }, 2204 },
2200 2205
2201 .set_fixmap = xen_set_fixmap, 2206 .set_fixmap = xen_set_fixmap,
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 6206a934eb8c..5efc5a647183 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -229,6 +229,8 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
229 unsigned long val; \ 229 unsigned long val; \
230 ssize_t ret; \ 230 ssize_t ret; \
231 ret = queue_var_store(&val, page, count); \ 231 ret = queue_var_store(&val, page, count); \
232 if (ret < 0) \
233 return ret; \
232 if (neg) \ 234 if (neg) \
233 val = !val; \ 235 val = !val; \
234 \ 236 \
diff --git a/block/partition-generic.c b/block/partition-generic.c
index ae95ee6a58aa..789cdea05893 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -257,7 +257,6 @@ void delete_partition(struct gendisk *disk, int partno)
257 257
258 hd_struct_put(part); 258 hd_struct_put(part);
259} 259}
260EXPORT_SYMBOL(delete_partition);
261 260
262static ssize_t whole_disk_show(struct device *dev, 261static ssize_t whole_disk_show(struct device *dev,
263 struct device_attribute *attr, char *buf) 262 struct device_attribute *attr, char *buf)
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 137ad1ec5438..13ccbda34ff9 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -44,6 +44,7 @@ struct crypto_rfc4543_ctx {
44 44
45struct crypto_rfc4543_req_ctx { 45struct crypto_rfc4543_req_ctx {
46 u8 auth_tag[16]; 46 u8 auth_tag[16];
47 u8 assocbuf[32];
47 struct scatterlist cipher[1]; 48 struct scatterlist cipher[1];
48 struct scatterlist payload[2]; 49 struct scatterlist payload[2];
49 struct scatterlist assoc[2]; 50 struct scatterlist assoc[2];
@@ -1133,9 +1134,19 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
1133 scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2); 1134 scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
1134 assoclen += 8 + req->cryptlen - (enc ? 0 : authsize); 1135 assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
1135 1136
1136 sg_init_table(assoc, 2); 1137 if (req->assoc->length == req->assoclen) {
1137 sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, 1138 sg_init_table(assoc, 2);
1138 req->assoc->offset); 1139 sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
1140 req->assoc->offset);
1141 } else {
1142 BUG_ON(req->assoclen > sizeof(rctx->assocbuf));
1143
1144 scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0,
1145 req->assoclen, 0);
1146
1147 sg_init_table(assoc, 2);
1148 sg_set_buf(assoc, rctx->assocbuf, req->assoclen);
1149 }
1139 scatterwalk_crypto_chain(assoc, payload, 0, 2); 1150 scatterwalk_crypto_chain(assoc, payload, 0, 2);
1140 1151
1141 aead_request_set_tfm(subreq, ctx->child); 1152 aead_request_set_tfm(subreq, ctx->child);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ffdd32d22602..2f48123d74c4 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -150,6 +150,7 @@ enum piix_controller_ids {
150 tolapai_sata, 150 tolapai_sata,
151 piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ 151 piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
152 ich8_sata_snb, 152 ich8_sata_snb,
153 ich8_2port_sata_snb,
153}; 154};
154 155
155struct piix_map_db { 156struct piix_map_db {
@@ -304,7 +305,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
304 /* SATA Controller IDE (Lynx Point) */ 305 /* SATA Controller IDE (Lynx Point) */
305 { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, 306 { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
306 /* SATA Controller IDE (Lynx Point) */ 307 /* SATA Controller IDE (Lynx Point) */
307 { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 308 { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
308 /* SATA Controller IDE (Lynx Point) */ 309 /* SATA Controller IDE (Lynx Point) */
309 { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 310 { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
310 /* SATA Controller IDE (Lynx Point-LP) */ 311 /* SATA Controller IDE (Lynx Point-LP) */
@@ -439,6 +440,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
439 [ich8m_apple_sata] = &ich8m_apple_map_db, 440 [ich8m_apple_sata] = &ich8m_apple_map_db,
440 [tolapai_sata] = &tolapai_map_db, 441 [tolapai_sata] = &tolapai_map_db,
441 [ich8_sata_snb] = &ich8_map_db, 442 [ich8_sata_snb] = &ich8_map_db,
443 [ich8_2port_sata_snb] = &ich8_2port_map_db,
442}; 444};
443 445
444static struct pci_bits piix_enable_bits[] = { 446static struct pci_bits piix_enable_bits[] = {
@@ -1242,6 +1244,16 @@ static struct ata_port_info piix_port_info[] = {
1242 .udma_mask = ATA_UDMA6, 1244 .udma_mask = ATA_UDMA6,
1243 .port_ops = &piix_sata_ops, 1245 .port_ops = &piix_sata_ops,
1244 }, 1246 },
1247
1248 [ich8_2port_sata_snb] =
1249 {
1250 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR
1251 | PIIX_FLAG_PIO16,
1252 .pio_mask = ATA_PIO4,
1253 .mwdma_mask = ATA_MWDMA2,
1254 .udma_mask = ATA_UDMA6,
1255 .port_ops = &piix_sata_ops,
1256 },
1245}; 1257};
1246 1258
1247#define AHCI_PCI_BAR 5 1259#define AHCI_PCI_BAR 5
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 497adea1f0d6..63c743baf920 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2329,7 +2329,7 @@ int ata_dev_configure(struct ata_device *dev)
2329 * from SATA Settings page of Identify Device Data Log. 2329 * from SATA Settings page of Identify Device Data Log.
2330 */ 2330 */
2331 if (ata_id_has_devslp(dev->id)) { 2331 if (ata_id_has_devslp(dev->id)) {
2332 u8 sata_setting[ATA_SECT_SIZE]; 2332 u8 *sata_setting = ap->sector_buf;
2333 int i, j; 2333 int i, j;
2334 2334
2335 dev->flags |= ATA_DFLAG_DEVSLP; 2335 dev->flags |= ATA_DFLAG_DEVSLP;
@@ -2439,6 +2439,9 @@ int ata_dev_configure(struct ata_device *dev)
2439 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2439 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2440 dev->max_sectors); 2440 dev->max_sectors);
2441 2441
2442 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2443 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2444
2442 if (ap->ops->dev_config) 2445 if (ap->ops->dev_config)
2443 ap->ops->dev_config(dev); 2446 ap->ops->dev_config(dev);
2444 2447
@@ -4100,6 +4103,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4100 /* Weird ATAPI devices */ 4103 /* Weird ATAPI devices */
4101 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4104 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4102 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4105 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4106 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4103 4107
4104 /* Devices we expect to fail diagnostics */ 4108 /* Devices we expect to fail diagnostics */
4105 4109
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 318b41358187..ff44787e5a45 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -532,8 +532,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
532 struct scsi_sense_hdr sshdr; 532 struct scsi_sense_hdr sshdr;
533 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, 533 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
534 &sshdr); 534 &sshdr);
535 if (sshdr.sense_key == 0 && 535 if (sshdr.sense_key == RECOVERED_ERROR &&
536 sshdr.asc == 0 && sshdr.ascq == 0) 536 sshdr.asc == 0 && sshdr.ascq == 0x1d)
537 cmd_result &= ~SAM_STAT_CHECK_CONDITION; 537 cmd_result &= ~SAM_STAT_CHECK_CONDITION;
538 } 538 }
539 539
@@ -618,8 +618,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
618 struct scsi_sense_hdr sshdr; 618 struct scsi_sense_hdr sshdr;
619 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, 619 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
620 &sshdr); 620 &sshdr);
621 if (sshdr.sense_key == 0 && 621 if (sshdr.sense_key == RECOVERED_ERROR &&
622 sshdr.asc == 0 && sshdr.ascq == 0) 622 sshdr.asc == 0 && sshdr.ascq == 0x1d)
623 cmd_result &= ~SAM_STAT_CHECK_CONDITION; 623 cmd_result &= ~SAM_STAT_CHECK_CONDITION;
624 } 624 }
625 625
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index d34adef1e63e..58cfb3232428 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -943,7 +943,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
943 unsigned int ival; 943 unsigned int ival;
944 int val_bytes = map->format.val_bytes; 944 int val_bytes = map->format.val_bytes;
945 for (i = 0; i < val_len / val_bytes; i++) { 945 for (i = 0; i < val_len / val_bytes; i++) {
946 ival = map->format.parse_val(val + (i * val_bytes)); 946 memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
947 ival = map->format.parse_val(map->work_buf);
947 ret = regcache_write(map, reg + (i * map->reg_stride), 948 ret = regcache_write(map, reg + (i * map->reg_stride),
948 ival); 949 ival);
949 if (ret) { 950 if (ret) {
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 2c127f9c3f3b..dfe758382eaf 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1051,29 +1051,12 @@ static int loop_clr_fd(struct loop_device *lo)
1051 lo->lo_state = Lo_unbound; 1051 lo->lo_state = Lo_unbound;
1052 /* This is safe: open() is still holding a reference. */ 1052 /* This is safe: open() is still holding a reference. */
1053 module_put(THIS_MODULE); 1053 module_put(THIS_MODULE);
1054 if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
1055 ioctl_by_bdev(bdev, BLKRRPART, 0);
1054 lo->lo_flags = 0; 1056 lo->lo_flags = 0;
1055 if (!part_shift) 1057 if (!part_shift)
1056 lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; 1058 lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
1057 mutex_unlock(&lo->lo_ctl_mutex); 1059 mutex_unlock(&lo->lo_ctl_mutex);
1058
1059 /*
1060 * Remove all partitions, since BLKRRPART won't remove user
1061 * added partitions when max_part=0
1062 */
1063 if (bdev) {
1064 struct disk_part_iter piter;
1065 struct hd_struct *part;
1066
1067 mutex_lock_nested(&bdev->bd_mutex, 1);
1068 invalidate_partition(bdev->bd_disk, 0);
1069 disk_part_iter_init(&piter, bdev->bd_disk,
1070 DISK_PITER_INCL_EMPTY);
1071 while ((part = disk_part_iter_next(&piter)))
1072 delete_partition(bdev->bd_disk, part->partno);
1073 disk_part_iter_exit(&piter);
1074 mutex_unlock(&bdev->bd_mutex);
1075 }
1076
1077 /* 1060 /*
1078 * Need not hold lo_ctl_mutex to fput backing file. 1061 * Need not hold lo_ctl_mutex to fput backing file.
1079 * Calling fput holding lo_ctl_mutex triggers a circular 1062 * Calling fput holding lo_ctl_mutex triggers a circular
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 92250af84e7d..32c678028e53 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -81,12 +81,17 @@
81/* Device instance number, incremented each time a device is probed. */ 81/* Device instance number, incremented each time a device is probed. */
82static int instance; 82static int instance;
83 83
84struct list_head online_list;
85struct list_head removing_list;
86spinlock_t dev_lock;
87
84/* 88/*
85 * Global variable used to hold the major block device number 89 * Global variable used to hold the major block device number
86 * allocated in mtip_init(). 90 * allocated in mtip_init().
87 */ 91 */
88static int mtip_major; 92static int mtip_major;
89static struct dentry *dfs_parent; 93static struct dentry *dfs_parent;
94static struct dentry *dfs_device_status;
90 95
91static u32 cpu_use[NR_CPUS]; 96static u32 cpu_use[NR_CPUS];
92 97
@@ -243,40 +248,31 @@ static inline void release_slot(struct mtip_port *port, int tag)
243/* 248/*
244 * Reset the HBA (without sleeping) 249 * Reset the HBA (without sleeping)
245 * 250 *
246 * Just like hba_reset, except does not call sleep, so can be
247 * run from interrupt/tasklet context.
248 *
249 * @dd Pointer to the driver data structure. 251 * @dd Pointer to the driver data structure.
250 * 252 *
251 * return value 253 * return value
252 * 0 The reset was successful. 254 * 0 The reset was successful.
253 * -1 The HBA Reset bit did not clear. 255 * -1 The HBA Reset bit did not clear.
254 */ 256 */
255static int hba_reset_nosleep(struct driver_data *dd) 257static int mtip_hba_reset(struct driver_data *dd)
256{ 258{
257 unsigned long timeout; 259 unsigned long timeout;
258 260
259 /* Chip quirk: quiesce any chip function */
260 mdelay(10);
261
262 /* Set the reset bit */ 261 /* Set the reset bit */
263 writel(HOST_RESET, dd->mmio + HOST_CTL); 262 writel(HOST_RESET, dd->mmio + HOST_CTL);
264 263
265 /* Flush */ 264 /* Flush */
266 readl(dd->mmio + HOST_CTL); 265 readl(dd->mmio + HOST_CTL);
267 266
268 /* 267 /* Spin for up to 2 seconds, waiting for reset acknowledgement */
269 * Wait 10ms then spin for up to 1 second 268 timeout = jiffies + msecs_to_jiffies(2000);
270 * waiting for reset acknowledgement 269 do {
271 */ 270 mdelay(10);
272 timeout = jiffies + msecs_to_jiffies(1000); 271 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
273 mdelay(10); 272 return -1;
274 while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
275 && time_before(jiffies, timeout))
276 mdelay(1);
277 273
278 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) 274 } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
279 return -1; 275 && time_before(jiffies, timeout));
280 276
281 if (readl(dd->mmio + HOST_CTL) & HOST_RESET) 277 if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
282 return -1; 278 return -1;
@@ -481,7 +477,7 @@ static void mtip_restart_port(struct mtip_port *port)
481 dev_warn(&port->dd->pdev->dev, 477 dev_warn(&port->dd->pdev->dev,
482 "PxCMD.CR not clear, escalating reset\n"); 478 "PxCMD.CR not clear, escalating reset\n");
483 479
484 if (hba_reset_nosleep(port->dd)) 480 if (mtip_hba_reset(port->dd))
485 dev_err(&port->dd->pdev->dev, 481 dev_err(&port->dd->pdev->dev,
486 "HBA reset escalation failed.\n"); 482 "HBA reset escalation failed.\n");
487 483
@@ -527,6 +523,26 @@ static void mtip_restart_port(struct mtip_port *port)
527 523
528} 524}
529 525
526static int mtip_device_reset(struct driver_data *dd)
527{
528 int rv = 0;
529
530 if (mtip_check_surprise_removal(dd->pdev))
531 return 0;
532
533 if (mtip_hba_reset(dd) < 0)
534 rv = -EFAULT;
535
536 mdelay(1);
537 mtip_init_port(dd->port);
538 mtip_start_port(dd->port);
539
540 /* Enable interrupts on the HBA. */
541 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
542 dd->mmio + HOST_CTL);
543 return rv;
544}
545
530/* 546/*
531 * Helper function for tag logging 547 * Helper function for tag logging
532 */ 548 */
@@ -632,7 +648,7 @@ static void mtip_timeout_function(unsigned long int data)
632 if (cmdto_cnt) { 648 if (cmdto_cnt) {
633 print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); 649 print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
634 if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { 650 if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
635 mtip_restart_port(port); 651 mtip_device_reset(port->dd);
636 wake_up_interruptible(&port->svc_wait); 652 wake_up_interruptible(&port->svc_wait);
637 } 653 }
638 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); 654 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
@@ -1283,11 +1299,11 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1283 int rv = 0, ready2go = 1; 1299 int rv = 0, ready2go = 1;
1284 struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL]; 1300 struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
1285 unsigned long to; 1301 unsigned long to;
1302 struct driver_data *dd = port->dd;
1286 1303
1287 /* Make sure the buffer is 8 byte aligned. This is asic specific. */ 1304 /* Make sure the buffer is 8 byte aligned. This is asic specific. */
1288 if (buffer & 0x00000007) { 1305 if (buffer & 0x00000007) {
1289 dev_err(&port->dd->pdev->dev, 1306 dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
1290 "SG buffer is not 8 byte aligned\n");
1291 return -EFAULT; 1307 return -EFAULT;
1292 } 1308 }
1293 1309
@@ -1300,23 +1316,21 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1300 mdelay(100); 1316 mdelay(100);
1301 } while (time_before(jiffies, to)); 1317 } while (time_before(jiffies, to));
1302 if (!ready2go) { 1318 if (!ready2go) {
1303 dev_warn(&port->dd->pdev->dev, 1319 dev_warn(&dd->pdev->dev,
1304 "Internal cmd active. new cmd [%02X]\n", fis->command); 1320 "Internal cmd active. new cmd [%02X]\n", fis->command);
1305 return -EBUSY; 1321 return -EBUSY;
1306 } 1322 }
1307 set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); 1323 set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
1308 port->ic_pause_timer = 0; 1324 port->ic_pause_timer = 0;
1309 1325
1310 if (fis->command == ATA_CMD_SEC_ERASE_UNIT) 1326 clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
1311 clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); 1327 clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
1312 else if (fis->command == ATA_CMD_DOWNLOAD_MICRO)
1313 clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
1314 1328
1315 if (atomic == GFP_KERNEL) { 1329 if (atomic == GFP_KERNEL) {
1316 if (fis->command != ATA_CMD_STANDBYNOW1) { 1330 if (fis->command != ATA_CMD_STANDBYNOW1) {
1317 /* wait for io to complete if non atomic */ 1331 /* wait for io to complete if non atomic */
1318 if (mtip_quiesce_io(port, 5000) < 0) { 1332 if (mtip_quiesce_io(port, 5000) < 0) {
1319 dev_warn(&port->dd->pdev->dev, 1333 dev_warn(&dd->pdev->dev,
1320 "Failed to quiesce IO\n"); 1334 "Failed to quiesce IO\n");
1321 release_slot(port, MTIP_TAG_INTERNAL); 1335 release_slot(port, MTIP_TAG_INTERNAL);
1322 clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); 1336 clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -1361,58 +1375,84 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1361 /* Issue the command to the hardware */ 1375 /* Issue the command to the hardware */
1362 mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL); 1376 mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
1363 1377
1364 /* Poll if atomic, wait_for_completion otherwise */
1365 if (atomic == GFP_KERNEL) { 1378 if (atomic == GFP_KERNEL) {
1366 /* Wait for the command to complete or timeout. */ 1379 /* Wait for the command to complete or timeout. */
1367 if (wait_for_completion_timeout( 1380 if (wait_for_completion_interruptible_timeout(
1368 &wait, 1381 &wait,
1369 msecs_to_jiffies(timeout)) == 0) { 1382 msecs_to_jiffies(timeout)) <= 0) {
1370 dev_err(&port->dd->pdev->dev, 1383 if (rv == -ERESTARTSYS) { /* interrupted */
1371 "Internal command did not complete [%d] " 1384 dev_err(&dd->pdev->dev,
1372 "within timeout of %lu ms\n", 1385 "Internal command [%02X] was interrupted after %lu ms\n",
1373 atomic, timeout); 1386 fis->command, timeout);
1374 if (mtip_check_surprise_removal(port->dd->pdev) || 1387 rv = -EINTR;
1388 goto exec_ic_exit;
1389 } else if (rv == 0) /* timeout */
1390 dev_err(&dd->pdev->dev,
1391 "Internal command did not complete [%02X] within timeout of %lu ms\n",
1392 fis->command, timeout);
1393 else
1394 dev_err(&dd->pdev->dev,
1395 "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
1396 fis->command, rv, timeout);
1397
1398 if (mtip_check_surprise_removal(dd->pdev) ||
1375 test_bit(MTIP_DDF_REMOVE_PENDING_BIT, 1399 test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1376 &port->dd->dd_flag)) { 1400 &dd->dd_flag)) {
1401 dev_err(&dd->pdev->dev,
1402 "Internal command [%02X] wait returned due to SR\n",
1403 fis->command);
1377 rv = -ENXIO; 1404 rv = -ENXIO;
1378 goto exec_ic_exit; 1405 goto exec_ic_exit;
1379 } 1406 }
1407 mtip_device_reset(dd); /* recover from timeout issue */
1380 rv = -EAGAIN; 1408 rv = -EAGAIN;
1409 goto exec_ic_exit;
1381 } 1410 }
1382 } else { 1411 } else {
1412 u32 hba_stat, port_stat;
1413
1383 /* Spin for <timeout> checking if command still outstanding */ 1414 /* Spin for <timeout> checking if command still outstanding */
1384 timeout = jiffies + msecs_to_jiffies(timeout); 1415 timeout = jiffies + msecs_to_jiffies(timeout);
1385 while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL]) 1416 while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1386 & (1 << MTIP_TAG_INTERNAL)) 1417 & (1 << MTIP_TAG_INTERNAL))
1387 && time_before(jiffies, timeout)) { 1418 && time_before(jiffies, timeout)) {
1388 if (mtip_check_surprise_removal(port->dd->pdev)) { 1419 if (mtip_check_surprise_removal(dd->pdev)) {
1389 rv = -ENXIO; 1420 rv = -ENXIO;
1390 goto exec_ic_exit; 1421 goto exec_ic_exit;
1391 } 1422 }
1392 if ((fis->command != ATA_CMD_STANDBYNOW1) && 1423 if ((fis->command != ATA_CMD_STANDBYNOW1) &&
1393 test_bit(MTIP_DDF_REMOVE_PENDING_BIT, 1424 test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1394 &port->dd->dd_flag)) { 1425 &dd->dd_flag)) {
1395 rv = -ENXIO; 1426 rv = -ENXIO;
1396 goto exec_ic_exit; 1427 goto exec_ic_exit;
1397 } 1428 }
1398 if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) { 1429 port_stat = readl(port->mmio + PORT_IRQ_STAT);
1399 atomic_inc(&int_cmd->active); /* error */ 1430 if (!port_stat)
1400 break; 1431 continue;
1432
1433 if (port_stat & PORT_IRQ_ERR) {
1434 dev_err(&dd->pdev->dev,
1435 "Internal command [%02X] failed\n",
1436 fis->command);
1437 mtip_device_reset(dd);
1438 rv = -EIO;
1439 goto exec_ic_exit;
1440 } else {
1441 writel(port_stat, port->mmio + PORT_IRQ_STAT);
1442 hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
1443 if (hba_stat)
1444 writel(hba_stat,
1445 dd->mmio + HOST_IRQ_STAT);
1401 } 1446 }
1447 break;
1402 } 1448 }
1403 } 1449 }
1404 1450
1405 if (atomic_read(&int_cmd->active) > 1) {
1406 dev_err(&port->dd->pdev->dev,
1407 "Internal command [%02X] failed\n", fis->command);
1408 rv = -EIO;
1409 }
1410 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) 1451 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1411 & (1 << MTIP_TAG_INTERNAL)) { 1452 & (1 << MTIP_TAG_INTERNAL)) {
1412 rv = -ENXIO; 1453 rv = -ENXIO;
1413 if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, 1454 if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
1414 &port->dd->dd_flag)) { 1455 mtip_device_reset(dd);
1415 mtip_restart_port(port);
1416 rv = -EAGAIN; 1456 rv = -EAGAIN;
1417 } 1457 }
1418 } 1458 }
@@ -1724,7 +1764,8 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
1724 * -EINVAL Invalid parameters passed in, trim not supported 1764 * -EINVAL Invalid parameters passed in, trim not supported
1725 * -EIO Error submitting trim request to hw 1765 * -EIO Error submitting trim request to hw
1726 */ 1766 */
1727static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len) 1767static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
1768 unsigned int len)
1728{ 1769{
1729 int i, rv = 0; 1770 int i, rv = 0;
1730 u64 tlba, tlen, sect_left; 1771 u64 tlba, tlen, sect_left;
@@ -1811,45 +1852,6 @@ static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
1811} 1852}
1812 1853
1813/* 1854/*
1814 * Reset the HBA.
1815 *
1816 * Resets the HBA by setting the HBA Reset bit in the Global
1817 * HBA Control register. After setting the HBA Reset bit the
1818 * function waits for 1 second before reading the HBA Reset
1819 * bit to make sure it has cleared. If HBA Reset is not clear
1820 * an error is returned. Cannot be used in non-blockable
1821 * context.
1822 *
1823 * @dd Pointer to the driver data structure.
1824 *
1825 * return value
1826 * 0 The reset was successful.
1827 * -1 The HBA Reset bit did not clear.
1828 */
1829static int mtip_hba_reset(struct driver_data *dd)
1830{
1831 mtip_deinit_port(dd->port);
1832
1833 /* Set the reset bit */
1834 writel(HOST_RESET, dd->mmio + HOST_CTL);
1835
1836 /* Flush */
1837 readl(dd->mmio + HOST_CTL);
1838
1839 /* Wait for reset to clear */
1840 ssleep(1);
1841
1842 /* Check the bit has cleared */
1843 if (readl(dd->mmio + HOST_CTL) & HOST_RESET) {
1844 dev_err(&dd->pdev->dev,
1845 "Reset bit did not clear.\n");
1846 return -1;
1847 }
1848
1849 return 0;
1850}
1851
1852/*
1853 * Display the identify command data. 1855 * Display the identify command data.
1854 * 1856 *
1855 * @port Pointer to the port data structure. 1857 * @port Pointer to the port data structure.
@@ -2710,6 +2712,100 @@ static ssize_t mtip_hw_show_status(struct device *dev,
2710 2712
2711static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); 2713static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
2712 2714
 2715/* debugfs entries */
2716
2717static ssize_t show_device_status(struct device_driver *drv, char *buf)
2718{
2719 int size = 0;
2720 struct driver_data *dd, *tmp;
2721 unsigned long flags;
2722 char id_buf[42];
2723 u16 status = 0;
2724
2725 spin_lock_irqsave(&dev_lock, flags);
2726 size += sprintf(&buf[size], "Devices Present:\n");
2727 list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
2728 if (dd->pdev) {
2729 if (dd->port &&
2730 dd->port->identify &&
2731 dd->port->identify_valid) {
2732 strlcpy(id_buf,
2733 (char *) (dd->port->identify + 10), 21);
2734 status = *(dd->port->identify + 141);
2735 } else {
2736 memset(id_buf, 0, 42);
2737 status = 0;
2738 }
2739
2740 if (dd->port &&
2741 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
2742 size += sprintf(&buf[size],
2743 " device %s %s (ftl rebuild %d %%)\n",
2744 dev_name(&dd->pdev->dev),
2745 id_buf,
2746 status);
2747 } else {
2748 size += sprintf(&buf[size],
2749 " device %s %s\n",
2750 dev_name(&dd->pdev->dev),
2751 id_buf);
2752 }
2753 }
2754 }
2755
2756 size += sprintf(&buf[size], "Devices Being Removed:\n");
2757 list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
2758 if (dd->pdev) {
2759 if (dd->port &&
2760 dd->port->identify &&
2761 dd->port->identify_valid) {
2762 strlcpy(id_buf,
2763 (char *) (dd->port->identify+10), 21);
2764 status = *(dd->port->identify + 141);
2765 } else {
2766 memset(id_buf, 0, 42);
2767 status = 0;
2768 }
2769
2770 if (dd->port &&
2771 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
2772 size += sprintf(&buf[size],
2773 " device %s %s (ftl rebuild %d %%)\n",
2774 dev_name(&dd->pdev->dev),
2775 id_buf,
2776 status);
2777 } else {
2778 size += sprintf(&buf[size],
2779 " device %s %s\n",
2780 dev_name(&dd->pdev->dev),
2781 id_buf);
2782 }
2783 }
2784 }
2785 spin_unlock_irqrestore(&dev_lock, flags);
2786
2787 return size;
2788}
2789
2790static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
2791 size_t len, loff_t *offset)
2792{
2793 int size = *offset;
2794 char buf[MTIP_DFS_MAX_BUF_SIZE];
2795
2796 if (!len || *offset)
2797 return 0;
2798
2799 size += show_device_status(NULL, buf);
2800
2801 *offset = size <= len ? size : len;
2802 size = copy_to_user(ubuf, buf, *offset);
2803 if (size)
2804 return -EFAULT;
2805
2806 return *offset;
2807}
2808
2713static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, 2809static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
2714 size_t len, loff_t *offset) 2810 size_t len, loff_t *offset)
2715{ 2811{
@@ -2804,6 +2900,13 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
2804 return *offset; 2900 return *offset;
2805} 2901}
2806 2902
2903static const struct file_operations mtip_device_status_fops = {
2904 .owner = THIS_MODULE,
2905 .open = simple_open,
2906 .read = mtip_hw_read_device_status,
2907 .llseek = no_llseek,
2908};
2909
2807static const struct file_operations mtip_regs_fops = { 2910static const struct file_operations mtip_regs_fops = {
2808 .owner = THIS_MODULE, 2911 .owner = THIS_MODULE,
2809 .open = simple_open, 2912 .open = simple_open,
@@ -4161,6 +4264,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
4161 const struct cpumask *node_mask; 4264 const struct cpumask *node_mask;
4162 int cpu, i = 0, j = 0; 4265 int cpu, i = 0, j = 0;
4163 int my_node = NUMA_NO_NODE; 4266 int my_node = NUMA_NO_NODE;
4267 unsigned long flags;
4164 4268
4165 /* Allocate memory for this devices private data. */ 4269 /* Allocate memory for this devices private data. */
4166 my_node = pcibus_to_node(pdev->bus); 4270 my_node = pcibus_to_node(pdev->bus);
@@ -4218,6 +4322,9 @@ static int mtip_pci_probe(struct pci_dev *pdev,
4218 dd->pdev = pdev; 4322 dd->pdev = pdev;
4219 dd->numa_node = my_node; 4323 dd->numa_node = my_node;
4220 4324
4325 INIT_LIST_HEAD(&dd->online_list);
4326 INIT_LIST_HEAD(&dd->remove_list);
4327
4221 memset(dd->workq_name, 0, 32); 4328 memset(dd->workq_name, 0, 32);
4222 snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); 4329 snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
4223 4330
@@ -4305,6 +4412,14 @@ static int mtip_pci_probe(struct pci_dev *pdev,
4305 instance++; 4412 instance++;
4306 if (rv != MTIP_FTL_REBUILD_MAGIC) 4413 if (rv != MTIP_FTL_REBUILD_MAGIC)
4307 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); 4414 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
4415 else
4416 rv = 0; /* device in rebuild state, return 0 from probe */
4417
4418 /* Add to online list even if in ftl rebuild */
4419 spin_lock_irqsave(&dev_lock, flags);
4420 list_add(&dd->online_list, &online_list);
4421 spin_unlock_irqrestore(&dev_lock, flags);
4422
4308 goto done; 4423 goto done;
4309 4424
4310block_initialize_err: 4425block_initialize_err:
@@ -4338,9 +4453,15 @@ static void mtip_pci_remove(struct pci_dev *pdev)
4338{ 4453{
4339 struct driver_data *dd = pci_get_drvdata(pdev); 4454 struct driver_data *dd = pci_get_drvdata(pdev);
4340 int counter = 0; 4455 int counter = 0;
4456 unsigned long flags;
4341 4457
4342 set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); 4458 set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
4343 4459
4460 spin_lock_irqsave(&dev_lock, flags);
4461 list_del_init(&dd->online_list);
4462 list_add(&dd->remove_list, &removing_list);
4463 spin_unlock_irqrestore(&dev_lock, flags);
4464
4344 if (mtip_check_surprise_removal(pdev)) { 4465 if (mtip_check_surprise_removal(pdev)) {
4345 while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { 4466 while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
4346 counter++; 4467 counter++;
@@ -4366,6 +4487,10 @@ static void mtip_pci_remove(struct pci_dev *pdev)
4366 4487
4367 pci_disable_msi(pdev); 4488 pci_disable_msi(pdev);
4368 4489
4490 spin_lock_irqsave(&dev_lock, flags);
4491 list_del_init(&dd->remove_list);
4492 spin_unlock_irqrestore(&dev_lock, flags);
4493
4369 kfree(dd); 4494 kfree(dd);
4370 pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); 4495 pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
4371} 4496}
@@ -4513,6 +4638,11 @@ static int __init mtip_init(void)
4513 4638
4514 pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); 4639 pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
4515 4640
4641 spin_lock_init(&dev_lock);
4642
4643 INIT_LIST_HEAD(&online_list);
4644 INIT_LIST_HEAD(&removing_list);
4645
4516 /* Allocate a major block device number to use with this driver. */ 4646 /* Allocate a major block device number to use with this driver. */
4517 error = register_blkdev(0, MTIP_DRV_NAME); 4647 error = register_blkdev(0, MTIP_DRV_NAME);
4518 if (error <= 0) { 4648 if (error <= 0) {
@@ -4522,11 +4652,18 @@ static int __init mtip_init(void)
4522 } 4652 }
4523 mtip_major = error; 4653 mtip_major = error;
4524 4654
4525 if (!dfs_parent) { 4655 dfs_parent = debugfs_create_dir("rssd", NULL);
4526 dfs_parent = debugfs_create_dir("rssd", NULL); 4656 if (IS_ERR_OR_NULL(dfs_parent)) {
4527 if (IS_ERR_OR_NULL(dfs_parent)) { 4657 pr_warn("Error creating debugfs parent\n");
4528 pr_warn("Error creating debugfs parent\n"); 4658 dfs_parent = NULL;
4529 dfs_parent = NULL; 4659 }
4660 if (dfs_parent) {
4661 dfs_device_status = debugfs_create_file("device_status",
4662 S_IRUGO, dfs_parent, NULL,
4663 &mtip_device_status_fops);
4664 if (IS_ERR_OR_NULL(dfs_device_status)) {
4665 pr_err("Error creating device_status node\n");
4666 dfs_device_status = NULL;
4530 } 4667 }
4531 } 4668 }
4532 4669
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 3bffff5f670c..8e8334c9dd0f 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -129,9 +129,9 @@ enum {
129 MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */ 129 MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
130 MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */ 130 MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
131 MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */ 131 MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */
132 MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \ 132 MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) |
133 (1 << MTIP_PF_EH_ACTIVE_BIT) | \ 133 (1 << MTIP_PF_EH_ACTIVE_BIT) |
134 (1 << MTIP_PF_SE_ACTIVE_BIT) | \ 134 (1 << MTIP_PF_SE_ACTIVE_BIT) |
135 (1 << MTIP_PF_DM_ACTIVE_BIT)), 135 (1 << MTIP_PF_DM_ACTIVE_BIT)),
136 136
137 MTIP_PF_SVC_THD_ACTIVE_BIT = 4, 137 MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
@@ -144,9 +144,9 @@ enum {
144 MTIP_DDF_REMOVE_PENDING_BIT = 1, 144 MTIP_DDF_REMOVE_PENDING_BIT = 1,
145 MTIP_DDF_OVER_TEMP_BIT = 2, 145 MTIP_DDF_OVER_TEMP_BIT = 2,
146 MTIP_DDF_WRITE_PROTECT_BIT = 3, 146 MTIP_DDF_WRITE_PROTECT_BIT = 3,
147 MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \ 147 MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
148 (1 << MTIP_DDF_SEC_LOCK_BIT) | \ 148 (1 << MTIP_DDF_SEC_LOCK_BIT) |
149 (1 << MTIP_DDF_OVER_TEMP_BIT) | \ 149 (1 << MTIP_DDF_OVER_TEMP_BIT) |
150 (1 << MTIP_DDF_WRITE_PROTECT_BIT)), 150 (1 << MTIP_DDF_WRITE_PROTECT_BIT)),
151 151
152 MTIP_DDF_CLEANUP_BIT = 5, 152 MTIP_DDF_CLEANUP_BIT = 5,
@@ -180,7 +180,7 @@ struct mtip_work {
180 180
181#define MTIP_TRIM_TIMEOUT_MS 240000 181#define MTIP_TRIM_TIMEOUT_MS 240000
182#define MTIP_MAX_TRIM_ENTRIES 8 182#define MTIP_MAX_TRIM_ENTRIES 8
183#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8 183#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8
184 184
185struct mtip_trim_entry { 185struct mtip_trim_entry {
186 u32 lba; /* starting lba of region */ 186 u32 lba; /* starting lba of region */
@@ -501,6 +501,10 @@ struct driver_data {
501 atomic_t irq_workers_active; 501 atomic_t irq_workers_active;
502 502
503 int isr_binding; 503 int isr_binding;
504
505 struct list_head online_list; /* linkage for online list */
506
507 struct list_head remove_list; /* linkage for removing list */
504}; 508};
505 509
506#endif 510#endif
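The mtip32xx changes above keep every probed device on a driver-global online_list (and a removing_list during teardown), both guarded by dev_lock, and publish the result through a read-only debugfs node created under the "rssd" directory as "device_status". Below is a minimal sketch of that read-only debugfs pattern; it is illustrative only, and the example_* names are not part of the driver.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>

static ssize_t example_status_read(struct file *f, char __user *ubuf,
				   size_t len, loff_t *offset)
{
	char buf[64];
	int size;

	/* single-shot read, like the driver's handler */
	if (!len || *offset)
		return 0;

	size = scnprintf(buf, sizeof(buf), "Devices Present:\n");
	*offset = min_t(loff_t, size, len);
	if (copy_to_user(ubuf, buf, *offset))
		return -EFAULT;
	return *offset;
}

static const struct file_operations example_status_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= example_status_read,
	.llseek	= no_llseek,
};

/* In module init, mirroring the hunk above:
 *   dir = debugfs_create_dir("rssd", NULL);
 *   debugfs_create_file("device_status", S_IRUGO, dir, NULL,
 *		       &example_status_fops);
 */

Assuming debugfs is mounted in the usual place, the node added by this commit can then be read with: cat /sys/kernel/debug/rssd/device_status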
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index ad72922919ed..6133ef5cf671 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -502,7 +502,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
502 502
503 sample_time = cpu->pstate_policy->sample_rate_ms; 503 sample_time = cpu->pstate_policy->sample_rate_ms;
504 delay = msecs_to_jiffies(sample_time); 504 delay = msecs_to_jiffies(sample_time);
505 delay -= jiffies % delay;
506 mod_timer_pinned(&cpu->timer, jiffies + delay); 505 mod_timer_pinned(&cpu->timer, jiffies + delay);
507} 506}
508 507
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 8bc5fef07e7a..22c9063e0120 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = {
1750 .shutdown = ux500_cryp_shutdown, 1750 .shutdown = ux500_cryp_shutdown,
1751 .driver = { 1751 .driver = {
1752 .owner = THIS_MODULE, 1752 .owner = THIS_MODULE,
1753 .name = "cryp1" 1753 .name = "cryp1",
1754 .pm = &ux500_cryp_pm, 1754 .pm = &ux500_cryp_pm,
1755 } 1755 }
1756}; 1756};
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index c4b4fd2acc42..08b43bf37158 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -276,12 +276,20 @@ static void omap_dma_issue_pending(struct dma_chan *chan)
276 276
277 spin_lock_irqsave(&c->vc.lock, flags); 277 spin_lock_irqsave(&c->vc.lock, flags);
278 if (vchan_issue_pending(&c->vc) && !c->desc) { 278 if (vchan_issue_pending(&c->vc) && !c->desc) {
279 struct omap_dmadev *d = to_omap_dma_dev(chan->device); 279 /*
 280 spin_lock(&d->lock); 280 * c->cyclic is used only by audio and in this case the DMA needs

281 if (list_empty(&c->node)) 281 * to be started without delay.
282 list_add_tail(&c->node, &d->pending); 282 */
283 spin_unlock(&d->lock); 283 if (!c->cyclic) {
284 tasklet_schedule(&d->task); 284 struct omap_dmadev *d = to_omap_dma_dev(chan->device);
285 spin_lock(&d->lock);
286 if (list_empty(&c->node))
287 list_add_tail(&c->node, &d->pending);
288 spin_unlock(&d->lock);
289 tasklet_schedule(&d->task);
290 } else {
291 omap_dma_start_desc(c);
292 }
285 } 293 }
286 spin_unlock_irqrestore(&c->vc.lock, flags); 294 spin_unlock_irqrestore(&c->vc.lock, flags);
287} 295}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 718153122759..5dbc5946c4c3 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2882,7 +2882,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2882{ 2882{
2883 struct dma_pl330_platdata *pdat; 2883 struct dma_pl330_platdata *pdat;
2884 struct dma_pl330_dmac *pdmac; 2884 struct dma_pl330_dmac *pdmac;
2885 struct dma_pl330_chan *pch; 2885 struct dma_pl330_chan *pch, *_p;
2886 struct pl330_info *pi; 2886 struct pl330_info *pi;
2887 struct dma_device *pd; 2887 struct dma_device *pd;
2888 struct resource *res; 2888 struct resource *res;
@@ -2984,7 +2984,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2984 ret = dma_async_device_register(pd); 2984 ret = dma_async_device_register(pd);
2985 if (ret) { 2985 if (ret) {
2986 dev_err(&adev->dev, "unable to register DMAC\n"); 2986 dev_err(&adev->dev, "unable to register DMAC\n");
2987 goto probe_err2; 2987 goto probe_err3;
2988 }
2989
2990 if (adev->dev.of_node) {
2991 ret = of_dma_controller_register(adev->dev.of_node,
2992 of_dma_pl330_xlate, pdmac);
2993 if (ret) {
2994 dev_err(&adev->dev,
2995 "unable to register DMA to the generic DT DMA helpers\n");
2996 }
2988 } 2997 }
2989 2998
2990 dev_info(&adev->dev, 2999 dev_info(&adev->dev,
@@ -2995,16 +3004,21 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2995 pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, 3004 pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
2996 pi->pcfg.num_peri, pi->pcfg.num_events); 3005 pi->pcfg.num_peri, pi->pcfg.num_events);
2997 3006
2998 ret = of_dma_controller_register(adev->dev.of_node,
2999 of_dma_pl330_xlate, pdmac);
3000 if (ret) {
3001 dev_err(&adev->dev,
3002 "unable to register DMA to the generic DT DMA helpers\n");
3003 goto probe_err2;
3004 }
3005
3006 return 0; 3007 return 0;
3008probe_err3:
3009 amba_set_drvdata(adev, NULL);
3007 3010
3011 /* Idle the DMAC */
3012 list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
3013 chan.device_node) {
3014
3015 /* Remove the channel */
3016 list_del(&pch->chan.device_node);
3017
3018 /* Flush the channel */
3019 pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
3020 pl330_free_chan_resources(&pch->chan);
3021 }
3008probe_err2: 3022probe_err2:
3009 pl330_del(pi); 3023 pl330_del(pi);
3010probe_err1: 3024probe_err1:
@@ -3023,8 +3037,10 @@ static int pl330_remove(struct amba_device *adev)
3023 if (!pdmac) 3037 if (!pdmac)
3024 return 0; 3038 return 0;
3025 3039
3026 of_dma_controller_free(adev->dev.of_node); 3040 if (adev->dev.of_node)
3041 of_dma_controller_free(adev->dev.of_node);
3027 3042
3043 dma_async_device_unregister(&pdmac->ddma);
3028 amba_set_drvdata(adev, NULL); 3044 amba_set_drvdata(adev, NULL);
3029 3045
3030 /* Idle the DMAC */ 3046 /* Idle the DMAC */
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 24059462c87f..9391cf16e990 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -575,7 +575,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
575 chip->gpio_chip.ngpio, 575 chip->gpio_chip.ngpio,
576 irq_base, 576 irq_base,
577 &pca953x_irq_simple_ops, 577 &pca953x_irq_simple_ops,
578 NULL); 578 chip);
579 if (!chip->domain) 579 if (!chip->domain)
580 return -ENODEV; 580 return -ENODEV;
581 581
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 59d6b9bf204b..892ff9f95975 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1544,10 +1544,10 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1544 if (!fb_helper->fb) 1544 if (!fb_helper->fb)
1545 return 0; 1545 return 0;
1546 1546
1547 drm_modeset_lock_all(dev); 1547 mutex_lock(&fb_helper->dev->mode_config.mutex);
1548 if (!drm_fb_helper_is_bound(fb_helper)) { 1548 if (!drm_fb_helper_is_bound(fb_helper)) {
1549 fb_helper->delayed_hotplug = true; 1549 fb_helper->delayed_hotplug = true;
1550 drm_modeset_unlock_all(dev); 1550 mutex_unlock(&fb_helper->dev->mode_config.mutex);
1551 return 0; 1551 return 0;
1552 } 1552 }
1553 DRM_DEBUG_KMS("\n"); 1553 DRM_DEBUG_KMS("\n");
@@ -1558,9 +1558,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1558 1558
1559 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, 1559 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
1560 max_height); 1560 max_height);
1561 mutex_unlock(&fb_helper->dev->mode_config.mutex);
1562
1563 drm_modeset_lock_all(dev);
1561 drm_setup_crtcs(fb_helper); 1564 drm_setup_crtcs(fb_helper);
1562 drm_modeset_unlock_all(dev); 1565 drm_modeset_unlock_all(dev);
1563
1564 drm_fb_helper_set_par(fb_helper->fbdev); 1566 drm_fb_helper_set_par(fb_helper->fbdev);
1565 1567
1566 return 0; 1568 return 0;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index fe22bb780e1d..78d8e919509f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -751,8 +751,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
751 int i; 751 int i;
752 unsigned char misc = 0; 752 unsigned char misc = 0;
753 unsigned char ext_vga[6]; 753 unsigned char ext_vga[6];
754 unsigned char ext_vga_index24;
755 unsigned char dac_index90 = 0;
756 u8 bppshift; 754 u8 bppshift;
757 755
758 static unsigned char dacvalue[] = { 756 static unsigned char dacvalue[] = {
@@ -803,7 +801,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
803 option2 = 0x0000b000; 801 option2 = 0x0000b000;
804 break; 802 break;
805 case G200_ER: 803 case G200_ER:
806 dac_index90 = 0;
807 break; 804 break;
808 } 805 }
809 806
@@ -852,10 +849,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
852 WREG_DAC(i, dacvalue[i]); 849 WREG_DAC(i, dacvalue[i]);
853 } 850 }
854 851
855 if (mdev->type == G200_ER) { 852 if (mdev->type == G200_ER)
856 WREG_DAC(0x90, dac_index90); 853 WREG_DAC(0x90, 0);
857 }
858
859 854
860 if (option) 855 if (option)
861 pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); 856 pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
@@ -952,8 +947,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
952 if (mdev->type == G200_WB) 947 if (mdev->type == G200_WB)
953 ext_vga[1] |= 0x88; 948 ext_vga[1] |= 0x88;
954 949
955 ext_vga_index24 = 0x05;
956
957 /* Set pixel clocks */ 950 /* Set pixel clocks */
958 misc = 0x2d; 951 misc = 0x2d;
959 WREG8(MGA_MISC_OUT, misc); 952 WREG8(MGA_MISC_OUT, misc);
@@ -965,7 +958,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
965 } 958 }
966 959
967 if (mdev->type == G200_ER) 960 if (mdev->type == G200_ER)
968 WREG_ECRT(24, ext_vga_index24); 961 WREG_ECRT(0x24, 0x5);
969 962
970 if (mdev->type == G200_EV) { 963 if (mdev->type == G200_EV) {
971 WREG_ECRT(6, 0); 964 WREG_ECRT(6, 0);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7f0e6c3f37d1..1ddc03e51bf4 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data)
479{ 479{
480 struct nv50_display_flip *flip = data; 480 struct nv50_display_flip *flip = data;
481 if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) == 481 if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
482 flip->chan->data); 482 flip->chan->data)
483 return true; 483 return true;
484 usleep_range(1, 2); 484 usleep_range(1, 2);
485 return false; 485 return false;
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index fe5cdbcf2636..b44d548c56f8 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -61,6 +61,10 @@ static int udl_get_modes(struct drm_connector *connector)
61 int ret; 61 int ret;
62 62
63 edid = (struct edid *)udl_get_edid(udl); 63 edid = (struct edid *)udl_get_edid(udl);
64 if (!edid) {
65 drm_mode_connector_update_edid_property(connector, NULL);
66 return 0;
67 }
64 68
65 /* 69 /*
66 * We only read the main block, but if the monitor reports extension 70 * We only read the main block, but if the monitor reports extension
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index db713c0dfba4..461a0d739d75 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -416,6 +416,8 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock)
416 ret = pm_runtime_get_sync(dev); 416 ret = pm_runtime_get_sync(dev);
417 if (ret < 0) { 417 if (ret < 0) {
418 dev_err(dev, "%s: can't power on device\n", __func__); 418 dev_err(dev, "%s: can't power on device\n", __func__);
419 pm_runtime_put_noidle(dev);
420 module_put(dev->driver->owner);
419 return ret; 421 return ret;
420 } 422 }
421 423
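The hwspinlock fix above matters because pm_runtime_get_sync() raises the device's runtime-PM usage count even when it returns an error, so the failure path must drop that reference (and, as the added module_put() shows, the module reference taken earlier in __hwspin_lock_request()), otherwise the device can never runtime-suspend again. A minimal sketch of the balanced error path; the example_ name is illustrative:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_power_on(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* get_sync() bumped the usage count even though it failed */
		pm_runtime_put_noidle(dev);
		return ret;
	}
	/* ... use the device, then pm_runtime_put(dev) when done ... */
	return 0;
}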
diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig
index 39c2ecadb273..ea98f7e9ccd1 100644
--- a/drivers/misc/vmw_vmci/Kconfig
+++ b/drivers/misc/vmw_vmci/Kconfig
@@ -4,7 +4,7 @@
4 4
5config VMWARE_VMCI 5config VMWARE_VMCI
6 tristate "VMware VMCI Driver" 6 tristate "VMware VMCI Driver"
7 depends on X86 && PCI 7 depends on X86 && PCI && NET
8 help 8 help
9 This is VMware's Virtual Machine Communication Interface. It enables 9 This is VMware's Virtual Machine Communication Interface. It enables
10 high-speed communication between host and guest in a virtual 10 high-speed communication between host and guest in a virtual
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 171b10f167a5..07401a3e256b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4846,9 +4846,18 @@ static int __net_init bond_net_init(struct net *net)
4846static void __net_exit bond_net_exit(struct net *net) 4846static void __net_exit bond_net_exit(struct net *net)
4847{ 4847{
4848 struct bond_net *bn = net_generic(net, bond_net_id); 4848 struct bond_net *bn = net_generic(net, bond_net_id);
4849 struct bonding *bond, *tmp_bond;
4850 LIST_HEAD(list);
4849 4851
4850 bond_destroy_sysfs(bn); 4852 bond_destroy_sysfs(bn);
4851 bond_destroy_proc_dir(bn); 4853 bond_destroy_proc_dir(bn);
4854
4855 /* Kill off any bonds created after unregistering bond rtnl ops */
4856 rtnl_lock();
4857 list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
4858 unregister_netdevice_queue(bond->dev, &list);
4859 unregister_netdevice_many(&list);
4860 rtnl_unlock();
4852} 4861}
4853 4862
4854static struct pernet_operations bond_net_ops = { 4863static struct pernet_operations bond_net_ops = {
@@ -4902,8 +4911,8 @@ static void __exit bonding_exit(void)
4902 4911
4903 bond_destroy_debugfs(); 4912 bond_destroy_debugfs();
4904 4913
4905 unregister_pernet_subsys(&bond_net_ops);
4906 rtnl_link_unregister(&bond_link_ops); 4914 rtnl_link_unregister(&bond_link_ops);
4915 unregister_pernet_subsys(&bond_net_ops);
4907 4916
4908#ifdef CONFIG_NET_POLL_CONTROLLER 4917#ifdef CONFIG_NET_POLL_CONTROLLER
4909 /* 4918 /*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 77ebae0ac64a..0283f343b0d1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -13437,13 +13437,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13437{ 13437{
13438 struct bnx2x *bp = params->bp; 13438 struct bnx2x *bp = params->bp;
13439 u16 base_page, next_page, not_kr2_device, lane; 13439 u16 base_page, next_page, not_kr2_device, lane;
13440 int sigdet = bnx2x_warpcore_get_sigdet(phy, params); 13440 int sigdet;
13441
13442 if (!sigdet) {
13443 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
13444 bnx2x_kr2_recovery(params, vars, phy);
13445 return;
13446 }
13447 13441
13448 /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery 13442 /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
13449 * since some switches tend to reinit the AN process and clear the 13443 * since some switches tend to reinit the AN process and clear the
@@ -13454,6 +13448,16 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13454 vars->check_kr2_recovery_cnt--; 13448 vars->check_kr2_recovery_cnt--;
13455 return; 13449 return;
13456 } 13450 }
13451
13452 sigdet = bnx2x_warpcore_get_sigdet(phy, params);
13453 if (!sigdet) {
13454 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
13455 bnx2x_kr2_recovery(params, vars, phy);
13456 DP(NETIF_MSG_LINK, "No sigdet\n");
13457 }
13458 return;
13459 }
13460
13457 lane = bnx2x_get_warpcore_lane(phy, params); 13461 lane = bnx2x_get_warpcore_lane(phy, params);
13458 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 13462 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
13459 MDIO_AER_BLOCK_AER_REG, lane); 13463 MDIO_AER_BLOCK_AER_REG, lane);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e81a747ea8ce..8e58da909f5c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
4947 q); 4947 q);
4948 } 4948 }
4949 4949
4950 if (!NO_FCOE(bp)) { 4950 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
4951 fp = &bp->fp[FCOE_IDX(bp)]; 4951 fp = &bp->fp[FCOE_IDX(bp)];
4952 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 4952 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
4953 4953
@@ -13354,6 +13354,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
13354 RCU_INIT_POINTER(bp->cnic_ops, NULL); 13354 RCU_INIT_POINTER(bp->cnic_ops, NULL);
13355 mutex_unlock(&bp->cnic_mutex); 13355 mutex_unlock(&bp->cnic_mutex);
13356 synchronize_rcu(); 13356 synchronize_rcu();
13357 bp->cnic_enabled = false;
13357 kfree(bp->cnic_kwq); 13358 kfree(bp->cnic_kwq);
13358 bp->cnic_kwq = NULL; 13359 bp->cnic_kwq = NULL;
13359 13360
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ec800b093e7e..d2bea3f07c73 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -870,7 +870,7 @@ err_unlock:
870} 870}
871 871
872static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, 872static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
873 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) 873 int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
874{ 874{
875 struct cb *cb; 875 struct cb *cb;
876 unsigned long flags; 876 unsigned long flags;
@@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
888 nic->cbs_avail--; 888 nic->cbs_avail--;
889 cb->skb = skb; 889 cb->skb = skb;
890 890
891 err = cb_prepare(nic, cb, skb);
892 if (err)
893 goto err_unlock;
894
891 if (unlikely(!nic->cbs_avail)) 895 if (unlikely(!nic->cbs_avail))
892 err = -ENOSPC; 896 err = -ENOSPC;
893 897
894 cb_prepare(nic, cb, skb);
895 898
896 /* Order is important otherwise we'll be in a race with h/w: 899 /* Order is important otherwise we'll be in a race with h/w:
897 * set S-bit in current first, then clear S-bit in previous. */ 900 * set S-bit in current first, then clear S-bit in previous. */
@@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic)
1091 nic->mii.mdio_write = mdio_write; 1094 nic->mii.mdio_write = mdio_write;
1092} 1095}
1093 1096
1094static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1097static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1095{ 1098{
1096 struct config *config = &cb->u.config; 1099 struct config *config = &cb->u.config;
1097 u8 *c = (u8 *)config; 1100 u8 *c = (u8 *)config;
@@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1181 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, 1184 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1182 "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", 1185 "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1183 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); 1186 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
1187 return 0;
1184} 1188}
1185 1189
1186/************************************************************************* 1190/*************************************************************************
@@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
1331 return fw; 1335 return fw;
1332} 1336}
1333 1337
1334static void e100_setup_ucode(struct nic *nic, struct cb *cb, 1338static int e100_setup_ucode(struct nic *nic, struct cb *cb,
1335 struct sk_buff *skb) 1339 struct sk_buff *skb)
1336{ 1340{
1337 const struct firmware *fw = (void *)skb; 1341 const struct firmware *fw = (void *)skb;
@@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb,
1358 cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); 1362 cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
1359 1363
1360 cb->command = cpu_to_le16(cb_ucode | cb_el); 1364 cb->command = cpu_to_le16(cb_ucode | cb_el);
1365 return 0;
1361} 1366}
1362 1367
1363static inline int e100_load_ucode_wait(struct nic *nic) 1368static inline int e100_load_ucode_wait(struct nic *nic)
@@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic)
1400 return err; 1405 return err;
1401} 1406}
1402 1407
1403static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, 1408static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1404 struct sk_buff *skb) 1409 struct sk_buff *skb)
1405{ 1410{
1406 cb->command = cpu_to_le16(cb_iaaddr); 1411 cb->command = cpu_to_le16(cb_iaaddr);
1407 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); 1412 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1413 return 0;
1408} 1414}
1409 1415
1410static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1416static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1411{ 1417{
1412 cb->command = cpu_to_le16(cb_dump); 1418 cb->command = cpu_to_le16(cb_dump);
1413 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + 1419 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1414 offsetof(struct mem, dump_buf)); 1420 offsetof(struct mem, dump_buf));
1421 return 0;
1415} 1422}
1416 1423
1417static int e100_phy_check_without_mii(struct nic *nic) 1424static int e100_phy_check_without_mii(struct nic *nic)
@@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic)
1581 return 0; 1588 return 0;
1582} 1589}
1583 1590
1584static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1591static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1585{ 1592{
1586 struct net_device *netdev = nic->netdev; 1593 struct net_device *netdev = nic->netdev;
1587 struct netdev_hw_addr *ha; 1594 struct netdev_hw_addr *ha;
@@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1596 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, 1603 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
1597 ETH_ALEN); 1604 ETH_ALEN);
1598 } 1605 }
1606 return 0;
1599} 1607}
1600 1608
1601static void e100_set_multicast_list(struct net_device *netdev) 1609static void e100_set_multicast_list(struct net_device *netdev)
@@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data)
1756 round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); 1764 round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
1757} 1765}
1758 1766
1759static void e100_xmit_prepare(struct nic *nic, struct cb *cb, 1767static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
1760 struct sk_buff *skb) 1768 struct sk_buff *skb)
1761{ 1769{
1770 dma_addr_t dma_addr;
1762 cb->command = nic->tx_command; 1771 cb->command = nic->tx_command;
1763 1772
1773 dma_addr = pci_map_single(nic->pdev,
1774 skb->data, skb->len, PCI_DMA_TODEVICE);
1775 /* If we can't map the skb, have the upper layer try later */
1776 if (pci_dma_mapping_error(nic->pdev, dma_addr))
1777 return -ENOMEM;
1778
1764 /* 1779 /*
1765 * Use the last 4 bytes of the SKB payload packet as the CRC, used for 1780 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
1766 * testing, ie sending frames with bad CRC. 1781 * testing, ie sending frames with bad CRC.
@@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1777 cb->u.tcb.tcb_byte_count = 0; 1792 cb->u.tcb.tcb_byte_count = 0;
1778 cb->u.tcb.threshold = nic->tx_threshold; 1793 cb->u.tcb.threshold = nic->tx_threshold;
1779 cb->u.tcb.tbd_count = 1; 1794 cb->u.tcb.tbd_count = 1;
1780 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, 1795 cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
1781 skb->data, skb->len, PCI_DMA_TODEVICE));
1782 /* check for mapping failure? */
1783 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); 1796 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1784 skb_tx_timestamp(skb); 1797 skb_tx_timestamp(skb);
1798 return 0;
1785} 1799}
1786 1800
1787static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, 1801static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
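The e100 hunks above change the cb_prepare callbacks from void to int so that e100_xmit_prepare can map the skb first and fail cleanly when the DMA mapping is bad, instead of handing the hardware an unusable buffer address. A short sketch of that mapping-check pattern, under the assumption that the caller retries later on -ENOMEM (the example_ helper is illustrative, not driver code):

#include <linux/pci.h>
#include <linux/skbuff.h>

static int example_map_skb(struct pci_dev *pdev, struct sk_buff *skb,
			   dma_addr_t *out)
{
	dma_addr_t addr = pci_map_single(pdev, skb->data, skb->len,
					 PCI_DMA_TODEVICE);

	/* A failed mapping must never reach the device; report it so the
	 * upper layer can back off and try again later. */
	if (pci_dma_mapping_error(pdev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}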
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index cd345b8969bc..1e628ce57201 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2771,16 +2771,17 @@ static int mvneta_probe(struct platform_device *pdev)
2771 2771
2772 netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); 2772 netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
2773 2773
2774 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2775 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2776 dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2777 dev->priv_flags |= IFF_UNICAST_FLT;
2778
2774 err = register_netdev(dev); 2779 err = register_netdev(dev);
2775 if (err < 0) { 2780 if (err < 0) {
2776 dev_err(&pdev->dev, "failed to register\n"); 2781 dev_err(&pdev->dev, "failed to register\n");
2777 goto err_deinit; 2782 goto err_deinit;
2778 } 2783 }
2779 2784
2780 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2781 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2782 dev->priv_flags |= IFF_UNICAST_FLT;
2783
2784 netdev_info(dev, "mac: %pM\n", dev->dev_addr); 2785 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
2785 2786
2786 platform_set_drvdata(pdev, pp->dev); 2787 platform_set_drvdata(pdev, pp->dev);
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 1cd77483da50..f5f0f09e4cc5 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -470,8 +470,10 @@ static void netvsc_send_completion(struct hv_device *device,
470 packet->trans_id; 470 packet->trans_id;
471 471
472 /* Notify the layer above us */ 472 /* Notify the layer above us */
473 nvsc_packet->completion.send.send_completion( 473 if (nvsc_packet)
474 nvsc_packet->completion.send.send_completion_ctx); 474 nvsc_packet->completion.send.send_completion(
475 nvsc_packet->completion.send.
476 send_completion_ctx);
475 477
476 num_outstanding_sends = 478 num_outstanding_sends =
477 atomic_dec_return(&net_device->num_outstanding_sends); 479 atomic_dec_return(&net_device->num_outstanding_sends);
@@ -498,6 +500,7 @@ int netvsc_send(struct hv_device *device,
498 int ret = 0; 500 int ret = 0;
499 struct nvsp_message sendMessage; 501 struct nvsp_message sendMessage;
500 struct net_device *ndev; 502 struct net_device *ndev;
503 u64 req_id;
501 504
502 net_device = get_outbound_net_device(device); 505 net_device = get_outbound_net_device(device);
503 if (!net_device) 506 if (!net_device)
@@ -518,20 +521,24 @@ int netvsc_send(struct hv_device *device,
518 0xFFFFFFFF; 521 0xFFFFFFFF;
519 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; 522 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
520 523
524 if (packet->completion.send.send_completion)
525 req_id = (u64)packet;
526 else
527 req_id = 0;
528
521 if (packet->page_buf_cnt) { 529 if (packet->page_buf_cnt) {
522 ret = vmbus_sendpacket_pagebuffer(device->channel, 530 ret = vmbus_sendpacket_pagebuffer(device->channel,
523 packet->page_buf, 531 packet->page_buf,
524 packet->page_buf_cnt, 532 packet->page_buf_cnt,
525 &sendMessage, 533 &sendMessage,
526 sizeof(struct nvsp_message), 534 sizeof(struct nvsp_message),
527 (unsigned long)packet); 535 req_id);
528 } else { 536 } else {
529 ret = vmbus_sendpacket(device->channel, &sendMessage, 537 ret = vmbus_sendpacket(device->channel, &sendMessage,
530 sizeof(struct nvsp_message), 538 sizeof(struct nvsp_message),
531 (unsigned long)packet, 539 req_id,
532 VM_PKT_DATA_INBAND, 540 VM_PKT_DATA_INBAND,
533 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 541 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
534
535 } 542 }
536 543
537 if (ret == 0) { 544 if (ret == 0) {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5f85205cd12b..8341b62e5521 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -241,13 +241,11 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
241 241
242 if (status == 1) { 242 if (status == 1) {
243 netif_carrier_on(net); 243 netif_carrier_on(net);
244 netif_wake_queue(net);
245 ndev_ctx = netdev_priv(net); 244 ndev_ctx = netdev_priv(net);
246 schedule_delayed_work(&ndev_ctx->dwork, 0); 245 schedule_delayed_work(&ndev_ctx->dwork, 0);
247 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); 246 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
248 } else { 247 } else {
249 netif_carrier_off(net); 248 netif_carrier_off(net);
250 netif_tx_disable(net);
251 } 249 }
252} 250}
253 251
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2b657d4d63a8..0775f0aefd1e 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -61,9 +61,6 @@ struct rndis_request {
61 61
62static void rndis_filter_send_completion(void *ctx); 62static void rndis_filter_send_completion(void *ctx);
63 63
64static void rndis_filter_send_request_completion(void *ctx);
65
66
67 64
68static struct rndis_device *get_rndis_device(void) 65static struct rndis_device *get_rndis_device(void)
69{ 66{
@@ -241,10 +238,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
241 packet->page_buf[0].len; 238 packet->page_buf[0].len;
242 } 239 }
243 240
244 packet->completion.send.send_completion_ctx = req;/* packet; */ 241 packet->completion.send.send_completion = NULL;
245 packet->completion.send.send_completion =
246 rndis_filter_send_request_completion;
247 packet->completion.send.send_completion_tid = (unsigned long)dev;
248 242
249 ret = netvsc_send(dev->net_dev->dev, packet); 243 ret = netvsc_send(dev->net_dev->dev, packet);
250 return ret; 244 return ret;
@@ -999,9 +993,3 @@ static void rndis_filter_send_completion(void *ctx)
999 /* Pass it back to the original handler */ 993 /* Pass it back to the original handler */
1000 filter_pkt->completion(filter_pkt->completion_ctx); 994 filter_pkt->completion(filter_pkt->completion_ctx);
1001} 995}
1002
1003
1004static void rndis_filter_send_request_completion(void *ctx)
1005{
1006 /* Noop */
1007}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6e66f9c6782b..988372d218a4 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -280,6 +280,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
280 if (r) { 280 if (r) {
281 ath_err(common, 281 ath_err(common,
282 "Unable to reset channel, reset status %d\n", r); 282 "Unable to reset channel, reset status %d\n", r);
283
284 ath9k_hw_enable_interrupts(ah);
285 ath9k_queue_reset(sc, RESET_TYPE_BB_HANG);
286
283 goto out; 287 goto out;
284 } 288 }
285 289
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 4469321c0eb3..35fc68be158d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -3317,15 +3317,15 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3317 goto err; 3317 goto err;
3318 } 3318 }
3319 3319
3320 /* External image takes precedence if specified */
3321 if (brcmf_sdbrcm_download_code_file(bus)) { 3320 if (brcmf_sdbrcm_download_code_file(bus)) {
3322 brcmf_err("dongle image file download failed\n"); 3321 brcmf_err("dongle image file download failed\n");
3323 goto err; 3322 goto err;
3324 } 3323 }
3325 3324
3326 /* External nvram takes precedence if specified */ 3325 if (brcmf_sdbrcm_download_nvram(bus)) {
3327 if (brcmf_sdbrcm_download_nvram(bus))
3328 brcmf_err("dongle nvram file download failed\n"); 3326 brcmf_err("dongle nvram file download failed\n");
3327 goto err;
3328 }
3329 3329
3330 /* Take arm out of reset */ 3330 /* Take arm out of reset */
3331 if (brcmf_sdbrcm_download_state(bus, false)) { 3331 if (brcmf_sdbrcm_download_state(bus, false)) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 2af9c0f0798d..ec46ffff5409 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1891,8 +1891,10 @@ static s32
1891brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, 1891brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
1892 u8 key_idx, const u8 *mac_addr, struct key_params *params) 1892 u8 key_idx, const u8 *mac_addr, struct key_params *params)
1893{ 1893{
1894 struct brcmf_if *ifp = netdev_priv(ndev);
1894 struct brcmf_wsec_key key; 1895 struct brcmf_wsec_key key;
1895 s32 err = 0; 1896 s32 err = 0;
1897 u8 keybuf[8];
1896 1898
1897 memset(&key, 0, sizeof(key)); 1899 memset(&key, 0, sizeof(key));
1898 key.index = (u32) key_idx; 1900 key.index = (u32) key_idx;
@@ -1916,8 +1918,9 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
1916 brcmf_dbg(CONN, "Setting the key index %d\n", key.index); 1918 brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
1917 memcpy(key.data, params->key, key.len); 1919 memcpy(key.data, params->key, key.len);
1918 1920
1919 if (params->cipher == WLAN_CIPHER_SUITE_TKIP) { 1921 if ((ifp->vif->mode != WL_MODE_AP) &&
1920 u8 keybuf[8]; 1922 (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
1923 brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
1921 memcpy(keybuf, &key.data[24], sizeof(keybuf)); 1924 memcpy(keybuf, &key.data[24], sizeof(keybuf));
1922 memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); 1925 memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
1923 memcpy(&key.data[16], keybuf, sizeof(keybuf)); 1926 memcpy(&key.data[16], keybuf, sizeof(keybuf));
@@ -2013,7 +2016,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
2013 break; 2016 break;
2014 case WLAN_CIPHER_SUITE_TKIP: 2017 case WLAN_CIPHER_SUITE_TKIP:
2015 if (ifp->vif->mode != WL_MODE_AP) { 2018 if (ifp->vif->mode != WL_MODE_AP) {
2016 brcmf_dbg(CONN, "Swapping key\n"); 2019 brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
2017 memcpy(keybuf, &key.data[24], sizeof(keybuf)); 2020 memcpy(keybuf, &key.data[24], sizeof(keybuf));
2018 memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); 2021 memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
2019 memcpy(&key.data[16], keybuf, sizeof(keybuf)); 2022 memcpy(&key.data[16], keybuf, sizeof(keybuf));
@@ -2118,8 +2121,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
2118 err = -EAGAIN; 2121 err = -EAGAIN;
2119 goto done; 2122 goto done;
2120 } 2123 }
2121 switch (wsec & ~SES_OW_ENABLED) { 2124 if (wsec & WEP_ENABLED) {
2122 case WEP_ENABLED:
2123 sec = &profile->sec; 2125 sec = &profile->sec;
2124 if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { 2126 if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
2125 params.cipher = WLAN_CIPHER_SUITE_WEP40; 2127 params.cipher = WLAN_CIPHER_SUITE_WEP40;
@@ -2128,16 +2130,13 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
2128 params.cipher = WLAN_CIPHER_SUITE_WEP104; 2130 params.cipher = WLAN_CIPHER_SUITE_WEP104;
2129 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n"); 2131 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
2130 } 2132 }
2131 break; 2133 } else if (wsec & TKIP_ENABLED) {
2132 case TKIP_ENABLED:
2133 params.cipher = WLAN_CIPHER_SUITE_TKIP; 2134 params.cipher = WLAN_CIPHER_SUITE_TKIP;
2134 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n"); 2135 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
2135 break; 2136 } else if (wsec & AES_ENABLED) {
2136 case AES_ENABLED:
2137 params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; 2137 params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
2138 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); 2138 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
2139 break; 2139 } else {
2140 default:
2141 brcmf_err("Invalid algo (0x%x)\n", wsec); 2140 brcmf_err("Invalid algo (0x%x)\n", wsec);
2142 err = -EINVAL; 2141 err = -EINVAL;
2143 goto done; 2142 goto done;
@@ -3824,8 +3823,9 @@ exit:
3824static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) 3823static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
3825{ 3824{
3826 struct brcmf_if *ifp = netdev_priv(ndev); 3825 struct brcmf_if *ifp = netdev_priv(ndev);
3827 s32 err = -EPERM; 3826 s32 err;
3828 struct brcmf_fil_bss_enable_le bss_enable; 3827 struct brcmf_fil_bss_enable_le bss_enable;
3828 struct brcmf_join_params join_params;
3829 3829
3830 brcmf_dbg(TRACE, "Enter\n"); 3830 brcmf_dbg(TRACE, "Enter\n");
3831 3831
@@ -3833,16 +3833,21 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
3833 /* Due to most likely deauths outstanding we sleep */ 3833 /* Due to most likely deauths outstanding we sleep */
3834 /* first to make sure they get processed by fw. */ 3834 /* first to make sure they get processed by fw. */
3835 msleep(400); 3835 msleep(400);
3836 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); 3836
3837 if (err < 0) { 3837 memset(&join_params, 0, sizeof(join_params));
3838 brcmf_err("setting AP mode failed %d\n", err); 3838 err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
3839 goto exit; 3839 &join_params, sizeof(join_params));
3840 } 3840 if (err < 0)
3841 brcmf_err("SET SSID error (%d)\n", err);
3841 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0); 3842 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
3842 if (err < 0) { 3843 if (err < 0)
3843 brcmf_err("BRCMF_C_UP error %d\n", err); 3844 brcmf_err("BRCMF_C_UP error %d\n", err);
3844 goto exit; 3845 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
3845 } 3846 if (err < 0)
3847 brcmf_err("setting AP mode failed %d\n", err);
3848 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
3849 if (err < 0)
3850 brcmf_err("setting INFRA mode failed %d\n", err);
3846 } else { 3851 } else {
3847 bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx); 3852 bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
3848 bss_enable.enable = cpu_to_le32(0); 3853 bss_enable.enable = cpu_to_le32(0);
@@ -3855,7 +3860,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
3855 set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); 3860 set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
3856 clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); 3861 clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
3857 3862
3858exit:
3859 return err; 3863 return err;
3860} 3864}
3861 3865
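Aside on the TKIP hunks above: for a non-AP interface the driver swaps the two 8-byte MIC keys inside the 32-byte TKIP key material before handing it to firmware, because cfg80211 and the firmware expect the TX/RX MIC keys in opposite order. A minimal stand-alone sketch of that byte swap (the function name and the layout comment are illustrative, not taken from the driver):

#include <stdint.h>
#include <string.h>

/* Assumed layout: bytes 0-15 temporal key, 16-23 first MIC key,
 * 24-31 second MIC key; the swap exchanges the two MIC keys in place. */
static void tkip_swap_mic_keys(uint8_t key[32])
{
	uint8_t tmp[8];

	memcpy(tmp, &key[24], sizeof(tmp));      /* save second MIC key     */
	memcpy(&key[24], &key[16], sizeof(tmp)); /* move first into slot 2  */
	memcpy(&key[16], tmp, sizeof(tmp));      /* move second into slot 1 */
}

The get_key hunk in the same file replaces the switch on wsec with an if/else chain because wsec is a bitmask and more than one cipher bit can be set at once, so an exact-match switch would fall through to the error case.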
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index d215b4d3c51b..e7f6deaf715e 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1393,8 +1393,10 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
1393 queue_work(adapter->workqueue, &adapter->main_work); 1393 queue_work(adapter->workqueue, &adapter->main_work);
1394 1394
1395 /* Perform internal scan synchronously */ 1395 /* Perform internal scan synchronously */
1396 if (!priv->scan_request) 1396 if (!priv->scan_request) {
1397 dev_dbg(adapter->dev, "wait internal scan\n");
1397 mwifiex_wait_queue_complete(adapter, cmd_node); 1398 mwifiex_wait_queue_complete(adapter, cmd_node);
1399 }
1398 } else { 1400 } else {
1399 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, 1401 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1400 flags); 1402 flags);
@@ -1793,7 +1795,12 @@ check_next_scan:
1793 /* Need to indicate IOCTL complete */ 1795 /* Need to indicate IOCTL complete */
1794 if (adapter->curr_cmd->wait_q_enabled) { 1796 if (adapter->curr_cmd->wait_q_enabled) {
1795 adapter->cmd_wait_q.status = 0; 1797 adapter->cmd_wait_q.status = 0;
1796 mwifiex_complete_cmd(adapter, adapter->curr_cmd); 1798 if (!priv->scan_request) {
1799 dev_dbg(adapter->dev,
1800 "complete internal scan\n");
1801 mwifiex_complete_cmd(adapter,
1802 adapter->curr_cmd);
1803 }
1797 } 1804 }
1798 if (priv->report_scan_result) 1805 if (priv->report_scan_result)
1799 priv->report_scan_result = false; 1806 priv->report_scan_result = false;
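The two mwifiex hunks pair a waiter and a completer on the same condition: the caller only blocks in mwifiex_wait_queue_complete() for an internal scan (no cfg80211 scan_request), and the scan-done path now only signals the command completion in that same case, so a completion is never raised for a scan that has no waiter. The kernel completion primitive involved behaves roughly like this userspace sketch (POSIX condition variable standing in for the kernel API; names are illustrative):

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-in for the kernel's command wait queue / completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	bool            done;
};

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}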
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 2bf4efa33186..76cd47eb901e 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -20,6 +20,7 @@ if RT2X00
20config RT2400PCI 20config RT2400PCI
21 tristate "Ralink rt2400 (PCI/PCMCIA) support" 21 tristate "Ralink rt2400 (PCI/PCMCIA) support"
22 depends on PCI 22 depends on PCI
23 select RT2X00_LIB_MMIO
23 select RT2X00_LIB_PCI 24 select RT2X00_LIB_PCI
24 select EEPROM_93CX6 25 select EEPROM_93CX6
25 ---help--- 26 ---help---
@@ -31,6 +32,7 @@ config RT2400PCI
31config RT2500PCI 32config RT2500PCI
32 tristate "Ralink rt2500 (PCI/PCMCIA) support" 33 tristate "Ralink rt2500 (PCI/PCMCIA) support"
33 depends on PCI 34 depends on PCI
35 select RT2X00_LIB_MMIO
34 select RT2X00_LIB_PCI 36 select RT2X00_LIB_PCI
35 select EEPROM_93CX6 37 select EEPROM_93CX6
36 ---help--- 38 ---help---
@@ -43,6 +45,7 @@ config RT61PCI
43 tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" 45 tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
44 depends on PCI 46 depends on PCI
45 select RT2X00_LIB_PCI 47 select RT2X00_LIB_PCI
48 select RT2X00_LIB_MMIO
46 select RT2X00_LIB_FIRMWARE 49 select RT2X00_LIB_FIRMWARE
47 select RT2X00_LIB_CRYPTO 50 select RT2X00_LIB_CRYPTO
48 select CRC_ITU_T 51 select CRC_ITU_T
@@ -57,6 +60,7 @@ config RT2800PCI
57 tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" 60 tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
58 depends on PCI || SOC_RT288X || SOC_RT305X 61 depends on PCI || SOC_RT288X || SOC_RT305X
59 select RT2800_LIB 62 select RT2800_LIB
63 select RT2X00_LIB_MMIO
60 select RT2X00_LIB_PCI if PCI 64 select RT2X00_LIB_PCI if PCI
61 select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X 65 select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
62 select RT2X00_LIB_FIRMWARE 66 select RT2X00_LIB_FIRMWARE
@@ -185,6 +189,9 @@ endif
185config RT2800_LIB 189config RT2800_LIB
186 tristate 190 tristate
187 191
192config RT2X00_LIB_MMIO
193 tristate
194
188config RT2X00_LIB_PCI 195config RT2X00_LIB_PCI
189 tristate 196 tristate
190 select RT2X00_LIB 197 select RT2X00_LIB
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index 349d5b8284a4..f069d8bc5b67 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -9,6 +9,7 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o
9rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o 9rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o
10 10
11obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o 11obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
12obj-$(CONFIG_RT2X00_LIB_MMIO) += rt2x00mmio.o
12obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o 13obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
13obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o 14obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
14obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o 15obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 221beaaa83f1..dcfb54e0c516 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35 35
36#include "rt2x00.h" 36#include "rt2x00.h"
37#include "rt2x00mmio.h"
37#include "rt2x00pci.h" 38#include "rt2x00pci.h"
38#include "rt2400pci.h" 39#include "rt2400pci.h"
39 40
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 39edc59e8d03..e1d2dc9ed28a 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35 35
36#include "rt2x00.h" 36#include "rt2x00.h"
37#include "rt2x00mmio.h"
37#include "rt2x00pci.h" 38#include "rt2x00pci.h"
38#include "rt2500pci.h" 39#include "rt2500pci.h"
39 40
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index ded73da4de0b..ba5a05625aaa 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -41,6 +41,7 @@
41#include <linux/eeprom_93cx6.h> 41#include <linux/eeprom_93cx6.h>
42 42
43#include "rt2x00.h" 43#include "rt2x00.h"
44#include "rt2x00mmio.h"
44#include "rt2x00pci.h" 45#include "rt2x00pci.h"
45#include "rt2x00soc.h" 46#include "rt2x00soc.h"
46#include "rt2800lib.h" 47#include "rt2800lib.h"
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.c b/drivers/net/wireless/rt2x00/rt2x00mmio.c
new file mode 100644
index 000000000000..d84a680ba0c9
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00mmio.c
@@ -0,0 +1,216 @@
1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00mmio
23 Abstract: rt2x00 generic mmio device routines.
24 */
25
26#include <linux/dma-mapping.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/slab.h>
30
31#include "rt2x00.h"
32#include "rt2x00mmio.h"
33
34/*
35 * Register access.
36 */
37int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
38 const unsigned int offset,
39 const struct rt2x00_field32 field,
40 u32 *reg)
41{
42 unsigned int i;
43
44 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
45 return 0;
46
47 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
48 rt2x00pci_register_read(rt2x00dev, offset, reg);
49 if (!rt2x00_get_field32(*reg, field))
50 return 1;
51 udelay(REGISTER_BUSY_DELAY);
52 }
53
54 printk_once(KERN_ERR "%s() Indirect register access failed: "
55 "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg);
56 *reg = ~0;
57
58 return 0;
59}
60EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
61
62bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
63{
64 struct data_queue *queue = rt2x00dev->rx;
65 struct queue_entry *entry;
66 struct queue_entry_priv_pci *entry_priv;
67 struct skb_frame_desc *skbdesc;
68 int max_rx = 16;
69
70 while (--max_rx) {
71 entry = rt2x00queue_get_entry(queue, Q_INDEX);
72 entry_priv = entry->priv_data;
73
74 if (rt2x00dev->ops->lib->get_entry_state(entry))
75 break;
76
77 /*
78 * Fill in desc fields of the skb descriptor
79 */
80 skbdesc = get_skb_frame_desc(entry->skb);
81 skbdesc->desc = entry_priv->desc;
82 skbdesc->desc_len = entry->queue->desc_size;
83
84 /*
85 * DMA is already done, notify rt2x00lib that
86 * it finished successfully.
87 */
88 rt2x00lib_dmastart(entry);
89 rt2x00lib_dmadone(entry);
90
91 /*
92 * Send the frame to rt2x00lib for further processing.
93 */
94 rt2x00lib_rxdone(entry, GFP_ATOMIC);
95 }
96
97 return !max_rx;
98}
99EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
100
101void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
102{
103 unsigned int i;
104
105 for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
106 msleep(10);
107}
108EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
109
110/*
111 * Device initialization handlers.
112 */
113static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
114 struct data_queue *queue)
115{
116 struct queue_entry_priv_pci *entry_priv;
117 void *addr;
118 dma_addr_t dma;
119 unsigned int i;
120
121 /*
122 * Allocate DMA memory for descriptor and buffer.
123 */
124 addr = dma_alloc_coherent(rt2x00dev->dev,
125 queue->limit * queue->desc_size,
126 &dma, GFP_KERNEL);
127 if (!addr)
128 return -ENOMEM;
129
130 memset(addr, 0, queue->limit * queue->desc_size);
131
132 /*
133 * Initialize all queue entries to contain valid addresses.
134 */
135 for (i = 0; i < queue->limit; i++) {
136 entry_priv = queue->entries[i].priv_data;
137 entry_priv->desc = addr + i * queue->desc_size;
138 entry_priv->desc_dma = dma + i * queue->desc_size;
139 }
140
141 return 0;
142}
143
144static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
145 struct data_queue *queue)
146{
147 struct queue_entry_priv_pci *entry_priv =
148 queue->entries[0].priv_data;
149
150 if (entry_priv->desc)
151 dma_free_coherent(rt2x00dev->dev,
152 queue->limit * queue->desc_size,
153 entry_priv->desc, entry_priv->desc_dma);
154 entry_priv->desc = NULL;
155}
156
157int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
158{
159 struct data_queue *queue;
160 int status;
161
162 /*
163 * Allocate DMA
164 */
165 queue_for_each(rt2x00dev, queue) {
166 status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
167 if (status)
168 goto exit;
169 }
170
171 /*
172 * Register interrupt handler.
173 */
174 status = request_irq(rt2x00dev->irq,
175 rt2x00dev->ops->lib->irq_handler,
176 IRQF_SHARED, rt2x00dev->name, rt2x00dev);
177 if (status) {
178 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
179 rt2x00dev->irq, status);
180 goto exit;
181 }
182
183 return 0;
184
185exit:
186 queue_for_each(rt2x00dev, queue)
187 rt2x00pci_free_queue_dma(rt2x00dev, queue);
188
189 return status;
190}
191EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
192
193void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
194{
195 struct data_queue *queue;
196
197 /*
198 * Free irq line.
199 */
200 free_irq(rt2x00dev->irq, rt2x00dev);
201
202 /*
203 * Free DMA
204 */
205 queue_for_each(rt2x00dev, queue)
206 rt2x00pci_free_queue_dma(rt2x00dev, queue);
207}
208EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
209
210/*
211 * rt2x00mmio module information.
212 */
213MODULE_AUTHOR(DRV_PROJECT);
214MODULE_VERSION(DRV_VERSION);
215MODULE_DESCRIPTION("rt2x00 mmio library");
216MODULE_LICENSE("GPL");
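The core of the new file is rt2x00pci_regbusy_read(): poll a register a bounded number of times, return non-zero as soon as the named field clears, otherwise log once and poison the value. A stand-alone sketch of that pattern in plain C (illustrative names; no kernel MMIO accessors or udelay() here):

#include <stdbool.h>
#include <stdint.h>

#define REG_BUSY_COUNT 100	/* bounded number of polls, like REGISTER_BUSY_COUNT */

static bool regbusy_read(const volatile uint32_t *base, unsigned int offset,
			 uint32_t busy_mask, uint32_t *reg)
{
	for (unsigned int i = 0; i < REG_BUSY_COUNT; i++) {
		*reg = base[offset / 4];	/* 32-bit register read */
		if (!(*reg & busy_mask))
			return true;		/* busy field cleared: done */
		/* the driver udelay()s between polls; sleep or spin here */
	}

	*reg = ~0U;				/* timed out: poison the value */
	return false;
}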
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.h b/drivers/net/wireless/rt2x00/rt2x00mmio.h
new file mode 100644
index 000000000000..4ecaf60175bf
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00mmio.h
@@ -0,0 +1,119 @@
1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00mmio
23 Abstract: Data structures for the rt2x00mmio module.
24 */
25
26#ifndef RT2X00MMIO_H
27#define RT2X00MMIO_H
28
29#include <linux/io.h>
30
31/*
32 * Register access.
33 */
34static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
35 const unsigned int offset,
36 u32 *value)
37{
38 *value = readl(rt2x00dev->csr.base + offset);
39}
40
41static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
42 const unsigned int offset,
43 void *value, const u32 length)
44{
45 memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
46}
47
48static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
49 const unsigned int offset,
50 u32 value)
51{
52 writel(value, rt2x00dev->csr.base + offset);
53}
54
55static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
56 const unsigned int offset,
57 const void *value,
58 const u32 length)
59{
60 __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
61}
62
63/**
64 * rt2x00pci_regbusy_read - Read from register with busy check
65 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
66 * @offset: Register offset
67 * @field: Field to check if register is busy
68 * @reg: Pointer to where register contents should be stored
69 *
70 * This function will read the given register, and checks if the
71 * register is busy. If it is, it will sleep for a couple of
72 * microseconds before reading the register again. If the register
73 * is not read after a certain timeout, this function will return
74 * FALSE.
75 */
76int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
77 const unsigned int offset,
78 const struct rt2x00_field32 field,
79 u32 *reg);
80
81/**
82 * struct queue_entry_priv_pci: Per entry PCI specific information
83 *
84 * @desc: Pointer to device descriptor
85 * @desc_dma: DMA pointer to &desc.
86 * @data: Pointer to device's entry memory.
87 * @data_dma: DMA pointer to &data.
88 */
89struct queue_entry_priv_pci {
90 __le32 *desc;
91 dma_addr_t desc_dma;
92};
93
94/**
95 * rt2x00pci_rxdone - Handle RX done events
96 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
97 *
98 * Returns true if there are still rx frames pending and false if all
99 * pending rx frames were processed.
100 */
101bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
102
103/**
104 * rt2x00pci_flush_queue - Flush data queue
105 * @queue: Data queue to stop
106 * @drop: True to drop all pending frames.
107 *
108 * This will wait for a maximum of 100ms, waiting for the queues
109 * to become empty.
110 */
111void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
112
113/*
114 * Device initialization handlers.
115 */
116int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
117void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
118
119#endif /* RT2X00MMIO_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index a0c8caef3b0a..e87865e33113 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -33,182 +33,6 @@
33#include "rt2x00pci.h" 33#include "rt2x00pci.h"
34 34
35/* 35/*
36 * Register access.
37 */
38int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
39 const unsigned int offset,
40 const struct rt2x00_field32 field,
41 u32 *reg)
42{
43 unsigned int i;
44
45 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
46 return 0;
47
48 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
49 rt2x00pci_register_read(rt2x00dev, offset, reg);
50 if (!rt2x00_get_field32(*reg, field))
51 return 1;
52 udelay(REGISTER_BUSY_DELAY);
53 }
54
55 ERROR(rt2x00dev, "Indirect register access failed: "
56 "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
57 *reg = ~0;
58
59 return 0;
60}
61EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
62
63bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
64{
65 struct data_queue *queue = rt2x00dev->rx;
66 struct queue_entry *entry;
67 struct queue_entry_priv_pci *entry_priv;
68 struct skb_frame_desc *skbdesc;
69 int max_rx = 16;
70
71 while (--max_rx) {
72 entry = rt2x00queue_get_entry(queue, Q_INDEX);
73 entry_priv = entry->priv_data;
74
75 if (rt2x00dev->ops->lib->get_entry_state(entry))
76 break;
77
78 /*
79 * Fill in desc fields of the skb descriptor
80 */
81 skbdesc = get_skb_frame_desc(entry->skb);
82 skbdesc->desc = entry_priv->desc;
83 skbdesc->desc_len = entry->queue->desc_size;
84
85 /*
86 * DMA is already done, notify rt2x00lib that
87 * it finished successfully.
88 */
89 rt2x00lib_dmastart(entry);
90 rt2x00lib_dmadone(entry);
91
92 /*
93 * Send the frame to rt2x00lib for further processing.
94 */
95 rt2x00lib_rxdone(entry, GFP_ATOMIC);
96 }
97
98 return !max_rx;
99}
100EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
101
102void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
103{
104 unsigned int i;
105
106 for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
107 msleep(10);
108}
109EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
110
111/*
112 * Device initialization handlers.
113 */
114static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
115 struct data_queue *queue)
116{
117 struct queue_entry_priv_pci *entry_priv;
118 void *addr;
119 dma_addr_t dma;
120 unsigned int i;
121
122 /*
123 * Allocate DMA memory for descriptor and buffer.
124 */
125 addr = dma_alloc_coherent(rt2x00dev->dev,
126 queue->limit * queue->desc_size,
127 &dma, GFP_KERNEL);
128 if (!addr)
129 return -ENOMEM;
130
131 memset(addr, 0, queue->limit * queue->desc_size);
132
133 /*
134 * Initialize all queue entries to contain valid addresses.
135 */
136 for (i = 0; i < queue->limit; i++) {
137 entry_priv = queue->entries[i].priv_data;
138 entry_priv->desc = addr + i * queue->desc_size;
139 entry_priv->desc_dma = dma + i * queue->desc_size;
140 }
141
142 return 0;
143}
144
145static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
146 struct data_queue *queue)
147{
148 struct queue_entry_priv_pci *entry_priv =
149 queue->entries[0].priv_data;
150
151 if (entry_priv->desc)
152 dma_free_coherent(rt2x00dev->dev,
153 queue->limit * queue->desc_size,
154 entry_priv->desc, entry_priv->desc_dma);
155 entry_priv->desc = NULL;
156}
157
158int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
159{
160 struct data_queue *queue;
161 int status;
162
163 /*
164 * Allocate DMA
165 */
166 queue_for_each(rt2x00dev, queue) {
167 status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
168 if (status)
169 goto exit;
170 }
171
172 /*
173 * Register interrupt handler.
174 */
175 status = request_irq(rt2x00dev->irq,
176 rt2x00dev->ops->lib->irq_handler,
177 IRQF_SHARED, rt2x00dev->name, rt2x00dev);
178 if (status) {
179 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
180 rt2x00dev->irq, status);
181 goto exit;
182 }
183
184 return 0;
185
186exit:
187 queue_for_each(rt2x00dev, queue)
188 rt2x00pci_free_queue_dma(rt2x00dev, queue);
189
190 return status;
191}
192EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
193
194void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
195{
196 struct data_queue *queue;
197
198 /*
199 * Free irq line.
200 */
201 free_irq(rt2x00dev->irq, rt2x00dev);
202
203 /*
204 * Free DMA
205 */
206 queue_for_each(rt2x00dev, queue)
207 rt2x00pci_free_queue_dma(rt2x00dev, queue);
208}
209EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
210
211/*
212 * PCI driver handlers. 36 * PCI driver handlers.
213 */ 37 */
214static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) 38static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index e2c99f2b9a14..60d90b20f8b9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -36,94 +36,6 @@
36#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) 36#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops)
37 37
38/* 38/*
39 * Register access.
40 */
41static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
42 const unsigned int offset,
43 u32 *value)
44{
45 *value = readl(rt2x00dev->csr.base + offset);
46}
47
48static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
49 const unsigned int offset,
50 void *value, const u32 length)
51{
52 memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
53}
54
55static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
56 const unsigned int offset,
57 u32 value)
58{
59 writel(value, rt2x00dev->csr.base + offset);
60}
61
62static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
63 const unsigned int offset,
64 const void *value,
65 const u32 length)
66{
67 __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
68}
69
70/**
71 * rt2x00pci_regbusy_read - Read from register with busy check
72 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
73 * @offset: Register offset
74 * @field: Field to check if register is busy
75 * @reg: Pointer to where register contents should be stored
76 *
77 * This function will read the given register, and checks if the
78 * register is busy. If it is, it will sleep for a couple of
79 * microseconds before reading the register again. If the register
80 * is not read after a certain timeout, this function will return
81 * FALSE.
82 */
83int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
84 const unsigned int offset,
85 const struct rt2x00_field32 field,
86 u32 *reg);
87
88/**
89 * struct queue_entry_priv_pci: Per entry PCI specific information
90 *
91 * @desc: Pointer to device descriptor
92 * @desc_dma: DMA pointer to &desc.
93 * @data: Pointer to device's entry memory.
94 * @data_dma: DMA pointer to &data.
95 */
96struct queue_entry_priv_pci {
97 __le32 *desc;
98 dma_addr_t desc_dma;
99};
100
101/**
102 * rt2x00pci_rxdone - Handle RX done events
103 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
104 *
105 * Returns true if there are still rx frames pending and false if all
106 * pending rx frames were processed.
107 */
108bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
109
110/**
111 * rt2x00pci_flush_queue - Flush data queue
112 * @queue: Data queue to stop
113 * @drop: True to drop all pending frames.
114 *
115 * This will wait for a maximum of 100ms, waiting for the queues
116 * to become empty.
117 */
118void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
119
120/*
121 * Device initialization handlers.
122 */
123int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
124void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
125
126/*
127 * PCI driver handlers. 39 * PCI driver handlers.
128 */ 40 */
129int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops); 41int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index f95792cfcf89..9e3c8ff53e3f 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -35,6 +35,7 @@
35#include <linux/eeprom_93cx6.h> 35#include <linux/eeprom_93cx6.h>
36 36
37#include "rt2x00.h" 37#include "rt2x00.h"
38#include "rt2x00mmio.h"
38#include "rt2x00pci.h" 39#include "rt2x00pci.h"
39#include "rt61pci.h" 40#include "rt61pci.h"
40 41
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index cc1f7bf53fd0..c6d77e20622c 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -4,7 +4,7 @@ menu "Remoteproc drivers"
4config REMOTEPROC 4config REMOTEPROC
5 tristate 5 tristate
6 depends on HAS_DMA 6 depends on HAS_DMA
7 select FW_CONFIG 7 select FW_LOADER
8 select VIRTIO 8 select VIRTIO
9 9
10config OMAP_REMOTEPROC 10config OMAP_REMOTEPROC
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 29387df4bfc9..8edb4aed5d36 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -217,7 +217,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
217 * TODO: support predefined notifyids (via resource table) 217 * TODO: support predefined notifyids (via resource table)
218 */ 218 */
219 ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL); 219 ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
220 if (ret) { 220 if (ret < 0) {
221 dev_err(dev, "idr_alloc failed: %d\n", ret); 221 dev_err(dev, "idr_alloc failed: %d\n", ret);
222 dma_free_coherent(dev->parent, size, va, dma); 222 dma_free_coherent(dev->parent, size, va, dma);
223 return ret; 223 return ret;
@@ -366,10 +366,12 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
366 /* it is now safe to add the virtio device */ 366 /* it is now safe to add the virtio device */
367 ret = rproc_add_virtio_dev(rvdev, rsc->id); 367 ret = rproc_add_virtio_dev(rvdev, rsc->id);
368 if (ret) 368 if (ret)
369 goto free_rvdev; 369 goto remove_rvdev;
370 370
371 return 0; 371 return 0;
372 372
373remove_rvdev:
374 list_del(&rvdev->node);
373free_rvdev: 375free_rvdev:
374 kfree(rvdev); 376 kfree(rvdev);
375 return ret; 377 return ret;
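The error-check change above is worth spelling out: idr_alloc() returns the newly allocated id, which is zero or positive, and a negative errno only on failure, so `if (ret)` mistakes every id other than 0 for an error. A stand-alone illustration with a fake allocator:

#include <stdio.h>

/* Hypothetical allocator with idr_alloc()-like return semantics:
 * a new id (>= 0) on success, a negative errno on failure. */
static int fake_idr_alloc(void)
{
	static int next_id;
	return next_id++;	/* 0, 1, 2, ... */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		int ret = fake_idr_alloc();

		if (ret < 0)	/* correct: only negative values are errors */
			return 1;
		printf("allocated notifyid %d\n", ret);
	}
	return 0;
}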
diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c
index a7743c069339..fb95c4220052 100644
--- a/drivers/remoteproc/ste_modem_rproc.c
+++ b/drivers/remoteproc/ste_modem_rproc.c
@@ -240,6 +240,8 @@ static int sproc_drv_remove(struct platform_device *pdev)
240 240
241 /* Unregister as remoteproc device */ 241 /* Unregister as remoteproc device */
242 rproc_del(sproc->rproc); 242 rproc_del(sproc->rproc);
243 dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE,
244 sproc->fw_addr, sproc->fw_dma_addr);
243 rproc_put(sproc->rproc); 245 rproc_put(sproc->rproc);
244 246
245 mdev->drv_data = NULL; 247 mdev->drv_data = NULL;
@@ -297,10 +299,13 @@ static int sproc_probe(struct platform_device *pdev)
297 /* Register as a remoteproc device */ 299 /* Register as a remoteproc device */
298 err = rproc_add(rproc); 300 err = rproc_add(rproc);
299 if (err) 301 if (err)
300 goto free_rproc; 302 goto free_mem;
301 303
302 return 0; 304 return 0;
303 305
306free_mem:
307 dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE,
308 sproc->fw_addr, sproc->fw_dma_addr);
304free_rproc: 309free_rproc:
305 /* Reset device data upon error */ 310 /* Reset device data upon error */
306 mdev->drv_data = NULL; 311 mdev->drv_data = NULL;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 8c0622399fcd..6ccb7457746b 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -769,6 +769,7 @@ struct qeth_card {
769 unsigned long thread_start_mask; 769 unsigned long thread_start_mask;
770 unsigned long thread_allowed_mask; 770 unsigned long thread_allowed_mask;
771 unsigned long thread_running_mask; 771 unsigned long thread_running_mask;
772 struct task_struct *recovery_task;
772 spinlock_t ip_lock; 773 spinlock_t ip_lock;
773 struct list_head ip_list; 774 struct list_head ip_list;
774 struct list_head *ip_tbd_list; 775 struct list_head *ip_tbd_list;
@@ -862,6 +863,8 @@ extern struct qeth_card_list_struct qeth_core_card_list;
862extern struct kmem_cache *qeth_core_header_cache; 863extern struct kmem_cache *qeth_core_header_cache;
863extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; 864extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
864 865
866void qeth_set_recovery_task(struct qeth_card *);
867void qeth_clear_recovery_task(struct qeth_card *);
865void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); 868void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
866int qeth_threads_running(struct qeth_card *, unsigned long); 869int qeth_threads_running(struct qeth_card *, unsigned long);
867int qeth_wait_for_threads(struct qeth_card *, unsigned long); 870int qeth_wait_for_threads(struct qeth_card *, unsigned long);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 0d73a999983d..451f92020599 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -177,6 +177,23 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
177 return "n/a"; 177 return "n/a";
178} 178}
179 179
180void qeth_set_recovery_task(struct qeth_card *card)
181{
182 card->recovery_task = current;
183}
184EXPORT_SYMBOL_GPL(qeth_set_recovery_task);
185
186void qeth_clear_recovery_task(struct qeth_card *card)
187{
188 card->recovery_task = NULL;
189}
190EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);
191
192static bool qeth_is_recovery_task(const struct qeth_card *card)
193{
194 return card->recovery_task == current;
195}
196
180void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, 197void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
181 int clear_start_mask) 198 int clear_start_mask)
182{ 199{
@@ -205,6 +222,8 @@ EXPORT_SYMBOL_GPL(qeth_threads_running);
205 222
206int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) 223int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
207{ 224{
225 if (qeth_is_recovery_task(card))
226 return 0;
208 return wait_event_interruptible(card->wait_q, 227 return wait_event_interruptible(card->wait_q,
209 qeth_threads_running(card, threads) == 0); 228 qeth_threads_running(card, threads) == 0);
210} 229}
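The qeth change records which task is running recovery and lets qeth_wait_for_threads() return immediately when called from that task, so the recovery thread can never end up waiting for a set of running threads that includes itself. A stand-alone sketch of the guard, with pthread_self() standing in for the kernel's current pointer (struct and function names are illustrative):

#include <pthread.h>
#include <stdbool.h>

struct card {
	pthread_t recovery_task;
	bool      recovery_task_set;
};

static void set_recovery_task(struct card *c)
{
	c->recovery_task = pthread_self();
	c->recovery_task_set = true;
}

static void clear_recovery_task(struct card *c)
{
	c->recovery_task_set = false;
}

static bool is_recovery_task(const struct card *c)
{
	return c->recovery_task_set &&
	       pthread_equal(c->recovery_task, pthread_self());
}

static int wait_for_threads(struct card *c)
{
	if (is_recovery_task(c))
		return 0;	/* the recovery thread must never block here */
	/* ... otherwise block until the other threads have finished ... */
	return 0;
}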
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index d690166efeaf..155b101bd730 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1143,6 +1143,7 @@ static int qeth_l2_recover(void *ptr)
1143 QETH_CARD_TEXT(card, 2, "recover2"); 1143 QETH_CARD_TEXT(card, 2, "recover2");
1144 dev_warn(&card->gdev->dev, 1144 dev_warn(&card->gdev->dev,
1145 "A recovery process has been started for the device\n"); 1145 "A recovery process has been started for the device\n");
1146 qeth_set_recovery_task(card);
1146 __qeth_l2_set_offline(card->gdev, 1); 1147 __qeth_l2_set_offline(card->gdev, 1);
1147 rc = __qeth_l2_set_online(card->gdev, 1); 1148 rc = __qeth_l2_set_online(card->gdev, 1);
1148 if (!rc) 1149 if (!rc)
@@ -1153,6 +1154,7 @@ static int qeth_l2_recover(void *ptr)
1153 dev_warn(&card->gdev->dev, "The qeth device driver " 1154 dev_warn(&card->gdev->dev, "The qeth device driver "
1154 "failed to recover an error on the device\n"); 1155 "failed to recover an error on the device\n");
1155 } 1156 }
1157 qeth_clear_recovery_task(card);
1156 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 1158 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1157 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 1159 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
1158 return 0; 1160 return 0;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8710337dab3e..1f7edf1b26c3 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3515,6 +3515,7 @@ static int qeth_l3_recover(void *ptr)
3515 QETH_CARD_TEXT(card, 2, "recover2"); 3515 QETH_CARD_TEXT(card, 2, "recover2");
3516 dev_warn(&card->gdev->dev, 3516 dev_warn(&card->gdev->dev,
3517 "A recovery process has been started for the device\n"); 3517 "A recovery process has been started for the device\n");
3518 qeth_set_recovery_task(card);
3518 __qeth_l3_set_offline(card->gdev, 1); 3519 __qeth_l3_set_offline(card->gdev, 1);
3519 rc = __qeth_l3_set_online(card->gdev, 1); 3520 rc = __qeth_l3_set_online(card->gdev, 1);
3520 if (!rc) 3521 if (!rc)
@@ -3525,6 +3526,7 @@ static int qeth_l3_recover(void *ptr)
3525 dev_warn(&card->gdev->dev, "The qeth device driver " 3526 dev_warn(&card->gdev->dev, "The qeth device driver "
3526 "failed to recover an error on the device\n"); 3527 "failed to recover an error on the device\n");
3527 } 3528 }
3529 qeth_clear_recovery_task(card);
3528 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 3530 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
3529 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 3531 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
3530 return 0; 3532 return 0;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index a044f593e8b9..d0fa4b6c551f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1899,8 +1899,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1899 sdev->allow_restart = 1; 1899 sdev->allow_restart = 1;
1900 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); 1900 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
1901 } 1901 }
1902 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1903 spin_unlock_irqrestore(shost->host_lock, lock_flags); 1902 spin_unlock_irqrestore(shost->host_lock, lock_flags);
1903 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1904 return 0; 1904 return 0;
1905} 1905}
1906 1906
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index f328089a1060..2197b57fb225 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5148,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5148 ipr_trace; 5148 ipr_trace;
5149 } 5149 }
5150 5150
5151 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); 5151 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5152 if (!ipr_is_naca_model(res)) 5152 if (!ipr_is_naca_model(res))
5153 res->needs_sync_complete = 1; 5153 res->needs_sync_complete = 1;
5154 5154
@@ -9349,7 +9349,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9349 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 9349 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9350 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 9350 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9351 9351
9352 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); 9352 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9353 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9354 else
9355 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9353 if (rc) { 9356 if (rc) {
9354 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); 9357 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9355 return rc; 9358 return rc;
@@ -9371,7 +9374,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9371 9374
9372 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 9375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9373 9376
9374 free_irq(pdev->irq, ioa_cfg); 9377 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9378 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9379 else
9380 free_irq(pdev->irq, ioa_cfg);
9375 9381
9376 LEAVE; 9382 LEAVE;
9377 9383
@@ -9722,6 +9728,7 @@ static void __ipr_remove(struct pci_dev *pdev)
9722 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 9728 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9723 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 9729 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9724 flush_work(&ioa_cfg->work_q); 9730 flush_work(&ioa_cfg->work_q);
9731 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9725 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 9732 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9726 9733
9727 spin_lock(&ipr_driver_lock); 9734 spin_lock(&ipr_driver_lock);
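In MSI-X mode pdev->irq is not the vector that actually fires, so the interrupt-test path has to request and free the first MSI-X vector instead. A hypothetical helper capturing that selection (field names follow the diff, the helper itself is not part of the patch):

/* Hypothetical helper: pick the IRQ number used for the MSI/MSI-X test,
 * mirroring the two request_irq()/free_irq() call sites patched above. */
static unsigned int ipr_test_irq_number(struct ipr_ioa_cfg *ioa_cfg,
					struct pci_dev *pdev)
{
	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		return ioa_cfg->vectors_info[0].vec;	/* first MSI-X vector */
	return pdev->irq;				/* legacy INTx or MSI */
}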
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index aec2e0da5016..55cbd0180159 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -235,6 +235,17 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
235 linkrate = phy->linkrate; 235 linkrate = phy->linkrate;
236 memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); 236 memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
237 237
238 /* Handle vacant phy - rest of dr data is not valid so skip it */
239 if (phy->phy_state == PHY_VACANT) {
240 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
241 phy->attached_dev_type = NO_DEVICE;
242 if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
243 phy->phy_id = phy_id;
244 goto skip;
245 } else
246 goto out;
247 }
248
238 phy->attached_dev_type = to_dev_type(dr); 249 phy->attached_dev_type = to_dev_type(dr);
239 if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) 250 if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
240 goto out; 251 goto out;
@@ -272,6 +283,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
272 phy->phy->maximum_linkrate = dr->pmax_linkrate; 283 phy->phy->maximum_linkrate = dr->pmax_linkrate;
273 phy->phy->negotiated_linkrate = phy->linkrate; 284 phy->phy->negotiated_linkrate = phy->linkrate;
274 285
286 skip:
275 if (new_phy) 287 if (new_phy)
276 if (sas_phy_add(phy->phy)) { 288 if (sas_phy_add(phy->phy)) {
277 sas_phy_free(phy->phy); 289 sas_phy_free(phy->phy);
@@ -388,7 +400,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single)
388 if (!disc_req) 400 if (!disc_req)
389 return -ENOMEM; 401 return -ENOMEM;
390 402
391 disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE); 403 disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
392 if (!disc_resp) { 404 if (!disc_resp) {
393 kfree(disc_req); 405 kfree(disc_req);
394 return -ENOMEM; 406 return -ENOMEM;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 74b67d98e952..d43faf34c1e2 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -438,11 +438,12 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
438 struct lpfc_rqe *temp_hrqe; 438 struct lpfc_rqe *temp_hrqe;
439 struct lpfc_rqe *temp_drqe; 439 struct lpfc_rqe *temp_drqe;
440 struct lpfc_register doorbell; 440 struct lpfc_register doorbell;
441 int put_index = hq->host_index; 441 int put_index;
442 442
443 /* sanity check on queue memory */ 443 /* sanity check on queue memory */
444 if (unlikely(!hq) || unlikely(!dq)) 444 if (unlikely(!hq) || unlikely(!dq))
445 return -ENOMEM; 445 return -ENOMEM;
446 put_index = hq->host_index;
446 temp_hrqe = hq->qe[hq->host_index].rqe; 447 temp_hrqe = hq->qe[hq->host_index].rqe;
447 temp_drqe = dq->qe[dq->host_index].rqe; 448 temp_drqe = dq->qe[dq->host_index].rqe;
448 449
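The lpfc hunk fixes an order-of-operations bug: the old code dereferenced hq in the initializer of put_index before the NULL sanity check ran. A minimal stand-alone illustration of the two shapes (types and names are illustrative):

#include <errno.h>

struct queue { int host_index; };

/* Buggy shape: the initializer dereferences hq before the NULL check. */
static int put_buggy(struct queue *hq)
{
	int put_index = hq->host_index;	/* crashes if hq == NULL */

	if (!hq)
		return -ENOMEM;		/* too late */
	return put_index;
}

/* Fixed shape: declare first, check, then dereference. */
static int put_fixed(struct queue *hq)
{
	int put_index;

	if (!hq)
		return -ENOMEM;
	put_index = hq->host_index;
	return put_index;
}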
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1d82eef4e1eb..b3db9dcc2619 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1938,11 +1938,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1938 "Timer for the VP[%d] has stopped\n", vha->vp_idx); 1938 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
1939 } 1939 }
1940 1940
1941 /* No pending activities shall be there on the vha now */
1942 if (ql2xextended_error_logging & ql_dbg_user)
1943 msleep(random32()%10); /* Just to see if something falls on
1944 * the net we have placed below */
1945
1946 BUG_ON(atomic_read(&vha->vref_count)); 1941 BUG_ON(atomic_read(&vha->vref_count));
1947 1942
1948 qla2x00_free_fcports(vha); 1943 qla2x00_free_fcports(vha);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 1626de52e32a..fbc305f1c87c 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -15,6 +15,7 @@
15 * | Mailbox commands | 0x115b | 0x111a-0x111b | 15 * | Mailbox commands | 0x115b | 0x111a-0x111b |
16 * | | | 0x112c-0x112e | 16 * | | | 0x112c-0x112e |
17 * | | | 0x113a | 17 * | | | 0x113a |
18 * | | | 0x1155-0x1158 |
18 * | Device Discovery | 0x2087 | 0x2020-0x2022, | 19 * | Device Discovery | 0x2087 | 0x2020-0x2022, |
19 * | | | 0x2016 | 20 * | | | 0x2016 |
20 * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b | 21 * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b |
@@ -401,7 +402,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
401 void *ring; 402 void *ring;
402 } aq, *aqp; 403 } aq, *aqp;
403 404
404 if (!ha->tgt.atio_q_length) 405 if (!ha->tgt.atio_ring)
405 return ptr; 406 return ptr;
406 407
407 num_queues = 1; 408 num_queues = 1;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index c6509911772b..65c5ff75936b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -863,7 +863,6 @@ typedef struct {
863#define MBX_1 BIT_1 863#define MBX_1 BIT_1
864#define MBX_0 BIT_0 864#define MBX_0 BIT_0
865 865
866#define RNID_TYPE_SET_VERSION 0x9
867#define RNID_TYPE_ASIC_TEMP 0xC 866#define RNID_TYPE_ASIC_TEMP 0xC
868 867
869/* 868/*
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index eb3ca21a7f17..b310fa97b545 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -358,9 +358,6 @@ extern int
358qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); 358qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
359 359
360extern int 360extern int
361qla2x00_set_driver_version(scsi_qla_host_t *, char *);
362
363extern int
364qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, 361qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
365 uint16_t, uint16_t, uint16_t, uint16_t); 362 uint16_t, uint16_t, uint16_t, uint16_t);
366 363
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index edf4d14a1335..b59203393cb2 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -619,8 +619,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
619 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) 619 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
620 qla24xx_read_fcp_prio_cfg(vha); 620 qla24xx_read_fcp_prio_cfg(vha);
621 621
622 qla2x00_set_driver_version(vha, QLA2XXX_VERSION);
623
624 return (rval); 622 return (rval);
625} 623}
626 624
@@ -1399,7 +1397,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1399 mq_size += ha->max_rsp_queues * 1397 mq_size += ha->max_rsp_queues *
1400 (rsp->length * sizeof(response_t)); 1398 (rsp->length * sizeof(response_t));
1401 } 1399 }
1402 if (ha->tgt.atio_q_length) 1400 if (ha->tgt.atio_ring)
1403 mq_size += ha->tgt.atio_q_length * sizeof(request_t); 1401 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
1404 /* Allocate memory for Fibre Channel Event Buffer. */ 1402 /* Allocate memory for Fibre Channel Event Buffer. */
1405 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 1403 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 186dd59ce4fa..43345af56431 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3866,64 +3866,6 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3866 return rval; 3866 return rval;
3867} 3867}
3868 3868
3869int
3870qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version)
3871{
3872 int rval;
3873 mbx_cmd_t mc;
3874 mbx_cmd_t *mcp = &mc;
3875 int len;
3876 uint16_t dwlen;
3877 uint8_t *str;
3878 dma_addr_t str_dma;
3879 struct qla_hw_data *ha = vha->hw;
3880
3881 if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha))
3882 return QLA_FUNCTION_FAILED;
3883
3884 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155,
3885 "Entered %s.\n", __func__);
3886
3887 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
3888 if (!str) {
3889 ql_log(ql_log_warn, vha, 0x1156,
3890 "Failed to allocate driver version param.\n");
3891 return QLA_MEMORY_ALLOC_FAILED;
3892 }
3893
3894 memcpy(str, "\x7\x3\x11\x0", 4);
3895 dwlen = str[0];
3896 len = dwlen * sizeof(uint32_t) - 4;
3897 memset(str + 4, 0, len);
3898 if (len > strlen(version))
3899 len = strlen(version);
3900 memcpy(str + 4, version, len);
3901
3902 mcp->mb[0] = MBC_SET_RNID_PARAMS;
3903 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
3904 mcp->mb[2] = MSW(LSD(str_dma));
3905 mcp->mb[3] = LSW(LSD(str_dma));
3906 mcp->mb[6] = MSW(MSD(str_dma));
3907 mcp->mb[7] = LSW(MSD(str_dma));
3908 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3909 mcp->in_mb = MBX_0;
3910 mcp->tov = MBX_TOV_SECONDS;
3911 mcp->flags = 0;
3912 rval = qla2x00_mailbox_command(vha, mcp);
3913
3914 if (rval != QLA_SUCCESS) {
3915 ql_dbg(ql_dbg_mbx, vha, 0x1157,
3916 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3917 } else {
3918 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158,
3919 "Done %s.\n", __func__);
3920 }
3921
3922 dma_pool_free(ha->s_dma_pool, str, str_dma);
3923
3924 return rval;
3925}
3926
3927static int 3869static int
3928qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 3870qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
3929{ 3871{
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 2b6e478d9e33..ec54036d1e12 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.04.00.08-k" 10#define QLA2XXX_VERSION "8.04.00.13-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 4 13#define QLA_DRIVER_MINOR_VER 4
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 86974471af68..2a32036a9404 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4112,6 +4112,10 @@ static int st_probe(struct device *dev)
4112 tpnt->disk = disk; 4112 tpnt->disk = disk;
4113 disk->private_data = &tpnt->driver; 4113 disk->private_data = &tpnt->driver;
4114 disk->queue = SDp->request_queue; 4114 disk->queue = SDp->request_queue;
4115 /* SCSI tape doesn't register this gendisk via add_disk(). Manually
4116 * take queue reference that release_disk() expects. */
4117 if (!blk_get_queue(disk->queue))
4118 goto out_put_disk;
4115 tpnt->driver = &st_template; 4119 tpnt->driver = &st_template;
4116 4120
4117 tpnt->device = SDp; 4121 tpnt->device = SDp;
@@ -4185,7 +4189,7 @@ static int st_probe(struct device *dev)
4185 idr_preload_end(); 4189 idr_preload_end();
4186 if (error < 0) { 4190 if (error < 0) {
4187 pr_warn("st: idr allocation failed: %d\n", error); 4191 pr_warn("st: idr allocation failed: %d\n", error);
4188 goto out_put_disk; 4192 goto out_put_queue;
4189 } 4193 }
4190 tpnt->index = error; 4194 tpnt->index = error;
4191 sprintf(disk->disk_name, "st%d", tpnt->index); 4195 sprintf(disk->disk_name, "st%d", tpnt->index);
@@ -4211,6 +4215,8 @@ out_remove_devs:
4211 spin_lock(&st_index_lock); 4215 spin_lock(&st_index_lock);
4212 idr_remove(&st_index_idr, tpnt->index); 4216 idr_remove(&st_index_idr, tpnt->index);
4213 spin_unlock(&st_index_lock); 4217 spin_unlock(&st_index_lock);
4218out_put_queue:
4219 blk_put_queue(disk->queue);
4214out_put_disk: 4220out_put_disk:
4215 put_disk(disk); 4221 put_disk(disk);
4216 kfree(tpnt); 4222 kfree(tpnt);
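Because the tape gendisk is never registered through add_disk(), releasing it would drop a queue reference that was never taken; st_probe() therefore takes one explicitly with blk_get_queue(), and the new error label drops it with blk_put_queue(). A stand-alone sketch of the symmetric get/put bookkeeping this relies on (a plain reference counter standing in for the block-layer one):

#include <stdbool.h>

struct queue {
	int  refcount;
	bool dying;
};

/* Mirrors blk_get_queue(): refuses to take a reference on a dying queue. */
static bool queue_get(struct queue *q)
{
	if (q->dying)
		return false;
	q->refcount++;
	return true;
}

/* Mirrors blk_put_queue(): every successful get needs exactly one put. */
static void queue_put(struct queue *q)
{
	q->refcount--;		/* the real queue is freed when this hits zero */
}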
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index ff1c5ee352cb..cbe48ab41745 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -409,6 +409,7 @@ static inline int core_alua_state_standby(
409 case REPORT_LUNS: 409 case REPORT_LUNS:
410 case RECEIVE_DIAGNOSTIC: 410 case RECEIVE_DIAGNOSTIC:
411 case SEND_DIAGNOSTIC: 411 case SEND_DIAGNOSTIC:
412 return 0;
412 case MAINTENANCE_IN: 413 case MAINTENANCE_IN:
413 switch (cdb[1] & 0x1f) { 414 switch (cdb[1] & 0x1f) {
414 case MI_REPORT_TARGET_PGS: 415 case MI_REPORT_TARGET_PGS:
@@ -451,6 +452,7 @@ static inline int core_alua_state_unavailable(
451 switch (cdb[0]) { 452 switch (cdb[0]) {
452 case INQUIRY: 453 case INQUIRY:
453 case REPORT_LUNS: 454 case REPORT_LUNS:
455 return 0;
454 case MAINTENANCE_IN: 456 case MAINTENANCE_IN:
455 switch (cdb[1] & 0x1f) { 457 switch (cdb[1] & 0x1f) {
456 case MI_REPORT_TARGET_PGS: 458 case MI_REPORT_TARGET_PGS:
@@ -491,6 +493,7 @@ static inline int core_alua_state_transition(
491 switch (cdb[0]) { 493 switch (cdb[0]) {
492 case INQUIRY: 494 case INQUIRY:
493 case REPORT_LUNS: 495 case REPORT_LUNS:
496 return 0;
494 case MAINTENANCE_IN: 497 case MAINTENANCE_IN:
495 switch (cdb[1] & 0x1f) { 498 switch (cdb[1] & 0x1f) {
496 case MI_REPORT_TARGET_PGS: 499 case MI_REPORT_TARGET_PGS:
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 484b6a3c9b03..302909ccf183 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -2643,9 +2643,9 @@ static int mxser_probe(struct pci_dev *pdev,
2643 mxvar_sdriver, brd->idx + i, &pdev->dev); 2643 mxvar_sdriver, brd->idx + i, &pdev->dev);
2644 if (IS_ERR(tty_dev)) { 2644 if (IS_ERR(tty_dev)) {
2645 retval = PTR_ERR(tty_dev); 2645 retval = PTR_ERR(tty_dev);
2646 for (i--; i >= 0; i--) 2646 for (; i > 0; i--)
2647 tty_unregister_device(mxvar_sdriver, 2647 tty_unregister_device(mxvar_sdriver,
2648 brd->idx + i); 2648 brd->idx + i - 1);
2649 goto err_relbrd; 2649 goto err_relbrd;
2650 } 2650 }
2651 } 2651 }
@@ -2751,9 +2751,9 @@ static int __init mxser_module_init(void)
2751 tty_dev = tty_port_register_device(&brd->ports[i].port, 2751 tty_dev = tty_port_register_device(&brd->ports[i].port,
2752 mxvar_sdriver, brd->idx + i, NULL); 2752 mxvar_sdriver, brd->idx + i, NULL);
2753 if (IS_ERR(tty_dev)) { 2753 if (IS_ERR(tty_dev)) {
2754 for (i--; i >= 0; i--) 2754 for (; i > 0; i--)
2755 tty_unregister_device(mxvar_sdriver, 2755 tty_unregister_device(mxvar_sdriver,
2756 brd->idx + i); 2756 brd->idx + i - 1);
2757 for (i = 0; i < brd->info->nports; i++) 2757 for (i = 0; i < brd->info->nports; i++)
2758 tty_port_destroy(&brd->ports[i].port); 2758 tty_port_destroy(&brd->ports[i].port);
2759 free_irq(brd->irq, brd); 2759 free_irq(brd->irq, brd);
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index b3455a970a1d..35d9ab95c5cb 100644
--- a/drivers/tty/serial/8250/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
@@ -429,7 +429,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
429{ 429{
430 struct uart_8250_port uart; 430 struct uart_8250_port uart;
431 int ret, line, flags = dev_id->driver_data; 431 int ret, line, flags = dev_id->driver_data;
432 struct resource *res = NULL;
433 432
434 if (flags & UNKNOWN_DEV) { 433 if (flags & UNKNOWN_DEV) {
435 ret = serial_pnp_guess_board(dev); 434 ret = serial_pnp_guess_board(dev);
@@ -440,12 +439,11 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
440 memset(&uart, 0, sizeof(uart)); 439 memset(&uart, 0, sizeof(uart));
441 if (pnp_irq_valid(dev, 0)) 440 if (pnp_irq_valid(dev, 0))
442 uart.port.irq = pnp_irq(dev, 0); 441 uart.port.irq = pnp_irq(dev, 0);
443 if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) 442 if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) {
444 res = pnp_get_resource(dev, IORESOURCE_IO, 2); 443 uart.port.iobase = pnp_port_start(dev, 2);
445 else if (pnp_port_valid(dev, 0)) 444 uart.port.iotype = UPIO_PORT;
446 res = pnp_get_resource(dev, IORESOURCE_IO, 0); 445 } else if (pnp_port_valid(dev, 0)) {
447 if (pnp_resource_enabled(res)) { 446 uart.port.iobase = pnp_port_start(dev, 0);
448 uart.port.iobase = res->start;
449 uart.port.iotype = UPIO_PORT; 447 uart.port.iotype = UPIO_PORT;
450 } else if (pnp_mem_valid(dev, 0)) { 448 } else if (pnp_mem_valid(dev, 0)) {
451 uart.port.mapbase = pnp_mem_start(dev, 0); 449 uart.port.mapbase = pnp_mem_start(dev, 0);
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 4dc41408ecb7..30d4f7a783cd 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -886,6 +886,17 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
886 serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR); 886 serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
887 /* FIFO ENABLE, DMA MODE */ 887 /* FIFO ENABLE, DMA MODE */
888 888
889 up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
890 /*
891 * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK
 892 * enables a granularity of 1 for the RX TRIGGER
 893 * level. Together with an RX FIFO trigger level of
 894 * 1 (described in the comment below as 16 characters)
 895 * and TLR[3:0] set to zero, this results in an RX FIFO
 896 * threshold of 1 character instead of the 16 noted in
 897 * the comment below.
898 */
899
889 /* Set receive FIFO threshold to 16 characters and 900 /* Set receive FIFO threshold to 16 characters and
890 * transmit FIFO threshold to 16 spaces 901 * transmit FIFO threshold to 16 spaces
891 */ 902 */
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 8189cb6a86af..7abc5c81af2c 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -346,6 +346,7 @@ static long vfio_pci_ioctl(void *device_data,
346 346
347 if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { 347 if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
348 size_t size; 348 size_t size;
349 int max = vfio_pci_get_irq_count(vdev, hdr.index);
349 350
350 if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) 351 if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
351 size = sizeof(uint8_t); 352 size = sizeof(uint8_t);
@@ -355,7 +356,7 @@ static long vfio_pci_ioctl(void *device_data,
355 return -EINVAL; 356 return -EINVAL;
356 357
357 if (hdr.argsz - minsz < hdr.count * size || 358 if (hdr.argsz - minsz < hdr.count * size ||
358 hdr.count > vfio_pci_get_irq_count(vdev, hdr.index)) 359 hdr.start >= max || hdr.start + hdr.count > max)
359 return -EINVAL; 360 return -EINVAL;
360 361
361 data = memdup_user((void __user *)(arg + minsz), 362 data = memdup_user((void __user *)(arg + minsz),
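The vfio-pci check now validates the start of the requested IRQ range as well as its end against the number of IRQs the index actually has. A stand-alone form of the strengthened range check (written to avoid the start + count overflow; the patched code uses the direct start + count > max comparison):

#include <stdbool.h>
#include <stdint.h>

static bool irq_range_valid(uint32_t start, uint32_t count, uint32_t max)
{
	if (start >= max)
		return false;		/* range starts past the last IRQ */
	if (count > max - start)
		return false;		/* range runs past the last IRQ   */
	return true;
}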
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 2968b4934659..957a0b98a5d9 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -74,9 +74,8 @@ enum {
74 74
75struct vhost_scsi { 75struct vhost_scsi {
76 /* Protected by vhost_scsi->dev.mutex */ 76 /* Protected by vhost_scsi->dev.mutex */
77 struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET]; 77 struct tcm_vhost_tpg **vs_tpg;
78 char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; 78 char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
79 bool vs_endpoint;
80 79
81 struct vhost_dev dev; 80 struct vhost_dev dev;
82 struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ]; 81 struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
@@ -579,9 +578,27 @@ static void tcm_vhost_submission_work(struct work_struct *work)
579 } 578 }
580} 579}
581 580
581static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
582 struct vhost_virtqueue *vq, int head, unsigned out)
583{
584 struct virtio_scsi_cmd_resp __user *resp;
585 struct virtio_scsi_cmd_resp rsp;
586 int ret;
587
588 memset(&rsp, 0, sizeof(rsp));
589 rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
590 resp = vq->iov[out].iov_base;
591 ret = __copy_to_user(resp, &rsp, sizeof(rsp));
592 if (!ret)
593 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
594 else
595 pr_err("Faulted on virtio_scsi_cmd_resp\n");
596}
597
582static void vhost_scsi_handle_vq(struct vhost_scsi *vs, 598static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
583 struct vhost_virtqueue *vq) 599 struct vhost_virtqueue *vq)
584{ 600{
601 struct tcm_vhost_tpg **vs_tpg;
585 struct virtio_scsi_cmd_req v_req; 602 struct virtio_scsi_cmd_req v_req;
586 struct tcm_vhost_tpg *tv_tpg; 603 struct tcm_vhost_tpg *tv_tpg;
587 struct tcm_vhost_cmd *tv_cmd; 604 struct tcm_vhost_cmd *tv_cmd;
@@ -590,8 +607,16 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
590 int head, ret; 607 int head, ret;
591 u8 target; 608 u8 target;
592 609
593 /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ 610 /*
594 if (unlikely(!vs->vs_endpoint)) 611 * We can handle the vq only after the endpoint is setup by calling the
612 * VHOST_SCSI_SET_ENDPOINT ioctl.
613 *
614 * TODO: Check that we are running from vhost_worker which acts
615 * as read-side critical section for vhost kind of RCU.
616 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
617 */
618 vs_tpg = rcu_dereference_check(vq->private_data, 1);
619 if (!vs_tpg)
595 return; 620 return;
596 621
597 mutex_lock(&vq->mutex); 622 mutex_lock(&vq->mutex);
@@ -661,23 +686,11 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
661 686
662 /* Extract the tpgt */ 687 /* Extract the tpgt */
663 target = v_req.lun[1]; 688 target = v_req.lun[1];
664 tv_tpg = vs->vs_tpg[target]; 689 tv_tpg = ACCESS_ONCE(vs_tpg[target]);
665 690
666 /* Target does not exist, fail the request */ 691 /* Target does not exist, fail the request */
667 if (unlikely(!tv_tpg)) { 692 if (unlikely(!tv_tpg)) {
668 struct virtio_scsi_cmd_resp __user *resp; 693 vhost_scsi_send_bad_target(vs, vq, head, out);
669 struct virtio_scsi_cmd_resp rsp;
670
671 memset(&rsp, 0, sizeof(rsp));
672 rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
673 resp = vq->iov[out].iov_base;
674 ret = __copy_to_user(resp, &rsp, sizeof(rsp));
675 if (!ret)
676 vhost_add_used_and_signal(&vs->dev,
677 vq, head, 0);
678 else
679 pr_err("Faulted on virtio_scsi_cmd_resp\n");
680
681 continue; 694 continue;
682 } 695 }
683 696
@@ -690,22 +703,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
690 if (IS_ERR(tv_cmd)) { 703 if (IS_ERR(tv_cmd)) {
691 vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", 704 vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
692 PTR_ERR(tv_cmd)); 705 PTR_ERR(tv_cmd));
693 break; 706 goto err_cmd;
694 } 707 }
695 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" 708 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
696 ": %d\n", tv_cmd, exp_data_len, data_direction); 709 ": %d\n", tv_cmd, exp_data_len, data_direction);
697 710
698 tv_cmd->tvc_vhost = vs; 711 tv_cmd->tvc_vhost = vs;
699 tv_cmd->tvc_vq = vq; 712 tv_cmd->tvc_vq = vq;
700
701 if (unlikely(vq->iov[out].iov_len !=
702 sizeof(struct virtio_scsi_cmd_resp))) {
703 vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
704 " bytes, out: %d, in: %d\n",
705 vq->iov[out].iov_len, out, in);
706 break;
707 }
708
709 tv_cmd->tvc_resp = vq->iov[out].iov_base; 713 tv_cmd->tvc_resp = vq->iov[out].iov_base;
710 714
711 /* 715 /*
@@ -725,7 +729,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
725 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 729 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
726 scsi_command_size(tv_cmd->tvc_cdb), 730 scsi_command_size(tv_cmd->tvc_cdb),
727 TCM_VHOST_MAX_CDB_SIZE); 731 TCM_VHOST_MAX_CDB_SIZE);
728 break; /* TODO */ 732 goto err_free;
729 } 733 }
730 tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; 734 tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
731 735
@@ -738,7 +742,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
738 data_direction == DMA_TO_DEVICE); 742 data_direction == DMA_TO_DEVICE);
739 if (unlikely(ret)) { 743 if (unlikely(ret)) {
740 vq_err(vq, "Failed to map iov to sgl\n"); 744 vq_err(vq, "Failed to map iov to sgl\n");
741 break; /* TODO */ 745 goto err_free;
742 } 746 }
743 } 747 }
744 748
@@ -759,6 +763,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
759 } 763 }
760 764
761 mutex_unlock(&vq->mutex); 765 mutex_unlock(&vq->mutex);
766 return;
767
768err_free:
769 vhost_scsi_free_cmd(tv_cmd);
770err_cmd:
771 vhost_scsi_send_bad_target(vs, vq, head, out);
772 mutex_unlock(&vq->mutex);
762} 773}
763 774
764static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) 775static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
@@ -780,6 +791,20 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
780 vhost_scsi_handle_vq(vs, vq); 791 vhost_scsi_handle_vq(vs, vq);
781} 792}
782 793
794static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
795{
796 vhost_poll_flush(&vs->dev.vqs[index].poll);
797}
798
799static void vhost_scsi_flush(struct vhost_scsi *vs)
800{
801 int i;
802
803 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
804 vhost_scsi_flush_vq(vs, i);
805 vhost_work_flush(&vs->dev, &vs->vs_completion_work);
806}
807
783/* 808/*
784 * Called from vhost_scsi_ioctl() context to walk the list of available 809 * Called from vhost_scsi_ioctl() context to walk the list of available
785 * tcm_vhost_tpg with an active struct tcm_vhost_nexus 810 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
@@ -790,8 +815,10 @@ static int vhost_scsi_set_endpoint(
790{ 815{
791 struct tcm_vhost_tport *tv_tport; 816 struct tcm_vhost_tport *tv_tport;
792 struct tcm_vhost_tpg *tv_tpg; 817 struct tcm_vhost_tpg *tv_tpg;
818 struct tcm_vhost_tpg **vs_tpg;
819 struct vhost_virtqueue *vq;
820 int index, ret, i, len;
793 bool match = false; 821 bool match = false;
794 int index, ret;
795 822
796 mutex_lock(&vs->dev.mutex); 823 mutex_lock(&vs->dev.mutex);
797 /* Verify that ring has been setup correctly. */ 824 /* Verify that ring has been setup correctly. */
@@ -803,6 +830,15 @@ static int vhost_scsi_set_endpoint(
803 } 830 }
804 } 831 }
805 832
833 len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
834 vs_tpg = kzalloc(len, GFP_KERNEL);
835 if (!vs_tpg) {
836 mutex_unlock(&vs->dev.mutex);
837 return -ENOMEM;
838 }
839 if (vs->vs_tpg)
840 memcpy(vs_tpg, vs->vs_tpg, len);
841
806 mutex_lock(&tcm_vhost_mutex); 842 mutex_lock(&tcm_vhost_mutex);
807 list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { 843 list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
808 mutex_lock(&tv_tpg->tv_tpg_mutex); 844 mutex_lock(&tv_tpg->tv_tpg_mutex);
@@ -817,14 +853,15 @@ static int vhost_scsi_set_endpoint(
817 tv_tport = tv_tpg->tport; 853 tv_tport = tv_tpg->tport;
818 854
819 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { 855 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
820 if (vs->vs_tpg[tv_tpg->tport_tpgt]) { 856 if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
821 mutex_unlock(&tv_tpg->tv_tpg_mutex); 857 mutex_unlock(&tv_tpg->tv_tpg_mutex);
822 mutex_unlock(&tcm_vhost_mutex); 858 mutex_unlock(&tcm_vhost_mutex);
823 mutex_unlock(&vs->dev.mutex); 859 mutex_unlock(&vs->dev.mutex);
860 kfree(vs_tpg);
824 return -EEXIST; 861 return -EEXIST;
825 } 862 }
826 tv_tpg->tv_tpg_vhost_count++; 863 tv_tpg->tv_tpg_vhost_count++;
827 vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; 864 vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
828 smp_mb__after_atomic_inc(); 865 smp_mb__after_atomic_inc();
829 match = true; 866 match = true;
830 } 867 }
@@ -835,12 +872,27 @@ static int vhost_scsi_set_endpoint(
835 if (match) { 872 if (match) {
836 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, 873 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
837 sizeof(vs->vs_vhost_wwpn)); 874 sizeof(vs->vs_vhost_wwpn));
838 vs->vs_endpoint = true; 875 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
876 vq = &vs->vqs[i];
877 /* Flushing the vhost_work acts as synchronize_rcu */
878 mutex_lock(&vq->mutex);
879 rcu_assign_pointer(vq->private_data, vs_tpg);
880 vhost_init_used(vq);
881 mutex_unlock(&vq->mutex);
882 }
839 ret = 0; 883 ret = 0;
840 } else { 884 } else {
841 ret = -EEXIST; 885 ret = -EEXIST;
842 } 886 }
843 887
888 /*
889 * Act as synchronize_rcu to make sure access to
890 * old vs->vs_tpg is finished.
891 */
892 vhost_scsi_flush(vs);
893 kfree(vs->vs_tpg);
894 vs->vs_tpg = vs_tpg;
895
844 mutex_unlock(&vs->dev.mutex); 896 mutex_unlock(&vs->dev.mutex);
845 return ret; 897 return ret;
846} 898}
@@ -851,6 +903,8 @@ static int vhost_scsi_clear_endpoint(
851{ 903{
852 struct tcm_vhost_tport *tv_tport; 904 struct tcm_vhost_tport *tv_tport;
853 struct tcm_vhost_tpg *tv_tpg; 905 struct tcm_vhost_tpg *tv_tpg;
906 struct vhost_virtqueue *vq;
907 bool match = false;
854 int index, ret, i; 908 int index, ret, i;
855 u8 target; 909 u8 target;
856 910
@@ -862,9 +916,14 @@ static int vhost_scsi_clear_endpoint(
862 goto err_dev; 916 goto err_dev;
863 } 917 }
864 } 918 }
919
920 if (!vs->vs_tpg) {
921 mutex_unlock(&vs->dev.mutex);
922 return 0;
923 }
924
865 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { 925 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
866 target = i; 926 target = i;
867
868 tv_tpg = vs->vs_tpg[target]; 927 tv_tpg = vs->vs_tpg[target];
869 if (!tv_tpg) 928 if (!tv_tpg)
870 continue; 929 continue;
@@ -886,10 +945,27 @@ static int vhost_scsi_clear_endpoint(
886 } 945 }
887 tv_tpg->tv_tpg_vhost_count--; 946 tv_tpg->tv_tpg_vhost_count--;
888 vs->vs_tpg[target] = NULL; 947 vs->vs_tpg[target] = NULL;
889 vs->vs_endpoint = false; 948 match = true;
890 mutex_unlock(&tv_tpg->tv_tpg_mutex); 949 mutex_unlock(&tv_tpg->tv_tpg_mutex);
891 } 950 }
951 if (match) {
952 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
953 vq = &vs->vqs[i];
954 /* Flushing the vhost_work acts as synchronize_rcu */
955 mutex_lock(&vq->mutex);
956 rcu_assign_pointer(vq->private_data, NULL);
957 mutex_unlock(&vq->mutex);
958 }
959 }
960 /*
961 * Act as synchronize_rcu to make sure access to
962 * old vs->vs_tpg is finished.
963 */
964 vhost_scsi_flush(vs);
965 kfree(vs->vs_tpg);
966 vs->vs_tpg = NULL;
892 mutex_unlock(&vs->dev.mutex); 967 mutex_unlock(&vs->dev.mutex);
968
893 return 0; 969 return 0;
894 970
895err_tpg: 971err_tpg:
@@ -899,6 +975,24 @@ err_dev:
899 return ret; 975 return ret;
900} 976}
901 977
978static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
979{
980 if (features & ~VHOST_SCSI_FEATURES)
981 return -EOPNOTSUPP;
982
983 mutex_lock(&vs->dev.mutex);
984 if ((features & (1 << VHOST_F_LOG_ALL)) &&
985 !vhost_log_access_ok(&vs->dev)) {
986 mutex_unlock(&vs->dev.mutex);
987 return -EFAULT;
988 }
989 vs->dev.acked_features = features;
990 smp_wmb();
991 vhost_scsi_flush(vs);
992 mutex_unlock(&vs->dev.mutex);
993 return 0;
994}
995
902static int vhost_scsi_open(struct inode *inode, struct file *f) 996static int vhost_scsi_open(struct inode *inode, struct file *f)
903{ 997{
904 struct vhost_scsi *s; 998 struct vhost_scsi *s;
@@ -939,38 +1033,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
939 return 0; 1033 return 0;
940} 1034}
941 1035
942static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
943{
944 vhost_poll_flush(&vs->dev.vqs[index].poll);
945}
946
947static void vhost_scsi_flush(struct vhost_scsi *vs)
948{
949 int i;
950
951 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
952 vhost_scsi_flush_vq(vs, i);
953 vhost_work_flush(&vs->dev, &vs->vs_completion_work);
954}
955
956static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
957{
958 if (features & ~VHOST_SCSI_FEATURES)
959 return -EOPNOTSUPP;
960
961 mutex_lock(&vs->dev.mutex);
962 if ((features & (1 << VHOST_F_LOG_ALL)) &&
963 !vhost_log_access_ok(&vs->dev)) {
964 mutex_unlock(&vs->dev.mutex);
965 return -EFAULT;
966 }
967 vs->dev.acked_features = features;
968 smp_wmb();
969 vhost_scsi_flush(vs);
970 mutex_unlock(&vs->dev.mutex);
971 return 0;
972}
973
974static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, 1036static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
975 unsigned long arg) 1037 unsigned long arg)
976{ 1038{
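
The tcm_vhost changes above drop the vs_endpoint flag and the fixed vs_tpg[] array in favour of a heap-allocated table published through vq->private_data: the I/O path reads it with rcu_dereference_check(), set/clear endpoint swap it with rcu_assign_pointer() under the vq mutex, and vhost_scsi_flush() stands in for synchronize_rcu() before the old table is freed. A minimal, generic sketch of that publish/read/retire pattern follows; the names are illustrative, not the driver's.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct table { int nr; };

static struct table __rcu *active_tbl;

/* reader side: runs inside an RCU read-side critical section */
static int reader(void)
{
	struct table *t;
	int nr = 0;

	rcu_read_lock();
	t = rcu_dereference(active_tbl);
	if (t)
		nr = t->nr;
	rcu_read_unlock();
	return nr;
}

/* writer side: publish a new table, wait for readers, free the old one */
static int update(int nr)
{
	struct table *newt, *old;

	newt = kzalloc(sizeof(*newt), GFP_KERNEL);
	if (!newt)
		return -ENOMEM;
	newt->nr = nr;

	old = rcu_dereference_protected(active_tbl, 1);
	rcu_assign_pointer(active_tbl, newt);
	synchronize_rcu();	/* tcm_vhost uses vhost_scsi_flush() here */
	kfree(old);
	return 0;
}

In the driver the reader runs from the vhost worker thread, which is why flushing that work is a sufficient grace period.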
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 9fcc70c11cea..e89fc3133972 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -117,7 +117,7 @@ config ARM_SP805_WATCHDOG
117 117
118config AT91RM9200_WATCHDOG 118config AT91RM9200_WATCHDOG
119 tristate "AT91RM9200 watchdog" 119 tristate "AT91RM9200 watchdog"
120 depends on ARCH_AT91 120 depends on ARCH_AT91RM9200
121 help 121 help
122 Watchdog timer embedded into AT91RM9200 chips. This will reboot your 122 Watchdog timer embedded into AT91RM9200 chips. This will reboot your
123 system when the timeout is reached. 123 system when the timeout is reached.
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index aa85881d17b2..2647ad8e1f19 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -1316,7 +1316,7 @@ static void __xen_evtchn_do_upcall(void)
1316{ 1316{
1317 int start_word_idx, start_bit_idx; 1317 int start_word_idx, start_bit_idx;
1318 int word_idx, bit_idx; 1318 int word_idx, bit_idx;
1319 int i; 1319 int i, irq;
1320 int cpu = get_cpu(); 1320 int cpu = get_cpu();
1321 struct shared_info *s = HYPERVISOR_shared_info; 1321 struct shared_info *s = HYPERVISOR_shared_info;
1322 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); 1322 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
@@ -1324,6 +1324,8 @@ static void __xen_evtchn_do_upcall(void)
1324 1324
1325 do { 1325 do {
1326 xen_ulong_t pending_words; 1326 xen_ulong_t pending_words;
1327 xen_ulong_t pending_bits;
1328 struct irq_desc *desc;
1327 1329
1328 vcpu_info->evtchn_upcall_pending = 0; 1330 vcpu_info->evtchn_upcall_pending = 0;
1329 1331
@@ -1335,6 +1337,17 @@ static void __xen_evtchn_do_upcall(void)
1335 * selector flag. xchg_xen_ulong must contain an 1337 * selector flag. xchg_xen_ulong must contain an
1336 * appropriate barrier. 1338 * appropriate barrier.
1337 */ 1339 */
1340 if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
1341 int evtchn = evtchn_from_irq(irq);
1342 word_idx = evtchn / BITS_PER_LONG;
1343 pending_bits = evtchn % BITS_PER_LONG;
1344 if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
1345 desc = irq_to_desc(irq);
1346 if (desc)
1347 generic_handle_irq_desc(irq, desc);
1348 }
1349 }
1350
1338 pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0); 1351 pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
1339 1352
1340 start_word_idx = __this_cpu_read(current_word_idx); 1353 start_word_idx = __this_cpu_read(current_word_idx);
@@ -1343,7 +1356,6 @@ static void __xen_evtchn_do_upcall(void)
1343 word_idx = start_word_idx; 1356 word_idx = start_word_idx;
1344 1357
1345 for (i = 0; pending_words != 0; i++) { 1358 for (i = 0; pending_words != 0; i++) {
1346 xen_ulong_t pending_bits;
1347 xen_ulong_t words; 1359 xen_ulong_t words;
1348 1360
1349 words = MASK_LSBS(pending_words, word_idx); 1361 words = MASK_LSBS(pending_words, word_idx);
@@ -1372,8 +1384,7 @@ static void __xen_evtchn_do_upcall(void)
1372 1384
1373 do { 1385 do {
1374 xen_ulong_t bits; 1386 xen_ulong_t bits;
1375 int port, irq; 1387 int port;
1376 struct irq_desc *desc;
1377 1388
1378 bits = MASK_LSBS(pending_bits, bit_idx); 1389 bits = MASK_LSBS(pending_bits, bit_idx);
1379 1390
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 451fad96ecd1..ef96381569a4 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -317,6 +317,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
317 unsigned long src_ptr; 317 unsigned long src_ptr;
318 unsigned long dst_ptr; 318 unsigned long dst_ptr;
319 int overwrite_root = 0; 319 int overwrite_root = 0;
320 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
320 321
321 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) 322 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
322 overwrite_root = 1; 323 overwrite_root = 1;
@@ -326,6 +327,9 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
326 327
327 /* look for the key in the destination tree */ 328 /* look for the key in the destination tree */
328 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 329 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
330 if (ret < 0)
331 return ret;
332
329 if (ret == 0) { 333 if (ret == 0) {
330 char *src_copy; 334 char *src_copy;
331 char *dst_copy; 335 char *dst_copy;
@@ -367,6 +371,30 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
367 return 0; 371 return 0;
368 } 372 }
369 373
374 /*
375 * We need to load the old nbytes into the inode so when we
376 * replay the extents we've logged we get the right nbytes.
377 */
378 if (inode_item) {
379 struct btrfs_inode_item *item;
380 u64 nbytes;
381
382 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
383 struct btrfs_inode_item);
384 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
385 item = btrfs_item_ptr(eb, slot,
386 struct btrfs_inode_item);
387 btrfs_set_inode_nbytes(eb, item, nbytes);
388 }
389 } else if (inode_item) {
390 struct btrfs_inode_item *item;
391
392 /*
393 * New inode, set nbytes to 0 so that the nbytes comes out
394 * properly when we replay the extents.
395 */
396 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
397 btrfs_set_inode_nbytes(eb, item, 0);
370 } 398 }
371insert: 399insert:
372 btrfs_release_path(path); 400 btrfs_release_path(path);
@@ -486,7 +514,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
486 int found_type; 514 int found_type;
487 u64 extent_end; 515 u64 extent_end;
488 u64 start = key->offset; 516 u64 start = key->offset;
489 u64 saved_nbytes; 517 u64 nbytes = 0;
490 struct btrfs_file_extent_item *item; 518 struct btrfs_file_extent_item *item;
491 struct inode *inode = NULL; 519 struct inode *inode = NULL;
492 unsigned long size; 520 unsigned long size;
@@ -496,10 +524,19 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
496 found_type = btrfs_file_extent_type(eb, item); 524 found_type = btrfs_file_extent_type(eb, item);
497 525
498 if (found_type == BTRFS_FILE_EXTENT_REG || 526 if (found_type == BTRFS_FILE_EXTENT_REG ||
499 found_type == BTRFS_FILE_EXTENT_PREALLOC) 527 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
500 extent_end = start + btrfs_file_extent_num_bytes(eb, item); 528 nbytes = btrfs_file_extent_num_bytes(eb, item);
501 else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 529 extent_end = start + nbytes;
530
531 /*
 532 * We don't add to the inode's nbytes if we are prealloc or a
533 * hole.
534 */
535 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
536 nbytes = 0;
537 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
502 size = btrfs_file_extent_inline_len(eb, item); 538 size = btrfs_file_extent_inline_len(eb, item);
539 nbytes = btrfs_file_extent_ram_bytes(eb, item);
503 extent_end = ALIGN(start + size, root->sectorsize); 540 extent_end = ALIGN(start + size, root->sectorsize);
504 } else { 541 } else {
505 ret = 0; 542 ret = 0;
@@ -548,7 +585,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
548 } 585 }
549 btrfs_release_path(path); 586 btrfs_release_path(path);
550 587
551 saved_nbytes = inode_get_bytes(inode);
552 /* drop any overlapping extents */ 588 /* drop any overlapping extents */
553 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1); 589 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
554 BUG_ON(ret); 590 BUG_ON(ret);
@@ -635,7 +671,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
635 BUG_ON(ret); 671 BUG_ON(ret);
636 } 672 }
637 673
638 inode_set_bytes(inode, saved_nbytes); 674 inode_add_bytes(inode, nbytes);
639 ret = btrfs_update_inode(trans, root, inode); 675 ret = btrfs_update_inode(trans, root, inode);
640out: 676out:
641 if (inode) 677 if (inode)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 991c63c6bdd0..21b3a291c327 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1575,14 +1575,24 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1575 } 1575 }
1576 break; 1576 break;
1577 case Opt_blank_pass: 1577 case Opt_blank_pass:
1578 vol->password = NULL;
1579 break;
1580 case Opt_pass:
1581 /* passwords have to be handled differently 1578 /* passwords have to be handled differently
1582 * to allow the character used for deliminator 1579 * to allow the character used for deliminator
1583 * to be passed within them 1580 * to be passed within them
1584 */ 1581 */
1585 1582
1583 /*
1584 * Check if this is a case where the password
1585 * starts with a delimiter
1586 */
1587 tmp_end = strchr(data, '=');
1588 tmp_end++;
1589 if (!(tmp_end < end && tmp_end[1] == delim)) {
1590 /* No it is not. Set the password to NULL */
1591 vol->password = NULL;
1592 break;
1593 }
1594 /* Yes it is. Drop down to Opt_pass below.*/
1595 case Opt_pass:
1586 /* Obtain the value string */ 1596 /* Obtain the value string */
1587 value = strchr(data, '='); 1597 value = strchr(data, '=');
1588 value++; 1598 value++;
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index 412e6eda25f8..e4141f257495 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -80,13 +80,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
80 int rc; 80 int rc;
81 81
82 mutex_lock(&ecryptfs_daemon_hash_mux); 82 mutex_lock(&ecryptfs_daemon_hash_mux);
83 rc = try_module_get(THIS_MODULE);
84 if (rc == 0) {
85 rc = -EIO;
86 printk(KERN_ERR "%s: Error attempting to increment module use "
87 "count; rc = [%d]\n", __func__, rc);
88 goto out_unlock_daemon_list;
89 }
90 rc = ecryptfs_find_daemon_by_euid(&daemon); 83 rc = ecryptfs_find_daemon_by_euid(&daemon);
91 if (!rc) { 84 if (!rc) {
92 rc = -EINVAL; 85 rc = -EINVAL;
@@ -96,7 +89,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
96 if (rc) { 89 if (rc) {
97 printk(KERN_ERR "%s: Error attempting to spawn daemon; " 90 printk(KERN_ERR "%s: Error attempting to spawn daemon; "
98 "rc = [%d]\n", __func__, rc); 91 "rc = [%d]\n", __func__, rc);
99 goto out_module_put_unlock_daemon_list; 92 goto out_unlock_daemon_list;
100 } 93 }
101 mutex_lock(&daemon->mux); 94 mutex_lock(&daemon->mux);
102 if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) { 95 if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) {
@@ -108,9 +101,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
108 atomic_inc(&ecryptfs_num_miscdev_opens); 101 atomic_inc(&ecryptfs_num_miscdev_opens);
109out_unlock_daemon: 102out_unlock_daemon:
110 mutex_unlock(&daemon->mux); 103 mutex_unlock(&daemon->mux);
111out_module_put_unlock_daemon_list:
112 if (rc)
113 module_put(THIS_MODULE);
114out_unlock_daemon_list: 104out_unlock_daemon_list:
115 mutex_unlock(&ecryptfs_daemon_hash_mux); 105 mutex_unlock(&ecryptfs_daemon_hash_mux);
116 return rc; 106 return rc;
@@ -147,7 +137,6 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
147 "bug.\n", __func__, rc); 137 "bug.\n", __func__, rc);
148 BUG(); 138 BUG();
149 } 139 }
150 module_put(THIS_MODULE);
151 return rc; 140 return rc;
152} 141}
153 142
@@ -471,6 +460,7 @@ out_free:
471 460
472 461
473static const struct file_operations ecryptfs_miscdev_fops = { 462static const struct file_operations ecryptfs_miscdev_fops = {
463 .owner = THIS_MODULE,
474 .open = ecryptfs_miscdev_open, 464 .open = ecryptfs_miscdev_open,
475 .poll = ecryptfs_miscdev_poll, 465 .poll = ecryptfs_miscdev_poll,
476 .read = ecryptfs_miscdev_read, 466 .read = ecryptfs_miscdev_read,
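
The ecryptfs change above removes the hand-rolled try_module_get()/module_put() pair and instead sets .owner = THIS_MODULE on the miscdevice's file_operations, letting the VFS pin the module for the lifetime of every open file. A minimal sketch of the idiom for a hypothetical misc device (all names made up):

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static int demo_open(struct inode *inode, struct file *file)
{
	/*
	 * No try_module_get() here: the VFS takes a reference on
	 * fops->owner before ->open() and drops it after ->release(),
	 * which closes the unload race without any code in the driver.
	 */
	return 0;
}

static int demo_release(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.release = demo_release,
};

static struct miscdevice demo_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "demo",
	.fops  = &demo_fops,
};

static int __init demo_init(void)
{
	return misc_register(&demo_miscdev);
}

static void __exit demo_exit(void)
{
	misc_deregister(&demo_miscdev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");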
diff --git a/fs/inode.c b/fs/inode.c
index f5f7c06c36fb..a898b3d43ccf 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -725,7 +725,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
725 * inode to the back of the list so we don't spin on it. 725 * inode to the back of the list so we don't spin on it.
726 */ 726 */
727 if (!spin_trylock(&inode->i_lock)) { 727 if (!spin_trylock(&inode->i_lock)) {
728 list_move_tail(&inode->i_lru, &sb->s_inode_lru); 728 list_move(&inode->i_lru, &sb->s_inode_lru);
729 continue; 729 continue;
730 } 730 }
731 731
diff --git a/fs/namespace.c b/fs/namespace.c
index d581e45c0a9f..341d3f564082 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1690,7 +1690,7 @@ static int do_loopback(struct path *path, const char *old_name,
1690 1690
1691 if (IS_ERR(mnt)) { 1691 if (IS_ERR(mnt)) {
1692 err = PTR_ERR(mnt); 1692 err = PTR_ERR(mnt);
1693 goto out; 1693 goto out2;
1694 } 1694 }
1695 1695
1696 err = graft_tree(mnt, path); 1696 err = graft_tree(mnt, path);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index ac4fc9a8fdbc..66b6664dcd4c 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -300,7 +300,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
300 struct rpc_cred *cred) 300 struct rpc_cred *cred)
301{ 301{
302 struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); 302 struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
303 struct nfs_client *pos, *n, *prev = NULL; 303 struct nfs_client *pos, *prev = NULL;
304 struct nfs4_setclientid_res clid = { 304 struct nfs4_setclientid_res clid = {
305 .clientid = new->cl_clientid, 305 .clientid = new->cl_clientid,
306 .confirm = new->cl_confirm, 306 .confirm = new->cl_confirm,
@@ -308,10 +308,23 @@ int nfs40_walk_client_list(struct nfs_client *new,
308 int status = -NFS4ERR_STALE_CLIENTID; 308 int status = -NFS4ERR_STALE_CLIENTID;
309 309
310 spin_lock(&nn->nfs_client_lock); 310 spin_lock(&nn->nfs_client_lock);
311 list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { 311 list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
312 /* If "pos" isn't marked ready, we can't trust the 312 /* If "pos" isn't marked ready, we can't trust the
313 * remaining fields in "pos" */ 313 * remaining fields in "pos" */
314 if (pos->cl_cons_state < NFS_CS_READY) 314 if (pos->cl_cons_state > NFS_CS_READY) {
315 atomic_inc(&pos->cl_count);
316 spin_unlock(&nn->nfs_client_lock);
317
318 if (prev)
319 nfs_put_client(prev);
320 prev = pos;
321
322 status = nfs_wait_client_init_complete(pos);
323 spin_lock(&nn->nfs_client_lock);
324 if (status < 0)
325 continue;
326 }
327 if (pos->cl_cons_state != NFS_CS_READY)
315 continue; 328 continue;
316 329
317 if (pos->rpc_ops != new->rpc_ops) 330 if (pos->rpc_ops != new->rpc_ops)
@@ -423,16 +436,16 @@ int nfs41_walk_client_list(struct nfs_client *new,
423 struct rpc_cred *cred) 436 struct rpc_cred *cred)
424{ 437{
425 struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); 438 struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
426 struct nfs_client *pos, *n, *prev = NULL; 439 struct nfs_client *pos, *prev = NULL;
427 int status = -NFS4ERR_STALE_CLIENTID; 440 int status = -NFS4ERR_STALE_CLIENTID;
428 441
429 spin_lock(&nn->nfs_client_lock); 442 spin_lock(&nn->nfs_client_lock);
430 list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { 443 list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
431 /* If "pos" isn't marked ready, we can't trust the 444 /* If "pos" isn't marked ready, we can't trust the
432 * remaining fields in "pos", especially the client 445 * remaining fields in "pos", especially the client
433 * ID and serverowner fields. Wait for CREATE_SESSION 446 * ID and serverowner fields. Wait for CREATE_SESSION
434 * to finish. */ 447 * to finish. */
435 if (pos->cl_cons_state < NFS_CS_READY) { 448 if (pos->cl_cons_state > NFS_CS_READY) {
436 atomic_inc(&pos->cl_count); 449 atomic_inc(&pos->cl_count);
437 spin_unlock(&nn->nfs_client_lock); 450 spin_unlock(&nn->nfs_client_lock);
438 451
@@ -440,18 +453,17 @@ int nfs41_walk_client_list(struct nfs_client *new,
440 nfs_put_client(prev); 453 nfs_put_client(prev);
441 prev = pos; 454 prev = pos;
442 455
443 nfs4_schedule_lease_recovery(pos);
444 status = nfs_wait_client_init_complete(pos); 456 status = nfs_wait_client_init_complete(pos);
445 if (status < 0) { 457 if (status == 0) {
446 nfs_put_client(pos); 458 nfs4_schedule_lease_recovery(pos);
447 spin_lock(&nn->nfs_client_lock); 459 status = nfs4_wait_clnt_recover(pos);
448 continue;
449 } 460 }
450 status = pos->cl_cons_state;
451 spin_lock(&nn->nfs_client_lock); 461 spin_lock(&nn->nfs_client_lock);
452 if (status < 0) 462 if (status < 0)
453 continue; 463 continue;
454 } 464 }
465 if (pos->cl_cons_state != NFS_CS_READY)
466 continue;
455 467
456 if (pos->rpc_ops != new->rpc_ops) 468 if (pos->rpc_ops != new->rpc_ops)
457 continue; 469 continue;
@@ -469,17 +481,18 @@ int nfs41_walk_client_list(struct nfs_client *new,
469 continue; 481 continue;
470 482
471 atomic_inc(&pos->cl_count); 483 atomic_inc(&pos->cl_count);
472 spin_unlock(&nn->nfs_client_lock); 484 *result = pos;
485 status = 0;
473 dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", 486 dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
474 __func__, pos, atomic_read(&pos->cl_count)); 487 __func__, pos, atomic_read(&pos->cl_count));
475 488 break;
476 *result = pos;
477 return 0;
478 } 489 }
479 490
480 /* No matching nfs_client found. */ 491 /* No matching nfs_client found. */
481 spin_unlock(&nn->nfs_client_lock); 492 spin_unlock(&nn->nfs_client_lock);
482 dprintk("NFS: <-- %s status = %d\n", __func__, status); 493 dprintk("NFS: <-- %s status = %d\n", __func__, status);
494 if (prev)
495 nfs_put_client(prev);
483 return status; 496 return status;
484} 497}
485#endif /* CONFIG_NFS_V4_1 */ 498#endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 26431cf62ddb..0ad025eb523b 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1046,6 +1046,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1046 /* Save the delegation */ 1046 /* Save the delegation */
1047 nfs4_stateid_copy(&stateid, &delegation->stateid); 1047 nfs4_stateid_copy(&stateid, &delegation->stateid);
1048 rcu_read_unlock(); 1048 rcu_read_unlock();
1049 nfs_release_seqid(opendata->o_arg.seqid);
1049 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 1050 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1050 if (ret != 0) 1051 if (ret != 0)
1051 goto out; 1052 goto out;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 6ace365c6334..d41a3518509f 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1886,7 +1886,13 @@ again:
1886 status = PTR_ERR(clnt); 1886 status = PTR_ERR(clnt);
1887 break; 1887 break;
1888 } 1888 }
1889 clp->cl_rpcclient = clnt; 1889 /* Note: this is safe because we haven't yet marked the
1890 * client as ready, so we are the only user of
1891 * clp->cl_rpcclient
1892 */
1893 clnt = xchg(&clp->cl_rpcclient, clnt);
1894 rpc_shutdown_client(clnt);
1895 clnt = clp->cl_rpcclient;
1890 goto again; 1896 goto again;
1891 1897
1892 case -NFS4ERR_MINOR_VERS_MISMATCH: 1898 case -NFS4ERR_MINOR_VERS_MISMATCH:
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 4b3b3ffb52f1..21e1a8f1659d 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -755,37 +755,8 @@ void pde_put(struct proc_dir_entry *pde)
755 free_proc_entry(pde); 755 free_proc_entry(pde);
756} 756}
757 757
758/* 758static void entry_rundown(struct proc_dir_entry *de)
759 * Remove a /proc entry and free it if it's not currently in use.
760 */
761void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
762{ 759{
763 struct proc_dir_entry **p;
764 struct proc_dir_entry *de = NULL;
765 const char *fn = name;
766 unsigned int len;
767
768 spin_lock(&proc_subdir_lock);
769 if (__xlate_proc_name(name, &parent, &fn) != 0) {
770 spin_unlock(&proc_subdir_lock);
771 return;
772 }
773 len = strlen(fn);
774
775 for (p = &parent->subdir; *p; p=&(*p)->next ) {
776 if (proc_match(len, fn, *p)) {
777 de = *p;
778 *p = de->next;
779 de->next = NULL;
780 break;
781 }
782 }
783 spin_unlock(&proc_subdir_lock);
784 if (!de) {
785 WARN(1, "name '%s'\n", name);
786 return;
787 }
788
789 spin_lock(&de->pde_unload_lock); 760 spin_lock(&de->pde_unload_lock);
790 /* 761 /*
791 * Stop accepting new callers into module. If you're 762 * Stop accepting new callers into module. If you're
@@ -817,6 +788,40 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
817 spin_lock(&de->pde_unload_lock); 788 spin_lock(&de->pde_unload_lock);
818 } 789 }
819 spin_unlock(&de->pde_unload_lock); 790 spin_unlock(&de->pde_unload_lock);
791}
792
793/*
794 * Remove a /proc entry and free it if it's not currently in use.
795 */
796void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
797{
798 struct proc_dir_entry **p;
799 struct proc_dir_entry *de = NULL;
800 const char *fn = name;
801 unsigned int len;
802
803 spin_lock(&proc_subdir_lock);
804 if (__xlate_proc_name(name, &parent, &fn) != 0) {
805 spin_unlock(&proc_subdir_lock);
806 return;
807 }
808 len = strlen(fn);
809
810 for (p = &parent->subdir; *p; p=&(*p)->next ) {
811 if (proc_match(len, fn, *p)) {
812 de = *p;
813 *p = de->next;
814 de->next = NULL;
815 break;
816 }
817 }
818 spin_unlock(&proc_subdir_lock);
819 if (!de) {
820 WARN(1, "name '%s'\n", name);
821 return;
822 }
823
824 entry_rundown(de);
820 825
821 if (S_ISDIR(de->mode)) 826 if (S_ISDIR(de->mode))
822 parent->nlink--; 827 parent->nlink--;
@@ -827,3 +832,57 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
827 pde_put(de); 832 pde_put(de);
828} 833}
829EXPORT_SYMBOL(remove_proc_entry); 834EXPORT_SYMBOL(remove_proc_entry);
835
836int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
837{
838 struct proc_dir_entry **p;
839 struct proc_dir_entry *root = NULL, *de, *next;
840 const char *fn = name;
841 unsigned int len;
842
843 spin_lock(&proc_subdir_lock);
844 if (__xlate_proc_name(name, &parent, &fn) != 0) {
845 spin_unlock(&proc_subdir_lock);
846 return -ENOENT;
847 }
848 len = strlen(fn);
849
850 for (p = &parent->subdir; *p; p=&(*p)->next ) {
851 if (proc_match(len, fn, *p)) {
852 root = *p;
853 *p = root->next;
854 root->next = NULL;
855 break;
856 }
857 }
858 if (!root) {
859 spin_unlock(&proc_subdir_lock);
860 return -ENOENT;
861 }
862 de = root;
863 while (1) {
864 next = de->subdir;
865 if (next) {
866 de->subdir = next->next;
867 next->next = NULL;
868 de = next;
869 continue;
870 }
871 spin_unlock(&proc_subdir_lock);
872
873 entry_rundown(de);
874 next = de->parent;
875 if (S_ISDIR(de->mode))
876 next->nlink--;
877 de->nlink = 0;
878 if (de == root)
879 break;
880 pde_put(de);
881
882 spin_lock(&proc_subdir_lock);
883 de = next;
884 }
885 pde_put(root);
886 return 0;
887}
888EXPORT_SYMBOL(remove_proc_subtree);
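
remove_proc_subtree(), added and exported above, removes a /proc directory together with everything below it, so callers no longer have to unwind each entry individually with remove_proc_entry(). A small usage sketch, assuming a module that creates one directory with a single seq_file entry (names are invented):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static struct proc_dir_entry *demo_dir;

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	demo_dir = proc_mkdir("demo", NULL);
	if (!demo_dir)
		return -ENOMEM;
	if (!proc_create("status", 0444, demo_dir, &demo_proc_fops)) {
		remove_proc_entry("demo", NULL);
		return -ENOMEM;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	/* tears down "demo" and every entry below it in one call */
	remove_proc_subtree("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The helper walks the subtree under proc_subdir_lock and runs the same rundown as remove_proc_entry() on every node before dropping it.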
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 25f01d0bc149..b1b1fa6ffffe 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -99,7 +99,12 @@ struct mmu_gather {
99 unsigned int need_flush : 1, /* Did free PTEs */ 99 unsigned int need_flush : 1, /* Did free PTEs */
100 fast_mode : 1; /* No batching */ 100 fast_mode : 1; /* No batching */
101 101
102 unsigned int fullmm; 102 /* we are in the middle of an operation to clear
103 * a full mm and can make some optimizations */
104 unsigned int fullmm : 1,
105 /* we have performed an operation which
106 * requires a complete flush of the tlb */
107 need_flush_all : 1;
103 108
104 struct mmu_gather_batch *active; 109 struct mmu_gather_batch *active;
105 struct mmu_gather_batch local; 110 struct mmu_gather_batch local;
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 8f7a3d68371a..ee0bd9524055 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -954,7 +954,7 @@ static inline int atapi_cdb_len(const u16 *dev_id)
954 } 954 }
955} 955}
956 956
957static inline bool atapi_command_packet_set(const u16 *dev_id) 957static inline int atapi_command_packet_set(const u16 *dev_id)
958{ 958{
959 return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; 959 return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f;
960} 960}
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 98503b792369..d9a4f7f40f32 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -35,6 +35,7 @@ struct cpu_vfs_cap_data {
35#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t)) 35#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
36 36
37 37
38struct file;
38struct inode; 39struct inode;
39struct dentry; 40struct dentry;
40struct user_namespace; 41struct user_namespace;
@@ -211,6 +212,7 @@ extern bool capable(int cap);
211extern bool ns_capable(struct user_namespace *ns, int cap); 212extern bool ns_capable(struct user_namespace *ns, int cap);
212extern bool nsown_capable(int cap); 213extern bool nsown_capable(int cap);
213extern bool inode_capable(const struct inode *inode, int cap); 214extern bool inode_capable(const struct inode *inode, int cap);
215extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
214 216
215/* audit system wants to get cap info from files as well */ 217/* audit system wants to get cap info from files as well */
216extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); 218extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index e5ca8ef50e9b..52da2a250795 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -89,6 +89,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
89 * that the call back has its own recursion protection. If it does 89 * that the call back has its own recursion protection. If it does
90 * not set this, then the ftrace infrastructure will add recursion 90 * not set this, then the ftrace infrastructure will add recursion
91 * protection for the caller. 91 * protection for the caller.
92 * STUB - The ftrace_ops is just a place holder.
92 */ 93 */
93enum { 94enum {
94 FTRACE_OPS_FL_ENABLED = 1 << 0, 95 FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -98,6 +99,7 @@ enum {
98 FTRACE_OPS_FL_SAVE_REGS = 1 << 4, 99 FTRACE_OPS_FL_SAVE_REGS = 1 << 4,
99 FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5, 100 FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
100 FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, 101 FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
102 FTRACE_OPS_FL_STUB = 1 << 7,
101}; 103};
102 104
103struct ftrace_ops { 105struct ftrace_ops {
@@ -394,7 +396,6 @@ ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
394 size_t cnt, loff_t *ppos); 396 size_t cnt, loff_t *ppos);
395ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, 397ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
396 size_t cnt, loff_t *ppos); 398 size_t cnt, loff_t *ppos);
397loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence);
398int ftrace_regex_release(struct inode *inode, struct file *file); 399int ftrace_regex_release(struct inode *inode, struct file *file);
399 400
400void __init 401void __init
@@ -567,6 +568,8 @@ static inline int
567ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } 568ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
568#endif /* CONFIG_DYNAMIC_FTRACE */ 569#endif /* CONFIG_DYNAMIC_FTRACE */
569 570
571loff_t ftrace_filter_lseek(struct file *file, loff_t offset, int whence);
572
570/* totally disable ftrace - can not re-enable after this */ 573/* totally disable ftrace - can not re-enable after this */
571void ftrace_kill(void); 574void ftrace_kill(void);
572 575
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 91c9d109e5f1..eae7a053dc51 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -398,6 +398,7 @@ enum {
398 ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ 398 ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
399 ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ 399 ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
400 ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ 400 ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
401 ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
401 402
402 /* DMA mask for user DMA control: User visible values; DO NOT 403 /* DMA mask for user DMA control: User visible values; DO NOT
403 renumber */ 404 renumber */
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 5a710b9c578e..87a03c746f17 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -93,14 +93,20 @@ do { \
93 93
94#else /* !CONFIG_PREEMPT_COUNT */ 94#else /* !CONFIG_PREEMPT_COUNT */
95 95
96#define preempt_disable() do { } while (0) 96/*
97#define sched_preempt_enable_no_resched() do { } while (0) 97 * Even if we don't have any preemption, we need preempt disable/enable
98#define preempt_enable_no_resched() do { } while (0) 98 * to be barriers, so that we don't have things like get_user/put_user
99#define preempt_enable() do { } while (0) 99 * that can cause faults and scheduling migrate into our preempt-protected
100 100 * region.
101#define preempt_disable_notrace() do { } while (0) 101 */
102#define preempt_enable_no_resched_notrace() do { } while (0) 102#define preempt_disable() barrier()
103#define preempt_enable_notrace() do { } while (0) 103#define sched_preempt_enable_no_resched() barrier()
104#define preempt_enable_no_resched() barrier()
105#define preempt_enable() barrier()
106
107#define preempt_disable_notrace() barrier()
108#define preempt_enable_no_resched_notrace() barrier()
109#define preempt_enable_notrace() barrier()
104 110
105#endif /* CONFIG_PREEMPT_COUNT */ 111#endif /* CONFIG_PREEMPT_COUNT */
106 112
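
With CONFIG_PREEMPT_COUNT disabled these macros used to expand to nothing, so the compiler was free to move memory accesses across them; turning them into barrier() keeps an access from being reordered out of the region it is meant to cover, at no runtime cost. The committed comment's own example is get_user()/put_user() faulting inside such a region; the sketch below illustrates the same compiler-barrier property with a hypothetical per-CPU counter.

#include <linux/percpu.h>
#include <linux/preempt.h>

DEFINE_PER_CPU(int, demo_counter);

static void demo_bump(void)
{
	/*
	 * With barrier() in preempt_disable()/preempt_enable(), the
	 * compiler cannot hoist or sink the per-CPU access across the
	 * (otherwise empty) macros on a !CONFIG_PREEMPT_COUNT build.
	 */
	preempt_disable();
	__this_cpu_inc(demo_counter);
	preempt_enable();
}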
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 8307f2f94d86..94dfb2aa5533 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -117,6 +117,7 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
117 const struct file_operations *proc_fops, 117 const struct file_operations *proc_fops,
118 void *data); 118 void *data);
119extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); 119extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent);
120extern int remove_proc_subtree(const char *name, struct proc_dir_entry *parent);
120 121
121struct pid_namespace; 122struct pid_namespace;
122 123
@@ -202,6 +203,7 @@ static inline struct proc_dir_entry *proc_create_data(const char *name,
202 return NULL; 203 return NULL;
203} 204}
204#define remove_proc_entry(name, parent) do {} while (0) 205#define remove_proc_entry(name, parent) do {} while (0)
206#define remove_proc_subtree(name, parent) do {} while (0)
205 207
206static inline struct proc_dir_entry *proc_symlink(const char *name, 208static inline struct proc_dir_entry *proc_symlink(const char *name,
207 struct proc_dir_entry *parent,const char *dest) {return NULL;} 209 struct proc_dir_entry *parent,const char *dest) {return NULL;}
diff --git a/include/linux/security.h b/include/linux/security.h
index eee7478cda70..032c366ef1c6 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1012,6 +1012,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1012 * This hook can be used by the module to update any security state 1012 * This hook can be used by the module to update any security state
1013 * associated with the TUN device's security structure. 1013 * associated with the TUN device's security structure.
1014 * @security pointer to the TUN devices's security structure. 1014 * @security pointer to the TUN devices's security structure.
1015 * @skb_owned_by:
1016 * This hook sets the packet's owning sock.
1017 * @skb is the packet.
1018 * @sk the sock which owns the packet.
1015 * 1019 *
1016 * Security hooks for XFRM operations. 1020 * Security hooks for XFRM operations.
1017 * 1021 *
@@ -1638,6 +1642,7 @@ struct security_operations {
1638 int (*tun_dev_attach_queue) (void *security); 1642 int (*tun_dev_attach_queue) (void *security);
1639 int (*tun_dev_attach) (struct sock *sk, void *security); 1643 int (*tun_dev_attach) (struct sock *sk, void *security);
1640 int (*tun_dev_open) (void *security); 1644 int (*tun_dev_open) (void *security);
1645 void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk);
1641#endif /* CONFIG_SECURITY_NETWORK */ 1646#endif /* CONFIG_SECURITY_NETWORK */
1642 1647
1643#ifdef CONFIG_SECURITY_NETWORK_XFRM 1648#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -2588,6 +2593,8 @@ int security_tun_dev_attach_queue(void *security);
2588int security_tun_dev_attach(struct sock *sk, void *security); 2593int security_tun_dev_attach(struct sock *sk, void *security);
2589int security_tun_dev_open(void *security); 2594int security_tun_dev_open(void *security);
2590 2595
2596void security_skb_owned_by(struct sk_buff *skb, struct sock *sk);
2597
2591#else /* CONFIG_SECURITY_NETWORK */ 2598#else /* CONFIG_SECURITY_NETWORK */
2592static inline int security_unix_stream_connect(struct sock *sock, 2599static inline int security_unix_stream_connect(struct sock *sock,
2593 struct sock *other, 2600 struct sock *other,
@@ -2779,6 +2786,11 @@ static inline int security_tun_dev_open(void *security)
2779{ 2786{
2780 return 0; 2787 return 0;
2781} 2788}
2789
2790static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
2791{
2792}
2793
2782#endif /* CONFIG_SECURITY_NETWORK */ 2794#endif /* CONFIG_SECURITY_NETWORK */
2783 2795
2784#ifdef CONFIG_SECURITY_NETWORK_XFRM 2796#ifdef CONFIG_SECURITY_NETWORK_XFRM
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index a26e2fb604e6..e2369c167dbd 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -16,7 +16,10 @@
16 * In the debug case, 1 means unlocked, 0 means locked. (the values 16 * In the debug case, 1 means unlocked, 0 means locked. (the values
17 * are inverted, to catch initialization bugs) 17 * are inverted, to catch initialization bugs)
18 * 18 *
19 * No atomicity anywhere, we are on UP. 19 * No atomicity anywhere, we are on UP. However, we still need
20 * the compiler barriers, because we do not want the compiler to
21 * move potentially faulting instructions (notably user accesses)
22 * into the locked sequence, resulting in non-atomic execution.
20 */ 23 */
21 24
22#ifdef CONFIG_DEBUG_SPINLOCK 25#ifdef CONFIG_DEBUG_SPINLOCK
@@ -25,6 +28,7 @@
25static inline void arch_spin_lock(arch_spinlock_t *lock) 28static inline void arch_spin_lock(arch_spinlock_t *lock)
26{ 29{
27 lock->slock = 0; 30 lock->slock = 0;
31 barrier();
28} 32}
29 33
30static inline void 34static inline void
@@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
32{ 36{
33 local_irq_save(flags); 37 local_irq_save(flags);
34 lock->slock = 0; 38 lock->slock = 0;
39 barrier();
35} 40}
36 41
37static inline int arch_spin_trylock(arch_spinlock_t *lock) 42static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
39 char oldval = lock->slock; 44 char oldval = lock->slock;
40 45
41 lock->slock = 0; 46 lock->slock = 0;
47 barrier();
42 48
43 return oldval > 0; 49 return oldval > 0;
44} 50}
45 51
46static inline void arch_spin_unlock(arch_spinlock_t *lock) 52static inline void arch_spin_unlock(arch_spinlock_t *lock)
47{ 53{
54 barrier();
48 lock->slock = 1; 55 lock->slock = 1;
49} 56}
50 57
51/* 58/*
52 * Read-write spinlocks. No debug version. 59 * Read-write spinlocks. No debug version.
53 */ 60 */
54#define arch_read_lock(lock) do { (void)(lock); } while (0) 61#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
55#define arch_write_lock(lock) do { (void)(lock); } while (0) 62#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
56#define arch_read_trylock(lock) ({ (void)(lock); 1; }) 63#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
57#define arch_write_trylock(lock) ({ (void)(lock); 1; }) 64#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
58#define arch_read_unlock(lock) do { (void)(lock); } while (0) 65#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
59#define arch_write_unlock(lock) do { (void)(lock); } while (0) 66#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
60 67
61#else /* DEBUG_SPINLOCK */ 68#else /* DEBUG_SPINLOCK */
62#define arch_spin_is_locked(lock) ((void)(lock), 0) 69#define arch_spin_is_locked(lock) ((void)(lock), 0)
63/* for sched.c and kernel_lock.c: */ 70/* for sched.c and kernel_lock.c: */
64# define arch_spin_lock(lock) do { (void)(lock); } while (0) 71# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
65# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) 72# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
66# define arch_spin_unlock(lock) do { (void)(lock); } while (0) 73# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
67# define arch_spin_trylock(lock) ({ (void)(lock); 1; }) 74# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
68#endif /* DEBUG_SPINLOCK */ 75#endif /* DEBUG_SPINLOCK */
69 76
70#define arch_spin_is_contended(lock) (((void)(lock), 0)) 77#define arch_spin_is_contended(lock) (((void)(lock), 0))
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index cc7c19732389..714cc9a54a4c 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -130,6 +130,14 @@ struct iucv_sock {
130 enum iucv_tx_notify n); 130 enum iucv_tx_notify n);
131}; 131};
132 132
133struct iucv_skb_cb {
134 u32 class; /* target class of message */
135 u32 tag; /* tag associated with message */
136 u32 offset; /* offset for skb receival */
137};
138
139#define IUCV_SKB_CB(__skb) ((struct iucv_skb_cb *)&((__skb)->cb[0]))
140
133/* iucv socket options (SOL_IUCV) */ 141/* iucv socket options (SOL_IUCV) */
134#define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */ 142#define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */
135#define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */ 143#define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */
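
struct iucv_skb_cb and the IUCV_SKB_CB() macro added above keep per-packet metadata in skb->cb, the 48-byte scratch area every sk_buff carries for the layer currently owning it. A generic sketch of the idiom with invented names:

#include <linux/bug.h>
#include <linux/skbuff.h>
#include <linux/types.h>

/* Sketch: private per-skb state overlaid on skb->cb. */
struct demo_skb_cb {
	u32 class;	/* message class */
	u32 tag;	/* message tag */
	u32 offset;	/* receive offset */
};

#define DEMO_SKB_CB(skb) ((struct demo_skb_cb *)&((skb)->cb[0]))

static void demo_tag_skb(struct sk_buff *skb, u32 tag)
{
	/* the private struct must fit into the cb[] array */
	BUILD_BUG_ON(sizeof(struct demo_skb_cb) > sizeof(skb->cb));
	DEMO_SKB_CB(skb)->tag = tag;
}

The fields mirror what the af_iucv change stores (class, tag, offset), but any protocol-private state of up to 48 bytes works the same way.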
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index 95620428a59b..b1d1150c1d60 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -16,6 +16,7 @@
16#define __SOUND_DMAENGINE_PCM_H__ 16#define __SOUND_DMAENGINE_PCM_H__
17 17
18#include <sound/pcm.h> 18#include <sound/pcm.h>
19#include <sound/soc.h>
19#include <linux/dmaengine.h> 20#include <linux/dmaengine.h>
20 21
21/** 22/**
@@ -39,9 +40,15 @@ snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
39snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream); 40snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream);
40 41
41int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream, 42int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
42 dma_filter_fn filter_fn, void *filter_data); 43 struct dma_chan *chan);
43int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream); 44int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
44 45
46int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
47 dma_filter_fn filter_fn, void *filter_data);
48int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream);
49
50struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
51 void *filter_data);
45struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream); 52struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
46 53
47/** 54/**
@@ -68,4 +75,60 @@ void snd_dmaengine_pcm_set_config_from_dai_data(
68 const struct snd_dmaengine_dai_dma_data *dma_data, 75 const struct snd_dmaengine_dai_dma_data *dma_data,
69 struct dma_slave_config *config); 76 struct dma_slave_config *config);
70 77
78
79/*
80 * Try to request the DMA channel using compat_request_channel or
81 * compat_filter_fn if it couldn't be requested through devicetree.
82 */
83#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0)
84/*
85 * Don't try to request the DMA channels through devicetree. This flag only
86 * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well.
87 */
88#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1)
89/*
 90 * The platform's dmaengine driver does not support reporting the number of
 91 * bytes still left to transfer.
92 */
93#define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(2)
94
95/**
96 * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
97 * @prepare_slave_config: Callback used to fill in the DMA slave_config for a
 98 * PCM substream. Will be called from the PCM driver's hw_params callback.
99 * @compat_request_channel: Callback to request a DMA channel for platforms
100 * which do not use devicetree.
101 * @compat_filter_fn: Will be used as the filter function when requesting a
102 * channel for platforms which do not use devicetree. The filter parameter
103 * will be the DAI's DMA data.
104 * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM.
105 * @prealloc_buffer_size: Size of the preallocated audio buffer.
106 *
107 * Note: If both compat_request_channel and compat_filter_fn are set
108 * compat_request_channel will be used to request the channel and
109 * compat_filter_fn will be ignored. Otherwise the channel will be requested
110 * using dma_request_channel with compat_filter_fn as the filter function.
111 */
112struct snd_dmaengine_pcm_config {
113 int (*prepare_slave_config)(struct snd_pcm_substream *substream,
114 struct snd_pcm_hw_params *params,
115 struct dma_slave_config *slave_config);
116 struct dma_chan *(*compat_request_channel)(
117 struct snd_soc_pcm_runtime *rtd,
118 struct snd_pcm_substream *substream);
119 dma_filter_fn compat_filter_fn;
120
121 const struct snd_pcm_hardware *pcm_hardware;
122 unsigned int prealloc_buffer_size;
123};
124
125int snd_dmaengine_pcm_register(struct device *dev,
126 const struct snd_dmaengine_pcm_config *config,
127 unsigned int flags);
128void snd_dmaengine_pcm_unregister(struct device *dev);
129
130int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
131 struct snd_pcm_hw_params *params,
132 struct dma_slave_config *slave_config);
133
71#endif 134#endif
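
The snd_dmaengine_pcm_register()/struct snd_dmaengine_pcm_config API added above lets an ASoC platform hand its DMA description to the generic dmaengine PCM code instead of open-coding a platform driver. A registration sketch under the assumption of a platform driver's probe path; every name and number below is a placeholder, not a real driver:

#include <linux/device.h>
#include <linux/module.h>
#include <sound/dmaengine_pcm.h>
#include <sound/pcm.h>

static const struct snd_pcm_hardware demo_pcm_hardware = {
	.info			= SNDRV_PCM_INFO_MMAP |
				  SNDRV_PCM_INFO_MMAP_VALID |
				  SNDRV_PCM_INFO_INTERLEAVED,
	.period_bytes_min	= 32,
	.period_bytes_max	= 64 * 1024,
	.periods_min		= 2,
	.periods_max		= 255,
	.buffer_bytes_max	= 128 * 1024,
};

static const struct snd_dmaengine_pcm_config demo_pcm_config = {
	/* fill dma_slave_config from hw_params with the generic helper */
	.prepare_slave_config	= snd_dmaengine_pcm_prepare_slave_config,
	.pcm_hardware		= &demo_pcm_hardware,
	.prealloc_buffer_size	= 128 * 1024,
};

static int demo_platform_probe(struct device *dev)
{
	/* flags = 0: request channels via devicetree, no compat fallback */
	return snd_dmaengine_pcm_register(dev, &demo_pcm_config, 0);
}

static void demo_platform_remove(struct device *dev)
{
	snd_dmaengine_pcm_unregister(dev);
}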
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 9eb0e4db4835..85c15226103b 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -375,6 +375,10 @@ int snd_soc_poweroff(struct device *dev);
375int snd_soc_register_platform(struct device *dev, 375int snd_soc_register_platform(struct device *dev,
376 const struct snd_soc_platform_driver *platform_drv); 376 const struct snd_soc_platform_driver *platform_drv);
377void snd_soc_unregister_platform(struct device *dev); 377void snd_soc_unregister_platform(struct device *dev);
378int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform,
379 const struct snd_soc_platform_driver *platform_drv);
380void snd_soc_remove_platform(struct snd_soc_platform *platform);
381struct snd_soc_platform *snd_soc_lookup_platform(struct device *dev);
378int snd_soc_register_codec(struct device *dev, 382int snd_soc_register_codec(struct device *dev,
379 const struct snd_soc_codec_driver *codec_drv, 383 const struct snd_soc_codec_driver *codec_drv,
380 struct snd_soc_dai_driver *dai_drv, int num_dai); 384 struct snd_soc_dai_driver *dai_drv, int num_dai);
diff --git a/kernel/capability.c b/kernel/capability.c
index 493d97259484..f6c2ce5701e1 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -393,6 +393,30 @@ bool ns_capable(struct user_namespace *ns, int cap)
393EXPORT_SYMBOL(ns_capable); 393EXPORT_SYMBOL(ns_capable);
394 394
395/** 395/**
396 * file_ns_capable - Determine if the file's opener had a capability in effect
397 * @file: The file we want to check
398 * @ns: The usernamespace we want the capability in
399 * @cap: The capability to be tested for
400 *
401 * Return true if task that opened the file had a capability in effect
402 * when the file was opened.
403 *
404 * This does not set PF_SUPERPRIV because the caller may not
405 * actually be privileged.
406 */
407bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap)
408{
409 if (WARN_ON_ONCE(!cap_valid(cap)))
410 return false;
411
412 if (security_capable(file->f_cred, ns, cap) == 0)
413 return true;
414
415 return false;
416}
417EXPORT_SYMBOL(file_ns_capable);
418
419/**
396 * capable - Determine if the current task has a superior capability in effect 420 * capable - Determine if the current task has a superior capability in effect
397 * @cap: The capability to be tested for 421 * @cap: The capability to be tested for
398 * 422 *
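
file_ns_capable(), documented above, tests a capability against file->f_cred, i.e. the credentials of whoever opened the file, rather than against current at the time of the check. A minimal sketch of how a handler might use it; the handler and the choice of CAP_NET_ADMIN/init_user_ns are illustrative only, the helper itself is the one added above.

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/user_namespace.h>

/* Sketch: privileged operation gated on the opener's capabilities. */
static long demo_priv_op(struct file *file)
{
	if (!file_ns_capable(file, &init_user_ns, CAP_NET_ADMIN))
		return -EPERM;
	/* ... do the privileged work ... */
	return 0;
}

Checking the opener rather than the caller means an unprivileged opener cannot have a more privileged process perform the operation on its behalf just by passing the descriptor along.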
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 59412d037eed..7e0962ed7f8a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4737,7 +4737,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4737 } else { 4737 } else {
4738 if (arch_vma_name(mmap_event->vma)) { 4738 if (arch_vma_name(mmap_event->vma)) {
4739 name = strncpy(tmp, arch_vma_name(mmap_event->vma), 4739 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4740 sizeof(tmp)); 4740 sizeof(tmp) - 1);
4741 tmp[sizeof(tmp) - 1] = '\0';
4741 goto got_name; 4742 goto got_name;
4742 } 4743 }
4743 4744
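
This hunk, and the strlcpy() conversions further down in ftrace and trace, address the same pitfall: strncpy() does not NUL-terminate when the source does not fit. An illustrative helper (copy_name() is a made-up name) showing both safe forms:

#include <linux/string.h>

static void copy_name(char *dst, size_t dst_size, const char *src)
{
        /* Variant 1: strncpy() plus explicit termination, as above. */
        strncpy(dst, src, dst_size - 1);
        dst[dst_size - 1] = '\0';

        /* Variant 2: strlcpy() always terminates, as in the later hunks. */
        strlcpy(dst, src, dst_size);
}
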
@@ -5986,6 +5987,7 @@ skip_type:
5986 if (pmu->pmu_cpu_context) 5987 if (pmu->pmu_cpu_context)
5987 goto got_cpu_context; 5988 goto got_cpu_context;
5988 5989
5990 ret = -ENOMEM;
5989 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); 5991 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
5990 if (!pmu->pmu_cpu_context) 5992 if (!pmu->pmu_cpu_context)
5991 goto free_dev; 5993 goto free_dev;
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index d56a64c99a8b..eb675c4d59df 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -16,7 +16,7 @@ struct ring_buffer {
16 int page_order; /* allocation order */ 16 int page_order; /* allocation order */
17#endif 17#endif
18 int nr_pages; /* nr of data pages */ 18 int nr_pages; /* nr of data pages */
19 int writable; /* are we writable */ 19 int overwrite; /* can overwrite itself */
20 20
21 atomic_t poll; /* POLL_ for wakeups */ 21 atomic_t poll; /* POLL_ for wakeups */
22 22
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 23cb34ff3973..97fddb09762b 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -18,12 +18,24 @@
18static bool perf_output_space(struct ring_buffer *rb, unsigned long tail, 18static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
19 unsigned long offset, unsigned long head) 19 unsigned long offset, unsigned long head)
20{ 20{
21 unsigned long mask; 21 unsigned long sz = perf_data_size(rb);
22 unsigned long mask = sz - 1;
22 23
23 if (!rb->writable) 24 /*
25 * check if user-writable
26 * overwrite : over-write its own tail
27 * !overwrite: buffer possibly drops events.
28 */
29 if (rb->overwrite)
24 return true; 30 return true;
25 31
26 mask = perf_data_size(rb) - 1; 32 /*
33 * verify that payload is not bigger than buffer
34 * otherwise masking logic may fail to detect
35 * the "not enough space" condition
36 */
37 if ((head - offset) > sz)
38 return false;
27 39
28 offset = (offset - tail) & mask; 40 offset = (offset - tail) & mask;
29 head = (head - tail) & mask; 41 head = (head - tail) & mask;
@@ -212,7 +224,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
212 rb->watermark = max_size / 2; 224 rb->watermark = max_size / 2;
213 225
214 if (flags & RING_BUFFER_WRITABLE) 226 if (flags & RING_BUFFER_WRITABLE)
215 rb->writable = 1; 227 rb->overwrite = 0;
228 else
229 rb->overwrite = 1;
216 230
217 atomic_set(&rb->refcount, 1); 231 atomic_set(&rb->refcount, 1);
218 232
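
The new size check guards the masking arithmetic that follows it: once offsets are reduced modulo the buffer size, a payload larger than the whole buffer can masquerade as a small one. A standalone demonstration with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned long size = 0x10000, mask = size - 1;
        unsigned long tail = 0, offset = 0x100, head = 0x10180;

        /* The payload (0x10080 bytes) overflows the 0x10000-byte buffer... */
        printf("payload size:         %#lx\n", head - offset);
        /* ...yet after masking it looks like a harmless 0x80-byte advance. */
        printf("masked head - offset: %#lx\n",
               ((head - tail) & mask) - ((offset - tail) & mask));
        return 0;
}
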
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c685e31492df..c3ae1446461c 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -176,10 +176,36 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
176 u64 this_clock, remote_clock; 176 u64 this_clock, remote_clock;
177 u64 *ptr, old_val, val; 177 u64 *ptr, old_val, val;
178 178
179#if BITS_PER_LONG != 64
180again:
181 /*
182 * Careful here: The local and the remote clock values need to
183 * be read out atomic as we need to compare the values and
184 * then update either the local or the remote side. So the
185 * cmpxchg64 below only protects one readout.
186 *
187 * We must reread via sched_clock_local() in the retry case on
188 * 32bit as an NMI could use sched_clock_local() via the
189 * tracer and hit between the readout of
190 * the low 32bit and the high 32bit portion.
191 */
192 this_clock = sched_clock_local(my_scd);
193 /*
194 * We must enforce atomic readout on 32bit, otherwise the
195 * update on the remote cpu can hit in between the readout of
196 * the low32bit and the high 32bit portion.
197 */
198 remote_clock = cmpxchg64(&scd->clock, 0, 0);
199#else
200 /*
201 * On 64bit the read of [my]scd->clock is atomic versus the
202 * update, so we can avoid the above 32bit dance.
203 */
179 sched_clock_local(my_scd); 204 sched_clock_local(my_scd);
180again: 205again:
181 this_clock = my_scd->clock; 206 this_clock = my_scd->clock;
182 remote_clock = scd->clock; 207 remote_clock = scd->clock;
208#endif
183 209
184 /* 210 /*
185 * Use the opportunity that we have both locks 211 * Use the opportunity that we have both locks
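
The cmpxchg64(ptr, 0, 0) idiom above gives an atomic 64-bit read on 32-bit machines: the compare either fails and returns the current value, or "stores" the 0 that was already there. A minimal sketch of the idea with a hypothetical helper name:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/atomic.h>

static u64 read_u64_atomic(u64 *val)
{
#if BITS_PER_LONG == 64
        /* A 64-bit load cannot tear. */
        return *val;
#else
        /* Atomic read-without-modify: never changes *val. */
        return cmpxchg64(val, 0, 0);
#endif
}
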
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7f12624a393c..67d04651f44b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1498,8 +1498,10 @@ static void try_to_wake_up_local(struct task_struct *p)
1498{ 1498{
1499 struct rq *rq = task_rq(p); 1499 struct rq *rq = task_rq(p);
1500 1500
1501 BUG_ON(rq != this_rq()); 1501 if (WARN_ON_ONCE(rq != this_rq()) ||
1502 BUG_ON(p == current); 1502 WARN_ON_ONCE(p == current))
1503 return;
1504
1503 lockdep_assert_held(&rq->lock); 1505 lockdep_assert_held(&rq->lock);
1504 1506
1505 if (!raw_spin_trylock(&p->pi_lock)) { 1507 if (!raw_spin_trylock(&p->pi_lock)) {
@@ -4999,7 +5001,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
4999} 5001}
5000 5002
5001static int min_load_idx = 0; 5003static int min_load_idx = 0;
5002static int max_load_idx = CPU_LOAD_IDX_MAX; 5004static int max_load_idx = CPU_LOAD_IDX_MAX-1;
5003 5005
5004static void 5006static void
5005set_table_entry(struct ctl_table *entry, 5007set_table_entry(struct ctl_table *entry,
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index ed12cbb135f4..e93cca92f38b 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -310,7 +310,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
310 310
311 t = tsk; 311 t = tsk;
312 do { 312 do {
313 task_cputime(tsk, &utime, &stime); 313 task_cputime(t, &utime, &stime);
314 times->utime += utime; 314 times->utime += utime;
315 times->stime += stime; 315 times->stime += stime;
316 times->sum_exec_runtime += task_sched_runtime(t); 316 times->sum_exec_runtime += task_sched_runtime(t);
diff --git a/kernel/sys.c b/kernel/sys.c
index 39c9c4a2949f..0da73cf73e60 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -324,7 +324,6 @@ void kernel_restart_prepare(char *cmd)
324 system_state = SYSTEM_RESTART; 324 system_state = SYSTEM_RESTART;
325 usermodehelper_disable(); 325 usermodehelper_disable();
326 device_shutdown(); 326 device_shutdown();
327 syscore_shutdown();
328} 327}
329 328
330/** 329/**
@@ -370,6 +369,7 @@ void kernel_restart(char *cmd)
370{ 369{
371 kernel_restart_prepare(cmd); 370 kernel_restart_prepare(cmd);
372 disable_nonboot_cpus(); 371 disable_nonboot_cpus();
372 syscore_shutdown();
373 if (!cmd) 373 if (!cmd)
374 printk(KERN_EMERG "Restarting system.\n"); 374 printk(KERN_EMERG "Restarting system.\n");
375 else 375 else
@@ -395,6 +395,7 @@ static void kernel_shutdown_prepare(enum system_states state)
395void kernel_halt(void) 395void kernel_halt(void)
396{ 396{
397 kernel_shutdown_prepare(SYSTEM_HALT); 397 kernel_shutdown_prepare(SYSTEM_HALT);
398 disable_nonboot_cpus();
398 syscore_shutdown(); 399 syscore_shutdown();
399 printk(KERN_EMERG "System halted.\n"); 400 printk(KERN_EMERG "System halted.\n");
400 kmsg_dump(KMSG_DUMP_HALT); 401 kmsg_dump(KMSG_DUMP_HALT);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6893d5a2bf08..b3fde6d7b7fc 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -66,7 +66,7 @@
66 66
67static struct ftrace_ops ftrace_list_end __read_mostly = { 67static struct ftrace_ops ftrace_list_end __read_mostly = {
68 .func = ftrace_stub, 68 .func = ftrace_stub,
69 .flags = FTRACE_OPS_FL_RECURSION_SAFE, 69 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
70}; 70};
71 71
72/* ftrace_enabled is a method to turn ftrace on or off */ 72/* ftrace_enabled is a method to turn ftrace on or off */
@@ -694,7 +694,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
694 free_page(tmp); 694 free_page(tmp);
695 } 695 }
696 696
697 free_page((unsigned long)stat->pages);
698 stat->pages = NULL; 697 stat->pages = NULL;
699 stat->start = NULL; 698 stat->start = NULL;
700 699
@@ -1053,6 +1052,19 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1053 1052
1054static struct pid * const ftrace_swapper_pid = &init_struct_pid; 1053static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1055 1054
1055loff_t
1056ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
1057{
1058 loff_t ret;
1059
1060 if (file->f_mode & FMODE_READ)
1061 ret = seq_lseek(file, offset, whence);
1062 else
1063 file->f_pos = ret = 1;
1064
1065 return ret;
1066}
1067
1056#ifdef CONFIG_DYNAMIC_FTRACE 1068#ifdef CONFIG_DYNAMIC_FTRACE
1057 1069
1058#ifndef CONFIG_FTRACE_MCOUNT_RECORD 1070#ifndef CONFIG_FTRACE_MCOUNT_RECORD
@@ -2613,7 +2625,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)
2613 * routine, you can use ftrace_filter_write() for the write 2625 * routine, you can use ftrace_filter_write() for the write
2614 * routine if @flag has FTRACE_ITER_FILTER set, or 2626 * routine if @flag has FTRACE_ITER_FILTER set, or
2615 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. 2627 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2616 * ftrace_regex_lseek() should be used as the lseek routine, and 2628 * ftrace_filter_lseek() should be used as the lseek routine, and
2617 * release must call ftrace_regex_release(). 2629 * release must call ftrace_regex_release().
2618 */ 2630 */
2619int 2631int
@@ -2697,19 +2709,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
2697 inode, file); 2709 inode, file);
2698} 2710}
2699 2711
2700loff_t
2701ftrace_regex_lseek(struct file *file, loff_t offset, int whence)
2702{
2703 loff_t ret;
2704
2705 if (file->f_mode & FMODE_READ)
2706 ret = seq_lseek(file, offset, whence);
2707 else
2708 file->f_pos = ret = 1;
2709
2710 return ret;
2711}
2712
2713static int ftrace_match(char *str, char *regex, int len, int type) 2712static int ftrace_match(char *str, char *regex, int len, int type)
2714{ 2713{
2715 int matched = 0; 2714 int matched = 0;
@@ -3441,14 +3440,14 @@ static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3441 3440
3442static int __init set_ftrace_notrace(char *str) 3441static int __init set_ftrace_notrace(char *str)
3443{ 3442{
3444 strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); 3443 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3445 return 1; 3444 return 1;
3446} 3445}
3447__setup("ftrace_notrace=", set_ftrace_notrace); 3446__setup("ftrace_notrace=", set_ftrace_notrace);
3448 3447
3449static int __init set_ftrace_filter(char *str) 3448static int __init set_ftrace_filter(char *str)
3450{ 3449{
3451 strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); 3450 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3452 return 1; 3451 return 1;
3453} 3452}
3454__setup("ftrace_filter=", set_ftrace_filter); 3453__setup("ftrace_filter=", set_ftrace_filter);
@@ -3571,7 +3570,7 @@ static const struct file_operations ftrace_filter_fops = {
3571 .open = ftrace_filter_open, 3570 .open = ftrace_filter_open,
3572 .read = seq_read, 3571 .read = seq_read,
3573 .write = ftrace_filter_write, 3572 .write = ftrace_filter_write,
3574 .llseek = ftrace_regex_lseek, 3573 .llseek = ftrace_filter_lseek,
3575 .release = ftrace_regex_release, 3574 .release = ftrace_regex_release,
3576}; 3575};
3577 3576
@@ -3579,7 +3578,7 @@ static const struct file_operations ftrace_notrace_fops = {
3579 .open = ftrace_notrace_open, 3578 .open = ftrace_notrace_open,
3580 .read = seq_read, 3579 .read = seq_read,
3581 .write = ftrace_notrace_write, 3580 .write = ftrace_notrace_write,
3582 .llseek = ftrace_regex_lseek, 3581 .llseek = ftrace_filter_lseek,
3583 .release = ftrace_regex_release, 3582 .release = ftrace_regex_release,
3584}; 3583};
3585 3584
@@ -3784,8 +3783,8 @@ static const struct file_operations ftrace_graph_fops = {
3784 .open = ftrace_graph_open, 3783 .open = ftrace_graph_open,
3785 .read = seq_read, 3784 .read = seq_read,
3786 .write = ftrace_graph_write, 3785 .write = ftrace_graph_write,
3786 .llseek = ftrace_filter_lseek,
3787 .release = ftrace_graph_release, 3787 .release = ftrace_graph_release,
3788 .llseek = seq_lseek,
3789}; 3788};
3790#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 3789#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3791 3790
@@ -4131,7 +4130,8 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4131 preempt_disable_notrace(); 4130 preempt_disable_notrace();
4132 trace_recursion_set(TRACE_CONTROL_BIT); 4131 trace_recursion_set(TRACE_CONTROL_BIT);
4133 do_for_each_ftrace_op(op, ftrace_control_list) { 4132 do_for_each_ftrace_op(op, ftrace_control_list) {
4134 if (!ftrace_function_local_disabled(op) && 4133 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4134 !ftrace_function_local_disabled(op) &&
4135 ftrace_ops_test(op, ip)) 4135 ftrace_ops_test(op, ip))
4136 op->func(ip, parent_ip, op, regs); 4136 op->func(ip, parent_ip, op, regs);
4137 } while_for_each_ftrace_op(op); 4137 } while_for_each_ftrace_op(op);
@@ -4439,7 +4439,7 @@ static const struct file_operations ftrace_pid_fops = {
4439 .open = ftrace_pid_open, 4439 .open = ftrace_pid_open,
4440 .write = ftrace_pid_write, 4440 .write = ftrace_pid_write,
4441 .read = seq_read, 4441 .read = seq_read,
4442 .llseek = seq_lseek, 4442 .llseek = ftrace_filter_lseek,
4443 .release = ftrace_pid_release, 4443 .release = ftrace_pid_release,
4444}; 4444};
4445 4445
@@ -4555,12 +4555,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
4555 ftrace_startup_sysctl(); 4555 ftrace_startup_sysctl();
4556 4556
4557 /* we are starting ftrace again */ 4557 /* we are starting ftrace again */
4558 if (ftrace_ops_list != &ftrace_list_end) { 4558 if (ftrace_ops_list != &ftrace_list_end)
4559 if (ftrace_ops_list->next == &ftrace_list_end) 4559 update_ftrace_function();
4560 ftrace_trace_function = ftrace_ops_list->func;
4561 else
4562 ftrace_trace_function = ftrace_ops_list_func;
4563 }
4564 4560
4565 } else { 4561 } else {
4566 /* stopping ftrace calls (just send to ftrace_stub) */ 4562 /* stopping ftrace calls (just send to ftrace_stub) */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4f1dade56981..66338c4f7f4b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -132,7 +132,7 @@ static char *default_bootup_tracer;
132 132
133static int __init set_cmdline_ftrace(char *str) 133static int __init set_cmdline_ftrace(char *str)
134{ 134{
135 strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); 135 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
136 default_bootup_tracer = bootup_tracer_buf; 136 default_bootup_tracer = bootup_tracer_buf;
137 /* We are using ftrace early, expand it */ 137 /* We are using ftrace early, expand it */
138 ring_buffer_expanded = 1; 138 ring_buffer_expanded = 1;
@@ -162,7 +162,7 @@ static char *trace_boot_options __initdata;
162 162
163static int __init set_trace_boot_options(char *str) 163static int __init set_trace_boot_options(char *str)
164{ 164{
165 strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); 165 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
166 trace_boot_options = trace_boot_options_buf; 166 trace_boot_options = trace_boot_options_buf;
167 return 0; 167 return 0;
168} 168}
@@ -744,8 +744,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
744 return; 744 return;
745 745
746 WARN_ON_ONCE(!irqs_disabled()); 746 WARN_ON_ONCE(!irqs_disabled());
747 if (WARN_ON_ONCE(!current_trace->allocated_snapshot)) 747 if (!current_trace->allocated_snapshot) {
748 /* Only the nop tracer should hit this when disabling */
749 WARN_ON_ONCE(current_trace != &nop_trace);
748 return; 750 return;
751 }
749 752
750 arch_spin_lock(&ftrace_max_lock); 753 arch_spin_lock(&ftrace_max_lock);
751 754
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 42ca822fc701..83a8b5b7bd35 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -322,7 +322,7 @@ static const struct file_operations stack_trace_filter_fops = {
322 .open = stack_trace_filter_open, 322 .open = stack_trace_filter_open,
323 .read = seq_read, 323 .read = seq_read,
324 .write = ftrace_filter_write, 324 .write = ftrace_filter_write,
325 .llseek = ftrace_regex_lseek, 325 .llseek = ftrace_filter_lseek,
326 .release = ftrace_regex_release, 326 .release = ftrace_regex_release,
327}; 327};
328 328
diff --git a/lib/kobject.c b/lib/kobject.c
index e07ee1fcd6f1..a65486613d79 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobject *kobj)
529 return kobj; 529 return kobj;
530} 530}
531 531
532static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
533{
534 if (!kref_get_unless_zero(&kobj->kref))
535 kobj = NULL;
536 return kobj;
537}
538
532/* 539/*
533 * kobject_cleanup - free kobject resources. 540 * kobject_cleanup - free kobject resources.
534 * @kobj: object to cleanup 541 * @kobj: object to cleanup
@@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
751 758
752 list_for_each_entry(k, &kset->list, entry) { 759 list_for_each_entry(k, &kset->list, entry) {
753 if (kobject_name(k) && !strcmp(kobject_name(k), name)) { 760 if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
754 ret = kobject_get(k); 761 ret = kobject_get_unless_zero(k);
755 break; 762 break;
756 } 763 }
757 } 764 }
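
kobject_get_unless_zero() leans on kref_get_unless_zero(), which refuses to take a reference on an object whose count has already reached zero, so a lookup cannot resurrect an object that is being torn down. A hedged sketch of the general pattern with hypothetical foo_* names:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct foo {
        struct kref kref;
        struct list_head node;
        int id;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

static struct foo *foo_find(int id)
{
        struct foo *f, *found = NULL;

        spin_lock(&foo_lock);
        list_for_each_entry(f, &foo_list, node) {
                /* Only succeed if the object still holds a reference. */
                if (f->id == id && kref_get_unless_zero(&f->kref)) {
                        found = f;
                        break;
                }
        }
        spin_unlock(&foo_lock);
        return found;
}
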
diff --git a/mm/memory.c b/mm/memory.c
index 494526ae024a..13cbc420fead 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -216,6 +216,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
216 tlb->mm = mm; 216 tlb->mm = mm;
217 217
218 tlb->fullmm = fullmm; 218 tlb->fullmm = fullmm;
219 tlb->need_flush_all = 0;
219 tlb->start = -1UL; 220 tlb->start = -1UL;
220 tlb->end = 0; 221 tlb->end = 0;
221 tlb->need_flush = 0; 222 tlb->need_flush = 0;
diff --git a/net/atm/common.c b/net/atm/common.c
index 7b491006eaf4..737bef59ce89 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -531,6 +531,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
531 struct sk_buff *skb; 531 struct sk_buff *skb;
532 int copied, error = -EINVAL; 532 int copied, error = -EINVAL;
533 533
534 msg->msg_namelen = 0;
535
534 if (sock->state != SS_CONNECTED) 536 if (sock->state != SS_CONNECTED)
535 return -ENOTCONN; 537 return -ENOTCONN;
536 538
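
This hunk and several of the recvmsg() hunks that follow close the same information leak: msg_namelen is copied back to userspace even on paths that never fill in msg_name, so it must be zeroed before the first early return. A minimal sketch of the pattern for a hypothetical datagram socket (foo_recvmsg() and its error handling are illustrative):

#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static int foo_recvmsg(struct kiocb *iocb, struct socket *sock,
                       struct msghdr *msg, size_t len, int flags)
{
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        size_t copied;
        int err;

        msg->msg_namelen = 0;   /* nothing is reported unless set later */

        skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
        if (!skb)
                return err;     /* early return: msg_namelen already sane */

        copied = min_t(size_t, len, skb->len);
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
        skb_free_datagram(sk, skb);

        return err ? err : copied;
}
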
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 7b11f8bc5071..e277e38f736b 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1642,6 +1642,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
1642 ax25_address src; 1642 ax25_address src;
1643 const unsigned char *mac = skb_mac_header(skb); 1643 const unsigned char *mac = skb_mac_header(skb);
1644 1644
1645 memset(sax, 0, sizeof(struct full_sockaddr_ax25));
1645 ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, 1646 ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
1646 &digi, NULL, NULL); 1647 &digi, NULL, NULL);
1647 sax->sax25_family = AF_AX25; 1648 sax->sax25_family = AF_AX25;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index d3ee69b35a78..0d1b08cc76e1 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -230,6 +230,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
230 if (flags & (MSG_OOB)) 230 if (flags & (MSG_OOB))
231 return -EOPNOTSUPP; 231 return -EOPNOTSUPP;
232 232
233 msg->msg_namelen = 0;
234
233 skb = skb_recv_datagram(sk, flags, noblock, &err); 235 skb = skb_recv_datagram(sk, flags, noblock, &err);
234 if (!skb) { 236 if (!skb) {
235 if (sk->sk_shutdown & RCV_SHUTDOWN) 237 if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -237,8 +239,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
237 return err; 239 return err;
238 } 240 }
239 241
240 msg->msg_namelen = 0;
241
242 copied = skb->len; 242 copied = skb->len;
243 if (len < copied) { 243 if (len < copied) {
244 msg->msg_flags |= MSG_TRUNC; 244 msg->msg_flags |= MSG_TRUNC;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index c23bae86263b..7c9224bcce17 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -608,6 +608,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
608 608
609 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { 609 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
610 rfcomm_dlc_accept(d); 610 rfcomm_dlc_accept(d);
611 msg->msg_namelen = 0;
611 return 0; 612 return 0;
612 } 613 }
613 614
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index fad0302bdb32..fb6192c9812e 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
665 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { 665 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
666 hci_conn_accept(pi->conn->hcon, 0); 666 hci_conn_accept(pi->conn->hcon, 0);
667 sk->sk_state = BT_CONFIG; 667 sk->sk_state = BT_CONFIG;
668 msg->msg_namelen = 0;
668 669
669 release_sock(sk); 670 release_sock(sk);
670 return 0; 671 return 0;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 095259f83902..ff2ff3ce6965 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
286 if (m->msg_flags&MSG_OOB) 286 if (m->msg_flags&MSG_OOB)
287 goto read_error; 287 goto read_error;
288 288
289 m->msg_namelen = 0;
290
289 skb = skb_recv_datagram(sk, flags, 0 , &ret); 291 skb = skb_recv_datagram(sk, flags, 0 , &ret);
290 if (!skb) 292 if (!skb)
291 goto read_error; 293 goto read_error;
diff --git a/net/can/gw.c b/net/can/gw.c
index 2d117dc5ebea..117814a7e73c 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -466,7 +466,7 @@ static int cgw_notifier(struct notifier_block *nb,
466 if (gwj->src.dev == dev || gwj->dst.dev == dev) { 466 if (gwj->src.dev == dev || gwj->dst.dev == dev) {
467 hlist_del(&gwj->list); 467 hlist_del(&gwj->list);
468 cgw_unregister_filter(gwj); 468 cgw_unregister_filter(gwj);
469 kfree(gwj); 469 kmem_cache_free(cgw_cache, gwj);
470 } 470 }
471 } 471 }
472 } 472 }
@@ -864,7 +864,7 @@ static void cgw_remove_all_jobs(void)
864 hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { 864 hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
865 hlist_del(&gwj->list); 865 hlist_del(&gwj->list);
866 cgw_unregister_filter(gwj); 866 cgw_unregister_filter(gwj);
867 kfree(gwj); 867 kmem_cache_free(cgw_cache, gwj);
868 } 868 }
869} 869}
870 870
@@ -920,7 +920,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
920 920
921 hlist_del(&gwj->list); 921 hlist_del(&gwj->list);
922 cgw_unregister_filter(gwj); 922 cgw_unregister_filter(gwj);
923 kfree(gwj); 923 kmem_cache_free(cgw_cache, gwj);
924 err = 0; 924 err = 0;
925 break; 925 break;
926 } 926 }
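
The three gw.c hunks replace kfree() with kmem_cache_free() because the jobs are allocated from the dedicated cgw_cache, and objects must be returned to the cache they came from. An illustrative pairing with made-up example_* names:

#include <linux/errno.h>
#include <linux/slab.h>

static struct kmem_cache *example_cache;

static int example_use_cache(void)
{
        void *obj;

        example_cache = kmem_cache_create("example_cache", 128, 0, 0, NULL);
        if (!example_cache)
                return -ENOMEM;

        obj = kmem_cache_alloc(example_cache, GFP_KERNEL);
        if (obj)
                kmem_cache_free(example_cache, obj);    /* not kfree(obj) */

        kmem_cache_destroy(example_cache);
        return 0;
}
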
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b65441da74ab..23854b51a259 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1072,7 +1072,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1072 rcu_read_lock(); 1072 rcu_read_lock();
1073 cb->seq = net->dev_base_seq; 1073 cb->seq = net->dev_base_seq;
1074 1074
1075 if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, 1075 if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
1076 ifla_policy) >= 0) { 1076 ifla_policy) >= 0) {
1077 1077
1078 if (tb[IFLA_EXT_MASK]) 1078 if (tb[IFLA_EXT_MASK])
@@ -1922,7 +1922,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
1922 u32 ext_filter_mask = 0; 1922 u32 ext_filter_mask = 0;
1923 u16 min_ifinfo_dump_size = 0; 1923 u16 min_ifinfo_dump_size = 0;
1924 1924
1925 if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, 1925 if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
1926 ifla_policy) >= 0) { 1926 ifla_policy) >= 0) {
1927 if (tb[IFLA_EXT_MASK]) 1927 if (tb[IFLA_EXT_MASK])
1928 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 1928 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
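
The second argument of nlmsg_parse() is the size of the fixed family header that precedes the attributes. RTM_GETLINK requests carry a struct ifinfomsg, so parsing them with the smaller struct rtgenmsg made attribute parsing start inside the header. A hedged sketch of the corrected call, with policy checking omitted for brevity:

#include <net/netlink.h>
#include <linux/rtnetlink.h>

static int parse_getlink_attrs(const struct nlmsghdr *nlh,
                               struct nlattr **tb)
{
        /* hdrlen must match the header the sender actually put there. */
        return nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
                           NULL);
}
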
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 96083b7a436b..c6287cd978c2 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -587,13 +587,16 @@ static void check_lifetime(struct work_struct *work)
587{ 587{
588 unsigned long now, next, next_sec, next_sched; 588 unsigned long now, next, next_sec, next_sched;
589 struct in_ifaddr *ifa; 589 struct in_ifaddr *ifa;
590 struct hlist_node *n;
590 int i; 591 int i;
591 592
592 now = jiffies; 593 now = jiffies;
593 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); 594 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
594 595
595 rcu_read_lock();
596 for (i = 0; i < IN4_ADDR_HSIZE; i++) { 596 for (i = 0; i < IN4_ADDR_HSIZE; i++) {
597 bool change_needed = false;
598
599 rcu_read_lock();
597 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) { 600 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
598 unsigned long age; 601 unsigned long age;
599 602
@@ -606,16 +609,7 @@ static void check_lifetime(struct work_struct *work)
606 609
607 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && 610 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
608 age >= ifa->ifa_valid_lft) { 611 age >= ifa->ifa_valid_lft) {
609 struct in_ifaddr **ifap ; 612 change_needed = true;
610
611 rtnl_lock();
612 for (ifap = &ifa->ifa_dev->ifa_list;
613 *ifap != NULL; ifap = &ifa->ifa_next) {
614 if (*ifap == ifa)
615 inet_del_ifa(ifa->ifa_dev,
616 ifap, 1);
617 }
618 rtnl_unlock();
619 } else if (ifa->ifa_preferred_lft == 613 } else if (ifa->ifa_preferred_lft ==
620 INFINITY_LIFE_TIME) { 614 INFINITY_LIFE_TIME) {
621 continue; 615 continue;
@@ -625,10 +619,8 @@ static void check_lifetime(struct work_struct *work)
625 next = ifa->ifa_tstamp + 619 next = ifa->ifa_tstamp +
626 ifa->ifa_valid_lft * HZ; 620 ifa->ifa_valid_lft * HZ;
627 621
628 if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) { 622 if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
629 ifa->ifa_flags |= IFA_F_DEPRECATED; 623 change_needed = true;
630 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
631 }
632 } else if (time_before(ifa->ifa_tstamp + 624 } else if (time_before(ifa->ifa_tstamp +
633 ifa->ifa_preferred_lft * HZ, 625 ifa->ifa_preferred_lft * HZ,
634 next)) { 626 next)) {
@@ -636,8 +628,42 @@ static void check_lifetime(struct work_struct *work)
636 ifa->ifa_preferred_lft * HZ; 628 ifa->ifa_preferred_lft * HZ;
637 } 629 }
638 } 630 }
631 rcu_read_unlock();
632 if (!change_needed)
633 continue;
634 rtnl_lock();
635 hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
636 unsigned long age;
637
638 if (ifa->ifa_flags & IFA_F_PERMANENT)
639 continue;
640
641 /* We try to batch several events at once. */
642 age = (now - ifa->ifa_tstamp +
643 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
644
645 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
646 age >= ifa->ifa_valid_lft) {
647 struct in_ifaddr **ifap;
648
649 for (ifap = &ifa->ifa_dev->ifa_list;
650 *ifap != NULL; ifap = &(*ifap)->ifa_next) {
651 if (*ifap == ifa) {
652 inet_del_ifa(ifa->ifa_dev,
653 ifap, 1);
654 break;
655 }
656 }
657 } else if (ifa->ifa_preferred_lft !=
658 INFINITY_LIFE_TIME &&
659 age >= ifa->ifa_preferred_lft &&
660 !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
661 ifa->ifa_flags |= IFA_F_DEPRECATED;
662 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
663 }
664 }
665 rtnl_unlock();
639 } 666 }
640 rcu_read_unlock();
641 667
642 next_sec = round_jiffies_up(next); 668 next_sec = round_jiffies_up(next);
643 next_sched = next; 669 next_sched = next;
@@ -804,6 +830,8 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
804 return -EEXIST; 830 return -EEXIST;
805 ifa = ifa_existing; 831 ifa = ifa_existing;
806 set_ifa_lifetime(ifa, valid_lft, prefered_lft); 832 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
833 cancel_delayed_work(&check_lifetime_work);
834 schedule_delayed_work(&check_lifetime_work, 0);
807 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); 835 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
808 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa); 836 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
809 } 837 }
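
check_lifetime() is split into two passes because inet_del_ifa() needs rtnl_lock(), a sleeping lock that must not be taken inside the RCU read-side section. The first pass only detects that work is pending; the second retakes the list under rtnl_lock() and re-checks every entry before acting. A minimal model of that pattern with hypothetical item/list names:

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct item {
        struct hlist_node hash;
        unsigned long expires;
        bool dead;
};

static void expire_items(struct hlist_head *head)
{
        struct item *it;
        struct hlist_node *n;
        bool change_needed = false;

        rcu_read_lock();                        /* pass 1: look, don't touch */
        hlist_for_each_entry_rcu(it, head, hash)
                if (time_after(jiffies, it->expires))
                        change_needed = true;
        rcu_read_unlock();

        if (!change_needed)
                return;

        rtnl_lock();                            /* pass 2: may sleep, re-check */
        hlist_for_each_entry_safe(it, n, head, hash)
                if (time_after(jiffies, it->expires) && !it->dead)
                        it->dead = true;        /* stand-in for inet_del_ifa() */
        rtnl_unlock();
}
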
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5d0b4387cba6..b44cf81d8178 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2709,6 +2709,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2709 skb_reserve(skb, MAX_TCP_HEADER); 2709 skb_reserve(skb, MAX_TCP_HEADER);
2710 2710
2711 skb_dst_set(skb, dst); 2711 skb_dst_set(skb, dst);
2712 security_skb_owned_by(skb, sk);
2712 2713
2713 mss = dst_metric_advmss(dst); 2714 mss = dst_metric_advmss(dst);
2714 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2715 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f6d629fd6aee..46a5be85be87 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -386,6 +386,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
386 386
387 if (dst) 387 if (dst)
388 dst->ops->redirect(dst, sk, skb); 388 dst->ops->redirect(dst, sk, skb);
389 goto out;
389 } 390 }
390 391
391 if (type == ICMPV6_PKT_TOOBIG) { 392 if (type == ICMPV6_PKT_TOOBIG) {
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index d28e7f014cc6..e493b3397ae3 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
1386 1386
1387 IRDA_DEBUG(4, "%s()\n", __func__); 1387 IRDA_DEBUG(4, "%s()\n", __func__);
1388 1388
1389 msg->msg_namelen = 0;
1390
1389 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, 1391 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
1390 flags & MSG_DONTWAIT, &err); 1392 flags & MSG_DONTWAIT, &err);
1391 if (!skb) 1393 if (!skb)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a7d11ffe4284..206ce6db2c36 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] =
49 49
50#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class)) 50#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
51 51
52/* macros to set/get socket control buffer at correct offset */
53#define CB_TAG(skb) ((skb)->cb) /* iucv message tag */
54#define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag))
55#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
56#define CB_TRGCLS_LEN (TRGCLS_SIZE)
57
58#define __iucv_sock_wait(sk, condition, timeo, ret) \ 52#define __iucv_sock_wait(sk, condition, timeo, ret) \
59do { \ 53do { \
60 DEFINE_WAIT(__wait); \ 54 DEFINE_WAIT(__wait); \
@@ -1141,7 +1135,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
1141 1135
1142 /* increment and save iucv message tag for msg_completion cbk */ 1136 /* increment and save iucv message tag for msg_completion cbk */
1143 txmsg.tag = iucv->send_tag++; 1137 txmsg.tag = iucv->send_tag++;
1144 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); 1138 IUCV_SKB_CB(skb)->tag = txmsg.tag;
1145 1139
1146 if (iucv->transport == AF_IUCV_TRANS_HIPER) { 1140 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1147 atomic_inc(&iucv->msg_sent); 1141 atomic_inc(&iucv->msg_sent);
@@ -1224,7 +1218,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
1224 return -ENOMEM; 1218 return -ENOMEM;
1225 1219
1226 /* copy target class to control buffer of new skb */ 1220 /* copy target class to control buffer of new skb */
1227 memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN); 1221 IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
1228 1222
1229 /* copy data fragment */ 1223 /* copy data fragment */
1230 memcpy(nskb->data, skb->data + copied, size); 1224 memcpy(nskb->data, skb->data + copied, size);
@@ -1256,7 +1250,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1256 1250
1257 /* store msg target class in the second 4 bytes of skb ctrl buffer */ 1251 /* store msg target class in the second 4 bytes of skb ctrl buffer */
1258 /* Note: the first 4 bytes are reserved for msg tag */ 1252 /* Note: the first 4 bytes are reserved for msg tag */
1259 memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN); 1253 IUCV_SKB_CB(skb)->class = msg->class;
1260 1254
1261 /* check for special IPRM messages (e.g. iucv_sock_shutdown) */ 1255 /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
1262 if ((msg->flags & IUCV_IPRMDATA) && len > 7) { 1256 if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
@@ -1292,6 +1286,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1292 } 1286 }
1293 } 1287 }
1294 1288
1289 IUCV_SKB_CB(skb)->offset = 0;
1295 if (sock_queue_rcv_skb(sk, skb)) 1290 if (sock_queue_rcv_skb(sk, skb))
1296 skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); 1291 skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
1297} 1292}
@@ -1327,6 +1322,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1327 unsigned int copied, rlen; 1322 unsigned int copied, rlen;
1328 struct sk_buff *skb, *rskb, *cskb; 1323 struct sk_buff *skb, *rskb, *cskb;
1329 int err = 0; 1324 int err = 0;
1325 u32 offset;
1326
1327 msg->msg_namelen = 0;
1330 1328
1331 if ((sk->sk_state == IUCV_DISCONN) && 1329 if ((sk->sk_state == IUCV_DISCONN) &&
1332 skb_queue_empty(&iucv->backlog_skb_q) && 1330 skb_queue_empty(&iucv->backlog_skb_q) &&
@@ -1346,13 +1344,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1346 return err; 1344 return err;
1347 } 1345 }
1348 1346
1349 rlen = skb->len; /* real length of skb */ 1347 offset = IUCV_SKB_CB(skb)->offset;
1348 rlen = skb->len - offset; /* real length of skb */
1350 copied = min_t(unsigned int, rlen, len); 1349 copied = min_t(unsigned int, rlen, len);
1351 if (!rlen) 1350 if (!rlen)
1352 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; 1351 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
1353 1352
1354 cskb = skb; 1353 cskb = skb;
1355 if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { 1354 if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
1356 if (!(flags & MSG_PEEK)) 1355 if (!(flags & MSG_PEEK))
1357 skb_queue_head(&sk->sk_receive_queue, skb); 1356 skb_queue_head(&sk->sk_receive_queue, skb);
1358 return -EFAULT; 1357 return -EFAULT;
@@ -1370,7 +1369,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1370 * get the trgcls from the control buffer of the skb due to 1369 * get the trgcls from the control buffer of the skb due to
1371 * fragmentation of original iucv message. */ 1370 * fragmentation of original iucv message. */
1372 err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, 1371 err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
1373 CB_TRGCLS_LEN, CB_TRGCLS(skb)); 1372 sizeof(IUCV_SKB_CB(skb)->class),
1373 (void *)&IUCV_SKB_CB(skb)->class);
1374 if (err) { 1374 if (err) {
1375 if (!(flags & MSG_PEEK)) 1375 if (!(flags & MSG_PEEK))
1376 skb_queue_head(&sk->sk_receive_queue, skb); 1376 skb_queue_head(&sk->sk_receive_queue, skb);
@@ -1382,9 +1382,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1382 1382
1383 /* SOCK_STREAM: re-queue skb if it contains unreceived data */ 1383 /* SOCK_STREAM: re-queue skb if it contains unreceived data */
1384 if (sk->sk_type == SOCK_STREAM) { 1384 if (sk->sk_type == SOCK_STREAM) {
1385 skb_pull(skb, copied); 1385 if (copied < rlen) {
1386 if (skb->len) { 1386 IUCV_SKB_CB(skb)->offset = offset + copied;
1387 skb_queue_head(&sk->sk_receive_queue, skb);
1388 goto done; 1387 goto done;
1389 } 1388 }
1390 } 1389 }
@@ -1403,6 +1402,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1403 spin_lock_bh(&iucv->message_q.lock); 1402 spin_lock_bh(&iucv->message_q.lock);
1404 rskb = skb_dequeue(&iucv->backlog_skb_q); 1403 rskb = skb_dequeue(&iucv->backlog_skb_q);
1405 while (rskb) { 1404 while (rskb) {
1405 IUCV_SKB_CB(rskb)->offset = 0;
1406 if (sock_queue_rcv_skb(sk, rskb)) { 1406 if (sock_queue_rcv_skb(sk, rskb)) {
1407 skb_queue_head(&iucv->backlog_skb_q, 1407 skb_queue_head(&iucv->backlog_skb_q,
1408 rskb); 1408 rskb);
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
1830 spin_lock_irqsave(&list->lock, flags); 1830 spin_lock_irqsave(&list->lock, flags);
1831 1831
1832 while (list_skb != (struct sk_buff *)list) { 1832 while (list_skb != (struct sk_buff *)list) {
1833 if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) { 1833 if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
1834 this = list_skb; 1834 this = list_skb;
1835 break; 1835 break;
1836 } 1836 }
@@ -2091,6 +2091,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2091 skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); 2091 skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2092 skb_reset_transport_header(skb); 2092 skb_reset_transport_header(skb);
2093 skb_reset_network_header(skb); 2093 skb_reset_network_header(skb);
2094 IUCV_SKB_CB(skb)->offset = 0;
2094 spin_lock(&iucv->message_q.lock); 2095 spin_lock(&iucv->message_q.lock);
2095 if (skb_queue_empty(&iucv->backlog_skb_q)) { 2096 if (skb_queue_empty(&iucv->backlog_skb_q)) {
2096 if (sock_queue_rcv_skb(sk, skb)) { 2097 if (sock_queue_rcv_skb(sk, skb)) {
@@ -2195,8 +2196,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2195 /* fall through and receive zero length data */ 2196 /* fall through and receive zero length data */
2196 case 0: 2197 case 0:
2197 /* plain data frame */ 2198 /* plain data frame */
2198 memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class, 2199 IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
2199 CB_TRGCLS_LEN);
2200 err = afiucv_hs_callback_rx(sk, skb); 2200 err = afiucv_hs_callback_rx(sk, skb);
2201 break; 2201 break;
2202 default: 2202 default:
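
The IUCV_SKB_CB() accessor used throughout this diff (its definition lives in the af_iucv header, not in the hunks shown here) keeps the message tag, target class and a read offset in skb->cb. Tracking an offset lets a SOCK_STREAM reader consume an skb piecewise without calling skb_pull() on a buffer that stays queued, which keeps MSG_PEEK and later reads consistent. A hedged sketch of that idiom with a stand-in control-block struct:

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/types.h>

struct sample_skb_cb {
        u32 class;
        u32 tag;
        u32 offset;
};
#define SAMPLE_SKB_CB(skb)      ((struct sample_skb_cb *)&((skb)->cb[0]))

/* Returns 1 if the skb still holds unread data and must stay queued. */
static int consume(struct sk_buff *skb, size_t want)
{
        u32 offset = SAMPLE_SKB_CB(skb)->offset;
        size_t avail = skb->len - offset;
        size_t copied = min(want, avail);

        /* ... copy 'copied' bytes starting at 'offset' to the user ... */

        if (copied < avail) {
                SAMPLE_SKB_CB(skb)->offset = offset + copied;
                return 1;
        }
        return 0;       /* fully consumed; the caller unlinks and frees it */
}
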
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index c74f5a91ff6a..b8a6039314e8 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -690,6 +690,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
690 lsa->l2tp_addr = ipv6_hdr(skb)->saddr; 690 lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
691 lsa->l2tp_flowinfo = 0; 691 lsa->l2tp_flowinfo = 0;
692 lsa->l2tp_scope_id = 0; 692 lsa->l2tp_scope_id = 0;
693 lsa->l2tp_conn_id = 0;
693 if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) 694 if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
694 lsa->l2tp_scope_id = IP6CB(skb)->iif; 695 lsa->l2tp_scope_id = IP6CB(skb)->iif;
695 } 696 }
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 88709882c464..48aaa89253e0 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
720 int target; /* Read at least this many bytes */ 720 int target; /* Read at least this many bytes */
721 long timeo; 721 long timeo;
722 722
723 msg->msg_namelen = 0;
724
723 lock_sock(sk); 725 lock_sock(sk);
724 copied = -ENOTCONN; 726 copied = -ENOTCONN;
725 if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) 727 if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index d1fa1d9ffd2e..103bd704b5fc 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1173,6 +1173,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
1173 } 1173 }
1174 1174
1175 if (sax != NULL) { 1175 if (sax != NULL) {
1176 memset(sax, 0, sizeof(*sax));
1176 sax->sax25_family = AF_NETROM; 1177 sax->sax25_family = AF_NETROM;
1177 skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, 1178 skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
1178 AX25_ADDR_LEN); 1179 AX25_ADDR_LEN);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 8f025746f337..6c94447ec414 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -646,6 +646,8 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
646 646
647 pr_debug("%p %zu\n", sk, len); 647 pr_debug("%p %zu\n", sk, len);
648 648
649 msg->msg_namelen = 0;
650
649 lock_sock(sk); 651 lock_sock(sk);
650 652
651 if (sk->sk_state == LLCP_CLOSED && 653 if (sk->sk_state == LLCP_CLOSED &&
@@ -691,6 +693,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
691 693
692 pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); 694 pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap);
693 695
696 memset(sockaddr, 0, sizeof(*sockaddr));
694 sockaddr->sa_family = AF_NFC; 697 sockaddr->sa_family = AF_NFC;
695 sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP; 698 sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP;
696 sockaddr->dsap = ui_cb->dsap; 699 sockaddr->dsap = ui_cb->dsap;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index cf68e6e4054a..9c8347451597 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1253 skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 1253 skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1254 1254
1255 if (srose != NULL) { 1255 if (srose != NULL) {
1256 memset(srose, 0, msg->msg_namelen);
1256 srose->srose_family = AF_ROSE; 1257 srose->srose_family = AF_ROSE;
1257 srose->srose_addr = rose->dest_addr; 1258 srose->srose_addr = rose->dest_addr;
1258 srose->srose_call = rose->dest_call; 1259 srose->srose_call = rose->dest_call;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index dcc446e7fbf6..d5f35f15af98 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -304,10 +304,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
304 err = rpciod_up(); 304 err = rpciod_up();
305 if (err) 305 if (err)
306 goto out_no_rpciod; 306 goto out_no_rpciod;
307 err = -EINVAL;
308 if (!xprt)
309 goto out_no_xprt;
310 307
308 err = -EINVAL;
311 if (args->version >= program->nrvers) 309 if (args->version >= program->nrvers)
312 goto out_err; 310 goto out_err;
313 version = program->version[args->version]; 311 version = program->version[args->version];
@@ -382,10 +380,9 @@ out_no_principal:
382out_no_stats: 380out_no_stats:
383 kfree(clnt); 381 kfree(clnt);
384out_err: 382out_err:
385 xprt_put(xprt);
386out_no_xprt:
387 rpciod_down(); 383 rpciod_down();
388out_no_rpciod: 384out_no_rpciod:
385 xprt_put(xprt);
389 return ERR_PTR(err); 386 return ERR_PTR(err);
390} 387}
391 388
@@ -512,7 +509,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
512 new = rpc_new_client(args, xprt); 509 new = rpc_new_client(args, xprt);
513 if (IS_ERR(new)) { 510 if (IS_ERR(new)) {
514 err = PTR_ERR(new); 511 err = PTR_ERR(new);
515 goto out_put; 512 goto out_err;
516 } 513 }
517 514
518 atomic_inc(&clnt->cl_count); 515 atomic_inc(&clnt->cl_count);
@@ -525,8 +522,6 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
525 new->cl_chatty = clnt->cl_chatty; 522 new->cl_chatty = clnt->cl_chatty;
526 return new; 523 return new;
527 524
528out_put:
529 xprt_put(xprt);
530out_err: 525out_err:
531 dprintk("RPC: %s: returned error %d\n", __func__, err); 526 dprintk("RPC: %s: returned error %d\n", __func__, err);
532 return ERR_PTR(err); 527 return ERR_PTR(err);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index a9622b6cd916..515ce38e4f4c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -790,6 +790,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
790 if (addr) { 790 if (addr) {
791 addr->family = AF_TIPC; 791 addr->family = AF_TIPC;
792 addr->addrtype = TIPC_ADDR_ID; 792 addr->addrtype = TIPC_ADDR_ID;
793 memset(&addr->addr, 0, sizeof(addr->addr));
793 addr->addr.id.ref = msg_origport(msg); 794 addr->addr.id.ref = msg_origport(msg);
794 addr->addr.id.node = msg_orignode(msg); 795 addr->addr.id.node = msg_orignode(msg);
795 addr->addr.name.domain = 0; /* could leave uninitialized */ 796 addr->addr.name.domain = 0; /* could leave uninitialized */
@@ -904,6 +905,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
904 goto exit; 905 goto exit;
905 } 906 }
906 907
908 /* will be updated in set_orig_addr() if needed */
909 m->msg_namelen = 0;
910
907 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 911 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
908restart: 912restart:
909 913
@@ -1013,6 +1017,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
1013 goto exit; 1017 goto exit;
1014 } 1018 }
1015 1019
1020 /* will be updated in set_orig_addr() if needed */
1021 m->msg_namelen = 0;
1022
1016 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); 1023 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
1017 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1024 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1018 1025
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index d8079daf1bde..7f93e2a42d7a 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1670,6 +1670,8 @@ vsock_stream_recvmsg(struct kiocb *kiocb,
1670 vsk = vsock_sk(sk); 1670 vsk = vsock_sk(sk);
1671 err = 0; 1671 err = 0;
1672 1672
1673 msg->msg_namelen = 0;
1674
1673 lock_sock(sk); 1675 lock_sock(sk);
1674 1676
1675 if (sk->sk_state != SS_CONNECTED) { 1677 if (sk->sk_state != SS_CONNECTED) {
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 1f6508e249ae..5e04d3d96285 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1736,6 +1736,8 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
1736 if (flags & MSG_OOB || flags & MSG_ERRQUEUE) 1736 if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
1737 return -EOPNOTSUPP; 1737 return -EOPNOTSUPP;
1738 1738
1739 msg->msg_namelen = 0;
1740
1739 /* Retrieve the head sk_buff from the socket's receive queue. */ 1741 /* Retrieve the head sk_buff from the socket's receive queue. */
1740 err = 0; 1742 err = 0;
1741 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); 1743 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
@@ -1768,7 +1770,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
1768 if (err) 1770 if (err)
1769 goto out; 1771 goto out;
1770 1772
1771 msg->msg_namelen = 0;
1772 if (msg->msg_name) { 1773 if (msg->msg_name) {
1773 struct sockaddr_vm *vm_addr; 1774 struct sockaddr_vm *vm_addr;
1774 1775
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 09d994d192ff..482c70e70127 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -224,6 +224,7 @@ void cfg80211_conn_work(struct work_struct *work)
224 rtnl_lock(); 224 rtnl_lock();
225 cfg80211_lock_rdev(rdev); 225 cfg80211_lock_rdev(rdev);
226 mutex_lock(&rdev->devlist_mtx); 226 mutex_lock(&rdev->devlist_mtx);
227 mutex_lock(&rdev->sched_scan_mtx);
227 228
228 list_for_each_entry(wdev, &rdev->wdev_list, list) { 229 list_for_each_entry(wdev, &rdev->wdev_list, list) {
229 wdev_lock(wdev); 230 wdev_lock(wdev);
@@ -248,6 +249,7 @@ void cfg80211_conn_work(struct work_struct *work)
248 wdev_unlock(wdev); 249 wdev_unlock(wdev);
249 } 250 }
250 251
252 mutex_unlock(&rdev->sched_scan_mtx);
251 mutex_unlock(&rdev->devlist_mtx); 253 mutex_unlock(&rdev->devlist_mtx);
252 cfg80211_unlock_rdev(rdev); 254 cfg80211_unlock_rdev(rdev);
253 rtnl_unlock(); 255 rtnl_unlock();
diff --git a/security/capability.c b/security/capability.c
index 579775088967..6783c3e6c88e 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -737,6 +737,11 @@ static int cap_tun_dev_open(void *security)
737{ 737{
738 return 0; 738 return 0;
739} 739}
740
741static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk)
742{
743}
744
740#endif /* CONFIG_SECURITY_NETWORK */ 745#endif /* CONFIG_SECURITY_NETWORK */
741 746
742#ifdef CONFIG_SECURITY_NETWORK_XFRM 747#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1071,6 +1076,7 @@ void __init security_fixup_ops(struct security_operations *ops)
1071 set_to_cap_if_null(ops, tun_dev_open); 1076 set_to_cap_if_null(ops, tun_dev_open);
1072 set_to_cap_if_null(ops, tun_dev_attach_queue); 1077 set_to_cap_if_null(ops, tun_dev_attach_queue);
1073 set_to_cap_if_null(ops, tun_dev_attach); 1078 set_to_cap_if_null(ops, tun_dev_attach);
1079 set_to_cap_if_null(ops, skb_owned_by);
1074#endif /* CONFIG_SECURITY_NETWORK */ 1080#endif /* CONFIG_SECURITY_NETWORK */
1075#ifdef CONFIG_SECURITY_NETWORK_XFRM 1081#ifdef CONFIG_SECURITY_NETWORK_XFRM
1076 set_to_cap_if_null(ops, xfrm_policy_alloc_security); 1082 set_to_cap_if_null(ops, xfrm_policy_alloc_security);
diff --git a/security/security.c b/security/security.c
index 7b88c6aeaed4..03f248b84e9f 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1290,6 +1290,11 @@ int security_tun_dev_open(void *security)
1290} 1290}
1291EXPORT_SYMBOL(security_tun_dev_open); 1291EXPORT_SYMBOL(security_tun_dev_open);
1292 1292
1293void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
1294{
1295 security_ops->skb_owned_by(skb, sk);
1296}
1297
1293#endif /* CONFIG_SECURITY_NETWORK */ 1298#endif /* CONFIG_SECURITY_NETWORK */
1294 1299
1295#ifdef CONFIG_SECURITY_NETWORK_XFRM 1300#ifdef CONFIG_SECURITY_NETWORK_XFRM
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 2fa28c88900c..7171a957b933 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -51,6 +51,7 @@
51#include <linux/tty.h> 51#include <linux/tty.h>
52#include <net/icmp.h> 52#include <net/icmp.h>
53#include <net/ip.h> /* for local_port_range[] */ 53#include <net/ip.h> /* for local_port_range[] */
54#include <net/sock.h>
54#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ 55#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
55#include <net/net_namespace.h> 56#include <net/net_namespace.h>
56#include <net/netlabel.h> 57#include <net/netlabel.h>
@@ -4363,6 +4364,11 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
4363 selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid); 4364 selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
4364} 4365}
4365 4366
4367static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk)
4368{
4369 skb_set_owner_w(skb, sk);
4370}
4371
4366static int selinux_secmark_relabel_packet(u32 sid) 4372static int selinux_secmark_relabel_packet(u32 sid)
4367{ 4373{
4368 const struct task_security_struct *__tsec; 4374 const struct task_security_struct *__tsec;
@@ -5664,6 +5670,7 @@ static struct security_operations selinux_ops = {
5664 .tun_dev_attach_queue = selinux_tun_dev_attach_queue, 5670 .tun_dev_attach_queue = selinux_tun_dev_attach_queue,
5665 .tun_dev_attach = selinux_tun_dev_attach, 5671 .tun_dev_attach = selinux_tun_dev_attach,
5666 .tun_dev_open = selinux_tun_dev_open, 5672 .tun_dev_open = selinux_tun_dev_open,
5673 .skb_owned_by = selinux_skb_owned_by,
5667 5674
5668#ifdef CONFIG_SECURITY_NETWORK_XFRM 5675#ifdef CONFIG_SECURITY_NETWORK_XFRM
5669 .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc, 5676 .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 5da8ca7aee05..9e675c76436c 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -29,6 +29,10 @@ config SND_SOC_AC97_BUS
29config SND_SOC_DMAENGINE_PCM 29config SND_SOC_DMAENGINE_PCM
30 bool 30 bool
31 31
32config SND_SOC_GENERIC_DMAENGINE_PCM
33 bool
34 select SND_SOC_DMAENGINE_PCM
35
32# All the supported SoCs 36# All the supported SoCs
33source "sound/soc/atmel/Kconfig" 37source "sound/soc/atmel/Kconfig"
34source "sound/soc/au1x/Kconfig" 38source "sound/soc/au1x/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 99f32f7c0692..197b6ae54c8d 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -5,6 +5,10 @@ ifneq ($(CONFIG_SND_SOC_DMAENGINE_PCM),)
5snd-soc-core-objs += soc-dmaengine-pcm.o 5snd-soc-core-objs += soc-dmaengine-pcm.o
6endif 6endif
7 7
8ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
9snd-soc-core-objs += soc-generic-dmaengine-pcm.o
10endif
11
8obj-$(CONFIG_SND_SOC) += snd-soc-core.o 12obj-$(CONFIG_SND_SOC) += snd-soc-core.o
9obj-$(CONFIG_SND_SOC) += codecs/ 13obj-$(CONFIG_SND_SOC) += codecs/
10obj-$(CONFIG_SND_SOC) += generic/ 14obj-$(CONFIG_SND_SOC) += generic/
diff --git a/sound/soc/atmel/atmel-pcm-dma.c b/sound/soc/atmel/atmel-pcm-dma.c
index bb07989762d5..1d38fd0bc4e2 100644
--- a/sound/soc/atmel/atmel-pcm-dma.c
+++ b/sound/soc/atmel/atmel-pcm-dma.c
@@ -155,7 +155,7 @@ static int atmel_pcm_hw_params(struct snd_pcm_substream *substream,
155 if (ssc->pdev) 155 if (ssc->pdev)
156 sdata = ssc->pdev->dev.platform_data; 156 sdata = ssc->pdev->dev.platform_data;
157 157
158 ret = snd_dmaengine_pcm_open(substream, filter, sdata); 158 ret = snd_dmaengine_pcm_open_request_chan(substream, filter, sdata);
159 if (ret) { 159 if (ret) {
160 pr_err("atmel-pcm: dmaengine pcm open failed\n"); 160 pr_err("atmel-pcm: dmaengine pcm open failed\n");
161 return -EINVAL; 161 return -EINVAL;
@@ -171,7 +171,7 @@ static int atmel_pcm_hw_params(struct snd_pcm_substream *substream,
171 171
172 return 0; 172 return 0;
173err: 173err:
174 snd_dmaengine_pcm_close(substream); 174 snd_dmaengine_pcm_close_release_chan(substream);
175 return ret; 175 return ret;
176} 176}
177 177
@@ -197,7 +197,7 @@ static int atmel_pcm_open(struct snd_pcm_substream *substream)
197 197
198static struct snd_pcm_ops atmel_pcm_ops = { 198static struct snd_pcm_ops atmel_pcm_ops = {
199 .open = atmel_pcm_open, 199 .open = atmel_pcm_open,
200 .close = snd_dmaengine_pcm_close, 200 .close = snd_dmaengine_pcm_close_release_chan,
201 .ioctl = snd_pcm_lib_ioctl, 201 .ioctl = snd_pcm_lib_ioctl,
202 .hw_params = atmel_pcm_hw_params, 202 .hw_params = atmel_pcm_hw_params,
203 .prepare = atmel_pcm_dma_prepare, 203 .prepare = atmel_pcm_dma_prepare,
diff --git a/sound/soc/cirrus/ep93xx-pcm.c b/sound/soc/cirrus/ep93xx-pcm.c
index 298946f790eb..488032690378 100644
--- a/sound/soc/cirrus/ep93xx-pcm.c
+++ b/sound/soc/cirrus/ep93xx-pcm.c
@@ -69,7 +69,8 @@ static int ep93xx_pcm_open(struct snd_pcm_substream *substream)
69 69
70 snd_soc_set_runtime_hwparams(substream, &ep93xx_pcm_hardware); 70 snd_soc_set_runtime_hwparams(substream, &ep93xx_pcm_hardware);
71 71
72 return snd_dmaengine_pcm_open(substream, ep93xx_pcm_dma_filter, 72 return snd_dmaengine_pcm_open_request_chan(substream,
73 ep93xx_pcm_dma_filter,
73 snd_soc_dai_get_dma_data(rtd->cpu_dai, substream)); 74 snd_soc_dai_get_dma_data(rtd->cpu_dai, substream));
74} 75}
75 76
@@ -100,7 +101,7 @@ static int ep93xx_pcm_mmap(struct snd_pcm_substream *substream,
100 101
101static struct snd_pcm_ops ep93xx_pcm_ops = { 102static struct snd_pcm_ops ep93xx_pcm_ops = {
102 .open = ep93xx_pcm_open, 103 .open = ep93xx_pcm_open,
103 .close = snd_dmaengine_pcm_close, 104 .close = snd_dmaengine_pcm_close_release_chan,
104 .ioctl = snd_pcm_lib_ioctl, 105 .ioctl = snd_pcm_lib_ioctl,
105 .hw_params = ep93xx_pcm_hw_params, 106 .hw_params = ep93xx_pcm_hw_params,
106 .hw_free = ep93xx_pcm_hw_free, 107 .hw_free = ep93xx_pcm_hw_free,
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 3a7b7fd14e3e..3eeada57e87d 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -2024,7 +2024,7 @@ static int max98088_probe(struct snd_soc_codec *codec)
2024 ret); 2024 ret);
2025 goto err_access; 2025 goto err_access;
2026 } 2026 }
2027 dev_info(codec->dev, "revision %c\n", ret + 'A'); 2027 dev_info(codec->dev, "revision %c\n", ret - 0x40 + 'A');
2028 2028
2029 snd_soc_write(codec, M98088_REG_51_PWR_SYS, M98088_PWRSV); 2029 snd_soc_write(codec, M98088_REG_51_PWR_SYS, M98088_PWRSV);
2030 2030
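
The corrected expression implies that the MAX98088 revision register reports 0x40 for revision A, 0x41 for revision B, and so on, so the offset has to be subtracted before mapping to a letter. A tiny standalone check of the arithmetic (the register values are assumed, not taken from a datasheet):

#include <stdio.h>

int main(void)
{
        int readings[] = { 0x40, 0x41, 0x42 };
        int i;

        for (i = 0; i < 3; i++)
                printf("reg 0x%02x -> revision %c\n",
                       readings[i], readings[i] - 0x40 + 'A');
        return 0;
}
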
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 3b98159d9645..3843a18d4e56 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -118,7 +118,7 @@ config SND_SOC_IMX_PCM_FIQ
118 118
119config SND_SOC_IMX_PCM_DMA 119config SND_SOC_IMX_PCM_DMA
120 bool 120 bool
121 select SND_SOC_DMAENGINE_PCM 121 select SND_SOC_GENERIC_DMAENGINE_PCM
122 select SND_SOC_IMX_PCM 122 select SND_SOC_IMX_PCM
123 123
124config SND_SOC_IMX_AUDMUX 124config SND_SOC_IMX_AUDMUX
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 42366d776f62..0f0bed6def9e 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -425,12 +425,6 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream,
425 ssi_private->second_stream = substream; 425 ssi_private->second_stream = substream;
426 } 426 }
427 427
428 if (ssi_private->ssi_on_imx)
429 snd_soc_dai_set_dma_data(dai, substream,
430 (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
431 &ssi_private->dma_params_tx :
432 &ssi_private->dma_params_rx);
433
434 return 0; 428 return 0;
435} 429}
436 430
@@ -552,6 +546,18 @@ static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
552 } 546 }
553} 547}
554 548
549static int fsl_ssi_dai_probe(struct snd_soc_dai *dai)
550{
551 struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(dai);
552
553 if (ssi_private->ssi_on_imx) {
554 dai->playback_dma_data = &ssi_private->dma_params_tx;
555 dai->capture_dma_data = &ssi_private->dma_params_rx;
556 }
557
558 return 0;
559}
560
555static const struct snd_soc_dai_ops fsl_ssi_dai_ops = { 561static const struct snd_soc_dai_ops fsl_ssi_dai_ops = {
556 .startup = fsl_ssi_startup, 562 .startup = fsl_ssi_startup,
557 .hw_params = fsl_ssi_hw_params, 563 .hw_params = fsl_ssi_hw_params,
@@ -561,6 +567,7 @@ static const struct snd_soc_dai_ops fsl_ssi_dai_ops = {
561 567
562/* Template for the CPU dai driver structure */ 568/* Template for the CPU dai driver structure */
563static struct snd_soc_dai_driver fsl_ssi_dai_template = { 569static struct snd_soc_dai_driver fsl_ssi_dai_template = {
570 .probe = fsl_ssi_dai_probe,
564 .playback = { 571 .playback = {
565 /* The SSI does not support monaural audio. */ 572 /* The SSI does not support monaural audio. */
566 .channels_min = 2, 573 .channels_min = 2,
diff --git a/sound/soc/fsl/fsl_ssi.h b/sound/soc/fsl/fsl_ssi.h
index 217300029b5b..e6b9a69e2a68 100644
--- a/sound/soc/fsl/fsl_ssi.h
+++ b/sound/soc/fsl/fsl_ssi.h
@@ -196,5 +196,13 @@ struct ccsr_ssi {
196#define CCSR_SSI_SOR_WAIT(x) (((x) & 3) << CCSR_SSI_SOR_WAIT_SHIFT) 196#define CCSR_SSI_SOR_WAIT(x) (((x) & 3) << CCSR_SSI_SOR_WAIT_SHIFT)
197#define CCSR_SSI_SOR_SYNRST 0x00000001 197#define CCSR_SSI_SOR_SYNRST 0x00000001
198 198
199#define CCSR_SSI_SACNT_FRDIV(x) (((x) & 0x3f) << 5)
200#define CCSR_SSI_SACNT_WR 0x00000010
201#define CCSR_SSI_SACNT_RD 0x00000008
202#define CCSR_SSI_SACNT_RDWR_MASK 0x00000018
203#define CCSR_SSI_SACNT_TIF 0x00000004
204#define CCSR_SSI_SACNT_FV 0x00000002
205#define CCSR_SSI_SACNT_AC97EN 0x00000001
206
199#endif 207#endif
200 208
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index ee838c8a3b11..c246fb514930 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -11,22 +11,12 @@
11 * Free Software Foundation; either version 2 of the License, or (at your 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version. 12 * option) any later version.
13 */ 13 */
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/device.h>
17#include <linux/dma-mapping.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/module.h>
21#include <linux/platform_device.h> 14#include <linux/platform_device.h>
22#include <linux/slab.h>
23#include <linux/dmaengine.h> 15#include <linux/dmaengine.h>
24#include <linux/types.h> 16#include <linux/types.h>
25 17
26#include <sound/core.h> 18#include <sound/core.h>
27#include <sound/initval.h>
28#include <sound/pcm.h> 19#include <sound/pcm.h>
29#include <sound/pcm_params.h>
30#include <sound/soc.h> 20#include <sound/soc.h>
31#include <sound/dmaengine_pcm.h> 21#include <sound/dmaengine_pcm.h>
32 22
@@ -44,32 +34,7 @@ static bool filter(struct dma_chan *chan, void *param)
44 return true; 34 return true;
45} 35}
46 36
47static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream, 37static const struct snd_pcm_hardware imx_pcm_hardware = {
48 struct snd_pcm_hw_params *params)
49{
50 struct snd_soc_pcm_runtime *rtd = substream->private_data;
51 struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
52 struct dma_slave_config slave_config;
53 int ret;
54
55 ret = snd_hwparams_to_dma_slave_config(substream, params, &slave_config);
56 if (ret)
57 return ret;
58
59 snd_dmaengine_pcm_set_config_from_dai_data(substream,
60 snd_soc_dai_get_dma_data(rtd->cpu_dai, substream),
61 &slave_config);
62
63 ret = dmaengine_slave_config(chan, &slave_config);
64 if (ret)
65 return ret;
66
67 snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
68
69 return 0;
70}
71
72static struct snd_pcm_hardware snd_imx_hardware = {
73 .info = SNDRV_PCM_INFO_INTERLEAVED | 38 .info = SNDRV_PCM_INFO_INTERLEAVED |
74 SNDRV_PCM_INFO_BLOCK_TRANSFER | 39 SNDRV_PCM_INFO_BLOCK_TRANSFER |
75 SNDRV_PCM_INFO_MMAP | 40 SNDRV_PCM_INFO_MMAP |
@@ -88,33 +53,22 @@ static struct snd_pcm_hardware snd_imx_hardware = {
88 .fifo_size = 0, 53 .fifo_size = 0,
89}; 54};
90 55
91static int snd_imx_open(struct snd_pcm_substream *substream) 56static const struct snd_dmaengine_pcm_config imx_dmaengine_pcm_config = {
92{ 57 .pcm_hardware = &imx_pcm_hardware,
93 struct snd_soc_pcm_runtime *rtd = substream->private_data; 58 .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
94 59 .compat_filter_fn = filter,
95 snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware); 60 .prealloc_buffer_size = IMX_SSI_DMABUF_SIZE,
96
97 return snd_dmaengine_pcm_open(substream, filter,
98 snd_soc_dai_get_dma_data(rtd->cpu_dai, substream));
99}
100
101static struct snd_pcm_ops imx_pcm_ops = {
102 .open = snd_imx_open,
103 .close = snd_dmaengine_pcm_close,
104 .ioctl = snd_pcm_lib_ioctl,
105 .hw_params = snd_imx_pcm_hw_params,
106 .trigger = snd_dmaengine_pcm_trigger,
107 .pointer = snd_dmaengine_pcm_pointer_no_residue,
108 .mmap = snd_imx_pcm_mmap,
109};
110
111static struct snd_soc_platform_driver imx_soc_platform_mx2 = {
112 .ops = &imx_pcm_ops,
113 .pcm_new = imx_pcm_new,
114 .pcm_free = imx_pcm_free,
115}; 61};
116 62
117int imx_pcm_dma_init(struct platform_device *pdev) 63int imx_pcm_dma_init(struct platform_device *pdev)
118{ 64{
119 return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2); 65 return snd_dmaengine_pcm_register(&pdev->dev, &imx_dmaengine_pcm_config,
66 SND_DMAENGINE_PCM_FLAG_NO_RESIDUE |
67 SND_DMAENGINE_PCM_FLAG_NO_DT |
68 SND_DMAENGINE_PCM_FLAG_COMPAT);
69}
70
71void imx_pcm_dma_exit(struct platform_device *pdev)
72{
73 snd_dmaengine_pcm_unregister(&pdev->dev);
120} 74}
diff --git a/sound/soc/fsl/imx-pcm.c b/sound/soc/fsl/imx-pcm.c
index 0d0625bfcb65..c49896442d8e 100644
--- a/sound/soc/fsl/imx-pcm.c
+++ b/sound/soc/fsl/imx-pcm.c
@@ -114,7 +114,11 @@ static int imx_pcm_probe(struct platform_device *pdev)
114 114
115static int imx_pcm_remove(struct platform_device *pdev) 115static int imx_pcm_remove(struct platform_device *pdev)
116{ 116{
117 snd_soc_unregister_platform(&pdev->dev); 117 if (strcmp(pdev->id_entry->name, "imx-fiq-pcm-audio") == 0)
118 snd_soc_unregister_platform(&pdev->dev);
119 else
120 imx_pcm_dma_exit(pdev);
121
118 return 0; 122 return 0;
119} 123}
120 124
diff --git a/sound/soc/fsl/imx-pcm.h b/sound/soc/fsl/imx-pcm.h
index be9cc64a208b..b7fa0d75c687 100644
--- a/sound/soc/fsl/imx-pcm.h
+++ b/sound/soc/fsl/imx-pcm.h
@@ -39,11 +39,16 @@ void imx_pcm_free(struct snd_pcm *pcm);
39 39
40#ifdef CONFIG_SND_SOC_IMX_PCM_DMA 40#ifdef CONFIG_SND_SOC_IMX_PCM_DMA
41int imx_pcm_dma_init(struct platform_device *pdev); 41int imx_pcm_dma_init(struct platform_device *pdev);
42void imx_pcm_dma_exit(struct platform_device *pdev);
42#else 43#else
43static inline int imx_pcm_dma_init(struct platform_device *pdev) 44static inline int imx_pcm_dma_init(struct platform_device *pdev)
44{ 45{
45 return -ENODEV; 46 return -ENODEV;
46} 47}
48
49static inline void imx_pcm_dma_exit(struct platform_device *pdev)
50{
51}
47#endif 52#endif
48 53
49#ifdef CONFIG_SND_SOC_IMX_PCM_FIQ 54#ifdef CONFIG_SND_SOC_IMX_PCM_FIQ
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index 4ce2d608b37a..902fab02b851 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -232,23 +232,6 @@ static int imx_ssi_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
232 return 0; 232 return 0;
233} 233}
234 234
235static int imx_ssi_startup(struct snd_pcm_substream *substream,
236 struct snd_soc_dai *cpu_dai)
237{
238 struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai);
239 struct snd_dmaengine_dai_dma_data *dma_data;
240
241 /* Tx/Rx config */
242 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
243 dma_data = &ssi->dma_params_tx;
244 else
245 dma_data = &ssi->dma_params_rx;
246
247 snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
248
249 return 0;
250}
251
252/* 235/*
253 * Should only be called when port is inactive (i.e. SSIEN = 0), 236 * Should only be called when port is inactive (i.e. SSIEN = 0),
254 * although can be called multiple times by upper layers. 237 * although can be called multiple times by upper layers.
@@ -353,7 +336,6 @@ static int imx_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
353} 336}
354 337
355static const struct snd_soc_dai_ops imx_ssi_pcm_dai_ops = { 338static const struct snd_soc_dai_ops imx_ssi_pcm_dai_ops = {
356 .startup = imx_ssi_startup,
357 .hw_params = imx_ssi_hw_params, 339 .hw_params = imx_ssi_hw_params,
358 .set_fmt = imx_ssi_set_dai_fmt, 340 .set_fmt = imx_ssi_set_dai_fmt,
359 .set_clkdiv = imx_ssi_set_dai_clkdiv, 341 .set_clkdiv = imx_ssi_set_dai_clkdiv,
@@ -373,6 +355,10 @@ static int imx_ssi_dai_probe(struct snd_soc_dai *dai)
373 SSI_SFCSR_RFWM0(ssi->dma_params_rx.maxburst); 355 SSI_SFCSR_RFWM0(ssi->dma_params_rx.maxburst);
374 writel(val, ssi->base + SSI_SFCSR); 356 writel(val, ssi->base + SSI_SFCSR);
375 357
358 /* Tx/Rx config */
359 dai->playback_dma_data = &ssi->dma_params_tx;
360 dai->capture_dma_data = &ssi->dma_params_rx;
361
376 return 0; 362 return 0;
377} 363}
378 364
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c
index ebbef8597554..7bceb16d0fd9 100644
--- a/sound/soc/mxs/mxs-pcm.c
+++ b/sound/soc/mxs/mxs-pcm.c
@@ -87,7 +87,7 @@ static int snd_mxs_open(struct snd_pcm_substream *substream)
87 87
88 snd_soc_set_runtime_hwparams(substream, &snd_mxs_hardware); 88 snd_soc_set_runtime_hwparams(substream, &snd_mxs_hardware);
89 89
90 return snd_dmaengine_pcm_open(substream, filter, 90 return snd_dmaengine_pcm_open_request_chan(substream, filter,
91 snd_soc_dai_get_dma_data(rtd->cpu_dai, substream)); 91 snd_soc_dai_get_dma_data(rtd->cpu_dai, substream));
92} 92}
93 93
@@ -104,7 +104,7 @@ static int snd_mxs_pcm_mmap(struct snd_pcm_substream *substream,
104 104
105static struct snd_pcm_ops mxs_pcm_ops = { 105static struct snd_pcm_ops mxs_pcm_ops = {
106 .open = snd_mxs_open, 106 .open = snd_mxs_open,
107 .close = snd_dmaengine_pcm_close, 107 .close = snd_dmaengine_pcm_close_release_chan,
108 .ioctl = snd_pcm_lib_ioctl, 108 .ioctl = snd_pcm_lib_ioctl,
109 .hw_params = snd_mxs_pcm_hw_params, 109 .hw_params = snd_mxs_pcm_hw_params,
110 .trigger = snd_dmaengine_pcm_trigger, 110 .trigger = snd_dmaengine_pcm_trigger,
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index c8e272f9c2de..c28e042f2208 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -118,8 +118,9 @@ static int omap_pcm_open(struct snd_pcm_substream *substream)
118 118
119 dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); 119 dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
120 120
121 return snd_dmaengine_pcm_open(substream, omap_dma_filter_fn, 121 return snd_dmaengine_pcm_open_request_chan(substream,
122 dma_data->filter_data); 122 omap_dma_filter_fn,
123 dma_data->filter_data);
123} 124}
124 125
125static int omap_pcm_mmap(struct snd_pcm_substream *substream, 126static int omap_pcm_mmap(struct snd_pcm_substream *substream,
@@ -135,7 +136,7 @@ static int omap_pcm_mmap(struct snd_pcm_substream *substream,
135 136
136static struct snd_pcm_ops omap_pcm_ops = { 137static struct snd_pcm_ops omap_pcm_ops = {
137 .open = omap_pcm_open, 138 .open = omap_pcm_open,
138 .close = snd_dmaengine_pcm_close, 139 .close = snd_dmaengine_pcm_close_release_chan,
139 .ioctl = snd_pcm_lib_ioctl, 140 .ioctl = snd_pcm_lib_ioctl,
140 .hw_params = omap_pcm_hw_params, 141 .hw_params = omap_pcm_hw_params,
141 .hw_free = omap_pcm_hw_free, 142 .hw_free = omap_pcm_hw_free,
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 6c3980252bf6..349930015264 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -131,7 +131,8 @@ static int mmp_pcm_open(struct snd_pcm_substream *substream)
131 dma_data.dma_res = r; 131 dma_data.dma_res = r;
132 dma_data.ssp_id = cpu_dai->id; 132 dma_data.ssp_id = cpu_dai->id;
133 133
134 return snd_dmaengine_pcm_open(substream, filter, &dma_data); 134 return snd_dmaengine_pcm_open_request_chan(substream, filter,
135 &dma_data);
135} 136}
136 137
137static int mmp_pcm_mmap(struct snd_pcm_substream *substream, 138static int mmp_pcm_mmap(struct snd_pcm_substream *substream,
@@ -148,7 +149,7 @@ static int mmp_pcm_mmap(struct snd_pcm_substream *substream,
148 149
149struct snd_pcm_ops mmp_pcm_ops = { 150struct snd_pcm_ops mmp_pcm_ops = {
150 .open = mmp_pcm_open, 151 .open = mmp_pcm_open,
151 .close = snd_dmaengine_pcm_close, 152 .close = snd_dmaengine_pcm_close_release_chan,
152 .ioctl = snd_pcm_lib_ioctl, 153 .ioctl = snd_pcm_lib_ioctl,
153 .hw_params = mmp_pcm_hw_params, 154 .hw_params = mmp_pcm_hw_params,
154 .trigger = snd_dmaengine_pcm_trigger, 155 .trigger = snd_dmaengine_pcm_trigger,
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 7bf21a1035ea..d56bbea6e75e 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3900,21 +3900,14 @@ static void snd_soc_unregister_dais(struct device *dev, size_t count)
3900} 3900}
3901 3901
3902/** 3902/**
3903 * snd_soc_register_platform - Register a platform with the ASoC core 3903 * snd_soc_add_platform - Add a platform to the ASoC core
3904 * 3904 * @dev: The parent device for the platform
3905 * @platform: platform to register 3905 * @platform: The platform to add
 3906 * @platform_drv: The driver for the platform
3906 */ 3907 */
3907int snd_soc_register_platform(struct device *dev, 3908int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform,
3908 const struct snd_soc_platform_driver *platform_drv) 3909 const struct snd_soc_platform_driver *platform_drv)
3909{ 3910{
3910 struct snd_soc_platform *platform;
3911
3912 dev_dbg(dev, "ASoC: platform register %s\n", dev_name(dev));
3913
3914 platform = kzalloc(sizeof(struct snd_soc_platform), GFP_KERNEL);
3915 if (platform == NULL)
3916 return -ENOMEM;
3917
3918 /* create platform component name */ 3911 /* create platform component name */
3919 platform->name = fmt_single_name(dev, &platform->id); 3912 platform->name = fmt_single_name(dev, &platform->id);
3920 if (platform->name == NULL) { 3913 if (platform->name == NULL) {
@@ -3937,30 +3930,76 @@ int snd_soc_register_platform(struct device *dev,
3937 3930
3938 return 0; 3931 return 0;
3939} 3932}
3940EXPORT_SYMBOL_GPL(snd_soc_register_platform); 3933EXPORT_SYMBOL_GPL(snd_soc_add_platform);
3941 3934
3942/** 3935/**
3943 * snd_soc_unregister_platform - Unregister a platform from the ASoC core 3936 * snd_soc_register_platform - Register a platform with the ASoC core
3944 * 3937 *
3945 * @platform: platform to unregister 3938 * @platform: platform to register
3946 */ 3939 */
3947void snd_soc_unregister_platform(struct device *dev) 3940int snd_soc_register_platform(struct device *dev,
3941 const struct snd_soc_platform_driver *platform_drv)
3948{ 3942{
3949 struct snd_soc_platform *platform; 3943 struct snd_soc_platform *platform;
3944 int ret;
3950 3945
3951 list_for_each_entry(platform, &platform_list, list) { 3946 dev_dbg(dev, "ASoC: platform register %s\n", dev_name(dev));
3952 if (dev == platform->dev)
3953 goto found;
3954 }
3955 return;
3956 3947
3957found: 3948 platform = kzalloc(sizeof(struct snd_soc_platform), GFP_KERNEL);
3949 if (platform == NULL)
3950 return -ENOMEM;
3951
3952 ret = snd_soc_add_platform(dev, platform, platform_drv);
3953 if (ret)
3954 kfree(platform);
3955
3956 return ret;
3957}
3958EXPORT_SYMBOL_GPL(snd_soc_register_platform);
3959
3960/**
3961 * snd_soc_remove_platform - Remove a platform from the ASoC core
3962 * @platform: the platform to remove
3963 */
3964void snd_soc_remove_platform(struct snd_soc_platform *platform)
3965{
3958 mutex_lock(&client_mutex); 3966 mutex_lock(&client_mutex);
3959 list_del(&platform->list); 3967 list_del(&platform->list);
3960 mutex_unlock(&client_mutex); 3968 mutex_unlock(&client_mutex);
3961 3969
3962 dev_dbg(dev, "ASoC: Unregistered platform '%s'\n", platform->name); 3970 dev_dbg(platform->dev, "ASoC: Unregistered platform '%s'\n",
3971 platform->name);
3963 kfree(platform->name); 3972 kfree(platform->name);
3973}
3974EXPORT_SYMBOL_GPL(snd_soc_remove_platform);
3975
3976struct snd_soc_platform *snd_soc_lookup_platform(struct device *dev)
3977{
3978 struct snd_soc_platform *platform;
3979
3980 list_for_each_entry(platform, &platform_list, list) {
3981 if (dev == platform->dev)
3982 return platform;
3983 }
3984
3985 return NULL;
3986}
3987EXPORT_SYMBOL_GPL(snd_soc_lookup_platform);
3988
3989/**
3990 * snd_soc_unregister_platform - Unregister a platform from the ASoC core
3991 *
3992 * @platform: platform to unregister
3993 */
3994void snd_soc_unregister_platform(struct device *dev)
3995{
3996 struct snd_soc_platform *platform;
3997
3998 platform = snd_soc_lookup_platform(dev);
3999 if (!platform)
4000 return;
4001
4002 snd_soc_remove_platform(platform);
3964 kfree(platform); 4003 kfree(platform);
3965} 4004}
3966EXPORT_SYMBOL_GPL(snd_soc_unregister_platform); 4005EXPORT_SYMBOL_GPL(snd_soc_unregister_platform);
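The split of snd_soc_register_platform() into snd_soc_add_platform(), snd_soc_remove_platform() and snd_soc_lookup_platform() lets a caller embed struct snd_soc_platform in a larger, caller-allocated object instead of having the core kzalloc it. A minimal sketch of that pattern follows; my_pcm and my_platform_driver are illustrative names, not part of the patch, and the generic dmaengine PCM added below uses the same approach:

	#include <linux/slab.h>
	#include <sound/soc.h>

	struct my_pcm {
		struct snd_soc_platform platform;
		/* driver-private state can live next to the embedded platform */
	};

	static int my_pcm_register(struct device *dev)
	{
		struct my_pcm *pcm;
		int ret;

		pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
		if (!pcm)
			return -ENOMEM;

		/* add the caller-allocated platform instead of letting the core allocate one */
		ret = snd_soc_add_platform(dev, &pcm->platform, &my_platform_driver);
		if (ret)
			kfree(pcm);
		return ret;
	}

	static void my_pcm_unregister(struct device *dev)
	{
		struct snd_soc_platform *platform = snd_soc_lookup_platform(dev);

		if (!platform)
			return;

		snd_soc_remove_platform(platform);
		kfree(container_of(platform, struct my_pcm, platform));
	}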
diff --git a/sound/soc/soc-dmaengine-pcm.c b/sound/soc/soc-dmaengine-pcm.c
index a9a300acb506..aa924d9b7986 100644
--- a/sound/soc/soc-dmaengine-pcm.c
+++ b/sound/soc/soc-dmaengine-pcm.c
@@ -254,44 +254,48 @@ snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
254} 254}
255EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer); 255EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
256 256
257static int dmaengine_pcm_request_channel(struct dmaengine_pcm_runtime_data *prtd, 257/**
258 dma_filter_fn filter_fn, void *filter_data) 258 * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM
259 * @filter_fn: Filter function used to request the DMA channel
260 * @filter_data: Data passed to the DMA filter function
261 *
262 * Returns NULL or the requested DMA channel.
263 *
264 * This function requests a DMA channel for use with the dmaengine PCM.
265 */
266struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
267 void *filter_data)
259{ 268{
260 dma_cap_mask_t mask; 269 dma_cap_mask_t mask;
261 270
262 dma_cap_zero(mask); 271 dma_cap_zero(mask);
263 dma_cap_set(DMA_SLAVE, mask); 272 dma_cap_set(DMA_SLAVE, mask);
264 dma_cap_set(DMA_CYCLIC, mask); 273 dma_cap_set(DMA_CYCLIC, mask);
265 prtd->dma_chan = dma_request_channel(mask, filter_fn, filter_data);
266
267 if (!prtd->dma_chan)
268 return -ENXIO;
269 274
270 return 0; 275 return dma_request_channel(mask, filter_fn, filter_data);
271} 276}
277EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
272 278
273/** 279/**
274 * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream 280 * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream
275 * @substream: PCM substream 281 * @substream: PCM substream
276 * @filter_fn: Filter function used to request the DMA channel 282 * @chan: DMA channel to use for data transfers
277 * @filter_data: Data passed to the DMA filter function
278 * 283 *
279 * Returns 0 on success, a negative error code otherwise. 284 * Returns 0 on success, a negative error code otherwise.
280 * 285 *
281 * This function will request a DMA channel using the passed filter function and 286 * The function should usually be called from the pcm open callback. Note that
282 * data. The function should usually be called from the pcm open callback. 287 * this function will use private_data field of the substream's runtime. So it
283 * 288 * is not available to your pcm driver implementation.
284 * Note that this function will use private_data field of the substream's
285 * runtime. So it is not availabe to your pcm driver implementation. If you need
286 * to keep additional data attached to a substream use
287 * snd_dmaengine_pcm_{set,get}_data.
288 */ 289 */
289int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream, 290int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
290 dma_filter_fn filter_fn, void *filter_data) 291 struct dma_chan *chan)
291{ 292{
292 struct dmaengine_pcm_runtime_data *prtd; 293 struct dmaengine_pcm_runtime_data *prtd;
293 int ret; 294 int ret;
294 295
296 if (!chan)
297 return -ENXIO;
298
295 ret = snd_pcm_hw_constraint_integer(substream->runtime, 299 ret = snd_pcm_hw_constraint_integer(substream->runtime,
296 SNDRV_PCM_HW_PARAM_PERIODS); 300 SNDRV_PCM_HW_PARAM_PERIODS);
297 if (ret < 0) 301 if (ret < 0)
@@ -301,11 +305,7 @@ int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
301 if (!prtd) 305 if (!prtd)
302 return -ENOMEM; 306 return -ENOMEM;
303 307
304 ret = dmaengine_pcm_request_channel(prtd, filter_fn, filter_data); 308 prtd->dma_chan = chan;
305 if (ret < 0) {
306 kfree(prtd);
307 return ret;
308 }
309 309
310 substream->runtime->private_data = prtd; 310 substream->runtime->private_data = prtd;
311 311
@@ -314,6 +314,27 @@ int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
314EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open); 314EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open);
315 315
316/** 316/**
317 * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel
318 * @substream: PCM substream
319 * @filter_fn: Filter function used to request the DMA channel
320 * @filter_data: Data passed to the DMA filter function
321 *
322 * Returns 0 on success, a negative error code otherwise.
323 *
324 * This function will request a DMA channel using the passed filter function and
325 * data. The function should usually be called from the pcm open callback. Note
326 * that this function will use private_data field of the substream's runtime. So
327 * it is not available to your pcm driver implementation.
328 */
329int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
330 dma_filter_fn filter_fn, void *filter_data)
331{
332 return snd_dmaengine_pcm_open(substream,
333 snd_dmaengine_pcm_request_channel(filter_fn, filter_data));
334}
335EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
336
337/**
317 * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream 338 * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
318 * @substream: PCM substream 339 * @substream: PCM substream
319 */ 340 */
@@ -321,11 +342,26 @@ int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
321{ 342{
322 struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); 343 struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
323 344
324 dma_release_channel(prtd->dma_chan);
325 kfree(prtd); 345 kfree(prtd);
326 346
327 return 0; 347 return 0;
328} 348}
329EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close); 349EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
330 350
351/**
352 * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM substream and release channel
353 * @substream: PCM substream
354 *
355 * Releases the DMA channel associated with the PCM substream.
356 */
357int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
358{
359 struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
360
361 dma_release_channel(prtd->dma_chan);
362
363 return snd_dmaengine_pcm_close(substream);
364}
365EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
366
331MODULE_LICENSE("GPL"); 367MODULE_LICENSE("GPL");
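Taken together, the changes above split channel management out of open/close: snd_dmaengine_pcm_open() now takes an already-requested struct dma_chan, while the new _request_chan/_release_chan variants preserve the old behaviour for drivers that want the channel tied to the substream. A minimal sketch of the second style, matching the driver conversions earlier in this patch; my_filter, my_pcm_hardware and my_pcm_hw_params are placeholders for the driver's own filter function, hardware constraints and hw_params callback:

	#include <sound/pcm.h>
	#include <sound/soc.h>
	#include <sound/dmaengine_pcm.h>

	static int my_pcm_open(struct snd_pcm_substream *substream)
	{
		struct snd_soc_pcm_runtime *rtd = substream->private_data;

		snd_soc_set_runtime_hwparams(substream, &my_pcm_hardware);

		/* request a channel via the filter and bind it to this substream */
		return snd_dmaengine_pcm_open_request_chan(substream, my_filter,
				snd_soc_dai_get_dma_data(rtd->cpu_dai, substream));
	}

	static struct snd_pcm_ops my_pcm_ops = {
		.open      = my_pcm_open,
		/* releases the channel requested in open, then frees the runtime data */
		.close     = snd_dmaengine_pcm_close_release_chan,
		.ioctl     = snd_pcm_lib_ioctl,
		.hw_params = my_pcm_hw_params,	/* driver-specific slave config setup */
		.trigger   = snd_dmaengine_pcm_trigger,
		.pointer   = snd_dmaengine_pcm_pointer,
	};

Drivers that manage the channel themselves, such as the generic dmaengine PCM added below, instead call snd_dmaengine_pcm_request_channel() once and hand the channel to snd_dmaengine_pcm_open(), pairing it with plain snd_dmaengine_pcm_close().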
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
new file mode 100644
index 000000000000..ae0c37e66ae0
--- /dev/null
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -0,0 +1,284 @@
1/*
2 * Copyright (C) 2013, Analog Devices Inc.
3 * Author: Lars-Peter Clausen <lars@metafoo.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * You should have received a copy of the GNU General Public License along
11 * with this program; if not, write to the Free Software Foundation, Inc.,
12 * 675 Mass Ave, Cambridge, MA 02139, USA.
13 *
14 */
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/dmaengine.h>
18#include <linux/slab.h>
19#include <sound/pcm.h>
20#include <sound/pcm_params.h>
21#include <sound/soc.h>
22#include <linux/dma-mapping.h>
23#include <linux/of.h>
24#include <linux/of_dma.h>
25
26#include <sound/dmaengine_pcm.h>
27
28struct dmaengine_pcm {
29 struct dma_chan *chan[SNDRV_PCM_STREAM_CAPTURE + 1];
30 const struct snd_dmaengine_pcm_config *config;
31 struct snd_soc_platform platform;
32 bool compat;
33};
34
35static struct dmaengine_pcm *soc_platform_to_pcm(struct snd_soc_platform *p)
36{
37 return container_of(p, struct dmaengine_pcm, platform);
38}
39
40/**
41 * snd_dmaengine_pcm_prepare_slave_config() - Generic prepare_slave_config callback
42 * @substream: PCM substream
43 * @params: hw_params
44 * @slave_config: DMA slave config to prepare
45 *
46 * This function can be used as a generic prepare_slave_config callback for
47 * platforms which make use of the snd_dmaengine_dai_dma_data struct for their
48 * DAI DMA data. Internally the function will first call
49 * snd_hwparams_to_dma_slave_config to fill in the slave config based on the
50 * hw_params, followed by snd_dmaengine_pcm_set_config_from_dai_data to fill in the
51 * remaining fields based on the DAI DMA data.
52 */
53int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
54 struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
55{
56 struct snd_soc_pcm_runtime *rtd = substream->private_data;
57 struct snd_dmaengine_dai_dma_data *dma_data;
58 int ret;
59
60 dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
61
62 ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
63 if (ret)
64 return ret;
65
66 snd_dmaengine_pcm_set_config_from_dai_data(substream, dma_data,
67 slave_config);
68
69 return 0;
70}
71EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_prepare_slave_config);
72
73static int dmaengine_pcm_hw_params(struct snd_pcm_substream *substream,
74 struct snd_pcm_hw_params *params)
75{
76 struct snd_soc_pcm_runtime *rtd = substream->private_data;
77 struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
78 struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
79 struct dma_slave_config slave_config;
80 int ret;
81
82 if (pcm->config->prepare_slave_config) {
83 ret = pcm->config->prepare_slave_config(substream, params,
84 &slave_config);
85 if (ret)
86 return ret;
87
88 ret = dmaengine_slave_config(chan, &slave_config);
89 if (ret)
90 return ret;
91 }
92
93 return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
94}
95
96static int dmaengine_pcm_open(struct snd_pcm_substream *substream)
97{
98 struct snd_soc_pcm_runtime *rtd = substream->private_data;
99 struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
100 struct dma_chan *chan = pcm->chan[substream->stream];
101 int ret;
102
103 ret = snd_soc_set_runtime_hwparams(substream,
104 pcm->config->pcm_hardware);
105 if (ret)
106 return ret;
107
108 return snd_dmaengine_pcm_open(substream, chan);
109}
110
111static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
112 struct snd_pcm_substream *substream)
113{
114 if (!pcm->chan[substream->stream])
115 return NULL;
116
117 return pcm->chan[substream->stream]->device->dev;
118}
119
120static void dmaengine_pcm_free(struct snd_pcm *pcm)
121{
122 snd_pcm_lib_preallocate_free_for_all(pcm);
123}
124
125static struct dma_chan *dmaengine_pcm_compat_request_channel(
126 struct snd_soc_pcm_runtime *rtd,
127 struct snd_pcm_substream *substream)
128{
129 struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
130
131 if (pcm->config->compat_request_channel)
132 return pcm->config->compat_request_channel(rtd, substream);
133
134 return snd_dmaengine_pcm_request_channel(pcm->config->compat_filter_fn,
135 snd_soc_dai_get_dma_data(rtd->cpu_dai, substream));
136}
137
138static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
139{
140 struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
141 const struct snd_dmaengine_pcm_config *config = pcm->config;
142 struct snd_pcm_substream *substream;
143 unsigned int i;
144 int ret;
145
146 for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
147 substream = rtd->pcm->streams[i].substream;
148 if (!substream)
149 continue;
150
151 if (!pcm->chan[i] && pcm->compat) {
152 pcm->chan[i] = dmaengine_pcm_compat_request_channel(rtd,
153 substream);
154 }
155
156 if (!pcm->chan[i]) {
157 dev_err(rtd->platform->dev,
158 "Missing dma channel for stream: %d\n", i);
159 ret = -EINVAL;
160 goto err_free;
161 }
162
163 ret = snd_pcm_lib_preallocate_pages(substream,
164 SNDRV_DMA_TYPE_DEV,
165 dmaengine_dma_dev(pcm, substream),
166 config->prealloc_buffer_size,
167 config->pcm_hardware->buffer_bytes_max);
168 if (ret)
169 goto err_free;
170 }
171
172 return 0;
173
174err_free:
175 dmaengine_pcm_free(rtd->pcm);
176 return ret;
177}
178
179static const struct snd_pcm_ops dmaengine_pcm_ops = {
180 .open = dmaengine_pcm_open,
181 .close = snd_dmaengine_pcm_close,
182 .ioctl = snd_pcm_lib_ioctl,
183 .hw_params = dmaengine_pcm_hw_params,
184 .hw_free = snd_pcm_lib_free_pages,
185 .trigger = snd_dmaengine_pcm_trigger,
186 .pointer = snd_dmaengine_pcm_pointer,
187};
188
189static const struct snd_soc_platform_driver dmaengine_pcm_platform = {
190 .ops = &dmaengine_pcm_ops,
191 .pcm_new = dmaengine_pcm_new,
192 .pcm_free = dmaengine_pcm_free,
193 .probe_order = SND_SOC_COMP_ORDER_LATE,
194};
195
196static const struct snd_pcm_ops dmaengine_no_residue_pcm_ops = {
197 .open = dmaengine_pcm_open,
198 .close = snd_dmaengine_pcm_close,
199 .ioctl = snd_pcm_lib_ioctl,
200 .hw_params = dmaengine_pcm_hw_params,
201 .hw_free = snd_pcm_lib_free_pages,
202 .trigger = snd_dmaengine_pcm_trigger,
203 .pointer = snd_dmaengine_pcm_pointer_no_residue,
204};
205
206static const struct snd_soc_platform_driver dmaengine_no_residue_pcm_platform = {
207 .ops = &dmaengine_no_residue_pcm_ops,
208 .pcm_new = dmaengine_pcm_new,
209 .pcm_free = dmaengine_pcm_free,
210 .probe_order = SND_SOC_COMP_ORDER_LATE,
211};
212
213static const char * const dmaengine_pcm_dma_channel_names[] = {
214 [SNDRV_PCM_STREAM_PLAYBACK] = "tx",
215 [SNDRV_PCM_STREAM_CAPTURE] = "rx",
216};
217
218/**
219 * snd_dmaengine_pcm_register - Register a dmaengine based PCM device
220 * @dev: The parent device for the PCM device
221 * @config: Platform specific PCM configuration
222 * @flags: Platform specific quirks
223 */
224int snd_dmaengine_pcm_register(struct device *dev,
225 const struct snd_dmaengine_pcm_config *config, unsigned int flags)
226{
227 struct dmaengine_pcm *pcm;
228 unsigned int i;
229
230 pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
231 if (!pcm)
232 return -ENOMEM;
233
234 pcm->config = config;
235
236 if (flags & SND_DMAENGINE_PCM_FLAG_COMPAT)
237 pcm->compat = true;
238
239 if (!(flags & SND_DMAENGINE_PCM_FLAG_NO_DT) && dev->of_node) {
240 for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
241 pcm->chan[i] = of_dma_request_slave_channel(dev->of_node,
242 dmaengine_pcm_dma_channel_names[i]);
243 }
244 }
245
246 if (flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
247 return snd_soc_add_platform(dev, &pcm->platform,
248 &dmaengine_no_residue_pcm_platform);
249 else
250 return snd_soc_add_platform(dev, &pcm->platform,
251 &dmaengine_pcm_platform);
252}
253EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
254
255/**
256 * snd_dmaengine_pcm_unregister - Removes a dmaengine based PCM device
257 * @dev: Parent device the PCM was register with
258 *
259 * Removes a dmaengine based PCM device previously registered with
260 * snd_dmaengine_pcm_register.
261 */
262void snd_dmaengine_pcm_unregister(struct device *dev)
263{
264 struct snd_soc_platform *platform;
265 struct dmaengine_pcm *pcm;
266 unsigned int i;
267
268 platform = snd_soc_lookup_platform(dev);
269 if (!platform)
270 return;
271
272 pcm = soc_platform_to_pcm(platform);
273
274 for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
275 if (pcm->chan[i])
276 dma_release_channel(pcm->chan[i]);
277 }
278
279 snd_soc_remove_platform(platform);
280 kfree(pcm);
281}
282EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);
283
284MODULE_LICENSE("GPL");
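For the drivers converted later in this series, registering the new generic PCM reduces to filling in a snd_dmaengine_pcm_config and calling snd_dmaengine_pcm_register(). A sketch of that pattern follows; my_pcm_hardware is assumed to be a driver-defined struct snd_pcm_hardware, and the flag comments summarise the behaviour implemented in the file above:

	#include <linux/platform_device.h>
	#include <sound/dmaengine_pcm.h>

	static const struct snd_dmaengine_pcm_config my_dmaengine_pcm_config = {
		.pcm_hardware         = &my_pcm_hardware,
		.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
		.prealloc_buffer_size = 64 * 1024,
	};

	static int my_platform_probe(struct platform_device *pdev)
	{
		/*
		 * SND_DMAENGINE_PCM_FLAG_NO_DT:  skip the "tx"/"rx" device tree lookup
		 * SND_DMAENGINE_PCM_FLAG_COMPAT: request channels through
		 *                                compat_filter_fn/compat_request_channel
		 * SND_DMAENGINE_PCM_FLAG_NO_RESIDUE (not set here): select the
		 *                                pointer_no_residue callback
		 */
		return snd_dmaengine_pcm_register(&pdev->dev, &my_dmaengine_pcm_config,
				SND_DMAENGINE_PCM_FLAG_NO_DT |
				SND_DMAENGINE_PCM_FLAG_COMPAT);
	}

	static int my_platform_remove(struct platform_device *pdev)
	{
		snd_dmaengine_pcm_unregister(&pdev->dev);
		return 0;
	}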
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index fe4541df498c..4b3be6c3c91e 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -90,8 +90,33 @@ static struct snd_soc_platform_driver dummy_platform = {
90}; 90};
91 91
92static struct snd_soc_codec_driver dummy_codec; 92static struct snd_soc_codec_driver dummy_codec;
93
94#define STUB_RATES SNDRV_PCM_RATE_8000_192000
95#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
96 SNDRV_PCM_FMTBIT_U8 | \
97 SNDRV_PCM_FMTBIT_S16_LE | \
98 SNDRV_PCM_FMTBIT_U16_LE | \
99 SNDRV_PCM_FMTBIT_S24_LE | \
100 SNDRV_PCM_FMTBIT_U24_LE | \
101 SNDRV_PCM_FMTBIT_S32_LE | \
102 SNDRV_PCM_FMTBIT_U32_LE | \
103 SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
93static struct snd_soc_dai_driver dummy_dai = { 104static struct snd_soc_dai_driver dummy_dai = {
94 .name = "snd-soc-dummy-dai", 105 .name = "snd-soc-dummy-dai",
106 .playback = {
107 .stream_name = "Playback",
108 .channels_min = 1,
109 .channels_max = 384,
110 .rates = STUB_RATES,
111 .formats = STUB_FORMATS,
112 },
113 .capture = {
114 .stream_name = "Capture",
115 .channels_min = 1,
116 .channels_max = 384,
117 .rates = STUB_RATES,
118 .formats = STUB_FORMATS,
119 },
95}; 120};
96 121
97static int snd_soc_dummy_probe(struct platform_device *pdev) 122static int snd_soc_dummy_probe(struct platform_device *pdev)
diff --git a/sound/soc/spear/spear_pcm.c b/sound/soc/spear/spear_pcm.c
index bfbcc1fcfe6f..2fbd4899d8ef 100644
--- a/sound/soc/spear/spear_pcm.c
+++ b/sound/soc/spear/spear_pcm.c
@@ -64,7 +64,8 @@ static int spear_pcm_open(struct snd_pcm_substream *substream)
64 if (ret) 64 if (ret)
65 return ret; 65 return ret;
66 66
67 return snd_dmaengine_pcm_open(substream, dma_data->filter, dma_data) 67 return snd_dmaengine_pcm_open_request_chan(substream, dma_data->filter,
68 dma_data);
68} 69}
69 70
70static int spear_pcm_mmap(struct snd_pcm_substream *substream, 71static int spear_pcm_mmap(struct snd_pcm_substream *substream,
@@ -79,7 +80,7 @@ static int spear_pcm_mmap(struct snd_pcm_substream *substream,
79 80
80static struct snd_pcm_ops spear_pcm_ops = { 81static struct snd_pcm_ops spear_pcm_ops = {
81 .open = spear_pcm_open, 82 .open = spear_pcm_open,
82 .close = snd_dmaengine_pcm_close, 83 .close = snd_dmaengine_pcm_close_release_chan,
83 .ioctl = snd_pcm_lib_ioctl, 84 .ioctl = snd_pcm_lib_ioctl,
84 .hw_params = spear_pcm_hw_params, 85 .hw_params = spear_pcm_hw_params,
85 .hw_free = spear_pcm_hw_free, 86 .hw_free = spear_pcm_hw_free,
diff --git a/sound/soc/tegra/Kconfig b/sound/soc/tegra/Kconfig
index dbc27ce1d4de..b1c9d573da05 100644
--- a/sound/soc/tegra/Kconfig
+++ b/sound/soc/tegra/Kconfig
@@ -2,7 +2,7 @@ config SND_SOC_TEGRA
2 tristate "SoC Audio for the Tegra System-on-Chip" 2 tristate "SoC Audio for the Tegra System-on-Chip"
3 depends on ARCH_TEGRA && TEGRA20_APB_DMA 3 depends on ARCH_TEGRA && TEGRA20_APB_DMA
4 select REGMAP_MMIO 4 select REGMAP_MMIO
5 select SND_SOC_DMAENGINE_PCM if TEGRA20_APB_DMA 5 select SND_SOC_GENERIC_DMAENGINE_PCM if TEGRA20_APB_DMA
6 help 6 help
7 Say Y or M here if you want support for SoC audio on Tegra. 7 Say Y or M here if you want support for SoC audio on Tegra.
8 8
diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c
index 6d1c70c3d753..f056f632557c 100644
--- a/sound/soc/tegra/tegra_pcm.c
+++ b/sound/soc/tegra/tegra_pcm.c
@@ -29,9 +29,7 @@
29 * 29 *
30 */ 30 */
31 31
32#include <linux/dma-mapping.h>
33#include <linux/module.h> 32#include <linux/module.h>
34#include <linux/slab.h>
35#include <sound/core.h> 33#include <sound/core.h>
36#include <sound/pcm.h> 34#include <sound/pcm.h>
37#include <sound/pcm_params.h> 35#include <sound/pcm_params.h>
@@ -55,175 +53,24 @@ static const struct snd_pcm_hardware tegra_pcm_hardware = {
55 .fifo_size = 4, 53 .fifo_size = 4,
56}; 54};
57 55
58static int tegra_pcm_open(struct snd_pcm_substream *substream) 56static const struct snd_dmaengine_pcm_config tegra_dmaengine_pcm_config = {
59{ 57 .pcm_hardware = &tegra_pcm_hardware,
60 struct snd_soc_pcm_runtime *rtd = substream->private_data; 58 .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
61 struct device *dev = rtd->platform->dev; 59 .compat_filter_fn = NULL,
62 int ret; 60 .prealloc_buffer_size = PAGE_SIZE * 8,
63
64 /* Set HW params now that initialization is complete */
65 snd_soc_set_runtime_hwparams(substream, &tegra_pcm_hardware);
66
67 ret = snd_dmaengine_pcm_open(substream, NULL, NULL);
68 if (ret) {
69 dev_err(dev, "dmaengine pcm open failed with err %d\n", ret);
70 return ret;
71 }
72
73 return 0;
74}
75
76static int tegra_pcm_hw_params(struct snd_pcm_substream *substream,
77 struct snd_pcm_hw_params *params)
78{
79 struct snd_soc_pcm_runtime *rtd = substream->private_data;
80 struct device *dev = rtd->platform->dev;
81 struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
82 struct dma_slave_config slave_config;
83 int ret;
84
85 ret = snd_hwparams_to_dma_slave_config(substream, params,
86 &slave_config);
87 if (ret) {
88 dev_err(dev, "hw params config failed with err %d\n", ret);
89 return ret;
90 }
91
92 snd_dmaengine_pcm_set_config_from_dai_data(substream,
93 snd_soc_dai_get_dma_data(rtd->cpu_dai, substream),
94 &slave_config);
95
96 ret = dmaengine_slave_config(chan, &slave_config);
97 if (ret < 0) {
98 dev_err(dev, "dma slave config failed with err %d\n", ret);
99 return ret;
100 }
101
102 snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
103 return 0;
104}
105
106static int tegra_pcm_hw_free(struct snd_pcm_substream *substream)
107{
108 snd_pcm_set_runtime_buffer(substream, NULL);
109 return 0;
110}
111
112static int tegra_pcm_mmap(struct snd_pcm_substream *substream,
113 struct vm_area_struct *vma)
114{
115 struct snd_pcm_runtime *runtime = substream->runtime;
116
117 return dma_mmap_writecombine(substream->pcm->card->dev, vma,
118 runtime->dma_area,
119 runtime->dma_addr,
120 runtime->dma_bytes);
121}
122
123static struct snd_pcm_ops tegra_pcm_ops = {
124 .open = tegra_pcm_open,
125 .close = snd_dmaengine_pcm_close,
126 .ioctl = snd_pcm_lib_ioctl,
127 .hw_params = tegra_pcm_hw_params,
128 .hw_free = tegra_pcm_hw_free,
129 .trigger = snd_dmaengine_pcm_trigger,
130 .pointer = snd_dmaengine_pcm_pointer,
131 .mmap = tegra_pcm_mmap,
132};
133
134static int tegra_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
135{
136 struct snd_pcm_substream *substream = pcm->streams[stream].substream;
137 struct snd_dma_buffer *buf = &substream->dma_buffer;
138 size_t size = tegra_pcm_hardware.buffer_bytes_max;
139
140 buf->area = dma_alloc_writecombine(pcm->card->dev, size,
141 &buf->addr, GFP_KERNEL);
142 if (!buf->area)
143 return -ENOMEM;
144
145 buf->dev.type = SNDRV_DMA_TYPE_DEV;
146 buf->dev.dev = pcm->card->dev;
147 buf->private_data = NULL;
148 buf->bytes = size;
149
150 return 0;
151}
152
153static void tegra_pcm_deallocate_dma_buffer(struct snd_pcm *pcm, int stream)
154{
155 struct snd_pcm_substream *substream;
156 struct snd_dma_buffer *buf;
157
158 substream = pcm->streams[stream].substream;
159 if (!substream)
160 return;
161
162 buf = &substream->dma_buffer;
163 if (!buf->area)
164 return;
165
166 dma_free_writecombine(pcm->card->dev, buf->bytes,
167 buf->area, buf->addr);
168 buf->area = NULL;
169}
170
171static u64 tegra_dma_mask = DMA_BIT_MASK(32);
172
173static int tegra_pcm_new(struct snd_soc_pcm_runtime *rtd)
174{
175 struct snd_card *card = rtd->card->snd_card;
176 struct snd_pcm *pcm = rtd->pcm;
177 int ret = 0;
178
179 if (!card->dev->dma_mask)
180 card->dev->dma_mask = &tegra_dma_mask;
181 if (!card->dev->coherent_dma_mask)
182 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
183
184 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
185 ret = tegra_pcm_preallocate_dma_buffer(pcm,
186 SNDRV_PCM_STREAM_PLAYBACK);
187 if (ret)
188 goto err;
189 }
190
191 if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
192 ret = tegra_pcm_preallocate_dma_buffer(pcm,
193 SNDRV_PCM_STREAM_CAPTURE);
194 if (ret)
195 goto err_free_play;
196 }
197
198 return 0;
199
200err_free_play:
201 tegra_pcm_deallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_PLAYBACK);
202err:
203 return ret;
204}
205
206static void tegra_pcm_free(struct snd_pcm *pcm)
207{
208 tegra_pcm_deallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_CAPTURE);
209 tegra_pcm_deallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_PLAYBACK);
210}
211
212static struct snd_soc_platform_driver tegra_pcm_platform = {
213 .ops = &tegra_pcm_ops,
214 .pcm_new = tegra_pcm_new,
215 .pcm_free = tegra_pcm_free,
216}; 61};
217 62
218int tegra_pcm_platform_register(struct device *dev) 63int tegra_pcm_platform_register(struct device *dev)
219{ 64{
220 return snd_soc_register_platform(dev, &tegra_pcm_platform); 65 return snd_dmaengine_pcm_register(dev, &tegra_dmaengine_pcm_config,
66 SND_DMAENGINE_PCM_FLAG_NO_DT |
67 SND_DMAENGINE_PCM_FLAG_COMPAT);
221} 68}
222EXPORT_SYMBOL_GPL(tegra_pcm_platform_register); 69EXPORT_SYMBOL_GPL(tegra_pcm_platform_register);
223 70
224void tegra_pcm_platform_unregister(struct device *dev) 71void tegra_pcm_platform_unregister(struct device *dev)
225{ 72{
226 snd_soc_unregister_platform(dev); 73 return snd_dmaengine_pcm_unregister(dev);
227} 74}
228EXPORT_SYMBOL_GPL(tegra_pcm_platform_unregister); 75EXPORT_SYMBOL_GPL(tegra_pcm_platform_unregister);
229 76
diff --git a/sound/soc/ux500/Kconfig b/sound/soc/ux500/Kconfig
index 069330d82be5..c73c5907eb11 100644
--- a/sound/soc/ux500/Kconfig
+++ b/sound/soc/ux500/Kconfig
@@ -16,7 +16,7 @@ config SND_SOC_UX500_PLAT_MSP_I2S
16config SND_SOC_UX500_PLAT_DMA 16config SND_SOC_UX500_PLAT_DMA
17 tristate "Platform - DB8500 (DMA)" 17 tristate "Platform - DB8500 (DMA)"
18 depends on SND_SOC_UX500 18 depends on SND_SOC_UX500
19 select SND_SOC_DMAENGINE_PCM 19 select SND_SOC_GENERIC_DMAENGINE_PCM
20 help 20 help
21 Say Y if you want to enable the Ux500 platform-driver. 21 Say Y if you want to enable the Ux500 platform-driver.
22 22
diff --git a/sound/soc/ux500/ux500_pcm.c b/sound/soc/ux500/ux500_pcm.c
index 09b5364e5095..b6e5ae277299 100644
--- a/sound/soc/ux500/ux500_pcm.c
+++ b/sound/soc/ux500/ux500_pcm.c
@@ -40,7 +40,7 @@
40#define UX500_PLATFORM_PERIODS_MAX 48 40#define UX500_PLATFORM_PERIODS_MAX 48
41#define UX500_PLATFORM_BUFFER_BYTES_MAX (2048 * PAGE_SIZE) 41#define UX500_PLATFORM_BUFFER_BYTES_MAX (2048 * PAGE_SIZE)
42 42
43static struct snd_pcm_hardware ux500_pcm_hw = { 43static const struct snd_pcm_hardware ux500_pcm_hw = {
44 .info = SNDRV_PCM_INFO_INTERLEAVED | 44 .info = SNDRV_PCM_INFO_INTERLEAVED |
45 SNDRV_PCM_INFO_MMAP | 45 SNDRV_PCM_INFO_MMAP |
46 SNDRV_PCM_INFO_RESUME | 46 SNDRV_PCM_INFO_RESUME |
@@ -61,43 +61,23 @@ static struct snd_pcm_hardware ux500_pcm_hw = {
61 .periods_max = UX500_PLATFORM_PERIODS_MAX, 61 .periods_max = UX500_PLATFORM_PERIODS_MAX,
62}; 62};
63 63
64static void ux500_pcm_dma_hw_free(struct device *dev, 64static struct dma_chan *ux500_pcm_request_chan(struct snd_soc_pcm_runtime *rtd,
65 struct snd_pcm_substream *substream) 65 struct snd_pcm_substream *substream)
66{ 66{
67 struct snd_pcm_runtime *runtime = substream->runtime;
68 struct snd_dma_buffer *buf = runtime->dma_buffer_p;
69
70 if (runtime->dma_area == NULL)
71 return;
72
73 if (buf != &substream->dma_buffer) {
74 dma_free_coherent(buf->dev.dev, buf->bytes, buf->area,
75 buf->addr);
76 kfree(runtime->dma_buffer_p);
77 }
78
79 snd_pcm_set_runtime_buffer(substream, NULL);
80}
81
82static int ux500_pcm_open(struct snd_pcm_substream *substream)
83{
84 struct snd_soc_pcm_runtime *rtd = substream->private_data;
85 struct snd_soc_dai *dai = rtd->cpu_dai; 67 struct snd_soc_dai *dai = rtd->cpu_dai;
86 struct device *dev = dai->dev; 68 struct device *dev = dai->dev;
87 int ret;
88 struct ux500_msp_dma_params *dma_params;
89 u16 per_data_width, mem_data_width; 69 u16 per_data_width, mem_data_width;
90 struct stedma40_chan_cfg *dma_cfg; 70 struct stedma40_chan_cfg *dma_cfg;
71 struct ux500_msp_dma_params *dma_params;
91 72
92 dev_dbg(dev, "%s: MSP %d (%s): Enter.\n", __func__, dai->id, 73 dev_dbg(dev, "%s: MSP %d (%s): Enter.\n", __func__, dai->id,
93 snd_pcm_stream_str(substream)); 74 snd_pcm_stream_str(substream));
94 75
95 dev_dbg(dev, "%s: Set runtime hwparams.\n", __func__); 76 dma_params = snd_soc_dai_get_dma_data(dai, substream);
96 snd_soc_set_runtime_hwparams(substream, &ux500_pcm_hw); 77 dma_cfg = dma_params->dma_cfg;
97 78
98 mem_data_width = STEDMA40_HALFWORD_WIDTH; 79 mem_data_width = STEDMA40_HALFWORD_WIDTH;
99 80
100 dma_params = snd_soc_dai_get_dma_data(dai, substream);
101 switch (dma_params->data_size) { 81 switch (dma_params->data_size) {
102 case 32: 82 case 32:
103 per_data_width = STEDMA40_WORD_WIDTH; 83 per_data_width = STEDMA40_WORD_WIDTH;
@@ -110,13 +90,8 @@ static int ux500_pcm_open(struct snd_pcm_substream *substream)
110 break; 90 break;
111 default: 91 default:
112 per_data_width = STEDMA40_WORD_WIDTH; 92 per_data_width = STEDMA40_WORD_WIDTH;
113 dev_warn(rtd->platform->dev,
114 "%s: Unknown data-size (%d)! Assuming 32 bits.\n",
115 __func__, dma_params->data_size);
116 } 93 }
117 94
118 dma_cfg = dma_params->dma_cfg;
119
120 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 95 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
121 dma_cfg->src_info.data_width = mem_data_width; 96 dma_cfg->src_info.data_width = mem_data_width;
122 dma_cfg->dst_info.data_width = per_data_width; 97 dma_cfg->dst_info.data_width = per_data_width;
@@ -125,123 +100,24 @@ static int ux500_pcm_open(struct snd_pcm_substream *substream)
125 dma_cfg->dst_info.data_width = mem_data_width; 100 dma_cfg->dst_info.data_width = mem_data_width;
126 } 101 }
127 102
128 103 return snd_dmaengine_pcm_request_channel(stedma40_filter, dma_cfg);
129 ret = snd_dmaengine_pcm_open(substream, stedma40_filter, dma_cfg);
130 if (ret) {
131 dev_dbg(dai->dev,
132 "%s: ERROR: snd_dmaengine_pcm_open failed (%d)!\n",
133 __func__, ret);
134 return ret;
135 }
136
137 return 0;
138} 104}
139 105
140static int ux500_pcm_hw_params(struct snd_pcm_substream *substream, 106static const struct snd_dmaengine_pcm_config ux500_dmaengine_pcm_config = {
141 struct snd_pcm_hw_params *hw_params) 107 .pcm_hardware = &ux500_pcm_hw,
142{ 108 .compat_request_channel = ux500_pcm_request_chan,
143 struct snd_pcm_runtime *runtime = substream->runtime; 109 .prealloc_buffer_size = 128 * 1024,
144 struct snd_dma_buffer *buf = runtime->dma_buffer_p;
145 struct snd_soc_pcm_runtime *rtd = substream->private_data;
146 int ret = 0;
147 int size;
148
149 dev_dbg(rtd->platform->dev, "%s: Enter\n", __func__);
150
151 size = params_buffer_bytes(hw_params);
152
153 if (buf) {
154 if (buf->bytes >= size)
155 goto out;
156 ux500_pcm_dma_hw_free(NULL, substream);
157 }
158
159 if (substream->dma_buffer.area != NULL &&
160 substream->dma_buffer.bytes >= size) {
161 buf = &substream->dma_buffer;
162 } else {
163 buf = kmalloc(sizeof(struct snd_dma_buffer), GFP_KERNEL);
164 if (!buf)
165 goto nomem;
166
167 buf->dev.type = SNDRV_DMA_TYPE_DEV;
168 buf->dev.dev = NULL;
169 buf->area = dma_alloc_coherent(NULL, size, &buf->addr,
170 GFP_KERNEL);
171 buf->bytes = size;
172 buf->private_data = NULL;
173
174 if (!buf->area)
175 goto free;
176 }
177 snd_pcm_set_runtime_buffer(substream, buf);
178 ret = 1;
179 out:
180 runtime->dma_bytes = size;
181 return ret;
182
183 free:
184 kfree(buf);
185 nomem:
186 return -ENOMEM;
187}
188
189static int ux500_pcm_hw_free(struct snd_pcm_substream *substream)
190{
191 struct snd_soc_pcm_runtime *rtd = substream->private_data;
192
193 dev_dbg(rtd->platform->dev, "%s: Enter\n", __func__);
194
195 ux500_pcm_dma_hw_free(NULL, substream);
196
197 return 0;
198}
199
200static int ux500_pcm_mmap(struct snd_pcm_substream *substream,
201 struct vm_area_struct *vma)
202{
203 struct snd_pcm_runtime *runtime = substream->runtime;
204 struct snd_soc_pcm_runtime *rtd = substream->private_data;
205
206 dev_dbg(rtd->platform->dev, "%s: Enter.\n", __func__);
207
208 return dma_mmap_coherent(NULL, vma, runtime->dma_area,
209 runtime->dma_addr, runtime->dma_bytes);
210}
211
212static struct snd_pcm_ops ux500_pcm_ops = {
213 .open = ux500_pcm_open,
214 .close = snd_dmaengine_pcm_close,
215 .ioctl = snd_pcm_lib_ioctl,
216 .hw_params = ux500_pcm_hw_params,
217 .hw_free = ux500_pcm_hw_free,
218 .trigger = snd_dmaengine_pcm_trigger,
219 .pointer = snd_dmaengine_pcm_pointer_no_residue,
220 .mmap = ux500_pcm_mmap
221};
222
223int ux500_pcm_new(struct snd_soc_pcm_runtime *rtd)
224{
225 struct snd_pcm *pcm = rtd->pcm;
226
227 dev_dbg(rtd->platform->dev, "%s: Enter (id = '%s').\n", __func__,
228 pcm->id);
229
230 pcm->info_flags = 0;
231
232 return 0;
233}
234
235static struct snd_soc_platform_driver ux500_pcm_soc_drv = {
236 .ops = &ux500_pcm_ops,
237 .pcm_new = ux500_pcm_new,
238}; 110};
239 111
240int ux500_pcm_register_platform(struct platform_device *pdev) 112int ux500_pcm_register_platform(struct platform_device *pdev)
241{ 113{
242 int ret; 114 int ret;
243 115
244 ret = snd_soc_register_platform(&pdev->dev, &ux500_pcm_soc_drv); 116 ret = snd_dmaengine_pcm_register(&pdev->dev,
117 &ux500_dmaengine_pcm_config,
118 SND_DMAENGINE_PCM_FLAG_NO_RESIDUE |
119 SND_DMAENGINE_PCM_FLAG_COMPAT |
120 SND_DMAENGINE_PCM_FLAG_NO_DT);
245 if (ret < 0) { 121 if (ret < 0) {
246 dev_err(&pdev->dev, 122 dev_err(&pdev->dev,
247 "%s: ERROR: Failed to register platform '%s' (%d)!\n", 123 "%s: ERROR: Failed to register platform '%s' (%d)!\n",
@@ -255,8 +131,7 @@ EXPORT_SYMBOL_GPL(ux500_pcm_register_platform);
255 131
256int ux500_pcm_unregister_platform(struct platform_device *pdev) 132int ux500_pcm_unregister_platform(struct platform_device *pdev)
257{ 133{
258 snd_soc_unregister_platform(&pdev->dev); 134 snd_dmaengine_pcm_unregister(&pdev->dev);
259
260 return 0; 135 return 0;
261} 136}
262EXPORT_SYMBOL_GPL(ux500_pcm_unregister_platform); 137EXPORT_SYMBOL_GPL(ux500_pcm_unregister_platform);
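The ux500 conversion above shows what the compat_request_channel hook is for: when a controller cannot be described by a plain filter function plus DAI DMA data (here the stedma40 channel config has to be adjusted per stream direction first), the callback builds the channel itself and the generic PCM uses it for the non-devicetree case. Schematically, with my_* names purely illustrative:

	static struct dma_chan *my_compat_request_chan(struct snd_soc_pcm_runtime *rtd,
		struct snd_pcm_substream *substream)
	{
		/* adjust a controller-specific channel config before requesting */
		struct my_chan_cfg *cfg = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);

		return snd_dmaengine_pcm_request_channel(my_dma_filter, cfg);
	}

	static const struct snd_dmaengine_pcm_config my_config = {
		.pcm_hardware           = &my_pcm_hardware,
		.compat_request_channel = my_compat_request_chan,
		.prealloc_buffer_size   = 128 * 1024,
	};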
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 497d2741d119..ebe91440a068 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -509,7 +509,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
509 else 509 else
510 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, 510 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
511 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 511 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
512 0, cpu_to_le16(wIndex), 512 0, wIndex,
513 &tmp, sizeof(tmp), 1000); 513 &tmp, sizeof(tmp), 1000);
514 up_read(&mixer->chip->shutdown_rwsem); 514 up_read(&mixer->chip->shutdown_rwsem);
515 515
@@ -540,7 +540,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
540 else 540 else
541 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest, 541 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
542 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 542 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
543 cpu_to_le16(wValue), cpu_to_le16(wIndex), 543 wValue, wIndex,
544 NULL, 0, 1000); 544 NULL, 0, 1000);
545 up_read(&mixer->chip->shutdown_rwsem); 545 up_read(&mixer->chip->shutdown_rwsem);
546 546
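The two hunks above and the boot-quirk change below drop cpu_to_le16() because usb_control_msg() takes wValue and wIndex in CPU byte order; the USB core performs the little-endian conversion itself when it builds the SETUP packet, so converting in the caller swapped the values twice on big-endian hosts (on little-endian hosts the extra conversion is a no-op, which is why the bug only showed up on big-endian). A worked example with an illustrative value:

	/* intended wValue on the wire: 0x0102 (little-endian bytes 02 01)            */
	/* correct: pass 0x0102              -> core stores cpu_to_le16(0x0102)       */
	/* broken:  pass cpu_to_le16(0x0102) == 0x0201 as a host value on big-endian  */
	/*          -> core stores cpu_to_le16(0x0201), device reads 0x0201 instead   */
	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
			      wValue, wIndex,	/* host byte order, no conversion */
			      NULL, 0, 1000);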
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index e564adb74852..3879eae7e874 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -499,7 +499,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
499{ 499{
500 int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 500 int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
501 0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE, 501 0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
502 cpu_to_le16(1), 0, NULL, 0, 1000); 502 1, 0, NULL, 0, 1000);
503 503
504 if (ret < 0) 504 if (ret < 0)
505 return ret; 505 return ret;