author    Grant Likely <grant.likely@secretlab.ca>  2011-10-24 11:03:35 -0400
committer Grant Likely <grant.likely@secretlab.ca>  2011-10-24 11:03:35 -0400
commit    2bf6f675fa1a0f80b21aff20e6c21e87d6a7c9c9 (patch)
tree      ef49d741211dcc4f38636b5c422c9c346da09adf
parent    5762c20593b6b959f1470dc6f1ff4ca4d9570f8d (diff)
parent    c3b92c8787367a8bb53d57d9789b558f1295cc96 (diff)
Merge commit 'v3.1' into devicetree/next
-rw-r--r--  Documentation/kernel-parameters.txt | 7
-rw-r--r--  Documentation/networking/scaling.txt | 10
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/common/vic.c | 1
-rw-r--r--  arch/arm/include/asm/localtimer.h | 2
-rw-r--r--  arch/arm/kernel/perf_event_v7.c | 4
-rw-r--r--  arch/arm/mach-omap2/board-2430sdp.c | 3
-rw-r--r--  arch/arm/mach-omap2/hsmmc.c | 12
-rw-r--r--  arch/arm/mach-omap2/usb-musb.c | 3
-rw-r--r--  arch/arm/mach-s3c2410/s3c2410.c | 2
-rw-r--r--  arch/arm/mach-s3c2412/s3c2412.c | 2
-rw-r--r--  arch/arm/mach-s3c2416/s3c2416.c | 2
-rw-r--r--  arch/arm/mach-s3c2440/s3c2440.c | 2
-rw-r--r--  arch/arm/mach-s3c2440/s3c2442.c | 2
-rw-r--r--  arch/arm/mach-tegra/cpu-tegra.c | 1
-rw-r--r--  arch/arm/mach-ux500/Kconfig | 1
-rw-r--r--  arch/arm/mm/init.c | 7
-rw-r--r--  arch/arm/plat-s5p/irq-gpioint.c | 6
-rw-r--r--  arch/mips/Kconfig | 6
-rw-r--r--  arch/mips/alchemy/common/platform.c | 2
-rw-r--r--  arch/mips/alchemy/common/power.c | 22
-rw-r--r--  arch/mips/alchemy/devboards/bcsr.c | 4
-rw-r--r--  arch/mips/alchemy/devboards/db1200/setup.c | 7
-rw-r--r--  arch/mips/ar7/irq.c | 3
-rw-r--r--  arch/mips/bcm63xx/irq.c | 1
-rw-r--r--  arch/mips/cobalt/irq.c | 1
-rw-r--r--  arch/mips/dec/setup.c | 4
-rw-r--r--  arch/mips/emma/markeins/irq.c | 2
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h | 1
-rw-r--r--  arch/mips/include/asm/mach-powertv/dma-coherence.h | 1
-rw-r--r--  arch/mips/include/asm/stackframe.h | 4
-rw-r--r--  arch/mips/jz4740/gpio.c | 52
-rw-r--r--  arch/mips/kernel/ftrace.c | 39
-rw-r--r--  arch/mips/kernel/i8259.c | 3
-rw-r--r--  arch/mips/kernel/linux32.c | 7
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 2
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 2
-rw-r--r--  arch/mips/kernel/signal.c | 3
-rw-r--r--  arch/mips/kernel/traps.c | 16
-rw-r--r--  arch/mips/kernel/vpe.c | 2
-rw-r--r--  arch/mips/lantiq/irq.c | 6
-rw-r--r--  arch/mips/lantiq/xway/ebu.c | 1
-rw-r--r--  arch/mips/lantiq/xway/pmu.c | 1
-rw-r--r--  arch/mips/lasat/interrupt.c | 1
-rw-r--r--  arch/mips/loongson/fuloong-2e/irq.c | 1
-rw-r--r--  arch/mips/loongson/lemote-2f/irq.c | 3
-rw-r--r--  arch/mips/mm/mmap.c | 48
-rw-r--r--  arch/mips/mm/tlbex.c | 6
-rw-r--r--  arch/mips/mti-malta/malta-int.c | 6
-rw-r--r--  arch/mips/netlogic/xlr/Makefile | 2
-rw-r--r--  arch/mips/pci/pci-lantiq.c | 9
-rw-r--r--  arch/mips/pci/pci-rc32434.c | 2
-rw-r--r--  arch/mips/pmc-sierra/msp71xx/msp_irq.c | 6
-rw-r--r--  arch/mips/pnx8550/common/int.c | 2
-rw-r--r--  arch/mips/sgi-ip22/ip22-int.c | 10
-rw-r--r--  arch/mips/sni/rm200.c | 1
-rw-r--r--  arch/mips/vr41xx/common/irq.c | 1
-rw-r--r--  arch/sparc/include/asm/pgtsrmmu.h | 2
-rw-r--r--  arch/sparc/kernel/pci.c | 3
-rw-r--r--  arch/sparc/kernel/signal32.c | 21
-rw-r--r--  arch/sparc/kernel/signal_32.c | 32
-rw-r--r--  arch/sparc/kernel/signal_64.c | 32
-rw-r--r--  arch/sparc/mm/leon_mm.c | 2
-rw-r--r--  arch/tile/kernel/intvec_32.S | 2
-rw-r--r--  arch/tile/lib/atomic_asm_32.S | 2
-rw-r--r--  arch/x86/kernel/vsyscall_64.c | 2
-rw-r--r--  arch/x86/mm/init.c | 3
-rw-r--r--  arch/x86/pci/acpi.c | 11
-rw-r--r--  arch/x86/platform/mrst/mrst.c | 22
-rw-r--r--  crypto/ghash-generic.c | 6
-rw-r--r--  drivers/gpio/gpio-omap.c | 2
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/atom.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 26
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 2
-rw-r--r--  drivers/hwmon/w83627ehf.c | 20
-rw-r--r--  drivers/ide/Kconfig | 24
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 7
-rw-r--r--  drivers/iommu/intel-iommu.c | 75
-rw-r--r--  drivers/md/dm-crypt.c | 2
-rw-r--r--  drivers/md/dm-flakey.c | 4
-rw-r--r--  drivers/md/dm-kcopyd.c | 1
-rw-r--r--  drivers/md/dm-raid.c | 2
-rw-r--r--  drivers/md/dm-table.c | 32
-rw-r--r--  drivers/md/md.c | 22
-rw-r--r--  drivers/md/md.h | 2
-rw-r--r--  drivers/md/multipath.c | 3
-rw-r--r--  drivers/md/raid1.c | 3
-rw-r--r--  drivers/md/raid10.c | 5
-rw-r--r--  drivers/md/raid5.c | 6
-rw-r--r--  drivers/media/video/v4l2-dev.c | 2
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 18
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 7
-rw-r--r--  drivers/net/can/mscan/mscan.c | 11
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/mlx4/en_tx.c | 6
-rw-r--r--  drivers/net/netconsole.c | 5
-rw-r--r--  drivers/net/pptp.c | 22
-rw-r--r--  drivers/net/r8169.c | 90
-rw-r--r--  drivers/net/smsc911x.c | 2
-rw-r--r--  drivers/net/tg3.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 9
-rw-r--r--  drivers/staging/octeon/ethernet-rx.c | 3
-rw-r--r--  drivers/tty/serial/lantiq.c | 4
-rw-r--r--  fs/btrfs/ioctl.c | 11
-rw-r--r--  fs/cifs/connect.c | 2
-rw-r--r--  fs/xfs/xfs_buf_item.c | 3
-rw-r--r--  fs/xfs/xfs_dquot_item.c | 10
-rw-r--r--  fs/xfs/xfs_inode_item.c | 10
-rw-r--r--  fs/xfs/xfs_linux.h | 2
-rw-r--r--  fs/xfs/xfs_super.c | 13
-rw-r--r--  fs/xfs/xfs_trans.h | 2
-rw-r--r--  fs/xfs/xfs_trans_ail.c | 83
-rw-r--r--  fs/xfs/xfs_trans_priv.h | 8
-rw-r--r--  include/linux/device-mapper.h | 5
-rw-r--r--  include/net/ip_vs.h | 1
-rw-r--r--  include/net/udplite.h | 63
-rw-r--r--  kernel/posix-cpu-timers.c | 7
-rw-r--r--  kernel/sys.c | 2
-rw-r--r--  mm/migrate.c | 8
-rw-r--r--  net/bluetooth/l2cap_sock.c | 4
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 3
-rw-r--r--  net/bluetooth/sco.c | 5
-rw-r--r--  net/bridge/br_device.c | 3
-rw-r--r--  net/bridge/br_if.c | 9
-rw-r--r--  net/bridge/br_netlink.c | 1
-rw-r--r--  net/bridge/br_private.h | 1
-rw-r--r--  net/core/fib_rules.c | 5
-rw-r--r--  net/ipv4/tcp_input.c | 4
-rw-r--r--  net/ipv4/tcp_ipv4.c | 11
-rw-r--r--  net/ipv4/tcp_minisocks.c | 1
-rw-r--r--  net/ipv6/af_inet6.c | 1
-rw-r--r--  net/ipv6/tcp_ipv6.c | 8
-rw-r--r--  net/l2tp/l2tp_core.c | 4
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 133
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c | 4
-rw-r--r--  net/x25/af_x25.c | 40
-rw-r--r--  net/x25/x25_dev.c | 6
-rw-r--r--  net/x25/x25_facilities.c | 10
-rw-r--r--  net/x25/x25_in.c | 43
-rw-r--r--  net/x25/x25_link.c | 3
-rw-r--r--  net/x25/x25_subr.c | 14
-rw-r--r--  security/security.c | 1
-rw-r--r--  sound/pci/hda/hda_intel.c | 1
-rw-r--r--  sound/pci/hda/patch_conexant.c | 1
153 files changed, 948 insertions, 596 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 854ed5ca7e3f..d6e6724446c8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2706,10 +2706,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			functions are at fixed addresses, they make nice
 			targets for exploits that can control RIP.
 
-			emulate     [default] Vsyscalls turn into traps and are
-			emulated reasonably safely.
+			emulate     Vsyscalls turn into traps and are emulated
+			reasonably safely.
 
-			native      Vsyscalls are native syscall instructions.
+			native      [default] Vsyscalls are native syscall
+			instructions.
 			This is a little bit faster than trapping
 			and makes a few dynamic recompilers work
 			better than they would in emulation mode.
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index 8ce7c30e7230..fe67b5c79f0f 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -27,7 +27,7 @@ applying a filter to each packet that assigns it to one of a small number
 of logical flows. Packets for each flow are steered to a separate receive
 queue, which in turn can be processed by separate CPUs. This mechanism is
 generally known as “Receive-side Scaling” (RSS). The goal of RSS and
-the other scaling techniques to increase performance uniformly.
+the other scaling techniques is to increase performance uniformly.
 Multi-queue distribution can also be used for traffic prioritization, but
 that is not the focus of these techniques.
 
@@ -186,10 +186,10 @@ are steered using plain RPS. Multiple table entries may point to the
 same CPU. Indeed, with many flows and few CPUs, it is very likely that
 a single application thread handles flows with many different flow hashes.
 
-rps_sock_table is a global flow table that contains the *desired* CPU for
-flows: the CPU that is currently processing the flow in userspace. Each
-table value is a CPU index that is updated during calls to recvmsg and
-sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
+rps_sock_flow_table is a global flow table that contains the *desired* CPU
+for flows: the CPU that is currently processing the flow in userspace.
+Each table value is a CPU index that is updated during calls to recvmsg
+and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
 and tcp_splice_read()).
 
 When the scheduler moves a thread to a new CPU while it has outstanding
diff --git a/MAINTAINERS b/MAINTAINERS
index 046526a647c4..73369be4c241 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2460,7 +2460,7 @@ S: Supported
 F:	drivers/infiniband/hw/ehca/
 
 EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M:	Breno Leitao <leitao@linux.vnet.ibm.com>
+M:	Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ehea/
@@ -3313,7 +3313,7 @@ M: David Woodhouse <dwmw2@infradead.org>
 L:	iommu@lists.linux-foundation.org
 T:	git git://git.infradead.org/iommu-2.6.git
 S:	Supported
-F:	drivers/pci/intel-iommu.c
+F:	drivers/iommu/intel-iommu.c
 F:	include/linux/intel-iommu.h
 
 INTEL IOP-ADMA DMA DRIVER
@@ -6368,10 +6368,10 @@ F: net/ipv4/tcp_lp.c
 
 TEGRA SUPPORT
 M:	Colin Cross <ccross@android.com>
-M:	Erik Gilling <konkers@android.com>
 M:	Olof Johansson <olof@lixom.net>
+M:	Stephen Warren <swarren@nvidia.com>
 L:	linux-tegra@vger.kernel.org
-T:	git git://android.git.kernel.org/kernel/tegra.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/olof/tegra.git
 S:	Supported
 F:	arch/arm/mach-tegra
 
diff --git a/Makefile b/Makefile
index 31f967c31e7f..07bc92544e9c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc9
+EXTRAVERSION =
 NAME = "Divemaster Edition"
 
 # *DOCUMENTATION*
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 7aa4262ada7a..197f81c77351 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -259,7 +259,6 @@ static void __init vic_disable(void __iomem *base)
 	writel(0, base + VIC_INT_SELECT);
 	writel(0, base + VIC_INT_ENABLE);
 	writel(~0, base + VIC_INT_ENABLE_CLEAR);
-	writel(0, base + VIC_IRQ_STATUS);
 	writel(0, base + VIC_ITCR);
 	writel(~0, base + VIC_INT_SOFT_CLEAR);
 }
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
index 080d74f8128d..ff66638ff54d 100644
--- a/arch/arm/include/asm/localtimer.h
+++ b/arch/arm/include/asm/localtimer.h
@@ -10,6 +10,8 @@
 #ifndef __ASM_ARM_LOCALTIMER_H
 #define __ASM_ARM_LOCALTIMER_H
 
+#include <linux/errno.h>
+
 struct clock_event_device;
 
 /*
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4c851834f68e..6be3e2e4d838 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -321,8 +321,8 @@ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
 	[PERF_COUNT_HW_INSTRUCTIONS] =
 					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
-	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
-	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
+	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 2028464cf5b9..f79b7d2a8ed4 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -193,7 +193,8 @@ static int __init omap2430_i2c_init(void)
 {
 	omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo,
 			ARRAY_SIZE(sdp2430_i2c1_boardinfo));
-	omap2_pmic_init("twl4030", &sdp2430_twldata);
+	omap_pmic_init(2, 100, "twl4030", INT_24XX_SYS_NIRQ,
+			&sdp2430_twldata);
 	return 0;
 }
 
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index a9b45c76e1d3..097a42d81e59 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -137,8 +137,7 @@ static void omap4_hsmmc1_before_set_reg(struct device *dev, int slot,
 	 */
 	reg = omap4_ctrl_pad_readl(control_pbias_offset);
 	reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
-		OMAP4_MMC1_PWRDNZ_MASK |
-		OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+		OMAP4_MMC1_PWRDNZ_MASK);
 	omap4_ctrl_pad_writel(reg, control_pbias_offset);
 }
 
@@ -156,8 +155,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
 	else
 		reg |= OMAP4_MMC1_PBIASLITE_VMODE_MASK;
 	reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
-		OMAP4_MMC1_PWRDNZ_MASK |
-		OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+		OMAP4_MMC1_PWRDNZ_MASK);
 	omap4_ctrl_pad_writel(reg, control_pbias_offset);
 
 	timeout = jiffies + msecs_to_jiffies(5);
@@ -171,16 +169,14 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
 	if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) {
 		pr_err("Pbias Voltage is not same as LDO\n");
 		/* Caution : On VMODE_ERROR Power Down MMC IO */
-		reg &= ~(OMAP4_MMC1_PWRDNZ_MASK |
-			OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+		reg &= ~(OMAP4_MMC1_PWRDNZ_MASK);
 		omap4_ctrl_pad_writel(reg, control_pbias_offset);
 	}
 	} else {
 		reg = omap4_ctrl_pad_readl(control_pbias_offset);
 		reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
 			OMAP4_MMC1_PWRDNZ_MASK |
-			OMAP4_MMC1_PBIASLITE_VMODE_MASK |
-			OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+			OMAP4_MMC1_PBIASLITE_VMODE_MASK);
 		omap4_ctrl_pad_writel(reg, control_pbias_offset);
 	}
 }
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index a65145b02a55..19e4dac62a8c 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -137,9 +137,6 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
 	musb_plat.mode = board_data->mode;
 	musb_plat.extvbus = board_data->extvbus;
 
-	if (cpu_is_omap44xx())
-		omap4430_phy_init(dev);
-
 	if (cpu_is_omap3517() || cpu_is_omap3505()) {
 		oh_name = "am35x_otg_hs";
 		name = "musb-am35x";
diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c
index f1d3bd8f6f17..343a540d86a9 100644
--- a/arch/arm/mach-s3c2410/s3c2410.c
+++ b/arch/arm/mach-s3c2410/s3c2410.c
@@ -170,7 +170,9 @@ int __init s3c2410_init(void)
 {
 	printk("S3C2410: Initialising architecture\n");
 
+#ifdef CONFIG_PM
 	register_syscore_ops(&s3c2410_pm_syscore_ops);
+#endif
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);
 
 	return sysdev_register(&s3c2410_sysdev);
diff --git a/arch/arm/mach-s3c2412/s3c2412.c b/arch/arm/mach-s3c2412/s3c2412.c
index ef0958d3e5c6..57a1e01e4e50 100644
--- a/arch/arm/mach-s3c2412/s3c2412.c
+++ b/arch/arm/mach-s3c2412/s3c2412.c
@@ -245,7 +245,9 @@ int __init s3c2412_init(void)
 {
 	printk("S3C2412: Initialising architecture\n");
 
+#ifdef CONFIG_PM
 	register_syscore_ops(&s3c2412_pm_syscore_ops);
+#endif
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);
 
 	return sysdev_register(&s3c2412_sysdev);
diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c
index 494ce913dc95..20b3fdfb3051 100644
--- a/arch/arm/mach-s3c2416/s3c2416.c
+++ b/arch/arm/mach-s3c2416/s3c2416.c
@@ -97,7 +97,9 @@ int __init s3c2416_init(void)
 
 	s3c_fb_setname("s3c2443-fb");
 
+#ifdef CONFIG_PM
 	register_syscore_ops(&s3c2416_pm_syscore_ops);
+#endif
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);
 
 	return sysdev_register(&s3c2416_sysdev);
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c
index ce99ff72838d..2270d3360216 100644
--- a/arch/arm/mach-s3c2440/s3c2440.c
+++ b/arch/arm/mach-s3c2440/s3c2440.c
@@ -55,7 +55,9 @@ int __init s3c2440_init(void)
 
 	/* register suspend/resume handlers */
 
+#ifdef CONFIG_PM
 	register_syscore_ops(&s3c2410_pm_syscore_ops);
+#endif
 	register_syscore_ops(&s3c244x_pm_syscore_ops);
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);
 
diff --git a/arch/arm/mach-s3c2440/s3c2442.c b/arch/arm/mach-s3c2440/s3c2442.c
index 9ad99f8016a1..6f2b65e6e068 100644
--- a/arch/arm/mach-s3c2440/s3c2442.c
+++ b/arch/arm/mach-s3c2440/s3c2442.c
@@ -169,7 +169,9 @@ int __init s3c2442_init(void)
 {
 	printk("S3C2442: Initialising architecture\n");
 
+#ifdef CONFIG_PM
 	register_syscore_ops(&s3c2410_pm_syscore_ops);
+#endif
 	register_syscore_ops(&s3c244x_pm_syscore_ops);
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);
 
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
index 0e1016a827ac..0e0fd4d889bd 100644
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ b/arch/arm/mach-tegra/cpu-tegra.c
@@ -32,7 +32,6 @@
 
 #include <asm/system.h>
 
-#include <mach/hardware.h>
 #include <mach/clk.h>
 
 /* Frequency table index must be sequential starting at 0 */
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 4210cb434dbc..a3e0c8692f0d 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -6,6 +6,7 @@ config UX500_SOC_COMMON
 	select ARM_GIC
 	select HAS_MTU
 	select ARM_ERRATA_753970
+	select ARM_ERRATA_754322
 
 menu "Ux500 SoC"
 
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index cc7e2d8be9aa..f8037ba338ac 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -496,6 +496,13 @@ static void __init free_unused_memmap(struct meminfo *mi)
 		 */
 		bank_start = min(bank_start,
 				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#else
+		/*
+		 * Align down here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank start aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
 #endif
 		/*
 		 * If we had a previous bank, and there is a space
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index f88216d23991..c65eb791d1bb 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -163,9 +163,9 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
 	ct->chip.irq_mask = irq_gc_mask_set_bit;
 	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 	ct->chip.irq_set_type = s5p_gpioint_set_type,
-	ct->regs.ack = PEND_OFFSET + REG_OFFSET(chip->group);
-	ct->regs.mask = MASK_OFFSET + REG_OFFSET(chip->group);
-	ct->regs.type = CON_OFFSET + REG_OFFSET(chip->group);
+	ct->regs.ack = PEND_OFFSET + REG_OFFSET(group - bank->start);
+	ct->regs.mask = MASK_OFFSET + REG_OFFSET(group - bank->start);
+	ct->regs.type = CON_OFFSET + REG_OFFSET(group - bank->start);
 	irq_setup_generic_chip(gc, IRQ_MSK(chip->chip.ngpio),
 			       IRQ_GC_INIT_MASK_CACHE,
 			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 177cdaf83564..b122adc8bdbb 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -24,6 +24,7 @@ config MIPS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
 	select HAVE_ARCH_JUMP_LABEL
+	select IRQ_FORCED_THREADING
 
 menu "Machine selection"
 
@@ -722,6 +723,7 @@ config CAVIUM_OCTEON_SIMULATOR
 	select SYS_SUPPORTS_HIGHMEM
 	select SYS_SUPPORTS_HOTPLUG_CPU
 	select SYS_HAS_CPU_CAVIUM_OCTEON
+	select HOLES_IN_ZONE
 	help
 	  The Octeon simulator is software performance model of the Cavium
 	  Octeon Processor. It supports simulating Octeon processors on x86
@@ -744,6 +746,7 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
 	select ZONE_DMA32
 	select USB_ARCH_HAS_OHCI
 	select USB_ARCH_HAS_EHCI
+	select HOLES_IN_ZONE
 	help
 	  This option supports all of the Octeon reference boards from Cavium
 	  Networks. It builds a kernel that dynamically determines the Octeon
@@ -973,6 +976,9 @@ config ISA_DMA_API
 config GENERIC_GPIO
 	bool
 
+config HOLES_IN_ZONE
+	bool
+
 #
 # Endianess selection.  Sufficiently obscure so many users don't know what to
 # answer,so we try hard to limit the available choices.  Also the use of a
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
index 3b2c18b14341..f72c48d4804c 100644
--- a/arch/mips/alchemy/common/platform.c
+++ b/arch/mips/alchemy/common/platform.c
@@ -492,7 +492,7 @@ static void __init alchemy_setup_macs(int ctype)
 		memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6);
 
 	ret = platform_device_register(&au1xxx_eth0_device);
-	if (!ret)
+	if (ret)
 		printk(KERN_INFO "Alchemy: failed to register MAC0\n");
 
 
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index 647e518c90bc..b86324a42601 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -158,15 +158,21 @@ static void restore_core_regs(void)
 
 void au_sleep(void)
 {
-	int cpuid = alchemy_get_cputype();
-	if (cpuid != ALCHEMY_CPU_UNKNOWN) {
-		save_core_regs();
-		if (cpuid <= ALCHEMY_CPU_AU1500)
-			alchemy_sleep_au1000();
-		else if (cpuid <= ALCHEMY_CPU_AU1200)
-			alchemy_sleep_au1550();
-		restore_core_regs();
+	save_core_regs();
+
+	switch (alchemy_get_cputype()) {
+	case ALCHEMY_CPU_AU1000:
+	case ALCHEMY_CPU_AU1500:
+	case ALCHEMY_CPU_AU1100:
+		alchemy_sleep_au1000();
+		break;
+	case ALCHEMY_CPU_AU1550:
+	case ALCHEMY_CPU_AU1200:
+		alchemy_sleep_au1550();
+		break;
 	}
+
+	restore_core_regs();
 }
 
 #endif	/* CONFIG_PM */
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
index 596ad00e7f05..463d2c4d9441 100644
--- a/arch/mips/alchemy/devboards/bcsr.c
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -89,8 +89,12 @@ static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
 {
 	unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
 
+	disable_irq_nosync(irq);
+
 	for ( ; bisr; bisr &= bisr - 1)
 		generic_handle_irq(bcsr_csc_base + __ffs(bisr));
+
+	enable_irq(irq);
 }
 
 /* NOTE: both the enable and mask bits must be cleared, otherwise the
diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c
index 1dac4f27d334..4a8980027ecf 100644
--- a/arch/mips/alchemy/devboards/db1200/setup.c
+++ b/arch/mips/alchemy/devboards/db1200/setup.c
@@ -23,13 +23,6 @@ void __init board_setup(void)
 	unsigned long freq0, clksrc, div, pfc;
 	unsigned short whoami;
 
-	/* Set Config[OD] (disable overlapping bus transaction):
-	 * This gets rid of a _lot_ of spurious interrupts (especially
-	 * wrt. IDE); but incurs ~10% performance hit in some
-	 * cpu-bound applications.
-	 */
-	set_c0_config(1 << 19);
-
 	bcsr_init(DB1200_BCSR_PHYS_ADDR,
 		  DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS);
 
diff --git a/arch/mips/ar7/irq.c b/arch/mips/ar7/irq.c
index 03db3daadbd8..88c4babfdb5d 100644
--- a/arch/mips/ar7/irq.c
+++ b/arch/mips/ar7/irq.c
@@ -98,7 +98,8 @@ static struct irq_chip ar7_sec_irq_type = {
 
 static struct irqaction ar7_cascade_action = {
 	.handler = no_action,
-	.name = "AR7 cascade interrupt"
+	.name = "AR7 cascade interrupt",
+	.flags = IRQF_NO_THREAD,
 };
 
 static void __init ar7_irq_init(int base)
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index cea6021cb8d7..162e11b4ed75 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -222,6 +222,7 @@ static struct irq_chip bcm63xx_external_irq_chip = {
 static struct irqaction cpu_ip2_cascade_action = {
 	.handler = no_action,
 	.name = "cascade_ip2",
+	.flags = IRQF_NO_THREAD,
 };
 
 void __init arch_init_irq(void)
diff --git a/arch/mips/cobalt/irq.c b/arch/mips/cobalt/irq.c
index cb9bf820fe53..965c777d3561 100644
--- a/arch/mips/cobalt/irq.c
+++ b/arch/mips/cobalt/irq.c
@@ -48,6 +48,7 @@ asmlinkage void plat_irq_dispatch(void)
 static struct irqaction cascade = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 void __init arch_init_irq(void)
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index fa45e924be05..f7b7ba6d5c45 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -101,20 +101,24 @@ int cpu_fpu_mask = DEC_CPU_IRQ_MASK(DEC_CPU_INR_FPU);
 static struct irqaction ioirq = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 static struct irqaction fpuirq = {
 	.handler = no_action,
 	.name = "fpu",
+	.flags = IRQF_NO_THREAD,
 };
 
 static struct irqaction busirq = {
 	.flags = IRQF_DISABLED,
 	.name = "bus error",
+	.flags = IRQF_NO_THREAD,
 };
 
 static struct irqaction haltirq = {
 	.handler = dec_intr_halt,
 	.name = "halt",
+	.flags = IRQF_NO_THREAD,
 };
 
 
diff --git a/arch/mips/emma/markeins/irq.c b/arch/mips/emma/markeins/irq.c
index 3dbd7a5a6ad3..7798887a1288 100644
--- a/arch/mips/emma/markeins/irq.c
+++ b/arch/mips/emma/markeins/irq.c
@@ -169,7 +169,7 @@ void emma2rh_gpio_irq_init(void)
 
 static struct irqaction irq_cascade = {
 	.handler = no_action,
-	.flags = 0,
+	.flags = IRQF_NO_THREAD,
 	.name = "cascade",
 	.dev_id = NULL,
 	.next = NULL,
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index 0d5a42b5f47a..a58addb98cfd 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -54,7 +54,6 @@
 #define cpu_has_mips_r2_exec_hazard 0
 #define cpu_has_dsp	0
 #define cpu_has_mipsmt	0
-#define cpu_has_userlocal	0
 #define cpu_has_vint	0
 #define cpu_has_veic	0
 #define cpu_hwrena_impl_bits	0xc0000000
diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h
index 62c094085947..35371641575d 100644
--- a/arch/mips/include/asm/mach-powertv/dma-coherence.h
+++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h
@@ -13,7 +13,6 @@
 #define __ASM_MACH_POWERTV_DMA_COHERENCE_H
 
 #include <linux/sched.h>
-#include <linux/version.h>
 #include <linux/device.h>
 #include <asm/mach-powertv/asic.h>
 
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index b4ba2449444b..cb41af5f3406 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -195,9 +195,9 @@
 		 * to cover the pipeline delay.
 		 */
 		.set	mips32
-		mfc0	v1, CP0_TCSTATUS
+		mfc0	k0, CP0_TCSTATUS
 		.set	mips0
-		LONG_S	v1, PT_TCSTATUS(sp)
+		LONG_S	k0, PT_TCSTATUS(sp)
 #endif /* CONFIG_MIPS_MT_SMTC */
 		LONG_S	$4, PT_R4(sp)
 		LONG_S	$5, PT_R5(sp)
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 73031f7fc827..4397972949fa 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -18,7 +18,7 @@
 #include <linux/init.h>
 
 #include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
@@ -86,7 +86,6 @@ struct jz_gpio_chip {
 	spinlock_t lock;
 
 	struct gpio_chip gpio_chip;
-	struct sys_device sysdev;
 };
 
 static struct jz_gpio_chip jz4740_gpio_chips[];
@@ -459,49 +458,47 @@ static struct jz_gpio_chip jz4740_gpio_chips[] = {
 	JZ4740_GPIO_CHIP(D),
 };
 
-static inline struct jz_gpio_chip *sysdev_to_chip(struct sys_device *dev)
+static void jz4740_gpio_suspend_chip(struct jz_gpio_chip *chip)
 {
-	return container_of(dev, struct jz_gpio_chip, sysdev);
+	chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK);
+	writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET);
+	writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR);
 }
 
-static int jz4740_gpio_suspend(struct sys_device *dev, pm_message_t state)
+static int jz4740_gpio_suspend(void)
 {
-	struct jz_gpio_chip *chip = sysdev_to_chip(dev);
+	int i;
 
-	chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK);
-	writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET);
-	writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR);
+	for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); i++)
+		jz4740_gpio_suspend_chip(&jz4740_gpio_chips[i]);
 
 	return 0;
 }
 
-static int jz4740_gpio_resume(struct sys_device *dev)
+static void jz4740_gpio_resume_chip(struct jz_gpio_chip *chip)
 {
-	struct jz_gpio_chip *chip = sysdev_to_chip(dev);
 	uint32_t mask = chip->suspend_mask;
 
 	writel(~mask, chip->base + JZ_REG_GPIO_MASK_CLEAR);
 	writel(mask, chip->base + JZ_REG_GPIO_MASK_SET);
+}
 
-	return 0;
+static void jz4740_gpio_resume(void)
+{
+	int i;
+
+	for (i = ARRAY_SIZE(jz4740_gpio_chips) - 1; i >= 0 ; i--)
+		jz4740_gpio_resume_chip(&jz4740_gpio_chips[i]);
 }
 
-static struct sysdev_class jz4740_gpio_sysdev_class = {
-	.name = "gpio",
+static struct syscore_ops jz4740_gpio_syscore_ops = {
 	.suspend = jz4740_gpio_suspend,
 	.resume = jz4740_gpio_resume,
 };
 
-static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
+static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
 {
-	int ret, irq;
-
-	chip->sysdev.id = id;
-	chip->sysdev.cls = &jz4740_gpio_sysdev_class;
-	ret = sysdev_register(&chip->sysdev);
-
-	if (ret)
-		return ret;
+	int irq;
 
 	spin_lock_init(&chip->lock);
 
@@ -519,22 +516,17 @@ static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
 		irq_set_chip_and_handler(irq, &jz_gpio_irq_chip,
 			handle_level_irq);
 	}
-
-	return 0;
 }
 
 static int __init jz4740_gpio_init(void)
 {
 	unsigned int i;
-	int ret;
-
-	ret = sysdev_class_register(&jz4740_gpio_sysdev_class);
-	if (ret)
-		return ret;
 
 	for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i)
 		jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i);
 
+	register_syscore_ops(&jz4740_gpio_syscore_ops);
+
 	printk(KERN_INFO "JZ4740 GPIO initialized\n");
 
 	return 0;
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index feb8021a305f..6a2d758dd8e9 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -19,6 +19,26 @@
 
 #include <asm-generic/sections.h>
 
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+
+/*
+ * Check if the address is in kernel space
+ *
+ * Clone core_kernel_text() from kernel/extable.c, but doesn't call
+ * init_kernel_text() for Ftrace doesn't trace functions in init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+	if (ip >= (unsigned long)_stext &&
+	    ip <= (unsigned long)_etext)
+		return 1;
+	return 0;
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
@@ -54,20 +74,6 @@ static inline void ftrace_dyn_arch_init_insns(void)
 #endif
 }
 
-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
-	if (ip >= (unsigned long)_stext &&
-	    ip <= (unsigned long)_etext)
-		return 1;
-	return 0;
-}
-
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
 	int faulted;
@@ -112,11 +118,6 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
  * 1: offset = 4 instructions
  */
 
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-#define MCOUNT_OFFSET_INSNS 5
-#else
-#define MCOUNT_OFFSET_INSNS 4
-#endif
 #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
 
 int ftrace_make_nop(struct module *mod,
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 5c74eb797f08..32b397b646ee 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -229,7 +229,7 @@ static void i8259A_shutdown(void)
 	 */
 	if (i8259A_auto_eoi >= 0) {
 		outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
-		outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-1 */
+		outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
 	}
 }
 
@@ -295,6 +295,7 @@ static void init_8259A(int auto_eoi)
 static struct irqaction irq2 = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 static struct resource pic1_io_resource = {
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 876a75cc376f..922a554cd108 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -349,3 +349,10 @@ SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
 	return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
 				 dfd, pathname);
 }
+
+SYSCALL_DEFINE6(32_futex, u32 __user *, uaddr, int, op, u32, val,
+		struct compat_timespec __user *, utime, u32 __user *, uaddr2,
+		u32, val3)
+{
+	return compat_sys_futex(uaddr, op, val, utime, uaddr2, val3);
+}
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f9296e894e46..6de1f598346e 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -315,7 +315,7 @@ EXPORT(sysn32_call_table)
 	PTR	sys_fremovexattr
 	PTR	sys_tkill
 	PTR	sys_ni_syscall
-	PTR	compat_sys_futex
+	PTR	sys_32_futex
 	PTR	compat_sys_sched_setaffinity	/* 6195 */
 	PTR	compat_sys_sched_getaffinity
 	PTR	sys_cacheflush
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 4d7c9827706f..1d813169e453 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -441,7 +441,7 @@ sys_call_table:
 	PTR	sys_fremovexattr	/* 4235 */
 	PTR	sys_tkill
 	PTR	sys_sendfile64
-	PTR	compat_sys_futex
+	PTR	sys_32_futex
 	PTR	compat_sys_sched_setaffinity
 	PTR	compat_sys_sched_getaffinity	/* 4240 */
 	PTR	compat_sys_io_setup
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index dbbe0ce48d89..f8524003676a 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -8,6 +8,7 @@
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
 #include <linux/cache.h>
+#include <linux/irqflags.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/personality.h>
@@ -658,6 +659,8 @@ static void do_signal(struct pt_regs *regs)
 asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
 	__u32 thread_info_flags)
 {
+	local_irq_enable();
+
 	/* deal with pending signal delivery */
 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
 		do_signal(regs);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index b7517e3abc85..cbea618af0b4 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -14,6 +14,7 @@
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -364,21 +365,26 @@ static int regs_to_trapnr(struct pt_regs *regs)
 	return (regs->cp0_cause >> 2) & 0x1f;
 }
 
-static DEFINE_SPINLOCK(die_lock);
+static DEFINE_RAW_SPINLOCK(die_lock);
 
 void __noreturn die(const char *str, struct pt_regs *regs)
 {
 	static int die_counter;
 	int sig = SIGSEGV;
 #ifdef CONFIG_MIPS_MT_SMTC
-	unsigned long dvpret = dvpe();
+	unsigned long dvpret;
 #endif /* CONFIG_MIPS_MT_SMTC */
 
+	oops_enter();
+
 	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
 		sig = 0;
 
 	console_verbose();
-	spin_lock_irq(&die_lock);
+	raw_spin_lock_irq(&die_lock);
+#ifdef CONFIG_MIPS_MT_SMTC
+	dvpret = dvpe();
+#endif /* CONFIG_MIPS_MT_SMTC */
 	bust_spinlocks(1);
 #ifdef CONFIG_MIPS_MT_SMTC
 	mips_mt_regdump(dvpret);
@@ -387,7 +393,9 @@ void __noreturn die(const char *str, struct pt_regs *regs)
 	printk("%s[#%d]:\n", str, ++die_counter);
 	show_registers(regs);
 	add_taint(TAINT_DIE);
-	spin_unlock_irq(&die_lock);
+	raw_spin_unlock_irq(&die_lock);
+
+	oops_exit();
 
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 2cd50ad0d5c6..3efcb065f78a 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -192,7 +192,7 @@ static struct tc *get_tc(int index)
 	}
 	spin_unlock(&vpecontrol.tc_list_lock);
 
-	return NULL;
+	return res;
 }
 
 /* allocate a vpe and associate it with this minor (or index) */
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index fc89795cafdb..f9737bb3c5ab 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -123,11 +123,10 @@ void ltq_enable_irq(struct irq_data *d)
 static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
 {
 	int i;
-	int irq_nr = d->irq - INT_NUM_IRQ0;
 
 	ltq_enable_irq(d);
 	for (i = 0; i < MAX_EIU; i++) {
-		if (irq_nr == ltq_eiu_irq[i]) {
+		if (d->irq == ltq_eiu_irq[i]) {
 			/* low level - we should really handle set_type */
 			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
 				(0x6 << (i * 4)), LTQ_EIU_EXIN_C);
@@ -147,11 +146,10 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
 static void ltq_shutdown_eiu_irq(struct irq_data *d)
 {
 	int i;
-	int irq_nr = d->irq - INT_NUM_IRQ0;
 
 	ltq_disable_irq(d);
 	for (i = 0; i < MAX_EIU; i++) {
-		if (irq_nr == ltq_eiu_irq[i]) {
+		if (d->irq == ltq_eiu_irq[i]) {
 			/* disable */
 			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i),
 				LTQ_EIU_EXIN_INEN);
diff --git a/arch/mips/lantiq/xway/ebu.c b/arch/mips/lantiq/xway/ebu.c
index 66eb52fa50a1..033b3184c7a7 100644
--- a/arch/mips/lantiq/xway/ebu.c
+++ b/arch/mips/lantiq/xway/ebu.c
@@ -10,7 +10,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/ioport.h>
 
 #include <lantiq_soc.h>
diff --git a/arch/mips/lantiq/xway/pmu.c b/arch/mips/lantiq/xway/pmu.c
index 9d69f01e352b..39f0d2641cbf 100644
--- a/arch/mips/lantiq/xway/pmu.c
+++ b/arch/mips/lantiq/xway/pmu.c
@@ -8,7 +8,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/ioport.h>
 
 #include <lantiq_soc.h>
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c
index de4c165515d7..d608b6ef0edd 100644
--- a/arch/mips/lasat/interrupt.c
+++ b/arch/mips/lasat/interrupt.c
@@ -105,6 +105,7 @@ asmlinkage void plat_irq_dispatch(void)
 static struct irqaction cascade = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 void __init arch_init_irq(void)
diff --git a/arch/mips/loongson/fuloong-2e/irq.c b/arch/mips/loongson/fuloong-2e/irq.c
index d61a04222b87..3cf1fef29f0e 100644
--- a/arch/mips/loongson/fuloong-2e/irq.c
+++ b/arch/mips/loongson/fuloong-2e/irq.c
@@ -42,6 +42,7 @@ asmlinkage void mach_irq_dispatch(unsigned int pending)
 static struct irqaction cascade_irqaction = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 void __init mach_init_irq(void)
diff --git a/arch/mips/loongson/lemote-2f/irq.c b/arch/mips/loongson/lemote-2f/irq.c
index 081db102bb98..14b081841b6b 100644
--- a/arch/mips/loongson/lemote-2f/irq.c
+++ b/arch/mips/loongson/lemote-2f/irq.c
@@ -96,12 +96,13 @@ static irqreturn_t ip6_action(int cpl, void *dev_id)
 struct irqaction ip6_irqaction = {
 	.handler = ip6_action,
 	.name = "cascade",
-	.flags = IRQF_SHARED,
+	.flags = IRQF_SHARED | IRQF_NO_THREAD,
 };
 
 struct irqaction cascade_irqaction = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 void __init mach_init_irq(void)
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 9ff5d0fac556..302d779d5b0d 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -6,6 +6,7 @@
  * Copyright (C) 2011 Wind River Systems,
  *               written by Ralf Baechle <ralf@linux-mips.org>
  */
+#include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
@@ -15,12 +16,11 @@
 #include <linux/sched.h>
 
 unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
-
 EXPORT_SYMBOL(shm_align_mask);
 
 /* gap between mmap and stack */
 #define MIN_GAP (128*1024*1024UL)
 #define MAX_GAP ((TASK_SIZE)/6*5)
 
 static int mmap_is_legacy(void)
 {
@@ -57,13 +57,13 @@ static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
 	return base - off;
 }
 
-#define COLOUR_ALIGN(addr,pgoff)				\
+#define COLOUR_ALIGN(addr, pgoff)				\
 	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
 	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
 
 enum mmap_allocation_direction {UP, DOWN};
 
-static unsigned long arch_get_unmapped_area_foo(struct file *filp,
+static unsigned long arch_get_unmapped_area_common(struct file *filp,
 	unsigned long addr0, unsigned long len, unsigned long pgoff,
 	unsigned long flags, enum mmap_allocation_direction dir)
 {
@@ -103,16 +103,16 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
 
 	if (dir == UP) {
 		addr = mm->mmap_base;
 		if (do_color_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
 
 		for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
 			/* At this point:  (!vma || addr < vma->vm_end). */
@@ -131,28 +131,30 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
 		mm->free_area_cache = mm->mmap_base;
 	}
 
-	/* either no address requested or can't fit in requested address hole */
+	/*
+	 * either no address requested, or the mapping can't fit into
+	 * the requested address hole
+	 */
 	addr = mm->free_area_cache;
 	if (do_color_align) {
 		unsigned long base =
 			COLOUR_ALIGN_DOWN(addr - len, pgoff);
-
 		addr = base + len;
 	}
 
 	/* make sure it can fit in the remaining address space */
 	if (likely(addr > len)) {
 		vma = find_vma(mm, addr - len);
 		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return mm->free_area_cache = addr-len;
+			/* cache the address as a hint for next time */
+			return mm->free_area_cache = addr - len;
 		}
 	}
 
 	if (unlikely(mm->mmap_base < len))
 		goto bottomup;
 
-	addr = mm->mmap_base-len;
+	addr = mm->mmap_base - len;
 	if (do_color_align)
 		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
 
@@ -163,8 +165,8 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
163 * return with success: 165 * return with success:
164 */ 166 */
165 vma = find_vma(mm, addr); 167 vma = find_vma(mm, addr);
166 if (likely(!vma || addr+len <= vma->vm_start)) { 168 if (likely(!vma || addr + len <= vma->vm_start)) {
167 /* remember the address as a hint for next time */ 169 /* cache the address as a hint for next time */
168 return mm->free_area_cache = addr; 170 return mm->free_area_cache = addr;
169 } 171 }
170 172
@@ -173,7 +175,7 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
173 mm->cached_hole_size = vma->vm_start - addr; 175 mm->cached_hole_size = vma->vm_start - addr;
174 176
175 /* try just below the current vma->vm_start */ 177 /* try just below the current vma->vm_start */
176 addr = vma->vm_start-len; 178 addr = vma->vm_start - len;
177 if (do_color_align) 179 if (do_color_align)
178 addr = COLOUR_ALIGN_DOWN(addr, pgoff); 180 addr = COLOUR_ALIGN_DOWN(addr, pgoff);
179 } while (likely(len < vma->vm_start)); 181 } while (likely(len < vma->vm_start));
@@ -201,7 +203,7 @@ bottomup:
201unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, 203unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
202 unsigned long len, unsigned long pgoff, unsigned long flags) 204 unsigned long len, unsigned long pgoff, unsigned long flags)
203{ 205{
204 return arch_get_unmapped_area_foo(filp, 206 return arch_get_unmapped_area_common(filp,
205 addr0, len, pgoff, flags, UP); 207 addr0, len, pgoff, flags, UP);
206} 208}
207 209
@@ -213,7 +215,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
213 unsigned long addr0, unsigned long len, unsigned long pgoff, 215 unsigned long addr0, unsigned long len, unsigned long pgoff,
214 unsigned long flags) 216 unsigned long flags)
215{ 217{
216 return arch_get_unmapped_area_foo(filp, 218 return arch_get_unmapped_area_common(filp,
217 addr0, len, pgoff, flags, DOWN); 219 addr0, len, pgoff, flags, DOWN);
218} 220}
219 221
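The COLOUR_ALIGN macro above keeps a shared mapping on the same cache colour as its file offset. A minimal standalone sketch of that arithmetic (ordinary userspace C, not kernel code; PAGE_SHIFT of 12 and the 16 KiB shm_align_mask are example values, not taken from the patch):

#include <stdio.h>

#define PAGE_SHIFT 12
static unsigned long shm_align_mask = 0x3fff;   /* example: 16 KiB aliasing window - 1 */

/* Same expression as COLOUR_ALIGN(addr, pgoff): round addr up to the aliasing
 * boundary, then add the colour contributed by the file offset. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
        return (((addr + shm_align_mask) & ~shm_align_mask) +
                ((pgoff << PAGE_SHIFT) & shm_align_mask));
}

int main(void)
{
        unsigned long addr = 0x20001234, pgoff = 3;
        unsigned long aligned = colour_align(addr, pgoff);

        /* the result shares its low (mask) bits with pgoff << PAGE_SHIFT */
        printf("addr=%#lx pgoff=%lu -> %#lx (colour %#lx)\n",
               addr, pgoff, aligned, aligned & shm_align_mask);
        return 0;
}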
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index b6e1cff50667..e06370f58ef3 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1759,14 +1759,13 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
1759 u32 *p = handle_tlbm; 1759 u32 *p = handle_tlbm;
1760 struct uasm_label *l = labels; 1760 struct uasm_label *l = labels;
1761 struct uasm_reloc *r = relocs; 1761 struct uasm_reloc *r = relocs;
1762 struct work_registers wr;
1763 1762
1764 memset(handle_tlbm, 0, sizeof(handle_tlbm)); 1763 memset(handle_tlbm, 0, sizeof(handle_tlbm));
1765 memset(labels, 0, sizeof(labels)); 1764 memset(labels, 0, sizeof(labels));
1766 memset(relocs, 0, sizeof(relocs)); 1765 memset(relocs, 0, sizeof(relocs));
1767 1766
1768 build_r3000_tlbchange_handler_head(&p, K0, K1); 1767 build_r3000_tlbchange_handler_head(&p, K0, K1);
1769 build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); 1768 build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
1770 uasm_i_nop(&p); /* load delay */ 1769 uasm_i_nop(&p); /* load delay */
1771 build_make_write(&p, &r, K0, K1); 1770 build_make_write(&p, &r, K0, K1);
1772 build_r3000_pte_reload_tlbwi(&p, K0, K1); 1771 build_r3000_pte_reload_tlbwi(&p, K0, K1);
@@ -1963,7 +1962,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
1963 uasm_i_andi(&p, wr.r3, wr.r3, 2); 1962 uasm_i_andi(&p, wr.r3, wr.r3, 2);
1964 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); 1963 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
1965 } 1964 }
1966 1965 if (PM_DEFAULT_MASK == 0)
1966 uasm_i_nop(&p);
1967 /* 1967 /*
1968 * We clobbered C0_PAGEMASK, restore it. On the other branch 1968 * We clobbered C0_PAGEMASK, restore it. On the other branch
1969 * it is restored in build_huge_tlb_write_entry. 1969 * it is restored in build_huge_tlb_write_entry.
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 1d36c511a7a5..d53ff91b277c 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -350,12 +350,14 @@ unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
350 350
351static struct irqaction i8259irq = { 351static struct irqaction i8259irq = {
352 .handler = no_action, 352 .handler = no_action,
353 .name = "XT-PIC cascade" 353 .name = "XT-PIC cascade",
354 .flags = IRQF_NO_THREAD,
354}; 355};
355 356
356static struct irqaction corehi_irqaction = { 357static struct irqaction corehi_irqaction = {
357 .handler = no_action, 358 .handler = no_action,
358 .name = "CoreHi" 359 .name = "CoreHi",
360 .flags = IRQF_NO_THREAD,
359}; 361};
360 362
361static msc_irqmap_t __initdata msc_irqmap[] = { 363static msc_irqmap_t __initdata msc_irqmap[] = {
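The hunks above (and the similar ones further down) tag cascade interrupt actions with IRQF_NO_THREAD so they are never force-threaded. A compilable mock of the initializer pattern; the struct, no_action stub and flag values below are stand-ins, the real definitions live in the kernel headers:

#include <stdio.h>

#define IRQF_DISABLED   0x00000020u     /* stand-in values, for illustration only */
#define IRQF_NO_THREAD  0x00010000u

typedef int irqreturn_t;
typedef irqreturn_t (*irq_handler_t)(int irq, void *dev_id);

struct irqaction {
        irq_handler_t handler;
        unsigned long flags;
        const char *name;
};

static irqreturn_t no_action(int irq, void *dev_id)
{
        (void)irq; (void)dev_id;
        return 0;
}

/* Same shape as the patched initializers: trailing comma after every member,
 * extra flag OR'ed into .flags. */
static struct irqaction cascade = {
        .handler = no_action,
        .flags   = IRQF_DISABLED | IRQF_NO_THREAD,
        .name    = "cascade",
};

int main(void)
{
        printf("%s flags=%#lx\n", cascade.name, cascade.flags);
        return 0;
}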
diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile
index 9bd3f731f62e..2dca585dd2f7 100644
--- a/arch/mips/netlogic/xlr/Makefile
+++ b/arch/mips/netlogic/xlr/Makefile
@@ -2,4 +2,4 @@ obj-y += setup.o platform.o irq.o setup.o time.o
2obj-$(CONFIG_SMP) += smp.o smpboot.o 2obj-$(CONFIG_SMP) += smp.o smpboot.o
3obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o 3obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o
4 4
5EXTRA_CFLAGS += -Werror 5ccflags-y += -Werror
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 603d7493e966..8656388b34bd 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -171,8 +171,13 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
171 u32 temp_buffer; 171 u32 temp_buffer;
172 172
173 /* set clock to 33Mhz */ 173 /* set clock to 33Mhz */
174 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR); 174 if (ltq_is_ar9()) {
175 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR); 175 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0x1f00000, LTQ_CGU_IFCCR);
176 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0xe00000, LTQ_CGU_IFCCR);
177 } else {
178 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
179 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
180 }
176 181
177 /* external or internal clock ? */ 182 /* external or internal clock ? */
178 if (conf->clock) { 183 if (conf->clock) {
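The clock setup above is a two-step read-modify-write of the CGU interface clock register: clear the divider field, then OR in the new value, with a wider mask on AR9. The same shape against a plain variable standing in for the register (masks and values are illustrative):

#include <stdio.h>

static unsigned int fake_ifccr = 0x12f45678;    /* stands in for the IFCCR register */

static unsigned int cgu_read(void)       { return fake_ifccr; }
static void cgu_write(unsigned int v)    { fake_ifccr = v; }

/* Mirror of the patched sequence: clear the field, then set the new bits. */
static void set_clock_field(unsigned int clear_mask, unsigned int set_bits)
{
        cgu_write(cgu_read() & ~clear_mask);
        cgu_write(cgu_read() | set_bits);
}

int main(void)
{
        set_clock_field(0xf00000, 0x800000);    /* non-AR9 style: 4-bit field */
        printf("after: %#x\n", cgu_read());
        set_clock_field(0x1f00000, 0xe00000);   /* AR9 style: 5-bit field */
        printf("after: %#x\n", cgu_read());
        return 0;
}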
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c
index 764362ce5e40..5f3a69cebad1 100644
--- a/arch/mips/pci/pci-rc32434.c
+++ b/arch/mips/pci/pci-rc32434.c
@@ -215,7 +215,7 @@ static int __init rc32434_pci_init(void)
215 rc32434_pcibridge_init(); 215 rc32434_pcibridge_init();
216 216
217 io_map_base = ioremap(rc32434_res_pci_io1.start, 217 io_map_base = ioremap(rc32434_res_pci_io1.start,
218 resource_size(&rcrc32434_res_pci_io1)); 218 resource_size(&rc32434_res_pci_io1));
219 219
220 if (!io_map_base) 220 if (!io_map_base)
221 return -ENOMEM; 221 return -ENOMEM;
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq.c b/arch/mips/pmc-sierra/msp71xx/msp_irq.c
index 4531c4a514bc..d3c3d81757a5 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq.c
@@ -108,12 +108,14 @@ asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
108 108
109static struct irqaction cic_cascade_msp = { 109static struct irqaction cic_cascade_msp = {
110 .handler = no_action, 110 .handler = no_action,
111 .name = "MSP CIC cascade" 111 .name = "MSP CIC cascade",
112 .flags = IRQF_NO_THREAD,
112}; 113};
113 114
114static struct irqaction per_cascade_msp = { 115static struct irqaction per_cascade_msp = {
115 .handler = no_action, 116 .handler = no_action,
116 .name = "MSP PER cascade" 117 .name = "MSP PER cascade",
118 .flags = IRQF_NO_THREAD,
117}; 119};
118 120
119void __init arch_init_irq(void) 121void __init arch_init_irq(void)
diff --git a/arch/mips/pnx8550/common/int.c b/arch/mips/pnx8550/common/int.c
index 6b93c81779c1..1ebe22bdadc8 100644
--- a/arch/mips/pnx8550/common/int.c
+++ b/arch/mips/pnx8550/common/int.c
@@ -167,7 +167,7 @@ static struct irq_chip level_irq_type = {
167 167
168static struct irqaction gic_action = { 168static struct irqaction gic_action = {
169 .handler = no_action, 169 .handler = no_action,
170 .flags = IRQF_DISABLED, 170 .flags = IRQF_DISABLED | IRQF_NO_THREAD,
171 .name = "GIC", 171 .name = "GIC",
172}; 172};
173 173
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index b4d08e4d2ea9..f72c336ea27b 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -155,32 +155,32 @@ static void __irq_entry indy_buserror_irq(void)
155 155
156static struct irqaction local0_cascade = { 156static struct irqaction local0_cascade = {
157 .handler = no_action, 157 .handler = no_action,
158 .flags = IRQF_DISABLED, 158 .flags = IRQF_DISABLED | IRQF_NO_THREAD,
159 .name = "local0 cascade", 159 .name = "local0 cascade",
160}; 160};
161 161
162static struct irqaction local1_cascade = { 162static struct irqaction local1_cascade = {
163 .handler = no_action, 163 .handler = no_action,
164 .flags = IRQF_DISABLED, 164 .flags = IRQF_DISABLED | IRQF_NO_THREAD,
165 .name = "local1 cascade", 165 .name = "local1 cascade",
166}; 166};
167 167
168static struct irqaction buserr = { 168static struct irqaction buserr = {
169 .handler = no_action, 169 .handler = no_action,
170 .flags = IRQF_DISABLED, 170 .flags = IRQF_DISABLED | IRQF_NO_THREAD,
171 .name = "Bus Error", 171 .name = "Bus Error",
172}; 172};
173 173
174static struct irqaction map0_cascade = { 174static struct irqaction map0_cascade = {
175 .handler = no_action, 175 .handler = no_action,
176 .flags = IRQF_DISABLED, 176 .flags = IRQF_DISABLED | IRQF_NO_THREAD,
177 .name = "mapable0 cascade", 177 .name = "mapable0 cascade",
178}; 178};
179 179
180#ifdef USE_LIO3_IRQ 180#ifdef USE_LIO3_IRQ
181static struct irqaction map1_cascade = { 181static struct irqaction map1_cascade = {
182 .handler = no_action, 182 .handler = no_action,
183 .flags = IRQF_DISABLED, 183 .flags = IRQF_DISABLED | IRQF_NO_THREAD,
184 .name = "mapable1 cascade", 184 .name = "mapable1 cascade",
185}; 185};
186#define SGI_INTERRUPTS SGINT_END 186#define SGI_INTERRUPTS SGINT_END
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index a7e5a6d917b1..3ab5b5d25b0a 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
@@ -359,6 +359,7 @@ void sni_rm200_init_8259A(void)
359static struct irqaction sni_rm200_irq2 = { 359static struct irqaction sni_rm200_irq2 = {
360 .handler = no_action, 360 .handler = no_action,
361 .name = "cascade", 361 .name = "cascade",
362 .flags = IRQF_NO_THREAD,
362}; 363};
363 364
364static struct resource sni_rm200_pic1_resource = { 365static struct resource sni_rm200_pic1_resource = {
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index 70a3b85f3757..fad2bef432cd 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -34,6 +34,7 @@ static irq_cascade_t irq_cascade[NR_IRQS] __cacheline_aligned;
34static struct irqaction cascade_irqaction = { 34static struct irqaction cascade_irqaction = {
35 .handler = no_action, 35 .handler = no_action,
36 .name = "cascade", 36 .name = "cascade",
37 .flags = IRQF_NO_THREAD,
37}; 38};
38 39
39int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int)) 40int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int))
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index 1407c07bdade..f6ae2b2b6870 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -280,7 +280,7 @@ static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
280 return retval; 280 return retval;
281} 281}
282#else 282#else
283#define srmmu_hwprobe(addr) (srmmu_swprobe(addr, 0) & SRMMU_PTE_PMASK) 283#define srmmu_hwprobe(addr) srmmu_swprobe(addr, 0)
284#endif 284#endif
285 285
286static inline int 286static inline int
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 1e94f946570e..8aa0d4408586 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -230,7 +230,8 @@ static void pci_parse_of_addrs(struct platform_device *op,
230 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; 230 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
231 } else if (i == dev->rom_base_reg) { 231 } else if (i == dev->rom_base_reg) {
232 res = &dev->resource[PCI_ROM_RESOURCE]; 232 res = &dev->resource[PCI_ROM_RESOURCE];
233 flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE; 233 flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE
234 | IORESOURCE_SIZEALIGN;
234 } else { 235 } else {
235 printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); 236 printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
236 continue; 237 continue;
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 1ba95aff5d59..2caa556db86d 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -273,10 +273,7 @@ void do_sigreturn32(struct pt_regs *regs)
273 case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32); 273 case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
274 } 274 }
275 sigdelsetmask(&set, ~_BLOCKABLE); 275 sigdelsetmask(&set, ~_BLOCKABLE);
276 spin_lock_irq(&current->sighand->siglock); 276 set_current_blocked(&set);
277 current->blocked = set;
278 recalc_sigpending();
279 spin_unlock_irq(&current->sighand->siglock);
280 return; 277 return;
281 278
282segv: 279segv:
@@ -377,10 +374,7 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
377 case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32); 374 case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
378 } 375 }
379 sigdelsetmask(&set, ~_BLOCKABLE); 376 sigdelsetmask(&set, ~_BLOCKABLE);
380 spin_lock_irq(&current->sighand->siglock); 377 set_current_blocked(&set);
381 current->blocked = set;
382 recalc_sigpending();
383 spin_unlock_irq(&current->sighand->siglock);
384 return; 378 return;
385segv: 379segv:
386 force_sig(SIGSEGV, current); 380 force_sig(SIGSEGV, current);
@@ -782,6 +776,7 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
782 siginfo_t *info, 776 siginfo_t *info,
783 sigset_t *oldset, struct pt_regs *regs) 777 sigset_t *oldset, struct pt_regs *regs)
784{ 778{
779 sigset_t blocked;
785 int err; 780 int err;
786 781
787 if (ka->sa.sa_flags & SA_SIGINFO) 782 if (ka->sa.sa_flags & SA_SIGINFO)
@@ -792,12 +787,10 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
792 if (err) 787 if (err)
793 return err; 788 return err;
794 789
795 spin_lock_irq(&current->sighand->siglock); 790 sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
796 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
797 if (!(ka->sa.sa_flags & SA_NOMASK)) 791 if (!(ka->sa.sa_flags & SA_NOMASK))
798 sigaddset(&current->blocked,signr); 792 sigaddset(&blocked, signr);
799 recalc_sigpending(); 793 set_current_blocked(&blocked);
800 spin_unlock_irq(&current->sighand->siglock);
801 794
802 tracehook_signal_handler(signr, info, ka, regs, 0); 795 tracehook_signal_handler(signr, info, ka, regs, 0);
803 796
@@ -881,7 +874,7 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
881 */ 874 */
882 if (current_thread_info()->status & TS_RESTORE_SIGMASK) { 875 if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
883 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 876 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
884 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 877 set_current_blocked(&current->saved_sigmask);
885 } 878 }
886} 879}
887 880
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 04ede8f04add..8ce247ac04cc 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -62,12 +62,13 @@ struct rt_signal_frame {
62 62
63static int _sigpause_common(old_sigset_t set) 63static int _sigpause_common(old_sigset_t set)
64{ 64{
65 set &= _BLOCKABLE; 65 sigset_t blocked;
66 spin_lock_irq(&current->sighand->siglock); 66
67 current->saved_sigmask = current->blocked; 67 current->saved_sigmask = current->blocked;
68 siginitset(&current->blocked, set); 68
69 recalc_sigpending(); 69 set &= _BLOCKABLE;
70 spin_unlock_irq(&current->sighand->siglock); 70 siginitset(&blocked, set);
71 set_current_blocked(&blocked);
71 72
72 current->state = TASK_INTERRUPTIBLE; 73 current->state = TASK_INTERRUPTIBLE;
73 schedule(); 74 schedule();
@@ -139,10 +140,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
139 goto segv_and_exit; 140 goto segv_and_exit;
140 141
141 sigdelsetmask(&set, ~_BLOCKABLE); 142 sigdelsetmask(&set, ~_BLOCKABLE);
142 spin_lock_irq(&current->sighand->siglock); 143 set_current_blocked(&set);
143 current->blocked = set;
144 recalc_sigpending();
145 spin_unlock_irq(&current->sighand->siglock);
146 return; 144 return;
147 145
148segv_and_exit: 146segv_and_exit:
@@ -209,10 +207,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
209 } 207 }
210 208
211 sigdelsetmask(&set, ~_BLOCKABLE); 209 sigdelsetmask(&set, ~_BLOCKABLE);
212 spin_lock_irq(&current->sighand->siglock); 210 set_current_blocked(&set);
213 current->blocked = set;
214 recalc_sigpending();
215 spin_unlock_irq(&current->sighand->siglock);
216 return; 211 return;
217segv: 212segv:
218 force_sig(SIGSEGV, current); 213 force_sig(SIGSEGV, current);
@@ -470,6 +465,7 @@ static inline int
470handle_signal(unsigned long signr, struct k_sigaction *ka, 465handle_signal(unsigned long signr, struct k_sigaction *ka,
471 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) 466 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
472{ 467{
468 sigset_t blocked;
473 int err; 469 int err;
474 470
475 if (ka->sa.sa_flags & SA_SIGINFO) 471 if (ka->sa.sa_flags & SA_SIGINFO)
@@ -480,12 +476,10 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
480 if (err) 476 if (err)
481 return err; 477 return err;
482 478
483 spin_lock_irq(&current->sighand->siglock); 479 sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
484 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
485 if (!(ka->sa.sa_flags & SA_NOMASK)) 480 if (!(ka->sa.sa_flags & SA_NOMASK))
486 sigaddset(&current->blocked, signr); 481 sigaddset(&blocked, signr);
487 recalc_sigpending(); 482 set_current_blocked(&blocked);
488 spin_unlock_irq(&current->sighand->siglock);
489 483
490 tracehook_signal_handler(signr, info, ka, regs, 0); 484 tracehook_signal_handler(signr, info, ka, regs, 0);
491 485
@@ -581,7 +575,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
581 */ 575 */
582 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 576 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
583 clear_thread_flag(TIF_RESTORE_SIGMASK); 577 clear_thread_flag(TIF_RESTORE_SIGMASK);
584 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 578 set_current_blocked(&current->saved_sigmask);
585 } 579 }
586} 580}
587 581
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 47509df3b893..a2b81598d905 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -70,10 +70,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
70 goto do_sigsegv; 70 goto do_sigsegv;
71 } 71 }
72 sigdelsetmask(&set, ~_BLOCKABLE); 72 sigdelsetmask(&set, ~_BLOCKABLE);
73 spin_lock_irq(&current->sighand->siglock); 73 set_current_blocked(&set);
74 current->blocked = set;
75 recalc_sigpending();
76 spin_unlock_irq(&current->sighand->siglock);
77 } 74 }
78 if (test_thread_flag(TIF_32BIT)) { 75 if (test_thread_flag(TIF_32BIT)) {
79 pc &= 0xffffffff; 76 pc &= 0xffffffff;
@@ -242,12 +239,13 @@ struct rt_signal_frame {
242 239
243static long _sigpause_common(old_sigset_t set) 240static long _sigpause_common(old_sigset_t set)
244{ 241{
245 set &= _BLOCKABLE; 242 sigset_t blocked;
246 spin_lock_irq(&current->sighand->siglock); 243
247 current->saved_sigmask = current->blocked; 244 current->saved_sigmask = current->blocked;
248 siginitset(&current->blocked, set); 245
249 recalc_sigpending(); 246 set &= _BLOCKABLE;
250 spin_unlock_irq(&current->sighand->siglock); 247 siginitset(&blocked, set);
248 set_current_blocked(&blocked);
251 249
252 current->state = TASK_INTERRUPTIBLE; 250 current->state = TASK_INTERRUPTIBLE;
253 schedule(); 251 schedule();
@@ -327,10 +325,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
327 pt_regs_clear_syscall(regs); 325 pt_regs_clear_syscall(regs);
328 326
329 sigdelsetmask(&set, ~_BLOCKABLE); 327 sigdelsetmask(&set, ~_BLOCKABLE);
330 spin_lock_irq(&current->sighand->siglock); 328 set_current_blocked(&set);
331 current->blocked = set;
332 recalc_sigpending();
333 spin_unlock_irq(&current->sighand->siglock);
334 return; 329 return;
335segv: 330segv:
336 force_sig(SIGSEGV, current); 331 force_sig(SIGSEGV, current);
@@ -484,18 +479,17 @@ static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
484 siginfo_t *info, 479 siginfo_t *info,
485 sigset_t *oldset, struct pt_regs *regs) 480 sigset_t *oldset, struct pt_regs *regs)
486{ 481{
482 sigset_t blocked;
487 int err; 483 int err;
488 484
489 err = setup_rt_frame(ka, regs, signr, oldset, 485 err = setup_rt_frame(ka, regs, signr, oldset,
490 (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); 486 (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
491 if (err) 487 if (err)
492 return err; 488 return err;
493 spin_lock_irq(&current->sighand->siglock); 489 sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
494 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
495 if (!(ka->sa.sa_flags & SA_NOMASK)) 490 if (!(ka->sa.sa_flags & SA_NOMASK))
496 sigaddset(&current->blocked,signr); 491 sigaddset(&blocked, signr);
497 recalc_sigpending(); 492 set_current_blocked(&blocked);
498 spin_unlock_irq(&current->sighand->siglock);
499 493
500 tracehook_signal_handler(signr, info, ka, regs, 0); 494 tracehook_signal_handler(signr, info, ka, regs, 0);
501 495
@@ -601,7 +595,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
601 */ 595 */
602 if (current_thread_info()->status & TS_RESTORE_SIGMASK) { 596 if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
603 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 597 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
604 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 598 set_current_blocked(&current->saved_sigmask);
605 } 599 }
606} 600}
607 601
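The sparc signal hunks above replace the open-coded siglock/recalc_sigpending sequence with set_current_blocked(): the new mask is built in a local sigset_t and installed in one call. The same build-locally-then-install shape in portable userspace C, using the POSIX signal-mask API rather than the kernel one:

#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t blocked;

        /* Build the desired mask in a local set first... */
        sigemptyset(&blocked);
        sigaddset(&blocked, SIGUSR1);
        sigaddset(&blocked, SIGUSR2);

        /* ...then install it in one call, instead of editing the live mask piecemeal. */
        if (sigprocmask(SIG_SETMASK, &blocked, NULL) != 0) {
                perror("sigprocmask");
                return 1;
        }

        puts("SIGUSR1/SIGUSR2 now blocked");
        return 0;
}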
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index e485a6804998..13c2169822a8 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -162,7 +162,7 @@ ready:
162 printk(KERN_INFO "swprobe: padde %x\n", paddr_calc); 162 printk(KERN_INFO "swprobe: padde %x\n", paddr_calc);
163 if (paddr) 163 if (paddr)
164 *paddr = paddr_calc; 164 *paddr = paddr_calc;
165 return paddrbase; 165 return pte;
166} 166}
167 167
168void leon_flush_icache_all(void) 168void leon_flush_icache_all(void)
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index fc94607f0bd5..aecc8ed5f39b 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -21,7 +21,7 @@
21#include <asm/ptrace.h> 21#include <asm/ptrace.h>
22#include <asm/thread_info.h> 22#include <asm/thread_info.h>
23#include <asm/irqflags.h> 23#include <asm/irqflags.h>
24#include <linux/atomic.h> 24#include <asm/atomic_32.h>
25#include <asm/asm-offsets.h> 25#include <asm/asm-offsets.h>
26#include <hv/hypervisor.h> 26#include <hv/hypervisor.h>
27#include <arch/abi.h> 27#include <arch/abi.h>
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 1f75a2a56101..30638042691d 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -70,7 +70,7 @@
70 */ 70 */
71 71
72#include <linux/linkage.h> 72#include <linux/linkage.h>
73#include <linux/atomic.h> 73#include <asm/atomic_32.h>
74#include <asm/page.h> 74#include <asm/page.h>
75#include <asm/processor.h> 75#include <asm/processor.h>
76 76
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 18ae83dd1cd7..b56c65de384d 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -56,7 +56,7 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
56 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), 56 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
57}; 57};
58 58
59static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; 59static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
60 60
61static int __init vsyscall_setup(char *str) 61static int __init vsyscall_setup(char *str)
62{ 62{
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 30326443ab81..87488b93a65c 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -63,9 +63,8 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
63#ifdef CONFIG_X86_32 63#ifdef CONFIG_X86_32
64 /* for fixmap */ 64 /* for fixmap */
65 tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE); 65 tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
66
67 good_end = max_pfn_mapped << PAGE_SHIFT;
68#endif 66#endif
67 good_end = max_pfn_mapped << PAGE_SHIFT;
69 68
70 base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE); 69 base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
71 if (base == MEMBLOCK_ERROR) 70 if (base == MEMBLOCK_ERROR)
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 039d91315bc5..404f21a3ff9e 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -43,6 +43,17 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
43 DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"), 43 DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
44 }, 44 },
45 }, 45 },
46 /* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
47 /* 2006 AMD HT/VIA system with two host bridges */
48 {
49 .callback = set_use_crs,
50 .ident = "ASUS M2V-MX SE",
51 .matches = {
52 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
53 DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
54 DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
55 },
56 },
46 {} 57 {}
47}; 58};
48 59
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 58425adc22c6..fe73276e026b 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -678,38 +678,40 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
678 pentry = (struct sfi_device_table_entry *)sb->pentry; 678 pentry = (struct sfi_device_table_entry *)sb->pentry;
679 679
680 for (i = 0; i < num; i++, pentry++) { 680 for (i = 0; i < num; i++, pentry++) {
681 if (pentry->irq != (u8)0xff) { /* native RTE case */ 681 int irq = pentry->irq;
682
683 if (irq != (u8)0xff) { /* native RTE case */
682 /* these SPI2 devices are not exposed to system as PCI 684 /* these SPI2 devices are not exposed to system as PCI
683 * devices, but they have separate RTE entry in IOAPIC 685 * devices, but they have separate RTE entry in IOAPIC
684 * so we have to enable them one by one here 686 * so we have to enable them one by one here
685 */ 687 */
686 ioapic = mp_find_ioapic(pentry->irq); 688 ioapic = mp_find_ioapic(irq);
687 irq_attr.ioapic = ioapic; 689 irq_attr.ioapic = ioapic;
688 irq_attr.ioapic_pin = pentry->irq; 690 irq_attr.ioapic_pin = irq;
689 irq_attr.trigger = 1; 691 irq_attr.trigger = 1;
690 irq_attr.polarity = 1; 692 irq_attr.polarity = 1;
691 io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr); 693 io_apic_set_pci_routing(NULL, irq, &irq_attr);
692 } else 694 } else
693 pentry->irq = 0; /* No irq */ 695 irq = 0; /* No irq */
694 696
695 switch (pentry->type) { 697 switch (pentry->type) {
696 case SFI_DEV_TYPE_IPC: 698 case SFI_DEV_TYPE_IPC:
697 /* ID as IRQ is a hack that will go away */ 699 /* ID as IRQ is a hack that will go away */
698 pdev = platform_device_alloc(pentry->name, pentry->irq); 700 pdev = platform_device_alloc(pentry->name, irq);
699 if (pdev == NULL) { 701 if (pdev == NULL) {
700 pr_err("out of memory for SFI platform device '%s'.\n", 702 pr_err("out of memory for SFI platform device '%s'.\n",
701 pentry->name); 703 pentry->name);
702 continue; 704 continue;
703 } 705 }
704 install_irq_resource(pdev, pentry->irq); 706 install_irq_resource(pdev, irq);
705 pr_debug("info[%2d]: IPC bus, name = %16.16s, " 707 pr_debug("info[%2d]: IPC bus, name = %16.16s, "
706 "irq = 0x%2x\n", i, pentry->name, pentry->irq); 708 "irq = 0x%2x\n", i, pentry->name, irq);
707 sfi_handle_ipc_dev(pdev); 709 sfi_handle_ipc_dev(pdev);
708 break; 710 break;
709 case SFI_DEV_TYPE_SPI: 711 case SFI_DEV_TYPE_SPI:
710 memset(&spi_info, 0, sizeof(spi_info)); 712 memset(&spi_info, 0, sizeof(spi_info));
711 strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN); 713 strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
712 spi_info.irq = pentry->irq; 714 spi_info.irq = irq;
713 spi_info.bus_num = pentry->host_num; 715 spi_info.bus_num = pentry->host_num;
714 spi_info.chip_select = pentry->addr; 716 spi_info.chip_select = pentry->addr;
715 spi_info.max_speed_hz = pentry->max_freq; 717 spi_info.max_speed_hz = pentry->max_freq;
@@ -726,7 +728,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
726 memset(&i2c_info, 0, sizeof(i2c_info)); 728 memset(&i2c_info, 0, sizeof(i2c_info));
727 bus = pentry->host_num; 729 bus = pentry->host_num;
728 strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN); 730 strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
729 i2c_info.irq = pentry->irq; 731 i2c_info.irq = irq;
730 i2c_info.addr = pentry->addr; 732 i2c_info.addr = pentry->addr;
731 pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, " 733 pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, "
732 "irq = 0x%2x, addr = 0x%x\n", i, bus, 734 "irq = 0x%2x, addr = 0x%x\n", i, bus,
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index be4425616931..7835b8fc94db 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -67,6 +67,9 @@ static int ghash_update(struct shash_desc *desc,
67 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); 67 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
68 u8 *dst = dctx->buffer; 68 u8 *dst = dctx->buffer;
69 69
70 if (!ctx->gf128)
71 return -ENOKEY;
72
70 if (dctx->bytes) { 73 if (dctx->bytes) {
71 int n = min(srclen, dctx->bytes); 74 int n = min(srclen, dctx->bytes);
72 u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); 75 u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
@@ -119,6 +122,9 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
119 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); 122 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
120 u8 *buf = dctx->buffer; 123 u8 *buf = dctx->buffer;
121 124
125 if (!ctx->gf128)
126 return -ENOKEY;
127
122 ghash_flush(ctx, dctx); 128 ghash_flush(ctx, dctx);
123 memcpy(dst, buf, GHASH_BLOCK_SIZE); 129 memcpy(dst, buf, GHASH_BLOCK_SIZE);
124 130
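The ghash hunks add a guard that refuses update/final calls until a key has been set, returning -ENOKEY. A minimal standalone illustration of that guard (plain C with a toy context; ENOKEY is assumed to come from a Linux <errno.h>, and the real check tests ctx->gf128 inside the crypto shash framework):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct toy_hash_ctx {
        int keyed;                      /* stands in for ctx->gf128 being non-NULL */
        unsigned char state[16];
};

static void toy_setkey(struct toy_hash_ctx *ctx, const void *key, size_t len)
{
        memcpy(ctx->state, key, len < sizeof(ctx->state) ? len : sizeof(ctx->state));
        ctx->keyed = 1;
}

static int toy_update(struct toy_hash_ctx *ctx, const void *data, size_t len)
{
        if (!ctx->keyed)
                return -ENOKEY;         /* reject use before a key is installed */
        (void)data; (void)len;          /* real mixing omitted */
        return 0;
}

int main(void)
{
        struct toy_hash_ctx ctx = { 0 };

        printf("update before setkey: %d\n", toy_update(&ctx, "abc", 3));
        toy_setkey(&ctx, "0123456789abcdef", 16);
        printf("update after setkey:  %d\n", toy_update(&ctx, "abc", 3));
        return 0;
}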
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0599854e2217..118ec12d2d5f 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -34,8 +34,8 @@ struct gpio_bank {
34 u16 irq; 34 u16 irq;
35 u16 virtual_irq_start; 35 u16 virtual_irq_start;
36 int method; 36 int method;
37#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
38 u32 suspend_wakeup; 37 u32 suspend_wakeup;
38#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
39 u32 saved_wakeup; 39 u32 saved_wakeup;
40#endif 40#endif
41 u32 non_wakeup_gpios; 41 u32 non_wakeup_gpios;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index c43b8ff626a7..0550dcb85814 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -577,6 +577,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
577void 577void
578pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) 578pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
579{ 579{
580 *gpio_base = -1;
580} 581}
581#endif 582#endif
582 583
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index e88c64417a8a..14cc88aaf3a7 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -277,7 +277,12 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
277 case ATOM_ARG_FB: 277 case ATOM_ARG_FB:
278 idx = U8(*ptr); 278 idx = U8(*ptr);
279 (*ptr)++; 279 (*ptr)++;
280 val = gctx->scratch[((gctx->fb_base + idx) / 4)]; 280 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
281 DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
282 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
283 val = 0;
284 } else
285 val = gctx->scratch[(gctx->fb_base / 4) + idx];
281 if (print) 286 if (print)
282 DEBUG("FB[0x%02X]", idx); 287 DEBUG("FB[0x%02X]", idx);
283 break; 288 break;
@@ -531,7 +536,11 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
531 case ATOM_ARG_FB: 536 case ATOM_ARG_FB:
532 idx = U8(*ptr); 537 idx = U8(*ptr);
533 (*ptr)++; 538 (*ptr)++;
534 gctx->scratch[((gctx->fb_base + idx) / 4)] = val; 539 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
540 DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
541 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
542 } else
543 gctx->scratch[(gctx->fb_base / 4) + idx] = val;
535 DEBUG("FB[0x%02X]", idx); 544 DEBUG("FB[0x%02X]", idx);
536 break; 545 break;
537 case ATOM_ARG_PLL: 546 case ATOM_ARG_PLL:
@@ -1370,11 +1379,13 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
1370 1379
1371 usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; 1380 usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
1372 } 1381 }
1382 ctx->scratch_size_bytes = 0;
1373 if (usage_bytes == 0) 1383 if (usage_bytes == 0)
1374 usage_bytes = 20 * 1024; 1384 usage_bytes = 20 * 1024;
1375 /* allocate some scratch memory */ 1385 /* allocate some scratch memory */
1376 ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); 1386 ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
1377 if (!ctx->scratch) 1387 if (!ctx->scratch)
1378 return -ENOMEM; 1388 return -ENOMEM;
1389 ctx->scratch_size_bytes = usage_bytes;
1379 return 0; 1390 return 0;
1380} 1391}
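The atom.c hunks bound every FB scratch access against scratch_size_bytes before indexing the scratch array, failing soft on a read and skipping the store on a write. The read-side guard as a standalone sketch (toy sizes; only the u32 scratch layout and the comparison follow the patch, everything else is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct toy_ctx {
        uint32_t *scratch;
        int scratch_size_bytes;
        unsigned int fb_base;           /* byte offset of the FB window inside scratch */
};

static uint32_t fb_read(struct toy_ctx *c, uint8_t idx)
{
        unsigned int off = c->fb_base + (unsigned int)idx * 4;

        if (off > (unsigned int)c->scratch_size_bytes) {
                fprintf(stderr, "fb read beyond scratch region: %u vs. %d\n",
                        off, c->scratch_size_bytes);
                return 0;               /* out of range: fail soft, as the patch does */
        }
        return c->scratch[(c->fb_base / 4) + idx];
}

int main(void)
{
        struct toy_ctx c;

        c.scratch_size_bytes = 64;
        c.scratch = calloc(c.scratch_size_bytes / 4, sizeof(uint32_t));
        c.fb_base = 16;
        c.scratch[(c.fb_base / 4) + 2] = 0xdeadbeef;

        printf("in range:     %#x\n", fb_read(&c, 2));
        printf("out of range: %#x\n", fb_read(&c, 200));
        free(c.scratch);
        return 0;
}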
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index a589a55b223e..93cfe2086ba0 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -137,6 +137,7 @@ struct atom_context {
137 int cs_equal, cs_above; 137 int cs_equal, cs_above;
138 int io_mode; 138 int io_mode;
139 uint32_t *scratch; 139 uint32_t *scratch;
140 int scratch_size_bytes;
140}; 141};
141 142
142extern int atom_debug; 143extern int atom_debug;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c742944d3805..a515b2a09d85 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -466,7 +466,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
466 return; 466 return;
467 } 467 }
468 args.v2.ucEnable = enable; 468 args.v2.ucEnable = enable;
469 if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK)) 469 if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
470 args.v2.ucEnable = ATOM_DISABLE; 470 args.v2.ucEnable = ATOM_DISABLE;
471 } else if (ASIC_IS_DCE3(rdev)) { 471 } else if (ASIC_IS_DCE3(rdev)) {
472 args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 472 args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 4da23889fea6..79e8ebc05307 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -129,7 +129,9 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
129 for (retry = 0; retry < 4; retry++) { 129 for (retry = 0; retry < 4; retry++) {
130 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, 130 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
131 msg, msg_bytes, NULL, 0, delay, &ack); 131 msg, msg_bytes, NULL, 0, delay, &ack);
132 if (ret < 0) 132 if (ret == -EBUSY)
133 continue;
134 else if (ret < 0)
133 return ret; 135 return ret;
134 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 136 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
135 return send_bytes; 137 return send_bytes;
@@ -160,7 +162,9 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
160 for (retry = 0; retry < 4; retry++) { 162 for (retry = 0; retry < 4; retry++) {
161 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, 163 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
162 msg, msg_bytes, recv, recv_bytes, delay, &ack); 164 msg, msg_bytes, recv, recv_bytes, delay, &ack);
163 if (ret < 0) 165 if (ret == -EBUSY)
166 continue;
167 else if (ret < 0)
164 return ret; 168 return ret;
165 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 169 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
166 return ret; 170 return ret;
@@ -236,7 +240,9 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
236 for (retry = 0; retry < 4; retry++) { 240 for (retry = 0; retry < 4; retry++) {
237 ret = radeon_process_aux_ch(auxch, 241 ret = radeon_process_aux_ch(auxch,
238 msg, msg_bytes, reply, reply_bytes, 0, &ack); 242 msg, msg_bytes, reply, reply_bytes, 0, &ack);
239 if (ret < 0) { 243 if (ret == -EBUSY)
244 continue;
245 else if (ret < 0) {
240 DRM_DEBUG_KMS("aux_ch failed %d\n", ret); 246 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
241 return ret; 247 return ret;
242 } 248 }
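The DP AUX hunks retry the channel transaction when it reports -EBUSY instead of failing outright, while still bailing out on any other error. The retry shape in a standalone sketch (flaky_transfer() is a made-up stand-in for radeon_process_aux_ch):

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in: fails with -EBUSY twice, then succeeds. */
static int flaky_transfer(void)
{
        static int calls;
        return (++calls <= 2) ? -EBUSY : 4;     /* 4 bytes transferred */
}

static int send_with_retry(void)
{
        int retry, ret = -EIO;

        for (retry = 0; retry < 4; retry++) {
                ret = flaky_transfer();
                if (ret == -EBUSY)
                        continue;               /* transient: try again */
                else if (ret < 0)
                        return ret;             /* hard error: give up */
                return ret;                     /* success */
        }
        return ret;                             /* still busy after all retries */
}

int main(void)
{
        printf("result: %d\n", send_with_retry());
        return 0;
}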
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index bce63fd329d4..449c3d8c6836 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1303,23 +1303,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1303 /* get the DPCD from the bridge */ 1303 /* get the DPCD from the bridge */
1304 radeon_dp_getdpcd(radeon_connector); 1304 radeon_dp_getdpcd(radeon_connector);
1305 1305
1306 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) 1306 if (encoder) {
1307 ret = connector_status_connected; 1307 /* setup ddc on the bridge */
1308 else { 1308 radeon_atom_ext_encoder_setup_ddc(encoder);
1309 /* need to setup ddc on the bridge */
1310 if (encoder)
1311 radeon_atom_ext_encoder_setup_ddc(encoder);
1312 if (radeon_ddc_probe(radeon_connector, 1309 if (radeon_ddc_probe(radeon_connector,
1313 radeon_connector->requires_extended_probe)) 1310 radeon_connector->requires_extended_probe)) /* try DDC */
1314 ret = connector_status_connected; 1311 ret = connector_status_connected;
1315 } 1312 else if (radeon_connector->dac_load_detect) { /* try load detection */
1316 1313 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
1317 if ((ret == connector_status_disconnected) &&
1318 radeon_connector->dac_load_detect) {
1319 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1320 struct drm_encoder_helper_funcs *encoder_funcs;
1321 if (encoder) {
1322 encoder_funcs = encoder->helper_private;
1323 ret = encoder_funcs->detect(encoder, connector); 1314 ret = encoder_funcs->detect(encoder, connector);
1324 } 1315 }
1325 } 1316 }
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 13690f3eb4a4..eb3f6dc6df83 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1638,7 +1638,17 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1638 break; 1638 break;
1639 case 2: 1639 case 2:
1640 args.v2.ucCRTC = radeon_crtc->crtc_id; 1640 args.v2.ucCRTC = radeon_crtc->crtc_id;
1641 args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); 1641 if (radeon_encoder_is_dp_bridge(encoder)) {
1642 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1643
1644 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
1645 args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
1646 else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
1647 args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
1648 else
1649 args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
1650 } else
1651 args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
1642 switch (radeon_encoder->encoder_id) { 1652 switch (radeon_encoder->encoder_id) {
1643 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1653 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1644 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1654 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
@@ -1755,9 +1765,17 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1755 /* DCE4/5 */ 1765 /* DCE4/5 */
1756 if (ASIC_IS_DCE4(rdev)) { 1766 if (ASIC_IS_DCE4(rdev)) {
1757 dig = radeon_encoder->enc_priv; 1767 dig = radeon_encoder->enc_priv;
1758 if (ASIC_IS_DCE41(rdev)) 1768 if (ASIC_IS_DCE41(rdev)) {
1759 return radeon_crtc->crtc_id; 1769 /* ontario follows DCE4 */
1760 else { 1770 if (rdev->family == CHIP_PALM) {
1771 if (dig->linkb)
1772 return 1;
1773 else
1774 return 0;
1775 } else
1776 /* llano follows DCE3.2 */
1777 return radeon_crtc->crtc_id;
1778 } else {
1761 switch (radeon_encoder->encoder_id) { 1779 switch (radeon_encoder->encoder_id) {
1762 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1780 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1763 if (dig->linkb) 1781 if (dig->linkb)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ae3c6f5dd2b7..082fcaea583f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -321,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
321 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; 321 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
322 struct ttm_tt *ttm = bo->ttm; 322 struct ttm_tt *ttm = bo->ttm;
323 struct ttm_mem_reg *old_mem = &bo->mem; 323 struct ttm_mem_reg *old_mem = &bo->mem;
324 struct ttm_mem_reg old_copy; 324 struct ttm_mem_reg old_copy = *old_mem;
325 void *old_iomap; 325 void *old_iomap;
326 void *new_iomap; 326 void *new_iomap;
327 int ret; 327 int ret;
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index f2b377c56a3a..36d7f270b14d 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -390,7 +390,7 @@ temp_from_reg(u16 reg, s16 regval)
390{ 390{
391 if (is_word_sized(reg)) 391 if (is_word_sized(reg))
392 return LM75_TEMP_FROM_REG(regval); 392 return LM75_TEMP_FROM_REG(regval);
393 return regval * 1000; 393 return ((s8)regval) * 1000;
394} 394}
395 395
396static inline u16 396static inline u16
@@ -398,7 +398,8 @@ temp_to_reg(u16 reg, long temp)
398{ 398{
399 if (is_word_sized(reg)) 399 if (is_word_sized(reg))
400 return LM75_TEMP_TO_REG(temp); 400 return LM75_TEMP_TO_REG(temp);
401 return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), 1000); 401 return (s8)DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000),
402 1000);
402} 403}
403 404
404/* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */ 405/* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */
@@ -1715,7 +1716,8 @@ static void w83627ehf_device_remove_files(struct device *dev)
1715} 1716}
1716 1717
1717/* Get the monitoring functions started */ 1718/* Get the monitoring functions started */
1718static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data) 1719static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data,
1720 enum kinds kind)
1719{ 1721{
1720 int i; 1722 int i;
1721 u8 tmp, diode; 1723 u8 tmp, diode;
@@ -1746,10 +1748,16 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
1746 w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01); 1748 w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01);
1747 1749
1748 /* Get thermal sensor types */ 1750 /* Get thermal sensor types */
1749 diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE); 1751 switch (kind) {
1752 case w83627ehf:
1753 diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
1754 break;
1755 default:
1756 diode = 0x70;
1757 }
1750 for (i = 0; i < 3; i++) { 1758 for (i = 0; i < 3; i++) {
1751 if ((tmp & (0x02 << i))) 1759 if ((tmp & (0x02 << i)))
1752 data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 2; 1760 data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3;
1753 else 1761 else
1754 data->temp_type[i] = 4; /* thermistor */ 1762 data->temp_type[i] = 4; /* thermistor */
1755 } 1763 }
@@ -2016,7 +2024,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
2016 } 2024 }
2017 2025
2018 /* Initialize the chip */ 2026 /* Initialize the chip */
2019 w83627ehf_init_device(data); 2027 w83627ehf_init_device(data, sio_data->kind);
2020 2028
2021 data->vrm = vid_which_vrm(); 2029 data->vrm = vid_which_vrm();
2022 superio_enter(sio_data->sioreg); 2030 superio_enter(sio_data->sioreg);
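The temp_from_reg() change above casts the 8-bit register value to a signed type before scaling, so raw readings above 127 become negative temperatures instead of large positive ones. A short worked example of that cast in plain C (s8 spelled as int8_t, 0xFB chosen as an illustrative reading):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t regval = 0xFB;         /* example raw value from an 8-bit temp register */

        /* Without the cast: treated as 251 -> 251000 millidegrees C. */
        long wrong = (long)regval * 1000;

        /* With the cast, as in the patched temp_from_reg(): -5 -> -5000 millidegrees C. */
        long right = (long)(int8_t)regval * 1000;

        printf("unsigned: %ld  signed: %ld (millidegrees C)\n", wrong, right);
        return 0;
}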
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 9827c5e686cb..811dbbd9306c 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -327,7 +327,7 @@ config BLK_DEV_OPTI621
327 select BLK_DEV_IDEPCI 327 select BLK_DEV_IDEPCI
328 help 328 help
329 This is a driver for the OPTi 82C621 EIDE controller. 329 This is a driver for the OPTi 82C621 EIDE controller.
330 Please read the comments at the top of <file:drivers/ide/pci/opti621.c>. 330 Please read the comments at the top of <file:drivers/ide/opti621.c>.
331 331
332config BLK_DEV_RZ1000 332config BLK_DEV_RZ1000
333 tristate "RZ1000 chipset bugfix/support" 333 tristate "RZ1000 chipset bugfix/support"
@@ -365,7 +365,7 @@ config BLK_DEV_ALI15X3
365 normal dual channel support. 365 normal dual channel support.
366 366
367 Please read the comments at the top of 367 Please read the comments at the top of
368 <file:drivers/ide/pci/alim15x3.c>. 368 <file:drivers/ide/alim15x3.c>.
369 369
370 If unsure, say N. 370 If unsure, say N.
371 371
@@ -528,7 +528,7 @@ config BLK_DEV_NS87415
528 This driver adds detection and support for the NS87415 chip 528 This driver adds detection and support for the NS87415 chip
529 (used mainly on SPARC64 and PA-RISC machines). 529 (used mainly on SPARC64 and PA-RISC machines).
530 530
531 Please read the comments at the top of <file:drivers/ide/pci/ns87415.c>. 531 Please read the comments at the top of <file:drivers/ide/ns87415.c>.
532 532
533config BLK_DEV_PDC202XX_OLD 533config BLK_DEV_PDC202XX_OLD
534 tristate "PROMISE PDC202{46|62|65|67} support" 534 tristate "PROMISE PDC202{46|62|65|67} support"
@@ -547,7 +547,7 @@ config BLK_DEV_PDC202XX_OLD
547 for more than one card. 547 for more than one card.
548 548
549 Please read the comments at the top of 549 Please read the comments at the top of
550 <file:drivers/ide/pci/pdc202xx_old.c>. 550 <file:drivers/ide/pdc202xx_old.c>.
551 551
552 If unsure, say N. 552 If unsure, say N.
553 553
@@ -593,7 +593,7 @@ config BLK_DEV_SIS5513
593 ATA100: SiS635, SiS645, SiS650, SiS730, SiS735, SiS740, 593 ATA100: SiS635, SiS645, SiS650, SiS730, SiS735, SiS740,
594 SiS745, SiS750 594 SiS745, SiS750
595 595
596 Please read the comments at the top of <file:drivers/ide/pci/sis5513.c>. 596 Please read the comments at the top of <file:drivers/ide/sis5513.c>.
597 597
598config BLK_DEV_SL82C105 598config BLK_DEV_SL82C105
599 tristate "Winbond SL82c105 support" 599 tristate "Winbond SL82c105 support"
@@ -616,7 +616,7 @@ config BLK_DEV_SLC90E66
616 look-a-like to the PIIX4 it should be a nice addition. 616 look-a-like to the PIIX4 it should be a nice addition.
617 617
618 Please read the comments at the top of 618 Please read the comments at the top of
619 <file:drivers/ide/pci/slc90e66.c>. 619 <file:drivers/ide/slc90e66.c>.
620 620
621config BLK_DEV_TRM290 621config BLK_DEV_TRM290
622 tristate "Tekram TRM290 chipset support" 622 tristate "Tekram TRM290 chipset support"
@@ -625,7 +625,7 @@ config BLK_DEV_TRM290
625 This driver adds support for bus master DMA transfers 625 This driver adds support for bus master DMA transfers
626 using the Tekram TRM290 PCI IDE chip. Volunteers are 626 using the Tekram TRM290 PCI IDE chip. Volunteers are
627 needed for further tweaking and development. 627 needed for further tweaking and development.
628 Please read the comments at the top of <file:drivers/ide/pci/trm290.c>. 628 Please read the comments at the top of <file:drivers/ide/trm290.c>.
629 629
630config BLK_DEV_VIA82CXXX 630config BLK_DEV_VIA82CXXX
631 tristate "VIA82CXXX chipset support" 631 tristate "VIA82CXXX chipset support"
@@ -836,7 +836,7 @@ config BLK_DEV_ALI14XX
836 of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster 836 of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster
837 I/O speeds to be set as well. 837 I/O speeds to be set as well.
838 See the files <file:Documentation/ide/ide.txt> and 838 See the files <file:Documentation/ide/ide.txt> and
839 <file:drivers/ide/legacy/ali14xx.c> for more info. 839 <file:drivers/ide/ali14xx.c> for more info.
840 840
841config BLK_DEV_DTC2278 841config BLK_DEV_DTC2278
842 tristate "DTC-2278 support" 842 tristate "DTC-2278 support"
@@ -847,7 +847,7 @@ config BLK_DEV_DTC2278
847 boot parameter. It enables support for the secondary IDE interface 847 boot parameter. It enables support for the secondary IDE interface
848 of the DTC-2278 card, and permits faster I/O speeds to be set as 848 of the DTC-2278 card, and permits faster I/O speeds to be set as
849 well. See the <file:Documentation/ide/ide.txt> and 849 well. See the <file:Documentation/ide/ide.txt> and
850 <file:drivers/ide/legacy/dtc2278.c> files for more info. 850 <file:drivers/ide/dtc2278.c> files for more info.
851 851
852config BLK_DEV_HT6560B 852config BLK_DEV_HT6560B
853 tristate "Holtek HT6560B support" 853 tristate "Holtek HT6560B support"
@@ -858,7 +858,7 @@ config BLK_DEV_HT6560B
858 boot parameter. It enables support for the secondary IDE interface 858 boot parameter. It enables support for the secondary IDE interface
859 of the Holtek card, and permits faster I/O speeds to be set as well. 859 of the Holtek card, and permits faster I/O speeds to be set as well.
860 See the <file:Documentation/ide/ide.txt> and 860 See the <file:Documentation/ide/ide.txt> and
861 <file:drivers/ide/legacy/ht6560b.c> files for more info. 861 <file:drivers/ide/ht6560b.c> files for more info.
862 862
863config BLK_DEV_QD65XX 863config BLK_DEV_QD65XX
864 tristate "QDI QD65xx support" 864 tristate "QDI QD65xx support"
@@ -867,7 +867,7 @@ config BLK_DEV_QD65XX
867 help 867 help
868 This driver is enabled at runtime using the "qd65xx.probe" kernel 868 This driver is enabled at runtime using the "qd65xx.probe" kernel
869 boot parameter. It permits faster I/O speeds to be set. See the 869 boot parameter. It permits faster I/O speeds to be set. See the
870 <file:Documentation/ide/ide.txt> and <file:drivers/ide/legacy/qd65xx.c> 870 <file:Documentation/ide/ide.txt> and <file:drivers/ide/qd65xx.c>
871 for more info. 871 for more info.
872 872
873config BLK_DEV_UMC8672 873config BLK_DEV_UMC8672
@@ -879,7 +879,7 @@ config BLK_DEV_UMC8672
879 boot parameter. It enables support for the secondary IDE interface 879 boot parameter. It enables support for the secondary IDE interface
880 of the UMC-8672, and permits faster I/O speeds to be set as well. 880 of the UMC-8672, and permits faster I/O speeds to be set as well.
881 See the files <file:Documentation/ide/ide.txt> and 881 See the files <file:Documentation/ide/ide.txt> and
882 <file:drivers/ide/legacy/umc8672.c> for more info. 882 <file:drivers/ide/umc8672.c> for more info.
883 883
884endif 884endif
885 885
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 0dc97ec15c28..9dea71849f40 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1124,11 +1124,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1124 for (i = 0; i < 8; i++) 1124 for (i = 0; i < 8; i++)
1125 __set_bit(BTN_0 + i, input_dev->keybit); 1125 __set_bit(BTN_0 + i, input_dev->keybit);
1126 1126
1127 if (wacom_wac->features.type != WACOM_21UX2) { 1127 input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
1128 input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); 1128 input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
1129 input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
1130 }
1131
1132 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1129 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1133 1130
1134 __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); 1131 __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c621c98c99da..a88f3cbb100b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -306,6 +306,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
306 return (pte->val & 3) != 0; 306 return (pte->val & 3) != 0;
307} 307}
308 308
309static inline bool dma_pte_superpage(struct dma_pte *pte)
310{
311 return (pte->val & (1 << 7));
312}
313
309static inline int first_pte_in_page(struct dma_pte *pte) 314static inline int first_pte_in_page(struct dma_pte *pte)
310{ 315{
311 return !((unsigned long)pte & ~VTD_PAGE_MASK); 316 return !((unsigned long)pte & ~VTD_PAGE_MASK);
@@ -404,6 +409,9 @@ static int dmar_forcedac;
404static int intel_iommu_strict; 409static int intel_iommu_strict;
405static int intel_iommu_superpage = 1; 410static int intel_iommu_superpage = 1;
406 411
412int intel_iommu_gfx_mapped;
413EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
414
407#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1)) 415#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
408static DEFINE_SPINLOCK(device_domain_lock); 416static DEFINE_SPINLOCK(device_domain_lock);
409static LIST_HEAD(device_domain_list); 417static LIST_HEAD(device_domain_list);
@@ -577,17 +585,18 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
577 585
578static void domain_update_iommu_superpage(struct dmar_domain *domain) 586static void domain_update_iommu_superpage(struct dmar_domain *domain)
579{ 587{
580 int i, mask = 0xf; 588 struct dmar_drhd_unit *drhd;
589 struct intel_iommu *iommu = NULL;
590 int mask = 0xf;
581 591
582 if (!intel_iommu_superpage) { 592 if (!intel_iommu_superpage) {
583 domain->iommu_superpage = 0; 593 domain->iommu_superpage = 0;
584 return; 594 return;
585 } 595 }
586 596
587 domain->iommu_superpage = 4; /* 1TiB */ 597 /* set iommu_superpage to the smallest common denominator */
588 598 for_each_active_iommu(iommu, drhd) {
589 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { 599 mask &= cap_super_page_val(iommu->cap);
590 mask |= cap_super_page_val(g_iommus[i]->cap);
591 if (!mask) { 600 if (!mask) {
592 break; 601 break;
593 } 602 }
@@ -730,29 +739,23 @@ out:
730} 739}
731 740
732static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, 741static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
733 unsigned long pfn, int large_level) 742 unsigned long pfn, int target_level)
734{ 743{
735 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; 744 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
736 struct dma_pte *parent, *pte = NULL; 745 struct dma_pte *parent, *pte = NULL;
737 int level = agaw_to_level(domain->agaw); 746 int level = agaw_to_level(domain->agaw);
738 int offset, target_level; 747 int offset;
739 748
740 BUG_ON(!domain->pgd); 749 BUG_ON(!domain->pgd);
741 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width); 750 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
742 parent = domain->pgd; 751 parent = domain->pgd;
743 752
744 /* Search pte */
745 if (!large_level)
746 target_level = 1;
747 else
748 target_level = large_level;
749
750 while (level > 0) { 753 while (level > 0) {
751 void *tmp_page; 754 void *tmp_page;
752 755
753 offset = pfn_level_offset(pfn, level); 756 offset = pfn_level_offset(pfn, level);
754 pte = &parent[offset]; 757 pte = &parent[offset];
755 if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE)) 758 if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
756 break; 759 break;
757 if (level == target_level) 760 if (level == target_level)
758 break; 761 break;
@@ -816,13 +819,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
816} 819}
817 820
818/* clear last level pte, a tlb flush should be followed */ 821/* clear last level pte, a tlb flush should be followed */
819static void dma_pte_clear_range(struct dmar_domain *domain, 822static int dma_pte_clear_range(struct dmar_domain *domain,
820 unsigned long start_pfn, 823 unsigned long start_pfn,
821 unsigned long last_pfn) 824 unsigned long last_pfn)
822{ 825{
823 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; 826 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
824 unsigned int large_page = 1; 827 unsigned int large_page = 1;
825 struct dma_pte *first_pte, *pte; 828 struct dma_pte *first_pte, *pte;
829 int order;
826 830
827 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); 831 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
828 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); 832 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -846,6 +850,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
846 (void *)pte - (void *)first_pte); 850 (void *)pte - (void *)first_pte);
847 851
848 } while (start_pfn && start_pfn <= last_pfn); 852 } while (start_pfn && start_pfn <= last_pfn);
853
854 order = (large_page - 1) * 9;
855 return order;
849} 856}
850 857
851/* free page table pages. last level pte should already be cleared */ 858/* free page table pages. last level pte should already be cleared */
@@ -3226,9 +3233,6 @@ static void __init init_no_remapping_devices(void)
3226 } 3233 }
3227 } 3234 }
3228 3235
3229 if (dmar_map_gfx)
3230 return;
3231
3232 for_each_drhd_unit(drhd) { 3236 for_each_drhd_unit(drhd) {
3233 int i; 3237 int i;
3234 if (drhd->ignored || drhd->include_all) 3238 if (drhd->ignored || drhd->include_all)
@@ -3236,18 +3240,23 @@ static void __init init_no_remapping_devices(void)
3236 3240
3237 for (i = 0; i < drhd->devices_cnt; i++) 3241 for (i = 0; i < drhd->devices_cnt; i++)
3238 if (drhd->devices[i] && 3242 if (drhd->devices[i] &&
3239 !IS_GFX_DEVICE(drhd->devices[i])) 3243 !IS_GFX_DEVICE(drhd->devices[i]))
3240 break; 3244 break;
3241 3245
3242 if (i < drhd->devices_cnt) 3246 if (i < drhd->devices_cnt)
3243 continue; 3247 continue;
3244 3248
3245 /* bypass IOMMU if it is just for gfx devices */ 3249 /* This IOMMU has *only* gfx devices. Either bypass it or
3246 drhd->ignored = 1; 3250 set the gfx_mapped flag, as appropriate */
3247 for (i = 0; i < drhd->devices_cnt; i++) { 3251 if (dmar_map_gfx) {
3248 if (!drhd->devices[i]) 3252 intel_iommu_gfx_mapped = 1;
3249 continue; 3253 } else {
3250 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; 3254 drhd->ignored = 1;
3255 for (i = 0; i < drhd->devices_cnt; i++) {
3256 if (!drhd->devices[i])
3257 continue;
3258 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3259 }
3251 } 3260 }
3252 } 3261 }
3253} 3262}
@@ -3568,6 +3577,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
3568 found = 1; 3577 found = 1;
3569 } 3578 }
3570 3579
3580 spin_unlock_irqrestore(&device_domain_lock, flags);
3581
3571 if (found == 0) { 3582 if (found == 0) {
3572 unsigned long tmp_flags; 3583 unsigned long tmp_flags;
3573 spin_lock_irqsave(&domain->iommu_lock, tmp_flags); 3584 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
@@ -3584,8 +3595,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
3584 spin_unlock_irqrestore(&iommu->lock, tmp_flags); 3595 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3585 } 3596 }
3586 } 3597 }
3587
3588 spin_unlock_irqrestore(&device_domain_lock, flags);
3589} 3598}
3590 3599
3591static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) 3600static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
@@ -3739,6 +3748,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
3739 vm_domain_exit(dmar_domain); 3748 vm_domain_exit(dmar_domain);
3740 return -ENOMEM; 3749 return -ENOMEM;
3741 } 3750 }
3751 domain_update_iommu_cap(dmar_domain);
3742 domain->priv = dmar_domain; 3752 domain->priv = dmar_domain;
3743 3753
3744 return 0; 3754 return 0;
@@ -3864,14 +3874,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
3864{ 3874{
3865 struct dmar_domain *dmar_domain = domain->priv; 3875 struct dmar_domain *dmar_domain = domain->priv;
3866 size_t size = PAGE_SIZE << gfp_order; 3876 size_t size = PAGE_SIZE << gfp_order;
3877 int order;
3867 3878
3868 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, 3879 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3869 (iova + size - 1) >> VTD_PAGE_SHIFT); 3880 (iova + size - 1) >> VTD_PAGE_SHIFT);
3870 3881
3871 if (dmar_domain->max_addr == iova + size) 3882 if (dmar_domain->max_addr == iova + size)
3872 dmar_domain->max_addr = iova; 3883 dmar_domain->max_addr = iova;
3873 3884
3874 return gfp_order; 3885 return order;
3875} 3886}
3876 3887
3877static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, 3888static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3950,7 +3961,11 @@ static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
3950 if (!(ggc & GGC_MEMORY_VT_ENABLED)) { 3961 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
3951 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); 3962 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
3952 dmar_map_gfx = 0; 3963 dmar_map_gfx = 0;
3953 } 3964 } else if (dmar_map_gfx) {
3965 /* we have to ensure the gfx device is idle before we flush */
3966 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
3967 intel_iommu_strict = 1;
3968 }
3954} 3969}
3955DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); 3970DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
3956DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); 3971DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 49da55c1528a..8c2a000cf3f5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1698 } 1698 }
1699 1699
1700 ti->num_flush_requests = 1; 1700 ti->num_flush_requests = 1;
1701 ti->discard_zeroes_data_unsupported = 1;
1702
1701 return 0; 1703 return 0;
1702 1704
1703bad: 1705bad:
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 89f73ca22cfa..f84c08029b21 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
81 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags> 81 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
82 */ 82 */
83 if (!strcasecmp(arg_name, "corrupt_bio_byte")) { 83 if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
84 if (!argc) 84 if (!argc) {
85 ti->error = "Feature corrupt_bio_byte requires parameters"; 85 ti->error = "Feature corrupt_bio_byte requires parameters";
86 return -EINVAL;
87 }
86 88
87 r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); 89 r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
88 if (r) 90 if (r)
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index f82147029636..32ac70861d66 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -628,6 +628,7 @@ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
628 job->kc = kc; 628 job->kc = kc;
629 job->fn = fn; 629 job->fn = fn;
630 job->context = context; 630 job->context = context;
631 job->master_job = job;
631 632
632 atomic_inc(&kc->nr_jobs); 633 atomic_inc(&kc->nr_jobs);
633 634
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index a002dd85db1e..86df8b2cf927 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
449 rs->ti->error = "write_mostly option is only valid for RAID1"; 449 rs->ti->error = "write_mostly option is only valid for RAID1";
450 return -EINVAL; 450 return -EINVAL;
451 } 451 }
452 if (value > rs->md.raid_disks) { 452 if (value >= rs->md.raid_disks) {
453 rs->ti->error = "Invalid write_mostly drive index given"; 453 rs->ti->error = "Invalid write_mostly drive index given";
454 return -EINVAL; 454 return -EINVAL;
455 } 455 }
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 986b8754bb08..bc04518e9d8b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t)
1238 return; 1238 return;
1239 1239
1240 template_disk = dm_table_get_integrity_disk(t, true); 1240 template_disk = dm_table_get_integrity_disk(t, true);
1241 if (!template_disk && 1241 if (template_disk)
1242 blk_integrity_is_initialized(dm_disk(t->md))) { 1242 blk_integrity_register(dm_disk(t->md),
1243 blk_get_integrity(template_disk));
1244 else if (blk_integrity_is_initialized(dm_disk(t->md)))
1243 DMWARN("%s: device no longer has a valid integrity profile", 1245 DMWARN("%s: device no longer has a valid integrity profile",
1244 dm_device_name(t->md)); 1246 dm_device_name(t->md));
1245 return; 1247 else
1246 } 1248 DMWARN("%s: unable to establish an integrity profile",
1247 blk_integrity_register(dm_disk(t->md), 1249 dm_device_name(t->md));
1248 blk_get_integrity(template_disk));
1249} 1250}
1250 1251
1251static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, 1252static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
1282 return 0; 1283 return 0;
1283} 1284}
1284 1285
1286static bool dm_table_discard_zeroes_data(struct dm_table *t)
1287{
1288 struct dm_target *ti;
1289 unsigned i = 0;
1290
1291 /* Ensure that all targets support discard_zeroes_data. */
1292 while (i < dm_table_get_num_targets(t)) {
1293 ti = dm_table_get_target(t, i++);
1294
1295 if (ti->discard_zeroes_data_unsupported)
1296 return 0;
1297 }
1298
1299 return 1;
1300}
1301
1285void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, 1302void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1286 struct queue_limits *limits) 1303 struct queue_limits *limits)
1287{ 1304{
@@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1304 } 1321 }
1305 blk_queue_flush(q, flush); 1322 blk_queue_flush(q, flush);
1306 1323
1324 if (!dm_table_discard_zeroes_data(t))
1325 q->limits.discard_zeroes_data = 0;
1326
1307 dm_table_set_integrity(t); 1327 dm_table_set_integrity(t);
1308 1328
1309 /* 1329 /*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5404b2295820..5c95ccb59500 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -61,6 +61,11 @@
61static void autostart_arrays(int part); 61static void autostart_arrays(int part);
62#endif 62#endif
63 63
64/* pers_list is a list of registered personalities protected
65 * by pers_lock.
66 * pers_lock does extra service to protect accesses to
67 * mddev->thread when the mutex cannot be held.
68 */
64static LIST_HEAD(pers_list); 69static LIST_HEAD(pers_list);
65static DEFINE_SPINLOCK(pers_lock); 70static DEFINE_SPINLOCK(pers_lock);
66 71
@@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev)
739 } else 744 } else
740 mutex_unlock(&mddev->reconfig_mutex); 745 mutex_unlock(&mddev->reconfig_mutex);
741 746
747 /* As we've dropped the mutex we need a spinlock to
748 * make sure the thread doesn't disappear
749 */
750 spin_lock(&pers_lock);
742 md_wakeup_thread(mddev->thread); 751 md_wakeup_thread(mddev->thread);
752 spin_unlock(&pers_lock);
743} 753}
744 754
745static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) 755static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
@@ -6429,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
6429 return thread; 6439 return thread;
6430} 6440}
6431 6441
6432void md_unregister_thread(mdk_thread_t *thread) 6442void md_unregister_thread(mdk_thread_t **threadp)
6433{ 6443{
6444 mdk_thread_t *thread = *threadp;
6434 if (!thread) 6445 if (!thread)
6435 return; 6446 return;
6436 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 6447 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
6448 /* Locking ensures that mddev_unlock does not wake_up a
6449 * non-existent thread
6450 */
6451 spin_lock(&pers_lock);
6452 *threadp = NULL;
6453 spin_unlock(&pers_lock);
6437 6454
6438 kthread_stop(thread->tsk); 6455 kthread_stop(thread->tsk);
6439 kfree(thread); 6456 kfree(thread);
@@ -7340,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev)
7340 mdk_rdev_t *rdev; 7357 mdk_rdev_t *rdev;
7341 7358
7342 /* resync has finished, collect result */ 7359 /* resync has finished, collect result */
7343 md_unregister_thread(mddev->sync_thread); 7360 md_unregister_thread(&mddev->sync_thread);
7344 mddev->sync_thread = NULL;
7345 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 7361 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7346 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 7362 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7347 /* success...*/ 7363 /* success...*/
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1e586bb4452e..0a309dc29b45 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p);
560extern int unregister_md_personality(struct mdk_personality *p); 560extern int unregister_md_personality(struct mdk_personality *p);
561extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), 561extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
562 mddev_t *mddev, const char *name); 562 mddev_t *mddev, const char *name);
563extern void md_unregister_thread(mdk_thread_t *thread); 563extern void md_unregister_thread(mdk_thread_t **threadp);
564extern void md_wakeup_thread(mdk_thread_t *thread); 564extern void md_wakeup_thread(mdk_thread_t *thread);
565extern void md_check_recovery(mddev_t *mddev); 565extern void md_check_recovery(mddev_t *mddev);
566extern void md_write_start(mddev_t *mddev, struct bio *bi); 566extern void md_write_start(mddev_t *mddev, struct bio *bi);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 3535c23af288..d5b5fb300171 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev)
514{ 514{
515 multipath_conf_t *conf = mddev->private; 515 multipath_conf_t *conf = mddev->private;
516 516
517 md_unregister_thread(mddev->thread); 517 md_unregister_thread(&mddev->thread);
518 mddev->thread = NULL;
519 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 518 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
520 mempool_destroy(conf->pool); 519 mempool_destroy(conf->pool);
521 kfree(conf->multipaths); 520 kfree(conf->multipaths);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f4622dd8fc59..d9587dffe533 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2562,8 +2562,7 @@ static int stop(mddev_t *mddev)
2562 raise_barrier(conf); 2562 raise_barrier(conf);
2563 lower_barrier(conf); 2563 lower_barrier(conf);
2564 2564
2565 md_unregister_thread(mddev->thread); 2565 md_unregister_thread(&mddev->thread);
2566 mddev->thread = NULL;
2567 if (conf->r1bio_pool) 2566 if (conf->r1bio_pool)
2568 mempool_destroy(conf->r1bio_pool); 2567 mempool_destroy(conf->r1bio_pool);
2569 kfree(conf->mirrors); 2568 kfree(conf->mirrors);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d7a8468ddeab..0cd9672cf9cb 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2955,7 +2955,7 @@ static int run(mddev_t *mddev)
2955 return 0; 2955 return 0;
2956 2956
2957out_free_conf: 2957out_free_conf:
2958 md_unregister_thread(mddev->thread); 2958 md_unregister_thread(&mddev->thread);
2959 if (conf->r10bio_pool) 2959 if (conf->r10bio_pool)
2960 mempool_destroy(conf->r10bio_pool); 2960 mempool_destroy(conf->r10bio_pool);
2961 safe_put_page(conf->tmppage); 2961 safe_put_page(conf->tmppage);
@@ -2973,8 +2973,7 @@ static int stop(mddev_t *mddev)
2973 raise_barrier(conf, 0); 2973 raise_barrier(conf, 0);
2974 lower_barrier(conf); 2974 lower_barrier(conf);
2975 2975
2976 md_unregister_thread(mddev->thread); 2976 md_unregister_thread(&mddev->thread);
2977 mddev->thread = NULL;
2978 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 2977 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2979 if (conf->r10bio_pool) 2978 if (conf->r10bio_pool)
2980 mempool_destroy(conf->r10bio_pool); 2979 mempool_destroy(conf->r10bio_pool);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 43709fa6b6df..ac5e8b57e50f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev)
4941 4941
4942 return 0; 4942 return 0;
4943abort: 4943abort:
4944 md_unregister_thread(mddev->thread); 4944 md_unregister_thread(&mddev->thread);
4945 mddev->thread = NULL;
4946 if (conf) { 4945 if (conf) {
4947 print_raid5_conf(conf); 4946 print_raid5_conf(conf);
4948 free_conf(conf); 4947 free_conf(conf);
@@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev)
4956{ 4955{
4957 raid5_conf_t *conf = mddev->private; 4956 raid5_conf_t *conf = mddev->private;
4958 4957
4959 md_unregister_thread(mddev->thread); 4958 md_unregister_thread(&mddev->thread);
4960 mddev->thread = NULL;
4961 if (mddev->queue) 4959 if (mddev->queue)
4962 mddev->queue->backing_dev_info.congested_fn = NULL; 4960 mddev->queue->backing_dev_info.congested_fn = NULL;
4963 free_conf(conf); 4961 free_conf(conf);
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index d72156517726..a5c9ed128b97 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -181,7 +181,7 @@ static void v4l2_device_release(struct device *cd)
181 * TODO: In the long run all drivers that use v4l2_device should use the 181 * TODO: In the long run all drivers that use v4l2_device should use the
182 * v4l2_device release callback. This check will then be unnecessary. 182 * v4l2_device release callback. This check will then be unnecessary.
183 */ 183 */
184 if (v4l2_dev->release == NULL) 184 if (v4l2_dev && v4l2_dev->release == NULL)
185 v4l2_dev = NULL; 185 v4l2_dev = NULL;
186 186
187 /* Release video_device and perform other 187 /* Release video_device and perform other
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index e46df5331c55..9a7eb3b36cf3 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -239,13 +239,19 @@ void bnx2x_int_disable(struct bnx2x *bp);
239 * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X 239 * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
240 * 240 *
241 */ 241 */
242/* iSCSI L2 */ 242enum {
243#define BNX2X_ISCSI_ETH_CL_ID_IDX 1 243 BNX2X_ISCSI_ETH_CL_ID_IDX,
244#define BNX2X_ISCSI_ETH_CID 49 244 BNX2X_FCOE_ETH_CL_ID_IDX,
245 BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
246};
245 247
246/* FCoE L2 */ 248#define BNX2X_CNIC_START_ETH_CID 48
247#define BNX2X_FCOE_ETH_CL_ID_IDX 2 249enum {
248#define BNX2X_FCOE_ETH_CID 50 250 /* iSCSI L2 */
251 BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID,
252 /* FCoE L2 */
253 BNX2X_FCOE_ETH_CID,
254};
249 255
250/** Additional rings budgeting */ 256/** Additional rings budgeting */
251#ifdef BCM_CNIC 257#ifdef BCM_CNIC
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 223bfeebc597..2dc1199239d0 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -1297,7 +1297,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
1297static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) 1297static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
1298{ 1298{
1299 return bp->cnic_base_cl_id + cl_idx + 1299 return bp->cnic_base_cl_id + cl_idx +
1300 (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE; 1300 (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
1301} 1301}
1302 1302
1303static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) 1303static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6d79b78cfc75..de3d351ccb6b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1435,6 +1435,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1435 struct sk_buff *skb = *pskb; 1435 struct sk_buff *skb = *pskb;
1436 struct slave *slave; 1436 struct slave *slave;
1437 struct bonding *bond; 1437 struct bonding *bond;
1438 void (*recv_probe)(struct sk_buff *, struct bonding *,
1439 struct slave *);
1438 1440
1439 skb = skb_share_check(skb, GFP_ATOMIC); 1441 skb = skb_share_check(skb, GFP_ATOMIC);
1440 if (unlikely(!skb)) 1442 if (unlikely(!skb))
@@ -1448,11 +1450,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1448 if (bond->params.arp_interval) 1450 if (bond->params.arp_interval)
1449 slave->dev->last_rx = jiffies; 1451 slave->dev->last_rx = jiffies;
1450 1452
1451 if (bond->recv_probe) { 1453 recv_probe = ACCESS_ONCE(bond->recv_probe);
1454 if (recv_probe) {
1452 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 1455 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1453 1456
1454 if (likely(nskb)) { 1457 if (likely(nskb)) {
1455 bond->recv_probe(nskb, bond, slave); 1458 recv_probe(nskb, bond, slave);
1456 dev_kfree_skb(nskb); 1459 dev_kfree_skb(nskb);
1457 } 1460 }
1458 } 1461 }
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 92feac68b66e..4cc6f44c2ba2 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
261 void __iomem *data = &regs->tx.dsr1_0; 261 void __iomem *data = &regs->tx.dsr1_0;
262 u16 *payload = (u16 *)frame->data; 262 u16 *payload = (u16 *)frame->data;
263 263
264 /* It is safe to write into dsr[dlc+1] */ 264 for (i = 0; i < frame->can_dlc / 2; i++) {
265 for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
266 out_be16(data, *payload++); 265 out_be16(data, *payload++);
267 data += 2 + _MSCAN_RESERVED_DSR_SIZE; 266 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
268 } 267 }
268 /* write remaining byte if necessary */
269 if (frame->can_dlc & 1)
270 out_8(data, frame->data[frame->can_dlc - 1]);
269 } 271 }
270 272
271 out_8(&regs->tx.dlr, frame->can_dlc); 273 out_8(&regs->tx.dlr, frame->can_dlc);
@@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
330 void __iomem *data = &regs->rx.dsr1_0; 332 void __iomem *data = &regs->rx.dsr1_0;
331 u16 *payload = (u16 *)frame->data; 333 u16 *payload = (u16 *)frame->data;
332 334
333 for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { 335 for (i = 0; i < frame->can_dlc / 2; i++) {
334 *payload++ = in_be16(data); 336 *payload++ = in_be16(data);
335 data += 2 + _MSCAN_RESERVED_DSR_SIZE; 337 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
336 } 338 }
339 /* read remaining byte if necessary */
340 if (frame->can_dlc & 1)
341 frame->data[frame->can_dlc - 1] = in_8(data);
337 } 342 }
338 343
339 out_8(&regs->canrflg, MSCAN_RXF); 344 out_8(&regs->canrflg, MSCAN_RXF);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 05172c39a0ce..376e3e94bae0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
239 dest = macvlan_hash_lookup(port, eth->h_dest); 239 dest = macvlan_hash_lookup(port, eth->h_dest);
240 if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { 240 if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
241 /* send to lowerdev first for its network taps */ 241 /* send to lowerdev first for its network taps */
242 vlan->forward(vlan->lowerdev, skb); 242 dev_forward_skb(vlan->lowerdev, skb);
243 243
244 return NET_XMIT_SUCCESS; 244 return NET_XMIT_SUCCESS;
245 } 245 }
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 6e03de034ac7..f76ab6bf3096 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -172,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
172 memset(ring->buf, 0, ring->buf_size); 172 memset(ring->buf, 0, ring->buf_size);
173 173
174 ring->qp_state = MLX4_QP_STATE_RST; 174 ring->qp_state = MLX4_QP_STATE_RST;
175 ring->doorbell_qpn = swab32(ring->qp.qpn << 8); 175 ring->doorbell_qpn = ring->qp.qpn << 8;
176 176
177 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 177 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
178 ring->cqn, &ring->context); 178 ring->cqn, &ring->context);
@@ -791,7 +791,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
791 skb_orphan(skb); 791 skb_orphan(skb);
792 792
793 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { 793 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
794 *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn; 794 *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
795 op_own |= htonl((bf_index & 0xffff) << 8); 795 op_own |= htonl((bf_index & 0xffff) << 8);
796 /* Ensure new descirptor hits memory 796 /* Ensure new descirptor hits memory
797 * before setting ownership of this descriptor to HW */ 797 * before setting ownership of this descriptor to HW */
@@ -812,7 +812,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
812 wmb(); 812 wmb();
813 tx_desc->ctrl.owner_opcode = op_own; 813 tx_desc->ctrl.owner_opcode = op_own;
814 wmb(); 814 wmb();
815 writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); 815 iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
816 } 816 }
817 817
818 /* Poll CQ here */ 818 /* Poll CQ here */
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index ed2a3977c6e7..e8882023576b 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -307,6 +307,11 @@ static ssize_t store_enabled(struct netconsole_target *nt,
307 return err; 307 return err;
308 if (enabled < 0 || enabled > 1) 308 if (enabled < 0 || enabled > 1)
309 return -EINVAL; 309 return -EINVAL;
310 if (enabled == nt->enabled) {
311 printk(KERN_INFO "netconsole: network logging has already %s\n",
312 nt->enabled ? "started" : "stopped");
313 return -EINVAL;
314 }
310 315
311 if (enabled) { /* 1 */ 316 if (enabled) { /* 1 */
312 317
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index eae542a7e987..89f829f5f725 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -285,8 +285,10 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
285 ip_send_check(iph); 285 ip_send_check(iph);
286 286
287 ip_local_out(skb); 287 ip_local_out(skb);
288 return 1;
288 289
289tx_error: 290tx_error:
291 kfree_skb(skb);
290 return 1; 292 return 1;
291} 293}
292 294
@@ -305,11 +307,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
305 } 307 }
306 308
307 header = (struct pptp_gre_header *)(skb->data); 309 header = (struct pptp_gre_header *)(skb->data);
310 headersize = sizeof(*header);
308 311
309 /* test if acknowledgement present */ 312 /* test if acknowledgement present */
310 if (PPTP_GRE_IS_A(header->ver)) { 313 if (PPTP_GRE_IS_A(header->ver)) {
311 __u32 ack = (PPTP_GRE_IS_S(header->flags)) ? 314 __u32 ack;
312 header->ack : header->seq; /* ack in different place if S = 0 */ 315
316 if (!pskb_may_pull(skb, headersize))
317 goto drop;
318 header = (struct pptp_gre_header *)(skb->data);
319
320 /* ack in different place if S = 0 */
321 ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq;
313 322
314 ack = ntohl(ack); 323 ack = ntohl(ack);
315 324
@@ -318,21 +327,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
318 /* also handle sequence number wrap-around */ 327 /* also handle sequence number wrap-around */
319 if (WRAPPED(ack, opt->ack_recv)) 328 if (WRAPPED(ack, opt->ack_recv))
320 opt->ack_recv = ack; 329 opt->ack_recv = ack;
330 } else {
331 headersize -= sizeof(header->ack);
321 } 332 }
322
323 /* test if payload present */ 333 /* test if payload present */
324 if (!PPTP_GRE_IS_S(header->flags)) 334 if (!PPTP_GRE_IS_S(header->flags))
325 goto drop; 335 goto drop;
326 336
327 headersize = sizeof(*header);
328 payload_len = ntohs(header->payload_len); 337 payload_len = ntohs(header->payload_len);
329 seq = ntohl(header->seq); 338 seq = ntohl(header->seq);
330 339
331 /* no ack present? */
332 if (!PPTP_GRE_IS_A(header->ver))
333 headersize -= sizeof(header->ack);
334 /* check for incomplete packet (length smaller than expected) */ 340 /* check for incomplete packet (length smaller than expected) */
335 if (skb->len - headersize < payload_len) 341 if (!pskb_may_pull(skb, headersize + payload_len))
336 goto drop; 342 goto drop;
337 343
338 payload = skb->data + headersize; 344 payload = skb->data + headersize;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index c23667017922..6d657cabb951 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2859,7 +2859,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
2859 rtl_writephy(tp, 0x1f, 0x0004); 2859 rtl_writephy(tp, 0x1f, 0x0004);
2860 rtl_writephy(tp, 0x1f, 0x0007); 2860 rtl_writephy(tp, 0x1f, 0x0007);
2861 rtl_writephy(tp, 0x1e, 0x0020); 2861 rtl_writephy(tp, 0x1e, 0x0020);
2862 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); 2862 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
2863 rtl_writephy(tp, 0x1f, 0x0002); 2863 rtl_writephy(tp, 0x1f, 0x0002);
2864 rtl_writephy(tp, 0x1f, 0x0000); 2864 rtl_writephy(tp, 0x1f, 0x0000);
2865 rtl_writephy(tp, 0x0d, 0x0007); 2865 rtl_writephy(tp, 0x0d, 0x0007);
@@ -3316,6 +3316,37 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
3316 } 3316 }
3317} 3317}
3318 3318
3319static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3320{
3321 void __iomem *ioaddr = tp->mmio_addr;
3322
3323 switch (tp->mac_version) {
3324 case RTL_GIGA_MAC_VER_29:
3325 case RTL_GIGA_MAC_VER_30:
3326 case RTL_GIGA_MAC_VER_32:
3327 case RTL_GIGA_MAC_VER_33:
3328 case RTL_GIGA_MAC_VER_34:
3329 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3330 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3331 break;
3332 default:
3333 break;
3334 }
3335}
3336
3337static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3338{
3339 if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3340 return false;
3341
3342 rtl_writephy(tp, 0x1f, 0x0000);
3343 rtl_writephy(tp, MII_BMCR, 0x0000);
3344
3345 rtl_wol_suspend_quirk(tp);
3346
3347 return true;
3348}
3349
3319static void r810x_phy_power_down(struct rtl8169_private *tp) 3350static void r810x_phy_power_down(struct rtl8169_private *tp)
3320{ 3351{
3321 rtl_writephy(tp, 0x1f, 0x0000); 3352 rtl_writephy(tp, 0x1f, 0x0000);
@@ -3330,18 +3361,8 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
3330 3361
3331static void r810x_pll_power_down(struct rtl8169_private *tp) 3362static void r810x_pll_power_down(struct rtl8169_private *tp)
3332{ 3363{
3333 void __iomem *ioaddr = tp->mmio_addr; 3364 if (rtl_wol_pll_power_down(tp))
3334
3335 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
3336 rtl_writephy(tp, 0x1f, 0x0000);
3337 rtl_writephy(tp, MII_BMCR, 0x0000);
3338
3339 if (tp->mac_version == RTL_GIGA_MAC_VER_29 ||
3340 tp->mac_version == RTL_GIGA_MAC_VER_30)
3341 RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
3342 AcceptMulticast | AcceptMyPhys);
3343 return; 3365 return;
3344 }
3345 3366
3346 r810x_phy_power_down(tp); 3367 r810x_phy_power_down(tp);
3347} 3368}
@@ -3430,17 +3451,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
3430 tp->mac_version == RTL_GIGA_MAC_VER_33) 3451 tp->mac_version == RTL_GIGA_MAC_VER_33)
3431 rtl_ephy_write(ioaddr, 0x19, 0xff64); 3452 rtl_ephy_write(ioaddr, 0x19, 0xff64);
3432 3453
3433 if (__rtl8169_get_wol(tp) & WAKE_ANY) { 3454 if (rtl_wol_pll_power_down(tp))
3434 rtl_writephy(tp, 0x1f, 0x0000);
3435 rtl_writephy(tp, MII_BMCR, 0x0000);
3436
3437 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
3438 tp->mac_version == RTL_GIGA_MAC_VER_33 ||
3439 tp->mac_version == RTL_GIGA_MAC_VER_34)
3440 RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
3441 AcceptMulticast | AcceptMyPhys);
3442 return; 3455 return;
3443 }
3444 3456
3445 r8168_phy_power_down(tp); 3457 r8168_phy_power_down(tp);
3446 3458
@@ -5788,11 +5800,30 @@ static const struct dev_pm_ops rtl8169_pm_ops = {
5788 5800
5789#endif /* !CONFIG_PM */ 5801#endif /* !CONFIG_PM */
5790 5802
5803static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
5804{
5805 void __iomem *ioaddr = tp->mmio_addr;
5806
5807 /* WoL fails with 8168b when the receiver is disabled. */
5808 switch (tp->mac_version) {
5809 case RTL_GIGA_MAC_VER_11:
5810 case RTL_GIGA_MAC_VER_12:
5811 case RTL_GIGA_MAC_VER_17:
5812 pci_clear_master(tp->pci_dev);
5813
5814 RTL_W8(ChipCmd, CmdRxEnb);
5815 /* PCI commit */
5816 RTL_R8(ChipCmd);
5817 break;
5818 default:
5819 break;
5820 }
5821}
5822
5791static void rtl_shutdown(struct pci_dev *pdev) 5823static void rtl_shutdown(struct pci_dev *pdev)
5792{ 5824{
5793 struct net_device *dev = pci_get_drvdata(pdev); 5825 struct net_device *dev = pci_get_drvdata(pdev);
5794 struct rtl8169_private *tp = netdev_priv(dev); 5826 struct rtl8169_private *tp = netdev_priv(dev);
5795 void __iomem *ioaddr = tp->mmio_addr;
5796 5827
5797 rtl8169_net_suspend(dev); 5828 rtl8169_net_suspend(dev);
5798 5829
@@ -5806,16 +5837,9 @@ static void rtl_shutdown(struct pci_dev *pdev)
5806 spin_unlock_irq(&tp->lock); 5837 spin_unlock_irq(&tp->lock);
5807 5838
5808 if (system_state == SYSTEM_POWER_OFF) { 5839 if (system_state == SYSTEM_POWER_OFF) {
5809 /* WoL fails with 8168b when the receiver is disabled. */ 5840 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
5810 if ((tp->mac_version == RTL_GIGA_MAC_VER_11 || 5841 rtl_wol_suspend_quirk(tp);
5811 tp->mac_version == RTL_GIGA_MAC_VER_12 || 5842 rtl_wol_shutdown_quirk(tp);
5812 tp->mac_version == RTL_GIGA_MAC_VER_17) &&
5813 (tp->features & RTL_FEATURE_WOL)) {
5814 pci_clear_master(pdev);
5815
5816 RTL_W8(ChipCmd, CmdRxEnb);
5817 /* PCI commit */
5818 RTL_R8(ChipCmd);
5819 } 5843 }
5820 5844
5821 pci_wake_from_d3(pdev, true); 5845 pci_wake_from_d3(pdev, true);
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index b9016a30cdc5..c90ddb61cc56 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -26,6 +26,7 @@
26 * LAN9215, LAN9216, LAN9217, LAN9218 26 * LAN9215, LAN9216, LAN9217, LAN9218
27 * LAN9210, LAN9211 27 * LAN9210, LAN9211
28 * LAN9220, LAN9221 28 * LAN9220, LAN9221
29 * LAN89218
29 * 30 *
30 */ 31 */
31 32
@@ -1983,6 +1984,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
1983 case 0x01170000: 1984 case 0x01170000:
1984 case 0x01160000: 1985 case 0x01160000:
1985 case 0x01150000: 1986 case 0x01150000:
1987 case 0x218A0000:
1986 /* LAN911[5678] family */ 1988 /* LAN911[5678] family */
1987 pdata->generation = pdata->idrev & 0x0000FFFF; 1989 pdata->generation = pdata->idrev & 0x0000FFFF;
1988 break; 1990 break;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 4a1374df6084..c11a2b8327f3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -15577,7 +15577,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
15577 15577
15578 cancel_work_sync(&tp->reset_task); 15578 cancel_work_sync(&tp->reset_task);
15579 15579
15580 if (!tg3_flag(tp, USE_PHYLIB)) { 15580 if (tg3_flag(tp, USE_PHYLIB)) {
15581 tg3_phy_fini(tp); 15581 tg3_phy_fini(tp);
15582 tg3_mdio_fini(tp); 15582 tg3_mdio_fini(tp);
15583 } 15583 }
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index c9e3dc024bc3..16ad97df5ba6 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
1769 sas_disable_routing(parent, phy->attached_sas_addr); 1769 sas_disable_routing(parent, phy->attached_sas_addr);
1770 } 1770 }
1771 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); 1771 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
1772 sas_port_delete_phy(phy->port, phy->phy); 1772 if (phy->port) {
1773 if (phy->port->num_phys == 0) 1773 sas_port_delete_phy(phy->port, phy->phy);
1774 sas_port_delete(phy->port); 1774 if (phy->port->num_phys == 0)
1775 phy->port = NULL; 1775 sas_port_delete(phy->port);
1776 phy->port = NULL;
1777 }
1776} 1778}
1777 1779
1778static int sas_discover_bfs_by_root_level(struct domain_device *root, 1780static int sas_discover_bfs_by_root_level(struct domain_device *root,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 4cace3f20c04..1e69527f1e4e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1328,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1328 qla2x00_sp_compl(ha, sp); 1328 qla2x00_sp_compl(ha, sp);
1329 } else { 1329 } else {
1330 ctx = sp->ctx; 1330 ctx = sp->ctx;
1331 if (ctx->type == SRB_LOGIN_CMD || 1331 if (ctx->type == SRB_ELS_CMD_RPT ||
1332 ctx->type == SRB_LOGOUT_CMD) { 1332 ctx->type == SRB_ELS_CMD_HST ||
1333 ctx->u.iocb_cmd->free(sp); 1333 ctx->type == SRB_CT_CMD) {
1334 } else {
1335 struct fc_bsg_job *bsg_job = 1334 struct fc_bsg_job *bsg_job =
1336 ctx->u.bsg_job; 1335 ctx->u.bsg_job;
1337 if (bsg_job->request->msgcode 1336 if (bsg_job->request->msgcode
@@ -1343,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1343 kfree(sp->ctx); 1342 kfree(sp->ctx);
1344 mempool_free(sp, 1343 mempool_free(sp,
1345 ha->srb_mempool); 1344 ha->srb_mempool);
1345 } else {
1346 ctx->u.iocb_cmd->free(sp);
1346 } 1347 }
1347 } 1348 }
1348 } 1349 }
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 1a7c19ae766f..8b307b428791 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -411,7 +411,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
411 skb->protocol = eth_type_trans(skb, dev); 411 skb->protocol = eth_type_trans(skb, dev);
412 skb->dev = dev; 412 skb->dev = dev;
413 413
414 if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error)) 414 if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc ||
415 work->word2.s.L4_error || !work->word2.s.tcp_or_udp))
415 skb->ip_summed = CHECKSUM_NONE; 416 skb->ip_summed = CHECKSUM_NONE;
416 else 417 else
417 skb->ip_summed = CHECKSUM_UNNECESSARY; 418 skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 58cf279ed879..bc95f52cad8b 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -478,8 +478,10 @@ lqasc_set_termios(struct uart_port *port,
478 spin_unlock_irqrestore(&ltq_asc_lock, flags); 478 spin_unlock_irqrestore(&ltq_asc_lock, flags);
479 479
480 /* Don't rewrite B0 */ 480 /* Don't rewrite B0 */
481 if (tty_termios_baud_rate(new)) 481 if (tty_termios_baud_rate(new))
482 tty_termios_encode_baud_rate(new, baud, baud); 482 tty_termios_encode_baud_rate(new, baud, baud);
483
484 uart_update_timeout(port, cflag, baud);
483} 485}
484 486
485static const char* 487static const char*
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 538f65a79ec5..dae5dfe41ba5 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1047,7 +1047,16 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1047 if (!max_to_defrag) 1047 if (!max_to_defrag)
1048 max_to_defrag = last_index - 1; 1048 max_to_defrag = last_index - 1;
1049 1049
1050 while (i <= last_index && defrag_count < max_to_defrag) { 1050 /*
1051 * make writeback start from i, so the defrag range can be
1052 * written sequentially.
1053 */
1054 if (i < inode->i_mapping->writeback_index)
1055 inode->i_mapping->writeback_index = i;
1056
1057 while (i <= last_index && defrag_count < max_to_defrag &&
1058 (i < (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
1059 PAGE_CACHE_SHIFT)) {
1051 /* 1060 /*
1052 * make sure we stop running if someone unmounts 1061 * make sure we stop running if someone unmounts
1053 * the FS 1062 * the FS
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f4af4cc37500..71beb0201970 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2018,7 +2018,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
2018 warned_on_ntlm = true; 2018 warned_on_ntlm = true;
2019 cERROR(1, "default security mechanism requested. The default " 2019 cERROR(1, "default security mechanism requested. The default "
2020 "security mechanism will be upgraded from ntlm to " 2020 "security mechanism will be upgraded from ntlm to "
2021 "ntlmv2 in kernel release 3.1"); 2021 "ntlmv2 in kernel release 3.2");
2022 } 2022 }
2023 ses->overrideSecFlg = volume_info->secFlg; 2023 ses->overrideSecFlg = volume_info->secFlg;
2024 2024
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index cac2ecfa6746..ef43fce519a1 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -629,7 +629,7 @@ xfs_buf_item_push(
629 * the xfsbufd to get this buffer written. We have to unlock the buffer 629 * the xfsbufd to get this buffer written. We have to unlock the buffer
630 * to allow the xfsbufd to write it, too. 630 * to allow the xfsbufd to write it, too.
631 */ 631 */
632STATIC void 632STATIC bool
633xfs_buf_item_pushbuf( 633xfs_buf_item_pushbuf(
634 struct xfs_log_item *lip) 634 struct xfs_log_item *lip)
635{ 635{
@@ -643,6 +643,7 @@ xfs_buf_item_pushbuf(
643 643
644 xfs_buf_delwri_promote(bp); 644 xfs_buf_delwri_promote(bp);
645 xfs_buf_relse(bp); 645 xfs_buf_relse(bp);
646 return true;
646} 647}
647 648
648STATIC void 649STATIC void
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 9e0e2fa3f2c8..bb3f71d236d2 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait(
183 * search the buffer cache can be a time consuming thing, and AIL lock is a 183 * search the buffer cache can be a time consuming thing, and AIL lock is a
184 * spinlock. 184 * spinlock.
185 */ 185 */
186STATIC void 186STATIC bool
187xfs_qm_dquot_logitem_pushbuf( 187xfs_qm_dquot_logitem_pushbuf(
188 struct xfs_log_item *lip) 188 struct xfs_log_item *lip)
189{ 189{
190 struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip); 190 struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
191 struct xfs_dquot *dqp = qlip->qli_dquot; 191 struct xfs_dquot *dqp = qlip->qli_dquot;
192 struct xfs_buf *bp; 192 struct xfs_buf *bp;
193 bool ret = true;
193 194
194 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 195 ASSERT(XFS_DQ_IS_LOCKED(dqp));
195 196
@@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf(
201 if (completion_done(&dqp->q_flush) || 202 if (completion_done(&dqp->q_flush) ||
202 !(lip->li_flags & XFS_LI_IN_AIL)) { 203 !(lip->li_flags & XFS_LI_IN_AIL)) {
203 xfs_dqunlock(dqp); 204 xfs_dqunlock(dqp);
204 return; 205 return true;
205 } 206 }
206 207
207 bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno, 208 bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
208 dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); 209 dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
209 xfs_dqunlock(dqp); 210 xfs_dqunlock(dqp);
210 if (!bp) 211 if (!bp)
211 return; 212 return true;
212 if (XFS_BUF_ISDELAYWRITE(bp)) 213 if (XFS_BUF_ISDELAYWRITE(bp))
213 xfs_buf_delwri_promote(bp); 214 xfs_buf_delwri_promote(bp);
215 if (xfs_buf_ispinned(bp))
216 ret = false;
214 xfs_buf_relse(bp); 217 xfs_buf_relse(bp);
218 return ret;
215} 219}
216 220
217/* 221/*
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 588406dc6a35..836ad80d4f2b 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -708,13 +708,14 @@ xfs_inode_item_committed(
708 * marked delayed write. If that's the case, we'll promote it and that will 708 * marked delayed write. If that's the case, we'll promote it and that will
709 * allow the caller to write the buffer by triggering the xfsbufd to run. 709 * allow the caller to write the buffer by triggering the xfsbufd to run.
710 */ 710 */
711STATIC void 711STATIC bool
712xfs_inode_item_pushbuf( 712xfs_inode_item_pushbuf(
713 struct xfs_log_item *lip) 713 struct xfs_log_item *lip)
714{ 714{
715 struct xfs_inode_log_item *iip = INODE_ITEM(lip); 715 struct xfs_inode_log_item *iip = INODE_ITEM(lip);
716 struct xfs_inode *ip = iip->ili_inode; 716 struct xfs_inode *ip = iip->ili_inode;
717 struct xfs_buf *bp; 717 struct xfs_buf *bp;
718 bool ret = true;
718 719
719 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); 720 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
720 721
@@ -725,7 +726,7 @@ xfs_inode_item_pushbuf(
725 if (completion_done(&ip->i_flush) || 726 if (completion_done(&ip->i_flush) ||
726 !(lip->li_flags & XFS_LI_IN_AIL)) { 727 !(lip->li_flags & XFS_LI_IN_AIL)) {
727 xfs_iunlock(ip, XFS_ILOCK_SHARED); 728 xfs_iunlock(ip, XFS_ILOCK_SHARED);
728 return; 729 return true;
729 } 730 }
730 731
731 bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno, 732 bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
@@ -733,10 +734,13 @@ xfs_inode_item_pushbuf(
733 734
734 xfs_iunlock(ip, XFS_ILOCK_SHARED); 735 xfs_iunlock(ip, XFS_ILOCK_SHARED);
735 if (!bp) 736 if (!bp)
736 return; 737 return true;
737 if (XFS_BUF_ISDELAYWRITE(bp)) 738 if (XFS_BUF_ISDELAYWRITE(bp))
738 xfs_buf_delwri_promote(bp); 739 xfs_buf_delwri_promote(bp);
740 if (xfs_buf_ispinned(bp))
741 ret = false;
739 xfs_buf_relse(bp); 742 xfs_buf_relse(bp);
743 return ret;
740} 744}
741 745
742/* 746/*
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 1e8a45e74c3e..828662f70d64 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -68,6 +68,8 @@
68#include <linux/ctype.h> 68#include <linux/ctype.h>
69#include <linux/writeback.h> 69#include <linux/writeback.h>
70#include <linux/capability.h> 70#include <linux/capability.h>
71#include <linux/kthread.h>
72#include <linux/freezer.h>
71#include <linux/list_sort.h> 73#include <linux/list_sort.h>
72 74
73#include <asm/page.h> 75#include <asm/page.h>
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 2366c54cc4fa..5cf06b85fd9d 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1652,24 +1652,13 @@ xfs_init_workqueues(void)
1652 */ 1652 */
1653 xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8); 1653 xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
1654 if (!xfs_syncd_wq) 1654 if (!xfs_syncd_wq)
1655 goto out; 1655 return -ENOMEM;
1656
1657 xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
1658 if (!xfs_ail_wq)
1659 goto out_destroy_syncd;
1660
1661 return 0; 1656 return 0;
1662
1663out_destroy_syncd:
1664 destroy_workqueue(xfs_syncd_wq);
1665out:
1666 return -ENOMEM;
1667} 1657}
1668 1658
1669STATIC void 1659STATIC void
1670xfs_destroy_workqueues(void) 1660xfs_destroy_workqueues(void)
1671{ 1661{
1672 destroy_workqueue(xfs_ail_wq);
1673 destroy_workqueue(xfs_syncd_wq); 1662 destroy_workqueue(xfs_syncd_wq);
1674} 1663}
1675 1664
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 06a9759b6352..53597f4db9b5 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -350,7 +350,7 @@ typedef struct xfs_item_ops {
350 void (*iop_unlock)(xfs_log_item_t *); 350 void (*iop_unlock)(xfs_log_item_t *);
351 xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t); 351 xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
352 void (*iop_push)(xfs_log_item_t *); 352 void (*iop_push)(xfs_log_item_t *);
353 void (*iop_pushbuf)(xfs_log_item_t *); 353 bool (*iop_pushbuf)(xfs_log_item_t *);
354 void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); 354 void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
355} xfs_item_ops_t; 355} xfs_item_ops_t;
356 356
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index c15aa29fa169..3a1e7ca54c2d 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,8 +28,6 @@
28#include "xfs_trans_priv.h" 28#include "xfs_trans_priv.h"
29#include "xfs_error.h" 29#include "xfs_error.h"
30 30
31struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
32
33#ifdef DEBUG 31#ifdef DEBUG
34/* 32/*
35 * Check that the list is sorted as it should be. 33 * Check that the list is sorted as it should be.
@@ -356,16 +354,10 @@ xfs_ail_delete(
356 xfs_trans_ail_cursor_clear(ailp, lip); 354 xfs_trans_ail_cursor_clear(ailp, lip);
357} 355}
358 356
359/* 357static long
360 * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself 358xfsaild_push(
361 * to run at a later time if there is more work to do to complete the push. 359 struct xfs_ail *ailp)
362 */
363STATIC void
364xfs_ail_worker(
365 struct work_struct *work)
366{ 360{
367 struct xfs_ail *ailp = container_of(to_delayed_work(work),
368 struct xfs_ail, xa_work);
369 xfs_mount_t *mp = ailp->xa_mount; 361 xfs_mount_t *mp = ailp->xa_mount;
370 struct xfs_ail_cursor cur; 362 struct xfs_ail_cursor cur;
371 xfs_log_item_t *lip; 363 xfs_log_item_t *lip;
@@ -427,8 +419,13 @@ xfs_ail_worker(
427 419
428 case XFS_ITEM_PUSHBUF: 420 case XFS_ITEM_PUSHBUF:
429 XFS_STATS_INC(xs_push_ail_pushbuf); 421 XFS_STATS_INC(xs_push_ail_pushbuf);
430 IOP_PUSHBUF(lip); 422
431 ailp->xa_last_pushed_lsn = lsn; 423 if (!IOP_PUSHBUF(lip)) {
424 stuck++;
425 flush_log = 1;
426 } else {
427 ailp->xa_last_pushed_lsn = lsn;
428 }
432 push_xfsbufd = 1; 429 push_xfsbufd = 1;
433 break; 430 break;
434 431
@@ -440,7 +437,6 @@ xfs_ail_worker(
440 437
441 case XFS_ITEM_LOCKED: 438 case XFS_ITEM_LOCKED:
442 XFS_STATS_INC(xs_push_ail_locked); 439 XFS_STATS_INC(xs_push_ail_locked);
443 ailp->xa_last_pushed_lsn = lsn;
444 stuck++; 440 stuck++;
445 break; 441 break;
446 442
@@ -501,20 +497,6 @@ out_done:
501 /* We're past our target or empty, so idle */ 497 /* We're past our target or empty, so idle */
502 ailp->xa_last_pushed_lsn = 0; 498 ailp->xa_last_pushed_lsn = 0;
503 499
504 /*
505 * We clear the XFS_AIL_PUSHING_BIT first before checking
506 * whether the target has changed. If the target has changed,
507 * this pushes the requeue race directly onto the result of the
508 * atomic test/set bit, so we are guaranteed that either the
509 * the pusher that changed the target or ourselves will requeue
510 * the work (but not both).
511 */
512 clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
513 smp_rmb();
514 if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
515 test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
516 return;
517
518 tout = 50; 500 tout = 50;
519 } else if (XFS_LSN_CMP(lsn, target) >= 0) { 501 } else if (XFS_LSN_CMP(lsn, target) >= 0) {
520 /* 502 /*
@@ -537,9 +519,30 @@ out_done:
537 tout = 20; 519 tout = 20;
538 } 520 }
539 521
540 /* There is more to do, requeue us. */ 522 return tout;
541 queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 523}
542 msecs_to_jiffies(tout)); 524
525static int
526xfsaild(
527 void *data)
528{
529 struct xfs_ail *ailp = data;
530 long tout = 0; /* milliseconds */
531
532 while (!kthread_should_stop()) {
533 if (tout && tout <= 20)
534 __set_current_state(TASK_KILLABLE);
535 else
536 __set_current_state(TASK_INTERRUPTIBLE);
537 schedule_timeout(tout ?
538 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
539
540 try_to_freeze();
541
542 tout = xfsaild_push(ailp);
543 }
544
545 return 0;
543} 546}
544 547
545/* 548/*
@@ -574,8 +577,9 @@ xfs_ail_push(
574 */ 577 */
575 smp_wmb(); 578 smp_wmb();
576 xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn); 579 xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
577 if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) 580 smp_wmb();
578 queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); 581
582 wake_up_process(ailp->xa_task);
579} 583}
580 584
581/* 585/*
@@ -813,9 +817,18 @@ xfs_trans_ail_init(
813 INIT_LIST_HEAD(&ailp->xa_ail); 817 INIT_LIST_HEAD(&ailp->xa_ail);
814 INIT_LIST_HEAD(&ailp->xa_cursors); 818 INIT_LIST_HEAD(&ailp->xa_cursors);
815 spin_lock_init(&ailp->xa_lock); 819 spin_lock_init(&ailp->xa_lock);
816 INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker); 820
821 ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
822 ailp->xa_mount->m_fsname);
823 if (IS_ERR(ailp->xa_task))
824 goto out_free_ailp;
825
817 mp->m_ail = ailp; 826 mp->m_ail = ailp;
818 return 0; 827 return 0;
828
829out_free_ailp:
830 kmem_free(ailp);
831 return ENOMEM;
819} 832}
820 833
821void 834void
@@ -824,6 +837,6 @@ xfs_trans_ail_destroy(
824{ 837{
825 struct xfs_ail *ailp = mp->m_ail; 838 struct xfs_ail *ailp = mp->m_ail;
826 839
827 cancel_delayed_work_sync(&ailp->xa_work); 840 kthread_stop(ailp->xa_task);
828 kmem_free(ailp); 841 kmem_free(ailp);
829} 842}
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 212946b97239..22750b5e4a8f 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -64,23 +64,17 @@ struct xfs_ail_cursor {
64 */ 64 */
65struct xfs_ail { 65struct xfs_ail {
66 struct xfs_mount *xa_mount; 66 struct xfs_mount *xa_mount;
67 struct task_struct *xa_task;
67 struct list_head xa_ail; 68 struct list_head xa_ail;
68 xfs_lsn_t xa_target; 69 xfs_lsn_t xa_target;
69 struct list_head xa_cursors; 70 struct list_head xa_cursors;
70 spinlock_t xa_lock; 71 spinlock_t xa_lock;
71 struct delayed_work xa_work;
72 xfs_lsn_t xa_last_pushed_lsn; 72 xfs_lsn_t xa_last_pushed_lsn;
73 unsigned long xa_flags;
74}; 73};
75 74
76#define XFS_AIL_PUSHING_BIT 0
77
78/* 75/*
79 * From xfs_trans_ail.c 76 * From xfs_trans_ail.c
80 */ 77 */
81
82extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
83
84void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, 78void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
85 struct xfs_ail_cursor *cur, 79 struct xfs_ail_cursor *cur,
86 struct xfs_log_item **log_items, int nr_items, 80 struct xfs_log_item **log_items, int nr_items,
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 3fa1f3d90ce0..99e3e50b5c57 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -197,6 +197,11 @@ struct dm_target {
197 * whether or not its underlying devices have support. 197 * whether or not its underlying devices have support.
198 */ 198 */
199 unsigned discards_supported:1; 199 unsigned discards_supported:1;
200
201 /*
202 * Set if this target does not return zeroes on discarded blocks.
203 */
204 unsigned discard_zeroes_data_unsupported:1;
200}; 205};
201 206
202/* Each target can link one of these into the table */ 207/* Each target can link one of these into the table */
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 1aaf915656f3..8fa4430f99c1 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -900,6 +900,7 @@ struct netns_ipvs {
900 volatile int sync_state; 900 volatile int sync_state;
901 volatile int master_syncid; 901 volatile int master_syncid;
902 volatile int backup_syncid; 902 volatile int backup_syncid;
903 struct mutex sync_mutex;
903 /* multicast interface name */ 904 /* multicast interface name */
904 char master_mcast_ifn[IP_VS_IFNAME_MAXLEN]; 905 char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
905 char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; 906 char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 673a024c6b2a..5f097ca7d5c5 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -66,40 +66,34 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
66 return 0; 66 return 0;
67} 67}
68 68
69static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh) 69/* Slow-path computation of checksum. Socket is locked. */
70static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
70{ 71{
72 const struct udp_sock *up = udp_sk(skb->sk);
71 int cscov = up->len; 73 int cscov = up->len;
74 __wsum csum = 0;
72 75
73 /* 76 if (up->pcflag & UDPLITE_SEND_CC) {
74 * Sender has set `partial coverage' option on UDP-Lite socket 77 /*
75 */ 78 * Sender has set `partial coverage' option on UDP-Lite socket.
76 if (up->pcflag & UDPLITE_SEND_CC) { 79 * The special case "up->pcslen == 0" signifies full coverage.
80 */
77 if (up->pcslen < up->len) { 81 if (up->pcslen < up->len) {
78 /* up->pcslen == 0 means that full coverage is required, 82 if (0 < up->pcslen)
79 * partial coverage only if 0 < up->pcslen < up->len */ 83 cscov = up->pcslen;
80 if (0 < up->pcslen) { 84 udp_hdr(skb)->len = htons(up->pcslen);
81 cscov = up->pcslen;
82 }
83 uh->len = htons(up->pcslen);
84 } 85 }
85 /* 86 /*
86 * NOTE: Causes for the error case `up->pcslen > up->len': 87 * NOTE: Causes for the error case `up->pcslen > up->len':
87 * (i) Application error (will not be penalized). 88 * (i) Application error (will not be penalized).
88 * (ii) Payload too big for send buffer: data is split 89 * (ii) Payload too big for send buffer: data is split
89 * into several packets, each with its own header. 90 * into several packets, each with its own header.
90 * In this case (e.g. last segment), coverage may 91 * In this case (e.g. last segment), coverage may
91 * exceed packet length. 92 * exceed packet length.
92 * Since packets with coverage length > packet length are 93 * Since packets with coverage length > packet length are
93 * illegal, we fall back to the defaults here. 94 * illegal, we fall back to the defaults here.
94 */ 95 */
95 } 96 }
96 return cscov;
97}
98
99static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
100{
101 int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
102 __wsum csum = 0;
103 97
104 skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ 98 skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
105 99
@@ -115,16 +109,21 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
115 return csum; 109 return csum;
116} 110}
117 111
112/* Fast-path computation of checksum. Socket may not be locked. */
118static inline __wsum udplite_csum(struct sk_buff *skb) 113static inline __wsum udplite_csum(struct sk_buff *skb)
119{ 114{
120 struct sock *sk = skb->sk; 115 const struct udp_sock *up = udp_sk(skb->sk);
121 int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
122 const int off = skb_transport_offset(skb); 116 const int off = skb_transport_offset(skb);
123 const int len = skb->len - off; 117 int len = skb->len - off;
124 118
119 if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
120 if (0 < up->pcslen)
121 len = up->pcslen;
122 udp_hdr(skb)->len = htons(up->pcslen);
123 }
125 skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ 124 skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
126 125
127 return skb_checksum(skb, off, min(cscov, len), 0); 126 return skb_checksum(skb, off, len, 0);
128} 127}
129 128
130extern void udplite4_register(void); 129extern void udplite4_register(void);
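
The refactor folds the old udplite_sender_cscov() helper into both callers, and the coverage rule it implements is the one spelled out in the comments: a configured coverage of 0 (or one at least as large as the payload) means the checksum covers the whole datagram; otherwise the checksum covers pcslen bytes and that value is also written into the UDP-Lite length/coverage field. A small userspace sketch of that selection logic (udplite_coverage is illustrative, not a kernel function):

#include <stdio.h>

static unsigned int udplite_coverage(unsigned int pcslen, unsigned int payload_len)
{
	if (pcslen == 0 || pcslen >= payload_len)
		return payload_len;	/* full coverage */
	return pcslen;			/* partial coverage */
}

int main(void)
{
	printf("%u\n", udplite_coverage(0, 1000));	/* 1000: zero means full coverage */
	printf("%u\n", udplite_coverage(20, 1000));	/* 20: partial coverage */
	printf("%u\n", udplite_coverage(2000, 1000));	/* 1000: oversized coverage falls back */
	return 0;
}
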
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c8008dd58ef2..640ded8f5c48 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -274,9 +274,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
274 struct task_cputime sum; 274 struct task_cputime sum;
275 unsigned long flags; 275 unsigned long flags;
276 276
277 spin_lock_irqsave(&cputimer->lock, flags);
278 if (!cputimer->running) { 277 if (!cputimer->running) {
279 cputimer->running = 1;
280 /* 278 /*
281 * The POSIX timer interface allows for absolute time expiry 279 * The POSIX timer interface allows for absolute time expiry
282 * values through the TIMER_ABSTIME flag, therefore we have 280 * values through the TIMER_ABSTIME flag, therefore we have
@@ -284,8 +282,11 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
284 * it. 282 * it.
285 */ 283 */
286 thread_group_cputime(tsk, &sum); 284 thread_group_cputime(tsk, &sum);
285 spin_lock_irqsave(&cputimer->lock, flags);
286 cputimer->running = 1;
287 update_gt_cputime(&cputimer->cputime, &sum); 287 update_gt_cputime(&cputimer->cputime, &sum);
288 } 288 } else
289 spin_lock_irqsave(&cputimer->lock, flags);
289 *times = cputimer->cputime; 290 *times = cputimer->cputime;
290 spin_unlock_irqrestore(&cputimer->lock, flags); 291 spin_unlock_irqrestore(&cputimer->lock, flags);
291} 292}
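
The expensive group-wide summation in thread_group_cputime() is now done before cputimer->lock is taken, and the lock is acquired only to publish the result, so it is no longer held across the summation and whatever locks that summation itself needs. Two initialisers racing through the unlocked check are harmless because only the larger value is kept, which is what update_gt_cputime() does. A pthreads sketch of that "compute outside, publish under the lock" shape (names are illustrative):

#include <pthread.h>

struct group_timer {
	pthread_mutex_t lock;
	int running;
	unsigned long long cputime;
};

static unsigned long long sample(struct group_timer *gt,
				 unsigned long long (*slow_sum)(void))
{
	unsigned long long val;

	if (!gt->running) {
		unsigned long long sum = slow_sum();	/* may take other locks */

		pthread_mutex_lock(&gt->lock);
		gt->running = 1;
		if (sum > gt->cputime)		/* keep the larger value, as update_gt_cputime() does */
			gt->cputime = sum;
	} else {
		pthread_mutex_lock(&gt->lock);
	}
	val = gt->cputime;
	pthread_mutex_unlock(&gt->lock);
	return val;
}
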
diff --git a/kernel/sys.c b/kernel/sys.c
index 18ee1d2f6474..1dbbe695a5ef 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1172,7 +1172,7 @@ DECLARE_RWSEM(uts_sem);
1172static int override_release(char __user *release, int len) 1172static int override_release(char __user *release, int len)
1173{ 1173{
1174 int ret = 0; 1174 int ret = 0;
1175 char buf[len]; 1175 char buf[65];
1176 1176
1177 if (current->personality & UNAME26) { 1177 if (current->personality & UNAME26) {
1178 char *rest = UTS_RELEASE; 1178 char *rest = UTS_RELEASE;
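
override_release() rewrites the release string for binaries running with the UNAME26 personality; its temporary buffer is now a fixed 65-byte array (the size of the utsname release field, terminating NUL included) instead of a variable-length array sized by the caller-supplied len. A userspace sketch of the same "fixed bound, then truncate to the caller's field" shape (copy_release and the strings are illustrative):

#include <stdio.h>

#define RELEASE_LEN 65	/* size of the utsname release field, NUL included */

static void copy_release(char *dst, size_t dst_len, const char *release)
{
	char buf[RELEASE_LEN];	/* compile-time bound instead of a caller-sized VLA */

	snprintf(buf, sizeof(buf), "%s", release);	/* build or rewrite the string here */
	snprintf(dst, dst_len, "%s", buf);		/* then truncate to the caller's field */
}

int main(void)
{
	char old_style[9];	/* e.g. a short release field of an old uname variant */

	copy_release(old_style, sizeof(old_style), "2.6.40+5");
	puts(old_style);
	return 0;
}
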
diff --git a/mm/migrate.c b/mm/migrate.c
index 666e4e677414..14d0a6a632f6 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -120,10 +120,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
120 120
121 ptep = pte_offset_map(pmd, addr); 121 ptep = pte_offset_map(pmd, addr);
122 122
123 if (!is_swap_pte(*ptep)) { 123 /*
124 pte_unmap(ptep); 124 * Peek to check is_swap_pte() before taking ptlock? No, we
125 goto out; 125 * can race mremap's move_ptes(), which skips anon_vma lock.
126 } 126 */
127 127
128 ptl = pte_lockptr(mm, pmd); 128 ptl = pte_lockptr(mm, pmd);
129 } 129 }
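
The unlocked is_swap_pte() test is dropped because, as the replacement comment says, it can race mremap's move_ptes(), which skips the anon_vma lock; the entry is therefore only examined once the PTE lock has been taken. A small pthreads sketch of the general rule (names are illustrative): state that a concurrent mover may rewrite is only meaningful when inspected under the lock the mover also honours.

#include <pthread.h>
#include <stdbool.h>

struct slot {
	pthread_mutex_t lock;
	bool has_entry;
	int entry;
};

/*
 * Peeking at s->has_entry before taking the lock (as the removed code
 * did with is_swap_pte()) can miss an entry a concurrent mover is busy
 * re-installing; the race-free version checks only under the lock.
 */
static bool take_entry(struct slot *s, int *out)
{
	bool found = false;

	pthread_mutex_lock(&s->lock);
	if (s->has_entry) {
		*out = s->entry;
		s->has_entry = false;
		found = true;
	}
	pthread_mutex_unlock(&s->lock);
	return found;
}
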
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 61f1f623091d..e8292369cdcf 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -26,6 +26,8 @@
26 26
27/* Bluetooth L2CAP sockets. */ 27/* Bluetooth L2CAP sockets. */
28 28
29#include <linux/security.h>
30
29#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h> 32#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h> 33#include <net/bluetooth/l2cap.h>
@@ -933,6 +935,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
933 chan->force_reliable = pchan->force_reliable; 935 chan->force_reliable = pchan->force_reliable;
934 chan->flushable = pchan->flushable; 936 chan->flushable = pchan->flushable;
935 chan->force_active = pchan->force_active; 937 chan->force_active = pchan->force_active;
938
939 security_sk_clone(parent, sk);
936 } else { 940 } else {
937 941
938 switch (sk->sk_type) { 942 switch (sk->sk_type) {
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 482722bbc7a0..5417f6127323 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -42,6 +42,7 @@
42#include <linux/device.h> 42#include <linux/device.h>
43#include <linux/debugfs.h> 43#include <linux/debugfs.h>
44#include <linux/seq_file.h> 44#include <linux/seq_file.h>
45#include <linux/security.h>
45#include <net/sock.h> 46#include <net/sock.h>
46 47
47#include <asm/system.h> 48#include <asm/system.h>
@@ -264,6 +265,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
264 265
265 pi->sec_level = rfcomm_pi(parent)->sec_level; 266 pi->sec_level = rfcomm_pi(parent)->sec_level;
266 pi->role_switch = rfcomm_pi(parent)->role_switch; 267 pi->role_switch = rfcomm_pi(parent)->role_switch;
268
269 security_sk_clone(parent, sk);
267 } else { 270 } else {
268 pi->dlc->defer_setup = 0; 271 pi->dlc->defer_setup = 0;
269 272
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 8270f05e3f1f..a324b009e34b 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -41,6 +41,7 @@
41#include <linux/debugfs.h> 41#include <linux/debugfs.h>
42#include <linux/seq_file.h> 42#include <linux/seq_file.h>
43#include <linux/list.h> 43#include <linux/list.h>
44#include <linux/security.h>
44#include <net/sock.h> 45#include <net/sock.h>
45 46
46#include <asm/system.h> 47#include <asm/system.h>
@@ -403,8 +404,10 @@ static void sco_sock_init(struct sock *sk, struct sock *parent)
403{ 404{
404 BT_DBG("sk %p", sk); 405 BT_DBG("sk %p", sk);
405 406
406 if (parent) 407 if (parent) {
407 sk->sk_type = parent->sk_type; 408 sk->sk_type = parent->sk_type;
409 security_sk_clone(parent, sk);
410 }
408} 411}
409 412
410static struct proto sco_proto = { 413static struct proto sco_proto = {
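
L2CAP, RFCOMM and SCO each gain a security_sk_clone(parent, sk) call at the point where a child socket is initialised from its listening parent, so LSM per-socket state is inherited along with the protocol options; the hook itself is exported further down (security/security.c) so the modular Bluetooth code can link against it. A kernel-style sketch of the pattern, with example_sock_init as an illustrative name:

#include <linux/security.h>
#include <net/sock.h>

static void example_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent) {
		sk->sk_type = parent->sk_type;
		/* copy protocol-private options from the parent here */
		security_sk_clone(parent, sk);	/* inherit the LSM state too */
	}
}
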
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 32b8f9f7f79e..ff3ed6086ce1 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -91,7 +91,6 @@ static int br_dev_open(struct net_device *dev)
91{ 91{
92 struct net_bridge *br = netdev_priv(dev); 92 struct net_bridge *br = netdev_priv(dev);
93 93
94 netif_carrier_off(dev);
95 netdev_update_features(dev); 94 netdev_update_features(dev);
96 netif_start_queue(dev); 95 netif_start_queue(dev);
97 br_stp_enable_bridge(br); 96 br_stp_enable_bridge(br);
@@ -108,8 +107,6 @@ static int br_dev_stop(struct net_device *dev)
108{ 107{
109 struct net_bridge *br = netdev_priv(dev); 108 struct net_bridge *br = netdev_priv(dev);
110 109
111 netif_carrier_off(dev);
112
113 br_stp_disable_bridge(br); 110 br_stp_disable_bridge(br);
114 br_multicast_stop(br); 111 br_multicast_stop(br);
115 112
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index e73815456adf..1d420f64ff27 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -161,9 +161,10 @@ static void del_nbp(struct net_bridge_port *p)
161 call_rcu(&p->rcu, destroy_nbp_rcu); 161 call_rcu(&p->rcu, destroy_nbp_rcu);
162} 162}
163 163
164/* called with RTNL */ 164/* Delete bridge device */
165static void del_br(struct net_bridge *br, struct list_head *head) 165void br_dev_delete(struct net_device *dev, struct list_head *head)
166{ 166{
167 struct net_bridge *br = netdev_priv(dev);
167 struct net_bridge_port *p, *n; 168 struct net_bridge_port *p, *n;
168 169
169 list_for_each_entry_safe(p, n, &br->port_list, list) { 170 list_for_each_entry_safe(p, n, &br->port_list, list) {
@@ -268,7 +269,7 @@ int br_del_bridge(struct net *net, const char *name)
268 } 269 }
269 270
270 else 271 else
271 del_br(netdev_priv(dev), NULL); 272 br_dev_delete(dev, NULL);
272 273
273 rtnl_unlock(); 274 rtnl_unlock();
274 return ret; 275 return ret;
@@ -449,7 +450,7 @@ void __net_exit br_net_exit(struct net *net)
449 rtnl_lock(); 450 rtnl_lock();
450 for_each_netdev(net, dev) 451 for_each_netdev(net, dev)
451 if (dev->priv_flags & IFF_EBRIDGE) 452 if (dev->priv_flags & IFF_EBRIDGE)
452 del_br(netdev_priv(dev), &list); 453 br_dev_delete(dev, &list);
453 454
454 unregister_netdevice_many(&list); 455 unregister_netdevice_many(&list);
455 rtnl_unlock(); 456 rtnl_unlock();
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 5b1ed1ba9aa7..e5f9ece3c9a0 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -210,6 +210,7 @@ static struct rtnl_link_ops br_link_ops __read_mostly = {
210 .priv_size = sizeof(struct net_bridge), 210 .priv_size = sizeof(struct net_bridge),
211 .setup = br_dev_setup, 211 .setup = br_dev_setup,
212 .validate = br_validate, 212 .validate = br_validate,
213 .dellink = br_dev_delete,
213}; 214};
214 215
215int __init br_netlink_init(void) 216int __init br_netlink_init(void)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 78cc364997d9..857a021deea9 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -294,6 +294,7 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
294 294
295/* br_device.c */ 295/* br_device.c */
296extern void br_dev_setup(struct net_device *dev); 296extern void br_dev_setup(struct net_device *dev);
297extern void br_dev_delete(struct net_device *dev, struct list_head *list);
297extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, 298extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
298 struct net_device *dev); 299 struct net_device *dev);
299#ifdef CONFIG_NET_POLL_CONTROLLER 300#ifdef CONFIG_NET_POLL_CONTROLLER
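
The bridge's internal del_br() becomes the public br_dev_delete() and is hooked up as rtnl_link_ops.dellink, so a bridge can be torn down through the normal RTM_DELLINK netlink path (for instance "ip link del <bridge>") as well as through the existing ioctl and net-exit paths, which now share the same helper. A minimal sketch of a link type wired up the same way (names are illustrative):

#include <linux/netdevice.h>
#include <net/rtnetlink.h>

/* Illustrative only: a link type whose devices can be removed via RTM_DELLINK. */
static void example_dev_delete(struct net_device *dev, struct list_head *head)
{
	/* detach per-device state here, then queue the device for unregistration */
	unregister_netdevice_queue(dev, head);
}

static struct rtnl_link_ops example_link_ops __read_mostly = {
	.kind		= "example",
	.dellink	= example_dev_delete,
	/* a real link type, like the bridge, also supplies .setup and .priv_size */
};
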
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 3231b468bb72..27071ee2a4e1 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -475,8 +475,11 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
475 475
476 list_del_rcu(&rule->list); 476 list_del_rcu(&rule->list);
477 477
478 if (rule->action == FR_ACT_GOTO) 478 if (rule->action == FR_ACT_GOTO) {
479 ops->nr_goto_rules--; 479 ops->nr_goto_rules--;
480 if (rtnl_dereference(rule->ctarget) == NULL)
481 ops->unresolved_rules--;
482 }
480 483
481 /* 484 /*
482 * Check if this rule is a target to any of them. If so, 485 * Check if this rule is a target to any of them. If so,
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 21fab3edb92c..d73aab3fbfc0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1389,9 +1389,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1389 1389
1390 BUG_ON(!pcount); 1390 BUG_ON(!pcount);
1391 1391
1392 /* Tweak before seqno plays */ 1392 if (skb == tp->lost_skb_hint)
1393 if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
1394 !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
1395 tp->lost_cnt_hint += pcount; 1393 tp->lost_cnt_hint += pcount;
1396 1394
1397 TCP_SKB_CB(prev)->end_seq += shifted; 1395 TCP_SKB_CB(prev)->end_seq += shifted;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c34f01513945..7963e03f1068 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -927,18 +927,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
927 } 927 }
928 sk_nocaps_add(sk, NETIF_F_GSO_MASK); 928 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
929 } 929 }
930 if (tcp_alloc_md5sig_pool(sk) == NULL) { 930
931 md5sig = tp->md5sig_info;
932 if (md5sig->entries4 == 0 &&
933 tcp_alloc_md5sig_pool(sk) == NULL) {
931 kfree(newkey); 934 kfree(newkey);
932 return -ENOMEM; 935 return -ENOMEM;
933 } 936 }
934 md5sig = tp->md5sig_info;
935 937
936 if (md5sig->alloced4 == md5sig->entries4) { 938 if (md5sig->alloced4 == md5sig->entries4) {
937 keys = kmalloc((sizeof(*keys) * 939 keys = kmalloc((sizeof(*keys) *
938 (md5sig->entries4 + 1)), GFP_ATOMIC); 940 (md5sig->entries4 + 1)), GFP_ATOMIC);
939 if (!keys) { 941 if (!keys) {
940 kfree(newkey); 942 kfree(newkey);
941 tcp_free_md5sig_pool(); 943 if (md5sig->entries4 == 0)
944 tcp_free_md5sig_pool();
942 return -ENOMEM; 945 return -ENOMEM;
943 } 946 }
944 947
@@ -982,6 +985,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
982 kfree(tp->md5sig_info->keys4); 985 kfree(tp->md5sig_info->keys4);
983 tp->md5sig_info->keys4 = NULL; 986 tp->md5sig_info->keys4 = NULL;
984 tp->md5sig_info->alloced4 = 0; 987 tp->md5sig_info->alloced4 = 0;
988 tcp_free_md5sig_pool();
985 } else if (tp->md5sig_info->entries4 != i) { 989 } else if (tp->md5sig_info->entries4 != i) {
986 /* Need to do some manipulation */ 990 /* Need to do some manipulation */
987 memmove(&tp->md5sig_info->keys4[i], 991 memmove(&tp->md5sig_info->keys4[i],
@@ -989,7 +993,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
989 (tp->md5sig_info->entries4 - i) * 993 (tp->md5sig_info->entries4 - i) *
990 sizeof(struct tcp4_md5sig_key)); 994 sizeof(struct tcp4_md5sig_key));
991 } 995 }
992 tcp_free_md5sig_pool();
993 return 0; 996 return 0;
994 } 997 }
995 } 998 }
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index d2fe4e06b472..0ce3d06dce60 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -328,6 +328,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
328 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 328 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
329 const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); 329 const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
330 330
331 tw->tw_transparent = inet_sk(sk)->transparent;
331 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; 332 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
332 tcptw->tw_rcv_nxt = tp->rcv_nxt; 333 tcptw->tw_rcv_nxt = tp->rcv_nxt;
333 tcptw->tw_snd_nxt = tp->snd_nxt; 334 tcptw->tw_snd_nxt = tp->snd_nxt;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3b5669a2582d..d27c797f9f05 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -875,6 +875,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
875 skb_reset_transport_header(skb); 875 skb_reset_transport_header(skb);
876 __skb_push(skb, skb_gro_offset(skb)); 876 __skb_push(skb, skb_gro_offset(skb));
877 877
878 ops = rcu_dereference(inet6_protos[proto]);
878 if (!ops || !ops->gro_receive) 879 if (!ops || !ops->gro_receive)
879 goto out_unlock; 880 goto out_unlock;
880 881
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 79cc6469508d..7b8fc5794352 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -591,7 +591,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
591 } 591 }
592 sk_nocaps_add(sk, NETIF_F_GSO_MASK); 592 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
593 } 593 }
594 if (tcp_alloc_md5sig_pool(sk) == NULL) { 594 if (tp->md5sig_info->entries6 == 0 &&
595 tcp_alloc_md5sig_pool(sk) == NULL) {
595 kfree(newkey); 596 kfree(newkey);
596 return -ENOMEM; 597 return -ENOMEM;
597 } 598 }
@@ -600,8 +601,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
600 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC); 601 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
601 602
602 if (!keys) { 603 if (!keys) {
603 tcp_free_md5sig_pool();
604 kfree(newkey); 604 kfree(newkey);
605 if (tp->md5sig_info->entries6 == 0)
606 tcp_free_md5sig_pool();
605 return -ENOMEM; 607 return -ENOMEM;
606 } 608 }
607 609
@@ -647,6 +649,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
647 kfree(tp->md5sig_info->keys6); 649 kfree(tp->md5sig_info->keys6);
648 tp->md5sig_info->keys6 = NULL; 650 tp->md5sig_info->keys6 = NULL;
649 tp->md5sig_info->alloced6 = 0; 651 tp->md5sig_info->alloced6 = 0;
652 tcp_free_md5sig_pool();
650 } else { 653 } else {
651 /* shrink the database */ 654 /* shrink the database */
652 if (tp->md5sig_info->entries6 != i) 655 if (tp->md5sig_info->entries6 != i)
@@ -655,7 +658,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
655 (tp->md5sig_info->entries6 - i) 658 (tp->md5sig_info->entries6 - i)
656 * sizeof (tp->md5sig_info->keys6[0])); 659 * sizeof (tp->md5sig_info->keys6[0]));
657 } 660 }
658 tcp_free_md5sig_pool();
659 return 0; 661 return 0;
660 } 662 }
661 } 663 }
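
In both tcp_v4_md5_do_add()/do_del() (above) and their IPv6 counterparts, the shared MD5 signature pool is now allocated only when the first key of that family is installed, released only when the last one is removed, and left alone on intermediate add/delete operations and on allocation failures that happen while other keys still exist. A userspace sketch of that "one pool reference per non-empty key list" discipline, with pool_get()/pool_put() standing in for tcp_alloc_md5sig_pool()/tcp_free_md5sig_pool():

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int pool_users;
static void *pool;

static void *pool_get(void)		/* stands in for tcp_alloc_md5sig_pool() */
{
	pthread_mutex_lock(&pool_lock);
	if (pool_users == 0)
		pool = malloc(4096);
	if (pool)
		pool_users++;
	pthread_mutex_unlock(&pool_lock);
	return pool;
}

static void pool_put(void)		/* stands in for tcp_free_md5sig_pool() */
{
	pthread_mutex_lock(&pool_lock);
	if (pool_users > 0 && --pool_users == 0) {
		free(pool);
		pool = NULL;
	}
	pthread_mutex_unlock(&pool_lock);
}

struct keyring { int entries; };

static int keyring_add(struct keyring *kr)
{
	if (kr->entries == 0 && !pool_get())	/* first key pins the shared pool */
		return -1;
	kr->entries++;
	return 0;
}

static void keyring_del(struct keyring *kr)
{
	if (kr->entries > 0 && --kr->entries == 0)
		pool_put();			/* last key releases it */
}
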
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index ad4ac2601a56..34b2ddeacb67 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1045,8 +1045,10 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1045 headroom = NET_SKB_PAD + sizeof(struct iphdr) + 1045 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1046 uhlen + hdr_len; 1046 uhlen + hdr_len;
1047 old_headroom = skb_headroom(skb); 1047 old_headroom = skb_headroom(skb);
1048 if (skb_cow_head(skb, headroom)) 1048 if (skb_cow_head(skb, headroom)) {
1049 dev_kfree_skb(skb);
1049 goto abort; 1050 goto abort;
1051 }
1050 1052
1051 new_headroom = skb_headroom(skb); 1053 new_headroom = skb_headroom(skb);
1052 skb_orphan(skb); 1054 skb_orphan(skb);
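
skb_cow_head() may need to reallocate the header and can fail; bailing out through the abort label without releasing the skb leaked it, so the failure path now frees the buffer explicitly. A kernel-style sketch of the rule for transmit paths (example_xmit and needed_headroom are illustrative): every exit after the caller has handed over the skb must either send it or free it.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_xmit(struct sk_buff *skb, int needed_headroom)
{
	if (skb_cow_head(skb, needed_headroom)) {
		dev_kfree_skb(skb);	/* don't leak the skb on reallocation failure */
		return NET_XMIT_DROP;
	}

	/* ... push headers and pass the skb along ... */
	return NET_XMIT_SUCCESS;
}
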
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 2b771dc708a3..e3be48bf4dcd 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2283,6 +2283,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2283 struct ip_vs_service *svc; 2283 struct ip_vs_service *svc;
2284 struct ip_vs_dest_user *udest_compat; 2284 struct ip_vs_dest_user *udest_compat;
2285 struct ip_vs_dest_user_kern udest; 2285 struct ip_vs_dest_user_kern udest;
2286 struct netns_ipvs *ipvs = net_ipvs(net);
2286 2287
2287 if (!capable(CAP_NET_ADMIN)) 2288 if (!capable(CAP_NET_ADMIN))
2288 return -EPERM; 2289 return -EPERM;
@@ -2303,6 +2304,24 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2303 /* increase the module use count */ 2304 /* increase the module use count */
2304 ip_vs_use_count_inc(); 2305 ip_vs_use_count_inc();
2305 2306
2307 /* Handle daemons since they have another lock */
2308 if (cmd == IP_VS_SO_SET_STARTDAEMON ||
2309 cmd == IP_VS_SO_SET_STOPDAEMON) {
2310 struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
2311
2312 if (mutex_lock_interruptible(&ipvs->sync_mutex)) {
2313 ret = -ERESTARTSYS;
2314 goto out_dec;
2315 }
2316 if (cmd == IP_VS_SO_SET_STARTDAEMON)
2317 ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
2318 dm->syncid);
2319 else
2320 ret = stop_sync_thread(net, dm->state);
2321 mutex_unlock(&ipvs->sync_mutex);
2322 goto out_dec;
2323 }
2324
2306 if (mutex_lock_interruptible(&__ip_vs_mutex)) { 2325 if (mutex_lock_interruptible(&__ip_vs_mutex)) {
2307 ret = -ERESTARTSYS; 2326 ret = -ERESTARTSYS;
2308 goto out_dec; 2327 goto out_dec;
@@ -2316,15 +2335,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2316 /* Set timeout values for (tcp tcpfin udp) */ 2335 /* Set timeout values for (tcp tcpfin udp) */
2317 ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg); 2336 ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
2318 goto out_unlock; 2337 goto out_unlock;
2319 } else if (cmd == IP_VS_SO_SET_STARTDAEMON) {
2320 struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
2321 ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
2322 dm->syncid);
2323 goto out_unlock;
2324 } else if (cmd == IP_VS_SO_SET_STOPDAEMON) {
2325 struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
2326 ret = stop_sync_thread(net, dm->state);
2327 goto out_unlock;
2328 } 2338 }
2329 2339
2330 usvc_compat = (struct ip_vs_service_user *)arg; 2340 usvc_compat = (struct ip_vs_service_user *)arg;
@@ -2584,6 +2594,33 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2584 2594
2585 if (copy_from_user(arg, user, copylen) != 0) 2595 if (copy_from_user(arg, user, copylen) != 0)
2586 return -EFAULT; 2596 return -EFAULT;
2597 /*
2598 * Handle daemons first since it has its own locking
2599 */
2600 if (cmd == IP_VS_SO_GET_DAEMON) {
2601 struct ip_vs_daemon_user d[2];
2602
2603 memset(&d, 0, sizeof(d));
2604 if (mutex_lock_interruptible(&ipvs->sync_mutex))
2605 return -ERESTARTSYS;
2606
2607 if (ipvs->sync_state & IP_VS_STATE_MASTER) {
2608 d[0].state = IP_VS_STATE_MASTER;
2609 strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
2610 sizeof(d[0].mcast_ifn));
2611 d[0].syncid = ipvs->master_syncid;
2612 }
2613 if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
2614 d[1].state = IP_VS_STATE_BACKUP;
2615 strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
2616 sizeof(d[1].mcast_ifn));
2617 d[1].syncid = ipvs->backup_syncid;
2618 }
2619 if (copy_to_user(user, &d, sizeof(d)) != 0)
2620 ret = -EFAULT;
2621 mutex_unlock(&ipvs->sync_mutex);
2622 return ret;
2623 }
2587 2624
2588 if (mutex_lock_interruptible(&__ip_vs_mutex)) 2625 if (mutex_lock_interruptible(&__ip_vs_mutex))
2589 return -ERESTARTSYS; 2626 return -ERESTARTSYS;
@@ -2681,28 +2718,6 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2681 } 2718 }
2682 break; 2719 break;
2683 2720
2684 case IP_VS_SO_GET_DAEMON:
2685 {
2686 struct ip_vs_daemon_user d[2];
2687
2688 memset(&d, 0, sizeof(d));
2689 if (ipvs->sync_state & IP_VS_STATE_MASTER) {
2690 d[0].state = IP_VS_STATE_MASTER;
2691 strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
2692 sizeof(d[0].mcast_ifn));
2693 d[0].syncid = ipvs->master_syncid;
2694 }
2695 if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
2696 d[1].state = IP_VS_STATE_BACKUP;
2697 strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
2698 sizeof(d[1].mcast_ifn));
2699 d[1].syncid = ipvs->backup_syncid;
2700 }
2701 if (copy_to_user(user, &d, sizeof(d)) != 0)
2702 ret = -EFAULT;
2703 }
2704 break;
2705
2706 default: 2721 default:
2707 ret = -EINVAL; 2722 ret = -EINVAL;
2708 } 2723 }
@@ -3205,7 +3220,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
3205 struct net *net = skb_sknet(skb); 3220 struct net *net = skb_sknet(skb);
3206 struct netns_ipvs *ipvs = net_ipvs(net); 3221 struct netns_ipvs *ipvs = net_ipvs(net);
3207 3222
3208 mutex_lock(&__ip_vs_mutex); 3223 mutex_lock(&ipvs->sync_mutex);
3209 if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) { 3224 if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
3210 if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER, 3225 if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
3211 ipvs->master_mcast_ifn, 3226 ipvs->master_mcast_ifn,
@@ -3225,7 +3240,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
3225 } 3240 }
3226 3241
3227nla_put_failure: 3242nla_put_failure:
3228 mutex_unlock(&__ip_vs_mutex); 3243 mutex_unlock(&ipvs->sync_mutex);
3229 3244
3230 return skb->len; 3245 return skb->len;
3231} 3246}
@@ -3271,13 +3286,9 @@ static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
3271 return ip_vs_set_timeout(net, &t); 3286 return ip_vs_set_timeout(net, &t);
3272} 3287}
3273 3288
3274static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) 3289static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
3275{ 3290{
3276 struct ip_vs_service *svc = NULL;
3277 struct ip_vs_service_user_kern usvc;
3278 struct ip_vs_dest_user_kern udest;
3279 int ret = 0, cmd; 3291 int ret = 0, cmd;
3280 int need_full_svc = 0, need_full_dest = 0;
3281 struct net *net; 3292 struct net *net;
3282 struct netns_ipvs *ipvs; 3293 struct netns_ipvs *ipvs;
3283 3294
@@ -3285,19 +3296,10 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
3285 ipvs = net_ipvs(net); 3296 ipvs = net_ipvs(net);
3286 cmd = info->genlhdr->cmd; 3297 cmd = info->genlhdr->cmd;
3287 3298
3288 mutex_lock(&__ip_vs_mutex); 3299 if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) {
3289
3290 if (cmd == IPVS_CMD_FLUSH) {
3291 ret = ip_vs_flush(net);
3292 goto out;
3293 } else if (cmd == IPVS_CMD_SET_CONFIG) {
3294 ret = ip_vs_genl_set_config(net, info->attrs);
3295 goto out;
3296 } else if (cmd == IPVS_CMD_NEW_DAEMON ||
3297 cmd == IPVS_CMD_DEL_DAEMON) {
3298
3299 struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1]; 3300 struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
3300 3301
3302 mutex_lock(&ipvs->sync_mutex);
3301 if (!info->attrs[IPVS_CMD_ATTR_DAEMON] || 3303 if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
3302 nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX, 3304 nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX,
3303 info->attrs[IPVS_CMD_ATTR_DAEMON], 3305 info->attrs[IPVS_CMD_ATTR_DAEMON],
@@ -3310,6 +3312,33 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
3310 ret = ip_vs_genl_new_daemon(net, daemon_attrs); 3312 ret = ip_vs_genl_new_daemon(net, daemon_attrs);
3311 else 3313 else
3312 ret = ip_vs_genl_del_daemon(net, daemon_attrs); 3314 ret = ip_vs_genl_del_daemon(net, daemon_attrs);
3315out:
3316 mutex_unlock(&ipvs->sync_mutex);
3317 }
3318 return ret;
3319}
3320
3321static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
3322{
3323 struct ip_vs_service *svc = NULL;
3324 struct ip_vs_service_user_kern usvc;
3325 struct ip_vs_dest_user_kern udest;
3326 int ret = 0, cmd;
3327 int need_full_svc = 0, need_full_dest = 0;
3328 struct net *net;
3329 struct netns_ipvs *ipvs;
3330
3331 net = skb_sknet(skb);
3332 ipvs = net_ipvs(net);
3333 cmd = info->genlhdr->cmd;
3334
3335 mutex_lock(&__ip_vs_mutex);
3336
3337 if (cmd == IPVS_CMD_FLUSH) {
3338 ret = ip_vs_flush(net);
3339 goto out;
3340 } else if (cmd == IPVS_CMD_SET_CONFIG) {
3341 ret = ip_vs_genl_set_config(net, info->attrs);
3313 goto out; 3342 goto out;
3314 } else if (cmd == IPVS_CMD_ZERO && 3343 } else if (cmd == IPVS_CMD_ZERO &&
3315 !info->attrs[IPVS_CMD_ATTR_SERVICE]) { 3344 !info->attrs[IPVS_CMD_ATTR_SERVICE]) {
@@ -3536,13 +3565,13 @@ static struct genl_ops ip_vs_genl_ops[] __read_mostly = {
3536 .cmd = IPVS_CMD_NEW_DAEMON, 3565 .cmd = IPVS_CMD_NEW_DAEMON,
3537 .flags = GENL_ADMIN_PERM, 3566 .flags = GENL_ADMIN_PERM,
3538 .policy = ip_vs_cmd_policy, 3567 .policy = ip_vs_cmd_policy,
3539 .doit = ip_vs_genl_set_cmd, 3568 .doit = ip_vs_genl_set_daemon,
3540 }, 3569 },
3541 { 3570 {
3542 .cmd = IPVS_CMD_DEL_DAEMON, 3571 .cmd = IPVS_CMD_DEL_DAEMON,
3543 .flags = GENL_ADMIN_PERM, 3572 .flags = GENL_ADMIN_PERM,
3544 .policy = ip_vs_cmd_policy, 3573 .policy = ip_vs_cmd_policy,
3545 .doit = ip_vs_genl_set_cmd, 3574 .doit = ip_vs_genl_set_daemon,
3546 }, 3575 },
3547 { 3576 {
3548 .cmd = IPVS_CMD_GET_DAEMON, 3577 .cmd = IPVS_CMD_GET_DAEMON,
@@ -3679,7 +3708,7 @@ int __net_init ip_vs_control_net_init(struct net *net)
3679 int idx; 3708 int idx;
3680 struct netns_ipvs *ipvs = net_ipvs(net); 3709 struct netns_ipvs *ipvs = net_ipvs(net);
3681 3710
3682 ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock); 3711 rwlock_init(&ipvs->rs_lock);
3683 3712
3684 /* Initialize rs_table */ 3713 /* Initialize rs_table */
3685 for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++) 3714 for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
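
IP_VS_SO_SET_STARTDAEMON/STOPDAEMON, IP_VS_SO_GET_DAEMON and the IPVS_CMD_NEW_DAEMON/DEL_DAEMON netlink commands are split off onto the new per-netns ipvs->sync_mutex (declared in include/net/ip_vs.h above), so starting, stopping or querying the sync daemons no longer holds the global __ip_vs_mutex that every other configuration command takes; the netlink daemon commands get their own handler, ip_vs_genl_set_daemon(), and the daemon dump now serialises on sync_mutex as well. A sketch of the split, with illustrative command names:

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(config_mutex);	/* stands in for __ip_vs_mutex */
static DEFINE_MUTEX(sync_mutex);	/* stands in for ipvs->sync_mutex */

enum { CMD_START_DAEMON, CMD_STOP_DAEMON, CMD_OTHER };

static int handle_cmd(int cmd)
{
	/* Daemon commands take only the sync lock, everything else the config lock. */
	if (cmd == CMD_START_DAEMON || cmd == CMD_STOP_DAEMON) {
		if (mutex_lock_interruptible(&sync_mutex))
			return -ERESTARTSYS;
		/* start or stop the sync threads here */
		mutex_unlock(&sync_mutex);
		return 0;
	}

	if (mutex_lock_interruptible(&config_mutex))
		return -ERESTARTSYS;
	/* ordinary configuration work here */
	mutex_unlock(&config_mutex);
	return 0;
}
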
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 7ee7215b8ba0..3cdd479f9b5d 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -61,6 +61,7 @@
61 61
62#define SYNC_PROTO_VER 1 /* Protocol version in header */ 62#define SYNC_PROTO_VER 1 /* Protocol version in header */
63 63
64static struct lock_class_key __ipvs_sync_key;
64/* 65/*
65 * IPVS sync connection entry 66 * IPVS sync connection entry
66 * Version 0, i.e. original version. 67 * Version 0, i.e. original version.
@@ -1545,6 +1546,7 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
1545 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", 1546 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
1546 sizeof(struct ip_vs_sync_conn_v0)); 1547 sizeof(struct ip_vs_sync_conn_v0));
1547 1548
1549
1548 if (state == IP_VS_STATE_MASTER) { 1550 if (state == IP_VS_STATE_MASTER) {
1549 if (ipvs->master_thread) 1551 if (ipvs->master_thread)
1550 return -EEXIST; 1552 return -EEXIST;
@@ -1667,6 +1669,7 @@ int __net_init ip_vs_sync_net_init(struct net *net)
1667{ 1669{
1668 struct netns_ipvs *ipvs = net_ipvs(net); 1670 struct netns_ipvs *ipvs = net_ipvs(net);
1669 1671
1672 __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key);
1670 INIT_LIST_HEAD(&ipvs->sync_queue); 1673 INIT_LIST_HEAD(&ipvs->sync_queue);
1671 spin_lock_init(&ipvs->sync_lock); 1674 spin_lock_init(&ipvs->sync_lock);
1672 spin_lock_init(&ipvs->sync_buff_lock); 1675 spin_lock_init(&ipvs->sync_buff_lock);
@@ -1680,7 +1683,9 @@ int __net_init ip_vs_sync_net_init(struct net *net)
1680void ip_vs_sync_net_cleanup(struct net *net) 1683void ip_vs_sync_net_cleanup(struct net *net)
1681{ 1684{
1682 int retc; 1685 int retc;
1686 struct netns_ipvs *ipvs = net_ipvs(net);
1683 1687
1688 mutex_lock(&ipvs->sync_mutex);
1684 retc = stop_sync_thread(net, IP_VS_STATE_MASTER); 1689 retc = stop_sync_thread(net, IP_VS_STATE_MASTER);
1685 if (retc && retc != -ESRCH) 1690 if (retc && retc != -ESRCH)
1686 pr_err("Failed to stop Master Daemon\n"); 1691 pr_err("Failed to stop Master Daemon\n");
@@ -1688,4 +1693,5 @@ void ip_vs_sync_net_cleanup(struct net *net)
1688 retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); 1693 retc = stop_sync_thread(net, IP_VS_STATE_BACKUP);
1689 if (retc && retc != -ESRCH) 1694 if (retc && retc != -ESRCH)
1690 pr_err("Failed to stop Backup Daemon\n"); 1695 pr_err("Failed to stop Backup Daemon\n");
1696 mutex_unlock(&ipvs->sync_mutex);
1691} 1697}
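
The new sync_mutex is initialised per namespace with __mutex_init() and a file-local lock_class_key, and ip_vs_sync_net_cleanup() now stops the master and backup daemons while holding it. A minimal sketch of that initialisation pattern (example_ns and the name string are illustrative):

#include <linux/mutex.h>

static struct lock_class_key example_sync_key;

struct example_ns {
	struct mutex sync_mutex;
};

/*
 * Per-namespace init: __mutex_init() lets a dynamically allocated mutex
 * be given an explicit name and lockdep class key, as the patch does
 * for ipvs->sync_mutex.
 */
static void example_ns_init(struct example_ns *ns)
{
	__mutex_init(&ns->sync_mutex, "ns->sync_mutex", &example_sync_key);
}
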
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index cf616e55ca41..d69facdd9a7a 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -241,8 +241,8 @@ static int gre_packet(struct nf_conn *ct,
241 nf_ct_refresh_acct(ct, ctinfo, skb, 241 nf_ct_refresh_acct(ct, ctinfo, skb,
242 ct->proto.gre.stream_timeout); 242 ct->proto.gre.stream_timeout);
243 /* Also, more likely to be important, and not a probe. */ 243 /* Also, more likely to be important, and not a probe. */
244 set_bit(IPS_ASSURED_BIT, &ct->status); 244 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
245 nf_conntrack_event_cache(IPCT_ASSURED, ct); 245 nf_conntrack_event_cache(IPCT_ASSURED, ct);
246 } else 246 } else
247 nf_ct_refresh_acct(ct, ctinfo, skb, 247 nf_ct_refresh_acct(ct, ctinfo, skb,
248 ct->proto.gre.timeout); 248 ct->proto.gre.timeout);
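
The GRE tracker previously set IPS_ASSURED_BIT and queued an IPCT_ASSURED event on every packet that refreshed the stream timeout; with test_and_set_bit() the event is generated only on the first 0-to-1 transition. The same idea in portable C11, with atomic_flag_test_and_set() standing in for test_and_set_bit():

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag assured = ATOMIC_FLAG_INIT;

/* Emit the "assured" event only once, however many times the path runs. */
static void mark_assured(void)
{
	if (!atomic_flag_test_and_set(&assured))
		puts("IPCT_ASSURED event generated");
}

int main(void)
{
	mark_assured();		/* prints once */
	mark_assured();		/* no duplicate event */
	return 0;
}
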
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index d30615419b4d..5f03e4ea65bf 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -91,7 +91,7 @@ int x25_parse_address_block(struct sk_buff *skb,
91 int needed; 91 int needed;
92 int rc; 92 int rc;
93 93
94 if (skb->len < 1) { 94 if (!pskb_may_pull(skb, 1)) {
95 /* packet has no address block */ 95 /* packet has no address block */
96 rc = 0; 96 rc = 0;
97 goto empty; 97 goto empty;
@@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb,
100 len = *skb->data; 100 len = *skb->data;
101 needed = 1 + (len >> 4) + (len & 0x0f); 101 needed = 1 + (len >> 4) + (len & 0x0f);
102 102
103 if (skb->len < needed) { 103 if (!pskb_may_pull(skb, needed)) {
104 /* packet is too short to hold the addresses it claims 104 /* packet is too short to hold the addresses it claims
105 to hold */ 105 to hold */
106 rc = -1; 106 rc = -1;
@@ -295,7 +295,8 @@ static struct sock *x25_find_listener(struct x25_address *addr,
295 * Found a listening socket, now check the incoming 295 * Found a listening socket, now check the incoming
296 * call user data vs this sockets call user data 296 * call user data vs this sockets call user data
297 */ 297 */
298 if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) { 298 if (x25_sk(s)->cudmatchlength > 0 &&
299 skb->len >= x25_sk(s)->cudmatchlength) {
299 if((memcmp(x25_sk(s)->calluserdata.cuddata, 300 if((memcmp(x25_sk(s)->calluserdata.cuddata,
300 skb->data, 301 skb->data,
301 x25_sk(s)->cudmatchlength)) == 0) { 302 x25_sk(s)->cudmatchlength)) == 0) {
@@ -951,14 +952,27 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
951 * 952 *
952 * Facilities length is mandatory in call request packets 953 * Facilities length is mandatory in call request packets
953 */ 954 */
954 if (skb->len < 1) 955 if (!pskb_may_pull(skb, 1))
955 goto out_clear_request; 956 goto out_clear_request;
956 len = skb->data[0] + 1; 957 len = skb->data[0] + 1;
957 if (skb->len < len) 958 if (!pskb_may_pull(skb, len))
958 goto out_clear_request; 959 goto out_clear_request;
959 skb_pull(skb,len); 960 skb_pull(skb,len);
960 961
961 /* 962 /*
963 * Ensure that the amount of call user data is valid.
964 */
965 if (skb->len > X25_MAX_CUD_LEN)
966 goto out_clear_request;
967
968 /*
969 * Get all the call user data so it can be used in
970 * x25_find_listener and skb_copy_from_linear_data up ahead.
971 */
972 if (!pskb_may_pull(skb, skb->len))
973 goto out_clear_request;
974
975 /*
962 * Find a listener for the particular address/cud pair. 976 * Find a listener for the particular address/cud pair.
963 */ 977 */
964 sk = x25_find_listener(&source_addr,skb); 978 sk = x25_find_listener(&source_addr,skb);
@@ -1166,6 +1180,9 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
1166 * byte of the user data is the logical value of the Q Bit. 1180 * byte of the user data is the logical value of the Q Bit.
1167 */ 1181 */
1168 if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { 1182 if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
1183 if (!pskb_may_pull(skb, 1))
1184 goto out_kfree_skb;
1185
1169 qbit = skb->data[0]; 1186 qbit = skb->data[0];
1170 skb_pull(skb, 1); 1187 skb_pull(skb, 1);
1171 } 1188 }
@@ -1244,7 +1261,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
1244 struct x25_sock *x25 = x25_sk(sk); 1261 struct x25_sock *x25 = x25_sk(sk);
1245 struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name; 1262 struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
1246 size_t copied; 1263 size_t copied;
1247 int qbit; 1264 int qbit, header_len = x25->neighbour->extended ?
1265 X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
1266
1248 struct sk_buff *skb; 1267 struct sk_buff *skb;
1249 unsigned char *asmptr; 1268 unsigned char *asmptr;
1250 int rc = -ENOTCONN; 1269 int rc = -ENOTCONN;
@@ -1265,6 +1284,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
1265 1284
1266 skb = skb_dequeue(&x25->interrupt_in_queue); 1285 skb = skb_dequeue(&x25->interrupt_in_queue);
1267 1286
1287 if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
1288 goto out_free_dgram;
1289
1268 skb_pull(skb, X25_STD_MIN_LEN); 1290 skb_pull(skb, X25_STD_MIN_LEN);
1269 1291
1270 /* 1292 /*
@@ -1285,10 +1307,12 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
1285 if (!skb) 1307 if (!skb)
1286 goto out; 1308 goto out;
1287 1309
1310 if (!pskb_may_pull(skb, header_len))
1311 goto out_free_dgram;
1312
1288 qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT; 1313 qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;
1289 1314
1290 skb_pull(skb, x25->neighbour->extended ? 1315 skb_pull(skb, header_len);
1291 X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
1292 1316
1293 if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { 1317 if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
1294 asmptr = skb_push(skb, 1); 1318 asmptr = skb_push(skb, 1);
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index e547ca1578c3..fa2b41888bd9 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -32,6 +32,9 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
32 unsigned short frametype; 32 unsigned short frametype;
33 unsigned int lci; 33 unsigned int lci;
34 34
35 if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
36 return 0;
37
35 frametype = skb->data[2]; 38 frametype = skb->data[2];
36 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); 39 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
37 40
@@ -115,6 +118,9 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
115 goto drop; 118 goto drop;
116 } 119 }
117 120
121 if (!pskb_may_pull(skb, 1))
122 return 0;
123
118 switch (skb->data[0]) { 124 switch (skb->data[0]) {
119 125
120 case X25_IFACE_DATA: 126 case X25_IFACE_DATA:
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index f77e4e75f914..36384a1fa9f2 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -44,7 +44,7 @@
44int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, 44int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
45 struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) 45 struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
46{ 46{
47 unsigned char *p = skb->data; 47 unsigned char *p;
48 unsigned int len; 48 unsigned int len;
49 49
50 *vc_fac_mask = 0; 50 *vc_fac_mask = 0;
@@ -60,14 +60,16 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
60 memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); 60 memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
61 memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); 61 memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
62 62
63 if (skb->len < 1) 63 if (!pskb_may_pull(skb, 1))
64 return 0; 64 return 0;
65 65
66 len = *p++; 66 len = skb->data[0];
67 67
68 if (len >= skb->len) 68 if (!pskb_may_pull(skb, 1 + len))
69 return -1; 69 return -1;
70 70
71 p = skb->data + 1;
72
71 while (len > 0) { 73 while (len > 0) {
72 switch (*p & X25_FAC_CLASS_MASK) { 74 switch (*p & X25_FAC_CLASS_MASK) {
73 case X25_FAC_CLASS_A: 75 case X25_FAC_CLASS_A:
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 0b073b51b183..a49cd4ec551a 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -107,6 +107,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
107 /* 107 /*
108 * Parse the data in the frame. 108 * Parse the data in the frame.
109 */ 109 */
110 if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
111 goto out_clear;
110 skb_pull(skb, X25_STD_MIN_LEN); 112 skb_pull(skb, X25_STD_MIN_LEN);
111 113
112 len = x25_parse_address_block(skb, &source_addr, 114 len = x25_parse_address_block(skb, &source_addr,
@@ -127,9 +129,11 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
127 * Copy any Call User Data. 129 * Copy any Call User Data.
128 */ 130 */
129 if (skb->len > 0) { 131 if (skb->len > 0) {
130 skb_copy_from_linear_data(skb, 132 if (skb->len > X25_MAX_CUD_LEN)
131 x25->calluserdata.cuddata, 133 goto out_clear;
132 skb->len); 134
135 skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
136 skb->len);
133 x25->calluserdata.cudlength = skb->len; 137 x25->calluserdata.cudlength = skb->len;
134 } 138 }
135 if (!sock_flag(sk, SOCK_DEAD)) 139 if (!sock_flag(sk, SOCK_DEAD))
@@ -137,6 +141,9 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
137 break; 141 break;
138 } 142 }
139 case X25_CLEAR_REQUEST: 143 case X25_CLEAR_REQUEST:
144 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
145 goto out_clear;
146
140 x25_write_internal(sk, X25_CLEAR_CONFIRMATION); 147 x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
141 x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]); 148 x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
142 break; 149 break;
@@ -164,6 +171,9 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp
164 switch (frametype) { 171 switch (frametype) {
165 172
166 case X25_CLEAR_REQUEST: 173 case X25_CLEAR_REQUEST:
174 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
175 goto out_clear;
176
167 x25_write_internal(sk, X25_CLEAR_CONFIRMATION); 177 x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
168 x25_disconnect(sk, 0, skb->data[3], skb->data[4]); 178 x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
169 break; 179 break;
@@ -177,6 +187,11 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp
177 } 187 }
178 188
179 return 0; 189 return 0;
190
191out_clear:
192 x25_write_internal(sk, X25_CLEAR_REQUEST);
193 x25_start_t23timer(sk);
194 return 0;
180} 195}
181 196
182/* 197/*
@@ -206,6 +221,9 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
206 break; 221 break;
207 222
208 case X25_CLEAR_REQUEST: 223 case X25_CLEAR_REQUEST:
224 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
225 goto out_clear;
226
209 x25_write_internal(sk, X25_CLEAR_CONFIRMATION); 227 x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
210 x25_disconnect(sk, 0, skb->data[3], skb->data[4]); 228 x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
211 break; 229 break;
@@ -304,6 +322,12 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
304 } 322 }
305 323
306 return queued; 324 return queued;
325
326out_clear:
327 x25_write_internal(sk, X25_CLEAR_REQUEST);
328 x25->state = X25_STATE_2;
329 x25_start_t23timer(sk);
330 return 0;
307} 331}
308 332
309/* 333/*
@@ -313,13 +337,13 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
313 */ 337 */
314static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype) 338static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
315{ 339{
340 struct x25_sock *x25 = x25_sk(sk);
341
316 switch (frametype) { 342 switch (frametype) {
317 343
318 case X25_RESET_REQUEST: 344 case X25_RESET_REQUEST:
319 x25_write_internal(sk, X25_RESET_CONFIRMATION); 345 x25_write_internal(sk, X25_RESET_CONFIRMATION);
320 case X25_RESET_CONFIRMATION: { 346 case X25_RESET_CONFIRMATION: {
321 struct x25_sock *x25 = x25_sk(sk);
322
323 x25_stop_timer(sk); 347 x25_stop_timer(sk);
324 x25->condition = 0x00; 348 x25->condition = 0x00;
325 x25->va = 0; 349 x25->va = 0;
@@ -331,6 +355,9 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp
331 break; 355 break;
332 } 356 }
333 case X25_CLEAR_REQUEST: 357 case X25_CLEAR_REQUEST:
358 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
359 goto out_clear;
360
334 x25_write_internal(sk, X25_CLEAR_CONFIRMATION); 361 x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
335 x25_disconnect(sk, 0, skb->data[3], skb->data[4]); 362 x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
336 break; 363 break;
@@ -340,6 +367,12 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp
340 } 367 }
341 368
342 return 0; 369 return 0;
370
371out_clear:
372 x25_write_internal(sk, X25_CLEAR_REQUEST);
373 x25->state = X25_STATE_2;
374 x25_start_t23timer(sk);
375 return 0;
343} 376}
344 377
345/* Higher level upcall for a LAPB frame */ 378/* Higher level upcall for a LAPB frame */
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 037958ff8eed..4acacf3c6617 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -90,6 +90,9 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
90 break; 90 break;
91 91
92 case X25_DIAGNOSTIC: 92 case X25_DIAGNOSTIC:
93 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
94 break;
95
93 printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n", 96 printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n",
94 skb->data[3], skb->data[4], 97 skb->data[3], skb->data[4],
95 skb->data[5], skb->data[6]); 98 skb->data[5], skb->data[6]);
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 24a342ebc7f5..5170d52bfd96 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -269,7 +269,11 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
269 int *d, int *m) 269 int *d, int *m)
270{ 270{
271 struct x25_sock *x25 = x25_sk(sk); 271 struct x25_sock *x25 = x25_sk(sk);
272 unsigned char *frame = skb->data; 272 unsigned char *frame;
273
274 if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
275 return X25_ILLEGAL;
276 frame = skb->data;
273 277
274 *ns = *nr = *q = *d = *m = 0; 278 *ns = *nr = *q = *d = *m = 0;
275 279
@@ -294,6 +298,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
294 if (frame[2] == X25_RR || 298 if (frame[2] == X25_RR ||
295 frame[2] == X25_RNR || 299 frame[2] == X25_RNR ||
296 frame[2] == X25_REJ) { 300 frame[2] == X25_REJ) {
301 if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
302 return X25_ILLEGAL;
303 frame = skb->data;
304
297 *nr = (frame[3] >> 1) & 0x7F; 305 *nr = (frame[3] >> 1) & 0x7F;
298 return frame[2]; 306 return frame[2];
299 } 307 }
@@ -308,6 +316,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
308 316
309 if (x25->neighbour->extended) { 317 if (x25->neighbour->extended) {
310 if ((frame[2] & 0x01) == X25_DATA) { 318 if ((frame[2] & 0x01) == X25_DATA) {
319 if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
320 return X25_ILLEGAL;
321 frame = skb->data;
322
311 *q = (frame[0] & X25_Q_BIT) == X25_Q_BIT; 323 *q = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
312 *d = (frame[0] & X25_D_BIT) == X25_D_BIT; 324 *d = (frame[0] & X25_D_BIT) == X25_D_BIT;
313 *m = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT; 325 *m = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT;
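
Across af_x25.c, x25_dev.c, x25_facilities.c, x25_in.c, x25_link.c and x25_subr.c the pattern is the same: every place that relied on skb->len comparisons (or on no check at all) before indexing skb->data now calls pskb_may_pull() for the exact number of bytes it is about to read, and re-reads skb->data afterwards because the pull may reallocate the header. A kernel-style sketch of that parsing discipline (example_parse and the element layout are illustrative):

#include <linux/errno.h>
#include <linux/skbuff.h>

static int example_parse(struct sk_buff *skb)
{
	unsigned char *p;
	unsigned int len;

	if (!pskb_may_pull(skb, 1))		/* length byte present in the linear area? */
		return -EINVAL;
	len = skb->data[0];

	if (!pskb_may_pull(skb, 1 + len))	/* whole element present? */
		return -EINVAL;
	p = skb->data + 1;			/* reload after the pull: data may have moved */

	/* ... parse len bytes at p ... */
	return 1 + len;
}
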
diff --git a/security/security.c b/security/security.c
index 0e4fccfef12c..d9e153390926 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1097,6 +1097,7 @@ void security_sk_clone(const struct sock *sk, struct sock *newsk)
1097{ 1097{
1098 security_ops->sk_clone_security(sk, newsk); 1098 security_ops->sk_clone_security(sk, newsk);
1099} 1099}
1100EXPORT_SYMBOL(security_sk_clone);
1100 1101
1101void security_sk_classify_flow(struct sock *sk, struct flowi *fl) 1102void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
1102{ 1103{
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index e9a2a8795d1b..191284a1c0ae 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2370,6 +2370,7 @@ static int azx_dev_free(struct snd_device *device)
2370static struct snd_pci_quirk position_fix_list[] __devinitdata = { 2370static struct snd_pci_quirk position_fix_list[] __devinitdata = {
2371 SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB), 2371 SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
2372 SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB), 2372 SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
2373 SND_PCI_QUIRK(0x1028, 0x02c6, "Dell Inspiron 1010", POS_FIX_LPIB),
2373 SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB), 2374 SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
2374 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), 2375 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
2375 SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), 2376 SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 7696d05b9356..76752d8ea733 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3110,6 +3110,7 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3110 SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD), 3110 SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
3111 SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS), 3111 SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
3112 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), 3112 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
3113 SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO),
3113 SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), 3114 SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
3114 SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), 3115 SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
3115 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS), 3116 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),