author	David S. Miller <davem@davemloft.net>	2013-09-05 14:58:52 -0400
committer	David S. Miller <davem@davemloft.net>	2013-09-05 14:58:52 -0400
commit	06c54055bebf919249aa1eb68312887c3cfe77b4 (patch)
tree	223a49c09e5d26516ed0161b8a52d08454ae028e
parent	1a5bbfc3d6b700178b75743a2ba1fd2e58a8f36f (diff)
parent	e2e5c4c07caf810d7849658dca42f598b3938e21 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
	net/bridge/br_multicast.c
	net/ipv6/sit.c

The conflicts were minor:

1) sit.c changes overlap with the change to the ip_tunnel_xmit()
   signature.

2) br_multicast.c had an overlap between computing max_delay using
   msecs_to_jiffies and turning MLDV2_MRC() into an inline function
   with a name using lowercase instead of uppercase letters.

3) stmmac had two overlapping changes, one which conditionally
   allocated and hooked up a dma_cfg based upon the presence of the
   pbl OF property, and another one handling store-and-forward DMA
   mode. The latter should not go into the new of_find_property()
   basic block.

Signed-off-by: David S. Miller <davem@davemloft.net>
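For context on conflict 2: the MLDv2 Maximum Response Code uses the
mantissa/exponent encoding from RFC 3810, which is what the renamed
mldv2_mrc() helper decodes before one side of the merge feeds the
result through msecs_to_jiffies() to compute max_delay. A minimal
sketch of that decoding, assuming the RFC's encoding rules rather
than quoting the exact post-merge br_multicast.c source:

	#include <stdint.h>

	/*
	 * Illustrative decode of an MLDv2 Maximum Response Code
	 * (RFC 3810, section 5.1.3) into milliseconds; not the
	 * kernel's exact code. Values below 32768 are literal;
	 * larger values pack a 3-bit exponent and a 12-bit
	 * mantissa: (mant | 0x1000) << (exp + 3).
	 */
	static inline unsigned long mldv2_mrc_to_msecs(uint16_t mrc)
	{
		if (mrc < 32768)
			return mrc;
		return ((mrc & 0x0fff) | 0x1000) << (((mrc >> 12) & 0x7) + 3);
	}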
-rw-r--r--	MAINTAINERS	2
-rw-r--r--	Makefile	2
-rw-r--r--	arch/arc/lib/strchr-700.S	10
-rw-r--r--	arch/arm/kernel/fiq.c	3
-rw-r--r--	arch/arm/kernel/machine_kexec.c	1
-rw-r--r--	arch/arm/mach-prima2/common.c	2
-rw-r--r--	arch/arm/mm/Kconfig	9
-rw-r--r--	arch/powerpc/Kconfig	1
-rw-r--r--	arch/powerpc/include/asm/page.h	10
-rw-r--r--	arch/powerpc/kernel/lparcfg.c	22
-rw-r--r--	drivers/acpi/video.c	11
-rw-r--r--	drivers/ata/libata-pmp.c	12
-rw-r--r--	drivers/ata/sata_fsl.c	5
-rw-r--r--	drivers/ata/sata_highbank.c	4
-rw-r--r--	drivers/base/memory.c	2
-rw-r--r--	drivers/base/regmap/regcache-rbtree.c	2
-rw-r--r--	drivers/gpu/drm/gma500/psb_intel_sdvo.c	3
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h	4
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	12
-rw-r--r--	drivers/gpu/drm/nouveau/core/core/mm.c	4
-rw-r--r--	drivers/gpu/drm/nouveau/core/include/subdev/mc.h	7
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c	12
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c	4
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c	34
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/mc/base.c	6
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c	3
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c	3
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c	3
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c	3
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c	3
-rw-r--r--	drivers/gpu/drm/nouveau/dispnv04/crtc.c	58
-rw-r--r--	drivers/gpu/drm/nouveau/dispnv04/disp.h	1
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_display.c	3
-rw-r--r--	drivers/gpu/drm/nouveau/nv40_pm.c	2
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c	58
-rw-r--r--	drivers/iio/light/adjd_s311.c	3
-rw-r--r--	drivers/input/joystick/xpad.c	1
-rw-r--r--	drivers/input/mouse/elantech.c	44
-rw-r--r--	drivers/input/mouse/elantech.h	1
-rw-r--r--	drivers/input/serio/Kconfig	3
-rw-r--r--	drivers/input/tablet/wacom_wac.c	10
-rw-r--r--	drivers/irqchip/irq-sirfsoc.c	18
-rw-r--r--	drivers/isdn/mISDN/dsp_core.c	4
-rw-r--r--	drivers/net/ethernet/8390/Kconfig	2
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c	9
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c	3
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c	9
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c	63
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c	31
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h	3
-rw-r--r--	drivers/net/ethernet/broadcom/tg3.c	18
-rw-r--r--	drivers/net/ethernet/calxeda/xgmac.c	195
-rw-r--r--	drivers/net/ethernet/emulex/benet/be_main.c	4
-rw-r--r--	drivers/net/ethernet/freescale/fec.h	3
-rw-r--r--	drivers/net/ethernet/freescale/fec_main.c	123
-rw-r--r--	drivers/net/ethernet/jme.c	2
-rw-r--r--	drivers/net/ethernet/marvell/mvneta.c	29
-rw-r--r--	drivers/net/ethernet/qlogic/netxen/netxen_nic.h	1
-rw-r--r--	drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c	2
-rw-r--r--	drivers/net/ethernet/renesas/sh_eth.c	14
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c	21
-rw-r--r--	drivers/net/ethernet/toshiba/ps3_gelic_net.c	3
-rw-r--r--	drivers/net/ethernet/toshiba/ps3_gelic_net.h	1
-rw-r--r--	drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c	1
-rw-r--r--	drivers/net/usb/cdc_mbim.c	4
-rw-r--r--	drivers/s390/scsi/zfcp_erp.c	29
-rw-r--r--	drivers/s390/scsi/zfcp_qdio.c	8
-rw-r--r--	drivers/s390/scsi/zfcp_sysfs.c	14
-rw-r--r--	drivers/scsi/Kconfig	1
-rw-r--r--	drivers/staging/comedi/drivers.c	2
-rw-r--r--	drivers/tty/hvc/hvsi_lib.c	4
-rw-r--r--	drivers/usb/host/ohci-pci.c	7
-rw-r--r--	drivers/usb/phy/phy-fsl-usb.h	2
-rw-r--r--	drivers/usb/phy/phy-fsm-usb.c	2
-rw-r--r--	fs/bfs/inode.c	2
-rw-r--r--	fs/bio.c	20
-rw-r--r--	fs/dcache.c	68
-rw-r--r--	fs/efs/inode.c	2
-rw-r--r--	fs/hugetlbfs/inode.c	8
-rw-r--r--	fs/jfs/jfs_dtree.c	31
-rw-r--r--	fs/namei.c	16
-rw-r--r--	fs/namespace.c	2
-rw-r--r--	fs/ocfs2/super.c	2
-rw-r--r--	fs/proc/fd.c	2
-rw-r--r--	include/linux/dcache.h	20
-rw-r--r--	include/linux/lockref.h	71
-rw-r--r--	include/linux/nsproxy.h	6
-rw-r--r--	include/linux/regmap.h	1
-rw-r--r--	include/linux/wait.h	57
-rw-r--r--	include/net/busy_poll.h	1
-rw-r--r--	include/net/genetlink.h	20
-rw-r--r--	include/net/route.h	8
-rw-r--r--	include/net/xfrm.h	6
-rw-r--r--	include/uapi/linux/cm4000_cs.h	1
-rw-r--r--	include/uapi/linux/icmpv6.h	2
-rw-r--r--	ipc/msg.c	5
-rw-r--r--	kernel/cgroup.c	19
-rw-r--r--	kernel/cpuset.c	14
-rw-r--r--	kernel/fork.c	5
-rw-r--r--	kernel/nsproxy.c	27
-rw-r--r--	kernel/pid_namespace.c	4
-rw-r--r--	kernel/time/timer_list.c	41
-rw-r--r--	kernel/workqueue.c	9
-rw-r--r--	mm/mremap.c	21
-rw-r--r--	mm/shmem.c	8
-rw-r--r--	mm/slab.h	2
-rw-r--r--	net/bridge/br_device.c	2
-rw-r--r--	net/bridge/br_input.c	2
-rw-r--r--	net/bridge/br_mdb.c	14
-rw-r--r--	net/bridge/br_multicast.c	258
-rw-r--r--	net/bridge/br_private.h	57
-rw-r--r--	net/caif/cfctrl.c	3
-rw-r--r--	net/core/flow_dissector.c	11
-rw-r--r--	net/core/scm.c	2
-rw-r--r--	net/ipv4/ip_output.c	8
-rw-r--r--	net/ipv4/ipip.c	5
-rw-r--r--	net/ipv4/raw.c	3
-rw-r--r--	net/ipv4/tcp.c	3
-rw-r--r--	net/ipv4/tcp_input.c	9
-rw-r--r--	net/ipv4/tcp_output.c	4
-rw-r--r--	net/ipv4/xfrm4_output.c	16
-rw-r--r--	net/ipv4/xfrm4_state.c	1
-rw-r--r--	net/ipv6/addrconf.c	2
-rw-r--r--	net/ipv6/addrlabel.c	48
-rw-r--r--	net/ipv6/icmp.c	10
-rw-r--r--	net/ipv6/ip6_gre.c	5
-rw-r--r--	net/ipv6/ip6_output.c	3
-rw-r--r--	net/ipv6/ip6_tunnel.c	6
-rw-r--r--	net/ipv6/ndisc.c	14
-rw-r--r--	net/ipv6/raw.c	1
-rw-r--r--	net/ipv6/sit.c	11
-rw-r--r--	net/ipv6/tcp_ipv6.c	2
-rw-r--r--	net/ipv6/xfrm6_output.c	21
-rw-r--r--	net/ipv6/xfrm6_state.c	1
-rw-r--r--	net/netlink/genetlink.c	67
-rw-r--r--	net/sunrpc/xdr.c	9
-rw-r--r--	net/tipc/socket.c	4
-rw-r--r--	net/xfrm/xfrm_output.c	21
-rw-r--r--	net/xfrm/xfrm_policy.c	9
-rw-r--r--	net/xfrm/xfrm_state.c	7
-rw-r--r--	sound/isa/opti9xx/opti92x-ad1848.c	8
-rw-r--r--	sound/pci/hda/patch_hdmi.c	3
-rw-r--r--	sound/pci/hda/patch_realtek.c	1
143 files changed, 1483 insertions, 684 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index ecb83cdf06c0..2e1be9906d0a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6067,7 +6067,7 @@ M: Rob Herring <rob.herring@calxeda.com>
 M:	Pawel Moll <pawel.moll@arm.com>
 M:	Mark Rutland <mark.rutland@arm.com>
 M:	Stephen Warren <swarren@wwwdotorg.org>
-M:	Ian Campbell <ian.campbell@citrix.com>
+M:	Ian Campbell <ijc+devicetree@hellion.org.uk>
 L:	devicetree@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/
diff --git a/Makefile b/Makefile
index a5a55f4547c6..369882e4fc77 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Linux for Workgroups
 
 # *DOCUMENTATION*
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 99c10475d477..9c548c7cf001 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -39,9 +39,18 @@ ARC_ENTRY strchr
 	ld.a	r2,[r0,4]
 	sub	r12,r6,r7
 	bic	r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
 	and	r7,r12,r4
 	breq	r7,0,.Loop ; For speed, we want this branch to be unaligned.
 	b	.Lfound_char ; Likewise this one.
+#else
+	and	r12,r12,r4
+	breq	r12,0,.Loop ; For speed, we want this branch to be unaligned.
+	lsr_s	r12,r12,7
+	bic	r2,r7,r6
+	b.d	.Lfound_char_b
+	and_s	r2,r2,r12
+#endif
 ; /* We require this code address to be unaligned for speed... */
 .Laligned:
 	ld_s	r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
 	lsr	r7,r7,7
 
 	bic	r2,r7,r6
+.Lfound_char_b:
 	norm	r2,r2
 	sub_s	r0,r0,4
 	asr_s	r2,r2,3
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index fc7920288a3d..918875d96d5d 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -89,7 +89,8 @@ void set_fiq_handler(void *start, unsigned int length)
 
 	memcpy(base + offset, start, length);
 	if (!cache_is_vipt_nonaliasing())
-		flush_icache_range(base + offset, offset + length);
+		flush_icache_range((unsigned long)base + offset, offset +
+				   length);
 	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
 }
 
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index d7c82df69243..57221e349a7c 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -82,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused)
 	crash_save_cpu(&regs, smp_processor_id());
 	flush_cache_all();
 
+	set_cpu_online(smp_processor_id(), false);
 	atomic_dec(&waiting_for_crash_ipi);
 	while (1)
 		cpu_relax();
diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c
index 2c70f74fed5d..e110b6d4ae8c 100644
--- a/arch/arm/mach-prima2/common.c
+++ b/arch/arm/mach-prima2/common.c
@@ -42,7 +42,6 @@ static const char *atlas6_dt_match[] __initdata = {
 
 DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)")
 	/* Maintainer: Barry Song <baohua.song@csr.com> */
-	.nr_irqs	= 128,
 	.map_io		= sirfsoc_map_io,
 	.init_time	= sirfsoc_init_time,
 	.init_late	= sirfsoc_init_late,
@@ -59,7 +58,6 @@ static const char *prima2_dt_match[] __initdata = {
 
 DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)")
 	/* Maintainer: Barry Song <baohua.song@csr.com> */
-	.nr_irqs	= 128,
 	.map_io		= sirfsoc_map_io,
 	.init_time	= sirfsoc_init_time,
 	.dma_zone_size	= SZ_256M,
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index db5c2cab8fda..cd2c88e7a8f7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -809,15 +809,18 @@ config KUSER_HELPERS
 	  the CPU type fitted to the system. This permits binaries to be
 	  run on ARMv4 through to ARMv7 without modification.
 
+	  See Documentation/arm/kernel_user_helpers.txt for details.
+
 	  However, the fixed address nature of these helpers can be used
 	  by ROP (return orientated programming) authors when creating
 	  exploits.
 
 	  If all of the binaries and libraries which run on your platform
 	  are built specifically for your platform, and make no use of
-	  these helpers, then you can turn this option off. However,
-	  when such an binary or library is run, it will receive a SIGILL
-	  signal, which will terminate the program.
+	  these helpers, then you can turn this option off to hinder
+	  such exploits. However, in that case, if a binary or library
+	  relying on those helpers is run, it will receive a SIGILL signal,
+	  which will terminate the program.
 
 	  Say N here only if you are absolutely certain that you do not
 	  need these helpers; otherwise, the safe option is to say Y.
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index dbd9d3c991e8..9cf59816d3e9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -979,6 +979,7 @@ config RELOCATABLE
 	  must live at a different physical address than the primary
 	  kernel.
 
+# This value must have zeroes in the bottom 60 bits otherwise lots will break
 config PAGE_OFFSET
 	hex
 	default "0xc000000000000000"
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 988c812aab5b..b9f426212d3a 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -211,9 +211,19 @@ extern long long virt_phys_offset;
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
 #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
 #else
+#ifdef CONFIG_PPC64
+/*
+ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
+ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ */
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
+#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
+
+#else /* 32-bit, non book E */
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
 #endif
+#endif
 
 /*
  * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index d92f3871e9cf..e2a0a162299b 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -35,7 +35,13 @@
 #include <asm/vdso_datapage.h>
 #include <asm/vio.h>
 #include <asm/mmu.h>
+#include <asm/machdep.h>
 
+
+/*
+ * This isn't a module but we expose that to userspace
+ * via /proc so leave the definitions here
+ */
 #define MODULE_VERS "1.9"
 #define MODULE_NAME "lparcfg"
 
@@ -418,7 +424,8 @@ static void parse_em_data(struct seq_file *m)
 {
 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 
-	if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+	if (firmware_has_feature(FW_FEATURE_LPAR) &&
+	    plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
 		seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
 }
 
@@ -677,7 +684,6 @@ static int lparcfg_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations lparcfg_fops = {
-	.owner		= THIS_MODULE,
 	.read		= seq_read,
 	.write		= lparcfg_write,
 	.open		= lparcfg_open,
@@ -699,14 +705,4 @@ static int __init lparcfg_init(void)
 	}
 	return 0;
 }
-
-static void __exit lparcfg_cleanup(void)
-{
-	remove_proc_subtree("powerpc/lparcfg", NULL);
-}
-
-module_init(lparcfg_init);
-module_exit(lparcfg_cleanup);
-MODULE_DESCRIPTION("Interface for LPAR configuration data");
-MODULE_AUTHOR("Dave Engebretsen");
-MODULE_LICENSE("GPL");
+machine_device_initcall(pseries, lparcfg_init);
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index e1284b8dc6ee..3270d3c8ba4e 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -908,9 +908,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		device->cap._DDC = 1;
 	}
 
-	if (acpi_video_init_brightness(device))
-		return;
-
 	if (acpi_video_backlight_support()) {
 		struct backlight_properties props;
 		struct pci_dev *pdev;
@@ -920,6 +917,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		static int count = 0;
 		char *name;
 
+		result = acpi_video_init_brightness(device);
+		if (result)
+			return;
 		name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
 		if (!name)
 			return;
@@ -979,11 +979,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		if (result)
 			printk(KERN_ERR PREFIX "Create sysfs link\n");
 
-	} else {
-		/* Remove the brightness object. */
-		kfree(device->brightness->levels);
-		kfree(device->brightness);
-		device->brightness = NULL;
 	}
 }
 
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 1c41722bb7e2..20fd337a5731 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
 
 	/* Disable sending Early R_OK.
 	 * With "cached read" HDD testing and multiple ports busy on a SATA
-	 * host controller, 3726 PMP will very rarely drop a deferred
+	 * host controller, 3x26 PMP will very rarely drop a deferred
 	 * R_OK that was intended for the host. Symptom will be all
 	 * 5 drives under test will timeout, get reset, and recover.
 	 */
-	if (vendor == 0x1095 && devid == 0x3726) {
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
 		u32 reg;
 
 		err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to read Sil3726 Private Register";
+			reason = "failed to read Sil3x26 Private Register";
 			goto fail;
 		}
 		reg &= ~0x1;
 		err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to write Sil3726 Private Register";
+			reason = "failed to write Sil3x26 Private Register";
 			goto fail;
 		}
 	}
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
 	u16 devid = sata_pmp_gscr_devid(gscr);
 	struct ata_link *link;
 
-	if (vendor == 0x1095 && devid == 0x3726) {
-		/* sil3726 quirks */
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+		/* sil3x26 quirks */
 		ata_for_each_link(link, ap, EDGE) {
 			/* link reports offline after LPM */
 			link->flags |= ATA_LFLAG_NO_LPM;
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 19720a0a4a65..851bd3f43ac6 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 {
 	struct sata_fsl_host_priv *host_priv = host->private_data;
 	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned long flags;
 
 	if (count > ICC_MAX_INT_COUNT_THRESHOLD)
 		count = ICC_MAX_INT_COUNT_THRESHOLD;
@@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 	    (count > ICC_MIN_INT_COUNT_THRESHOLD))
 		ticks = ICC_SAFE_INT_TICKS;
 
-	spin_lock(&host->lock);
+	spin_lock_irqsave(&host->lock, flags);
 	iowrite32((count << 24 | ticks), hcr_base + ICC);
 
 	intr_coalescing_count = count;
 	intr_coalescing_ticks = ticks;
-	spin_unlock(&host->lock);
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
 		intr_coalescing_count, intr_coalescing_ticks);
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index d047d92a456f..e9a4f46d962e 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -86,11 +86,11 @@ struct ecx_plat_data {
 
 #define SGPIO_SIGNALS	3
 #define ECX_ACTIVITY_BITS	0x300000
-#define ECX_ACTIVITY_SHIFT	2
+#define ECX_ACTIVITY_SHIFT	0
 #define ECX_LOCATE_BITS		0x80000
 #define ECX_LOCATE_SHIFT	1
 #define ECX_FAULT_BITS		0x400000
-#define ECX_FAULT_SHIFT		0
+#define ECX_FAULT_SHIFT		2
 static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
 				  u32 shift)
 {
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2b7813ec6d02..ec386ee9cb22 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -141,6 +141,8 @@ static ssize_t show_mem_removable(struct device *dev,
 		container_of(dev, struct memory_block, dev);
 
 	for (i = 0; i < sections_per_block; i++) {
+		if (!present_section_nr(mem->start_section_nr + i))
+			continue;
 		pfn = section_nr_to_pfn(mem->start_section_nr + i);
 		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
 	}
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 5c1435c4e210..0fccc99881fd 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -332,7 +332,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
 	}
 
 	if (!rbnode->blklen) {
-		rbnode->blklen = sizeof(*rbnode);
+		rbnode->blklen = 1;
 		rbnode->base_reg = reg;
 	}
 
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 19e36603b23b..3bc8414533c9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
 			      &status))
 		goto log_fail;
 
-	while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+	while ((status == SDVO_CMD_STATUS_PENDING ||
+		status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
 		udelay(15);
 		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
 					      SDVO_I2C_CMD_STATUS,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6f514297c483..342f1f336168 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -752,6 +752,8 @@
 					   will not assert AGPBUSY# and will only
 					   be delivered when out of C3. */
 #define   INSTPM_FORCE_ORDERING	(1<<7) /* GEN6+ */
+#define   INSTPM_TLB_INVALIDATE	(1<<9)
+#define   INSTPM_SYNC_FLUSH	(1<<5)
 #define ACTHD	0x020c8
 #define FW_BLC	0x020d8
 #define FW_BLC2	0x020dc
@@ -4438,7 +4440,7 @@
 #define EDP_LINK_TRAIN_600MV_0DB_IVB		(0x30 <<22)
 #define EDP_LINK_TRAIN_600MV_3_5DB_IVB		(0x36 <<22)
 #define EDP_LINK_TRAIN_800MV_0DB_IVB		(0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x33 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x3e <<22)
 
 /* legacy values */
 #define EDP_LINK_TRAIN_500MV_0DB_IVB		(0x00 <<22)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 664118d8c1d6..079ef0129e74 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,6 +968,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 
 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
 	POSTING_READ(mmio);
+
+	/* Flush the TLB for this page */
+	if (INTEL_INFO(dev)->gen >= 6) {
+		u32 reg = RING_INSTPM(ring->mmio_base);
+		I915_WRITE(reg,
+			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+					      INSTPM_SYNC_FLUSH));
+		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+			     1000))
+			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+				  ring->name);
+	}
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index d8291724dbd4..7a4e0891c5f8 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
 	u32 splitoff;
 	u32 s, e;
 
+	BUG_ON(!type);
+
 	list_for_each_entry(this, &mm->free, fl_entry) {
 		e = this->offset + this->length;
 		s = this->offset;
@@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
 	struct nouveau_mm_node *prev, *this, *next;
 	u32 mask = align - 1;
 
+	BUG_ON(!type);
+
 	list_for_each_entry_reverse(this, &mm->free, fl_entry) {
 		u32 e = this->offset + this->length;
 		u32 s = this->offset;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index d5502267c30f..9d2cd2006250 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -20,8 +20,8 @@ nouveau_mc(void *obj)
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
 }
 
-#define nouveau_mc_create(p,e,o,d) \
-	nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,m,d) \
+	nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
 #define nouveau_mc_destroy(p) ({ \
 	struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
 })
@@ -33,7 +33,8 @@ nouveau_mc(void *obj)
 })
 
 int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
-		       struct nouveau_oclass *, int, void **);
+		       struct nouveau_oclass *, const struct nouveau_mc_intr *,
+		       int, void **);
 void _nouveau_mc_dtor(struct nouveau_object *);
 int _nouveau_mc_init(struct nouveau_object *);
 int _nouveau_mc_fini(struct nouveau_object *, bool);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
index 19e3a9a63a02..ab7ef0ac9e34 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
@@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	switch (pfb914 & 0x00000003) {
-	case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break;
-	case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
+	case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
 	case 0x00000003: break;
 	}
 
-	pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	pfb->ram->tags = nv_rd32(pfb, 0x100320);
+	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	ram->tags = nv_rd32(pfb, 0x100320);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
index 7192aa6e5577..63a6aab86028 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
@@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	pfb->ram->type = NV_MEM_TYPE_STOLEN;
+	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->type = NV_MEM_TYPE_STOLEN;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
index bcca883018f4..cce65cc56514 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -30,8 +30,9 @@ struct nvc0_ltcg_priv {
 	struct nouveau_ltcg base;
 	u32 part_nr;
 	u32 subp_nr;
-	struct nouveau_mm tags;
 	u32 num_tags;
+	u32 tag_base;
+	struct nouveau_mm tags;
 	struct nouveau_mm_node *tag_ram;
 };
 
@@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 	u32 tag_size, tag_margin, tag_align;
 	int ret;
 
-	nv_wr32(priv, 0x17e8d8, priv->part_nr);
-	if (nv_device(pfb)->card_type >= NV_E0)
-		nv_wr32(priv, 0x17e000, priv->part_nr);
-
 	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
 	priv->num_tags = (pfb->ram->size >> 17) / 4;
 	if (priv->num_tags > (1 << 17))
@@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 	tag_size += tag_align;
 	tag_size = (tag_size + 0xfff) >> 12; /* round up */
 
-	ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1,
+	ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1,
 			      &priv->tag_ram);
 	if (ret) {
 		priv->num_tags = 0;
@@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 		tag_base += tag_align - 1;
 		ret = do_div(tag_base, tag_align);
 
-		nv_wr32(priv, 0x17e8d4, tag_base);
+		priv->tag_base = tag_base;
 	}
 	ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
 
@@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	}
 	priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
 
-	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
-
 	ret = nvc0_ltcg_init_tag_ram(pfb, priv);
 	if (ret)
 		return ret;
@@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object)
 	nouveau_ltcg_destroy(ltcg);
 }
 
+static int
+nvc0_ltcg_init(struct nouveau_object *object)
+{
+	struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
+	struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+	int ret;
+
+	ret = nouveau_ltcg_init(ltcg);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+	nv_wr32(priv, 0x17e8d8, priv->part_nr);
+	if (nv_device(ltcg)->card_type >= NV_E0)
+		nv_wr32(priv, 0x17e000, priv->part_nr);
+	nv_wr32(priv, 0x17e8d4, priv->tag_base);
+	return 0;
+}
+
 struct nouveau_oclass
 nvc0_ltcg_oclass = {
 	.handle = NV_SUBDEV(LTCG, 0xc0),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvc0_ltcg_ctor,
 		.dtor = nvc0_ltcg_dtor,
-		.init = _nouveau_ltcg_init,
+		.init = nvc0_ltcg_init,
 		.fini = _nouveau_ltcg_fini,
 	},
 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 1c0330b8c9a4..ec9cd6f10f91 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -80,7 +80,9 @@ _nouveau_mc_dtor(struct nouveau_object *object)
 
 int
 nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
-		   struct nouveau_oclass *oclass, int length, void **pobject)
+		   struct nouveau_oclass *oclass,
+		   const struct nouveau_mc_intr *intr_map,
+		   int length, void **pobject)
 {
 	struct nouveau_device *device = nv_device(parent);
 	struct nouveau_mc *pmc;
@@ -92,6 +94,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
+	pmc->intr_map = intr_map;
+
 	ret = request_irq(device->pdev->irq, nouveau_mc_intr,
 			  IRQF_SHARED, "nouveau", pmc);
 	if (ret < 0)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 8c769715227b..64aa4edb0d9d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv04_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv04_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 51919371810f..d9891782bf28 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv44_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv04_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index f25fc5fc7dd1..2b1afe225db8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv50_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv50_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index e82fd21b5041..0d57b4d3e001 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv98_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv98_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c5da3babbc62..104175c5a2dd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nvc0_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nvc0_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0782bd2f1e04..6a13ffb53bdb 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -606,6 +606,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 		regp->ramdac_a34 = 0x1;
 }
 
+static int
+nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+	struct nv04_display *disp = nv04_display(crtc->dev);
+	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	int ret;
+
+	ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+	if (ret == 0) {
+		if (disp->image[nv_crtc->index])
+			nouveau_bo_unpin(disp->image[nv_crtc->index]);
+		nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
+	}
+
+	return ret;
+}
+
 /**
  * Sets up registers for the given mode/adjusted_mode pair.
  *
@@ -622,10 +640,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	struct drm_device *dev = crtc->dev;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	int ret;
 
 	NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index);
 	drm_mode_debug_printmodeline(adjusted_mode);
 
+	ret = nv_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
+
 	/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
 	nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
 
@@ -722,6 +745,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
 
 static void nv_crtc_destroy(struct drm_crtc *crtc)
 {
+	struct nv04_display *disp = nv04_display(crtc->dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 
 	if (!nv_crtc)
@@ -729,6 +753,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
 
 	drm_crtc_cleanup(crtc);
 
+	if (disp->image[nv_crtc->index])
+		nouveau_bo_unpin(disp->image[nv_crtc->index]);
+	nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+
 	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
 	nouveau_bo_unpin(nv_crtc->cursor.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
@@ -754,6 +782,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
 }
 
 static void
+nv_crtc_disable(struct drm_crtc *crtc)
+{
+	struct nv04_display *disp = nv04_display(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	if (disp->image[nv_crtc->index])
+		nouveau_bo_unpin(disp->image[nv_crtc->index]);
+	nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+}
+
+static void
 nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
 		  uint32_t size)
 {
@@ -791,7 +829,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	struct drm_framebuffer *drm_fb;
 	struct nouveau_framebuffer *fb;
 	int arb_burst, arb_lwm;
-	int ret;
 
 	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
 
@@ -801,10 +838,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 		return 0;
 	}
 
-
 	/* If atomic, we want to switch to the fb we were passed, so
-	 * now we update pointers to do that.  (We don't pin; just
-	 * assume we're already pinned and update the base address.)
+	 * now we update pointers to do that.
 	 */
 	if (atomic) {
 		drm_fb = passed_fb;
@@ -812,17 +847,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	} else {
 		drm_fb = crtc->fb;
 		fb = nouveau_framebuffer(crtc->fb);
-		/* If not atomic, we can go ahead and pin, and unpin the
-		 * old fb we were passed.
-		 */
-		ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
-		if (ret)
-			return ret;
-
-		if (passed_fb) {
-			struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
-			nouveau_bo_unpin(ofb->nvbo);
-		}
 	}
 
 	nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -877,6 +901,9 @@ static int
 nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 			struct drm_framebuffer *old_fb)
 {
+	int ret = nv_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
 	return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
 }
 
@@ -1027,6 +1054,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
 	.mode_set_base = nv04_crtc_mode_set_base,
 	.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
 	.load_lut = nv_crtc_gamma_load,
+	.disable = nv_crtc_disable,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index a0a031dad13f..9928187f0a7d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -81,6 +81,7 @@ struct nv04_display {
 	uint32_t saved_vga_font[4][16384];
 	uint32_t dac_users[4];
 	struct nouveau_object *core;
+	struct nouveau_bo *image[2];
 };
 
 static inline struct nv04_display *
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 907d20ef6d4d..a03e75deacaf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -577,6 +577,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		ret = nv50_display_flip_next(crtc, fb, chan, 0);
 		if (ret)
 			goto fail_unreserve;
+	} else {
+		struct nv04_display *dispnv04 = nv04_display(dev);
+		nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
 	}
 
 	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index 3af5bcd0b203..625f80d53dc2 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
 	if (clk < pll->vco1.max_freq)
 		pll->vco2.max_freq = 0;
 
-	pclk->pll_calc(pclk, pll, clk, &coef);
+	ret = pclk->pll_calc(pclk, pll, clk, &coef);
 	if (ret == 0)
 		return -ERANGE;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 3751730764a5..1a0bf07fe54b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -29,7 +29,9 @@
 #include <drm/drmP.h>
 #include <drm/ttm/ttm_bo_driver.h>
 
-#define VMW_PPN_SIZE sizeof(unsigned long)
+#define VMW_PPN_SIZE (sizeof(unsigned long))
+/* A future safe maximum remap size. */
+#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
 
 static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 			 struct page *pages[],
@@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 {
 	SVGAFifoCmdDefineGMR2 define_cmd;
 	SVGAFifoCmdRemapGMR2 remap_cmd;
-	uint32_t define_size = sizeof(define_cmd) + 4;
-	uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
 	uint32_t *cmd;
 	uint32_t *cmd_orig;
+	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
+	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
+	uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
+	uint32_t remap_pos = 0;
+	uint32_t cmd_size = define_size + remap_size;
 	uint32_t i;
 
-	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
+	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
 	define_cmd.gmrId = gmr_id;
 	define_cmd.numPages = num_pages;
 
+	*cmd++ = SVGA_CMD_DEFINE_GMR2;
+	memcpy(cmd, &define_cmd, sizeof(define_cmd));
+	cmd += sizeof(define_cmd) / sizeof(*cmd);
+
+	/*
+	 * Need to split the command if there are too many
+	 * pages that goes into the gmr.
+	 */
+
 	remap_cmd.gmrId = gmr_id;
 	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
 		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
-	remap_cmd.offsetPages = 0;
-	remap_cmd.numPages = num_pages;
 
-	*cmd++ = SVGA_CMD_DEFINE_GMR2;
-	memcpy(cmd, &define_cmd, sizeof(define_cmd));
-	cmd += sizeof(define_cmd) / sizeof(uint32);
+	while (num_pages > 0) {
+		unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
+
+		remap_cmd.offsetPages = remap_pos;
+		remap_cmd.numPages = nr;
 
-	*cmd++ = SVGA_CMD_REMAP_GMR2;
-	memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
-	cmd += sizeof(remap_cmd) / sizeof(uint32);
+		*cmd++ = SVGA_CMD_REMAP_GMR2;
+		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+		cmd += sizeof(remap_cmd) / sizeof(*cmd);
 
-	for (i = 0; i < num_pages; ++i) {
-		if (VMW_PPN_SIZE <= 4)
-			*cmd = page_to_pfn(*pages++);
-		else
-			*((uint64_t *)cmd) = page_to_pfn(*pages++);
+		for (i = 0; i < nr; ++i) {
+			if (VMW_PPN_SIZE <= 4)
+				*cmd = page_to_pfn(*pages++);
+			else
+				*((uint64_t *)cmd) = page_to_pfn(*pages++);
 
-		cmd += VMW_PPN_SIZE / sizeof(*cmd);
+			cmd += VMW_PPN_SIZE / sizeof(*cmd);
+		}
+
+		num_pages -= nr;
+		remap_pos += nr;
 	}
 
-	vmw_fifo_commit(dev_priv, define_size + remap_size);
+	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
+
+	vmw_fifo_commit(dev_priv, cmd_size);
 
 	return 0;
 }
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 5f4749e60b04..c1cd5698b8ae 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -232,7 +232,8 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev,
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-		ret = adjd_s311_read_data(indio_dev, chan->address, val);
+		ret = adjd_s311_read_data(indio_dev,
+			ADJD_S311_DATA_REG(chan->address), val);
 		if (ret < 0)
 			return ret;
 		return IIO_VAL_INT;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index fa061d46527f..75e3b102ce45 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -167,6 +167,7 @@ static const struct xpad_device {
 	{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
 	{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+	{ 0x1689, 0xfd01, "Razer Onza Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
 	{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 57b2637e153a..8551dcaf24db 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -672,6 +672,7 @@ static int elantech_packet_check_v2(struct psmouse *psmouse)
  */
 static int elantech_packet_check_v3(struct psmouse *psmouse)
 {
+	struct elantech_data *etd = psmouse->private;
 	const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff };
 	unsigned char *packet = psmouse->packet;
 
@@ -682,19 +683,48 @@ static int elantech_packet_check_v3(struct psmouse *psmouse)
 	if (!memcmp(packet, debounce_packet, sizeof(debounce_packet)))
 		return PACKET_DEBOUNCE;
 
-	if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02)
-		return PACKET_V3_HEAD;
+	/*
+	 * If the hardware flag 'crc_enabled' is set the packets have
+	 * different signatures.
+	 */
+	if (etd->crc_enabled) {
+		if ((packet[3] & 0x09) == 0x08)
+			return PACKET_V3_HEAD;
+
+		if ((packet[3] & 0x09) == 0x09)
+			return PACKET_V3_TAIL;
+	} else {
+		if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02)
+			return PACKET_V3_HEAD;
 
-	if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
-		return PACKET_V3_TAIL;
+		if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
+			return PACKET_V3_TAIL;
+	}
 
 	return PACKET_UNKNOWN;
 }
 
 static int elantech_packet_check_v4(struct psmouse *psmouse)
 {
+	struct elantech_data *etd = psmouse->private;
 	unsigned char *packet = psmouse->packet;
 	unsigned char packet_type = packet[3] & 0x03;
+	bool sanity_check;
+
+	/*
+	 * Sanity check based on the constant bits of a packet.
+	 * The constant bits change depending on the value of
+	 * the hardware flag 'crc_enabled' but are the same for
+	 * every packet, regardless of the type.
+	 */
+	if (etd->crc_enabled)
+		sanity_check = ((packet[3] & 0x08) == 0x00);
+	else
+		sanity_check = ((packet[0] & 0x0c) == 0x04 &&
+				(packet[3] & 0x1c) == 0x10);
+
+	if (!sanity_check)
+		return PACKET_UNKNOWN;
 
 	switch (packet_type) {
 	case 0:
@@ -1313,6 +1343,12 @@ static int elantech_set_properties(struct elantech_data *etd)
 		etd->reports_pressure = true;
 	}
 
+	/*
+	 * The signatures of v3 and v4 packets change depending on the
+	 * value of this hardware flag.
+	 */
+	etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
+
 	return 0;
 }
 
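For context, a minimal user-space sketch of the dispatch the elantech hunks above implement. The mask values are taken directly from the diff; the surrounding types are simplified stand-ins, not the driver's:

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for the driver's packet classification. */
enum { PACKET_UNKNOWN, PACKET_V3_HEAD, PACKET_V3_TAIL };

static int check_v3(const uint8_t *p, bool crc_enabled)
{
	if (crc_enabled) {
		/* crc firmware: a different set of bits is constant */
		if ((p[3] & 0x09) == 0x08)
			return PACKET_V3_HEAD;
		if ((p[3] & 0x09) == 0x09)
			return PACKET_V3_TAIL;
	} else {
		if ((p[0] & 0x0c) == 0x04 && (p[3] & 0xcf) == 0x02)
			return PACKET_V3_HEAD;
		if ((p[0] & 0x0c) == 0x0c && (p[3] & 0xce) == 0x0c)
			return PACKET_V3_TAIL;
	}
	return PACKET_UNKNOWN;
}

The flag itself is a single firmware-version bit, as the elantech_set_properties() hunk shows: crc_enabled = (fw_version & 0x4000) != 0.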
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index 46db3be45ac9..036a04abaef7 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -129,6 +129,7 @@ struct elantech_data {
 	bool paritycheck;
 	bool jumpy_cursor;
 	bool reports_pressure;
+	bool crc_enabled;
 	unsigned char hw_version;
 	unsigned int fw_version;
 	unsigned int single_finger_reports;
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 94c17c28d268..1e691a3a79cb 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -22,7 +22,8 @@ config SERIO_I8042
 	tristate "i8042 PC Keyboard controller" if EXPERT || !X86
 	default y
 	depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
-		   (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390
+		   (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \
+		   !ARC
 	help
 	  i8042 is the chip over which the standard AT keyboard and PS/2
 	  mouse are connected to the computer. If you use these devices,
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 384fbcd0cee0..f3e91f0b57ae 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -2112,7 +2112,7 @@ static const struct wacom_features wacom_features_0xDA =
2112 { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 2112 { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
2113 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 2113 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
2114 .touch_max = 2 }; 2114 .touch_max = 2 };
2115static struct wacom_features wacom_features_0xDB = 2115static const struct wacom_features wacom_features_0xDB =
2116 { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, 2116 { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
2117 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 2117 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
2118 .touch_max = 2 }; 2118 .touch_max = 2 };
@@ -2127,6 +2127,12 @@ static const struct wacom_features wacom_features_0xDF =
2127 { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023, 2127 { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023,
2128 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 2128 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
2129 .touch_max = 16 }; 2129 .touch_max = 16 };
2130static const struct wacom_features wacom_features_0x300 =
2131 { "Wacom Bamboo One S", WACOM_PKGLEN_BBPEN, 14720, 9225, 1023,
2132 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2133static const struct wacom_features wacom_features_0x301 =
2134 { "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023,
2135 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2130static const struct wacom_features wacom_features_0x6004 = 2136static const struct wacom_features wacom_features_0x6004 =
2131 { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255, 2137 { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
2132 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2138 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2253,6 +2259,8 @@ const struct usb_device_id wacom_ids[] = {
 	{ USB_DEVICE_WACOM(0x100) },
 	{ USB_DEVICE_WACOM(0x101) },
 	{ USB_DEVICE_WACOM(0x10D) },
+	{ USB_DEVICE_WACOM(0x300) },
+	{ USB_DEVICE_WACOM(0x301) },
 	{ USB_DEVICE_WACOM(0x304) },
 	{ USB_DEVICE_WACOM(0x4001) },
 	{ USB_DEVICE_WACOM(0x47) },
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c
index 69ea44ebcf61..4851afae38dc 100644
--- a/drivers/irqchip/irq-sirfsoc.c
+++ b/drivers/irqchip/irq-sirfsoc.c
@@ -23,7 +23,7 @@
 #define SIRFSOC_INT_RISC_LEVEL1		0x0024
 #define SIRFSOC_INIT_IRQ_ID		0x0038
 
-#define SIRFSOC_NUM_IRQS		128
+#define SIRFSOC_NUM_IRQS		64
 
 static struct irq_domain *sirfsoc_irqdomain;
 
@@ -32,15 +32,18 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
 {
 	struct irq_chip_generic *gc;
 	struct irq_chip_type *ct;
+	int ret;
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 
-	gc = irq_alloc_generic_chip("SIRFINTC", 1, irq_start, base, handle_level_irq);
-	ct = gc->chip_types;
+	ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc",
+		handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
 
+	gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start);
+	gc->reg_base = base;
+	ct = gc->chip_types;
 	ct->chip.irq_mask = irq_gc_mask_clr_bit;
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
 	ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
-
-	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 0);
 }
 
 static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
@@ -60,9 +63,8 @@ static int __init sirfsoc_irq_init(struct device_node *np, struct device_node *p
 	if (!base)
 		panic("unable to map intc cpu registers\n");
 
-	/* using legacy because irqchip_generic does not work with linear */
-	sirfsoc_irqdomain = irq_domain_add_legacy(np, SIRFSOC_NUM_IRQS, 0, 0,
-						  &irq_domain_simple_ops, base);
+	sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS,
+						  &irq_generic_chip_ops, base);
 
 	sirfsoc_alloc_gc(base, 0, 32);
 	sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32);
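The migration above, a linear domain plus irq_alloc_domain_generic_chips() instead of a legacy domain plus per-chip setup, is a general pattern. A hedged sketch for a hypothetical controller ("myintc" and its mask register offset are made up; the API calls are the real ones used in the hunks; error handling elided):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *myintc_domain;	/* hypothetical driver state */

static void __init myintc_setup(struct device_node *np, void __iomem *base,
				unsigned int nr_irqs)
{
	struct irq_chip_generic *gc;
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;

	myintc_domain = irq_domain_add_linear(np, nr_irqs,
					      &irq_generic_chip_ops, base);
	/* one generic chip per 32-interrupt bank, level flow handler */
	irq_alloc_domain_generic_chips(myintc_domain, 32, 1, "myintc",
				       handle_level_irq, clr, 0,
				       IRQ_GC_INIT_MASK_CACHE);

	gc = irq_get_domain_generic_chip(myintc_domain, 0);
	gc->reg_base = base;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
	gc->chip_types[0].regs.mask = 0x10;	/* hypothetical mask register */
}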
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 22b720ec80cb..77025f5cb57d 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -288,8 +288,10 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
 	u8   *data;
 	int  len;
 
-	if (skb->len < sizeof(int))
+	if (skb->len < sizeof(int)) {
 		printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__);
+		return -EINVAL;
+	}
 	cont = *((int *)skb->data);
 	len = skb->len - sizeof(int);
 	data = skb->data + sizeof(int);
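The bug fixed here is the classic missing-brace one: without braces, only the printk was guarded and a too-short packet was still parsed. A minimal stand-alone illustration of validating a length-prefixed buffer before reading its header (plain C, not the mISDN code):

#include <errno.h>
#include <stddef.h>
#include <string.h>

/* Parse a buffer that must begin with an int-sized control word. */
static int parse_control(const unsigned char *buf, size_t len, int *cont)
{
	if (len < sizeof(int))
		return -EINVAL;		/* reject before touching buf */
	memcpy(cont, buf, sizeof(int));
	return 0;
}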
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index a5f91e1e8fe3..becef25fa194 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -148,7 +148,7 @@ config PCMCIA_PCNET
 
 config NE_H8300
 	tristate "NE2000 compatible support for H8/300"
-	depends on H8300
+	depends on H8300H_AKI3068NET || H8300H_H8MAX
 	---help---
 	  Say Y here if you want to use the NE2000 compatible
 	  controller on the Renesas H8/300 processor.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8d726f6e1c52..2361bf236ce3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -53,6 +53,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
 	int old_max_eth_txqs, new_max_eth_txqs;
 	int old_txdata_index = 0, new_txdata_index = 0;
+	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
 
 	/* Copy the NAPI object as it has been already initialized */
 	from_fp->napi = to_fp->napi;
@@ -61,6 +62,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 	memcpy(to_fp, from_fp, sizeof(*to_fp));
 	to_fp->index = to;
 
+	/* Retain the tpa_info of the original `to' version as we don't want
+	 * 2 FPs to contain the same tpa_info pointer.
+	 */
+	to_fp->tpa_info = old_tpa_info;
+
 	/* move sp_objs contents as well, as their indices match fp ones */
 	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
 
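The hunk above illustrates a general hazard: a struct-wide memcpy() clobbers owned pointers, leaving two objects aliasing one buffer and inviting a double free. A small generic sketch of the save-copy-restore idiom (the types are illustrative, not bnx2x's):

#include <string.h>

struct ring {
	void *dma_area;		/* owned buffer, must stay unique per ring */
	int   index;
	/* ... many other fields worth copying wholesale ... */
};

static void ring_move(struct ring *dst, const struct ring *src, int new_index)
{
	void *saved = dst->dma_area;	/* save before the bulk copy */

	memcpy(dst, src, sizeof(*dst));
	dst->dma_area = saved;		/* restore: no aliased ownership */
	dst->index = new_index;
}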
@@ -2959,8 +2965,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	if (IS_PF(bp)) {
 		if (CNIC_LOADED(bp))
 			bnx2x_free_mem_cnic(bp);
-		bnx2x_free_mem(bp);
 	}
+	bnx2x_free_mem(bp);
+
 	bp->state = BNX2X_STATE_CLOSED;
 	bp->cnic_loaded = false;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9d64b988ab34..664568420c9b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6501,12 +6501,13 @@ static int bnx2x_link_initialize(struct link_params *params,
 		struct bnx2x_phy *phy = &params->phy[INT_PHY];
 		if (vars->line_speed == SPEED_AUTO_NEG &&
 		    (CHIP_IS_E1x(bp) ||
-		     CHIP_IS_E2(bp)))
+		     CHIP_IS_E2(bp))) {
 			bnx2x_set_parallel_detection(phy, params);
 			if (params->phy[INT_PHY].config_init)
 				params->phy[INT_PHY].config_init(phy,
 								 params,
 								 vars);
+		}
 	}
 
 	/* Init external phy*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c69990d2170e..285f2a59a3a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -7855,12 +7855,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
 {
 	int i;
 
-	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
-		       sizeof(struct host_sp_status_block));
-
 	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
 		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
 
+	if (IS_VF(bp))
+		return;
+
+	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+		       sizeof(struct host_sp_status_block));
+
 	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
 		       sizeof(struct bnx2x_slowpath));
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 73731eb68f2a..b26eb83069b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -545,23 +545,6 @@ static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
 	return 0;
 }
 
-static int
-bnx2x_vfop_config_vlan0(struct bnx2x *bp,
-			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
-			bool add)
-{
-	int rc;
-
-	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
-				       BNX2X_VLAN_MAC_DEL;
-	vlan_mac->user_req.u.vlan.vlan = 0;
-
-	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
-	if (rc == -EEXIST)
-		rc = 0;
-	return rc;
-}
-
 static int bnx2x_vfop_config_list(struct bnx2x *bp,
 				  struct bnx2x_vfop_filters *filters,
 				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
@@ -666,30 +649,14 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
 	case BNX2X_VFOP_VLAN_CONFIG_LIST:
 		/* next state */
-		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
-
-		/* remove vlan0 - could be no-op */
-		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
-		if (vfop->rc)
-			goto op_err;
+		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
 
-		/* Do vlan list config. if this operation fails we try to
-		 * restore vlan0 to keep the queue is working order
-		 */
+		/* do list config */
 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
 		if (!vfop->rc) {
 			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
 			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
 		}
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
-
-	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
-		/* next state */
-		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
-		if (list_empty(&obj->head))
-			/* add vlan0 */
-			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 
 	default:
@@ -2833,6 +2800,18 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
 	return 0;
 }
 
+struct set_vf_state_cookie {
+	struct bnx2x_virtf *vf;
+	u8 state;
+};
+
+void bnx2x_set_vf_state(void *cookie)
+{
+	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
+
+	p->vf->state = p->state;
+}
+
 /* VFOP close (teardown the queues, delete mcasts and close HW) */
 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
@@ -2883,7 +2862,19 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 op_err:
 	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
 op_done:
-	vf->state = VF_ACQUIRED;
+
+	/* need to make sure there are no outstanding stats ramrods which may
+	 * cause the device to access the VF's stats buffer which it will free
+	 * as soon as we return from the close flow.
+	 */
+	{
+		struct set_vf_state_cookie cookie;
+
+		cookie.vf = vf;
+		cookie.state = VF_ACQUIRED;
+		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+	}
+
 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
 	bnx2x_vfop_end(bp, vf, vfop);
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index d63d1327b051..86436c77af03 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -522,20 +522,16 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
 /* should be called under stats_sema */
 static void __bnx2x_stats_start(struct bnx2x *bp)
 {
-	/* vfs travel through here as part of the statistics FSM, but no action
-	 * is required
-	 */
-	if (IS_VF(bp))
-		return;
-
-	if (bp->port.pmf)
-		bnx2x_port_stats_init(bp);
+	if (IS_PF(bp)) {
+		if (bp->port.pmf)
+			bnx2x_port_stats_init(bp);
 
-	else if (bp->func_stx)
-		bnx2x_func_stats_init(bp);
+		else if (bp->func_stx)
+			bnx2x_func_stats_init(bp);
 
-	bnx2x_hw_stats_post(bp);
-	bnx2x_storm_stats_post(bp);
+		bnx2x_hw_stats_post(bp);
+		bnx2x_storm_stats_post(bp);
+	}
 
 	bp->stats_started = true;
 }
@@ -1997,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
 			 estats->mac_discard);
 	}
 }
+
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+			   void (func_to_exec)(void *cookie),
+			   void *cookie)
+{
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
+	bnx2x_stats_comp(bp);
+	func_to_exec(cookie);
+	__bnx2x_stats_start(bp);
+	up(&bp->stats_sema);
+}
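Taken together with the bnx2x_sriov.c hunk, this adds a small "run a callback while the stats machine is quiesced" primitive: take the semaphore, wait out pending completions, run the caller's function with its cookie, then restart stats. A stripped-down illustration of the cookie pattern itself (generic userspace C, not the driver):

#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Generic "execute under a lock with a context cookie" helper. */
static void safe_exec(void (*fn)(void *cookie), void *cookie)
{
	pthread_mutex_lock(&state_lock);
	fn(cookie);			/* runs with the state quiesced */
	pthread_mutex_unlock(&state_lock);
}

struct set_state_cookie { int *obj; int state; };

static void set_state(void *cookie)
{
	struct set_state_cookie *c = cookie;
	*c->obj = c->state;
}

Callers stack-allocate the cookie, fill it, and pass both, exactly as bnx2x_vfop_close() does above.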
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 853824d258e8..f35845006cdd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -539,6 +539,9 @@ struct bnx2x;
 void bnx2x_memset_stats(struct bnx2x *bp);
 void bnx2x_stats_init(struct bnx2x *bp);
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+			   void (func_to_exec)(void *cookie),
+			   void *cookie);
 
 /**
  * bnx2x_save_statistics - save statistics when unloading.
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 2e55ee29cf13..5701f3d1a169 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3030,6 +3030,19 @@ static bool tg3_phy_power_bug(struct tg3 *tp)
 	return false;
 }
 
+static bool tg3_phy_led_bug(struct tg3 *tp)
+{
+	switch (tg3_asic_rev(tp)) {
+	case ASIC_REV_5719:
+		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+		    !tp->pci_fn)
+			return true;
+		return false;
+	}
+
+	return false;
+}
+
 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 {
 	u32 val;
@@ -3077,8 +3090,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 		}
 		return;
 	} else if (do_low_power) {
-		tg3_writephy(tp, MII_TG3_EXT_CTRL,
-			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+		if (!tg3_phy_led_bug(tp))
+			tg3_writephy(tp, MII_TG3_EXT_CTRL,
+				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
 
 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
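The helper added above has the usual shape of a hardware-quirk predicate: centralize the "is this the broken combination?" test and gate the offending register write on it. A minimal generic sketch (revisions and flags are made-up stand-ins, not tg3's):

#include <stdbool.h>

enum asic_rev { REV_A, REV_B };		/* illustrative only */

struct chip {
	enum asic_rev rev;
	unsigned int flags;
#define CHIP_FLAG_SERDES 0x1
	int fn;				/* PCI function number */
};

static bool chip_has_led_bug(const struct chip *c)
{
	switch (c->rev) {
	case REV_B:	/* only rev B, SerDes mode, function 0 */
		return (c->flags & CHIP_FLAG_SERDES) && c->fn == 0;
	default:
		return false;
	}
}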
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 7cb148c495c9..78d6d6b970e1 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -353,11 +353,9 @@ struct xgmac_extra_stats {
 	/* Receive errors */
 	unsigned long rx_watchdog;
 	unsigned long rx_da_filter_fail;
-	unsigned long rx_sa_filter_fail;
 	unsigned long rx_payload_error;
 	unsigned long rx_ip_header_error;
 	/* Tx/Rx IRQ errors */
-	unsigned long tx_undeflow;
 	unsigned long tx_process_stopped;
 	unsigned long rx_buf_unav;
 	unsigned long rx_process_stopped;
@@ -393,6 +391,7 @@ struct xgmac_priv {
 	char rx_pause;
 	char tx_pause;
 	int wolopts;
+	struct work_struct tx_timeout_work;
 };
 
 /* XGMAC Configuration Settings */
@@ -409,6 +408,9 @@ struct xgmac_priv {
 #define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
 #define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)
 
+#define tx_dma_ring_space(p) \
+	dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
+
 /* XGMAC Descriptor Access Helpers */
 static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
 {
@@ -421,7 +423,7 @@ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
 
 static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
 {
-	u32 len = cpu_to_le32(p->flags);
+	u32 len = le32_to_cpu(p->buf_size);
 	return (len & DESC_BUFFER1_SZ_MASK) +
 		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
 }
@@ -464,11 +466,23 @@ static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
 	p->flags = cpu_to_le32(tmpflags);
 }
 
+static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
+{
+	u32 tmpflags = le32_to_cpu(p->flags);
+	tmpflags &= TXDESC_END_RING;
+	p->flags = cpu_to_le32(tmpflags);
+}
+
 static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
 {
 	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
 }
 
+static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
+{
+	return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
+}
+
 static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
 {
 	return le32_to_cpu(p->buf1_addr);
@@ -609,10 +623,15 @@ static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
 {
 	u32 data;
 
-	data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
-	writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
-	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
-	writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+	if (addr) {
+		data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+		writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+		data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+		writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+	} else {
+		writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
+		writel(0, ioaddr + XGMAC_ADDR_LOW(num));
+	}
 }
 
 static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
@@ -683,9 +702,14 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
 		if (unlikely(skb == NULL))
 			break;
 
-		priv->rx_skbuff[entry] = skb;
 		paddr = dma_map_single(priv->device, skb->data,
-				       bufsz, DMA_FROM_DEVICE);
+				       priv->dma_buf_sz - NET_IP_ALIGN,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(priv->device, paddr)) {
+			dev_kfree_skb_any(skb);
+			break;
+		}
+		priv->rx_skbuff[entry] = skb;
 		desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
 	}
 
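Note the reordering: the skb is published into rx_skbuff[] only after dma_map_single() succeeds, so a mapping failure leaves no stale pointer behind. The idiom in isolation, as a self-contained sketch with a stand-in map step:

#include <stdlib.h>

/* Stand-in for the device mapping step; always "succeeds" here. */
static int map_for_device(void *buf, unsigned long *handle)
{
	*handle = (unsigned long)buf;
	return 0;
}

/* Commit a buffer into a table only after its setup succeeded. */
static int install_buffer(void **table, int slot, void *buf)
{
	unsigned long handle;

	if (map_for_device(buf, &handle)) {
		free(buf);
		return -1;	/* table[slot] never saw buf */
	}
	table[slot] = buf;	/* publish last */
	return 0;
}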
@@ -782,20 +806,21 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
 		return;
 
 	for (i = 0; i < DMA_RX_RING_SZ; i++) {
-		if (priv->rx_skbuff[i] == NULL)
+		struct sk_buff *skb = priv->rx_skbuff[i];
+		if (skb == NULL)
 			continue;
 
 		p = priv->dma_rx + i;
 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 priv->dma_buf_sz, DMA_FROM_DEVICE);
-		dev_kfree_skb_any(priv->rx_skbuff[i]);
+				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
 		priv->rx_skbuff[i] = NULL;
 	}
 }
 
 static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
 {
-	int i, f;
+	int i;
 	struct xgmac_dma_desc *p;
 
 	if (!priv->tx_skbuff)
@@ -806,16 +831,15 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
 			continue;
 
 		p = priv->dma_tx + i;
-		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 desc_get_buf_len(p), DMA_TO_DEVICE);
-
-		for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
-			p = priv->dma_tx + i++;
+		if (desc_get_tx_fs(p))
+			dma_unmap_single(priv->device, desc_get_buf_addr(p),
+					 desc_get_buf_len(p), DMA_TO_DEVICE);
+		else
 			dma_unmap_page(priv->device, desc_get_buf_addr(p),
 				       desc_get_buf_len(p), DMA_TO_DEVICE);
-		}
 
-		dev_kfree_skb_any(priv->tx_skbuff[i]);
+		if (desc_get_tx_ls(p))
+			dev_kfree_skb_any(priv->tx_skbuff[i]);
 		priv->tx_skbuff[i] = NULL;
 	}
 }
@@ -852,8 +876,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
  */
 static void xgmac_tx_complete(struct xgmac_priv *priv)
 {
-	int i;
-
 	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
 		unsigned int entry = priv->tx_tail;
 		struct sk_buff *skb = priv->tx_skbuff[entry];
@@ -863,55 +885,45 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
 		if (desc_get_owner(p))
 			break;
 
-		/* Verify tx error by looking at the last segment */
-		if (desc_get_tx_ls(p))
-			desc_get_tx_status(priv, p);
-
 		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
 			   priv->tx_head, priv->tx_tail);
 
-		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 desc_get_buf_len(p), DMA_TO_DEVICE);
-
-		priv->tx_skbuff[entry] = NULL;
-		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
-
-		if (!skb) {
-			continue;
-		}
-
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
-							      DMA_TX_RING_SZ);
-			p = priv->dma_tx + priv->tx_tail;
-
+		if (desc_get_tx_fs(p))
+			dma_unmap_single(priv->device, desc_get_buf_addr(p),
+					 desc_get_buf_len(p), DMA_TO_DEVICE);
+		else
 			dma_unmap_page(priv->device, desc_get_buf_addr(p),
 				       desc_get_buf_len(p), DMA_TO_DEVICE);
+
+		/* Check tx error on the last segment */
+		if (desc_get_tx_ls(p)) {
+			desc_get_tx_status(priv, p);
+			dev_kfree_skb(skb);
 		}
 
-		dev_kfree_skb(skb);
+		priv->tx_skbuff[entry] = NULL;
+		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
 	}
 
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
-	    MAX_SKB_FRAGS)
+	/* Ensure tx_tail is visible to xgmac_xmit */
+	smp_mb();
+	if (unlikely(netif_queue_stopped(priv->dev) &&
+	    (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
 		netif_wake_queue(priv->dev);
 }
 
-/**
- * xgmac_tx_err:
- * @priv: pointer to the private device structure
- * Description: it cleans the descriptors and restarts the transmission
- *   in case of errors.
- */
-static void xgmac_tx_err(struct xgmac_priv *priv)
+static void xgmac_tx_timeout_work(struct work_struct *work)
 {
-	u32 reg, value, inten;
+	u32 reg, value;
+	struct xgmac_priv *priv =
+		container_of(work, struct xgmac_priv, tx_timeout_work);
 
-	netif_stop_queue(priv->dev);
+	napi_disable(&priv->napi);
 
-	inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
 
+	netif_tx_lock(priv->dev);
+
 	reg = readl(priv->base + XGMAC_DMA_CONTROL);
 	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
 	do {
@@ -927,9 +939,15 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
 
 	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
 		priv->base + XGMAC_DMA_STATUS);
-	writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
 
+	netif_tx_unlock(priv->dev);
 	netif_wake_queue(priv->dev);
+
+	napi_enable(&priv->napi);
+
+	/* Enable interrupts */
+	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
+	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
 }
 
 static int xgmac_hw_init(struct net_device *dev)
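Replacing xgmac_tx_err(), which ran from hard-IRQ context, with a work item lets the recovery path use sleeping primitives such as napi_disable(). The general shape of deferring a heavyweight reset to a workqueue, sketched for a hypothetical driver (mydrv_* names and the priv layout are illustrative):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct mydrv_priv {
	struct napi_struct napi;
	struct net_device *dev;
	struct work_struct reset_work;	/* INIT_WORK()ed at probe time */
};

static void mydrv_reset_work(struct work_struct *work)
{
	struct mydrv_priv *priv =
		container_of(work, struct mydrv_priv, reset_work);

	napi_disable(&priv->napi);	/* may sleep: fine in a work item */
	netif_tx_lock(priv->dev);
	/* ... stop DMA, reclaim descriptors, restart DMA ... */
	netif_tx_unlock(priv->dev);
	netif_wake_queue(priv->dev);
	napi_enable(&priv->napi);
}

/* The irq handler or ndo_tx_timeout hook just queues it: */
static void mydrv_tx_timeout(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	schedule_work(&priv->reset_work);
}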
@@ -957,9 +975,7 @@ static int xgmac_hw_init(struct net_device *dev)
 		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
 	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
 
-	/* Enable interrupts */
-	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
-	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+	writel(0, ioaddr + XGMAC_DMA_INTR_ENA);
 
 	/* Mask power mgt interrupt */
 	writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
@@ -1027,6 +1043,10 @@ static int xgmac_open(struct net_device *dev)
 	napi_enable(&priv->napi);
 	netif_start_queue(dev);
 
+	/* Enable interrupts */
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
 	return 0;
 }
 
@@ -1087,7 +1107,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(priv->device, paddr)) {
 		dev_kfree_skb(skb);
-		return -EIO;
+		return NETDEV_TX_OK;
 	}
 	priv->tx_skbuff[entry] = skb;
 	desc_set_buf_addr_and_size(desc, paddr, len);
@@ -1099,14 +1119,12 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
 					 DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->device, paddr)) {
-			dev_kfree_skb(skb);
-			return -EIO;
-		}
+		if (dma_mapping_error(priv->device, paddr))
+			goto dma_err;
 
 		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
 		desc = priv->dma_tx + entry;
-		priv->tx_skbuff[entry] = NULL;
+		priv->tx_skbuff[entry] = skb;
 
 		desc_set_buf_addr_and_size(desc, paddr, len);
 		if (i < (nfrags - 1))
@@ -1124,13 +1142,35 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	wmb();
 	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
 
+	writel(1, priv->base + XGMAC_DMA_TX_POLL);
+
 	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
 
-	writel(1, priv->base + XGMAC_DMA_TX_POLL);
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
-	    MAX_SKB_FRAGS)
+	/* Ensure tx_head update is visible to tx completion */
+	smp_mb();
+	if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
 		netif_stop_queue(dev);
+		/* Ensure netif_stop_queue is visible to tx completion */
+		smp_mb();
+		if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
+			netif_start_queue(dev);
+	}
+	return NETDEV_TX_OK;
 
+dma_err:
+	entry = priv->tx_head;
+	for ( ; i > 0; i--) {
+		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+		desc = priv->dma_tx + entry;
+		priv->tx_skbuff[entry] = NULL;
+		dma_unmap_page(priv->device, desc_get_buf_addr(desc),
+			       desc_get_buf_len(desc), DMA_TO_DEVICE);
+		desc_clear_tx_owner(desc);
+	}
+	desc = first;
+	dma_unmap_single(priv->device, desc_get_buf_addr(desc),
+			 desc_get_buf_len(desc), DMA_TO_DEVICE);
+	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
 
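The smp_mb() pairs introduced above implement the standard lock-free stop/wake protocol between the xmit path (producer) and the completion path (consumer): each side publishes its own update (tx_head or tx_tail, or the stopped bit) before re-reading the other side's state, so a wakeup cannot be lost in the window between "ring looks full" and "queue actually stopped". A condensed sketch of the two halves in terms of the driver's helpers (the function bodies are abridged, not the full code):

static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	/* ... queue skb, advance priv->tx_head ... */
	smp_mb();				/* publish tx_head first */
	if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		smp_mb();			/* publish stopped bit */
		if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
			netif_start_queue(dev);	/* completion raced us */
	}
	return NETDEV_TX_OK;
}

static void sketch_tx_complete(struct xgmac_priv *priv)
{
	/* ... reclaim descriptors, advance priv->tx_tail ... */
	smp_mb();				/* publish tx_tail first */
	if (netif_queue_stopped(priv->dev) &&
	    tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
		netif_wake_queue(priv->dev);
}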
@@ -1174,7 +1214,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
 
 			skb_put(skb, frame_len);
 			dma_unmap_single(priv->device, desc_get_buf_addr(p),
-					 frame_len, DMA_FROM_DEVICE);
+					 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
 
 			skb->protocol = eth_type_trans(skb, priv->dev);
 			skb->ip_summed = ip_checksum;
@@ -1225,9 +1265,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
 static void xgmac_tx_timeout(struct net_device *dev)
 {
 	struct xgmac_priv *priv = netdev_priv(dev);
-
-	/* Clear Tx resources and restart transmitting again */
-	xgmac_tx_err(priv);
+	schedule_work(&priv->tx_timeout_work);
 }
 
 /**
@@ -1286,6 +1324,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
 	if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
 		use_hash = true;
 		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+	} else {
+		use_hash = false;
 	}
 	netdev_for_each_mc_addr(ha, dev) {
 		if (use_hash) {
@@ -1302,6 +1342,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
 	}
 
 out:
+	for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
+		xgmac_set_mac_addr(ioaddr, NULL, reg);
 	for (i = 0; i < XGMAC_NUM_HASH; i++)
 		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
 
@@ -1366,7 +1408,6 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
 static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
 {
 	u32 intr_status;
-	bool tx_err = false;
 	struct net_device *dev = (struct net_device *)dev_id;
 	struct xgmac_priv *priv = netdev_priv(dev);
 	struct xgmac_extra_stats *x = &priv->xstats;
@@ -1396,16 +1437,12 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
 		if (intr_status & DMA_STATUS_TPS) {
 			netdev_err(priv->dev, "transmit process stopped\n");
 			x->tx_process_stopped++;
-			tx_err = true;
+			schedule_work(&priv->tx_timeout_work);
 		}
 		if (intr_status & DMA_STATUS_FBI) {
 			netdev_err(priv->dev, "fatal bus error\n");
 			x->fatal_bus_error++;
-			tx_err = true;
 		}
-
-		if (tx_err)
-			xgmac_tx_err(priv);
 	}
 
 	/* TX/RX NORMAL interrupts */
@@ -1569,7 +1606,6 @@ static const struct xgmac_stats xgmac_gstrings_stats[] = {
 	XGMAC_STAT(rx_payload_error),
 	XGMAC_STAT(rx_ip_header_error),
 	XGMAC_STAT(rx_da_filter_fail),
-	XGMAC_STAT(rx_sa_filter_fail),
 	XGMAC_STAT(fatal_bus_error),
 	XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
 	XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
@@ -1708,6 +1744,7 @@ static int xgmac_probe(struct platform_device *pdev)
 	ndev->netdev_ops = &xgmac_netdev_ops;
 	SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
 	spin_lock_init(&priv->stats_lock);
+	INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
 
 	priv->device = &pdev->dev;
 	priv->dev = ndev;
@@ -1759,7 +1796,7 @@ static int xgmac_probe(struct platform_device *pdev)
 	if (device_can_wakeup(priv->device))
 		priv->wolopts = WAKE_MAGIC;	/* Magic Frame as default */
 
-	ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+	ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
 	if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
 		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 				     NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index e104db7fcf27..3224d28cdad4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4476,6 +4476,10 @@ static int be_resume(struct pci_dev *pdev)
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
+	status = be_fw_wait_ready(adapter);
+	if (status)
+		return status;
+
 	/* tell fw we're ready to fire cmds */
 	status = be_cmd_fw_init(adapter);
 	if (status)
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ae236009f1a8..0120217a16dd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -296,6 +296,9 @@ struct fec_enet_private {
 	/* The ring entries to be free()ed */
 	struct bufdesc	*dirty_tx;
 
+	unsigned short tx_ring_size;
+	unsigned short rx_ring_size;
+
 	struct	platform_device *pdev;
 
 	int	opened;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 0cd5e4b8b545..f9aacf5d8523 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -238,22 +238,57 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 
 static int mii_cnt;
 
-static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
 {
-	struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
-	if (is_ex)
-		return (struct bufdesc *)(ex + 1);
+	struct bufdesc *new_bd = bdp + 1;
+	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+	struct bufdesc_ex *ex_base;
+	struct bufdesc *base;
+	int ring_size;
+
+	if (bdp >= fep->tx_bd_base) {
+		base = fep->tx_bd_base;
+		ring_size = fep->tx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+	} else {
+		base = fep->rx_bd_base;
+		ring_size = fep->rx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+	}
+
+	if (fep->bufdesc_ex)
+		return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
+			ex_base : ex_new_bd);
 	else
-		return bdp + 1;
+		return (new_bd >= (base + ring_size)) ?
+			base : new_bd;
 }
 
-static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
 {
-	struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
-	if (is_ex)
-		return (struct bufdesc *)(ex - 1);
+	struct bufdesc *new_bd = bdp - 1;
+	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+	struct bufdesc_ex *ex_base;
+	struct bufdesc *base;
+	int ring_size;
+
+	if (bdp >= fep->tx_bd_base) {
+		base = fep->tx_bd_base;
+		ring_size = fep->tx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+	} else {
+		base = fep->rx_bd_base;
+		ring_size = fep->rx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+	}
+
+	if (fep->bufdesc_ex)
+		return (struct bufdesc *)((ex_new_bd < ex_base) ?
+			(ex_new_bd + ring_size) : ex_new_bd);
 	else
-		return bdp - 1;
+		return (new_bd < base) ? (new_bd + ring_size) : new_bd;
 }
 
 static void *swap_buffer(void *bufaddr, int len)
@@ -379,7 +414,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		}
 	}
 
-	bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp_pre = fec_enet_get_prevdesc(bdp, fep);
 	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
 	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
 		fep->delay_work.trig_tx = true;
@@ -388,10 +423,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 
 	/* If this was the last BD in the ring, start at the beginning again. */
-	if (status & BD_ENET_TX_WRAP)
-		bdp = fep->tx_bd_base;
-	else
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_nextdesc(bdp, fep);
 
 	fep->cur_tx = bdp;
 
@@ -416,18 +448,18 @@ static void fec_enet_bd_init(struct net_device *dev)
 
 	/* Initialize the receive buffer descriptors. */
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 
 		/* Initialize the BD for every fragment in the page. */
 		if (bdp->cbd_bufaddr)
 			bdp->cbd_sc = BD_ENET_RX_EMPTY;
 		else
 			bdp->cbd_sc = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 
 	fep->cur_rx = fep->rx_bd_base;
@@ -435,7 +467,7 @@ static void fec_enet_bd_init(struct net_device *dev)
 	/* ...and the same for transmit */
 	bdp = fep->tx_bd_base;
 	fep->cur_tx = bdp;
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < fep->tx_ring_size; i++) {
 
 		/* Initialize the BD for every fragment in the page. */
 		bdp->cbd_sc = 0;
@@ -444,11 +476,11 @@ static void fec_enet_bd_init(struct net_device *dev)
 		fep->tx_skbuff[i] = NULL;
 		}
 		bdp->cbd_bufaddr = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 	fep->dirty_tx = bdp;
 }
@@ -509,10 +541,10 @@ fec_restart(struct net_device *ndev, int duplex)
 	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
 	if (fep->bufdesc_ex)
 		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
-			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
 	else
 		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
-			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
 
 
 	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
@@ -726,10 +758,7 @@ fec_enet_tx(struct net_device *ndev)
 	bdp = fep->dirty_tx;
 
 	/* get next bdp of dirty_tx */
-	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
-		bdp = fep->tx_bd_base;
-	else
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_nextdesc(bdp, fep);
 
 	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
 
@@ -799,10 +828,7 @@ fec_enet_tx(struct net_device *ndev)
 		fep->dirty_tx = bdp;
 
 		/* Update pointer to next buffer descriptor to be transmitted */
-		if (status & BD_ENET_TX_WRAP)
-			bdp = fep->tx_bd_base;
-		else
-			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 
 		/* Since we have freed up a buffer, the ring is no longer full
 		 */
@@ -970,8 +996,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
 					htons(ETH_P_8021Q),
 					vlan_tag);
 
-			if (!skb_defer_rx_timestamp(skb))
-				napi_gro_receive(&fep->napi, skb);
+			napi_gro_receive(&fep->napi, skb);
 		}
 
 		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
@@ -993,10 +1018,8 @@ rx_processing_done:
 	}
 
 	/* Update BD pointer to next entry */
-	if (status & BD_ENET_RX_WRAP)
-		bdp = fep->rx_bd_base;
-	else
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_nextdesc(bdp, fep);
+
 	/* Doing this here will keep the FEC running while we process
 	 * incoming frames. On a heavily loaded network, we should be
 	 * able to keep up at the expense of system resources.
@@ -1662,7 +1685,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 	struct bufdesc *bdp;
 
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 		skb = fep->rx_skbuff[i];
 
 		if (bdp->cbd_bufaddr)
@@ -1670,11 +1693,11 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 				 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
 		if (skb)
 			dev_kfree_skb(skb);
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	bdp = fep->tx_bd_base;
-	for (i = 0; i < TX_RING_SIZE; i++)
+	for (i = 0; i < fep->tx_ring_size; i++)
 		kfree(fep->tx_bounce[i]);
 }
 
@@ -1686,7 +1709,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 	struct bufdesc *bdp;
 
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
 		if (!skb) {
 			fec_enet_free_buffers(ndev);
@@ -1703,15 +1726,15 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_RX_INT;
 		}
 
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 
 	bdp = fep->tx_bd_base;
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < fep->tx_ring_size; i++) {
 		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
 
 		bdp->cbd_sc = 0;
@@ -1722,11 +1745,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_TX_INT;
 		}
 
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 
 	return 0;
@@ -1966,13 +1989,17 @@ static int fec_enet_init(struct net_device *ndev)
 	/* Get the Ethernet address */
 	fec_get_mac(ndev);
 
+	/* init the tx & rx ring size */
+	fep->tx_ring_size = TX_RING_SIZE;
+	fep->rx_ring_size = RX_RING_SIZE;
+
 	/* Set receive and transmit descriptor base. */
 	fep->rx_bd_base = cbd_base;
 	if (fep->bufdesc_ex)
 		fep->tx_bd_base = (struct bufdesc *)
-			(((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+			(((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
 	else
-		fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+		fep->tx_bd_base = cbd_base + fep->rx_ring_size;
 
 	/* The FEC Ethernet specific entries in the device structure */
 	ndev->watchdog_timeo = TX_TIMEOUT;
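The fec refactor above replaces per-call wrap flags with helpers that derive the ring base and size from the descriptor pointer itself. The wraparound arithmetic in isolation, as a runnable sketch with simplified types:

#include <assert.h>

struct bufdesc { unsigned short sc; };

/* Next descriptor in a ring of ring_size entries starting at base. */
static struct bufdesc *next_desc(struct bufdesc *bdp,
				 struct bufdesc *base, int ring_size)
{
	struct bufdesc *nxt = bdp + 1;
	return (nxt >= base + ring_size) ? base : nxt;
}

static struct bufdesc *prev_desc(struct bufdesc *bdp,
				 struct bufdesc *base, int ring_size)
{
	struct bufdesc *prv = bdp - 1;
	return (prv < base) ? prv + ring_size : prv;
}

int main(void)
{
	struct bufdesc ring[4];

	assert(next_desc(&ring[3], ring, 4) == &ring[0]);	/* wraps */
	assert(prev_desc(&ring[1], ring, 4) == &ring[0]);	/* no wrap */
	return 0;
}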
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 7fbe6abf6054..23de82a9da82 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -3069,7 +3069,7 @@ jme_init_one(struct pci_dev *pdev,
 		jwrite32(jme, JME_APMC, apmc);
 	}
 
-	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
+	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT)
 
 	spin_lock_init(&jme->phy_lock);
 	spin_lock_init(&jme->macaddr_lock);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 2777c70c603b..e35bac7cfdf1 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -138,7 +138,9 @@
138#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) 138#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
139#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) 139#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
140#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) 140#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
141#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
141#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) 142#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
143#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
142#define MVNETA_MIB_COUNTERS_BASE 0x3080 144#define MVNETA_MIB_COUNTERS_BASE 0x3080
143#define MVNETA_MIB_LATE_COLLISION 0x7c 145#define MVNETA_MIB_LATE_COLLISION 0x7c
144#define MVNETA_DA_FILT_SPEC_MCAST 0x3400 146#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
@@ -948,6 +950,13 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
948 /* Assign port SDMA configuration */ 950 /* Assign port SDMA configuration */
949 mvreg_write(pp, MVNETA_SDMA_CONFIG, val); 951 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
950 952
953 /* Disable PHY polling in hardware, since we're using the
954 * kernel phylib to do this.
955 */
956 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
957 val &= ~MVNETA_PHY_POLLING_ENABLE;
958 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
959
951 mvneta_set_ucast_table(pp, -1); 960 mvneta_set_ucast_table(pp, -1);
952 mvneta_set_special_mcast_table(pp, -1); 961 mvneta_set_special_mcast_table(pp, -1);
953 mvneta_set_other_mcast_table(pp, -1); 962 mvneta_set_other_mcast_table(pp, -1);
@@ -2340,7 +2349,9 @@ static void mvneta_adjust_link(struct net_device *ndev)
2340 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 2349 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2341 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | 2350 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2342 MVNETA_GMAC_CONFIG_GMII_SPEED | 2351 MVNETA_GMAC_CONFIG_GMII_SPEED |
2343 MVNETA_GMAC_CONFIG_FULL_DUPLEX); 2352 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
2353 MVNETA_GMAC_AN_SPEED_EN |
2354 MVNETA_GMAC_AN_DUPLEX_EN);
2344 2355
2345 if (phydev->duplex) 2356 if (phydev->duplex)
2346 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; 2357 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
@@ -2473,6 +2484,21 @@ static int mvneta_stop(struct net_device *dev)
2473 return 0; 2484 return 0;
2474} 2485}
2475 2486
2487static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2488{
2489 struct mvneta_port *pp = netdev_priv(dev);
2490 int ret;
2491
2492 if (!pp->phy_dev)
2493 return -ENOTSUPP;
2494
2495 ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
2496 if (!ret)
2497 mvneta_adjust_link(dev);
2498
2499 return ret;
2500}
2501
2476/* Ethtool methods */ 2502/* Ethtool methods */
2477 2503
2478/* Get settings (phy address, speed) for ethtools */ 2504/* Get settings (phy address, speed) for ethtools */
@@ -2591,6 +2617,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
2591 .ndo_change_mtu = mvneta_change_mtu, 2617 .ndo_change_mtu = mvneta_change_mtu,
2592 .ndo_tx_timeout = mvneta_tx_timeout, 2618 .ndo_tx_timeout = mvneta_tx_timeout,
2593 .ndo_get_stats64 = mvneta_get_stats64, 2619 .ndo_get_stats64 = mvneta_get_stats64,
2620 .ndo_do_ioctl = mvneta_ioctl,
2594}; 2621};
2595 2622
2596const struct ethtool_ops mvneta_eth_tool_ops = { 2623const struct ethtool_ops mvneta_eth_tool_ops = {
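
With .ndo_do_ioctl wired to phy_mii_ioctl(), the standard MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) now reach the attached PHY through phylib, and the follow-up call to mvneta_adjust_link() resynchronizes the MAC with whatever the ioctl changed. The hookup pattern, sketched for a hypothetical foo driver:

    static int foo_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
    {
            struct foo_priv *priv = netdev_priv(dev);

            if (!priv->phy_dev)
                    return -ENOTSUPP;
            return phy_mii_ioctl(priv->phy_dev, ifr, cmd);
    }

    static const struct net_device_ops foo_netdev_ops = {
            /* ... */
            .ndo_do_ioctl = foo_ioctl,
    };
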
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 3fe09ab2d7c9..32675e16021e 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -1171,7 +1171,6 @@ typedef struct {
1171 1171
1172#define NETXEN_DB_MAPSIZE_BYTES 0x1000 1172#define NETXEN_DB_MAPSIZE_BYTES 0x1000
1173 1173
1174#define NETXEN_NETDEV_WEIGHT 128
1175#define NETXEN_ADAPTER_UP_MAGIC 777 1174#define NETXEN_ADAPTER_UP_MAGIC 777
1176#define NETXEN_NIC_PEG_TUNE 0 1175#define NETXEN_NIC_PEG_TUNE 0
1177 1176
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 1046e9461509..cbd75f97ffb3 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -197,7 +197,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
197 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 197 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
198 sds_ring = &recv_ctx->sds_rings[ring]; 198 sds_ring = &recv_ctx->sds_rings[ring];
199 netif_napi_add(netdev, &sds_ring->napi, 199 netif_napi_add(netdev, &sds_ring->napi,
200 netxen_nic_poll, NETXEN_NETDEV_WEIGHT); 200 netxen_nic_poll, NAPI_POLL_WEIGHT);
201 } 201 }
202 202
203 return 0; 203 return 0;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 474c8a86a2af..5cd831ebfa83 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1348,7 +1348,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1348 DMA_FROM_DEVICE); 1348 DMA_FROM_DEVICE);
1349 skb_put(skb, pkt_len); 1349 skb_put(skb, pkt_len);
1350 skb->protocol = eth_type_trans(skb, ndev); 1350 skb->protocol = eth_type_trans(skb, ndev);
1351 netif_rx(skb); 1351 netif_receive_skb(skb);
1352 ndev->stats.rx_packets++; 1352 ndev->stats.rx_packets++;
1353 ndev->stats.rx_bytes += pkt_len; 1353 ndev->stats.rx_bytes += pkt_len;
1354 } 1354 }
@@ -1906,11 +1906,13 @@ static int sh_eth_open(struct net_device *ndev)
1906 1906
1907 pm_runtime_get_sync(&mdp->pdev->dev); 1907 pm_runtime_get_sync(&mdp->pdev->dev);
1908 1908
1909 napi_enable(&mdp->napi);
1910
1909 ret = request_irq(ndev->irq, sh_eth_interrupt, 1911 ret = request_irq(ndev->irq, sh_eth_interrupt,
1910 mdp->cd->irq_flags, ndev->name, ndev); 1912 mdp->cd->irq_flags, ndev->name, ndev);
1911 if (ret) { 1913 if (ret) {
1912 dev_err(&ndev->dev, "Can not assign IRQ number\n"); 1914 dev_err(&ndev->dev, "Can not assign IRQ number\n");
1913 return ret; 1915 goto out_napi_off;
1914 } 1916 }
1915 1917
1916 /* Descriptor set */ 1918 /* Descriptor set */
@@ -1928,12 +1930,12 @@ static int sh_eth_open(struct net_device *ndev)
1928 if (ret) 1930 if (ret)
1929 goto out_free_irq; 1931 goto out_free_irq;
1930 1932
1931 napi_enable(&mdp->napi);
1932
1933 return ret; 1933 return ret;
1934 1934
1935out_free_irq: 1935out_free_irq:
1936 free_irq(ndev->irq, ndev); 1936 free_irq(ndev->irq, ndev);
1937out_napi_off:
1938 napi_disable(&mdp->napi);
1937 pm_runtime_put_sync(&mdp->pdev->dev); 1939 pm_runtime_put_sync(&mdp->pdev->dev);
1938 return ret; 1940 return ret;
1939} 1941}
@@ -2025,8 +2027,6 @@ static int sh_eth_close(struct net_device *ndev)
2025{ 2027{
2026 struct sh_eth_private *mdp = netdev_priv(ndev); 2028 struct sh_eth_private *mdp = netdev_priv(ndev);
2027 2029
2028 napi_disable(&mdp->napi);
2029
2030 netif_stop_queue(ndev); 2030 netif_stop_queue(ndev);
2031 2031
2032 /* Disable interrupts by clearing the interrupt mask. */ 2032 /* Disable interrupts by clearing the interrupt mask. */
@@ -2044,6 +2044,8 @@ static int sh_eth_close(struct net_device *ndev)
2044 2044
2045 free_irq(ndev->irq, ndev); 2045 free_irq(ndev->irq, ndev);
2046 2046
2047 napi_disable(&mdp->napi);
2048
2047 /* Free all the skbuffs in the Rx queue. */ 2049 /* Free all the skbuffs in the Rx queue. */
2048 sh_eth_ring_free(ndev); 2050 sh_eth_ring_free(ndev);
2049 2051
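
The sh_eth reordering closes a window where the freshly requested IRQ could fire and try to schedule NAPI while it was still disabled: napi_enable() now precedes request_irq() in open, and napi_disable() follows free_irq() in close, with a matching error path. Separately, the RX path switches from netif_rx() to netif_receive_skb(), the direct delivery call appropriate from NAPI poll context. The ordering, sketched with placeholder names:

    /* open */
    napi_enable(&priv->napi);
    ret = request_irq(ndev->irq, foo_interrupt, 0, ndev->name, ndev);
    if (ret)
            napi_disable(&priv->napi);      /* undo on failure */

    /* close: strictly the reverse */
    free_irq(ndev->irq, ndev);
    napi_disable(&priv->napi);
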
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 623ebc50fe6b..7a0072003f34 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -71,19 +71,22 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
71 plat->force_sf_dma_mode = 1; 71 plat->force_sf_dma_mode = 1;
72 } 72 }
73 73
74 dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); 74 if (of_find_property(np, "snps,pbl", NULL)) {
75 if (!dma_cfg) 75 dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
76 return -ENOMEM; 76 GFP_KERNEL);
77 77 if (!dma_cfg)
78 plat->dma_cfg = dma_cfg; 78 return -ENOMEM;
79 of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); 79 plat->dma_cfg = dma_cfg;
80 dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); 80 of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
81 dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); 81 dma_cfg->fixed_burst =
82 of_property_read_bool(np, "snps,fixed-burst");
83 dma_cfg->mixed_burst =
84 of_property_read_bool(np, "snps,mixed-burst");
85 }
82 plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); 86 plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
83 if (plat->force_thresh_dma_mode) { 87 if (plat->force_thresh_dma_mode) {
84 plat->force_sf_dma_mode = 0; 88 plat->force_sf_dma_mode = 0;
85 pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); 89 pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
86 }
87 90
88 return 0; 91 return 0;
89} 92}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index ad32af67e618..9c805e0c0cae 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1466,8 +1466,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
1466{ 1466{
1467 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; 1467 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
1468 /* NAPI */ 1468 /* NAPI */
1469 netif_napi_add(netdev, napi, 1469 netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT);
1470 gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
1471 netdev->ethtool_ops = &gelic_ether_ethtool_ops; 1470 netdev->ethtool_ops = &gelic_ether_ethtool_ops;
1472 netdev->netdev_ops = &gelic_netdevice_ops; 1471 netdev->netdev_ops = &gelic_netdevice_ops;
1473} 1472}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index a93df6ac1909..309abb472aa2 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -37,7 +37,6 @@
37#define GELIC_NET_RXBUF_ALIGN 128 37#define GELIC_NET_RXBUF_ALIGN 128
38#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ 38#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
39#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ 39#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
40#define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS)
41#define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL 40#define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL
42 41
43#define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ 42#define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index e90e1f46121e..64b4639f43b6 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -175,6 +175,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
175 printk(KERN_WARNING "Setting MDIO clock divisor to " 175 printk(KERN_WARNING "Setting MDIO clock divisor to "
176 "default %d\n", DEFAULT_CLOCK_DIVISOR); 176 "default %d\n", DEFAULT_CLOCK_DIVISOR);
177 clk_div = DEFAULT_CLOCK_DIVISOR; 177 clk_div = DEFAULT_CLOCK_DIVISOR;
178 of_node_put(np1);
178 goto issue; 179 goto issue;
179 } 180 }
180 181
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 872819851aef..25ba7eca9a13 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -400,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = {
400 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 400 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
401 .driver_info = (unsigned long)&cdc_mbim_info_zlp, 401 .driver_info = (unsigned long)&cdc_mbim_info_zlp,
402 }, 402 },
403 /* HP hs2434 Mobile Broadband Module needs ZLPs */
404 { USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
405 .driver_info = (unsigned long)&cdc_mbim_info_zlp,
406 },
403 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 407 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
404 .driver_info = (unsigned long)&cdc_mbim_info, 408 .driver_info = (unsigned long)&cdc_mbim_info,
405 }, 409 },
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 1d4c8fe72752..c82fe65c4128 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
102 102
103 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) 103 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
104 zfcp_erp_action_dismiss(&port->erp_action); 104 zfcp_erp_action_dismiss(&port->erp_action);
105 else 105 else {
106 shost_for_each_device(sdev, port->adapter->scsi_host) 106 spin_lock(port->adapter->scsi_host->host_lock);
107 __shost_for_each_device(sdev, port->adapter->scsi_host)
107 if (sdev_to_zfcp(sdev)->port == port) 108 if (sdev_to_zfcp(sdev)->port == port)
108 zfcp_erp_action_dismiss_lun(sdev); 109 zfcp_erp_action_dismiss_lun(sdev);
110 spin_unlock(port->adapter->scsi_host->host_lock);
111 }
109} 112}
110 113
111static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) 114static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
592{ 595{
593 struct scsi_device *sdev; 596 struct scsi_device *sdev;
594 597
595 shost_for_each_device(sdev, port->adapter->scsi_host) 598 spin_lock(port->adapter->scsi_host->host_lock);
599 __shost_for_each_device(sdev, port->adapter->scsi_host)
596 if (sdev_to_zfcp(sdev)->port == port) 600 if (sdev_to_zfcp(sdev)->port == port)
597 _zfcp_erp_lun_reopen(sdev, clear, id, 0); 601 _zfcp_erp_lun_reopen(sdev, clear, id, 0);
602 spin_unlock(port->adapter->scsi_host->host_lock);
598} 603}
599 604
600static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) 605static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -1434,8 +1439,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1434 atomic_set_mask(common_mask, &port->status); 1439 atomic_set_mask(common_mask, &port->status);
1435 read_unlock_irqrestore(&adapter->port_list_lock, flags); 1440 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1436 1441
1437 shost_for_each_device(sdev, adapter->scsi_host) 1442 spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
1443 __shost_for_each_device(sdev, adapter->scsi_host)
1438 atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); 1444 atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1445 spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
1439} 1446}
1440 1447
1441/** 1448/**
@@ -1469,11 +1476,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1469 } 1476 }
1470 read_unlock_irqrestore(&adapter->port_list_lock, flags); 1477 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1471 1478
1472 shost_for_each_device(sdev, adapter->scsi_host) { 1479 spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
1480 __shost_for_each_device(sdev, adapter->scsi_host) {
1473 atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); 1481 atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1474 if (clear_counter) 1482 if (clear_counter)
1475 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); 1483 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1476 } 1484 }
1485 spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
1477} 1486}
1478 1487
1479/** 1488/**
@@ -1487,16 +1496,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
1487{ 1496{
1488 struct scsi_device *sdev; 1497 struct scsi_device *sdev;
1489 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1498 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1499 unsigned long flags;
1490 1500
1491 atomic_set_mask(mask, &port->status); 1501 atomic_set_mask(mask, &port->status);
1492 1502
1493 if (!common_mask) 1503 if (!common_mask)
1494 return; 1504 return;
1495 1505
1496 shost_for_each_device(sdev, port->adapter->scsi_host) 1506 spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
1507 __shost_for_each_device(sdev, port->adapter->scsi_host)
1497 if (sdev_to_zfcp(sdev)->port == port) 1508 if (sdev_to_zfcp(sdev)->port == port)
1498 atomic_set_mask(common_mask, 1509 atomic_set_mask(common_mask,
1499 &sdev_to_zfcp(sdev)->status); 1510 &sdev_to_zfcp(sdev)->status);
1511 spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
1500} 1512}
1501 1513
1502/** 1514/**
@@ -1511,6 +1523,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
1511 struct scsi_device *sdev; 1523 struct scsi_device *sdev;
1512 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1524 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1513 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; 1525 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
1526 unsigned long flags;
1514 1527
1515 atomic_clear_mask(mask, &port->status); 1528 atomic_clear_mask(mask, &port->status);
1516 1529
@@ -1520,13 +1533,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
1520 if (clear_counter) 1533 if (clear_counter)
1521 atomic_set(&port->erp_counter, 0); 1534 atomic_set(&port->erp_counter, 0);
1522 1535
1523 shost_for_each_device(sdev, port->adapter->scsi_host) 1536 spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
1537 __shost_for_each_device(sdev, port->adapter->scsi_host)
1524 if (sdev_to_zfcp(sdev)->port == port) { 1538 if (sdev_to_zfcp(sdev)->port == port) {
1525 atomic_clear_mask(common_mask, 1539 atomic_clear_mask(common_mask,
1526 &sdev_to_zfcp(sdev)->status); 1540 &sdev_to_zfcp(sdev)->status);
1527 if (clear_counter) 1541 if (clear_counter)
1528 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); 1542 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1529 } 1543 }
1544 spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
1530} 1545}
1531 1546
1532/** 1547/**
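
Every zfcp_erp loop here moves from shost_for_each_device(), which takes a reference on each scsi_device and may sleep, to the raw __shost_for_each_device() list iterator, which is only safe under the Scsi_Host's host_lock. The resulting pattern, roughly:

    unsigned long flags;

    spin_lock_irqsave(shost->host_lock, flags);
    __shost_for_each_device(sdev, shost) {
            /* atomic per-device updates only; no sleeping under the lock */
    }
    spin_unlock_irqrestore(shost->host_lock, flags);
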
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 665e3cfaaf85..de0598eaacd2 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
224 224
225static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) 225static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
226{ 226{
227 spin_lock_irq(&qdio->req_q_lock);
228 if (atomic_read(&qdio->req_q_free) || 227 if (atomic_read(&qdio->req_q_free) ||
229 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 228 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
230 return 1; 229 return 1;
231 spin_unlock_irq(&qdio->req_q_lock);
232 return 0; 230 return 0;
233} 231}
234 232
@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
246{ 244{
247 long ret; 245 long ret;
248 246
249 spin_unlock_irq(&qdio->req_q_lock); 247 ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
250 ret = wait_event_interruptible_timeout(qdio->req_q_wq, 248 zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
251 zfcp_qdio_sbal_check(qdio), 5 * HZ);
252 249
253 if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 250 if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
254 return -EIO; 251 return -EIO;
@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
262 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); 259 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
263 } 260 }
264 261
265 spin_lock_irq(&qdio->req_q_lock);
266 return -EIO; 262 return -EIO;
267} 263}
268 264
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 3f01bbf0609f..890639274bcf 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -27,6 +27,16 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
27static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ 27static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
28 zfcp_sysfs_##_feat##_##_name##_show, NULL); 28 zfcp_sysfs_##_feat##_##_name##_show, NULL);
29 29
30#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \
31static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
32 struct device_attribute *at,\
33 char *buf) \
34{ \
35 return sprintf(buf, _format, _value); \
36} \
37static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
38 zfcp_sysfs_##_feat##_##_name##_show, NULL);
39
30#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ 40#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
31static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ 41static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
32 struct device_attribute *at,\ 42 struct device_attribute *at,\
@@ -75,6 +85,8 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
75ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", 85ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
76 (zfcp_unit_sdev_status(unit) & 86 (zfcp_unit_sdev_status(unit) &
77 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); 87 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
88ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
89ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);
78 90
79static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, 91static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
80 struct device_attribute *attr, 92 struct device_attribute *attr,
@@ -347,6 +359,8 @@ static struct attribute *zfcp_unit_attrs[] = {
347 &dev_attr_unit_in_recovery.attr, 359 &dev_attr_unit_in_recovery.attr,
348 &dev_attr_unit_status.attr, 360 &dev_attr_unit_status.attr,
349 &dev_attr_unit_access_denied.attr, 361 &dev_attr_unit_access_denied.attr,
362 &dev_attr_unit_access_shared.attr,
363 &dev_attr_unit_access_readonly.attr,
350 NULL 364 NULL
351}; 365};
352static struct attribute_group zfcp_unit_attr_group = { 366static struct attribute_group zfcp_unit_attr_group = {
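
ZFCP_DEFINE_ATTR_CONST generates a read-only attribute whose show routine prints a fixed value; the two new unit attributes always report 0. Expanding ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0) gives roughly:

    static ssize_t zfcp_sysfs_unit_access_shared_show(struct device *dev,
                                                      struct device_attribute *at,
                                                      char *buf)
    {
            return sprintf(buf, "%d\n", 0);
    }
    static ZFCP_DEV_ATTR(unit, access_shared, S_IRUGO,
                         zfcp_sysfs_unit_access_shared_show, NULL);
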
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 48b2918e0d65..92ff027746f2 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1353,7 +1353,6 @@ config SCSI_LPFC
1353 tristate "Emulex LightPulse Fibre Channel Support" 1353 tristate "Emulex LightPulse Fibre Channel Support"
1354 depends on PCI && SCSI 1354 depends on PCI && SCSI
1355 select SCSI_FC_ATTRS 1355 select SCSI_FC_ATTRS
1356 select GENERIC_CSUM
1357 select CRC_T10DIF 1356 select CRC_T10DIF
1358 help 1357 help
1359 This lpfc driver supports the Emulex LightPulse 1358 This lpfc driver supports the Emulex LightPulse
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index e25eba5713c1..b3b5125faa72 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -482,7 +482,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
482 ret = comedi_device_postconfig(dev); 482 ret = comedi_device_postconfig(dev);
483 if (ret < 0) { 483 if (ret < 0) {
484 comedi_device_detach(dev); 484 comedi_device_detach(dev);
485 module_put(dev->driver->module); 485 module_put(driv->module);
486 } 486 }
487 /* On success, the driver module count has been incremented. */ 487 /* On success, the driver module count has been incremented. */
488 return ret; 488 return ret;
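
The comedi one-liner matters because comedi_device_detach() clears dev->driver, so the old module_put(dev->driver->module) dereferenced a NULL pointer on the failure path; the reference is dropped through the driv pointer saved before the attach instead. Roughly:

    struct comedi_driver *driv = ...;       /* resolved before attach */

    ret = comedi_device_postconfig(dev);
    if (ret < 0) {
            comedi_device_detach(dev);      /* may clear dev->driver */
            module_put(driv->module);       /* saved pointer stays valid */
    }
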
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index 3396eb9d57a3..ac2767100df5 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -341,8 +341,8 @@ void hvsilib_establish(struct hvsi_priv *pv)
341 341
342 pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno); 342 pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno);
343 343
344 /* Try for up to 200s */ 344 /* Try for up to 400ms */
345 for (timeout = 0; timeout < 20; timeout++) { 345 for (timeout = 0; timeout < 40; timeout++) {
346 if (pv->established) 346 if (pv->established)
347 goto established; 347 goto established;
348 if (!hvsi_get_packet(pv)) 348 if (!hvsi_get_packet(pv))
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 08613e241894..279b04910f00 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -304,6 +304,13 @@ static int __init ohci_pci_init(void)
304 pr_info("%s: " DRIVER_DESC "\n", hcd_name); 304 pr_info("%s: " DRIVER_DESC "\n", hcd_name);
305 305
306 ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); 306 ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
307
308#ifdef CONFIG_PM
309 /* Entries for the PCI suspend/resume callbacks are special */
310 ohci_pci_hc_driver.pci_suspend = ohci_suspend;
311 ohci_pci_hc_driver.pci_resume = ohci_resume;
312#endif
313
307 return pci_register_driver(&ohci_pci_driver); 314 return pci_register_driver(&ohci_pci_driver);
308} 315}
309module_init(ohci_pci_init); 316module_init(ohci_pci_init);
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index ca266280895d..e1859b8ef567 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -15,7 +15,7 @@
15 * 675 Mass Ave, Cambridge, MA 02139, USA. 15 * 675 Mass Ave, Cambridge, MA 02139, USA.
16 */ 16 */
17 17
18#include "otg_fsm.h" 18#include "phy-fsm-usb.h"
19#include <linux/usb/otg.h> 19#include <linux/usb/otg.h>
20#include <linux/ioctl.h> 20#include <linux/ioctl.h>
21 21
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index c520b3548e7c..7f4596606e18 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -29,7 +29,7 @@
29#include <linux/usb/gadget.h> 29#include <linux/usb/gadget.h>
30#include <linux/usb/otg.h> 30#include <linux/usb/otg.h>
31 31
32#include "phy-otg-fsm.h" 32#include "phy-fsm-usb.h"
33 33
34/* Change USB protocol when there is a protocol change */ 34/* Change USB protocol when there is a protocol change */
35static int otg_set_protocol(struct otg_fsm *fsm, int protocol) 35static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 5e376bb93419..8defc6b3f9a2 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -40,7 +40,7 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
40 int block, off; 40 int block, off;
41 41
42 inode = iget_locked(sb, ino); 42 inode = iget_locked(sb, ino);
43 if (IS_ERR(inode)) 43 if (!inode)
44 return ERR_PTR(-ENOMEM); 44 return ERR_PTR(-ENOMEM);
45 if (!(inode->i_state & I_NEW)) 45 if (!(inode->i_state & I_NEW))
46 return inode; 46 return inode;
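
This bfs hunk and the matching efs hunk further down fix the same API misuse: iget_locked() signals allocation failure by returning NULL, never an ERR_PTR, so the old IS_ERR() test could not trigger. The correct idiom:

    inode = iget_locked(sb, ino);
    if (!inode)                     /* NULL, not ERR_PTR, on failure */
            return ERR_PTR(-ENOMEM);
    if (!(inode->i_state & I_NEW))
            return inode;           /* already cached and initialized */
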
diff --git a/fs/bio.c b/fs/bio.c
index 94bbc04dba77..c5eae7251490 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
1045int bio_uncopy_user(struct bio *bio) 1045int bio_uncopy_user(struct bio *bio)
1046{ 1046{
1047 struct bio_map_data *bmd = bio->bi_private; 1047 struct bio_map_data *bmd = bio->bi_private;
1048 int ret = 0; 1048 struct bio_vec *bvec;
1049 int ret = 0, i;
1049 1050
1050 if (!bio_flagged(bio, BIO_NULL_MAPPED)) 1051 if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1051 ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, 1052 /*
1052 bmd->nr_sgvecs, bio_data_dir(bio) == READ, 1053 * if we're in a workqueue, the request is orphaned, so
1053 0, bmd->is_our_pages); 1054 * don't copy into a random user address space, just free.
1055 */
1056 if (current->mm)
1057 ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
1058 bmd->nr_sgvecs, bio_data_dir(bio) == READ,
1059 0, bmd->is_our_pages);
1060 else if (bmd->is_our_pages)
1061 bio_for_each_segment_all(bvec, bio, i)
1062 __free_page(bvec->bv_page);
1063 }
1054 bio_free_map_data(bmd); 1064 bio_free_map_data(bmd);
1055 bio_put(bio); 1065 bio_put(bio);
1056 return ret; 1066 return ret;
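
The bio_uncopy_user() fix keys off current->mm: a workqueue worker is a kernel thread with no user address space, so if the copy-back runs there the request has been orphaned, and copying into whichever mm happens to be active would scribble on a random process. Sketch of the decision:

    if (current->mm) {
            /* still in the submitting task: copy the data back to it */
    } else if (bmd->is_our_pages) {
            /* orphaned request on a kernel thread: just free the pages */
    }
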
diff --git a/fs/dcache.c b/fs/dcache.c
index 87bdb5329c3c..b949af850cd6 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -229,7 +229,7 @@ static void __d_free(struct rcu_head *head)
229 */ 229 */
230static void d_free(struct dentry *dentry) 230static void d_free(struct dentry *dentry)
231{ 231{
232 BUG_ON(dentry->d_count); 232 BUG_ON(dentry->d_lockref.count);
233 this_cpu_dec(nr_dentry); 233 this_cpu_dec(nr_dentry);
234 if (dentry->d_op && dentry->d_op->d_release) 234 if (dentry->d_op && dentry->d_op->d_release)
235 dentry->d_op->d_release(dentry); 235 dentry->d_op->d_release(dentry);
@@ -467,7 +467,7 @@ relock:
467 } 467 }
468 468
469 if (ref) 469 if (ref)
470 dentry->d_count--; 470 dentry->d_lockref.count--;
471 /* 471 /*
472 * inform the fs via d_prune that this dentry is about to be 472 * inform the fs via d_prune that this dentry is about to be
473 * unhashed and destroyed. 473 * unhashed and destroyed.
@@ -513,15 +513,10 @@ void dput(struct dentry *dentry)
513 return; 513 return;
514 514
515repeat: 515repeat:
516 if (dentry->d_count == 1) 516 if (dentry->d_lockref.count == 1)
517 might_sleep(); 517 might_sleep();
518 spin_lock(&dentry->d_lock); 518 if (lockref_put_or_lock(&dentry->d_lockref))
519 BUG_ON(!dentry->d_count);
520 if (dentry->d_count > 1) {
521 dentry->d_count--;
522 spin_unlock(&dentry->d_lock);
523 return; 519 return;
524 }
525 520
526 if (dentry->d_flags & DCACHE_OP_DELETE) { 521 if (dentry->d_flags & DCACHE_OP_DELETE) {
527 if (dentry->d_op->d_delete(dentry)) 522 if (dentry->d_op->d_delete(dentry))
@@ -535,7 +530,7 @@ repeat:
535 dentry->d_flags |= DCACHE_REFERENCED; 530 dentry->d_flags |= DCACHE_REFERENCED;
536 dentry_lru_add(dentry); 531 dentry_lru_add(dentry);
537 532
538 dentry->d_count--; 533 dentry->d_lockref.count--;
539 spin_unlock(&dentry->d_lock); 534 spin_unlock(&dentry->d_lock);
540 return; 535 return;
541 536
@@ -590,7 +585,7 @@ int d_invalidate(struct dentry * dentry)
590 * We also need to leave mountpoints alone, 585 * We also need to leave mountpoints alone,
591 * directory or not. 586 * directory or not.
592 */ 587 */
593 if (dentry->d_count > 1 && dentry->d_inode) { 588 if (dentry->d_lockref.count > 1 && dentry->d_inode) {
594 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) { 589 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
595 spin_unlock(&dentry->d_lock); 590 spin_unlock(&dentry->d_lock);
596 return -EBUSY; 591 return -EBUSY;
@@ -606,14 +601,12 @@ EXPORT_SYMBOL(d_invalidate);
606/* This must be called with d_lock held */ 601/* This must be called with d_lock held */
607static inline void __dget_dlock(struct dentry *dentry) 602static inline void __dget_dlock(struct dentry *dentry)
608{ 603{
609 dentry->d_count++; 604 dentry->d_lockref.count++;
610} 605}
611 606
612static inline void __dget(struct dentry *dentry) 607static inline void __dget(struct dentry *dentry)
613{ 608{
614 spin_lock(&dentry->d_lock); 609 lockref_get(&dentry->d_lockref);
615 __dget_dlock(dentry);
616 spin_unlock(&dentry->d_lock);
617} 610}
618 611
619struct dentry *dget_parent(struct dentry *dentry) 612struct dentry *dget_parent(struct dentry *dentry)
@@ -634,8 +627,8 @@ repeat:
634 goto repeat; 627 goto repeat;
635 } 628 }
636 rcu_read_unlock(); 629 rcu_read_unlock();
637 BUG_ON(!ret->d_count); 630 BUG_ON(!ret->d_lockref.count);
638 ret->d_count++; 631 ret->d_lockref.count++;
639 spin_unlock(&ret->d_lock); 632 spin_unlock(&ret->d_lock);
640 return ret; 633 return ret;
641} 634}
@@ -718,7 +711,7 @@ restart:
718 spin_lock(&inode->i_lock); 711 spin_lock(&inode->i_lock);
719 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) { 712 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
720 spin_lock(&dentry->d_lock); 713 spin_lock(&dentry->d_lock);
721 if (!dentry->d_count) { 714 if (!dentry->d_lockref.count) {
722 __dget_dlock(dentry); 715 __dget_dlock(dentry);
723 __d_drop(dentry); 716 __d_drop(dentry);
724 spin_unlock(&dentry->d_lock); 717 spin_unlock(&dentry->d_lock);
@@ -763,12 +756,8 @@ static void try_prune_one_dentry(struct dentry *dentry)
763 /* Prune ancestors. */ 756 /* Prune ancestors. */
764 dentry = parent; 757 dentry = parent;
765 while (dentry) { 758 while (dentry) {
766 spin_lock(&dentry->d_lock); 759 if (lockref_put_or_lock(&dentry->d_lockref))
767 if (dentry->d_count > 1) {
768 dentry->d_count--;
769 spin_unlock(&dentry->d_lock);
770 return; 760 return;
771 }
772 dentry = dentry_kill(dentry, 1); 761 dentry = dentry_kill(dentry, 1);
773 } 762 }
774} 763}
@@ -793,7 +782,7 @@ static void shrink_dentry_list(struct list_head *list)
793 * the LRU because of laziness during lookup. Do not free 782 * the LRU because of laziness during lookup. Do not free
794 * it - just keep it off the LRU list. 783 * it - just keep it off the LRU list.
795 */ 784 */
796 if (dentry->d_count) { 785 if (dentry->d_lockref.count) {
797 dentry_lru_del(dentry); 786 dentry_lru_del(dentry);
798 spin_unlock(&dentry->d_lock); 787 spin_unlock(&dentry->d_lock);
799 continue; 788 continue;
@@ -913,7 +902,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
913 dentry_lru_del(dentry); 902 dentry_lru_del(dentry);
914 __d_shrink(dentry); 903 __d_shrink(dentry);
915 904
916 if (dentry->d_count != 0) { 905 if (dentry->d_lockref.count != 0) {
917 printk(KERN_ERR 906 printk(KERN_ERR
918 "BUG: Dentry %p{i=%lx,n=%s}" 907 "BUG: Dentry %p{i=%lx,n=%s}"
919 " still in use (%d)" 908 " still in use (%d)"
@@ -922,7 +911,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
922 dentry->d_inode ? 911 dentry->d_inode ?
923 dentry->d_inode->i_ino : 0UL, 912 dentry->d_inode->i_ino : 0UL,
924 dentry->d_name.name, 913 dentry->d_name.name,
925 dentry->d_count, 914 dentry->d_lockref.count,
926 dentry->d_sb->s_type->name, 915 dentry->d_sb->s_type->name,
927 dentry->d_sb->s_id); 916 dentry->d_sb->s_id);
928 BUG(); 917 BUG();
@@ -933,7 +922,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
933 list_del(&dentry->d_u.d_child); 922 list_del(&dentry->d_u.d_child);
934 } else { 923 } else {
935 parent = dentry->d_parent; 924 parent = dentry->d_parent;
936 parent->d_count--; 925 parent->d_lockref.count--;
937 list_del(&dentry->d_u.d_child); 926 list_del(&dentry->d_u.d_child);
938 } 927 }
939 928
@@ -981,7 +970,7 @@ void shrink_dcache_for_umount(struct super_block *sb)
981 970
982 dentry = sb->s_root; 971 dentry = sb->s_root;
983 sb->s_root = NULL; 972 sb->s_root = NULL;
984 dentry->d_count--; 973 dentry->d_lockref.count--;
985 shrink_dcache_for_umount_subtree(dentry); 974 shrink_dcache_for_umount_subtree(dentry);
986 975
987 while (!hlist_bl_empty(&sb->s_anon)) { 976 while (!hlist_bl_empty(&sb->s_anon)) {
@@ -1147,7 +1136,7 @@ resume:
1147 * loop in shrink_dcache_parent() might not make any progress 1136 * loop in shrink_dcache_parent() might not make any progress
1148 * and loop forever. 1137 * and loop forever.
1149 */ 1138 */
1150 if (dentry->d_count) { 1139 if (dentry->d_lockref.count) {
1151 dentry_lru_del(dentry); 1140 dentry_lru_del(dentry);
1152 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { 1141 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
1153 dentry_lru_move_list(dentry, dispose); 1142 dentry_lru_move_list(dentry, dispose);
@@ -1269,7 +1258,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1269 smp_wmb(); 1258 smp_wmb();
1270 dentry->d_name.name = dname; 1259 dentry->d_name.name = dname;
1271 1260
1272 dentry->d_count = 1; 1261 dentry->d_lockref.count = 1;
1273 dentry->d_flags = 0; 1262 dentry->d_flags = 0;
1274 spin_lock_init(&dentry->d_lock); 1263 spin_lock_init(&dentry->d_lock);
1275 seqcount_init(&dentry->d_seq); 1264 seqcount_init(&dentry->d_seq);
@@ -1970,7 +1959,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
1970 goto next; 1959 goto next;
1971 } 1960 }
1972 1961
1973 dentry->d_count++; 1962 dentry->d_lockref.count++;
1974 found = dentry; 1963 found = dentry;
1975 spin_unlock(&dentry->d_lock); 1964 spin_unlock(&dentry->d_lock);
1976 break; 1965 break;
@@ -2069,7 +2058,7 @@ again:
2069 spin_lock(&dentry->d_lock); 2058 spin_lock(&dentry->d_lock);
2070 inode = dentry->d_inode; 2059 inode = dentry->d_inode;
2071 isdir = S_ISDIR(inode->i_mode); 2060 isdir = S_ISDIR(inode->i_mode);
2072 if (dentry->d_count == 1) { 2061 if (dentry->d_lockref.count == 1) {
2073 if (!spin_trylock(&inode->i_lock)) { 2062 if (!spin_trylock(&inode->i_lock)) {
2074 spin_unlock(&dentry->d_lock); 2063 spin_unlock(&dentry->d_lock);
2075 cpu_relax(); 2064 cpu_relax();
@@ -2724,6 +2713,17 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
2724 return memcpy(buffer, temp, sz); 2713 return memcpy(buffer, temp, sz);
2725} 2714}
2726 2715
2716char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
2717{
2718 char *end = buffer + buflen;
2719 /* these dentries are never renamed, so d_lock is not needed */
2720 if (prepend(&end, &buflen, " (deleted)", 11) ||
2721 prepend_name(&end, &buflen, &dentry->d_name) ||
2722 prepend(&end, &buflen, "/", 1))
2723 end = ERR_PTR(-ENAMETOOLONG);
2724 return end;
2725}
2726
2727/* 2727/*
2728 * Write full pathname from the root of the filesystem into the buffer. 2728 * Write full pathname from the root of the filesystem into the buffer.
2729 */ 2729 */
@@ -2937,7 +2937,7 @@ resume:
2937 } 2937 }
2938 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { 2938 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
2939 dentry->d_flags |= DCACHE_GENOCIDE; 2939 dentry->d_flags |= DCACHE_GENOCIDE;
2940 dentry->d_count--; 2940 dentry->d_lockref.count--;
2941 } 2941 }
2942 spin_unlock(&dentry->d_lock); 2942 spin_unlock(&dentry->d_lock);
2943 } 2943 }
@@ -2945,7 +2945,7 @@ resume:
2945 struct dentry *child = this_parent; 2945 struct dentry *child = this_parent;
2946 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { 2946 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
2947 this_parent->d_flags |= DCACHE_GENOCIDE; 2947 this_parent->d_flags |= DCACHE_GENOCIDE;
2948 this_parent->d_count--; 2948 this_parent->d_lockref.count--;
2949 } 2949 }
2950 this_parent = try_to_ascend(this_parent, locked, seq); 2950 this_parent = try_to_ascend(this_parent, locked, seq);
2951 if (!this_parent) 2951 if (!this_parent)
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index f3913eb2c474..d15ccf20f1b3 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -57,7 +57,7 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
57 struct inode *inode; 57 struct inode *inode;
58 58
59 inode = iget_locked(super, ino); 59 inode = iget_locked(super, ino);
60 if (IS_ERR(inode)) 60 if (!inode)
61 return ERR_PTR(-ENOMEM); 61 return ERR_PTR(-ENOMEM);
62 if (!(inode->i_state & I_NEW)) 62 if (!(inode->i_state & I_NEW))
63 return inode; 63 return inode;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 34423978b170..d19b30ababf1 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -926,14 +926,8 @@ static int get_hstate_idx(int page_size_log)
926 return h - hstates; 926 return h - hstates;
927} 927}
928 928
929static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen)
930{
931 return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
932 dentry->d_name.name);
933}
934
935static struct dentry_operations anon_ops = { 929static struct dentry_operations anon_ops = {
936 .d_dname = hugetlb_dname 930 .d_dname = simple_dname
937}; 931};
938 932
939/* 933/*
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 8743ba9c6742..984c2bbf4f61 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -3047,6 +3047,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
3047 3047
3048 dir_index = (u32) ctx->pos; 3048 dir_index = (u32) ctx->pos;
3049 3049
3050 /*
3051 * NFSv4 reserves cookies 1 and 2 for . and .. so the value
3052 * we return to the vfs is one greater than the one we use
3053 * internally.
3054 */
3055 if (dir_index)
3056 dir_index--;
3057
3050 if (dir_index > 1) { 3058 if (dir_index > 1) {
3051 struct dir_table_slot dirtab_slot; 3059 struct dir_table_slot dirtab_slot;
3052 3060
@@ -3086,7 +3094,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
3086 if (p->header.flag & BT_INTERNAL) { 3094 if (p->header.flag & BT_INTERNAL) {
3087 jfs_err("jfs_readdir: bad index table"); 3095 jfs_err("jfs_readdir: bad index table");
3088 DT_PUTPAGE(mp); 3096 DT_PUTPAGE(mp);
3089 ctx->pos = -1; 3097 ctx->pos = DIREND;
3090 return 0; 3098 return 0;
3091 } 3099 }
3092 } else { 3100 } else {
@@ -3094,14 +3102,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
3094 /* 3102 /*
3095 * self "." 3103 * self "."
3096 */ 3104 */
3097 ctx->pos = 0; 3105 ctx->pos = 1;
3098 if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) 3106 if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
3099 return 0; 3107 return 0;
3100 } 3108 }
3101 /* 3109 /*
3102 * parent ".." 3110 * parent ".."
3103 */ 3111 */
3104 ctx->pos = 1; 3112 ctx->pos = 2;
3105 if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) 3113 if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
3106 return 0; 3114 return 0;
3107 3115
@@ -3122,22 +3130,23 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
3122 /* 3130 /*
3123 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6 3131 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6
3124 * 3132 *
3125 * pn = index = 0: First entry "." 3133 * pn = 0; index = 1: First entry "."
3126 * pn = 0; index = 1: Second entry ".." 3134 * pn = 0; index = 2: Second entry ".."
3127 * pn > 0: Real entries, pn=1 -> leftmost page 3135 * pn > 0: Real entries, pn=1 -> leftmost page
3128 * pn = index = -1: No more entries 3136 * pn = index = -1: No more entries
3129 */ 3137 */
3130 dtpos = ctx->pos; 3138 dtpos = ctx->pos;
3131 if (dtpos == 0) { 3139 if (dtpos < 2) {
3132 /* build "." entry */ 3140 /* build "." entry */
3141 ctx->pos = 1;
3133 if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) 3142 if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
3134 return 0; 3143 return 0;
3135 dtoffset->index = 1; 3144 dtoffset->index = 2;
3136 ctx->pos = dtpos; 3145 ctx->pos = dtpos;
3137 } 3146 }
3138 3147
3139 if (dtoffset->pn == 0) { 3148 if (dtoffset->pn == 0) {
3140 if (dtoffset->index == 1) { 3149 if (dtoffset->index == 2) {
3141 /* build ".." entry */ 3150 /* build ".." entry */
3142 if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) 3151 if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
3143 return 0; 3152 return 0;
@@ -3228,6 +3237,12 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
3228 } 3237 }
3229 jfs_dirent->position = unique_pos++; 3238 jfs_dirent->position = unique_pos++;
3230 } 3239 }
3240 /*
3241 * We add 1 to the index because we may
3242 * use a value of 2 internally, and NFSv4
3243 * doesn't like that.
3244 */
3245 jfs_dirent->position++;
3231 } else { 3246 } else {
3232 jfs_dirent->position = dtpos; 3247 jfs_dirent->position = dtpos;
3233 len = min(d_namleft, DTLHDRDATALEN_LEGACY); 3248 len = min(d_namleft, DTLHDRDATALEN_LEGACY);
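
The jfs changes exist because NFSv4 reserves directory cookies 1 and 2 for "." and "..", while jfs previously used 0 and 1 and could hand a real entry the value 2. Internal indices are therefore shifted by one on the way out, giving roughly this mapping for the indexed case:

    ctx->pos 1    ->  "."
    ctx->pos 2    ->  ".."
    ctx->pos n>2  ->  real entry stored internally at index n - 1
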
diff --git a/fs/namei.c b/fs/namei.c
index 89a612e392eb..7720fbd5277b 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -536,8 +536,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
536 * a reference at this point. 536 * a reference at this point.
537 */ 537 */
538 BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent); 538 BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
539 BUG_ON(!parent->d_count); 539 BUG_ON(!parent->d_lockref.count);
540 parent->d_count++; 540 parent->d_lockref.count++;
541 spin_unlock(&dentry->d_lock); 541 spin_unlock(&dentry->d_lock);
542 } 542 }
543 spin_unlock(&parent->d_lock); 543 spin_unlock(&parent->d_lock);
@@ -3327,7 +3327,7 @@ void dentry_unhash(struct dentry *dentry)
3327{ 3327{
3328 shrink_dcache_parent(dentry); 3328 shrink_dcache_parent(dentry);
3329 spin_lock(&dentry->d_lock); 3329 spin_lock(&dentry->d_lock);
3330 if (dentry->d_count == 1) 3330 if (dentry->d_lockref.count == 1)
3331 __d_drop(dentry); 3331 __d_drop(dentry);
3332 spin_unlock(&dentry->d_lock); 3332 spin_unlock(&dentry->d_lock);
3333} 3333}
@@ -3671,11 +3671,15 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
3671 if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) 3671 if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
3672 return -EINVAL; 3672 return -EINVAL;
3673 /* 3673 /*
3674 * Using empty names is equivalent to using AT_SYMLINK_FOLLOW 3674 * To use null names we require CAP_DAC_READ_SEARCH
3675 * on /proc/self/fd/<fd>. 3675 * This ensures that not everyone will be able to create
 3676 * hardlinks using the passed file descriptor.
3676 */ 3677 */
3677 if (flags & AT_EMPTY_PATH) 3678 if (flags & AT_EMPTY_PATH) {
3679 if (!capable(CAP_DAC_READ_SEARCH))
3680 return -ENOENT;
3678 how = LOOKUP_EMPTY; 3681 how = LOOKUP_EMPTY;
3682 }
3679 3683
3680 if (flags & AT_SYMLINK_FOLLOW) 3684 if (flags & AT_SYMLINK_FOLLOW)
3681 how |= LOOKUP_FOLLOW; 3685 how |= LOOKUP_FOLLOW;
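
The linkat() tightening is user-visible: an empty oldpath with AT_EMPTY_PATH lets a process mint a new hard link to any file it holds open, which can keep alive a file another user already unlinked, so it now requires CAP_DAC_READ_SEARCH. Without the capability, a call of the following shape (fd being any open descriptor) now fails with -ENOENT:

    linkat(fd, "", AT_FDCWD, "resurrected", AT_EMPTY_PATH);
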
diff --git a/fs/namespace.c b/fs/namespace.c
index 7b1ca9ba0b0a..a45ba4f267fe 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1429,7 +1429,7 @@ struct vfsmount *collect_mounts(struct path *path)
1429 CL_COPY_ALL | CL_PRIVATE); 1429 CL_COPY_ALL | CL_PRIVATE);
1430 namespace_unlock(); 1430 namespace_unlock();
1431 if (IS_ERR(tree)) 1431 if (IS_ERR(tree))
1432 return NULL; 1432 return ERR_CAST(tree);
1433 return &tree->mnt; 1433 return &tree->mnt;
1434} 1434}
1435 1435
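
collect_mounts() used to collapse every failure into NULL; with ERR_CAST() the caller now sees the real errno and is expected to check the result with IS_ERR(), roughly:

    tree = collect_mounts(&path);
    if (IS_ERR(tree))
            return PTR_ERR(tree);
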
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 854d80955bf8..121da2dc3be8 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1022,7 +1022,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1022 struct inode *inode = NULL; 1022 struct inode *inode = NULL;
1023 struct ocfs2_super *osb = NULL; 1023 struct ocfs2_super *osb = NULL;
1024 struct buffer_head *bh = NULL; 1024 struct buffer_head *bh = NULL;
1025 char nodestr[8]; 1025 char nodestr[12];
1026 struct ocfs2_blockcheck_stats stats; 1026 struct ocfs2_blockcheck_stats stats;
1027 1027
1028 trace_ocfs2_fill_super(sb, data, silent); 1028 trace_ocfs2_fill_super(sb, data, silent);
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 75f2890abbd8..0ff80f9b930f 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -230,8 +230,6 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
230 230
231 if (!dir_emit_dots(file, ctx)) 231 if (!dir_emit_dots(file, ctx))
232 goto out; 232 goto out;
233 if (!dir_emit_dots(file, ctx))
234 goto out;
235 files = get_files_struct(p); 233 files = get_files_struct(p);
236 if (!files) 234 if (!files)
237 goto out; 235 goto out;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b90337c9d468..efdc94434c30 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -9,6 +9,7 @@
9#include <linux/seqlock.h> 9#include <linux/seqlock.h>
10#include <linux/cache.h> 10#include <linux/cache.h>
11#include <linux/rcupdate.h> 11#include <linux/rcupdate.h>
12#include <linux/lockref.h>
12 13
13struct nameidata; 14struct nameidata;
14struct path; 15struct path;
@@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int);
100# endif 101# endif
101#endif 102#endif
102 103
104#define d_lock d_lockref.lock
105
103struct dentry { 106struct dentry {
104 /* RCU lookup touched fields */ 107 /* RCU lookup touched fields */
105 unsigned int d_flags; /* protected by d_lock */ 108 unsigned int d_flags; /* protected by d_lock */
@@ -112,8 +115,7 @@ struct dentry {
112 unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */ 115 unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
113 116
114 /* Ref lookup also touches following */ 117 /* Ref lookup also touches following */
115 unsigned int d_count; /* protected by d_lock */ 118 struct lockref d_lockref; /* per-dentry lock and refcount */
116 spinlock_t d_lock; /* per dentry lock */
117 const struct dentry_operations *d_op; 119 const struct dentry_operations *d_op;
118 struct super_block *d_sb; /* The root of the dentry tree */ 120 struct super_block *d_sb; /* The root of the dentry tree */
119 unsigned long d_time; /* used by d_revalidate */ 121 unsigned long d_time; /* used by d_revalidate */
@@ -318,7 +320,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
318 assert_spin_locked(&dentry->d_lock); 320 assert_spin_locked(&dentry->d_lock);
319 if (!read_seqcount_retry(&dentry->d_seq, seq)) { 321 if (!read_seqcount_retry(&dentry->d_seq, seq)) {
320 ret = 1; 322 ret = 1;
321 dentry->d_count++; 323 dentry->d_lockref.count++;
322 } 324 }
323 325
324 return ret; 326 return ret;
@@ -326,7 +328,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
326 328
327static inline unsigned d_count(const struct dentry *dentry) 329static inline unsigned d_count(const struct dentry *dentry)
328{ 330{
329 return dentry->d_count; 331 return dentry->d_lockref.count;
330} 332}
331 333
332/* validate "insecure" dentry pointer */ 334/* validate "insecure" dentry pointer */
@@ -336,6 +338,7 @@ extern int d_validate(struct dentry *, struct dentry *);
336 * helper function for dentry_operations.d_dname() members 338 * helper function for dentry_operations.d_dname() members
337 */ 339 */
338extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); 340extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
341extern char *simple_dname(struct dentry *, char *, int);
339 342
340extern char *__d_path(const struct path *, const struct path *, char *, int); 343extern char *__d_path(const struct path *, const struct path *, char *, int);
341extern char *d_absolute_path(const struct path *, char *, int); 344extern char *d_absolute_path(const struct path *, char *, int);
@@ -356,17 +359,14 @@ extern char *dentry_path(struct dentry *, char *, int);
356static inline struct dentry *dget_dlock(struct dentry *dentry) 359static inline struct dentry *dget_dlock(struct dentry *dentry)
357{ 360{
358 if (dentry) 361 if (dentry)
359 dentry->d_count++; 362 dentry->d_lockref.count++;
360 return dentry; 363 return dentry;
361} 364}
362 365
363static inline struct dentry *dget(struct dentry *dentry) 366static inline struct dentry *dget(struct dentry *dentry)
364{ 367{
365 if (dentry) { 368 if (dentry)
366 spin_lock(&dentry->d_lock); 369 lockref_get(&dentry->d_lockref);
367 dget_dlock(dentry);
368 spin_unlock(&dentry->d_lock);
369 }
370 return dentry; 370 return dentry;
371} 371}
372 372
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file mode 100644
index 000000000000..01233e01627a
--- /dev/null
+++ b/include/linux/lockref.h
@@ -0,0 +1,71 @@
1#ifndef __LINUX_LOCKREF_H
2#define __LINUX_LOCKREF_H
3
4/*
5 * Locked reference counts.
6 *
7 * These are different from just plain atomic refcounts in that they
8 * are atomic with respect to the spinlock that goes with them. In
9 * particular, there can be implementations that don't actually get
10 * the spinlock for the common decrement/increment operations, but they
11 * still have to check that the operation is done semantically as if
12 * the spinlock had been taken (using a cmpxchg operation that covers
13 * both the lock and the count word, or using memory transactions, for
14 * example).
15 */
16
17#include <linux/spinlock.h>
18
19struct lockref {
20 spinlock_t lock;
21 unsigned int count;
22};
23
24/**
25 * lockref_get - Increments reference count unconditionally
 26 * @lockref: pointer to lockref structure
27 *
28 * This operation is only valid if you already hold a reference
29 * to the object, so you know the count cannot be zero.
30 */
31static inline void lockref_get(struct lockref *lockref)
32{
33 spin_lock(&lockref->lock);
34 lockref->count++;
35 spin_unlock(&lockref->lock);
36}
37
38/**
39 * lockref_get_not_zero - Increments count unless the count is 0
 40 * @lockref: pointer to lockref structure
41 * Return: 1 if count updated successfully or 0 if count is 0
42 */
43static inline int lockref_get_not_zero(struct lockref *lockref)
44{
45 int retval = 0;
46
47 spin_lock(&lockref->lock);
48 if (lockref->count) {
49 lockref->count++;
50 retval = 1;
51 }
52 spin_unlock(&lockref->lock);
53 return retval;
54}
55
56/**
57 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 58 * @lockref: pointer to lockref structure
59 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
60 */
61static inline int lockref_put_or_lock(struct lockref *lockref)
62{
63 spin_lock(&lockref->lock);
64 if (lockref->count <= 1)
65 return 0;
66 lockref->count--;
67 spin_unlock(&lockref->lock);
68 return 1;
69}
70
71#endif /* __LINUX_LOCKREF_H */
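
A minimal usage sketch for the new API (the obj type and obj_put() are hypothetical, not part of the patch). Together with the d_lock alias added to dcache.h above, this is the shape dput() now takes: lockref_put_or_lock() either decrements a count known to be greater than one, or returns with the lock held so the caller can finish the teardown itself:

    #include <linux/lockref.h>
    #include <linux/slab.h>

    struct obj {
            struct lockref ref;     /* ref.lock also guards other fields */
    };

    static void obj_put(struct obj *obj)
    {
            if (lockref_put_or_lock(&obj->ref))
                    return;         /* count was > 1, already decremented */
            /* caller held a reference, so count is 1 and ref.lock is held */
            obj->ref.count--;
            spin_unlock(&obj->ref.lock);
            kfree(obj);
    }
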
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 10e5947491c7..b4ec59d159ac 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -14,6 +14,10 @@ struct fs_struct;
14 * A structure to contain pointers to all per-process 14 * A structure to contain pointers to all per-process
15 * namespaces - fs (mount), uts, network, sysvipc, etc. 15 * namespaces - fs (mount), uts, network, sysvipc, etc.
16 * 16 *
17 * The pid namespace is an exception -- it's accessed using
18 * task_active_pid_ns. The pid namespace here is the
19 * namespace that children will use.
20 *
17 * 'count' is the number of tasks holding a reference. 21 * 'count' is the number of tasks holding a reference.
18 * The count for each namespace, then, will be the number 22 * The count for each namespace, then, will be the number
19 * of nsproxies pointing to it, not the number of tasks. 23 * of nsproxies pointing to it, not the number of tasks.
@@ -27,7 +31,7 @@ struct nsproxy {
27 struct uts_namespace *uts_ns; 31 struct uts_namespace *uts_ns;
28 struct ipc_namespace *ipc_ns; 32 struct ipc_namespace *ipc_ns;
29 struct mnt_namespace *mnt_ns; 33 struct mnt_namespace *mnt_ns;
30 struct pid_namespace *pid_ns; 34 struct pid_namespace *pid_ns_for_children;
31 struct net *net_ns; 35 struct net *net_ns;
32}; 36};
33extern struct nsproxy init_nsproxy; 37extern struct nsproxy init_nsproxy;
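
The pid_ns to pid_ns_for_children rename encodes the new comment: a task's own pid namespace is reached through task_active_pid_ns(), and the nsproxy field only decides which namespace future children are born into, which can differ after setns() on a pid-namespace fd. Sketch of the distinction:

    struct pid_namespace *own  = task_active_pid_ns(current);
    struct pid_namespace *kids = current->nsproxy->pid_ns_for_children;
    /* own != kids between setns(CLONE_NEWPID) and the next fork() */
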
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 580a5320cc96..6d91fcb4c5cb 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -16,6 +16,7 @@
16#include <linux/list.h> 16#include <linux/list.h>
17#include <linux/rbtree.h> 17#include <linux/rbtree.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/bug.h>
19 20
20struct module; 21struct module;
21struct device; 22struct device;
diff --git a/include/linux/wait.h b/include/linux/wait.h
index f487a4750b7f..a67fc1635592 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -811,6 +811,63 @@ do { \
811 __ret; \ 811 __ret; \
812}) 812})
813 813
814#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
815 lock, ret) \
816do { \
817 DEFINE_WAIT(__wait); \
818 \
819 for (;;) { \
820 prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
821 if (condition) \
822 break; \
823 if (signal_pending(current)) { \
824 ret = -ERESTARTSYS; \
825 break; \
826 } \
827 spin_unlock_irq(&lock); \
828 ret = schedule_timeout(ret); \
829 spin_lock_irq(&lock); \
830 if (!ret) \
831 break; \
832 } \
833 finish_wait(&wq, &__wait); \
834} while (0)
835
836/**
837 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
838 * The condition is checked under the lock. This is expected
839 * to be called with the lock taken.
840 * @wq: the waitqueue to wait on
841 * @condition: a C expression for the event to wait for
842 * @lock: a locked spinlock_t, which will be released before schedule()
843 * and reacquired afterwards.
844 * @timeout: timeout, in jiffies
845 *
846 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
847 * @condition evaluates to true or signal is received. The @condition is
848 * checked each time the waitqueue @wq is woken up.
849 *
850 * wake_up() has to be called after changing any variable that could
851 * change the result of the wait condition.
852 *
853 * This is supposed to be called while holding the lock. The lock is
854 * dropped before going to sleep and is reacquired afterwards.
855 *
856 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
857 * was interrupted by a signal, and the remaining jiffies otherwise
858 * if the condition evaluated to true before the timeout elapsed.
859 */
860#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
861 timeout) \
862({ \
863 int __ret = timeout; \
864 \
865 if (!(condition)) \
866 __wait_event_interruptible_lock_irq_timeout( \
867 wq, condition, lock, __ret); \
868 __ret; \
869})
870
814 871
815/* 872/*
816 * These are the old interfaces to sleep waiting for an event. 873 * These are the old interfaces to sleep waiting for an event.
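
A hedged usage sketch for the new macro (the zfcp_qdio conversion above is its first user): the caller enters with the spinlock held, the condition is always evaluated under it, and the lock is dropped only around schedule_timeout(); on every return path the lock is held again:

    int ret;

    spin_lock_irq(&dev->lock);
    ret = wait_event_interruptible_lock_irq_timeout(dev->wq,
                    dev->data_ready, dev->lock, 5 * HZ);
    /* ret == 0: timed out; ret == -ERESTARTSYS: signal;
     * ret > 0: condition true, remaining jiffies */
    spin_unlock_irq(&dev->lock);
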
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 8a358a2c97e6..829627d7b846 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -123,6 +123,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
 		/* local bh are disabled so it is ok to use _BH */
 		NET_ADD_STATS_BH(sock_net(sk),
 				 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+		cpu_relax();
 
 	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
 		 !need_resched() && !busy_loop_timeout(end_time));
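
The added cpu_relax() marks the busy-poll loop as a spin-wait, letting the CPU lower power or yield pipeline resources (the PAUSE instruction on x86) between checks of the receive queue. The generic shape of the pattern, sketched with a hypothetical flag and deadline:

    while (!ACCESS_ONCE(done)) {
            cpu_relax();            /* e.g. PAUSE on x86 */
            if (time_after(jiffies, deadline))
                    break;
    }
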
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 93024a47e0e2..8e0b6c856a13 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -61,6 +61,7 @@ struct genl_family {
 	struct list_head	ops_list;	/* private */
 	struct list_head	family_list;	/* private */
 	struct list_head	mcast_groups;	/* private */
+	struct module		*module;
 };
 
 /**
@@ -121,9 +122,24 @@ struct genl_ops {
 	struct list_head	ops_list;
 };
 
-extern int genl_register_family(struct genl_family *family);
-extern int genl_register_family_with_ops(struct genl_family *family,
+extern int __genl_register_family(struct genl_family *family);
+
+static inline int genl_register_family(struct genl_family *family)
+{
+	family->module = THIS_MODULE;
+	return __genl_register_family(family);
+}
+
+extern int __genl_register_family_with_ops(struct genl_family *family,
 	struct genl_ops *ops, size_t n_ops);
+
+static inline int genl_register_family_with_ops(struct genl_family *family,
+	struct genl_ops *ops, size_t n_ops)
+{
+	family->module = THIS_MODULE;
+	return __genl_register_family_with_ops(family, ops, n_ops);
+}
+
 extern int genl_unregister_family(struct genl_family *family);
 extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
 extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
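
The wrappers are static inlines rather than exported functions so that THIS_MODULE expands in the registering module itself, letting generic netlink pin that module while its ops are in use. A registration sketch (the family name and init function are hypothetical):

    static struct genl_family my_family = {
            .id      = GENL_ID_GENERATE,
            .name    = "my_example",
            .version = 1,
    };

    static int __init my_module_init(void)
    {
            /* the inline expands THIS_MODULE to *this* module's
             * struct module, not to genetlink core's */
            return genl_register_family(&my_family);
    }
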
diff --git a/include/net/route.h b/include/net/route.h
index 2ea40c1b5e00..afdeeb5bec25 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -317,4 +317,12 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
 	return hoplimit;
 }
 
+static inline int ip_skb_dst_mtu(struct sk_buff *skb)
+{
+	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
+
+	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
+	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+}
+
 #endif	/* _ROUTE_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 89d3d8ae204e..e253bf0cc7ef 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -341,10 +341,13 @@ struct xfrm_state_afinfo {
 						    struct sk_buff *skb);
 	int			(*transport_finish)(struct sk_buff *skb,
 						    int async);
+	void			(*local_error)(struct sk_buff *skb, u32 mtu);
 };
 
 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
+extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
 
 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
 
@@ -1477,6 +1480,7 @@ extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
 extern int xfrm_output_resume(struct sk_buff *skb, int err);
 extern int xfrm_output(struct sk_buff *skb);
 extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+extern void xfrm_local_error(struct sk_buff *skb, int mtu);
 extern int xfrm4_extract_header(struct sk_buff *skb);
 extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
@@ -1497,6 +1501,7 @@ extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short fam
 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
 extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
 extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
+extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
 extern int xfrm6_extract_header(struct sk_buff *skb);
 extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
@@ -1514,6 +1519,7 @@ extern int xfrm6_output(struct sk_buff *skb);
 extern int xfrm6_output_finish(struct sk_buff *skb);
 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
 				 u8 **prevhdr);
+extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
 
 #ifdef CONFIG_XFRM
 extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
diff --git a/include/uapi/linux/cm4000_cs.h b/include/uapi/linux/cm4000_cs.h
index bc51f77db918..1217f751a1bc 100644
--- a/include/uapi/linux/cm4000_cs.h
+++ b/include/uapi/linux/cm4000_cs.h
@@ -2,6 +2,7 @@
 #define _UAPI_CM4000_H_
 
 #include <linux/types.h>
+#include <linux/ioctl.h>
 
 #define	MAX_ATR			33
 
diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h
index e0133c73c304..590beda78ea0 100644
--- a/include/uapi/linux/icmpv6.h
+++ b/include/uapi/linux/icmpv6.h
@@ -115,6 +115,8 @@ struct icmp6hdr {
 #define ICMPV6_NOT_NEIGHBOUR		2
 #define ICMPV6_ADDR_UNREACH		3
 #define ICMPV6_PORT_UNREACH		4
+#define ICMPV6_POLICY_FAIL		5
+#define ICMPV6_REJECT_ROUTE		6
 
 /*
  *	Codes for Time Exceeded
diff --git a/ipc/msg.c b/ipc/msg.c
index bd60d7e159e8..9f29d9e89bac 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -839,7 +839,7 @@ static inline void free_copy(struct msg_msg *copy)
 
 static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
 {
-	struct msg_msg *msg;
+	struct msg_msg *msg, *found = NULL;
 	long count = 0;
 
 	list_for_each_entry(msg, &msq->q_messages, m_list) {
@@ -848,6 +848,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
 					       *msgtyp, mode)) {
 			if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
 				*msgtyp = msg->m_type - 1;
+				found = msg;
 			} else if (mode == SEARCH_NUMBER) {
 				if (*msgtyp == count)
 					return msg;
@@ -857,7 +858,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
 		}
 	}
 
-	return ERR_PTR(-EAGAIN);
+	return found ?: ERR_PTR(-EAGAIN);
 }
 
 long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,
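
The fix matters for msgrcv() with a negative msgtyp (SEARCH_LESSEQUAL), which should return the pending message with the lowest type not exceeding |msgtyp|; before it, the loop narrowed the search but never remembered the match, so it fell through to the no-message path. A userspace sketch, assuming msqid names an existing queue holding messages of type 2 and 4:

    #include <sys/msg.h>

    struct my_msg { long mtype; char mtext[64]; };

    static ssize_t recv_lowest_le3(int msqid)
    {
            struct my_msg m;

            /* now returns the type-2 message rather than reporting
             * that no suitable message exists */
            return msgrcv(msqid, &m, sizeof(m.mtext), -3, IPC_NOWAIT);
    }
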
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 781845a013ab..e91963302c0d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4480,6 +4480,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	struct dentry *d = cgrp->dentry;
 	struct cgroup_event *event, *tmp;
 	struct cgroup_subsys *ss;
+	struct cgroup *child;
 	bool empty;
 
 	lockdep_assert_held(&d->d_inode->i_mutex);
@@ -4490,12 +4491,28 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	 * @cgrp from being removed while __put_css_set() is in progress.
 	 */
 	read_lock(&css_set_lock);
-	empty = list_empty(&cgrp->cset_links) && list_empty(&cgrp->children);
+	empty = list_empty(&cgrp->cset_links);
 	read_unlock(&css_set_lock);
 	if (!empty)
 		return -EBUSY;
 
 	/*
+	 * Make sure there's no live children.  We can't test ->children
+	 * emptiness as dead children linger on it while being destroyed;
+	 * otherwise, "rmdir parent/child parent" may fail with -EBUSY.
+	 */
+	empty = true;
+	rcu_read_lock();
+	list_for_each_entry_rcu(child, &cgrp->children, sibling) {
+		empty = cgroup_is_dead(child);
+		if (!empty)
+			break;
+	}
+	rcu_read_unlock();
+	if (!empty)
+		return -EBUSY;
+
+	/*
 	 * Block new css_tryget() by killing css refcnts.  cgroup core
 	 * guarantees that, by the time ->css_offline() is invoked, no new
 	 * css reference will be given out via css_tryget().  We can't
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 010a0083c0ae..ea1966db34f2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -475,13 +475,17 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 
 	/*
 	 * Cpusets with tasks - existing or newly being attached - can't
-	 * have empty cpus_allowed or mems_allowed.
+	 * be changed to have empty cpus_allowed or mems_allowed.
 	 */
 	ret = -ENOSPC;
-	if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
-	    (cpumask_empty(trial->cpus_allowed) &&
-	     nodes_empty(trial->mems_allowed)))
-		goto out;
+	if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) {
+		if (!cpumask_empty(cur->cpus_allowed) &&
+		    cpumask_empty(trial->cpus_allowed))
+			goto out;
+		if (!nodes_empty(cur->mems_allowed) &&
+		    nodes_empty(trial->mems_allowed))
+			goto out;
+	}
 
 	ret = 0;
 out:
diff --git a/kernel/fork.c b/kernel/fork.c
index e23bb19e2a3e..bf46287c91a4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1177,7 +1177,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * don't allow the creation of threads.
 	 */
 	if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
-	    (task_active_pid_ns(current) != current->nsproxy->pid_ns))
+	    (task_active_pid_ns(current) !=
+	     current->nsproxy->pid_ns_for_children))
 		return ERR_PTR(-EINVAL);
 
 	retval = security_task_create(clone_flags);
@@ -1351,7 +1352,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	if (pid != &init_struct_pid) {
 		retval = -ENOMEM;
-		pid = alloc_pid(p->nsproxy->pid_ns);
+		pid = alloc_pid(p->nsproxy->pid_ns_for_children);
 		if (!pid)
 			goto bad_fork_cleanup_io;
 	}
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 364ceab15f0c..997cbb951a3b 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -29,15 +29,15 @@
 static struct kmem_cache *nsproxy_cachep;
 
 struct nsproxy init_nsproxy = {
 	.count			= ATOMIC_INIT(1),
 	.uts_ns			= &init_uts_ns,
 #if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
 	.ipc_ns			= &init_ipc_ns,
 #endif
 	.mnt_ns			= NULL,
-	.pid_ns			= &init_pid_ns,
+	.pid_ns_for_children	= &init_pid_ns,
 #ifdef CONFIG_NET
 	.net_ns			= &init_net,
 #endif
 };
 
@@ -85,9 +85,10 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
 		goto out_ipc;
 	}
 
-	new_nsp->pid_ns = copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns);
-	if (IS_ERR(new_nsp->pid_ns)) {
-		err = PTR_ERR(new_nsp->pid_ns);
+	new_nsp->pid_ns_for_children =
+		copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);
+	if (IS_ERR(new_nsp->pid_ns_for_children)) {
+		err = PTR_ERR(new_nsp->pid_ns_for_children);
 		goto out_pid;
 	}
 
@@ -100,8 +101,8 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
 	return new_nsp;
 
 out_net:
-	if (new_nsp->pid_ns)
-		put_pid_ns(new_nsp->pid_ns);
+	if (new_nsp->pid_ns_for_children)
+		put_pid_ns(new_nsp->pid_ns_for_children);
 out_pid:
 	if (new_nsp->ipc_ns)
 		put_ipc_ns(new_nsp->ipc_ns);
@@ -174,8 +175,8 @@ void free_nsproxy(struct nsproxy *ns)
 	put_uts_ns(ns->uts_ns);
 	if (ns->ipc_ns)
 		put_ipc_ns(ns->ipc_ns);
-	if (ns->pid_ns)
-		put_pid_ns(ns->pid_ns);
+	if (ns->pid_ns_for_children)
+		put_pid_ns(ns->pid_ns_for_children);
 	put_net(ns->net_ns);
 	kmem_cache_free(nsproxy_cachep, ns);
 }
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 6917e8edb48e..601bb361c235 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -349,8 +349,8 @@ static int pidns_install(struct nsproxy *nsproxy, void *ns)
 	if (ancestor != active)
 		return -EINVAL;
 
-	put_pid_ns(nsproxy->pid_ns);
-	nsproxy->pid_ns = get_pid_ns(new);
+	put_pid_ns(nsproxy->pid_ns_for_children);
+	nsproxy->pid_ns_for_children = get_pid_ns(new);
 	return 0;
 }
 
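
The new name spells out the semantics that unshare(CLONE_NEWPID) and setns() have always had: they swap the namespace that the caller's future children are created in, while the caller's own PID and namespace stay put. A userspace sketch:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            if (unshare(CLONE_NEWPID))      /* needs CAP_SYS_ADMIN */
                    return 1;

            if (fork() == 0) {
                    /* the first child is pid 1 of the new namespace */
                    printf("child: pid %d\n", getpid());
                    _exit(0);
            }
            /* the parent itself is still in its original namespace */
            printf("parent: pid %d\n", getpid());
            return 0;
    }
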
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 3bdf28323012..61ed862cdd37 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -265,10 +265,9 @@ static inline void timer_list_header(struct seq_file *m, u64 now)
 static int timer_list_show(struct seq_file *m, void *v)
 {
 	struct timer_list_iter *iter = v;
-	u64 now = ktime_to_ns(ktime_get());
 
 	if (iter->cpu == -1 && !iter->second_pass)
-		timer_list_header(m, now);
+		timer_list_header(m, iter->now);
 	else if (!iter->second_pass)
 		print_cpu(m, iter->cpu, iter->now);
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
@@ -298,33 +297,41 @@ void sysrq_timer_list_show(void)
 	return;
 }
 
-static void *timer_list_start(struct seq_file *file, loff_t *offset)
+static void *move_iter(struct timer_list_iter *iter, loff_t offset)
 {
-	struct timer_list_iter *iter = file->private;
-
-	if (!*offset) {
-		iter->cpu = -1;
-		iter->now = ktime_to_ns(ktime_get());
-	} else if (iter->cpu >= nr_cpu_ids) {
+	for (; offset; offset--) {
+		iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
+		if (iter->cpu >= nr_cpu_ids) {
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
-		if (!iter->second_pass) {
-			iter->cpu = -1;
-			iter->second_pass = true;
-		} else
-			return NULL;
+			if (!iter->second_pass) {
+				iter->cpu = -1;
+				iter->second_pass = true;
+			} else
+				return NULL;
 #else
-		return NULL;
+			return NULL;
 #endif
+		}
 	}
 	return iter;
 }
 
+static void *timer_list_start(struct seq_file *file, loff_t *offset)
+{
+	struct timer_list_iter *iter = file->private;
+
+	if (!*offset)
+		iter->now = ktime_to_ns(ktime_get());
+	iter->cpu = -1;
+	iter->second_pass = false;
+	return move_iter(iter, *offset);
+}
+
 static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset)
 {
 	struct timer_list_iter *iter = file->private;
-	iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
 	++*offset;
-	return timer_list_start(file, offset);
+	return move_iter(iter, 1);
 }
 
 static void timer_list_stop(struct seq_file *seq, void *v)
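
The underlying bug was double-stepping: ->next bumped the CPU cursor itself and then called ->start, which could advance it again depending on *offset. The rework funnels all cursor movement through move_iter(), which is the usual seq_file discipline: ->start positions from the offset, ->next advances exactly once. A minimal iterator with the same shape (array-backed, purely illustrative):

    static int ex_data[4] = { 1, 2, 3, 4 };

    static void *ex_start(struct seq_file *m, loff_t *pos)
    {
            return *pos < 4 ? &ex_data[*pos] : NULL;
    }

    static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
    {
            ++*pos;                 /* advance exactly once per call */
            return ex_start(m, pos);
    }

    static void ex_stop(struct seq_file *m, void *v)
    {
    }
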
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7f5d4be22034..e93f7b9067d8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2201,6 +2201,15 @@ __acquires(&pool->lock)
 		dump_stack();
 	}
 
+	/*
+	 * The following prevents a kworker from hogging CPU on !PREEMPT
+	 * kernels, where a requeueing work item waiting for something to
+	 * happen could deadlock with stop_machine as such work item could
+	 * indefinitely requeue itself while all other CPUs are trapped in
+	 * stop_machine.
+	 */
+	cond_resched();
+
 	spin_lock_irq(&pool->lock);
 
 	/* clear cpu intensive status */
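
The scenario the comment describes is easy to construct: a work item that immediately requeues itself never lets the kworker leave its processing loop, and on a !PREEMPT kernel that kworker never schedules, so stop_machine (which needs every CPU to check in) can block forever. A sketch of such an item (device_ready() is a hypothetical condition):

    static void my_poll_fn(struct work_struct *work)
    {
            if (!device_ready())
                    queue_work(system_wq, work);    /* requeues itself */
            /* without the cond_resched() above, this could pin a
             * !PREEMPT CPU indefinitely */
    }
    static DECLARE_WORK(my_poll_work, my_poll_fn);
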
diff --git a/mm/mremap.c b/mm/mremap.c
index 457d34ef3bf2..0843feb66f3d 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -15,6 +15,7 @@
 #include <linux/swap.h>
 #include <linux/capability.h>
 #include <linux/fs.h>
+#include <linux/swapops.h>
 #include <linux/highmem.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
@@ -69,6 +70,23 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 	return pmd;
 }
 
+static pte_t move_soft_dirty_pte(pte_t pte)
+{
+	/*
+	 * Set soft dirty bit so we can notice
+	 * in userspace the ptes were moved.
+	 */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+	if (pte_present(pte))
+		pte = pte_mksoft_dirty(pte);
+	else if (is_swap_pte(pte))
+		pte = pte_swp_mksoft_dirty(pte);
+	else if (pte_file(pte))
+		pte = pte_file_mksoft_dirty(pte);
+#endif
+	return pte;
+}
+
 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		unsigned long old_addr, unsigned long old_end,
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
@@ -126,7 +144,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 			continue;
 		pte = ptep_get_and_clear(mm, old_addr, old_pte);
 		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
-		set_pte_at(mm, new_addr, new_pte, pte_mksoft_dirty(pte));
+		pte = move_soft_dirty_pte(pte);
+		set_pte_at(mm, new_addr, new_pte, pte);
 	}
 
 	arch_leave_lazy_mmu_mode();
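
Soft-dirty is consumed from userspace: write "4" to /proc/pid/clear_refs to clear the bits, then read bit 55 of each /proc/pid/pagemap entry to see which pages were written (or, with this patch, moved by mremap()) since. A read-side sketch with minimal error handling:

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    /* 1 if the page containing addr is soft-dirty, 0 if clean,
     * -1 on error; bit 55 of a pagemap entry is the soft-dirty flag */
    static int soft_dirty(const void *addr)
    {
            uint64_t ent = 0;
            long psz = sysconf(_SC_PAGESIZE);
            int fd = open("/proc/self/pagemap", O_RDONLY);
            off_t off = (uintptr_t)addr / psz * sizeof(ent);
            int ok = fd >= 0 &&
                     pread(fd, &ent, sizeof(ent), off) == (ssize_t)sizeof(ent);

            if (fd >= 0)
                    close(fd);
            return ok ? (int)((ent >> 55) & 1) : -1;
    }
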
diff --git a/mm/shmem.c b/mm/shmem.c
index 8335dbd3fc35..e43dc555069d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2909,14 +2909,8 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
 /* common code */
 
-static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
-{
-	return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
-				dentry->d_name.name);
-}
-
 static struct dentry_operations anon_ops = {
-	.d_dname = shmem_dname
+	.d_dname = simple_dname
 };
 
 /**
diff --git a/mm/slab.h b/mm/slab.h
index 620ceeddbe1a..a535033f7e9a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -162,6 +162,8 @@ static inline const char *cache_name(struct kmem_cache *s)
 
 static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
 {
+	if (!s->memcg_params)
+		return NULL;
 	return s->memcg_params->memcg_caches[idx];
 }
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 0feaaa0d37d1..ca04163635da 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -71,7 +71,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	mdst = br_mdb_get(br, skb, vid);
 	if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
-	    br_multicast_querier_exists(br))
+	    br_multicast_querier_exists(br, eth_hdr(skb)))
 		br_multicast_deliver(mdst, skb);
 	else
 		br_flood_deliver(br, skb, false);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 8c561c0aa636..a2fd37ec35f7 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -102,7 +102,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	} else if (is_multicast_ether_addr(dest)) {
 		mdst = br_mdb_get(br, skb, vid);
 		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
-		    br_multicast_querier_exists(br)) {
+		    br_multicast_querier_exists(br, eth_hdr(skb))) {
 			if ((mdst && mdst->mglist) ||
 			    br_multicast_is_router(br))
 				skb2 = skb;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index de818d95c476..85a09bb5ca51 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -416,16 +416,20 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
 	if (!netif_running(br->dev) || br->multicast_disabled)
 		return -EINVAL;
 
-	if (timer_pending(&br->multicast_querier_timer))
-		return -EBUSY;
-
 	ip.proto = entry->addr.proto;
-	if (ip.proto == htons(ETH_P_IP))
+	if (ip.proto == htons(ETH_P_IP)) {
+		if (timer_pending(&br->ip4_querier.timer))
+			return -EBUSY;
+
 		ip.u.ip4 = entry->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
-	else
+	} else {
+		if (timer_pending(&br->ip6_querier.timer))
+			return -EBUSY;
+
 		ip.u.ip6 = entry->addr.u.ip6;
 #endif
+	}
 
 	spin_lock_bh(&br->multicast_lock);
 	mdb = mlock_dereference(br->mdb, br);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 23531471f16a..d1c578630678 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -34,7 +34,8 @@
 
 #include "br_private.h"
 
-static void br_multicast_start_querier(struct net_bridge *br);
+static void br_multicast_start_querier(struct net_bridge *br,
+				       struct bridge_mcast_query *query);
 unsigned int br_mdb_rehash_seq;
 
 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -756,20 +757,35 @@ static void br_multicast_local_router_expired(unsigned long data)
 {
 }
 
-static void br_multicast_querier_expired(unsigned long data)
+static void br_multicast_querier_expired(struct net_bridge *br,
+					 struct bridge_mcast_query *query)
 {
-	struct net_bridge *br = (void *)data;
-
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) || br->multicast_disabled)
 		goto out;
 
-	br_multicast_start_querier(br);
+	br_multicast_start_querier(br, query);
 
 out:
 	spin_unlock(&br->multicast_lock);
 }
 
+static void br_ip4_multicast_querier_expired(unsigned long data)
+{
+	struct net_bridge *br = (void *)data;
+
+	br_multicast_querier_expired(br, &br->ip4_query);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_querier_expired(unsigned long data)
+{
+	struct net_bridge *br = (void *)data;
+
+	br_multicast_querier_expired(br, &br->ip6_query);
+}
+#endif
+
 static void __br_multicast_send_query(struct net_bridge *br,
 				      struct net_bridge_port *port,
 				      struct br_ip *ip)
@@ -790,37 +806,45 @@ static void __br_multicast_send_query(struct net_bridge *br,
 }
 
 static void br_multicast_send_query(struct net_bridge *br,
-				    struct net_bridge_port *port, u32 sent)
+				    struct net_bridge_port *port,
+				    struct bridge_mcast_query *query)
 {
 	unsigned long time;
 	struct br_ip br_group;
+	struct bridge_mcast_querier *querier = NULL;
 
 	if (!netif_running(br->dev) || br->multicast_disabled ||
-	    !br->multicast_querier ||
-	    timer_pending(&br->multicast_querier_timer))
+	    !br->multicast_querier)
 		return;
 
 	memset(&br_group.u, 0, sizeof(br_group.u));
 
-	br_group.proto = htons(ETH_P_IP);
-	__br_multicast_send_query(br, port, &br_group);
-
+	if (port ? (query == &port->ip4_query) :
+		   (query == &br->ip4_query)) {
+		querier = &br->ip4_querier;
+		br_group.proto = htons(ETH_P_IP);
 #if IS_ENABLED(CONFIG_IPV6)
-	br_group.proto = htons(ETH_P_IPV6);
-	__br_multicast_send_query(br, port, &br_group);
+	} else {
+		querier = &br->ip6_querier;
+		br_group.proto = htons(ETH_P_IPV6);
 #endif
+	}
+
+	if (!querier || timer_pending(&querier->timer))
+		return;
+
+	__br_multicast_send_query(br, port, &br_group);
 
 	time = jiffies;
-	time += sent < br->multicast_startup_query_count ?
+	time += query->startup_sent < br->multicast_startup_query_count ?
 		br->multicast_startup_query_interval :
 		br->multicast_query_interval;
-	mod_timer(port ? &port->multicast_query_timer :
-			 &br->multicast_query_timer, time);
+	mod_timer(&query->timer, time);
 }
 
-static void br_multicast_port_query_expired(unsigned long data)
+static void br_multicast_port_query_expired(struct net_bridge_port *port,
+					    struct bridge_mcast_query *query)
 {
-	struct net_bridge_port *port = (void *)data;
 	struct net_bridge *br = port->br;
 
 	spin_lock(&br->multicast_lock);
@@ -828,25 +852,43 @@ static void br_multicast_port_query_expired(unsigned long data)
 	    port->state == BR_STATE_BLOCKING)
 		goto out;
 
-	if (port->multicast_startup_queries_sent <
-	    br->multicast_startup_query_count)
-		port->multicast_startup_queries_sent++;
+	if (query->startup_sent < br->multicast_startup_query_count)
+		query->startup_sent++;
 
-	br_multicast_send_query(port->br, port,
-				port->multicast_startup_queries_sent);
+	br_multicast_send_query(port->br, port, query);
 
 out:
 	spin_unlock(&br->multicast_lock);
 }
 
+static void br_ip4_multicast_port_query_expired(unsigned long data)
+{
+	struct net_bridge_port *port = (void *)data;
+
+	br_multicast_port_query_expired(port, &port->ip4_query);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_port_query_expired(unsigned long data)
+{
+	struct net_bridge_port *port = (void *)data;
+
+	br_multicast_port_query_expired(port, &port->ip6_query);
+}
+#endif
+
 void br_multicast_add_port(struct net_bridge_port *port)
 {
 	port->multicast_router = 1;
 
 	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
 		    (unsigned long)port);
-	setup_timer(&port->multicast_query_timer,
-		    br_multicast_port_query_expired, (unsigned long)port);
+	setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired,
+		    (unsigned long)port);
+#if IS_ENABLED(CONFIG_IPV6)
+	setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired,
+		    (unsigned long)port);
+#endif
 }
 
 void br_multicast_del_port(struct net_bridge_port *port)
@@ -854,13 +896,13 @@ void br_multicast_del_port(struct net_bridge_port *port)
 	del_timer_sync(&port->multicast_router_timer);
 }
 
-static void __br_multicast_enable_port(struct net_bridge_port *port)
+static void br_multicast_enable(struct bridge_mcast_query *query)
 {
-	port->multicast_startup_queries_sent = 0;
+	query->startup_sent = 0;
 
-	if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
-	    del_timer(&port->multicast_query_timer))
-		mod_timer(&port->multicast_query_timer, jiffies);
+	if (try_to_del_timer_sync(&query->timer) >= 0 ||
+	    del_timer(&query->timer))
+		mod_timer(&query->timer, jiffies);
 }
 
 void br_multicast_enable_port(struct net_bridge_port *port)
@@ -871,7 +913,10 @@ void br_multicast_enable_port(struct net_bridge_port *port)
 	if (br->multicast_disabled || !netif_running(br->dev))
 		goto out;
 
-	__br_multicast_enable_port(port);
+	br_multicast_enable(&port->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+	br_multicast_enable(&port->ip6_query);
+#endif
 
 out:
 	spin_unlock(&br->multicast_lock);
@@ -890,7 +935,10 @@ void br_multicast_disable_port(struct net_bridge_port *port)
 	if (!hlist_unhashed(&port->rlist))
 		hlist_del_init_rcu(&port->rlist);
 	del_timer(&port->multicast_router_timer);
-	del_timer(&port->multicast_query_timer);
+	del_timer(&port->ip4_query.timer);
+#if IS_ENABLED(CONFIG_IPV6)
+	del_timer(&port->ip6_query.timer);
+#endif
 	spin_unlock(&br->multicast_lock);
 }
 
@@ -1015,14 +1063,15 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 }
 #endif
 
-static void br_multicast_update_querier_timer(struct net_bridge *br,
-					      unsigned long max_delay)
+static void
+br_multicast_update_querier_timer(struct net_bridge *br,
+				  struct bridge_mcast_querier *querier,
+				  unsigned long max_delay)
 {
-	if (!timer_pending(&br->multicast_querier_timer))
-		br->multicast_querier_delay_time = jiffies + max_delay;
+	if (!timer_pending(&querier->timer))
+		querier->delay_time = jiffies + max_delay;
 
-	mod_timer(&br->multicast_querier_timer,
-		  jiffies + br->multicast_querier_interval);
+	mod_timer(&querier->timer, jiffies + br->multicast_querier_interval);
 }
 
 /*
@@ -1075,12 +1124,13 @@ timer:
 
 static void br_multicast_query_received(struct net_bridge *br,
 					struct net_bridge_port *port,
+					struct bridge_mcast_querier *querier,
 					int saddr,
 					unsigned long max_delay)
 {
 	if (saddr)
-		br_multicast_update_querier_timer(br, max_delay);
-	else if (timer_pending(&br->multicast_querier_timer))
+		br_multicast_update_querier_timer(br, querier, max_delay);
+	else if (timer_pending(&querier->timer))
 		return;
 
 	br_multicast_mark_router(br, port);
@@ -1130,7 +1180,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 			IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
 	}
 
-	br_multicast_query_received(br, port, !!iph->saddr, max_delay);
+	br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
+				    max_delay);
 
 	if (!group)
 		goto out;
@@ -1208,8 +1259,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
 	}
 
-	br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr),
-				    max_delay);
+	br_multicast_query_received(br, port, &br->ip6_querier,
+				    !ipv6_addr_any(&ip6h->saddr), max_delay);
 
 	if (!group)
 		goto out;
@@ -1246,7 +1297,9 @@ out:
 
 static void br_multicast_leave_group(struct net_bridge *br,
 				     struct net_bridge_port *port,
-				     struct br_ip *group)
+				     struct br_ip *group,
+				     struct bridge_mcast_querier *querier,
+				     struct bridge_mcast_query *query)
 {
 	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
@@ -1257,7 +1310,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) ||
 	    (port && port->state == BR_STATE_DISABLED) ||
-	    timer_pending(&br->multicast_querier_timer))
+	    timer_pending(&querier->timer))
 		goto out;
 
 	mdb = mlock_dereference(br->mdb, br);
@@ -1265,14 +1318,13 @@ static void br_multicast_leave_group(struct net_bridge *br,
 	if (!mp)
 		goto out;
 
-	if (br->multicast_querier &&
-	    !timer_pending(&br->multicast_querier_timer)) {
+	if (br->multicast_querier) {
 		__br_multicast_send_query(br, port, &mp->addr);
 
 		time = jiffies + br->multicast_last_member_count *
 			br->multicast_last_member_interval;
-		mod_timer(port ? &port->multicast_query_timer :
-			  &br->multicast_query_timer, time);
+
+		mod_timer(&query->timer, time);
 
 		for (p = mlock_dereference(mp->ports, br);
 		     p != NULL;
@@ -1325,7 +1377,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
 			mod_timer(&mp->timer, time);
 		}
 	}
-
 out:
 	spin_unlock(&br->multicast_lock);
 }
@@ -1336,6 +1387,8 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 					 __u16 vid)
 {
 	struct br_ip br_group;
+	struct bridge_mcast_query *query = port ? &port->ip4_query :
+						  &br->ip4_query;
 
 	if (ipv4_is_local_multicast(group))
 		return;
@@ -1344,7 +1397,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 	br_group.proto = htons(ETH_P_IP);
 	br_group.vid = vid;
 
-	br_multicast_leave_group(br, port, &br_group);
+	br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1354,6 +1407,9 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 					 __u16 vid)
 {
 	struct br_ip br_group;
+	struct bridge_mcast_query *query = port ? &port->ip6_query :
+						  &br->ip6_query;
+
 
 	if (ipv6_addr_is_ll_all_nodes(group))
 		return;
@@ -1362,7 +1418,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 	br_group.proto = htons(ETH_P_IPV6);
 	br_group.vid = vid;
 
-	br_multicast_leave_group(br, port, &br_group);
+	br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query);
 }
 #endif
 
@@ -1630,19 +1686,32 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
 	return 0;
 }
 
-static void br_multicast_query_expired(unsigned long data)
+static void br_multicast_query_expired(struct net_bridge *br,
+				       struct bridge_mcast_query *query)
+{
+	spin_lock(&br->multicast_lock);
+	if (query->startup_sent < br->multicast_startup_query_count)
+		query->startup_sent++;
+
+	br_multicast_send_query(br, NULL, query);
+	spin_unlock(&br->multicast_lock);
+}
+
+static void br_ip4_multicast_query_expired(unsigned long data)
 {
 	struct net_bridge *br = (void *)data;
 
-	spin_lock(&br->multicast_lock);
-	if (br->multicast_startup_queries_sent <
-	    br->multicast_startup_query_count)
-		br->multicast_startup_queries_sent++;
+	br_multicast_query_expired(br, &br->ip4_query);
+}
 
-	br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_query_expired(unsigned long data)
+{
+	struct net_bridge *br = (void *)data;
 
-	spin_unlock(&br->multicast_lock);
+	br_multicast_query_expired(br, &br->ip6_query);
 }
+#endif
 
 void br_multicast_init(struct net_bridge *br)
 {
@@ -1662,25 +1731,43 @@ void br_multicast_init(struct net_bridge *br)
 	br->multicast_querier_interval = 255 * HZ;
 	br->multicast_membership_interval = 260 * HZ;
 
-	br->multicast_querier_delay_time = 0;
+	br->ip4_querier.delay_time = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+	br->ip6_querier.delay_time = 0;
+#endif
 
 	spin_lock_init(&br->multicast_lock);
 	setup_timer(&br->multicast_router_timer,
 		    br_multicast_local_router_expired, 0);
-	setup_timer(&br->multicast_querier_timer,
-		    br_multicast_querier_expired, (unsigned long)br);
-	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
+	setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired,
+		    (unsigned long)br);
+	setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired,
 		    (unsigned long)br);
+#if IS_ENABLED(CONFIG_IPV6)
+	setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired,
+		    (unsigned long)br);
+	setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired,
+		    (unsigned long)br);
+#endif
 }
 
-void br_multicast_open(struct net_bridge *br)
+static void __br_multicast_open(struct net_bridge *br,
+				struct bridge_mcast_query *query)
 {
-	br->multicast_startup_queries_sent = 0;
+	query->startup_sent = 0;
 
 	if (br->multicast_disabled)
 		return;
 
-	mod_timer(&br->multicast_query_timer, jiffies);
+	mod_timer(&query->timer, jiffies);
+}
+
+void br_multicast_open(struct net_bridge *br)
+{
+	__br_multicast_open(br, &br->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+	__br_multicast_open(br, &br->ip6_query);
+#endif
 }
 
 void br_multicast_stop(struct net_bridge *br)
@@ -1692,8 +1779,12 @@ void br_multicast_stop(struct net_bridge *br)
 	int i;
 
 	del_timer_sync(&br->multicast_router_timer);
-	del_timer_sync(&br->multicast_querier_timer);
-	del_timer_sync(&br->multicast_query_timer);
+	del_timer_sync(&br->ip4_querier.timer);
+	del_timer_sync(&br->ip4_query.timer);
+#if IS_ENABLED(CONFIG_IPV6)
+	del_timer_sync(&br->ip6_querier.timer);
+	del_timer_sync(&br->ip6_query.timer);
+#endif
 
 	spin_lock_bh(&br->multicast_lock);
 	mdb = mlock_dereference(br->mdb, br);
@@ -1796,18 +1887,24 @@ unlock:
 	return err;
 }
 
-static void br_multicast_start_querier(struct net_bridge *br)
+static void br_multicast_start_querier(struct net_bridge *br,
+				       struct bridge_mcast_query *query)
 {
 	struct net_bridge_port *port;
 
-	br_multicast_open(br);
+	__br_multicast_open(br, query);
 
 	list_for_each_entry(port, &br->port_list, list) {
 		if (port->state == BR_STATE_DISABLED ||
 		    port->state == BR_STATE_BLOCKING)
 			continue;
 
-		__br_multicast_enable_port(port);
+		if (query == &br->ip4_query)
+			br_multicast_enable(&port->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+		else
+			br_multicast_enable(&port->ip6_query);
+#endif
 	}
 }
 
@@ -1842,7 +1939,10 @@ rollback:
 			goto rollback;
 	}
 
-	br_multicast_start_querier(br);
+	br_multicast_start_querier(br, &br->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+	br_multicast_start_querier(br, &br->ip6_query);
+#endif
 
 unlock:
 	spin_unlock_bh(&br->multicast_lock);
@@ -1865,10 +1965,18 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
 		goto unlock;
 
 	max_delay = br->multicast_query_response_interval;
-	if (!timer_pending(&br->multicast_querier_timer))
-		br->multicast_querier_delay_time = jiffies + max_delay;
 
-	br_multicast_start_querier(br);
+	if (!timer_pending(&br->ip4_querier.timer))
+		br->ip4_querier.delay_time = jiffies + max_delay;
+
+	br_multicast_start_querier(br, &br->ip4_query);
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (!timer_pending(&br->ip6_querier.timer))
+		br->ip6_querier.delay_time = jiffies + max_delay;
+
+	br_multicast_start_querier(br, &br->ip6_query);
+#endif
 
 unlock:
 	spin_unlock_bh(&br->multicast_lock);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 89b2949be02f..598cb0b333c6 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -66,6 +66,20 @@ struct br_ip
 	__u16		vid;
 };
 
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+/* our own querier */
+struct bridge_mcast_query {
+	struct timer_list	timer;
+	u32			startup_sent;
+};
+
+/* other querier */
+struct bridge_mcast_querier {
+	struct timer_list		timer;
+	unsigned long			delay_time;
+};
+#endif
+
 struct net_port_vlans {
 	u16				port_idx;
 	u16				pvid;
@@ -162,10 +176,12 @@ struct net_bridge_port
 #define BR_FLOOD		0x00000040
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
-	u32				multicast_startup_queries_sent;
+	struct bridge_mcast_query	ip4_query;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct bridge_mcast_query	ip6_query;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 	unsigned char			multicast_router;
 	struct timer_list		multicast_router_timer;
-	struct timer_list		multicast_query_timer;
 	struct hlist_head		mglist;
 	struct hlist_node		rlist;
 #endif
@@ -258,7 +274,6 @@ struct net_bridge
 	u32				hash_max;
 
 	u32				multicast_last_member_count;
-	u32				multicast_startup_queries_sent;
 	u32				multicast_startup_query_count;
 
 	unsigned long			multicast_last_member_interval;
@@ -267,15 +282,18 @@ struct net_bridge
 	unsigned long			multicast_query_interval;
 	unsigned long			multicast_query_response_interval;
 	unsigned long			multicast_startup_query_interval;
-	unsigned long			multicast_querier_delay_time;
 
 	spinlock_t			multicast_lock;
 	struct net_bridge_mdb_htable __rcu *mdb;
 	struct hlist_head		router_list;
 
 	struct timer_list		multicast_router_timer;
-	struct timer_list		multicast_querier_timer;
-	struct timer_list		multicast_query_timer;
+	struct bridge_mcast_querier	ip4_querier;
+	struct bridge_mcast_query	ip4_query;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct bridge_mcast_querier	ip6_querier;
+	struct bridge_mcast_query	ip6_query;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 #endif
 
 	struct timer_list		hello_timer;
@@ -483,11 +501,27 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
 		timer_pending(&br->multicast_router_timer));
 }
 
-static inline bool br_multicast_querier_exists(struct net_bridge *br)
+static inline bool
+__br_multicast_querier_exists(struct net_bridge *br,
+			      struct bridge_mcast_querier *querier)
+{
+	return time_is_before_jiffies(querier->delay_time) &&
+	       (br->multicast_querier || timer_pending(&querier->timer));
+}
+
+static inline bool br_multicast_querier_exists(struct net_bridge *br,
+					       struct ethhdr *eth)
 {
-	return time_is_before_jiffies(br->multicast_querier_delay_time) &&
-	       (br->multicast_querier ||
-		timer_pending(&br->multicast_querier_timer));
+	switch (eth->h_proto) {
+	case (htons(ETH_P_IP)):
+		return __br_multicast_querier_exists(br, &br->ip4_querier);
+#if IS_ENABLED(CONFIG_IPV6)
+	case (htons(ETH_P_IPV6)):
+		return __br_multicast_querier_exists(br, &br->ip6_querier);
+#endif
+	default:
+		return false;
+	}
 }
 #else
 static inline int br_multicast_rcv(struct net_bridge *br,
@@ -545,7 +579,8 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
 {
 	return 0;
 }
-static inline bool br_multicast_querier_exists(struct net_bridge *br)
+static inline bool br_multicast_querier_exists(struct net_bridge *br,
+					       struct ethhdr *eth)
 {
 	return false;
 }
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 2bd4b58f4372..0f455227da83 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -293,9 +293,10 @@ int cfctrl_linkup_request(struct cflayer *layer,
 
 		count = cfctrl_cancel_req(&cfctrl->serv.layer,
 					  user_layer);
-		if (count != 1)
+		if (count != 1) {
 			pr_err("Could not remove request (%d)", count);
 			return -ENODEV;
+		}
 	}
 	return 0;
 }
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 159737cac76c..0ff42f029ace 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -350,14 +350,9 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 		if (new_index < 0)
 			new_index = skb_tx_hash(dev, skb);
 
-		if (queue_index != new_index && sk) {
-			struct dst_entry *dst =
-				    rcu_dereference_check(sk->sk_dst_cache, 1);
-
-			if (dst && skb_dst(skb) == dst)
-				sk_tx_queue_set(sk, queue_index);
-
-		}
+		if (queue_index != new_index && sk &&
+		    rcu_access_pointer(sk->sk_dst_cache))
+			sk_tx_queue_set(sk, queue_index);
 
 		queue_index = new_index;
 	}
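
rcu_access_pointer() fits here because only the pointer's value is tested; the pointee is never dereferenced, so no RCU read-side critical section is needed. The distinction in miniature (sk, dst and mtu as in the surrounding code, purely illustrative):

    /* value-only test: no rcu_read_lock() required */
    if (rcu_access_pointer(sk->sk_dst_cache))
            sk_tx_queue_set(sk, queue_index);

    /* dereferencing the pointee would instead need: */
    rcu_read_lock();
    dst = rcu_dereference(sk->sk_dst_cache);
    if (dst)
            mtu = dst_mtu(dst);
    rcu_read_unlock();
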
diff --git a/net/core/scm.c b/net/core/scm.c
index 03795d0147f2..b4da80b1cc07 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
 		return -EINVAL;
 
 	if ((creds->pid == task_tgid_vnr(current) ||
-	     ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
+	     ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
 	    ((uid_eq(uid, cred->uid)  || uid_eq(uid, cred->euid) ||
 	      uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
 	    ((gid_eq(gid, cred->gid)  || gid_eq(gid, cred->egid) ||
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4bcabf3ab4ca..9ee17e3d11c3 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -211,14 +211,6 @@ static inline int ip_finish_output2(struct sk_buff *skb)
 		return -EINVAL;
 }
 
-static inline int ip_skb_dst_mtu(struct sk_buff *skb)
-{
-	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
-
-	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
-	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
-}
-
 static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 87bd2952c733..7f80fb4b82d3 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -190,15 +190,14 @@ static int ipip_rcv(struct sk_buff *skb)
 	struct ip_tunnel *tunnel;
 	const struct iphdr *iph;
 
-	if (iptunnel_pull_header(skb, 0, tpi.proto))
-		goto drop;
-
 	iph = ip_hdr(skb);
 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
 			iph->saddr, iph->daddr, 0);
 	if (tunnel) {
 		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 			goto drop;
+		if (iptunnel_pull_header(skb, 0, tpi.proto))
+			goto drop;
 		return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
 	}
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 41d84505a922..a86c7ae71881 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -571,7 +571,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
 			   RT_SCOPE_UNIVERSE,
 			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
-			   inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP,
+			   inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
+			    (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
 			   daddr, saddr, 0, 0);
 
 	if (!inet->hdrincl) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index fdf74090a001..6e5617b9f9db 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2468,10 +2468,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 	case TCP_THIN_DUPACK:
 		if (val < 0 || val > 1)
 			err = -EINVAL;
-		else
+		else {
 			tp->thin_dupack = val;
 			if (tp->thin_dupack)
 				tcp_disable_early_retrans(tp);
+		}
 		break;
 
 	case TCP_REPAIR:
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 93d7e9de4143..1969e16d936d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3574,7 +3574,10 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
 		++ptr;
 		tp->rx_opt.rcv_tsval = ntohl(*ptr);
 		++ptr;
-		tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
+		if (*ptr)
+			tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
+		else
+			tp->rx_opt.rcv_tsecr = 0;
 		return true;
 	}
 	return false;
@@ -3599,7 +3602,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
 	}
 
 	tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
-	if (tp->rx_opt.saw_tstamp)
+	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
 		tp->rx_opt.rcv_tsecr -= tp->tsoffset;
 
 	return true;
@@ -5354,7 +5357,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 	int saved_clamp = tp->rx_opt.mss_clamp;
 
 	tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
-	if (tp->rx_opt.saw_tstamp)
+	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
 		tp->rx_opt.rcv_tsecr -= tp->tsoffset;
 
 	if (th->ack) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e63ae4c9691d..7c83cb8bf137 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2673,7 +2673,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	int tcp_header_size;
 	int mss;
 
-	skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC));
+	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
 	if (unlikely(!skb)) {
 		dst_release(dst);
 		return NULL;
@@ -2817,6 +2817,8 @@ void tcp_connect_init(struct sock *sk)
 
 	if (likely(!tp->repair))
 		tp->rcv_nxt = 0;
+	else
+		tp->rcv_tstamp = tcp_time_stamp;
 	tp->rcv_wup = tp->rcv_nxt;
 	tp->copied_seq = tp->rcv_nxt;
 
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 327a617d594c..baa0f63731fd 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -21,7 +21,6 @@
 static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 {
 	int mtu, ret = 0;
-	struct dst_entry *dst;
 
 	if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
 		goto out;
@@ -29,12 +28,10 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
 		goto out;
 
-	dst = skb_dst(skb);
-	mtu = dst_mtu(dst);
+	mtu = dst_mtu(skb_dst(skb));
 	if (skb->len > mtu) {
 		if (skb->sk)
-			ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
-				       inet_sk(skb->sk)->inet_dport, mtu);
+			xfrm_local_error(skb, mtu);
 		else
 			icmp_send(skb, ICMP_DEST_UNREACH,
 				  ICMP_FRAG_NEEDED, htonl(mtu));
@@ -99,3 +96,12 @@ int xfrm4_output(struct sk_buff *skb)
 			    x->outer_mode->afinfo->output_finish,
 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
+
+void xfrm4_local_error(struct sk_buff *skb, u32 mtu)
+{
+	struct iphdr *hdr;
+
+	hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+	ip_local_error(skb->sk, EMSGSIZE, hdr->daddr,
+		       inet_sk(skb->sk)->inet_dport, mtu);
+}
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 9258e751baba..0b2a0641526a 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -83,6 +83,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
 	.extract_input = xfrm4_extract_input,
 	.extract_output = xfrm4_extract_output,
 	.transport_finish = xfrm4_transport_finish,
+	.local_error = xfrm4_local_error,
 };
 
 void __init xfrm4_state_init(void)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2a66eaad047f..d6ff12617f36 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1022,7 +1022,6 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
 	unsigned long regen_advance;
 	int tmp_plen;
 	int ret = 0;
-	int max_addresses;
 	u32 addr_flags;
 	unsigned long now = jiffies;
 
@@ -1068,7 +1067,6 @@ retry:
 			      idev->cnf.temp_prefered_lft + age -
 			      idev->cnf.max_desync_factor);
 	tmp_plen = ifp->prefix_len;
-	max_addresses = idev->cnf.max_addresses;
 	tmp_tstamp = ifp->tstamp;
 	spin_unlock_bh(&ifp->lock);
 
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index f083a583a05c..b30ad3741b46 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -251,38 +251,36 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net,
 /* add a label */
 static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
 {
+	struct hlist_node *n;
+	struct ip6addrlbl_entry *last = NULL, *p = NULL;
 	int ret = 0;
 
-	ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n",
-			__func__,
-			newp, replace);
+	ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", __func__, newp,
+		  replace);
 
-	if (hlist_empty(&ip6addrlbl_table.head)) {
-		hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
-	} else {
-		struct hlist_node *n;
-		struct ip6addrlbl_entry *p = NULL;
-		hlist_for_each_entry_safe(p, n,
-					  &ip6addrlbl_table.head, list) {
-			if (p->prefixlen == newp->prefixlen &&
-			    net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
-			    p->ifindex == newp->ifindex &&
-			    ipv6_addr_equal(&p->prefix, &newp->prefix)) {
-				if (!replace) {
-					ret = -EEXIST;
-					goto out;
-				}
-				hlist_replace_rcu(&p->list, &newp->list);
-				ip6addrlbl_put(p);
-				goto out;
-			} else if ((p->prefixlen == newp->prefixlen && !p->ifindex) ||
-				   (p->prefixlen < newp->prefixlen)) {
-				hlist_add_before_rcu(&newp->list, &p->list);
+	hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
+		if (p->prefixlen == newp->prefixlen &&
+		    net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
+		    p->ifindex == newp->ifindex &&
+		    ipv6_addr_equal(&p->prefix, &newp->prefix)) {
+			if (!replace) {
+				ret = -EEXIST;
 				goto out;
 			}
+			hlist_replace_rcu(&p->list, &newp->list);
+			ip6addrlbl_put(p);
+			goto out;
+		} else if ((p->prefixlen == newp->prefixlen && !p->ifindex) ||
+			   (p->prefixlen < newp->prefixlen)) {
+			hlist_add_before_rcu(&newp->list, &p->list);
+			goto out;
 		}
-		hlist_add_after_rcu(&p->list, &newp->list);
+		last = p;
 	}
+	if (last)
+		hlist_add_after_rcu(&last->list, &newp->list);
+	else
+		hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
 out:
 	if (!ret)
 		ip6addrlbl_table.seq++;
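
The rewrite above folds the old empty-list special case into a single pass: the loop either replaces a matching entry, inserts before the first less-specific one, or remembers the last node visited, so the tail append (or the head insert, when the list is empty) happens once after the loop. A minimal userspace sketch of that insertion shape, using a plain singly linked list in place of the kernel's RCU hlist (all names illustrative, not the kernel's API):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
            int prefixlen;
            struct node *next;
    };

    static struct node *head;

    /* Keep entries sorted by decreasing prefix length, mirroring the
     * single-pass structure of __ip6addrlbl_add() above. */
    static void add_sorted(struct node *newp)
    {
            struct node *last = NULL, *p, **link = &head;

            for (p = head; p; link = &p->next, last = p, p = p->next) {
                    if (p->prefixlen < newp->prefixlen) {
                            /* insert before the first less-specific entry */
                            newp->next = p;
                            *link = newp;
                            return;
                    }
            }
            /* append after the last entry, or start an empty list */
            newp->next = NULL;
            if (last)
                    last->next = newp;
            else
                    head = newp;
    }

    int main(void)
    {
            static const int lens[] = { 64, 128, 0 };

            for (int i = 0; i < 3; i++) {
                    struct node *n = calloc(1, sizeof(*n));

                    n->prefixlen = lens[i];
                    add_sorted(n);
            }
            for (struct node *p = head; p; p = p->next)
                    printf("%d ", p->prefixlen);
            printf("\n");   /* prints: 128 64 0 */
            return 0;
    }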
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 73681c227453..eef8d945b362 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -940,6 +940,14 @@ static const struct icmp6_err {
 		.err	= ECONNREFUSED,
 		.fatal	= 1,
 	},
+	{ /* POLICY_FAIL */
+		.err	= EACCES,
+		.fatal	= 1,
+	},
+	{ /* REJECT_ROUTE	*/
+		.err	= EACCES,
+		.fatal	= 1,
+	},
 };
 
 int icmpv6_err_convert(u8 type, u8 code, int *err)
@@ -951,7 +959,7 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
 	switch (type) {
 	case ICMPV6_DEST_UNREACH:
 		fatal = 1;
-		if (code <= ICMPV6_PORT_UNREACH) {
+		if (code < ARRAY_SIZE(tab_unreach)) {
 			*err = tab_unreach[code].err;
 			fatal = tab_unreach[code].fatal;
 		}
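
Replacing the hard-coded `code <= ICMPV6_PORT_UNREACH` bound with `ARRAY_SIZE(tab_unreach)` lets the two new entries (POLICY_FAIL, REJECT_ROUTE) take effect without touching the lookup, and any future table growth is picked up automatically. A small userspace sketch of the idiom, with error values mirroring the table above (a sketch, not the kernel code):

    #include <errno.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const struct {
            int err;
            int fatal;
    } tab_unreach[] = {
            { ENETUNREACH,  0 },    /* NOROUTE */
            { EACCES,       1 },    /* ADM_PROHIBITED */
            { EHOSTUNREACH, 0 },    /* NOT_NEIGHBOUR */
            { EHOSTUNREACH, 0 },    /* ADDR_UNREACH */
            { ECONNREFUSED, 1 },    /* PORT_UNREACH */
            { EACCES,       1 },    /* POLICY_FAIL */
            { EACCES,       1 },    /* REJECT_ROUTE */
    };

    static int err_convert(unsigned int code, int *err)
    {
            int fatal = 1;

            /* The bound is derived from the table itself, so growing
             * the table never requires editing this check. */
            if (code < ARRAY_SIZE(tab_unreach)) {
                    *err = tab_unreach[code].err;
                    fatal = tab_unreach[code].fatal;
            }
            return fatal;
    }

    int main(void)
    {
            int err = 0;
            int fatal = err_convert(6, &err);       /* REJECT_ROUTE */

            printf("err=%d fatal=%d\n", err, fatal);
            return 0;
    }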
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index db992a373011..6b26e9feafb9 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -722,6 +722,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
 	}
 
+	if (likely(!skb->encapsulation)) {
+		skb_reset_inner_headers(skb);
+		skb->encapsulation = 1;
+	}
+
 	skb_push(skb, gre_hlen);
 	skb_reset_network_header(skb);
 	skb_set_transport_header(skb, sizeof(*ipv6h));
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index dd08cfd8999e..3a692d529163 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -213,6 +213,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	hdr->saddr = fl6->saddr;
 	hdr->daddr = *first_hop;
 
+	skb->protocol = htons(ETH_P_IPV6);
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
 
@@ -1032,6 +1033,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 		/* initialize protocol header pointer */
 		skb->transport_header = skb->network_header + fragheaderlen;
 
+		skb->protocol = htons(ETH_P_IPV6);
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		skb->csum = 0;
 	}
@@ -1334,6 +1336,7 @@ alloc_new_skb:
 			/*
 			 *	Fill in the control structures
 			 */
+			skb->protocol = htons(ETH_P_IPV6);
 			skb->ip_summed = CHECKSUM_NONE;
 			skb->csum = 0;
 			/* reserve for fragmentation and ipsec header */
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 55999d923f26..61355f7f4da5 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1028,6 +1028,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 		init_tel_txopt(&opt, encap_limit);
 		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
 	}
+
+	if (likely(!skb->encapsulation)) {
+		skb_reset_inner_headers(skb);
+		skb->encapsulation = 1;
+	}
+
 	skb_push(skb, sizeof(struct ipv6hdr));
 	skb_reset_network_header(skb);
 	ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c4bc7a35cd56..12179457b2cd 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -372,14 +372,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
 	int tlen = dev->needed_tailroom;
 	struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
 	struct sk_buff *skb;
-	int err;
 
-	skb = sock_alloc_send_skb(sk,
-				  hlen + sizeof(struct ipv6hdr) + len + tlen,
-				  1, &err);
+	skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
 	if (!skb) {
-		ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb, err=%d\n",
-			  __func__, err);
+		ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n",
+			  __func__);
 		return NULL;
 	}
 
@@ -389,6 +386,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
 	skb_reserve(skb, hlen + sizeof(struct ipv6hdr));
 	skb_reset_transport_header(skb);
 
+	/* Manually assign socket ownership as we avoid calling
+	 * sock_alloc_send_pskb() to bypass wmem buffer limits
+	 */
+	skb_set_owner_w(skb, sk);
+
 	return skb;
 }
 
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c1e533498203..58916bbb1728 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -633,6 +633,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
 		goto error;
 	skb_reserve(skb, hlen);
 
+	skb->protocol = htons(ETH_P_IPV6);
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
 	skb_dst_set(skb, &rt->dst);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 19abcc9d6a1a..7ee5cb96db34 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -641,11 +641,7 @@ static int ipip_rcv(struct sk_buff *skb)
 	const struct iphdr *iph;
 	struct ip_tunnel *tunnel;
 
-	if (iptunnel_pull_header(skb, 0, tpi.proto))
-		goto drop;
-
 	iph = ip_hdr(skb);
-
 	tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
 				     iph->saddr, iph->daddr);
 	if (tunnel != NULL) {
@@ -655,6 +651,8 @@ static int ipip_rcv(struct sk_buff *skb)
 
 		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 			goto drop;
+		if (iptunnel_pull_header(skb, 0, tpi.proto))
+			goto drop;
 		return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
 	}
 
@@ -881,6 +879,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	ttl = iph6->hop_limit;
 	tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
 
+	if (likely(!skb->encapsulation)) {
+		skb_reset_inner_headers(skb);
+		skb->encapsulation = 1;
+	}
+
 	err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
 			    ttl, df, !net_eq(tunnel->net, dev_net(dev)));
 	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9acdcedf9a14..5c71501fc917 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1425,7 +1425,7 @@ ipv6_pktoptions:
 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
 		if (np->rxopt.bits.rxtclass)
-			np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+			np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(opt_skb));
 		if (ipv6_opt_accepted(sk, opt_skb)) {
 			skb_set_owner_r(opt_skb, sk);
 			opt_skb = xchg(&np->pktoptions, opt_skb);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 8755a3079d0f..6cd625e37706 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -34,8 +34,10 @@ static int xfrm6_local_dontfrag(struct sk_buff *skb)
 	struct sock *sk = skb->sk;
 
 	if (sk) {
-		proto = sk->sk_protocol;
+		if (sk->sk_family != AF_INET6)
+			return 0;
 
+		proto = sk->sk_protocol;
 		if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
 			return inet6_sk(sk)->dontfrag;
 	}
@@ -54,13 +56,15 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
 	ipv6_local_rxpmtu(sk, &fl6, mtu);
 }
 
-static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
+void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
 {
 	struct flowi6 fl6;
+	const struct ipv6hdr *hdr;
 	struct sock *sk = skb->sk;
 
+	hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
 	fl6.fl6_dport = inet_sk(sk)->inet_dport;
-	fl6.daddr = ipv6_hdr(skb)->daddr;
+	fl6.daddr = hdr->daddr;
 
 	ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
 }
@@ -80,7 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
 		if (xfrm6_local_dontfrag(skb))
 			xfrm6_local_rxpmtu(skb, mtu);
 		else if (skb->sk)
-			xfrm6_local_error(skb, mtu);
+			xfrm_local_error(skb, mtu);
 		else
 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		ret = -EMSGSIZE;
@@ -136,13 +140,18 @@ static int __xfrm6_output(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct xfrm_state *x = dst->xfrm;
-	int mtu = ip6_skb_dst_mtu(skb);
+	int mtu;
+
+	if (skb->protocol == htons(ETH_P_IPV6))
+		mtu = ip6_skb_dst_mtu(skb);
+	else
+		mtu = dst_mtu(skb_dst(skb));
 
 	if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
 		xfrm6_local_rxpmtu(skb, mtu);
 		return -EMSGSIZE;
 	} else if (!skb->local_df && skb->len > mtu && skb->sk) {
-		xfrm6_local_error(skb, mtu);
+		xfrm_local_error(skb, mtu);
 		return -EMSGSIZE;
 	}
 
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index d8c70b8efc24..3fc970135fc6 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -183,6 +183,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
 	.extract_input = xfrm6_extract_input,
 	.extract_output = xfrm6_extract_output,
 	.transport_finish = xfrm6_transport_finish,
+	.local_error = xfrm6_local_error,
 };
 
 int __init xfrm6_state_init(void)
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 512718adb0d5..0c741cec4d0d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -364,7 +364,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
 EXPORT_SYMBOL(genl_unregister_ops);
 
 /**
- * genl_register_family - register a generic netlink family
+ * __genl_register_family - register a generic netlink family
  * @family: generic netlink family
  *
  * Registers the specified family after validating it first. Only one
@@ -374,7 +374,7 @@ EXPORT_SYMBOL(genl_unregister_ops);
  *
  * Return 0 on success or a negative error code.
  */
-int genl_register_family(struct genl_family *family)
+int __genl_register_family(struct genl_family *family)
 {
 	int err = -EINVAL;
 
@@ -430,10 +430,10 @@ errout_locked:
 errout:
 	return err;
 }
-EXPORT_SYMBOL(genl_register_family);
+EXPORT_SYMBOL(__genl_register_family);
 
 /**
- * genl_register_family_with_ops - register a generic netlink family
+ * __genl_register_family_with_ops - register a generic netlink family
  * @family: generic netlink family
  * @ops: operations to be registered
  * @n_ops: number of elements to register
@@ -457,12 +457,12 @@ EXPORT_SYMBOL(genl_register_family);
  *
  * Return 0 on success or a negative error code.
  */
-int genl_register_family_with_ops(struct genl_family *family,
+int __genl_register_family_with_ops(struct genl_family *family,
 				  struct genl_ops *ops, size_t n_ops)
 {
 	int err, i;
 
-	err = genl_register_family(family);
+	err = __genl_register_family(family);
 	if (err)
 		return err;
 
@@ -476,7 +476,7 @@ err_out:
 	genl_unregister_family(family);
 	return err;
 }
-EXPORT_SYMBOL(genl_register_family_with_ops);
+EXPORT_SYMBOL(__genl_register_family_with_ops);
 
 /**
  * genl_unregister_family - unregister generic netlink family
@@ -544,6 +544,30 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
 }
 EXPORT_SYMBOL(genlmsg_put);
 
+static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct genl_ops *ops = cb->data;
+	int rc;
+
+	genl_lock();
+	rc = ops->dumpit(skb, cb);
+	genl_unlock();
+	return rc;
+}
+
+static int genl_lock_done(struct netlink_callback *cb)
+{
+	struct genl_ops *ops = cb->data;
+	int rc = 0;
+
+	if (ops->done) {
+		genl_lock();
+		rc = ops->done(cb);
+		genl_unlock();
+	}
+	return rc;
+}
+
 static int genl_family_rcv_msg(struct genl_family *family,
 			       struct sk_buff *skb,
 			       struct nlmsghdr *nlh)
@@ -572,15 +596,34 @@ static int genl_family_rcv_msg(struct genl_family *family,
 		return -EPERM;
 
 	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
-		struct netlink_dump_control c = {
-			.dump = ops->dumpit,
-			.done = ops->done,
-		};
+		int rc;
 
 		if (ops->dumpit == NULL)
 			return -EOPNOTSUPP;
 
-		return netlink_dump_start(net->genl_sock, skb, nlh, &c);
+		if (!family->parallel_ops) {
+			struct netlink_dump_control c = {
+				.module = family->module,
+				.data = ops,
+				.dump = genl_lock_dumpit,
+				.done = genl_lock_done,
+			};
+
+			genl_unlock();
+			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
+			genl_lock();
+
+		} else {
+			struct netlink_dump_control c = {
+				.module = family->module,
+				.dump = ops->dumpit,
+				.done = ops->done,
+			};
+
+			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
+		}
+
+		return rc;
 	}
 
 	if (ops->doit == NULL)
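
The dump path now distinguishes parallel-safe families from legacy ones: for the latter, dumpit/done are swapped for genl_lock_dumpit()/genl_lock_done(), which take genl_lock() around each callback while the real ops ride along in the callback's data pointer. A rough userspace analogue of that wrapping pattern, with a pthread mutex standing in for genl_lock() (all names illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t genl_mutex = PTHREAD_MUTEX_INITIALIZER;

    struct demo_ops {
            int (*dumpit)(void *ctx);
    };

    struct callback {
            void *data;                       /* carries the ops, like cb->data */
            int (*dump)(struct callback *cb);
    };

    /* Wrapper installed in place of the raw dumpit for families that
     * are not parallel-safe: the callback runs under the big lock. */
    static int lock_dumpit(struct callback *cb)
    {
            struct demo_ops *ops = cb->data;
            int rc;

            pthread_mutex_lock(&genl_mutex);
            rc = ops->dumpit(cb);
            pthread_mutex_unlock(&genl_mutex);
            return rc;
    }

    static int my_dump(void *ctx)
    {
            (void)ctx;
            puts("dump ran under the lock");
            return 0;
    }

    int main(void)
    {
            struct demo_ops ops = { .dumpit = my_dump };
            struct callback cb = { .data = &ops, .dump = lock_dumpit };

            return cb.dump(&cb);
    }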
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 75edcfad6e26..1504bb11e4f3 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -207,10 +207,13 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
207 pgfrom_base -= copy; 207 pgfrom_base -= copy;
208 208
209 vto = kmap_atomic(*pgto); 209 vto = kmap_atomic(*pgto);
210 vfrom = kmap_atomic(*pgfrom); 210 if (*pgto != *pgfrom) {
211 memmove(vto + pgto_base, vfrom + pgfrom_base, copy); 211 vfrom = kmap_atomic(*pgfrom);
212 memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
213 kunmap_atomic(vfrom);
214 } else
215 memmove(vto + pgto_base, vto + pgfrom_base, copy);
212 flush_dcache_page(*pgto); 216 flush_dcache_page(*pgto);
213 kunmap_atomic(vfrom);
214 kunmap_atomic(vto); 217 kunmap_atomic(vto);
215 218
216 } while ((len -= copy) != 0); 219 } while ((len -= copy) != 0);
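
The xdr change matters for two reasons: kmap_atomic() must not be entered twice for the same page, and memcpy() is undefined for overlapping regions. When source and destination fall on the same page, the code now takes a single mapping and uses memmove(), which tolerates overlap. A userspace sketch of the same dispatch (names illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Copy with memcpy() only when the buffers are distinct; fall back
     * to memmove() when shifting data within one buffer, as the fixed
     * _shift_data_right_pages() does for a single page. */
    static void shift_copy(char *to, size_t to_off,
                           char *from, size_t from_off, size_t len)
    {
            if (to != from)
                    memcpy(to + to_off, from + from_off, len);
            else
                    memmove(to + to_off, to + from_off, len);
    }

    int main(void)
    {
            char page[16] = "abcdefgh";

            /* overlapping shift within one "page": memcpy would be UB */
            shift_copy(page, 2, page, 0, 8);
            printf("%s\n", page + 2);       /* prints: abcdefgh */
            return 0;
    }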
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ce8249c76827..6cc7ddd2fb7c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1257,7 +1257,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
1257 /* Accept only ACK or NACK message */ 1257 /* Accept only ACK or NACK message */
1258 if (unlikely(msg_errcode(msg))) { 1258 if (unlikely(msg_errcode(msg))) {
1259 sock->state = SS_DISCONNECTING; 1259 sock->state = SS_DISCONNECTING;
1260 sk->sk_err = -ECONNREFUSED; 1260 sk->sk_err = ECONNREFUSED;
1261 retval = TIPC_OK; 1261 retval = TIPC_OK;
1262 break; 1262 break;
1263 } 1263 }
@@ -1268,7 +1268,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
 		res = auto_connect(sock, msg);
 		if (res) {
 			sock->state = SS_DISCONNECTING;
-			sk->sk_err = res;
+			sk->sk_err = -res;
 			retval = TIPC_OK;
 			break;
 		}
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index eb4a84288648..3bb2cdc13b46 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -214,5 +214,26 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
214 return inner_mode->afinfo->extract_output(x, skb); 214 return inner_mode->afinfo->extract_output(x, skb);
215} 215}
216 216
217void xfrm_local_error(struct sk_buff *skb, int mtu)
218{
219 unsigned int proto;
220 struct xfrm_state_afinfo *afinfo;
221
222 if (skb->protocol == htons(ETH_P_IP))
223 proto = AF_INET;
224 else if (skb->protocol == htons(ETH_P_IPV6))
225 proto = AF_INET6;
226 else
227 return;
228
229 afinfo = xfrm_state_get_afinfo(proto);
230 if (!afinfo)
231 return;
232
233 afinfo->local_error(skb, mtu);
234 xfrm_state_put_afinfo(afinfo);
235}
236
217EXPORT_SYMBOL_GPL(xfrm_output); 237EXPORT_SYMBOL_GPL(xfrm_output);
218EXPORT_SYMBOL_GPL(xfrm_inner_extract_output); 238EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
239EXPORT_SYMBOL_GPL(xfrm_local_error);
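
xfrm_local_error() keys the per-family local_error handler off the outer skb->protocol rather than the socket family, which is what lets IPv4-over-IPv6 and IPv6-over-IPv4 tunnels report EMSGSIZE against the correct header. A compact userspace sketch of that protocol-keyed dispatch (types and names illustrative, not the kernel's):

    #include <stdio.h>

    enum proto { DEMO_ETH_P_IP, DEMO_ETH_P_IPV6, DEMO_ETH_P_OTHER };

    struct afinfo {
            void (*local_error)(int mtu);
    };

    static void v4_local_error(int mtu) { printf("v4 EMSGSIZE, mtu=%d\n", mtu); }
    static void v6_local_error(int mtu) { printf("v6 EMSGSIZE, mtu=%d\n", mtu); }

    static const struct afinfo v4_afinfo = { .local_error = v4_local_error };
    static const struct afinfo v6_afinfo = { .local_error = v6_local_error };

    /* Pick the handler from the packet's outer protocol, as
     * xfrm_local_error() above does via xfrm_state_get_afinfo(). */
    static void local_error(enum proto skb_protocol, int mtu)
    {
            const struct afinfo *afinfo;

            if (skb_protocol == DEMO_ETH_P_IP)
                    afinfo = &v4_afinfo;
            else if (skb_protocol == DEMO_ETH_P_IPV6)
                    afinfo = &v6_afinfo;
            else
                    return;         /* unknown outer protocol: do nothing */

            afinfo->local_error(mtu);
    }

    int main(void)
    {
            local_error(DEMO_ETH_P_IPV6, 1280);
            return 0;
    }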
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ad8cc7bcf065..ed38d5d81f9e 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -320,10 +320,8 @@ static void xfrm_queue_purge(struct sk_buff_head *list)
320{ 320{
321 struct sk_buff *skb; 321 struct sk_buff *skb;
322 322
323 while ((skb = skb_dequeue(list)) != NULL) { 323 while ((skb = skb_dequeue(list)) != NULL)
324 dev_put(skb->dev);
325 kfree_skb(skb); 324 kfree_skb(skb);
326 }
327} 325}
328 326
329/* Rule must be locked. Release descentant resources, announce 327/* Rule must be locked. Release descentant resources, announce
@@ -1764,7 +1762,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
 	struct sk_buff *skb;
 	struct sock *sk;
 	struct dst_entry *dst;
-	struct net_device *dev;
 	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
 	struct xfrm_policy_queue *pq = &pol->polq;
 	struct flowi fl;
@@ -1811,7 +1808,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
 		dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
 				  &fl, skb->sk, 0);
 		if (IS_ERR(dst)) {
-			dev_put(skb->dev);
 			kfree_skb(skb);
 			continue;
 		}
@@ -1820,9 +1816,7 @@ static void xfrm_policy_queue_process(unsigned long arg)
 		skb_dst_drop(skb);
 		skb_dst_set(skb, dst);
 
-		dev = skb->dev;
 		err = dst_output(skb);
-		dev_put(dev);
 	}
 
 	return;
@@ -1845,7 +1839,6 @@ static int xdst_queue_output(struct sk_buff *skb)
 	}
 
 	skb_dst_force(skb);
-	dev_hold(skb->dev);
 
 	spin_lock_bh(&pq->hold_queue.lock);
 
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 4f8ace855864..b9c3f9e943a9 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -39,9 +39,6 @@ static DEFINE_SPINLOCK(xfrm_state_lock);
39 39
40static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; 40static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
41 41
42static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
43static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
44
45static inline unsigned int xfrm_dst_hash(struct net *net, 42static inline unsigned int xfrm_dst_hash(struct net *net,
46 const xfrm_address_t *daddr, 43 const xfrm_address_t *daddr,
47 const xfrm_address_t *saddr, 44 const xfrm_address_t *saddr,
@@ -1863,7 +1860,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
 }
 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
 
-static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
+struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
 {
 	struct xfrm_state_afinfo *afinfo;
 	if (unlikely(family >= NPROTO))
@@ -1875,7 +1872,7 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
 	return afinfo;
 }
 
-static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
+void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
 {
 	rcu_read_unlock();
 }
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index 103b33373fd4..6effe99bbb9c 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -173,11 +173,7 @@ MODULE_DEVICE_TABLE(pnp_card, snd_opti9xx_pnpids);
173 173
174#endif /* CONFIG_PNP */ 174#endif /* CONFIG_PNP */
175 175
176#ifdef OPTi93X 176#define DEV_NAME KBUILD_MODNAME
177#define DEV_NAME "opti93x"
178#else
179#define DEV_NAME "opti92x"
180#endif
181 177
182static char * snd_opti9xx_names[] = { 178static char * snd_opti9xx_names[] = {
183 "unknown", 179 "unknown",
@@ -1167,7 +1163,7 @@ static int snd_opti9xx_pnp_resume(struct pnp_card_link *pcard)
 
 static struct pnp_card_driver opti9xx_pnpc_driver = {
 	.flags = PNP_DRIVER_RES_DISABLE,
-	.name = "opti9xx",
+	.name = DEV_NAME,
 	.id_table = snd_opti9xx_pnpids,
 	.probe = snd_opti9xx_pnp_probe,
 	.remove = snd_opti9xx_pnp_remove,
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 030ca8652a1c..9f3586276871 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1781,6 +1781,9 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
1781 struct snd_pcm_chmap *chmap; 1781 struct snd_pcm_chmap *chmap;
1782 struct snd_kcontrol *kctl; 1782 struct snd_kcontrol *kctl;
1783 int i; 1783 int i;
1784
1785 if (!codec->pcm_info[pin_idx].pcm)
1786 break;
1784 err = snd_pcm_add_chmap_ctls(codec->pcm_info[pin_idx].pcm, 1787 err = snd_pcm_add_chmap_ctls(codec->pcm_info[pin_idx].pcm,
1785 SNDRV_PCM_STREAM_PLAYBACK, 1788 SNDRV_PCM_STREAM_PLAYBACK,
1786 NULL, 0, pin_idx, &chmap); 1789 NULL, 0, pin_idx, &chmap);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f303cd898515..389db4c2801b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4336,6 +4336,7 @@ static const struct hda_fixup alc662_fixups[] = {
4336 4336
4337static const struct snd_pci_quirk alc662_fixup_tbl[] = { 4337static const struct snd_pci_quirk alc662_fixup_tbl[] = {
4338 SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2), 4338 SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
4339 SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
4339 SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), 4340 SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
4340 SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), 4341 SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
4341 SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), 4342 SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),