diff options
104 files changed, 1142 insertions, 456 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index 8197fbd70a3e..b140c8123098 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -6066,7 +6066,7 @@ M: Rob Herring <rob.herring@calxeda.com> | |||
6066 | M: Pawel Moll <pawel.moll@arm.com> | 6066 | M: Pawel Moll <pawel.moll@arm.com> |
6067 | M: Mark Rutland <mark.rutland@arm.com> | 6067 | M: Mark Rutland <mark.rutland@arm.com> |
6068 | M: Stephen Warren <swarren@wwwdotorg.org> | 6068 | M: Stephen Warren <swarren@wwwdotorg.org> |
6069 | M: Ian Campbell <ian.campbell@citrix.com> | 6069 | M: Ian Campbell <ijc+devicetree@hellion.org.uk> |
6070 | L: devicetree@vger.kernel.org | 6070 | L: devicetree@vger.kernel.org |
6071 | S: Maintained | 6071 | S: Maintained |
6072 | F: Documentation/devicetree/ | 6072 | F: Documentation/devicetree/ |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 11 | 2 | PATCHLEVEL = 11 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc7 | 4 | EXTRAVERSION = |
5 | NAME = Linux for Workgroups | 5 | NAME = Linux for Workgroups |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c index 2c70f74fed5d..e110b6d4ae8c 100644 --- a/arch/arm/mach-prima2/common.c +++ b/arch/arm/mach-prima2/common.c | |||
@@ -42,7 +42,6 @@ static const char *atlas6_dt_match[] __initdata = { | |||
42 | 42 | ||
43 | DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)") | 43 | DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)") |
44 | /* Maintainer: Barry Song <baohua.song@csr.com> */ | 44 | /* Maintainer: Barry Song <baohua.song@csr.com> */ |
45 | .nr_irqs = 128, | ||
46 | .map_io = sirfsoc_map_io, | 45 | .map_io = sirfsoc_map_io, |
47 | .init_time = sirfsoc_init_time, | 46 | .init_time = sirfsoc_init_time, |
48 | .init_late = sirfsoc_init_late, | 47 | .init_late = sirfsoc_init_late, |
@@ -59,7 +58,6 @@ static const char *prima2_dt_match[] __initdata = { | |||
59 | 58 | ||
60 | DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)") | 59 | DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)") |
61 | /* Maintainer: Barry Song <baohua.song@csr.com> */ | 60 | /* Maintainer: Barry Song <baohua.song@csr.com> */ |
62 | .nr_irqs = 128, | ||
63 | .map_io = sirfsoc_map_io, | 61 | .map_io = sirfsoc_map_io, |
64 | .init_time = sirfsoc_init_time, | 62 | .init_time = sirfsoc_init_time, |
65 | .dma_zone_size = SZ_256M, | 63 | .dma_zone_size = SZ_256M, |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index dbd9d3c991e8..9cf59816d3e9 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -979,6 +979,7 @@ config RELOCATABLE | |||
979 | must live at a different physical address than the primary | 979 | must live at a different physical address than the primary |
980 | kernel. | 980 | kernel. |
981 | 981 | ||
982 | # This value must have zeroes in the bottom 60 bits otherwise lots will break | ||
982 | config PAGE_OFFSET | 983 | config PAGE_OFFSET |
983 | hex | 984 | hex |
984 | default "0xc000000000000000" | 985 | default "0xc000000000000000" |
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 988c812aab5b..b9f426212d3a 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h | |||
@@ -211,9 +211,19 @@ extern long long virt_phys_offset; | |||
211 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET)) | 211 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET)) |
212 | #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET) | 212 | #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET) |
213 | #else | 213 | #else |
214 | #ifdef CONFIG_PPC64 | ||
215 | /* | ||
216 | * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET | ||
217 | * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit. | ||
218 | */ | ||
219 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET)) | ||
220 | #define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL) | ||
221 | |||
222 | #else /* 32-bit, non book E */ | ||
214 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START)) | 223 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START)) |
215 | #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START) | 224 | #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START) |
216 | #endif | 225 | #endif |
226 | #endif | ||
217 | 227 | ||
218 | /* | 228 | /* |
219 | * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI, | 229 | * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI, |
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index d92f3871e9cf..e2a0a162299b 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c | |||
@@ -35,7 +35,13 @@ | |||
35 | #include <asm/vdso_datapage.h> | 35 | #include <asm/vdso_datapage.h> |
36 | #include <asm/vio.h> | 36 | #include <asm/vio.h> |
37 | #include <asm/mmu.h> | 37 | #include <asm/mmu.h> |
38 | #include <asm/machdep.h> | ||
38 | 39 | ||
40 | |||
41 | /* | ||
42 | * This isn't a module but we expose that to userspace | ||
43 | * via /proc so leave the definitions here | ||
44 | */ | ||
39 | #define MODULE_VERS "1.9" | 45 | #define MODULE_VERS "1.9" |
40 | #define MODULE_NAME "lparcfg" | 46 | #define MODULE_NAME "lparcfg" |
41 | 47 | ||
@@ -418,7 +424,8 @@ static void parse_em_data(struct seq_file *m) | |||
418 | { | 424 | { |
419 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | 425 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; |
420 | 426 | ||
421 | if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS) | 427 | if (firmware_has_feature(FW_FEATURE_LPAR) && |
428 | plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS) | ||
422 | seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]); | 429 | seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]); |
423 | } | 430 | } |
424 | 431 | ||
@@ -677,7 +684,6 @@ static int lparcfg_open(struct inode *inode, struct file *file) | |||
677 | } | 684 | } |
678 | 685 | ||
679 | static const struct file_operations lparcfg_fops = { | 686 | static const struct file_operations lparcfg_fops = { |
680 | .owner = THIS_MODULE, | ||
681 | .read = seq_read, | 687 | .read = seq_read, |
682 | .write = lparcfg_write, | 688 | .write = lparcfg_write, |
683 | .open = lparcfg_open, | 689 | .open = lparcfg_open, |
@@ -699,14 +705,4 @@ static int __init lparcfg_init(void) | |||
699 | } | 705 | } |
700 | return 0; | 706 | return 0; |
701 | } | 707 | } |
702 | 708 | machine_device_initcall(pseries, lparcfg_init); | |
703 | static void __exit lparcfg_cleanup(void) | ||
704 | { | ||
705 | remove_proc_subtree("powerpc/lparcfg", NULL); | ||
706 | } | ||
707 | |||
708 | module_init(lparcfg_init); | ||
709 | module_exit(lparcfg_cleanup); | ||
710 | MODULE_DESCRIPTION("Interface for LPAR configuration data"); | ||
711 | MODULE_AUTHOR("Dave Engebretsen"); | ||
712 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index b32ebf92b0ce..67e00740531c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -16,6 +16,7 @@ config X86_64 | |||
16 | def_bool y | 16 | def_bool y |
17 | depends on 64BIT | 17 | depends on 64BIT |
18 | select X86_DEV_DMA_OPS | 18 | select X86_DEV_DMA_OPS |
19 | select ARCH_USE_CMPXCHG_LOCKREF | ||
19 | 20 | ||
20 | ### Arch settings | 21 | ### Arch settings |
21 | config X86 | 22 | config X86 |
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index e3ddd7db723f..e0e668422c75 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -34,6 +34,11 @@ | |||
34 | # define UNLOCK_LOCK_PREFIX | 34 | # define UNLOCK_LOCK_PREFIX |
35 | #endif | 35 | #endif |
36 | 36 | ||
37 | static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) | ||
38 | { | ||
39 | return lock.tickets.head == lock.tickets.tail; | ||
40 | } | ||
41 | |||
37 | /* | 42 | /* |
38 | * Ticket locks are conceptually two parts, one indicating the current head of | 43 | * Ticket locks are conceptually two parts, one indicating the current head of |
39 | * the queue, and the other indicating the current tail. The lock is acquired | 44 | * the queue, and the other indicating the current tail. The lock is acquired |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 2ec29ac78ae6..04664cdb7fda 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -78,8 +78,8 @@ __ref void *alloc_low_pages(unsigned int num) | |||
78 | return __va(pfn << PAGE_SHIFT); | 78 | return __va(pfn << PAGE_SHIFT); |
79 | } | 79 | } |
80 | 80 | ||
81 | /* need 4 4k for initial PMD_SIZE, 4k for 0-ISA_END_ADDRESS */ | 81 | /* need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS */ |
82 | #define INIT_PGT_BUF_SIZE (5 * PAGE_SIZE) | 82 | #define INIT_PGT_BUF_SIZE (6 * PAGE_SIZE) |
83 | RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE); | 83 | RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE); |
84 | void __init early_alloc_pgt_buf(void) | 84 | void __init early_alloc_pgt_buf(void) |
85 | { | 85 | { |
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 2b7813ec6d02..ec386ee9cb22 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
@@ -141,6 +141,8 @@ static ssize_t show_mem_removable(struct device *dev, | |||
141 | container_of(dev, struct memory_block, dev); | 141 | container_of(dev, struct memory_block, dev); |
142 | 142 | ||
143 | for (i = 0; i < sections_per_block; i++) { | 143 | for (i = 0; i < sections_per_block; i++) { |
144 | if (!present_section_nr(mem->start_section_nr + i)) | ||
145 | continue; | ||
144 | pfn = section_nr_to_pfn(mem->start_section_nr + i); | 146 | pfn = section_nr_to_pfn(mem->start_section_nr + i); |
145 | ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); | 147 | ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); |
146 | } | 148 | } |
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index e9a2261a383b..930cad4e5df8 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c | |||
@@ -354,7 +354,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg) | |||
354 | } | 354 | } |
355 | 355 | ||
356 | if (!rbnode->blklen) { | 356 | if (!rbnode->blklen) { |
357 | rbnode->blklen = sizeof(*rbnode); | 357 | rbnode->blklen = 1; |
358 | rbnode->base_reg = reg; | 358 | rbnode->base_reg = reg; |
359 | } | 359 | } |
360 | 360 | ||
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 6825957c97fb..643d7c7a0d8e 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -194,7 +194,7 @@ config SIRF_DMA | |||
194 | Enable support for the CSR SiRFprimaII DMA engine. | 194 | Enable support for the CSR SiRFprimaII DMA engine. |
195 | 195 | ||
196 | config TI_EDMA | 196 | config TI_EDMA |
197 | tristate "TI EDMA support" | 197 | bool "TI EDMA support" |
198 | depends on ARCH_DAVINCI || ARCH_OMAP | 198 | depends on ARCH_DAVINCI || ARCH_OMAP |
199 | select DMA_ENGINE | 199 | select DMA_ENGINE |
200 | select DMA_VIRTUAL_CHANNELS | 200 | select DMA_VIRTUAL_CHANNELS |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 53cddd985406..342f1f336168 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -4440,7 +4440,7 @@ | |||
4440 | #define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22) | 4440 | #define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22) |
4441 | #define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22) | 4441 | #define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22) |
4442 | #define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22) | 4442 | #define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22) |
4443 | #define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22) | 4443 | #define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22) |
4444 | 4444 | ||
4445 | /* legacy values */ | 4445 | /* legacy values */ |
4446 | #define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22) | 4446 | #define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 3751730764a5..1a0bf07fe54b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | |||
@@ -29,7 +29,9 @@ | |||
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include <drm/ttm/ttm_bo_driver.h> | 30 | #include <drm/ttm/ttm_bo_driver.h> |
31 | 31 | ||
32 | #define VMW_PPN_SIZE sizeof(unsigned long) | 32 | #define VMW_PPN_SIZE (sizeof(unsigned long)) |
33 | /* A future safe maximum remap size. */ | ||
34 | #define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE) | ||
33 | 35 | ||
34 | static int vmw_gmr2_bind(struct vmw_private *dev_priv, | 36 | static int vmw_gmr2_bind(struct vmw_private *dev_priv, |
35 | struct page *pages[], | 37 | struct page *pages[], |
@@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv, | |||
38 | { | 40 | { |
39 | SVGAFifoCmdDefineGMR2 define_cmd; | 41 | SVGAFifoCmdDefineGMR2 define_cmd; |
40 | SVGAFifoCmdRemapGMR2 remap_cmd; | 42 | SVGAFifoCmdRemapGMR2 remap_cmd; |
41 | uint32_t define_size = sizeof(define_cmd) + 4; | ||
42 | uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4; | ||
43 | uint32_t *cmd; | 43 | uint32_t *cmd; |
44 | uint32_t *cmd_orig; | 44 | uint32_t *cmd_orig; |
45 | uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd); | ||
46 | uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0); | ||
47 | uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num; | ||
48 | uint32_t remap_pos = 0; | ||
49 | uint32_t cmd_size = define_size + remap_size; | ||
45 | uint32_t i; | 50 | uint32_t i; |
46 | 51 | ||
47 | cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size); | 52 | cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size); |
48 | if (unlikely(cmd == NULL)) | 53 | if (unlikely(cmd == NULL)) |
49 | return -ENOMEM; | 54 | return -ENOMEM; |
50 | 55 | ||
51 | define_cmd.gmrId = gmr_id; | 56 | define_cmd.gmrId = gmr_id; |
52 | define_cmd.numPages = num_pages; | 57 | define_cmd.numPages = num_pages; |
53 | 58 | ||
59 | *cmd++ = SVGA_CMD_DEFINE_GMR2; | ||
60 | memcpy(cmd, &define_cmd, sizeof(define_cmd)); | ||
61 | cmd += sizeof(define_cmd) / sizeof(*cmd); | ||
62 | |||
63 | /* | ||
64 | * Need to split the command if there are too many | ||
65 | * pages that goes into the gmr. | ||
66 | */ | ||
67 | |||
54 | remap_cmd.gmrId = gmr_id; | 68 | remap_cmd.gmrId = gmr_id; |
55 | remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ? | 69 | remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ? |
56 | SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32; | 70 | SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32; |
57 | remap_cmd.offsetPages = 0; | ||
58 | remap_cmd.numPages = num_pages; | ||
59 | 71 | ||
60 | *cmd++ = SVGA_CMD_DEFINE_GMR2; | 72 | while (num_pages > 0) { |
61 | memcpy(cmd, &define_cmd, sizeof(define_cmd)); | 73 | unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP); |
62 | cmd += sizeof(define_cmd) / sizeof(uint32); | 74 | |
75 | remap_cmd.offsetPages = remap_pos; | ||
76 | remap_cmd.numPages = nr; | ||
63 | 77 | ||
64 | *cmd++ = SVGA_CMD_REMAP_GMR2; | 78 | *cmd++ = SVGA_CMD_REMAP_GMR2; |
65 | memcpy(cmd, &remap_cmd, sizeof(remap_cmd)); | 79 | memcpy(cmd, &remap_cmd, sizeof(remap_cmd)); |
66 | cmd += sizeof(remap_cmd) / sizeof(uint32); | 80 | cmd += sizeof(remap_cmd) / sizeof(*cmd); |
67 | 81 | ||
68 | for (i = 0; i < num_pages; ++i) { | 82 | for (i = 0; i < nr; ++i) { |
69 | if (VMW_PPN_SIZE <= 4) | 83 | if (VMW_PPN_SIZE <= 4) |
70 | *cmd = page_to_pfn(*pages++); | 84 | *cmd = page_to_pfn(*pages++); |
71 | else | 85 | else |
72 | *((uint64_t *)cmd) = page_to_pfn(*pages++); | 86 | *((uint64_t *)cmd) = page_to_pfn(*pages++); |
73 | 87 | ||
74 | cmd += VMW_PPN_SIZE / sizeof(*cmd); | 88 | cmd += VMW_PPN_SIZE / sizeof(*cmd); |
89 | } | ||
90 | |||
91 | num_pages -= nr; | ||
92 | remap_pos += nr; | ||
75 | } | 93 | } |
76 | 94 | ||
77 | vmw_fifo_commit(dev_priv, define_size + remap_size); | 95 | BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd)); |
96 | |||
97 | vmw_fifo_commit(dev_priv, cmd_size); | ||
78 | 98 | ||
79 | return 0; | 99 | return 0; |
80 | } | 100 | } |
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index fa061d46527f..75e3b102ce45 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c | |||
@@ -167,6 +167,7 @@ static const struct xpad_device { | |||
167 | { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, | 167 | { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, |
168 | { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, | 168 | { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, |
169 | { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, | 169 | { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, |
170 | { 0x1689, 0xfd01, "Razer Onza Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, | ||
170 | { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, | 171 | { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, |
171 | { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, | 172 | { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, |
172 | { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, | 173 | { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, |
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 57b2637e153a..8551dcaf24db 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
@@ -672,6 +672,7 @@ static int elantech_packet_check_v2(struct psmouse *psmouse) | |||
672 | */ | 672 | */ |
673 | static int elantech_packet_check_v3(struct psmouse *psmouse) | 673 | static int elantech_packet_check_v3(struct psmouse *psmouse) |
674 | { | 674 | { |
675 | struct elantech_data *etd = psmouse->private; | ||
675 | const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff }; | 676 | const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff }; |
676 | unsigned char *packet = psmouse->packet; | 677 | unsigned char *packet = psmouse->packet; |
677 | 678 | ||
@@ -682,19 +683,48 @@ static int elantech_packet_check_v3(struct psmouse *psmouse) | |||
682 | if (!memcmp(packet, debounce_packet, sizeof(debounce_packet))) | 683 | if (!memcmp(packet, debounce_packet, sizeof(debounce_packet))) |
683 | return PACKET_DEBOUNCE; | 684 | return PACKET_DEBOUNCE; |
684 | 685 | ||
685 | if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02) | 686 | /* |
686 | return PACKET_V3_HEAD; | 687 | * If the hardware flag 'crc_enabled' is set the packets have |
688 | * different signatures. | ||
689 | */ | ||
690 | if (etd->crc_enabled) { | ||
691 | if ((packet[3] & 0x09) == 0x08) | ||
692 | return PACKET_V3_HEAD; | ||
693 | |||
694 | if ((packet[3] & 0x09) == 0x09) | ||
695 | return PACKET_V3_TAIL; | ||
696 | } else { | ||
697 | if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02) | ||
698 | return PACKET_V3_HEAD; | ||
687 | 699 | ||
688 | if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c) | 700 | if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c) |
689 | return PACKET_V3_TAIL; | 701 | return PACKET_V3_TAIL; |
702 | } | ||
690 | 703 | ||
691 | return PACKET_UNKNOWN; | 704 | return PACKET_UNKNOWN; |
692 | } | 705 | } |
693 | 706 | ||
694 | static int elantech_packet_check_v4(struct psmouse *psmouse) | 707 | static int elantech_packet_check_v4(struct psmouse *psmouse) |
695 | { | 708 | { |
709 | struct elantech_data *etd = psmouse->private; | ||
696 | unsigned char *packet = psmouse->packet; | 710 | unsigned char *packet = psmouse->packet; |
697 | unsigned char packet_type = packet[3] & 0x03; | 711 | unsigned char packet_type = packet[3] & 0x03; |
712 | bool sanity_check; | ||
713 | |||
714 | /* | ||
715 | * Sanity check based on the constant bits of a packet. | ||
716 | * The constant bits change depending on the value of | ||
717 | * the hardware flag 'crc_enabled' but are the same for | ||
718 | * every packet, regardless of the type. | ||
719 | */ | ||
720 | if (etd->crc_enabled) | ||
721 | sanity_check = ((packet[3] & 0x08) == 0x00); | ||
722 | else | ||
723 | sanity_check = ((packet[0] & 0x0c) == 0x04 && | ||
724 | (packet[3] & 0x1c) == 0x10); | ||
725 | |||
726 | if (!sanity_check) | ||
727 | return PACKET_UNKNOWN; | ||
698 | 728 | ||
699 | switch (packet_type) { | 729 | switch (packet_type) { |
700 | case 0: | 730 | case 0: |
@@ -1313,6 +1343,12 @@ static int elantech_set_properties(struct elantech_data *etd) | |||
1313 | etd->reports_pressure = true; | 1343 | etd->reports_pressure = true; |
1314 | } | 1344 | } |
1315 | 1345 | ||
1346 | /* | ||
1347 | * The signatures of v3 and v4 packets change depending on the | ||
1348 | * value of this hardware flag. | ||
1349 | */ | ||
1350 | etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000); | ||
1351 | |||
1316 | return 0; | 1352 | return 0; |
1317 | } | 1353 | } |
1318 | 1354 | ||
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h index 46db3be45ac9..036a04abaef7 100644 --- a/drivers/input/mouse/elantech.h +++ b/drivers/input/mouse/elantech.h | |||
@@ -129,6 +129,7 @@ struct elantech_data { | |||
129 | bool paritycheck; | 129 | bool paritycheck; |
130 | bool jumpy_cursor; | 130 | bool jumpy_cursor; |
131 | bool reports_pressure; | 131 | bool reports_pressure; |
132 | bool crc_enabled; | ||
132 | unsigned char hw_version; | 133 | unsigned char hw_version; |
133 | unsigned int fw_version; | 134 | unsigned int fw_version; |
134 | unsigned int single_finger_reports; | 135 | unsigned int single_finger_reports; |
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig index 94c17c28d268..1e691a3a79cb 100644 --- a/drivers/input/serio/Kconfig +++ b/drivers/input/serio/Kconfig | |||
@@ -22,7 +22,8 @@ config SERIO_I8042 | |||
22 | tristate "i8042 PC Keyboard controller" if EXPERT || !X86 | 22 | tristate "i8042 PC Keyboard controller" if EXPERT || !X86 |
23 | default y | 23 | default y |
24 | depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \ | 24 | depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \ |
25 | (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 | 25 | (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \ |
26 | !ARC | ||
26 | help | 27 | help |
27 | i8042 is the chip over which the standard AT keyboard and PS/2 | 28 | i8042 is the chip over which the standard AT keyboard and PS/2 |
28 | mouse are connected to the computer. If you use these devices, | 29 | mouse are connected to the computer. If you use these devices, |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 384fbcd0cee0..f3e91f0b57ae 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
@@ -2112,7 +2112,7 @@ static const struct wacom_features wacom_features_0xDA = | |||
2112 | { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, | 2112 | { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, |
2113 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, | 2113 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, |
2114 | .touch_max = 2 }; | 2114 | .touch_max = 2 }; |
2115 | static struct wacom_features wacom_features_0xDB = | 2115 | static const struct wacom_features wacom_features_0xDB = |
2116 | { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, | 2116 | { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, |
2117 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, | 2117 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, |
2118 | .touch_max = 2 }; | 2118 | .touch_max = 2 }; |
@@ -2127,6 +2127,12 @@ static const struct wacom_features wacom_features_0xDF = | |||
2127 | { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023, | 2127 | { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023, |
2128 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, | 2128 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, |
2129 | .touch_max = 16 }; | 2129 | .touch_max = 16 }; |
2130 | static const struct wacom_features wacom_features_0x300 = | ||
2131 | { "Wacom Bamboo One S", WACOM_PKGLEN_BBPEN, 14720, 9225, 1023, | ||
2132 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | ||
2133 | static const struct wacom_features wacom_features_0x301 = | ||
2134 | { "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023, | ||
2135 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | ||
2130 | static const struct wacom_features wacom_features_0x6004 = | 2136 | static const struct wacom_features wacom_features_0x6004 = |
2131 | { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255, | 2137 | { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255, |
2132 | 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | 2138 | 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; |
@@ -2253,6 +2259,8 @@ const struct usb_device_id wacom_ids[] = { | |||
2253 | { USB_DEVICE_WACOM(0x100) }, | 2259 | { USB_DEVICE_WACOM(0x100) }, |
2254 | { USB_DEVICE_WACOM(0x101) }, | 2260 | { USB_DEVICE_WACOM(0x101) }, |
2255 | { USB_DEVICE_WACOM(0x10D) }, | 2261 | { USB_DEVICE_WACOM(0x10D) }, |
2262 | { USB_DEVICE_WACOM(0x300) }, | ||
2263 | { USB_DEVICE_WACOM(0x301) }, | ||
2256 | { USB_DEVICE_WACOM(0x304) }, | 2264 | { USB_DEVICE_WACOM(0x304) }, |
2257 | { USB_DEVICE_WACOM(0x4001) }, | 2265 | { USB_DEVICE_WACOM(0x4001) }, |
2258 | { USB_DEVICE_WACOM(0x47) }, | 2266 | { USB_DEVICE_WACOM(0x47) }, |
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c index 69ea44ebcf61..4851afae38dc 100644 --- a/drivers/irqchip/irq-sirfsoc.c +++ b/drivers/irqchip/irq-sirfsoc.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #define SIRFSOC_INT_RISC_LEVEL1 0x0024 | 23 | #define SIRFSOC_INT_RISC_LEVEL1 0x0024 |
24 | #define SIRFSOC_INIT_IRQ_ID 0x0038 | 24 | #define SIRFSOC_INIT_IRQ_ID 0x0038 |
25 | 25 | ||
26 | #define SIRFSOC_NUM_IRQS 128 | 26 | #define SIRFSOC_NUM_IRQS 64 |
27 | 27 | ||
28 | static struct irq_domain *sirfsoc_irqdomain; | 28 | static struct irq_domain *sirfsoc_irqdomain; |
29 | 29 | ||
@@ -32,15 +32,18 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) | |||
32 | { | 32 | { |
33 | struct irq_chip_generic *gc; | 33 | struct irq_chip_generic *gc; |
34 | struct irq_chip_type *ct; | 34 | struct irq_chip_type *ct; |
35 | int ret; | ||
36 | unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; | ||
35 | 37 | ||
36 | gc = irq_alloc_generic_chip("SIRFINTC", 1, irq_start, base, handle_level_irq); | 38 | ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc", |
37 | ct = gc->chip_types; | 39 | handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); |
38 | 40 | ||
41 | gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start); | ||
42 | gc->reg_base = base; | ||
43 | ct = gc->chip_types; | ||
39 | ct->chip.irq_mask = irq_gc_mask_clr_bit; | 44 | ct->chip.irq_mask = irq_gc_mask_clr_bit; |
40 | ct->chip.irq_unmask = irq_gc_mask_set_bit; | 45 | ct->chip.irq_unmask = irq_gc_mask_set_bit; |
41 | ct->regs.mask = SIRFSOC_INT_RISC_MASK0; | 46 | ct->regs.mask = SIRFSOC_INT_RISC_MASK0; |
42 | |||
43 | irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 0); | ||
44 | } | 47 | } |
45 | 48 | ||
46 | static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs) | 49 | static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs) |
@@ -60,9 +63,8 @@ static int __init sirfsoc_irq_init(struct device_node *np, struct device_node *p | |||
60 | if (!base) | 63 | if (!base) |
61 | panic("unable to map intc cpu registers\n"); | 64 | panic("unable to map intc cpu registers\n"); |
62 | 65 | ||
63 | /* using legacy because irqchip_generic does not work with linear */ | 66 | sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS, |
64 | sirfsoc_irqdomain = irq_domain_add_legacy(np, SIRFSOC_NUM_IRQS, 0, 0, | 67 | &irq_generic_chip_ops, base); |
65 | &irq_domain_simple_ops, base); | ||
66 | 68 | ||
67 | sirfsoc_alloc_gc(base, 0, 32); | 69 | sirfsoc_alloc_gc(base, 0, 32); |
68 | sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32); | 70 | sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32); |
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 22b720ec80cb..77025f5cb57d 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c | |||
@@ -288,8 +288,10 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb) | |||
288 | u8 *data; | 288 | u8 *data; |
289 | int len; | 289 | int len; |
290 | 290 | ||
291 | if (skb->len < sizeof(int)) | 291 | if (skb->len < sizeof(int)) { |
292 | printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__); | 292 | printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__); |
293 | return -EINVAL; | ||
294 | } | ||
293 | cont = *((int *)skb->data); | 295 | cont = *((int *)skb->data); |
294 | len = skb->len - sizeof(int); | 296 | len = skb->len - sizeof(int); |
295 | data = skb->data + sizeof(int); | 297 | data = skb->data + sizeof(int); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index f2d1ff10054b..0cc26110868d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -53,6 +53,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) | |||
53 | struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; | 53 | struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; |
54 | int old_max_eth_txqs, new_max_eth_txqs; | 54 | int old_max_eth_txqs, new_max_eth_txqs; |
55 | int old_txdata_index = 0, new_txdata_index = 0; | 55 | int old_txdata_index = 0, new_txdata_index = 0; |
56 | struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info; | ||
56 | 57 | ||
57 | /* Copy the NAPI object as it has been already initialized */ | 58 | /* Copy the NAPI object as it has been already initialized */ |
58 | from_fp->napi = to_fp->napi; | 59 | from_fp->napi = to_fp->napi; |
@@ -61,6 +62,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) | |||
61 | memcpy(to_fp, from_fp, sizeof(*to_fp)); | 62 | memcpy(to_fp, from_fp, sizeof(*to_fp)); |
62 | to_fp->index = to; | 63 | to_fp->index = to; |
63 | 64 | ||
65 | /* Retain the tpa_info of the original `to' version as we don't want | ||
66 | * 2 FPs to contain the same tpa_info pointer. | ||
67 | */ | ||
68 | to_fp->tpa_info = old_tpa_info; | ||
69 | |||
64 | /* move sp_objs contents as well, as their indices match fp ones */ | 70 | /* move sp_objs contents as well, as their indices match fp ones */ |
65 | memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); | 71 | memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); |
66 | 72 | ||
@@ -2956,8 +2962,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) | |||
2956 | if (IS_PF(bp)) { | 2962 | if (IS_PF(bp)) { |
2957 | if (CNIC_LOADED(bp)) | 2963 | if (CNIC_LOADED(bp)) |
2958 | bnx2x_free_mem_cnic(bp); | 2964 | bnx2x_free_mem_cnic(bp); |
2959 | bnx2x_free_mem(bp); | ||
2960 | } | 2965 | } |
2966 | bnx2x_free_mem(bp); | ||
2967 | |||
2961 | bp->state = BNX2X_STATE_CLOSED; | 2968 | bp->state = BNX2X_STATE_CLOSED; |
2962 | bp->cnic_loaded = false; | 2969 | bp->cnic_loaded = false; |
2963 | 2970 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 8bdc8b973007..1627a4e09c32 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -7855,12 +7855,15 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
7855 | { | 7855 | { |
7856 | int i; | 7856 | int i; |
7857 | 7857 | ||
7858 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, | ||
7859 | sizeof(struct host_sp_status_block)); | ||
7860 | |||
7861 | BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, | 7858 | BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, |
7862 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); | 7859 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); |
7863 | 7860 | ||
7861 | if (IS_VF(bp)) | ||
7862 | return; | ||
7863 | |||
7864 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, | ||
7865 | sizeof(struct host_sp_status_block)); | ||
7866 | |||
7864 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, | 7867 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, |
7865 | sizeof(struct bnx2x_slowpath)); | 7868 | sizeof(struct bnx2x_slowpath)); |
7866 | 7869 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index ad83f4b48777..e8706e19f96f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
@@ -522,23 +522,6 @@ static int bnx2x_vfop_set_user_req(struct bnx2x *bp, | |||
522 | return 0; | 522 | return 0; |
523 | } | 523 | } |
524 | 524 | ||
525 | static int | ||
526 | bnx2x_vfop_config_vlan0(struct bnx2x *bp, | ||
527 | struct bnx2x_vlan_mac_ramrod_params *vlan_mac, | ||
528 | bool add) | ||
529 | { | ||
530 | int rc; | ||
531 | |||
532 | vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD : | ||
533 | BNX2X_VLAN_MAC_DEL; | ||
534 | vlan_mac->user_req.u.vlan.vlan = 0; | ||
535 | |||
536 | rc = bnx2x_config_vlan_mac(bp, vlan_mac); | ||
537 | if (rc == -EEXIST) | ||
538 | rc = 0; | ||
539 | return rc; | ||
540 | } | ||
541 | |||
542 | static int bnx2x_vfop_config_list(struct bnx2x *bp, | 525 | static int bnx2x_vfop_config_list(struct bnx2x *bp, |
543 | struct bnx2x_vfop_filters *filters, | 526 | struct bnx2x_vfop_filters *filters, |
544 | struct bnx2x_vlan_mac_ramrod_params *vlan_mac) | 527 | struct bnx2x_vlan_mac_ramrod_params *vlan_mac) |
@@ -643,30 +626,14 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
643 | 626 | ||
644 | case BNX2X_VFOP_VLAN_CONFIG_LIST: | 627 | case BNX2X_VFOP_VLAN_CONFIG_LIST: |
645 | /* next state */ | 628 | /* next state */ |
646 | vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0; | 629 | vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; |
647 | |||
648 | /* remove vlan0 - could be no-op */ | ||
649 | vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false); | ||
650 | if (vfop->rc) | ||
651 | goto op_err; | ||
652 | 630 | ||
653 | /* Do vlan list config. if this operation fails we try to | 631 | /* do list config */ |
654 | * restore vlan0 to keep the queue is working order | ||
655 | */ | ||
656 | vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); | 632 | vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); |
657 | if (!vfop->rc) { | 633 | if (!vfop->rc) { |
658 | set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); | 634 | set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); |
659 | vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); | 635 | vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); |
660 | } | 636 | } |
661 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */ | ||
662 | |||
663 | case BNX2X_VFOP_VLAN_CONFIG_LIST_0: | ||
664 | /* next state */ | ||
665 | vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; | ||
666 | |||
667 | if (list_empty(&obj->head)) | ||
668 | /* add vlan0 */ | ||
669 | vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true); | ||
670 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); | 637 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); |
671 | 638 | ||
672 | default: | 639 | default: |
@@ -2819,6 +2786,18 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) | |||
2819 | return 0; | 2786 | return 0; |
2820 | } | 2787 | } |
2821 | 2788 | ||
2789 | struct set_vf_state_cookie { | ||
2790 | struct bnx2x_virtf *vf; | ||
2791 | u8 state; | ||
2792 | }; | ||
2793 | |||
2794 | void bnx2x_set_vf_state(void *cookie) | ||
2795 | { | ||
2796 | struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; | ||
2797 | |||
2798 | p->vf->state = p->state; | ||
2799 | } | ||
2800 | |||
2822 | /* VFOP close (teardown the queues, delete mcasts and close HW) */ | 2801 | /* VFOP close (teardown the queues, delete mcasts and close HW) */ |
2823 | static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) | 2802 | static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) |
2824 | { | 2803 | { |
@@ -2869,7 +2848,19 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
2869 | op_err: | 2848 | op_err: |
2870 | BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); | 2849 | BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); |
2871 | op_done: | 2850 | op_done: |
2872 | vf->state = VF_ACQUIRED; | 2851 | |
2852 | /* need to make sure there are no outstanding stats ramrods which may | ||
2853 | * cause the device to access the VF's stats buffer which it will free | ||
2854 | * as soon as we return from the close flow. | ||
2855 | */ | ||
2856 | { | ||
2857 | struct set_vf_state_cookie cookie; | ||
2858 | |||
2859 | cookie.vf = vf; | ||
2860 | cookie.state = VF_ACQUIRED; | ||
2861 | bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); | ||
2862 | } | ||
2863 | |||
2873 | DP(BNX2X_MSG_IOV, "set state to acquired\n"); | 2864 | DP(BNX2X_MSG_IOV, "set state to acquired\n"); |
2874 | bnx2x_vfop_end(bp, vf, vfop); | 2865 | bnx2x_vfop_end(bp, vf, vfop); |
2875 | } | 2866 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index d63d1327b051..86436c77af03 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | |||
@@ -522,20 +522,16 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) | |||
522 | /* should be called under stats_sema */ | 522 | /* should be called under stats_sema */ |
523 | static void __bnx2x_stats_start(struct bnx2x *bp) | 523 | static void __bnx2x_stats_start(struct bnx2x *bp) |
524 | { | 524 | { |
525 | /* vfs travel through here as part of the statistics FSM, but no action | 525 | if (IS_PF(bp)) { |
526 | * is required | 526 | if (bp->port.pmf) |
527 | */ | 527 | bnx2x_port_stats_init(bp); |
528 | if (IS_VF(bp)) | ||
529 | return; | ||
530 | |||
531 | if (bp->port.pmf) | ||
532 | bnx2x_port_stats_init(bp); | ||
533 | 528 | ||
534 | else if (bp->func_stx) | 529 | else if (bp->func_stx) |
535 | bnx2x_func_stats_init(bp); | 530 | bnx2x_func_stats_init(bp); |
536 | 531 | ||
537 | bnx2x_hw_stats_post(bp); | 532 | bnx2x_hw_stats_post(bp); |
538 | bnx2x_storm_stats_post(bp); | 533 | bnx2x_storm_stats_post(bp); |
534 | } | ||
539 | 535 | ||
540 | bp->stats_started = true; | 536 | bp->stats_started = true; |
541 | } | 537 | } |
@@ -1997,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, | |||
1997 | estats->mac_discard); | 1993 | estats->mac_discard); |
1998 | } | 1994 | } |
1999 | } | 1995 | } |
1996 | |||
1997 | void bnx2x_stats_safe_exec(struct bnx2x *bp, | ||
1998 | void (func_to_exec)(void *cookie), | ||
1999 | void *cookie){ | ||
2000 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
2001 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
2002 | bnx2x_stats_comp(bp); | ||
2003 | func_to_exec(cookie); | ||
2004 | __bnx2x_stats_start(bp); | ||
2005 | up(&bp->stats_sema); | ||
2006 | } | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 853824d258e8..f35845006cdd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | |||
@@ -539,6 +539,9 @@ struct bnx2x; | |||
539 | void bnx2x_memset_stats(struct bnx2x *bp); | 539 | void bnx2x_memset_stats(struct bnx2x *bp); |
540 | void bnx2x_stats_init(struct bnx2x *bp); | 540 | void bnx2x_stats_init(struct bnx2x *bp); |
541 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | 541 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); |
542 | void bnx2x_stats_safe_exec(struct bnx2x *bp, | ||
543 | void (func_to_exec)(void *cookie), | ||
544 | void *cookie); | ||
542 | 545 | ||
543 | /** | 546 | /** |
544 | * bnx2x_save_statistics - save statistics when unloading. | 547 | * bnx2x_save_statistics - save statistics when unloading. |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 4559c35eea13..3d91a5ec61a4 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -4373,6 +4373,10 @@ static int be_resume(struct pci_dev *pdev) | |||
4373 | pci_set_power_state(pdev, PCI_D0); | 4373 | pci_set_power_state(pdev, PCI_D0); |
4374 | pci_restore_state(pdev); | 4374 | pci_restore_state(pdev); |
4375 | 4375 | ||
4376 | status = be_fw_wait_ready(adapter); | ||
4377 | if (status) | ||
4378 | return status; | ||
4379 | |||
4376 | /* tell fw we're ready to fire cmds */ | 4380 | /* tell fw we're ready to fire cmds */ |
4377 | status = be_cmd_fw_init(adapter); | 4381 | status = be_cmd_fw_init(adapter); |
4378 | if (status) | 4382 | if (status) |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 77ea0db0bbfc..c610a2716be4 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -971,8 +971,7 @@ fec_enet_rx(struct net_device *ndev, int budget) | |||
971 | htons(ETH_P_8021Q), | 971 | htons(ETH_P_8021Q), |
972 | vlan_tag); | 972 | vlan_tag); |
973 | 973 | ||
974 | if (!skb_defer_rx_timestamp(skb)) | 974 | napi_gro_receive(&fep->napi, skb); |
975 | napi_gro_receive(&fep->napi, skb); | ||
976 | } | 975 | } |
977 | 976 | ||
978 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, | 977 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, |
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 7fbe6abf6054..23de82a9da82 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c | |||
@@ -3069,7 +3069,7 @@ jme_init_one(struct pci_dev *pdev, | |||
3069 | jwrite32(jme, JME_APMC, apmc); | 3069 | jwrite32(jme, JME_APMC, apmc); |
3070 | } | 3070 | } |
3071 | 3071 | ||
3072 | NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) | 3072 | NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT) |
3073 | 3073 | ||
3074 | spin_lock_init(&jme->phy_lock); | 3074 | spin_lock_init(&jme->phy_lock); |
3075 | spin_lock_init(&jme->macaddr_lock); | 3075 | spin_lock_init(&jme->macaddr_lock); |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index 3fe09ab2d7c9..32675e16021e 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h | |||
@@ -1171,7 +1171,6 @@ typedef struct { | |||
1171 | 1171 | ||
1172 | #define NETXEN_DB_MAPSIZE_BYTES 0x1000 | 1172 | #define NETXEN_DB_MAPSIZE_BYTES 0x1000 |
1173 | 1173 | ||
1174 | #define NETXEN_NETDEV_WEIGHT 128 | ||
1175 | #define NETXEN_ADAPTER_UP_MAGIC 777 | 1174 | #define NETXEN_ADAPTER_UP_MAGIC 777 |
1176 | #define NETXEN_NIC_PEG_TUNE 0 | 1175 | #define NETXEN_NIC_PEG_TUNE 0 |
1177 | 1176 | ||
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index c401b0b4353d..ec4cf7fd4123 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | |||
@@ -197,7 +197,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) | |||
197 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { | 197 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { |
198 | sds_ring = &recv_ctx->sds_rings[ring]; | 198 | sds_ring = &recv_ctx->sds_rings[ring]; |
199 | netif_napi_add(netdev, &sds_ring->napi, | 199 | netif_napi_add(netdev, &sds_ring->napi, |
200 | netxen_nic_poll, NETXEN_NETDEV_WEIGHT); | 200 | netxen_nic_poll, NAPI_POLL_WEIGHT); |
201 | } | 201 | } |
202 | 202 | ||
203 | return 0; | 203 | return 0; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 03de76c7a177..1c83a44c547b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -71,14 +71,18 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, | |||
71 | plat->force_sf_dma_mode = 1; | 71 | plat->force_sf_dma_mode = 1; |
72 | } | 72 | } |
73 | 73 | ||
74 | dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); | 74 | if (of_find_property(np, "snps,pbl", NULL)) { |
75 | if (!dma_cfg) | 75 | dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), |
76 | return -ENOMEM; | 76 | GFP_KERNEL); |
77 | 77 | if (!dma_cfg) | |
78 | plat->dma_cfg = dma_cfg; | 78 | return -ENOMEM; |
79 | of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); | 79 | plat->dma_cfg = dma_cfg; |
80 | dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); | 80 | of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); |
81 | dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); | 81 | dma_cfg->fixed_burst = |
82 | of_property_read_bool(np, "snps,fixed-burst"); | ||
83 | dma_cfg->mixed_burst = | ||
84 | of_property_read_bool(np, "snps,mixed-burst"); | ||
85 | } | ||
82 | 86 | ||
83 | return 0; | 87 | return 0; |
84 | } | 88 | } |
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index ad32af67e618..9c805e0c0cae 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c | |||
@@ -1466,8 +1466,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev, | |||
1466 | { | 1466 | { |
1467 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; | 1467 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; |
1468 | /* NAPI */ | 1468 | /* NAPI */ |
1469 | netif_napi_add(netdev, napi, | 1469 | netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT); |
1470 | gelic_net_poll, GELIC_NET_NAPI_WEIGHT); | ||
1471 | netdev->ethtool_ops = &gelic_ether_ethtool_ops; | 1470 | netdev->ethtool_ops = &gelic_ether_ethtool_ops; |
1472 | netdev->netdev_ops = &gelic_netdevice_ops; | 1471 | netdev->netdev_ops = &gelic_netdevice_ops; |
1473 | } | 1472 | } |
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index a93df6ac1909..309abb472aa2 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h | |||
@@ -37,7 +37,6 @@ | |||
37 | #define GELIC_NET_RXBUF_ALIGN 128 | 37 | #define GELIC_NET_RXBUF_ALIGN 128 |
38 | #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ | 38 | #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ |
39 | #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ | 39 | #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ |
40 | #define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS) | ||
41 | #define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL | 40 | #define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL |
42 | 41 | ||
43 | #define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ | 42 | #define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ |
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index e90e1f46121e..64b4639f43b6 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | |||
@@ -175,6 +175,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np) | |||
175 | printk(KERN_WARNING "Setting MDIO clock divisor to " | 175 | printk(KERN_WARNING "Setting MDIO clock divisor to " |
176 | "default %d\n", DEFAULT_CLOCK_DIVISOR); | 176 | "default %d\n", DEFAULT_CLOCK_DIVISOR); |
177 | clk_div = DEFAULT_CLOCK_DIVISOR; | 177 | clk_div = DEFAULT_CLOCK_DIVISOR; |
178 | of_node_put(np1); | ||
178 | goto issue; | 179 | goto issue; |
179 | } | 180 | } |
180 | 181 | ||
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 872819851aef..25ba7eca9a13 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c | |||
@@ -400,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = { | |||
400 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | 400 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), |
401 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, | 401 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, |
402 | }, | 402 | }, |
403 | /* HP hs2434 Mobile Broadband Module needs ZLPs */ | ||
404 | { USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | ||
405 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, | ||
406 | }, | ||
403 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | 407 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), |
404 | .driver_info = (unsigned long)&cdc_mbim_info, | 408 | .driver_info = (unsigned long)&cdc_mbim_info, |
405 | }, | 409 | }, |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index e602c9519709..c028df76b564 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | |||
@@ -448,6 +448,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv, | |||
448 | struct ieee80211_conf *cur_conf = &priv->hw->conf; | 448 | struct ieee80211_conf *cur_conf = &priv->hw->conf; |
449 | bool txok; | 449 | bool txok; |
450 | int slot; | 450 | int slot; |
451 | int hdrlen, padsize; | ||
451 | 452 | ||
452 | slot = strip_drv_header(priv, skb); | 453 | slot = strip_drv_header(priv, skb); |
453 | if (slot < 0) { | 454 | if (slot < 0) { |
@@ -504,6 +505,15 @@ send_mac80211: | |||
504 | 505 | ||
505 | ath9k_htc_tx_clear_slot(priv, slot); | 506 | ath9k_htc_tx_clear_slot(priv, slot); |
506 | 507 | ||
508 | /* Remove padding before handing frame back to mac80211 */ | ||
509 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | ||
510 | |||
511 | padsize = hdrlen & 3; | ||
512 | if (padsize && skb->len > hdrlen + padsize) { | ||
513 | memmove(skb->data + padsize, skb->data, hdrlen); | ||
514 | skb_pull(skb, padsize); | ||
515 | } | ||
516 | |||
507 | /* Send status to mac80211 */ | 517 | /* Send status to mac80211 */ |
508 | ieee80211_tx_status(priv->hw, skb); | 518 | ieee80211_tx_status(priv->hw, skb); |
509 | } | 519 | } |
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 16f8b201642b..026a2a067b46 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c | |||
@@ -802,7 +802,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) | |||
802 | IEEE80211_HW_PS_NULLFUNC_STACK | | 802 | IEEE80211_HW_PS_NULLFUNC_STACK | |
803 | IEEE80211_HW_SPECTRUM_MGMT | | 803 | IEEE80211_HW_SPECTRUM_MGMT | |
804 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | | 804 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | |
805 | IEEE80211_HW_SUPPORTS_RC_TABLE; | 805 | IEEE80211_HW_SUPPORTS_RC_TABLE | |
806 | IEEE80211_HW_SUPPORTS_HT_CCK_RATES; | ||
806 | 807 | ||
807 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { | 808 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { |
808 | hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; | 809 | hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 1737a3e33685..cb5a65553ac7 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -173,8 +173,7 @@ static void ath_restart_work(struct ath_softc *sc) | |||
173 | { | 173 | { |
174 | ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); | 174 | ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); |
175 | 175 | ||
176 | if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) || | 176 | if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah)) |
177 | AR_SREV_9550(sc->sc_ah)) | ||
178 | ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, | 177 | ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, |
179 | msecs_to_jiffies(ATH_PLL_WORK_INTERVAL)); | 178 | msecs_to_jiffies(ATH_PLL_WORK_INTERVAL)); |
180 | 179 | ||
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 4a33c6e39ca2..349fa22a921a 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c | |||
@@ -1860,7 +1860,8 @@ void *carl9170_alloc(size_t priv_size) | |||
1860 | IEEE80211_HW_PS_NULLFUNC_STACK | | 1860 | IEEE80211_HW_PS_NULLFUNC_STACK | |
1861 | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | | 1861 | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | |
1862 | IEEE80211_HW_SUPPORTS_RC_TABLE | | 1862 | IEEE80211_HW_SUPPORTS_RC_TABLE | |
1863 | IEEE80211_HW_SIGNAL_DBM; | 1863 | IEEE80211_HW_SIGNAL_DBM | |
1864 | IEEE80211_HW_SUPPORTS_HT_CCK_RATES; | ||
1864 | 1865 | ||
1865 | if (!modparam_noht) { | 1866 | if (!modparam_noht) { |
1866 | /* | 1867 | /* |
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index f2ed62e37340..7acf5ee23582 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c | |||
@@ -4464,9 +4464,9 @@ il4965_irq_tasklet(struct il_priv *il) | |||
4464 | set_bit(S_RFKILL, &il->status); | 4464 | set_bit(S_RFKILL, &il->status); |
4465 | } else { | 4465 | } else { |
4466 | clear_bit(S_RFKILL, &il->status); | 4466 | clear_bit(S_RFKILL, &il->status); |
4467 | wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); | ||
4468 | il_force_reset(il, true); | 4467 | il_force_reset(il, true); |
4469 | } | 4468 | } |
4469 | wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); | ||
4470 | 4470 | ||
4471 | handled |= CSR_INT_BIT_RF_KILL; | 4471 | handled |= CSR_INT_BIT_RF_KILL; |
4472 | } | 4472 | } |
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 1f80ea5e29dd..1b41c8eda12d 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c | |||
@@ -6133,7 +6133,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) | |||
6133 | IEEE80211_HW_SUPPORTS_PS | | 6133 | IEEE80211_HW_SUPPORTS_PS | |
6134 | IEEE80211_HW_PS_NULLFUNC_STACK | | 6134 | IEEE80211_HW_PS_NULLFUNC_STACK | |
6135 | IEEE80211_HW_AMPDU_AGGREGATION | | 6135 | IEEE80211_HW_AMPDU_AGGREGATION | |
6136 | IEEE80211_HW_REPORTS_TX_ACK_STATUS; | 6136 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | |
6137 | IEEE80211_HW_SUPPORTS_HT_CCK_RATES; | ||
6137 | 6138 | ||
6138 | /* | 6139 | /* |
6139 | * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices | 6140 | * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices |
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 5456f5c73593..4a2195752198 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c | |||
@@ -221,7 +221,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) | |||
221 | pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; | 221 | pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; |
222 | for (i = 0; i < PM8001_MAX_INB_NUM; i++) { | 222 | for (i = 0; i < PM8001_MAX_INB_NUM; i++) { |
223 | pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = | 223 | pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = |
224 | PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); | 224 | PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); |
225 | pm8001_ha->inbnd_q_tbl[i].upper_base_addr = | 225 | pm8001_ha->inbnd_q_tbl[i].upper_base_addr = |
226 | pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; | 226 | pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; |
227 | pm8001_ha->inbnd_q_tbl[i].lower_base_addr = | 227 | pm8001_ha->inbnd_q_tbl[i].lower_base_addr = |
@@ -247,7 +247,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) | |||
247 | } | 247 | } |
248 | for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { | 248 | for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { |
249 | pm8001_ha->outbnd_q_tbl[i].element_size_cnt = | 249 | pm8001_ha->outbnd_q_tbl[i].element_size_cnt = |
250 | PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); | 250 | PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); |
251 | pm8001_ha->outbnd_q_tbl[i].upper_base_addr = | 251 | pm8001_ha->outbnd_q_tbl[i].upper_base_addr = |
252 | pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; | 252 | pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; |
253 | pm8001_ha->outbnd_q_tbl[i].lower_base_addr = | 253 | pm8001_ha->outbnd_q_tbl[i].lower_base_addr = |
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 7f77210f5cf3..9f91030211e8 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c | |||
@@ -275,7 +275,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) | |||
275 | 275 | ||
276 | for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { | 276 | for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { |
277 | pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = | 277 | pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = |
278 | PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); | 278 | PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); |
279 | pm8001_ha->inbnd_q_tbl[i].upper_base_addr = | 279 | pm8001_ha->inbnd_q_tbl[i].upper_base_addr = |
280 | pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; | 280 | pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; |
281 | pm8001_ha->inbnd_q_tbl[i].lower_base_addr = | 281 | pm8001_ha->inbnd_q_tbl[i].lower_base_addr = |
@@ -301,7 +301,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) | |||
301 | } | 301 | } |
302 | for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { | 302 | for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { |
303 | pm8001_ha->outbnd_q_tbl[i].element_size_cnt = | 303 | pm8001_ha->outbnd_q_tbl[i].element_size_cnt = |
304 | PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); | 304 | PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); |
305 | pm8001_ha->outbnd_q_tbl[i].upper_base_addr = | 305 | pm8001_ha->outbnd_q_tbl[i].upper_base_addr = |
306 | pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; | 306 | pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; |
307 | pm8001_ha->outbnd_q_tbl[i].lower_base_addr = | 307 | pm8001_ha->outbnd_q_tbl[i].lower_base_addr = |
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c index 3396eb9d57a3..ac2767100df5 100644 --- a/drivers/tty/hvc/hvsi_lib.c +++ b/drivers/tty/hvc/hvsi_lib.c | |||
@@ -341,8 +341,8 @@ void hvsilib_establish(struct hvsi_priv *pv) | |||
341 | 341 | ||
342 | pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno); | 342 | pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno); |
343 | 343 | ||
344 | /* Try for up to 200s */ | 344 | /* Try for up to 400ms */ |
345 | for (timeout = 0; timeout < 20; timeout++) { | 345 | for (timeout = 0; timeout < 40; timeout++) { |
346 | if (pv->established) | 346 | if (pv->established) |
347 | goto established; | 347 | goto established; |
348 | if (!hvsi_get_packet(pv)) | 348 | if (!hvsi_get_packet(pv)) |
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 0f1d193fef02..279b04910f00 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c | |||
@@ -305,9 +305,11 @@ static int __init ohci_pci_init(void) | |||
305 | 305 | ||
306 | ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); | 306 | ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); |
307 | 307 | ||
308 | #ifdef CONFIG_PM | ||
308 | /* Entries for the PCI suspend/resume callbacks are special */ | 309 | /* Entries for the PCI suspend/resume callbacks are special */ |
309 | ohci_pci_hc_driver.pci_suspend = ohci_suspend; | 310 | ohci_pci_hc_driver.pci_suspend = ohci_suspend; |
310 | ohci_pci_hc_driver.pci_resume = ohci_resume; | 311 | ohci_pci_hc_driver.pci_resume = ohci_resume; |
312 | #endif | ||
311 | 313 | ||
312 | return pci_register_driver(&ohci_pci_driver); | 314 | return pci_register_driver(&ohci_pci_driver); |
313 | } | 315 | } |
diff --git a/fs/dcache.c b/fs/dcache.c index 83cfb834db03..96655f4f4574 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -229,7 +229,7 @@ static void __d_free(struct rcu_head *head) | |||
229 | */ | 229 | */ |
230 | static void d_free(struct dentry *dentry) | 230 | static void d_free(struct dentry *dentry) |
231 | { | 231 | { |
232 | BUG_ON(dentry->d_count); | 232 | BUG_ON(dentry->d_lockref.count); |
233 | this_cpu_dec(nr_dentry); | 233 | this_cpu_dec(nr_dentry); |
234 | if (dentry->d_op && dentry->d_op->d_release) | 234 | if (dentry->d_op && dentry->d_op->d_release) |
235 | dentry->d_op->d_release(dentry); | 235 | dentry->d_op->d_release(dentry); |
@@ -467,7 +467,7 @@ relock: | |||
467 | } | 467 | } |
468 | 468 | ||
469 | if (ref) | 469 | if (ref) |
470 | dentry->d_count--; | 470 | dentry->d_lockref.count--; |
471 | /* | 471 | /* |
472 | * inform the fs via d_prune that this dentry is about to be | 472 | * inform the fs via d_prune that this dentry is about to be |
473 | * unhashed and destroyed. | 473 | * unhashed and destroyed. |
@@ -513,15 +513,10 @@ void dput(struct dentry *dentry) | |||
513 | return; | 513 | return; |
514 | 514 | ||
515 | repeat: | 515 | repeat: |
516 | if (dentry->d_count == 1) | 516 | if (dentry->d_lockref.count == 1) |
517 | might_sleep(); | 517 | might_sleep(); |
518 | spin_lock(&dentry->d_lock); | 518 | if (lockref_put_or_lock(&dentry->d_lockref)) |
519 | BUG_ON(!dentry->d_count); | ||
520 | if (dentry->d_count > 1) { | ||
521 | dentry->d_count--; | ||
522 | spin_unlock(&dentry->d_lock); | ||
523 | return; | 519 | return; |
524 | } | ||
525 | 520 | ||
526 | if (dentry->d_flags & DCACHE_OP_DELETE) { | 521 | if (dentry->d_flags & DCACHE_OP_DELETE) { |
527 | if (dentry->d_op->d_delete(dentry)) | 522 | if (dentry->d_op->d_delete(dentry)) |
@@ -535,7 +530,7 @@ repeat: | |||
535 | dentry->d_flags |= DCACHE_REFERENCED; | 530 | dentry->d_flags |= DCACHE_REFERENCED; |
536 | dentry_lru_add(dentry); | 531 | dentry_lru_add(dentry); |
537 | 532 | ||
538 | dentry->d_count--; | 533 | dentry->d_lockref.count--; |
539 | spin_unlock(&dentry->d_lock); | 534 | spin_unlock(&dentry->d_lock); |
540 | return; | 535 | return; |
541 | 536 | ||
@@ -590,7 +585,7 @@ int d_invalidate(struct dentry * dentry) | |||
590 | * We also need to leave mountpoints alone, | 585 | * We also need to leave mountpoints alone, |
591 | * directory or not. | 586 | * directory or not. |
592 | */ | 587 | */ |
593 | if (dentry->d_count > 1 && dentry->d_inode) { | 588 | if (dentry->d_lockref.count > 1 && dentry->d_inode) { |
594 | if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) { | 589 | if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) { |
595 | spin_unlock(&dentry->d_lock); | 590 | spin_unlock(&dentry->d_lock); |
596 | return -EBUSY; | 591 | return -EBUSY; |
@@ -606,20 +601,33 @@ EXPORT_SYMBOL(d_invalidate); | |||
606 | /* This must be called with d_lock held */ | 601 | /* This must be called with d_lock held */ |
607 | static inline void __dget_dlock(struct dentry *dentry) | 602 | static inline void __dget_dlock(struct dentry *dentry) |
608 | { | 603 | { |
609 | dentry->d_count++; | 604 | dentry->d_lockref.count++; |
610 | } | 605 | } |
611 | 606 | ||
612 | static inline void __dget(struct dentry *dentry) | 607 | static inline void __dget(struct dentry *dentry) |
613 | { | 608 | { |
614 | spin_lock(&dentry->d_lock); | 609 | lockref_get(&dentry->d_lockref); |
615 | __dget_dlock(dentry); | ||
616 | spin_unlock(&dentry->d_lock); | ||
617 | } | 610 | } |
618 | 611 | ||
619 | struct dentry *dget_parent(struct dentry *dentry) | 612 | struct dentry *dget_parent(struct dentry *dentry) |
620 | { | 613 | { |
614 | int gotref; | ||
621 | struct dentry *ret; | 615 | struct dentry *ret; |
622 | 616 | ||
617 | /* | ||
618 | * Do optimistic parent lookup without any | ||
619 | * locking. | ||
620 | */ | ||
621 | rcu_read_lock(); | ||
622 | ret = ACCESS_ONCE(dentry->d_parent); | ||
623 | gotref = lockref_get_not_zero(&ret->d_lockref); | ||
624 | rcu_read_unlock(); | ||
625 | if (likely(gotref)) { | ||
626 | if (likely(ret == ACCESS_ONCE(dentry->d_parent))) | ||
627 | return ret; | ||
628 | dput(ret); | ||
629 | } | ||
630 | |||
623 | repeat: | 631 | repeat: |
624 | /* | 632 | /* |
625 | * Don't need rcu_dereference because we re-check it was correct under | 633 | * Don't need rcu_dereference because we re-check it was correct under |
@@ -634,8 +642,8 @@ repeat: | |||
634 | goto repeat; | 642 | goto repeat; |
635 | } | 643 | } |
636 | rcu_read_unlock(); | 644 | rcu_read_unlock(); |
637 | BUG_ON(!ret->d_count); | 645 | BUG_ON(!ret->d_lockref.count); |
638 | ret->d_count++; | 646 | ret->d_lockref.count++; |
639 | spin_unlock(&ret->d_lock); | 647 | spin_unlock(&ret->d_lock); |
640 | return ret; | 648 | return ret; |
641 | } | 649 | } |
@@ -718,7 +726,7 @@ restart: | |||
718 | spin_lock(&inode->i_lock); | 726 | spin_lock(&inode->i_lock); |
719 | hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) { | 727 | hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) { |
720 | spin_lock(&dentry->d_lock); | 728 | spin_lock(&dentry->d_lock); |
721 | if (!dentry->d_count) { | 729 | if (!dentry->d_lockref.count) { |
722 | __dget_dlock(dentry); | 730 | __dget_dlock(dentry); |
723 | __d_drop(dentry); | 731 | __d_drop(dentry); |
724 | spin_unlock(&dentry->d_lock); | 732 | spin_unlock(&dentry->d_lock); |
@@ -763,12 +771,8 @@ static void try_prune_one_dentry(struct dentry *dentry) | |||
763 | /* Prune ancestors. */ | 771 | /* Prune ancestors. */ |
764 | dentry = parent; | 772 | dentry = parent; |
765 | while (dentry) { | 773 | while (dentry) { |
766 | spin_lock(&dentry->d_lock); | 774 | if (lockref_put_or_lock(&dentry->d_lockref)) |
767 | if (dentry->d_count > 1) { | ||
768 | dentry->d_count--; | ||
769 | spin_unlock(&dentry->d_lock); | ||
770 | return; | 775 | return; |
771 | } | ||
772 | dentry = dentry_kill(dentry, 1); | 776 | dentry = dentry_kill(dentry, 1); |
773 | } | 777 | } |
774 | } | 778 | } |
@@ -793,7 +797,7 @@ static void shrink_dentry_list(struct list_head *list) | |||
793 | * the LRU because of laziness during lookup. Do not free | 797 | * the LRU because of laziness during lookup. Do not free |
794 | * it - just keep it off the LRU list. | 798 | * it - just keep it off the LRU list. |
795 | */ | 799 | */ |
796 | if (dentry->d_count) { | 800 | if (dentry->d_lockref.count) { |
797 | dentry_lru_del(dentry); | 801 | dentry_lru_del(dentry); |
798 | spin_unlock(&dentry->d_lock); | 802 | spin_unlock(&dentry->d_lock); |
799 | continue; | 803 | continue; |
@@ -913,7 +917,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) | |||
913 | dentry_lru_del(dentry); | 917 | dentry_lru_del(dentry); |
914 | __d_shrink(dentry); | 918 | __d_shrink(dentry); |
915 | 919 | ||
916 | if (dentry->d_count != 0) { | 920 | if (dentry->d_lockref.count != 0) { |
917 | printk(KERN_ERR | 921 | printk(KERN_ERR |
918 | "BUG: Dentry %p{i=%lx,n=%s}" | 922 | "BUG: Dentry %p{i=%lx,n=%s}" |
919 | " still in use (%d)" | 923 | " still in use (%d)" |
@@ -922,7 +926,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) | |||
922 | dentry->d_inode ? | 926 | dentry->d_inode ? |
923 | dentry->d_inode->i_ino : 0UL, | 927 | dentry->d_inode->i_ino : 0UL, |
924 | dentry->d_name.name, | 928 | dentry->d_name.name, |
925 | dentry->d_count, | 929 | dentry->d_lockref.count, |
926 | dentry->d_sb->s_type->name, | 930 | dentry->d_sb->s_type->name, |
927 | dentry->d_sb->s_id); | 931 | dentry->d_sb->s_id); |
928 | BUG(); | 932 | BUG(); |
@@ -933,7 +937,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) | |||
933 | list_del(&dentry->d_u.d_child); | 937 | list_del(&dentry->d_u.d_child); |
934 | } else { | 938 | } else { |
935 | parent = dentry->d_parent; | 939 | parent = dentry->d_parent; |
936 | parent->d_count--; | 940 | parent->d_lockref.count--; |
937 | list_del(&dentry->d_u.d_child); | 941 | list_del(&dentry->d_u.d_child); |
938 | } | 942 | } |
939 | 943 | ||
@@ -981,7 +985,7 @@ void shrink_dcache_for_umount(struct super_block *sb) | |||
981 | 985 | ||
982 | dentry = sb->s_root; | 986 | dentry = sb->s_root; |
983 | sb->s_root = NULL; | 987 | sb->s_root = NULL; |
984 | dentry->d_count--; | 988 | dentry->d_lockref.count--; |
985 | shrink_dcache_for_umount_subtree(dentry); | 989 | shrink_dcache_for_umount_subtree(dentry); |
986 | 990 | ||
987 | while (!hlist_bl_empty(&sb->s_anon)) { | 991 | while (!hlist_bl_empty(&sb->s_anon)) { |
@@ -1147,7 +1151,7 @@ resume: | |||
1147 | * loop in shrink_dcache_parent() might not make any progress | 1151 | * loop in shrink_dcache_parent() might not make any progress |
1148 | * and loop forever. | 1152 | * and loop forever. |
1149 | */ | 1153 | */ |
1150 | if (dentry->d_count) { | 1154 | if (dentry->d_lockref.count) { |
1151 | dentry_lru_del(dentry); | 1155 | dentry_lru_del(dentry); |
1152 | } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { | 1156 | } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { |
1153 | dentry_lru_move_list(dentry, dispose); | 1157 | dentry_lru_move_list(dentry, dispose); |
@@ -1269,7 +1273,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) | |||
1269 | smp_wmb(); | 1273 | smp_wmb(); |
1270 | dentry->d_name.name = dname; | 1274 | dentry->d_name.name = dname; |
1271 | 1275 | ||
1272 | dentry->d_count = 1; | 1276 | dentry->d_lockref.count = 1; |
1273 | dentry->d_flags = 0; | 1277 | dentry->d_flags = 0; |
1274 | spin_lock_init(&dentry->d_lock); | 1278 | spin_lock_init(&dentry->d_lock); |
1275 | seqcount_init(&dentry->d_seq); | 1279 | seqcount_init(&dentry->d_seq); |
@@ -1782,7 +1786,7 @@ static noinline enum slow_d_compare slow_dentry_cmp( | |||
1782 | * without taking d_lock and checking d_seq sequence count against @seq | 1786 | * without taking d_lock and checking d_seq sequence count against @seq |
1783 | * returned here. | 1787 | * returned here. |
1784 | * | 1788 | * |
1785 | * A refcount may be taken on the found dentry with the __d_rcu_to_refcount | 1789 | * A refcount may be taken on the found dentry with the d_rcu_to_refcount |
1786 | * function. | 1790 | * function. |
1787 | * | 1791 | * |
1788 | * Alternatively, __d_lookup_rcu may be called again to look up the child of | 1792 | * Alternatively, __d_lookup_rcu may be called again to look up the child of |
@@ -1970,7 +1974,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) | |||
1970 | goto next; | 1974 | goto next; |
1971 | } | 1975 | } |
1972 | 1976 | ||
1973 | dentry->d_count++; | 1977 | dentry->d_lockref.count++; |
1974 | found = dentry; | 1978 | found = dentry; |
1975 | spin_unlock(&dentry->d_lock); | 1979 | spin_unlock(&dentry->d_lock); |
1976 | break; | 1980 | break; |
@@ -2069,7 +2073,7 @@ again: | |||
2069 | spin_lock(&dentry->d_lock); | 2073 | spin_lock(&dentry->d_lock); |
2070 | inode = dentry->d_inode; | 2074 | inode = dentry->d_inode; |
2071 | isdir = S_ISDIR(inode->i_mode); | 2075 | isdir = S_ISDIR(inode->i_mode); |
2072 | if (dentry->d_count == 1) { | 2076 | if (dentry->d_lockref.count == 1) { |
2073 | if (!spin_trylock(&inode->i_lock)) { | 2077 | if (!spin_trylock(&inode->i_lock)) { |
2074 | spin_unlock(&dentry->d_lock); | 2078 | spin_unlock(&dentry->d_lock); |
2075 | cpu_relax(); | 2079 | cpu_relax(); |
@@ -2948,7 +2952,7 @@ resume: | |||
2948 | } | 2952 | } |
2949 | if (!(dentry->d_flags & DCACHE_GENOCIDE)) { | 2953 | if (!(dentry->d_flags & DCACHE_GENOCIDE)) { |
2950 | dentry->d_flags |= DCACHE_GENOCIDE; | 2954 | dentry->d_flags |= DCACHE_GENOCIDE; |
2951 | dentry->d_count--; | 2955 | dentry->d_lockref.count--; |
2952 | } | 2956 | } |
2953 | spin_unlock(&dentry->d_lock); | 2957 | spin_unlock(&dentry->d_lock); |
2954 | } | 2958 | } |
@@ -2956,7 +2960,7 @@ resume: | |||
2956 | struct dentry *child = this_parent; | 2960 | struct dentry *child = this_parent; |
2957 | if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { | 2961 | if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { |
2958 | this_parent->d_flags |= DCACHE_GENOCIDE; | 2962 | this_parent->d_flags |= DCACHE_GENOCIDE; |
2959 | this_parent->d_count--; | 2963 | this_parent->d_lockref.count--; |
2960 | } | 2964 | } |
2961 | this_parent = try_to_ascend(this_parent, locked, seq); | 2965 | this_parent = try_to_ascend(this_parent, locked, seq); |
2962 | if (!this_parent) | 2966 | if (!this_parent) |
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index 8743ba9c6742..984c2bbf4f61 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c | |||
@@ -3047,6 +3047,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) | |||
3047 | 3047 | ||
3048 | dir_index = (u32) ctx->pos; | 3048 | dir_index = (u32) ctx->pos; |
3049 | 3049 | ||
3050 | /* | ||
3051 | * NFSv4 reserves cookies 1 and 2 for . and .. so the value | ||
3052 | * we return to the vfs is one greater than the one we use | ||
3053 | * internally. | ||
3054 | */ | ||
3055 | if (dir_index) | ||
3056 | dir_index--; | ||
3057 | |||
3050 | if (dir_index > 1) { | 3058 | if (dir_index > 1) { |
3051 | struct dir_table_slot dirtab_slot; | 3059 | struct dir_table_slot dirtab_slot; |
3052 | 3060 | ||
@@ -3086,7 +3094,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) | |||
3086 | if (p->header.flag & BT_INTERNAL) { | 3094 | if (p->header.flag & BT_INTERNAL) { |
3087 | jfs_err("jfs_readdir: bad index table"); | 3095 | jfs_err("jfs_readdir: bad index table"); |
3088 | DT_PUTPAGE(mp); | 3096 | DT_PUTPAGE(mp); |
3089 | ctx->pos = -1; | 3097 | ctx->pos = DIREND; |
3090 | return 0; | 3098 | return 0; |
3091 | } | 3099 | } |
3092 | } else { | 3100 | } else { |
@@ -3094,14 +3102,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) | |||
3094 | /* | 3102 | /* |
3095 | * self "." | 3103 | * self "." |
3096 | */ | 3104 | */ |
3097 | ctx->pos = 0; | 3105 | ctx->pos = 1; |
3098 | if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) | 3106 | if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) |
3099 | return 0; | 3107 | return 0; |
3100 | } | 3108 | } |
3101 | /* | 3109 | /* |
3102 | * parent ".." | 3110 | * parent ".." |
3103 | */ | 3111 | */ |
3104 | ctx->pos = 1; | 3112 | ctx->pos = 2; |
3105 | if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) | 3113 | if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) |
3106 | return 0; | 3114 | return 0; |
3107 | 3115 | ||
@@ -3122,22 +3130,23 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) | |||
3122 | /* | 3130 | /* |
3123 | * Legacy filesystem - OS/2 & Linux JFS < 0.3.6 | 3131 | * Legacy filesystem - OS/2 & Linux JFS < 0.3.6 |
3124 | * | 3132 | * |
3125 | * pn = index = 0: First entry "." | 3133 | * pn = 0; index = 1: First entry "." |
3126 | * pn = 0; index = 1: Second entry ".." | 3134 | * pn = 0; index = 2: Second entry ".." |
3127 | * pn > 0: Real entries, pn=1 -> leftmost page | 3135 | * pn > 0: Real entries, pn=1 -> leftmost page |
3128 | * pn = index = -1: No more entries | 3136 | * pn = index = -1: No more entries |
3129 | */ | 3137 | */ |
3130 | dtpos = ctx->pos; | 3138 | dtpos = ctx->pos; |
3131 | if (dtpos == 0) { | 3139 | if (dtpos < 2) { |
3132 | /* build "." entry */ | 3140 | /* build "." entry */ |
3141 | ctx->pos = 1; | ||
3133 | if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) | 3142 | if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) |
3134 | return 0; | 3143 | return 0; |
3135 | dtoffset->index = 1; | 3144 | dtoffset->index = 2; |
3136 | ctx->pos = dtpos; | 3145 | ctx->pos = dtpos; |
3137 | } | 3146 | } |
3138 | 3147 | ||
3139 | if (dtoffset->pn == 0) { | 3148 | if (dtoffset->pn == 0) { |
3140 | if (dtoffset->index == 1) { | 3149 | if (dtoffset->index == 2) { |
3141 | /* build ".." entry */ | 3150 | /* build ".." entry */ |
3142 | if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) | 3151 | if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) |
3143 | return 0; | 3152 | return 0; |
@@ -3228,6 +3237,12 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) | |||
3228 | } | 3237 | } |
3229 | jfs_dirent->position = unique_pos++; | 3238 | jfs_dirent->position = unique_pos++; |
3230 | } | 3239 | } |
3240 | /* | ||
3241 | * We add 1 to the index because we may | ||
3242 | * use a value of 2 internally, and NFSv4 | ||
3243 | * doesn't like that. | ||
3244 | */ | ||
3245 | jfs_dirent->position++; | ||
3231 | } else { | 3246 | } else { |
3232 | jfs_dirent->position = dtpos; | 3247 | jfs_dirent->position = dtpos; |
3233 | len = min(d_namleft, DTLHDRDATALEN_LEGACY); | 3248 | len = min(d_namleft, DTLHDRDATALEN_LEGACY); |
diff --git a/fs/namei.c b/fs/namei.c index 89a612e392eb..2c30c84d4ea1 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -494,6 +494,50 @@ static inline void unlock_rcu_walk(void) | |||
494 | br_read_unlock(&vfsmount_lock); | 494 | br_read_unlock(&vfsmount_lock); |
495 | } | 495 | } |
496 | 496 | ||
497 | /* | ||
498 | * When we move over from the RCU domain to properly refcounted | ||
499 | * long-lived dentries, we need to check the sequence numbers | ||
500 | * we got before lookup very carefully. | ||
501 | * | ||
502 | * We cannot blindly increment a dentry refcount - even if it | ||
503 | * is not locked - if it is zero, because it may have gone | ||
504 | * through the final d_kill() logic already. | ||
505 | * | ||
506 | * So for a zero refcount, we need to get the spinlock (which is | ||
507 | * safe even for a dead dentry because the de-allocation is | ||
508 | * RCU-delayed), and check the sequence count under the lock. | ||
509 | * | ||
510 | * Once we have checked the sequence count, we know it is live, | ||
511 | * and since we hold the spinlock it cannot die from under us. | ||
512 | * | ||
513 | * In contrast, if the reference count wasn't zero, we can just | ||
514 | * increment the lockref without having to take the spinlock. | ||
515 | * Even if the sequence number ends up being stale, we haven't | ||
516 | * gone through the final dput() and killed the dentry yet. | ||
517 | */ | ||
518 | static inline int d_rcu_to_refcount(struct dentry *dentry, seqcount_t *validate, unsigned seq) | ||
519 | { | ||
520 | int gotref; | ||
521 | |||
522 | gotref = lockref_get_or_lock(&dentry->d_lockref); | ||
523 | |||
524 | /* Does the sequence number still match? */ | ||
525 | if (read_seqcount_retry(validate, seq)) { | ||
526 | if (gotref) | ||
527 | dput(dentry); | ||
528 | else | ||
529 | spin_unlock(&dentry->d_lock); | ||
530 | return -ECHILD; | ||
531 | } | ||
532 | |||
533 | /* Get the ref now, if we couldn't get it originally */ | ||
534 | if (!gotref) { | ||
535 | dentry->d_lockref.count++; | ||
536 | spin_unlock(&dentry->d_lock); | ||
537 | } | ||
538 | return 0; | ||
539 | } | ||
540 | |||
497 | /** | 541 | /** |
498 | * unlazy_walk - try to switch to ref-walk mode. | 542 | * unlazy_walk - try to switch to ref-walk mode. |
499 | * @nd: nameidata pathwalk data | 543 | * @nd: nameidata pathwalk data |
@@ -518,29 +562,28 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry) | |||
518 | nd->root.dentry != fs->root.dentry) | 562 | nd->root.dentry != fs->root.dentry) |
519 | goto err_root; | 563 | goto err_root; |
520 | } | 564 | } |
521 | spin_lock(&parent->d_lock); | 565 | |
566 | /* | ||
567 | * For a negative lookup, the lookup sequence point is the parent's | ||
568 | * sequence point, and it only needs to revalidate the parent dentry. | ||
569 | * | ||
570 | * For a positive lookup, we need to move both the parent and the | ||
571 | * dentry from the RCU domain to be properly refcounted. And the | ||
572 | * sequence number in the dentry validates *both* dentry counters, | ||
573 | * since we checked the sequence number of the parent after we got | ||
574 | * the child sequence number. So we know the parent must still | ||
575 | * be valid if the child sequence number is still valid. | ||
576 | */ | ||
522 | if (!dentry) { | 577 | if (!dentry) { |
523 | if (!__d_rcu_to_refcount(parent, nd->seq)) | 578 | if (d_rcu_to_refcount(parent, &parent->d_seq, nd->seq) < 0) |
524 | goto err_parent; | 579 | goto err_root; |
525 | BUG_ON(nd->inode != parent->d_inode); | 580 | BUG_ON(nd->inode != parent->d_inode); |
526 | } else { | 581 | } else { |
527 | if (dentry->d_parent != parent) | 582 | if (d_rcu_to_refcount(dentry, &dentry->d_seq, nd->seq) < 0) |
583 | goto err_root; | ||
584 | if (d_rcu_to_refcount(parent, &dentry->d_seq, nd->seq) < 0) | ||
528 | goto err_parent; | 585 | goto err_parent; |
529 | spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); | ||
530 | if (!__d_rcu_to_refcount(dentry, nd->seq)) | ||
531 | goto err_child; | ||
532 | /* | ||
533 | * If the sequence check on the child dentry passed, then | ||
534 | * the child has not been removed from its parent. This | ||
535 | * means the parent dentry must be valid and able to take | ||
536 | * a reference at this point. | ||
537 | */ | ||
538 | BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent); | ||
539 | BUG_ON(!parent->d_count); | ||
540 | parent->d_count++; | ||
541 | spin_unlock(&dentry->d_lock); | ||
542 | } | 586 | } |
543 | spin_unlock(&parent->d_lock); | ||
544 | if (want_root) { | 587 | if (want_root) { |
545 | path_get(&nd->root); | 588 | path_get(&nd->root); |
546 | spin_unlock(&fs->lock); | 589 | spin_unlock(&fs->lock); |
@@ -551,10 +594,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry) | |||
551 | nd->flags &= ~LOOKUP_RCU; | 594 | nd->flags &= ~LOOKUP_RCU; |
552 | return 0; | 595 | return 0; |
553 | 596 | ||
554 | err_child: | ||
555 | spin_unlock(&dentry->d_lock); | ||
556 | err_parent: | 597 | err_parent: |
557 | spin_unlock(&parent->d_lock); | 598 | dput(dentry); |
558 | err_root: | 599 | err_root: |
559 | if (want_root) | 600 | if (want_root) |
560 | spin_unlock(&fs->lock); | 601 | spin_unlock(&fs->lock); |
@@ -585,14 +626,11 @@ static int complete_walk(struct nameidata *nd) | |||
585 | nd->flags &= ~LOOKUP_RCU; | 626 | nd->flags &= ~LOOKUP_RCU; |
586 | if (!(nd->flags & LOOKUP_ROOT)) | 627 | if (!(nd->flags & LOOKUP_ROOT)) |
587 | nd->root.mnt = NULL; | 628 | nd->root.mnt = NULL; |
588 | spin_lock(&dentry->d_lock); | 629 | |
589 | if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) { | 630 | if (d_rcu_to_refcount(dentry, &dentry->d_seq, nd->seq) < 0) { |
590 | spin_unlock(&dentry->d_lock); | ||
591 | unlock_rcu_walk(); | 631 | unlock_rcu_walk(); |
592 | return -ECHILD; | 632 | return -ECHILD; |
593 | } | 633 | } |
594 | BUG_ON(nd->inode != dentry->d_inode); | ||
595 | spin_unlock(&dentry->d_lock); | ||
596 | mntget(nd->path.mnt); | 634 | mntget(nd->path.mnt); |
597 | unlock_rcu_walk(); | 635 | unlock_rcu_walk(); |
598 | } | 636 | } |
@@ -3327,7 +3365,7 @@ void dentry_unhash(struct dentry *dentry) | |||
3327 | { | 3365 | { |
3328 | shrink_dcache_parent(dentry); | 3366 | shrink_dcache_parent(dentry); |
3329 | spin_lock(&dentry->d_lock); | 3367 | spin_lock(&dentry->d_lock); |
3330 | if (dentry->d_count == 1) | 3368 | if (dentry->d_lockref.count == 1) |
3331 | __d_drop(dentry); | 3369 | __d_drop(dentry); |
3332 | spin_unlock(&dentry->d_lock); | 3370 | spin_unlock(&dentry->d_lock); |
3333 | } | 3371 | } |
@@ -3671,11 +3709,15 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, | |||
3671 | if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) | 3709 | if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) |
3672 | return -EINVAL; | 3710 | return -EINVAL; |
3673 | /* | 3711 | /* |
3674 | * Using empty names is equivalent to using AT_SYMLINK_FOLLOW | 3712 | * To use null names we require CAP_DAC_READ_SEARCH |
3675 | * on /proc/self/fd/<fd>. | 3713 | * This ensures that not everyone will be able to create |
3714 | * handlink using the passed filedescriptor. | ||
3676 | */ | 3715 | */ |
3677 | if (flags & AT_EMPTY_PATH) | 3716 | if (flags & AT_EMPTY_PATH) { |
3717 | if (!capable(CAP_DAC_READ_SEARCH)) | ||
3718 | return -ENOENT; | ||
3678 | how = LOOKUP_EMPTY; | 3719 | how = LOOKUP_EMPTY; |
3720 | } | ||
3679 | 3721 | ||
3680 | if (flags & AT_SYMLINK_FOLLOW) | 3722 | if (flags & AT_SYMLINK_FOLLOW) |
3681 | how |= LOOKUP_FOLLOW; | 3723 | how |= LOOKUP_FOLLOW; |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 854d80955bf8..121da2dc3be8 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -1022,7 +1022,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) | |||
1022 | struct inode *inode = NULL; | 1022 | struct inode *inode = NULL; |
1023 | struct ocfs2_super *osb = NULL; | 1023 | struct ocfs2_super *osb = NULL; |
1024 | struct buffer_head *bh = NULL; | 1024 | struct buffer_head *bh = NULL; |
1025 | char nodestr[8]; | 1025 | char nodestr[12]; |
1026 | struct ocfs2_blockcheck_stats stats; | 1026 | struct ocfs2_blockcheck_stats stats; |
1027 | 1027 | ||
1028 | trace_ocfs2_fill_super(sb, data, silent); | 1028 | trace_ocfs2_fill_super(sb, data, silent); |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 4a12532da8c4..9169b91ea2d2 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/seqlock.h> | 9 | #include <linux/seqlock.h> |
10 | #include <linux/cache.h> | 10 | #include <linux/cache.h> |
11 | #include <linux/rcupdate.h> | 11 | #include <linux/rcupdate.h> |
12 | #include <linux/lockref.h> | ||
12 | 13 | ||
13 | struct nameidata; | 14 | struct nameidata; |
14 | struct path; | 15 | struct path; |
@@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int); | |||
100 | # endif | 101 | # endif |
101 | #endif | 102 | #endif |
102 | 103 | ||
104 | #define d_lock d_lockref.lock | ||
105 | |||
103 | struct dentry { | 106 | struct dentry { |
104 | /* RCU lookup touched fields */ | 107 | /* RCU lookup touched fields */ |
105 | unsigned int d_flags; /* protected by d_lock */ | 108 | unsigned int d_flags; /* protected by d_lock */ |
@@ -112,8 +115,7 @@ struct dentry { | |||
112 | unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */ | 115 | unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */ |
113 | 116 | ||
114 | /* Ref lookup also touches following */ | 117 | /* Ref lookup also touches following */ |
115 | unsigned int d_count; /* protected by d_lock */ | 118 | struct lockref d_lockref; /* per-dentry lock and refcount */ |
116 | spinlock_t d_lock; /* per dentry lock */ | ||
117 | const struct dentry_operations *d_op; | 119 | const struct dentry_operations *d_op; |
118 | struct super_block *d_sb; /* The root of the dentry tree */ | 120 | struct super_block *d_sb; /* The root of the dentry tree */ |
119 | unsigned long d_time; /* used by d_revalidate */ | 121 | unsigned long d_time; /* used by d_revalidate */ |
@@ -302,31 +304,9 @@ extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *); | |||
302 | extern struct dentry *__d_lookup_rcu(const struct dentry *parent, | 304 | extern struct dentry *__d_lookup_rcu(const struct dentry *parent, |
303 | const struct qstr *name, unsigned *seq); | 305 | const struct qstr *name, unsigned *seq); |
304 | 306 | ||
305 | /** | ||
306 | * __d_rcu_to_refcount - take a refcount on dentry if sequence check is ok | ||
307 | * @dentry: dentry to take a ref on | ||
308 | * @seq: seqcount to verify against | ||
309 | * Returns: 0 on failure, else 1. | ||
310 | * | ||
311 | * __d_rcu_to_refcount operates on a dentry,seq pair that was returned | ||
312 | * by __d_lookup_rcu, to get a reference on an rcu-walk dentry. | ||
313 | */ | ||
314 | static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq) | ||
315 | { | ||
316 | int ret = 0; | ||
317 | |||
318 | assert_spin_locked(&dentry->d_lock); | ||
319 | if (!read_seqcount_retry(&dentry->d_seq, seq)) { | ||
320 | ret = 1; | ||
321 | dentry->d_count++; | ||
322 | } | ||
323 | |||
324 | return ret; | ||
325 | } | ||
326 | |||
327 | static inline unsigned d_count(const struct dentry *dentry) | 307 | static inline unsigned d_count(const struct dentry *dentry) |
328 | { | 308 | { |
329 | return dentry->d_count; | 309 | return dentry->d_lockref.count; |
330 | } | 310 | } |
331 | 311 | ||
332 | /* validate "insecure" dentry pointer */ | 312 | /* validate "insecure" dentry pointer */ |
@@ -357,17 +337,14 @@ extern char *dentry_path(struct dentry *, char *, int); | |||
357 | static inline struct dentry *dget_dlock(struct dentry *dentry) | 337 | static inline struct dentry *dget_dlock(struct dentry *dentry) |
358 | { | 338 | { |
359 | if (dentry) | 339 | if (dentry) |
360 | dentry->d_count++; | 340 | dentry->d_lockref.count++; |
361 | return dentry; | 341 | return dentry; |
362 | } | 342 | } |
363 | 343 | ||
364 | static inline struct dentry *dget(struct dentry *dentry) | 344 | static inline struct dentry *dget(struct dentry *dentry) |
365 | { | 345 | { |
366 | if (dentry) { | 346 | if (dentry) |
367 | spin_lock(&dentry->d_lock); | 347 | lockref_get(&dentry->d_lockref); |
368 | dget_dlock(dentry); | ||
369 | spin_unlock(&dentry->d_lock); | ||
370 | } | ||
371 | return dentry; | 348 | return dentry; |
372 | } | 349 | } |
373 | 350 | ||
diff --git a/include/linux/lockref.h b/include/linux/lockref.h new file mode 100644 index 000000000000..ca07b5028b01 --- /dev/null +++ b/include/linux/lockref.h | |||
@@ -0,0 +1,36 @@ | |||
1 | #ifndef __LINUX_LOCKREF_H | ||
2 | #define __LINUX_LOCKREF_H | ||
3 | |||
4 | /* | ||
5 | * Locked reference counts. | ||
6 | * | ||
7 | * These are different from just plain atomic refcounts in that they | ||
8 | * are atomic with respect to the spinlock that goes with them. In | ||
9 | * particular, there can be implementations that don't actually get | ||
10 | * the spinlock for the common decrement/increment operations, but they | ||
11 | * still have to check that the operation is done semantically as if | ||
12 | * the spinlock had been taken (using a cmpxchg operation that covers | ||
13 | * both the lock and the count word, or using memory transactions, for | ||
14 | * example). | ||
15 | */ | ||
16 | |||
17 | #include <linux/spinlock.h> | ||
18 | |||
19 | struct lockref { | ||
20 | union { | ||
21 | #ifdef CONFIG_CMPXCHG_LOCKREF | ||
22 | aligned_u64 lock_count; | ||
23 | #endif | ||
24 | struct { | ||
25 | spinlock_t lock; | ||
26 | unsigned int count; | ||
27 | }; | ||
28 | }; | ||
29 | }; | ||
30 | |||
31 | extern void lockref_get(struct lockref *); | ||
32 | extern int lockref_get_not_zero(struct lockref *); | ||
33 | extern int lockref_get_or_lock(struct lockref *); | ||
34 | extern int lockref_put_or_lock(struct lockref *); | ||
35 | |||
36 | #endif /* __LINUX_LOCKREF_H */ | ||
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index 10e5947491c7..b4ec59d159ac 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h | |||
@@ -14,6 +14,10 @@ struct fs_struct; | |||
14 | * A structure to contain pointers to all per-process | 14 | * A structure to contain pointers to all per-process |
15 | * namespaces - fs (mount), uts, network, sysvipc, etc. | 15 | * namespaces - fs (mount), uts, network, sysvipc, etc. |
16 | * | 16 | * |
17 | * The pid namespace is an exception -- it's accessed using | ||
18 | * task_active_pid_ns. The pid namespace here is the | ||
19 | * namespace that children will use. | ||
20 | * | ||
17 | * 'count' is the number of tasks holding a reference. | 21 | * 'count' is the number of tasks holding a reference. |
18 | * The count for each namespace, then, will be the number | 22 | * The count for each namespace, then, will be the number |
19 | * of nsproxies pointing to it, not the number of tasks. | 23 | * of nsproxies pointing to it, not the number of tasks. |
@@ -27,7 +31,7 @@ struct nsproxy { | |||
27 | struct uts_namespace *uts_ns; | 31 | struct uts_namespace *uts_ns; |
28 | struct ipc_namespace *ipc_ns; | 32 | struct ipc_namespace *ipc_ns; |
29 | struct mnt_namespace *mnt_ns; | 33 | struct mnt_namespace *mnt_ns; |
30 | struct pid_namespace *pid_ns; | 34 | struct pid_namespace *pid_ns_for_children; |
31 | struct net *net_ns; | 35 | struct net *net_ns; |
32 | }; | 36 | }; |
33 | extern struct nsproxy init_nsproxy; | 37 | extern struct nsproxy init_nsproxy; |
diff --git a/include/linux/regmap.h b/include/linux/regmap.h index bafda8759be6..a10380bfbeac 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
17 | #include <linux/rbtree.h> | 17 | #include <linux/rbtree.h> |
18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
19 | #include <linux/bug.h> | ||
19 | 20 | ||
20 | struct module; | 21 | struct module; |
21 | struct device; | 22 | struct device; |
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 8a358a2c97e6..829627d7b846 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h | |||
@@ -123,6 +123,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock) | |||
123 | /* local bh are disabled so it is ok to use _BH */ | 123 | /* local bh are disabled so it is ok to use _BH */ |
124 | NET_ADD_STATS_BH(sock_net(sk), | 124 | NET_ADD_STATS_BH(sock_net(sk), |
125 | LINUX_MIB_BUSYPOLLRXPACKETS, rc); | 125 | LINUX_MIB_BUSYPOLLRXPACKETS, rc); |
126 | cpu_relax(); | ||
126 | 127 | ||
127 | } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && | 128 | } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && |
128 | !need_resched() && !busy_loop_timeout(end_time)); | 129 | !need_resched() && !busy_loop_timeout(end_time)); |
diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 93024a47e0e2..8e0b6c856a13 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h | |||
@@ -61,6 +61,7 @@ struct genl_family { | |||
61 | struct list_head ops_list; /* private */ | 61 | struct list_head ops_list; /* private */ |
62 | struct list_head family_list; /* private */ | 62 | struct list_head family_list; /* private */ |
63 | struct list_head mcast_groups; /* private */ | 63 | struct list_head mcast_groups; /* private */ |
64 | struct module *module; | ||
64 | }; | 65 | }; |
65 | 66 | ||
66 | /** | 67 | /** |
@@ -121,9 +122,24 @@ struct genl_ops { | |||
121 | struct list_head ops_list; | 122 | struct list_head ops_list; |
122 | }; | 123 | }; |
123 | 124 | ||
124 | extern int genl_register_family(struct genl_family *family); | 125 | extern int __genl_register_family(struct genl_family *family); |
125 | extern int genl_register_family_with_ops(struct genl_family *family, | 126 | |
127 | static inline int genl_register_family(struct genl_family *family) | ||
128 | { | ||
129 | family->module = THIS_MODULE; | ||
130 | return __genl_register_family(family); | ||
131 | } | ||
132 | |||
133 | extern int __genl_register_family_with_ops(struct genl_family *family, | ||
126 | struct genl_ops *ops, size_t n_ops); | 134 | struct genl_ops *ops, size_t n_ops); |
135 | |||
136 | static inline int genl_register_family_with_ops(struct genl_family *family, | ||
137 | struct genl_ops *ops, size_t n_ops) | ||
138 | { | ||
139 | family->module = THIS_MODULE; | ||
140 | return __genl_register_family_with_ops(family, ops, n_ops); | ||
141 | } | ||
142 | |||
127 | extern int genl_unregister_family(struct genl_family *family); | 143 | extern int genl_unregister_family(struct genl_family *family); |
128 | extern int genl_register_ops(struct genl_family *, struct genl_ops *ops); | 144 | extern int genl_register_ops(struct genl_family *, struct genl_ops *ops); |
129 | extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops); | 145 | extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops); |
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 5b7a3dadadde..551ba6a6a073 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
@@ -1499,6 +1499,7 @@ enum ieee80211_hw_flags { | |||
1499 | IEEE80211_HW_SUPPORTS_RC_TABLE = 1<<24, | 1499 | IEEE80211_HW_SUPPORTS_RC_TABLE = 1<<24, |
1500 | IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25, | 1500 | IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25, |
1501 | IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26, | 1501 | IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26, |
1502 | IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27, | ||
1502 | }; | 1503 | }; |
1503 | 1504 | ||
1504 | /** | 1505 | /** |
diff --git a/include/net/route.h b/include/net/route.h index 2ea40c1b5e00..afdeeb5bec25 100644 --- a/include/net/route.h +++ b/include/net/route.h | |||
@@ -317,4 +317,12 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst) | |||
317 | return hoplimit; | 317 | return hoplimit; |
318 | } | 318 | } |
319 | 319 | ||
320 | static inline int ip_skb_dst_mtu(struct sk_buff *skb) | ||
321 | { | ||
322 | struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL; | ||
323 | |||
324 | return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ? | ||
325 | skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); | ||
326 | } | ||
327 | |||
320 | #endif /* _ROUTE_H */ | 328 | #endif /* _ROUTE_H */ |
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 94ce082b29dc..e823786e7c66 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -341,10 +341,13 @@ struct xfrm_state_afinfo { | |||
341 | struct sk_buff *skb); | 341 | struct sk_buff *skb); |
342 | int (*transport_finish)(struct sk_buff *skb, | 342 | int (*transport_finish)(struct sk_buff *skb, |
343 | int async); | 343 | int async); |
344 | void (*local_error)(struct sk_buff *skb, u32 mtu); | ||
344 | }; | 345 | }; |
345 | 346 | ||
346 | extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo); | 347 | extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo); |
347 | extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo); | 348 | extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo); |
349 | extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); | ||
350 | extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); | ||
348 | 351 | ||
349 | extern void xfrm_state_delete_tunnel(struct xfrm_state *x); | 352 | extern void xfrm_state_delete_tunnel(struct xfrm_state *x); |
350 | 353 | ||
@@ -1477,6 +1480,7 @@ extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr); | |||
1477 | extern int xfrm_output_resume(struct sk_buff *skb, int err); | 1480 | extern int xfrm_output_resume(struct sk_buff *skb, int err); |
1478 | extern int xfrm_output(struct sk_buff *skb); | 1481 | extern int xfrm_output(struct sk_buff *skb); |
1479 | extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); | 1482 | extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); |
1483 | extern void xfrm_local_error(struct sk_buff *skb, int mtu); | ||
1480 | extern int xfrm4_extract_header(struct sk_buff *skb); | 1484 | extern int xfrm4_extract_header(struct sk_buff *skb); |
1481 | extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb); | 1485 | extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb); |
1482 | extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, | 1486 | extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, |
@@ -1497,6 +1501,7 @@ extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short fam | |||
1497 | extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); | 1501 | extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); |
1498 | extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler); | 1502 | extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler); |
1499 | extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler); | 1503 | extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler); |
1504 | extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu); | ||
1500 | extern int xfrm6_extract_header(struct sk_buff *skb); | 1505 | extern int xfrm6_extract_header(struct sk_buff *skb); |
1501 | extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); | 1506 | extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); |
1502 | extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi); | 1507 | extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi); |
@@ -1514,6 +1519,7 @@ extern int xfrm6_output(struct sk_buff *skb); | |||
1514 | extern int xfrm6_output_finish(struct sk_buff *skb); | 1519 | extern int xfrm6_output_finish(struct sk_buff *skb); |
1515 | extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, | 1520 | extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, |
1516 | u8 **prevhdr); | 1521 | u8 **prevhdr); |
1522 | extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu); | ||
1517 | 1523 | ||
1518 | #ifdef CONFIG_XFRM | 1524 | #ifdef CONFIG_XFRM |
1519 | extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); | 1525 | extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); |
diff --git a/include/uapi/linux/cm4000_cs.h b/include/uapi/linux/cm4000_cs.h index bc51f77db918..1217f751a1bc 100644 --- a/include/uapi/linux/cm4000_cs.h +++ b/include/uapi/linux/cm4000_cs.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _UAPI_CM4000_H_ | 2 | #define _UAPI_CM4000_H_ |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/ioctl.h> | ||
5 | 6 | ||
6 | #define MAX_ATR 33 | 7 | #define MAX_ATR 33 |
7 | 8 | ||
@@ -839,7 +839,7 @@ static inline void free_copy(struct msg_msg *copy) | |||
839 | 839 | ||
840 | static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) | 840 | static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) |
841 | { | 841 | { |
842 | struct msg_msg *msg; | 842 | struct msg_msg *msg, *found = NULL; |
843 | long count = 0; | 843 | long count = 0; |
844 | 844 | ||
845 | list_for_each_entry(msg, &msq->q_messages, m_list) { | 845 | list_for_each_entry(msg, &msq->q_messages, m_list) { |
@@ -848,6 +848,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) | |||
848 | *msgtyp, mode)) { | 848 | *msgtyp, mode)) { |
849 | if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) { | 849 | if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) { |
850 | *msgtyp = msg->m_type - 1; | 850 | *msgtyp = msg->m_type - 1; |
851 | found = msg; | ||
851 | } else if (mode == SEARCH_NUMBER) { | 852 | } else if (mode == SEARCH_NUMBER) { |
852 | if (*msgtyp == count) | 853 | if (*msgtyp == count) |
853 | return msg; | 854 | return msg; |
@@ -857,7 +858,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) | |||
857 | } | 858 | } |
858 | } | 859 | } |
859 | 860 | ||
860 | return ERR_PTR(-EAGAIN); | 861 | return found ?: ERR_PTR(-EAGAIN); |
861 | } | 862 | } |
862 | 863 | ||
863 | long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg, | 864 | long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg, |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 781845a013ab..e91963302c0d 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -4480,6 +4480,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) | |||
4480 | struct dentry *d = cgrp->dentry; | 4480 | struct dentry *d = cgrp->dentry; |
4481 | struct cgroup_event *event, *tmp; | 4481 | struct cgroup_event *event, *tmp; |
4482 | struct cgroup_subsys *ss; | 4482 | struct cgroup_subsys *ss; |
4483 | struct cgroup *child; | ||
4483 | bool empty; | 4484 | bool empty; |
4484 | 4485 | ||
4485 | lockdep_assert_held(&d->d_inode->i_mutex); | 4486 | lockdep_assert_held(&d->d_inode->i_mutex); |
@@ -4490,12 +4491,28 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) | |||
4490 | * @cgrp from being removed while __put_css_set() is in progress. | 4491 | * @cgrp from being removed while __put_css_set() is in progress. |
4491 | */ | 4492 | */ |
4492 | read_lock(&css_set_lock); | 4493 | read_lock(&css_set_lock); |
4493 | empty = list_empty(&cgrp->cset_links) && list_empty(&cgrp->children); | 4494 | empty = list_empty(&cgrp->cset_links); |
4494 | read_unlock(&css_set_lock); | 4495 | read_unlock(&css_set_lock); |
4495 | if (!empty) | 4496 | if (!empty) |
4496 | return -EBUSY; | 4497 | return -EBUSY; |
4497 | 4498 | ||
4498 | /* | 4499 | /* |
4500 | * Make sure there's no live children. We can't test ->children | ||
4501 | * emptiness as dead children linger on it while being destroyed; | ||
4502 | * otherwise, "rmdir parent/child parent" may fail with -EBUSY. | ||
4503 | */ | ||
4504 | empty = true; | ||
4505 | rcu_read_lock(); | ||
4506 | list_for_each_entry_rcu(child, &cgrp->children, sibling) { | ||
4507 | empty = cgroup_is_dead(child); | ||
4508 | if (!empty) | ||
4509 | break; | ||
4510 | } | ||
4511 | rcu_read_unlock(); | ||
4512 | if (!empty) | ||
4513 | return -EBUSY; | ||
4514 | |||
4515 | /* | ||
4499 | * Block new css_tryget() by killing css refcnts. cgroup core | 4516 | * Block new css_tryget() by killing css refcnts. cgroup core |
4500 | * guarantees that, by the time ->css_offline() is invoked, no new | 4517 | * guarantees that, by the time ->css_offline() is invoked, no new |
4501 | * css reference will be given out via css_tryget(). We can't | 4518 | * css reference will be given out via css_tryget(). We can't |
diff --git a/kernel/fork.c b/kernel/fork.c index e23bb19e2a3e..bf46287c91a4 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1177,7 +1177,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1177 | * don't allow the creation of threads. | 1177 | * don't allow the creation of threads. |
1178 | */ | 1178 | */ |
1179 | if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) && | 1179 | if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) && |
1180 | (task_active_pid_ns(current) != current->nsproxy->pid_ns)) | 1180 | (task_active_pid_ns(current) != |
1181 | current->nsproxy->pid_ns_for_children)) | ||
1181 | return ERR_PTR(-EINVAL); | 1182 | return ERR_PTR(-EINVAL); |
1182 | 1183 | ||
1183 | retval = security_task_create(clone_flags); | 1184 | retval = security_task_create(clone_flags); |
@@ -1351,7 +1352,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1351 | 1352 | ||
1352 | if (pid != &init_struct_pid) { | 1353 | if (pid != &init_struct_pid) { |
1353 | retval = -ENOMEM; | 1354 | retval = -ENOMEM; |
1354 | pid = alloc_pid(p->nsproxy->pid_ns); | 1355 | pid = alloc_pid(p->nsproxy->pid_ns_for_children); |
1355 | if (!pid) | 1356 | if (!pid) |
1356 | goto bad_fork_cleanup_io; | 1357 | goto bad_fork_cleanup_io; |
1357 | } | 1358 | } |
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index 364ceab15f0c..997cbb951a3b 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c | |||
@@ -29,15 +29,15 @@ | |||
29 | static struct kmem_cache *nsproxy_cachep; | 29 | static struct kmem_cache *nsproxy_cachep; |
30 | 30 | ||
31 | struct nsproxy init_nsproxy = { | 31 | struct nsproxy init_nsproxy = { |
32 | .count = ATOMIC_INIT(1), | 32 | .count = ATOMIC_INIT(1), |
33 | .uts_ns = &init_uts_ns, | 33 | .uts_ns = &init_uts_ns, |
34 | #if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC) | 34 | #if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC) |
35 | .ipc_ns = &init_ipc_ns, | 35 | .ipc_ns = &init_ipc_ns, |
36 | #endif | 36 | #endif |
37 | .mnt_ns = NULL, | 37 | .mnt_ns = NULL, |
38 | .pid_ns = &init_pid_ns, | 38 | .pid_ns_for_children = &init_pid_ns, |
39 | #ifdef CONFIG_NET | 39 | #ifdef CONFIG_NET |
40 | .net_ns = &init_net, | 40 | .net_ns = &init_net, |
41 | #endif | 41 | #endif |
42 | }; | 42 | }; |
43 | 43 | ||
@@ -85,9 +85,10 @@ static struct nsproxy *create_new_namespaces(unsigned long flags, | |||
85 | goto out_ipc; | 85 | goto out_ipc; |
86 | } | 86 | } |
87 | 87 | ||
88 | new_nsp->pid_ns = copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns); | 88 | new_nsp->pid_ns_for_children = |
89 | if (IS_ERR(new_nsp->pid_ns)) { | 89 | copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children); |
90 | err = PTR_ERR(new_nsp->pid_ns); | 90 | if (IS_ERR(new_nsp->pid_ns_for_children)) { |
91 | err = PTR_ERR(new_nsp->pid_ns_for_children); | ||
91 | goto out_pid; | 92 | goto out_pid; |
92 | } | 93 | } |
93 | 94 | ||
@@ -100,8 +101,8 @@ static struct nsproxy *create_new_namespaces(unsigned long flags, | |||
100 | return new_nsp; | 101 | return new_nsp; |
101 | 102 | ||
102 | out_net: | 103 | out_net: |
103 | if (new_nsp->pid_ns) | 104 | if (new_nsp->pid_ns_for_children) |
104 | put_pid_ns(new_nsp->pid_ns); | 105 | put_pid_ns(new_nsp->pid_ns_for_children); |
105 | out_pid: | 106 | out_pid: |
106 | if (new_nsp->ipc_ns) | 107 | if (new_nsp->ipc_ns) |
107 | put_ipc_ns(new_nsp->ipc_ns); | 108 | put_ipc_ns(new_nsp->ipc_ns); |
@@ -174,8 +175,8 @@ void free_nsproxy(struct nsproxy *ns) | |||
174 | put_uts_ns(ns->uts_ns); | 175 | put_uts_ns(ns->uts_ns); |
175 | if (ns->ipc_ns) | 176 | if (ns->ipc_ns) |
176 | put_ipc_ns(ns->ipc_ns); | 177 | put_ipc_ns(ns->ipc_ns); |
177 | if (ns->pid_ns) | 178 | if (ns->pid_ns_for_children) |
178 | put_pid_ns(ns->pid_ns); | 179 | put_pid_ns(ns->pid_ns_for_children); |
179 | put_net(ns->net_ns); | 180 | put_net(ns->net_ns); |
180 | kmem_cache_free(nsproxy_cachep, ns); | 181 | kmem_cache_free(nsproxy_cachep, ns); |
181 | } | 182 | } |
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 6917e8edb48e..601bb361c235 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c | |||
@@ -349,8 +349,8 @@ static int pidns_install(struct nsproxy *nsproxy, void *ns) | |||
349 | if (ancestor != active) | 349 | if (ancestor != active) |
350 | return -EINVAL; | 350 | return -EINVAL; |
351 | 351 | ||
352 | put_pid_ns(nsproxy->pid_ns); | 352 | put_pid_ns(nsproxy->pid_ns_for_children); |
353 | nsproxy->pid_ns = get_pid_ns(new); | 353 | nsproxy->pid_ns_for_children = get_pid_ns(new); |
354 | return 0; | 354 | return 0; |
355 | } | 355 | } |
356 | 356 | ||
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 3bdf28323012..61ed862cdd37 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c | |||
@@ -265,10 +265,9 @@ static inline void timer_list_header(struct seq_file *m, u64 now) | |||
265 | static int timer_list_show(struct seq_file *m, void *v) | 265 | static int timer_list_show(struct seq_file *m, void *v) |
266 | { | 266 | { |
267 | struct timer_list_iter *iter = v; | 267 | struct timer_list_iter *iter = v; |
268 | u64 now = ktime_to_ns(ktime_get()); | ||
269 | 268 | ||
270 | if (iter->cpu == -1 && !iter->second_pass) | 269 | if (iter->cpu == -1 && !iter->second_pass) |
271 | timer_list_header(m, now); | 270 | timer_list_header(m, iter->now); |
272 | else if (!iter->second_pass) | 271 | else if (!iter->second_pass) |
273 | print_cpu(m, iter->cpu, iter->now); | 272 | print_cpu(m, iter->cpu, iter->now); |
274 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | 273 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
@@ -298,33 +297,41 @@ void sysrq_timer_list_show(void) | |||
298 | return; | 297 | return; |
299 | } | 298 | } |
300 | 299 | ||
301 | static void *timer_list_start(struct seq_file *file, loff_t *offset) | 300 | static void *move_iter(struct timer_list_iter *iter, loff_t offset) |
302 | { | 301 | { |
303 | struct timer_list_iter *iter = file->private; | 302 | for (; offset; offset--) { |
304 | 303 | iter->cpu = cpumask_next(iter->cpu, cpu_online_mask); | |
305 | if (!*offset) { | 304 | if (iter->cpu >= nr_cpu_ids) { |
306 | iter->cpu = -1; | ||
307 | iter->now = ktime_to_ns(ktime_get()); | ||
308 | } else if (iter->cpu >= nr_cpu_ids) { | ||
309 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | 305 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
310 | if (!iter->second_pass) { | 306 | if (!iter->second_pass) { |
311 | iter->cpu = -1; | 307 | iter->cpu = -1; |
312 | iter->second_pass = true; | 308 | iter->second_pass = true; |
313 | } else | 309 | } else |
314 | return NULL; | 310 | return NULL; |
315 | #else | 311 | #else |
316 | return NULL; | 312 | return NULL; |
317 | #endif | 313 | #endif |
314 | } | ||
318 | } | 315 | } |
319 | return iter; | 316 | return iter; |
320 | } | 317 | } |
321 | 318 | ||
319 | static void *timer_list_start(struct seq_file *file, loff_t *offset) | ||
320 | { | ||
321 | struct timer_list_iter *iter = file->private; | ||
322 | |||
323 | if (!*offset) | ||
324 | iter->now = ktime_to_ns(ktime_get()); | ||
325 | iter->cpu = -1; | ||
326 | iter->second_pass = false; | ||
327 | return move_iter(iter, *offset); | ||
328 | } | ||
329 | |||
322 | static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset) | 330 | static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset) |
323 | { | 331 | { |
324 | struct timer_list_iter *iter = file->private; | 332 | struct timer_list_iter *iter = file->private; |
325 | iter->cpu = cpumask_next(iter->cpu, cpu_online_mask); | ||
326 | ++*offset; | 333 | ++*offset; |
327 | return timer_list_start(file, offset); | 334 | return move_iter(iter, 1); |
328 | } | 335 | } |
329 | 336 | ||
330 | static void timer_list_stop(struct seq_file *seq, void *v) | 337 | static void timer_list_stop(struct seq_file *seq, void *v) |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 7f5d4be22034..e93f7b9067d8 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -2201,6 +2201,15 @@ __acquires(&pool->lock) | |||
2201 | dump_stack(); | 2201 | dump_stack(); |
2202 | } | 2202 | } |
2203 | 2203 | ||
2204 | /* | ||
2205 | * The following prevents a kworker from hogging CPU on !PREEMPT | ||
2206 | * kernels, where a requeueing work item waiting for something to | ||
2207 | * happen could deadlock with stop_machine as such work item could | ||
2208 | * indefinitely requeue itself while all other CPUs are trapped in | ||
2209 | * stop_machine. | ||
2210 | */ | ||
2211 | cond_resched(); | ||
2212 | |||
2204 | spin_lock_irq(&pool->lock); | 2213 | spin_lock_irq(&pool->lock); |
2205 | 2214 | ||
2206 | /* clear cpu intensive status */ | 2215 | /* clear cpu intensive status */ |
diff --git a/lib/Kconfig b/lib/Kconfig index 71d9f81f6eed..65561716c16c 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -48,6 +48,16 @@ config STMP_DEVICE | |||
48 | config PERCPU_RWSEM | 48 | config PERCPU_RWSEM |
49 | boolean | 49 | boolean |
50 | 50 | ||
51 | config ARCH_USE_CMPXCHG_LOCKREF | ||
52 | bool | ||
53 | |||
54 | config CMPXCHG_LOCKREF | ||
55 | def_bool y if ARCH_USE_CMPXCHG_LOCKREF | ||
56 | depends on SMP | ||
57 | depends on !GENERIC_LOCKBREAK | ||
58 | depends on !DEBUG_SPINLOCK | ||
59 | depends on !DEBUG_LOCK_ALLOC | ||
60 | |||
51 | config CRC_CCITT | 61 | config CRC_CCITT |
52 | tristate "CRC-CCITT functions" | 62 | tristate "CRC-CCITT functions" |
53 | help | 63 | help |
diff --git a/lib/Makefile b/lib/Makefile index 7baccfd8a4e9..f2cb3082697c 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -20,6 +20,7 @@ lib-$(CONFIG_MMU) += ioremap.o | |||
20 | lib-$(CONFIG_SMP) += cpumask.o | 20 | lib-$(CONFIG_SMP) += cpumask.o |
21 | 21 | ||
22 | lib-y += kobject.o klist.o | 22 | lib-y += kobject.o klist.o |
23 | obj-y += lockref.o | ||
23 | 24 | ||
24 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 25 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
25 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ | 26 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
diff --git a/lib/lockref.c b/lib/lockref.c new file mode 100644 index 000000000000..7819c2d1d315 --- /dev/null +++ b/lib/lockref.c | |||
@@ -0,0 +1,127 @@ | |||
1 | #include <linux/export.h> | ||
2 | #include <linux/lockref.h> | ||
3 | |||
4 | #ifdef CONFIG_CMPXCHG_LOCKREF | ||
5 | |||
6 | /* | ||
7 | * Note that the "cmpxchg()" reloads the "old" value for the | ||
8 | * failure case. | ||
9 | */ | ||
10 | #define CMPXCHG_LOOP(CODE, SUCCESS) do { \ | ||
11 | struct lockref old; \ | ||
12 | BUILD_BUG_ON(sizeof(old) != 8); \ | ||
13 | old.lock_count = ACCESS_ONCE(lockref->lock_count); \ | ||
14 | while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \ | ||
15 | struct lockref new = old, prev = old; \ | ||
16 | CODE \ | ||
17 | old.lock_count = cmpxchg(&lockref->lock_count, \ | ||
18 | old.lock_count, new.lock_count); \ | ||
19 | if (likely(old.lock_count == prev.lock_count)) { \ | ||
20 | SUCCESS; \ | ||
21 | } \ | ||
22 | } \ | ||
23 | } while (0) | ||
24 | |||
25 | #else | ||
26 | |||
27 | #define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0) | ||
28 | |||
29 | #endif | ||
30 | |||
31 | /** | ||
32 | * lockref_get - Increments reference count unconditionally | ||
33 | * @lockcnt: pointer to lockref structure | ||
34 | * | ||
35 | * This operation is only valid if you already hold a reference | ||
36 | * to the object, so you know the count cannot be zero. | ||
37 | */ | ||
38 | void lockref_get(struct lockref *lockref) | ||
39 | { | ||
40 | CMPXCHG_LOOP( | ||
41 | new.count++; | ||
42 | , | ||
43 | return; | ||
44 | ); | ||
45 | |||
46 | spin_lock(&lockref->lock); | ||
47 | lockref->count++; | ||
48 | spin_unlock(&lockref->lock); | ||
49 | } | ||
50 | EXPORT_SYMBOL(lockref_get); | ||
51 | |||
52 | /** | ||
53 | * lockref_get_not_zero - Increments count unless the count is 0 | ||
54 | * @lockcnt: pointer to lockref structure | ||
55 | * Return: 1 if count updated successfully or 0 if count was zero | ||
56 | */ | ||
57 | int lockref_get_not_zero(struct lockref *lockref) | ||
58 | { | ||
59 | int retval; | ||
60 | |||
61 | CMPXCHG_LOOP( | ||
62 | new.count++; | ||
63 | if (!old.count) | ||
64 | return 0; | ||
65 | , | ||
66 | return 1; | ||
67 | ); | ||
68 | |||
69 | spin_lock(&lockref->lock); | ||
70 | retval = 0; | ||
71 | if (lockref->count) { | ||
72 | lockref->count++; | ||
73 | retval = 1; | ||
74 | } | ||
75 | spin_unlock(&lockref->lock); | ||
76 | return retval; | ||
77 | } | ||
78 | EXPORT_SYMBOL(lockref_get_not_zero); | ||
79 | |||
80 | /** | ||
81 | * lockref_get_or_lock - Increments count unless the count is 0 | ||
82 | * @lockcnt: pointer to lockref structure | ||
83 | * Return: 1 if count updated successfully or 0 if count was zero | ||
84 | * and we got the lock instead. | ||
85 | */ | ||
86 | int lockref_get_or_lock(struct lockref *lockref) | ||
87 | { | ||
88 | CMPXCHG_LOOP( | ||
89 | new.count++; | ||
90 | if (!old.count) | ||
91 | break; | ||
92 | , | ||
93 | return 1; | ||
94 | ); | ||
95 | |||
96 | spin_lock(&lockref->lock); | ||
97 | if (!lockref->count) | ||
98 | return 0; | ||
99 | lockref->count++; | ||
100 | spin_unlock(&lockref->lock); | ||
101 | return 1; | ||
102 | } | ||
103 | EXPORT_SYMBOL(lockref_get_or_lock); | ||
104 | |||
105 | /** | ||
106 | * lockref_put_or_lock - decrements count unless count <= 1 before decrement | ||
107 | * @lockcnt: pointer to lockref structure | ||
108 | * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken | ||
109 | */ | ||
110 | int lockref_put_or_lock(struct lockref *lockref) | ||
111 | { | ||
112 | CMPXCHG_LOOP( | ||
113 | new.count--; | ||
114 | if (old.count <= 1) | ||
115 | break; | ||
116 | , | ||
117 | return 1; | ||
118 | ); | ||
119 | |||
120 | spin_lock(&lockref->lock); | ||
121 | if (lockref->count <= 1) | ||
122 | return 0; | ||
123 | lockref->count--; | ||
124 | spin_unlock(&lockref->lock); | ||
125 | return 1; | ||
126 | } | ||
127 | EXPORT_SYMBOL(lockref_put_or_lock); | ||
diff --git a/mm/mremap.c b/mm/mremap.c index 457d34ef3bf2..0843feb66f3d 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/swap.h> | 15 | #include <linux/swap.h> |
16 | #include <linux/capability.h> | 16 | #include <linux/capability.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/swapops.h> | ||
18 | #include <linux/highmem.h> | 19 | #include <linux/highmem.h> |
19 | #include <linux/security.h> | 20 | #include <linux/security.h> |
20 | #include <linux/syscalls.h> | 21 | #include <linux/syscalls.h> |
@@ -69,6 +70,23 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, | |||
69 | return pmd; | 70 | return pmd; |
70 | } | 71 | } |
71 | 72 | ||
73 | static pte_t move_soft_dirty_pte(pte_t pte) | ||
74 | { | ||
75 | /* | ||
76 | * Set soft dirty bit so we can notice | ||
77 | * in userspace the ptes were moved. | ||
78 | */ | ||
79 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
80 | if (pte_present(pte)) | ||
81 | pte = pte_mksoft_dirty(pte); | ||
82 | else if (is_swap_pte(pte)) | ||
83 | pte = pte_swp_mksoft_dirty(pte); | ||
84 | else if (pte_file(pte)) | ||
85 | pte = pte_file_mksoft_dirty(pte); | ||
86 | #endif | ||
87 | return pte; | ||
88 | } | ||
89 | |||
72 | static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | 90 | static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, |
73 | unsigned long old_addr, unsigned long old_end, | 91 | unsigned long old_addr, unsigned long old_end, |
74 | struct vm_area_struct *new_vma, pmd_t *new_pmd, | 92 | struct vm_area_struct *new_vma, pmd_t *new_pmd, |
@@ -126,7 +144,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | |||
126 | continue; | 144 | continue; |
127 | pte = ptep_get_and_clear(mm, old_addr, old_pte); | 145 | pte = ptep_get_and_clear(mm, old_addr, old_pte); |
128 | pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); | 146 | pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); |
129 | set_pte_at(mm, new_addr, new_pte, pte_mksoft_dirty(pte)); | 147 | pte = move_soft_dirty_pte(pte); |
148 | set_pte_at(mm, new_addr, new_pte, pte); | ||
130 | } | 149 | } |
131 | 150 | ||
132 | arch_leave_lazy_mmu_mode(); | 151 | arch_leave_lazy_mmu_mode(); |
@@ -162,6 +162,8 @@ static inline const char *cache_name(struct kmem_cache *s) | |||
162 | 162 | ||
163 | static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx) | 163 | static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx) |
164 | { | 164 | { |
165 | if (!s->memcg_params) | ||
166 | return NULL; | ||
165 | return s->memcg_params->memcg_caches[idx]; | 167 | return s->memcg_params->memcg_caches[idx]; |
166 | } | 168 | } |
167 | 169 | ||
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 69363bd37f64..89659d4ed1f9 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -71,7 +71,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
71 | 71 | ||
72 | mdst = br_mdb_get(br, skb, vid); | 72 | mdst = br_mdb_get(br, skb, vid); |
73 | if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && | 73 | if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && |
74 | br_multicast_querier_exists(br)) | 74 | br_multicast_querier_exists(br, eth_hdr(skb))) |
75 | br_multicast_deliver(mdst, skb); | 75 | br_multicast_deliver(mdst, skb); |
76 | else | 76 | else |
77 | br_flood_deliver(br, skb, false); | 77 | br_flood_deliver(br, skb, false); |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 8c561c0aa636..a2fd37ec35f7 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -102,7 +102,7 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
102 | } else if (is_multicast_ether_addr(dest)) { | 102 | } else if (is_multicast_ether_addr(dest)) { |
103 | mdst = br_mdb_get(br, skb, vid); | 103 | mdst = br_mdb_get(br, skb, vid); |
104 | if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && | 104 | if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && |
105 | br_multicast_querier_exists(br)) { | 105 | br_multicast_querier_exists(br, eth_hdr(skb))) { |
106 | if ((mdst && mdst->mglist) || | 106 | if ((mdst && mdst->mglist) || |
107 | br_multicast_is_router(br)) | 107 | br_multicast_is_router(br)) |
108 | skb2 = skb; | 108 | skb2 = skb; |
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 0daae3ec2355..6319c4333c39 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c | |||
@@ -414,16 +414,20 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) | |||
414 | if (!netif_running(br->dev) || br->multicast_disabled) | 414 | if (!netif_running(br->dev) || br->multicast_disabled) |
415 | return -EINVAL; | 415 | return -EINVAL; |
416 | 416 | ||
417 | if (timer_pending(&br->multicast_querier_timer)) | ||
418 | return -EBUSY; | ||
419 | |||
420 | ip.proto = entry->addr.proto; | 417 | ip.proto = entry->addr.proto; |
421 | if (ip.proto == htons(ETH_P_IP)) | 418 | if (ip.proto == htons(ETH_P_IP)) { |
419 | if (timer_pending(&br->ip4_querier.timer)) | ||
420 | return -EBUSY; | ||
421 | |||
422 | ip.u.ip4 = entry->addr.u.ip4; | 422 | ip.u.ip4 = entry->addr.u.ip4; |
423 | #if IS_ENABLED(CONFIG_IPV6) | 423 | #if IS_ENABLED(CONFIG_IPV6) |
424 | else | 424 | } else { |
425 | if (timer_pending(&br->ip6_querier.timer)) | ||
426 | return -EBUSY; | ||
427 | |||
425 | ip.u.ip6 = entry->addr.u.ip6; | 428 | ip.u.ip6 = entry->addr.u.ip6; |
426 | #endif | 429 | #endif |
430 | } | ||
427 | 431 | ||
428 | spin_lock_bh(&br->multicast_lock); | 432 | spin_lock_bh(&br->multicast_lock); |
429 | mdb = mlock_dereference(br->mdb, br); | 433 | mdb = mlock_dereference(br->mdb, br); |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 08e576ada0b2..bbcb43582496 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -33,7 +33,8 @@ | |||
33 | 33 | ||
34 | #include "br_private.h" | 34 | #include "br_private.h" |
35 | 35 | ||
36 | static void br_multicast_start_querier(struct net_bridge *br); | 36 | static void br_multicast_start_querier(struct net_bridge *br, |
37 | struct bridge_mcast_query *query); | ||
37 | unsigned int br_mdb_rehash_seq; | 38 | unsigned int br_mdb_rehash_seq; |
38 | 39 | ||
39 | static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) | 40 | static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) |
@@ -755,20 +756,35 @@ static void br_multicast_local_router_expired(unsigned long data) | |||
755 | { | 756 | { |
756 | } | 757 | } |
757 | 758 | ||
758 | static void br_multicast_querier_expired(unsigned long data) | 759 | static void br_multicast_querier_expired(struct net_bridge *br, |
760 | struct bridge_mcast_query *query) | ||
759 | { | 761 | { |
760 | struct net_bridge *br = (void *)data; | ||
761 | |||
762 | spin_lock(&br->multicast_lock); | 762 | spin_lock(&br->multicast_lock); |
763 | if (!netif_running(br->dev) || br->multicast_disabled) | 763 | if (!netif_running(br->dev) || br->multicast_disabled) |
764 | goto out; | 764 | goto out; |
765 | 765 | ||
766 | br_multicast_start_querier(br); | 766 | br_multicast_start_querier(br, query); |
767 | 767 | ||
768 | out: | 768 | out: |
769 | spin_unlock(&br->multicast_lock); | 769 | spin_unlock(&br->multicast_lock); |
770 | } | 770 | } |
771 | 771 | ||
772 | static void br_ip4_multicast_querier_expired(unsigned long data) | ||
773 | { | ||
774 | struct net_bridge *br = (void *)data; | ||
775 | |||
776 | br_multicast_querier_expired(br, &br->ip4_query); | ||
777 | } | ||
778 | |||
779 | #if IS_ENABLED(CONFIG_IPV6) | ||
780 | static void br_ip6_multicast_querier_expired(unsigned long data) | ||
781 | { | ||
782 | struct net_bridge *br = (void *)data; | ||
783 | |||
784 | br_multicast_querier_expired(br, &br->ip6_query); | ||
785 | } | ||
786 | #endif | ||
787 | |||
772 | static void __br_multicast_send_query(struct net_bridge *br, | 788 | static void __br_multicast_send_query(struct net_bridge *br, |
773 | struct net_bridge_port *port, | 789 | struct net_bridge_port *port, |
774 | struct br_ip *ip) | 790 | struct br_ip *ip) |
@@ -789,37 +805,45 @@ static void __br_multicast_send_query(struct net_bridge *br, | |||
789 | } | 805 | } |
790 | 806 | ||
791 | static void br_multicast_send_query(struct net_bridge *br, | 807 | static void br_multicast_send_query(struct net_bridge *br, |
792 | struct net_bridge_port *port, u32 sent) | 808 | struct net_bridge_port *port, |
809 | struct bridge_mcast_query *query) | ||
793 | { | 810 | { |
794 | unsigned long time; | 811 | unsigned long time; |
795 | struct br_ip br_group; | 812 | struct br_ip br_group; |
813 | struct bridge_mcast_querier *querier = NULL; | ||
796 | 814 | ||
797 | if (!netif_running(br->dev) || br->multicast_disabled || | 815 | if (!netif_running(br->dev) || br->multicast_disabled || |
798 | !br->multicast_querier || | 816 | !br->multicast_querier) |
799 | timer_pending(&br->multicast_querier_timer)) | ||
800 | return; | 817 | return; |
801 | 818 | ||
802 | memset(&br_group.u, 0, sizeof(br_group.u)); | 819 | memset(&br_group.u, 0, sizeof(br_group.u)); |
803 | 820 | ||
804 | br_group.proto = htons(ETH_P_IP); | 821 | if (port ? (query == &port->ip4_query) : |
805 | __br_multicast_send_query(br, port, &br_group); | 822 | (query == &br->ip4_query)) { |
806 | 823 | querier = &br->ip4_querier; | |
824 | br_group.proto = htons(ETH_P_IP); | ||
807 | #if IS_ENABLED(CONFIG_IPV6) | 825 | #if IS_ENABLED(CONFIG_IPV6) |
808 | br_group.proto = htons(ETH_P_IPV6); | 826 | } else { |
809 | __br_multicast_send_query(br, port, &br_group); | 827 | querier = &br->ip6_querier; |
828 | br_group.proto = htons(ETH_P_IPV6); | ||
810 | #endif | 829 | #endif |
830 | } | ||
831 | |||
832 | if (!querier || timer_pending(&querier->timer)) | ||
833 | return; | ||
834 | |||
835 | __br_multicast_send_query(br, port, &br_group); | ||
811 | 836 | ||
812 | time = jiffies; | 837 | time = jiffies; |
813 | time += sent < br->multicast_startup_query_count ? | 838 | time += query->startup_sent < br->multicast_startup_query_count ? |
814 | br->multicast_startup_query_interval : | 839 | br->multicast_startup_query_interval : |
815 | br->multicast_query_interval; | 840 | br->multicast_query_interval; |
816 | mod_timer(port ? &port->multicast_query_timer : | 841 | mod_timer(&query->timer, time); |
817 | &br->multicast_query_timer, time); | ||
818 | } | 842 | } |
819 | 843 | ||
820 | static void br_multicast_port_query_expired(unsigned long data) | 844 | static void br_multicast_port_query_expired(struct net_bridge_port *port, |
845 | struct bridge_mcast_query *query) | ||
821 | { | 846 | { |
822 | struct net_bridge_port *port = (void *)data; | ||
823 | struct net_bridge *br = port->br; | 847 | struct net_bridge *br = port->br; |
824 | 848 | ||
825 | spin_lock(&br->multicast_lock); | 849 | spin_lock(&br->multicast_lock); |
@@ -827,25 +851,43 @@ static void br_multicast_port_query_expired(unsigned long data) | |||
827 | port->state == BR_STATE_BLOCKING) | 851 | port->state == BR_STATE_BLOCKING) |
828 | goto out; | 852 | goto out; |
829 | 853 | ||
830 | if (port->multicast_startup_queries_sent < | 854 | if (query->startup_sent < br->multicast_startup_query_count) |
831 | br->multicast_startup_query_count) | 855 | query->startup_sent++; |
832 | port->multicast_startup_queries_sent++; | ||
833 | 856 | ||
834 | br_multicast_send_query(port->br, port, | 857 | br_multicast_send_query(port->br, port, query); |
835 | port->multicast_startup_queries_sent); | ||
836 | 858 | ||
837 | out: | 859 | out: |
838 | spin_unlock(&br->multicast_lock); | 860 | spin_unlock(&br->multicast_lock); |
839 | } | 861 | } |
840 | 862 | ||
863 | static void br_ip4_multicast_port_query_expired(unsigned long data) | ||
864 | { | ||
865 | struct net_bridge_port *port = (void *)data; | ||
866 | |||
867 | br_multicast_port_query_expired(port, &port->ip4_query); | ||
868 | } | ||
869 | |||
870 | #if IS_ENABLED(CONFIG_IPV6) | ||
871 | static void br_ip6_multicast_port_query_expired(unsigned long data) | ||
872 | { | ||
873 | struct net_bridge_port *port = (void *)data; | ||
874 | |||
875 | br_multicast_port_query_expired(port, &port->ip6_query); | ||
876 | } | ||
877 | #endif | ||
878 | |||
841 | void br_multicast_add_port(struct net_bridge_port *port) | 879 | void br_multicast_add_port(struct net_bridge_port *port) |
842 | { | 880 | { |
843 | port->multicast_router = 1; | 881 | port->multicast_router = 1; |
844 | 882 | ||
845 | setup_timer(&port->multicast_router_timer, br_multicast_router_expired, | 883 | setup_timer(&port->multicast_router_timer, br_multicast_router_expired, |
846 | (unsigned long)port); | 884 | (unsigned long)port); |
847 | setup_timer(&port->multicast_query_timer, | 885 | setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired, |
848 | br_multicast_port_query_expired, (unsigned long)port); | 886 | (unsigned long)port); |
887 | #if IS_ENABLED(CONFIG_IPV6) | ||
888 | setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired, | ||
889 | (unsigned long)port); | ||
890 | #endif | ||
849 | } | 891 | } |
850 | 892 | ||
851 | void br_multicast_del_port(struct net_bridge_port *port) | 893 | void br_multicast_del_port(struct net_bridge_port *port) |
@@ -853,13 +895,13 @@ void br_multicast_del_port(struct net_bridge_port *port) | |||
853 | del_timer_sync(&port->multicast_router_timer); | 895 | del_timer_sync(&port->multicast_router_timer); |
854 | } | 896 | } |
855 | 897 | ||
856 | static void __br_multicast_enable_port(struct net_bridge_port *port) | 898 | static void br_multicast_enable(struct bridge_mcast_query *query) |
857 | { | 899 | { |
858 | port->multicast_startup_queries_sent = 0; | 900 | query->startup_sent = 0; |
859 | 901 | ||
860 | if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 || | 902 | if (try_to_del_timer_sync(&query->timer) >= 0 || |
861 | del_timer(&port->multicast_query_timer)) | 903 | del_timer(&query->timer)) |
862 | mod_timer(&port->multicast_query_timer, jiffies); | 904 | mod_timer(&query->timer, jiffies); |
863 | } | 905 | } |
864 | 906 | ||
865 | void br_multicast_enable_port(struct net_bridge_port *port) | 907 | void br_multicast_enable_port(struct net_bridge_port *port) |
@@ -870,7 +912,10 @@ void br_multicast_enable_port(struct net_bridge_port *port) | |||
870 | if (br->multicast_disabled || !netif_running(br->dev)) | 912 | if (br->multicast_disabled || !netif_running(br->dev)) |
871 | goto out; | 913 | goto out; |
872 | 914 | ||
873 | __br_multicast_enable_port(port); | 915 | br_multicast_enable(&port->ip4_query); |
916 | #if IS_ENABLED(CONFIG_IPV6) | ||
917 | br_multicast_enable(&port->ip6_query); | ||
918 | #endif | ||
874 | 919 | ||
875 | out: | 920 | out: |
876 | spin_unlock(&br->multicast_lock); | 921 | spin_unlock(&br->multicast_lock); |
@@ -889,7 +934,10 @@ void br_multicast_disable_port(struct net_bridge_port *port) | |||
889 | if (!hlist_unhashed(&port->rlist)) | 934 | if (!hlist_unhashed(&port->rlist)) |
890 | hlist_del_init_rcu(&port->rlist); | 935 | hlist_del_init_rcu(&port->rlist); |
891 | del_timer(&port->multicast_router_timer); | 936 | del_timer(&port->multicast_router_timer); |
892 | del_timer(&port->multicast_query_timer); | 937 | del_timer(&port->ip4_query.timer); |
938 | #if IS_ENABLED(CONFIG_IPV6) | ||
939 | del_timer(&port->ip6_query.timer); | ||
940 | #endif | ||
893 | spin_unlock(&br->multicast_lock); | 941 | spin_unlock(&br->multicast_lock); |
894 | } | 942 | } |
895 | 943 | ||
@@ -1014,14 +1062,15 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, | |||
1014 | } | 1062 | } |
1015 | #endif | 1063 | #endif |
1016 | 1064 | ||
1017 | static void br_multicast_update_querier_timer(struct net_bridge *br, | 1065 | static void |
1018 | unsigned long max_delay) | 1066 | br_multicast_update_querier_timer(struct net_bridge *br, |
1067 | struct bridge_mcast_querier *querier, | ||
1068 | unsigned long max_delay) | ||
1019 | { | 1069 | { |
1020 | if (!timer_pending(&br->multicast_querier_timer)) | 1070 | if (!timer_pending(&querier->timer)) |
1021 | br->multicast_querier_delay_time = jiffies + max_delay; | 1071 | querier->delay_time = jiffies + max_delay; |
1022 | 1072 | ||
1023 | mod_timer(&br->multicast_querier_timer, | 1073 | mod_timer(&querier->timer, jiffies + br->multicast_querier_interval); |
1024 | jiffies + br->multicast_querier_interval); | ||
1025 | } | 1074 | } |
1026 | 1075 | ||
1027 | /* | 1076 | /* |
@@ -1074,12 +1123,13 @@ timer: | |||
1074 | 1123 | ||
1075 | static void br_multicast_query_received(struct net_bridge *br, | 1124 | static void br_multicast_query_received(struct net_bridge *br, |
1076 | struct net_bridge_port *port, | 1125 | struct net_bridge_port *port, |
1126 | struct bridge_mcast_querier *querier, | ||
1077 | int saddr, | 1127 | int saddr, |
1078 | unsigned long max_delay) | 1128 | unsigned long max_delay) |
1079 | { | 1129 | { |
1080 | if (saddr) | 1130 | if (saddr) |
1081 | br_multicast_update_querier_timer(br, max_delay); | 1131 | br_multicast_update_querier_timer(br, querier, max_delay); |
1082 | else if (timer_pending(&br->multicast_querier_timer)) | 1132 | else if (timer_pending(&querier->timer)) |
1083 | return; | 1133 | return; |
1084 | 1134 | ||
1085 | br_multicast_mark_router(br, port); | 1135 | br_multicast_mark_router(br, port); |
@@ -1129,7 +1179,8 @@ static int br_ip4_multicast_query(struct net_bridge *br, | |||
1129 | IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; | 1179 | IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; |
1130 | } | 1180 | } |
1131 | 1181 | ||
1132 | br_multicast_query_received(br, port, !!iph->saddr, max_delay); | 1182 | br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr, |
1183 | max_delay); | ||
1133 | 1184 | ||
1134 | if (!group) | 1185 | if (!group) |
1135 | goto out; | 1186 | goto out; |
@@ -1203,11 +1254,12 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
1203 | mld2q = (struct mld2_query *)icmp6_hdr(skb); | 1254 | mld2q = (struct mld2_query *)icmp6_hdr(skb); |
1204 | if (!mld2q->mld2q_nsrcs) | 1255 | if (!mld2q->mld2q_nsrcs) |
1205 | group = &mld2q->mld2q_mca; | 1256 | group = &mld2q->mld2q_mca; |
1206 | max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1; | 1257 | |
1258 | max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL); | ||
1207 | } | 1259 | } |
1208 | 1260 | ||
1209 | br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr), | 1261 | br_multicast_query_received(br, port, &br->ip6_querier, |
1210 | max_delay); | 1262 | !ipv6_addr_any(&ip6h->saddr), max_delay); |
1211 | 1263 | ||
1212 | if (!group) | 1264 | if (!group) |
1213 | goto out; | 1265 | goto out; |
@@ -1244,7 +1296,9 @@ out: | |||
1244 | 1296 | ||
1245 | static void br_multicast_leave_group(struct net_bridge *br, | 1297 | static void br_multicast_leave_group(struct net_bridge *br, |
1246 | struct net_bridge_port *port, | 1298 | struct net_bridge_port *port, |
1247 | struct br_ip *group) | 1299 | struct br_ip *group, |
1300 | struct bridge_mcast_querier *querier, | ||
1301 | struct bridge_mcast_query *query) | ||
1248 | { | 1302 | { |
1249 | struct net_bridge_mdb_htable *mdb; | 1303 | struct net_bridge_mdb_htable *mdb; |
1250 | struct net_bridge_mdb_entry *mp; | 1304 | struct net_bridge_mdb_entry *mp; |
@@ -1255,7 +1309,7 @@ static void br_multicast_leave_group(struct net_bridge *br, | |||
1255 | spin_lock(&br->multicast_lock); | 1309 | spin_lock(&br->multicast_lock); |
1256 | if (!netif_running(br->dev) || | 1310 | if (!netif_running(br->dev) || |
1257 | (port && port->state == BR_STATE_DISABLED) || | 1311 | (port && port->state == BR_STATE_DISABLED) || |
1258 | timer_pending(&br->multicast_querier_timer)) | 1312 | timer_pending(&querier->timer)) |
1259 | goto out; | 1313 | goto out; |
1260 | 1314 | ||
1261 | mdb = mlock_dereference(br->mdb, br); | 1315 | mdb = mlock_dereference(br->mdb, br); |
@@ -1263,14 +1317,13 @@ static void br_multicast_leave_group(struct net_bridge *br, | |||
1263 | if (!mp) | 1317 | if (!mp) |
1264 | goto out; | 1318 | goto out; |
1265 | 1319 | ||
1266 | if (br->multicast_querier && | 1320 | if (br->multicast_querier) { |
1267 | !timer_pending(&br->multicast_querier_timer)) { | ||
1268 | __br_multicast_send_query(br, port, &mp->addr); | 1321 | __br_multicast_send_query(br, port, &mp->addr); |
1269 | 1322 | ||
1270 | time = jiffies + br->multicast_last_member_count * | 1323 | time = jiffies + br->multicast_last_member_count * |
1271 | br->multicast_last_member_interval; | 1324 | br->multicast_last_member_interval; |
1272 | mod_timer(port ? &port->multicast_query_timer : | 1325 | |
1273 | &br->multicast_query_timer, time); | 1326 | mod_timer(&query->timer, time); |
1274 | 1327 | ||
1275 | for (p = mlock_dereference(mp->ports, br); | 1328 | for (p = mlock_dereference(mp->ports, br); |
1276 | p != NULL; | 1329 | p != NULL; |
@@ -1323,7 +1376,6 @@ static void br_multicast_leave_group(struct net_bridge *br, | |||
1323 | mod_timer(&mp->timer, time); | 1376 | mod_timer(&mp->timer, time); |
1324 | } | 1377 | } |
1325 | } | 1378 | } |
1326 | |||
1327 | out: | 1379 | out: |
1328 | spin_unlock(&br->multicast_lock); | 1380 | spin_unlock(&br->multicast_lock); |
1329 | } | 1381 | } |
@@ -1334,6 +1386,8 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br, | |||
1334 | __u16 vid) | 1386 | __u16 vid) |
1335 | { | 1387 | { |
1336 | struct br_ip br_group; | 1388 | struct br_ip br_group; |
1389 | struct bridge_mcast_query *query = port ? &port->ip4_query : | ||
1390 | &br->ip4_query; | ||
1337 | 1391 | ||
1338 | if (ipv4_is_local_multicast(group)) | 1392 | if (ipv4_is_local_multicast(group)) |
1339 | return; | 1393 | return; |
@@ -1342,7 +1396,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br, | |||
1342 | br_group.proto = htons(ETH_P_IP); | 1396 | br_group.proto = htons(ETH_P_IP); |
1343 | br_group.vid = vid; | 1397 | br_group.vid = vid; |
1344 | 1398 | ||
1345 | br_multicast_leave_group(br, port, &br_group); | 1399 | br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query); |
1346 | } | 1400 | } |
1347 | 1401 | ||
1348 | #if IS_ENABLED(CONFIG_IPV6) | 1402 | #if IS_ENABLED(CONFIG_IPV6) |
@@ -1352,6 +1406,9 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br, | |||
1352 | __u16 vid) | 1406 | __u16 vid) |
1353 | { | 1407 | { |
1354 | struct br_ip br_group; | 1408 | struct br_ip br_group; |
1409 | struct bridge_mcast_query *query = port ? &port->ip6_query : | ||
1410 | &br->ip6_query; | ||
1411 | |||
1355 | 1412 | ||
1356 | if (!ipv6_is_transient_multicast(group)) | 1413 | if (!ipv6_is_transient_multicast(group)) |
1357 | return; | 1414 | return; |
@@ -1360,7 +1417,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br, | |||
1360 | br_group.proto = htons(ETH_P_IPV6); | 1417 | br_group.proto = htons(ETH_P_IPV6); |
1361 | br_group.vid = vid; | 1418 | br_group.vid = vid; |
1362 | 1419 | ||
1363 | br_multicast_leave_group(br, port, &br_group); | 1420 | br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query); |
1364 | } | 1421 | } |
1365 | #endif | 1422 | #endif |
1366 | 1423 | ||
@@ -1622,19 +1679,32 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, | |||
1622 | return 0; | 1679 | return 0; |
1623 | } | 1680 | } |
1624 | 1681 | ||
1625 | static void br_multicast_query_expired(unsigned long data) | 1682 | static void br_multicast_query_expired(struct net_bridge *br, |
1683 | struct bridge_mcast_query *query) | ||
1684 | { | ||
1685 | spin_lock(&br->multicast_lock); | ||
1686 | if (query->startup_sent < br->multicast_startup_query_count) | ||
1687 | query->startup_sent++; | ||
1688 | |||
1689 | br_multicast_send_query(br, NULL, query); | ||
1690 | spin_unlock(&br->multicast_lock); | ||
1691 | } | ||
1692 | |||
1693 | static void br_ip4_multicast_query_expired(unsigned long data) | ||
1626 | { | 1694 | { |
1627 | struct net_bridge *br = (void *)data; | 1695 | struct net_bridge *br = (void *)data; |
1628 | 1696 | ||
1629 | spin_lock(&br->multicast_lock); | 1697 | br_multicast_query_expired(br, &br->ip4_query); |
1630 | if (br->multicast_startup_queries_sent < | 1698 | } |
1631 | br->multicast_startup_query_count) | ||
1632 | br->multicast_startup_queries_sent++; | ||
1633 | 1699 | ||
1634 | br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent); | 1700 | #if IS_ENABLED(CONFIG_IPV6) |
1701 | static void br_ip6_multicast_query_expired(unsigned long data) | ||
1702 | { | ||
1703 | struct net_bridge *br = (void *)data; | ||
1635 | 1704 | ||
1636 | spin_unlock(&br->multicast_lock); | 1705 | br_multicast_query_expired(br, &br->ip6_query); |
1637 | } | 1706 | } |
1707 | #endif | ||
1638 | 1708 | ||
1639 | void br_multicast_init(struct net_bridge *br) | 1709 | void br_multicast_init(struct net_bridge *br) |
1640 | { | 1710 | { |
@@ -1654,25 +1724,43 @@ void br_multicast_init(struct net_bridge *br) | |||
1654 | br->multicast_querier_interval = 255 * HZ; | 1724 | br->multicast_querier_interval = 255 * HZ; |
1655 | br->multicast_membership_interval = 260 * HZ; | 1725 | br->multicast_membership_interval = 260 * HZ; |
1656 | 1726 | ||
1657 | br->multicast_querier_delay_time = 0; | 1727 | br->ip4_querier.delay_time = 0; |
1728 | #if IS_ENABLED(CONFIG_IPV6) | ||
1729 | br->ip6_querier.delay_time = 0; | ||
1730 | #endif | ||
1658 | 1731 | ||
1659 | spin_lock_init(&br->multicast_lock); | 1732 | spin_lock_init(&br->multicast_lock); |
1660 | setup_timer(&br->multicast_router_timer, | 1733 | setup_timer(&br->multicast_router_timer, |
1661 | br_multicast_local_router_expired, 0); | 1734 | br_multicast_local_router_expired, 0); |
1662 | setup_timer(&br->multicast_querier_timer, | 1735 | setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired, |
1663 | br_multicast_querier_expired, (unsigned long)br); | 1736 | (unsigned long)br); |
1664 | setup_timer(&br->multicast_query_timer, br_multicast_query_expired, | 1737 | setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired, |
1665 | (unsigned long)br); | 1738 | (unsigned long)br); |
1739 | #if IS_ENABLED(CONFIG_IPV6) | ||
1740 | setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired, | ||
1741 | (unsigned long)br); | ||
1742 | setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired, | ||
1743 | (unsigned long)br); | ||
1744 | #endif | ||
1666 | } | 1745 | } |
1667 | 1746 | ||
1668 | void br_multicast_open(struct net_bridge *br) | 1747 | static void __br_multicast_open(struct net_bridge *br, |
1748 | struct bridge_mcast_query *query) | ||
1669 | { | 1749 | { |
1670 | br->multicast_startup_queries_sent = 0; | 1750 | query->startup_sent = 0; |
1671 | 1751 | ||
1672 | if (br->multicast_disabled) | 1752 | if (br->multicast_disabled) |
1673 | return; | 1753 | return; |
1674 | 1754 | ||
1675 | mod_timer(&br->multicast_query_timer, jiffies); | 1755 | mod_timer(&query->timer, jiffies); |
1756 | } | ||
1757 | |||
1758 | void br_multicast_open(struct net_bridge *br) | ||
1759 | { | ||
1760 | __br_multicast_open(br, &br->ip4_query); | ||
1761 | #if IS_ENABLED(CONFIG_IPV6) | ||
1762 | __br_multicast_open(br, &br->ip6_query); | ||
1763 | #endif | ||
1676 | } | 1764 | } |
1677 | 1765 | ||
1678 | void br_multicast_stop(struct net_bridge *br) | 1766 | void br_multicast_stop(struct net_bridge *br) |
@@ -1684,8 +1772,12 @@ void br_multicast_stop(struct net_bridge *br) | |||
1684 | int i; | 1772 | int i; |
1685 | 1773 | ||
1686 | del_timer_sync(&br->multicast_router_timer); | 1774 | del_timer_sync(&br->multicast_router_timer); |
1687 | del_timer_sync(&br->multicast_querier_timer); | 1775 | del_timer_sync(&br->ip4_querier.timer); |
1688 | del_timer_sync(&br->multicast_query_timer); | 1776 | del_timer_sync(&br->ip4_query.timer); |
1777 | #if IS_ENABLED(CONFIG_IPV6) | ||
1778 | del_timer_sync(&br->ip6_querier.timer); | ||
1779 | del_timer_sync(&br->ip6_query.timer); | ||
1780 | #endif | ||
1689 | 1781 | ||
1690 | spin_lock_bh(&br->multicast_lock); | 1782 | spin_lock_bh(&br->multicast_lock); |
1691 | mdb = mlock_dereference(br->mdb, br); | 1783 | mdb = mlock_dereference(br->mdb, br); |
@@ -1788,18 +1880,24 @@ unlock: | |||
1788 | return err; | 1880 | return err; |
1789 | } | 1881 | } |
1790 | 1882 | ||
1791 | static void br_multicast_start_querier(struct net_bridge *br) | 1883 | static void br_multicast_start_querier(struct net_bridge *br, |
1884 | struct bridge_mcast_query *query) | ||
1792 | { | 1885 | { |
1793 | struct net_bridge_port *port; | 1886 | struct net_bridge_port *port; |
1794 | 1887 | ||
1795 | br_multicast_open(br); | 1888 | __br_multicast_open(br, query); |
1796 | 1889 | ||
1797 | list_for_each_entry(port, &br->port_list, list) { | 1890 | list_for_each_entry(port, &br->port_list, list) { |
1798 | if (port->state == BR_STATE_DISABLED || | 1891 | if (port->state == BR_STATE_DISABLED || |
1799 | port->state == BR_STATE_BLOCKING) | 1892 | port->state == BR_STATE_BLOCKING) |
1800 | continue; | 1893 | continue; |
1801 | 1894 | ||
1802 | __br_multicast_enable_port(port); | 1895 | if (query == &br->ip4_query) |
1896 | br_multicast_enable(&port->ip4_query); | ||
1897 | #if IS_ENABLED(CONFIG_IPV6) | ||
1898 | else | ||
1899 | br_multicast_enable(&port->ip6_query); | ||
1900 | #endif | ||
1803 | } | 1901 | } |
1804 | } | 1902 | } |
1805 | 1903 | ||
@@ -1834,7 +1932,10 @@ rollback: | |||
1834 | goto rollback; | 1932 | goto rollback; |
1835 | } | 1933 | } |
1836 | 1934 | ||
1837 | br_multicast_start_querier(br); | 1935 | br_multicast_start_querier(br, &br->ip4_query); |
1936 | #if IS_ENABLED(CONFIG_IPV6) | ||
1937 | br_multicast_start_querier(br, &br->ip6_query); | ||
1938 | #endif | ||
1838 | 1939 | ||
1839 | unlock: | 1940 | unlock: |
1840 | spin_unlock_bh(&br->multicast_lock); | 1941 | spin_unlock_bh(&br->multicast_lock); |
@@ -1857,10 +1958,18 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val) | |||
1857 | goto unlock; | 1958 | goto unlock; |
1858 | 1959 | ||
1859 | max_delay = br->multicast_query_response_interval; | 1960 | max_delay = br->multicast_query_response_interval; |
1860 | if (!timer_pending(&br->multicast_querier_timer)) | ||
1861 | br->multicast_querier_delay_time = jiffies + max_delay; | ||
1862 | 1961 | ||
1863 | br_multicast_start_querier(br); | 1962 | if (!timer_pending(&br->ip4_querier.timer)) |
1963 | br->ip4_querier.delay_time = jiffies + max_delay; | ||
1964 | |||
1965 | br_multicast_start_querier(br, &br->ip4_query); | ||
1966 | |||
1967 | #if IS_ENABLED(CONFIG_IPV6) | ||
1968 | if (!timer_pending(&br->ip6_querier.timer)) | ||
1969 | br->ip6_querier.delay_time = jiffies + max_delay; | ||
1970 | |||
1971 | br_multicast_start_querier(br, &br->ip6_query); | ||
1972 | #endif | ||
1864 | 1973 | ||
1865 | unlock: | 1974 | unlock: |
1866 | spin_unlock_bh(&br->multicast_lock); | 1975 | spin_unlock_bh(&br->multicast_lock); |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 2f7da41851bf..263ba9034468 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -66,6 +66,20 @@ struct br_ip | |||
66 | __u16 vid; | 66 | __u16 vid; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | ||
70 | /* our own querier */ | ||
71 | struct bridge_mcast_query { | ||
72 | struct timer_list timer; | ||
73 | u32 startup_sent; | ||
74 | }; | ||
75 | |||
76 | /* other querier */ | ||
77 | struct bridge_mcast_querier { | ||
78 | struct timer_list timer; | ||
79 | unsigned long delay_time; | ||
80 | }; | ||
81 | #endif | ||
82 | |||
69 | struct net_port_vlans { | 83 | struct net_port_vlans { |
70 | u16 port_idx; | 84 | u16 port_idx; |
71 | u16 pvid; | 85 | u16 pvid; |
@@ -162,10 +176,12 @@ struct net_bridge_port | |||
162 | #define BR_FLOOD 0x00000040 | 176 | #define BR_FLOOD 0x00000040 |
163 | 177 | ||
164 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | 178 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING |
165 | u32 multicast_startup_queries_sent; | 179 | struct bridge_mcast_query ip4_query; |
180 | #if IS_ENABLED(CONFIG_IPV6) | ||
181 | struct bridge_mcast_query ip6_query; | ||
182 | #endif /* IS_ENABLED(CONFIG_IPV6) */ | ||
166 | unsigned char multicast_router; | 183 | unsigned char multicast_router; |
167 | struct timer_list multicast_router_timer; | 184 | struct timer_list multicast_router_timer; |
168 | struct timer_list multicast_query_timer; | ||
169 | struct hlist_head mglist; | 185 | struct hlist_head mglist; |
170 | struct hlist_node rlist; | 186 | struct hlist_node rlist; |
171 | #endif | 187 | #endif |
@@ -258,7 +274,6 @@ struct net_bridge | |||
258 | u32 hash_max; | 274 | u32 hash_max; |
259 | 275 | ||
260 | u32 multicast_last_member_count; | 276 | u32 multicast_last_member_count; |
261 | u32 multicast_startup_queries_sent; | ||
262 | u32 multicast_startup_query_count; | 277 | u32 multicast_startup_query_count; |
263 | 278 | ||
264 | unsigned long multicast_last_member_interval; | 279 | unsigned long multicast_last_member_interval; |
@@ -267,15 +282,18 @@ struct net_bridge | |||
267 | unsigned long multicast_query_interval; | 282 | unsigned long multicast_query_interval; |
268 | unsigned long multicast_query_response_interval; | 283 | unsigned long multicast_query_response_interval; |
269 | unsigned long multicast_startup_query_interval; | 284 | unsigned long multicast_startup_query_interval; |
270 | unsigned long multicast_querier_delay_time; | ||
271 | 285 | ||
272 | spinlock_t multicast_lock; | 286 | spinlock_t multicast_lock; |
273 | struct net_bridge_mdb_htable __rcu *mdb; | 287 | struct net_bridge_mdb_htable __rcu *mdb; |
274 | struct hlist_head router_list; | 288 | struct hlist_head router_list; |
275 | 289 | ||
276 | struct timer_list multicast_router_timer; | 290 | struct timer_list multicast_router_timer; |
277 | struct timer_list multicast_querier_timer; | 291 | struct bridge_mcast_querier ip4_querier; |
278 | struct timer_list multicast_query_timer; | 292 | struct bridge_mcast_query ip4_query; |
293 | #if IS_ENABLED(CONFIG_IPV6) | ||
294 | struct bridge_mcast_querier ip6_querier; | ||
295 | struct bridge_mcast_query ip6_query; | ||
296 | #endif /* IS_ENABLED(CONFIG_IPV6) */ | ||
279 | #endif | 297 | #endif |
280 | 298 | ||
281 | struct timer_list hello_timer; | 299 | struct timer_list hello_timer; |
@@ -503,11 +521,27 @@ static inline bool br_multicast_is_router(struct net_bridge *br) | |||
503 | timer_pending(&br->multicast_router_timer)); | 521 | timer_pending(&br->multicast_router_timer)); |
504 | } | 522 | } |
505 | 523 | ||
506 | static inline bool br_multicast_querier_exists(struct net_bridge *br) | 524 | static inline bool |
525 | __br_multicast_querier_exists(struct net_bridge *br, | ||
526 | struct bridge_mcast_querier *querier) | ||
527 | { | ||
528 | return time_is_before_jiffies(querier->delay_time) && | ||
529 | (br->multicast_querier || timer_pending(&querier->timer)); | ||
530 | } | ||
531 | |||
532 | static inline bool br_multicast_querier_exists(struct net_bridge *br, | ||
533 | struct ethhdr *eth) | ||
507 | { | 534 | { |
508 | return time_is_before_jiffies(br->multicast_querier_delay_time) && | 535 | switch (eth->h_proto) { |
509 | (br->multicast_querier || | 536 | case (htons(ETH_P_IP)): |
510 | timer_pending(&br->multicast_querier_timer)); | 537 | return __br_multicast_querier_exists(br, &br->ip4_querier); |
538 | #if IS_ENABLED(CONFIG_IPV6) | ||
539 | case (htons(ETH_P_IPV6)): | ||
540 | return __br_multicast_querier_exists(br, &br->ip6_querier); | ||
541 | #endif | ||
542 | default: | ||
543 | return false; | ||
544 | } | ||
511 | } | 545 | } |
512 | #else | 546 | #else |
513 | static inline int br_multicast_rcv(struct net_bridge *br, | 547 | static inline int br_multicast_rcv(struct net_bridge *br, |
@@ -565,7 +599,8 @@ static inline bool br_multicast_is_router(struct net_bridge *br) | |||
565 | { | 599 | { |
566 | return 0; | 600 | return 0; |
567 | } | 601 | } |
568 | static inline bool br_multicast_querier_exists(struct net_bridge *br) | 602 | static inline bool br_multicast_querier_exists(struct net_bridge *br, |
603 | struct ethhdr *eth) | ||
569 | { | 604 | { |
570 | return false; | 605 | return false; |
571 | } | 606 | } |
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index b84a1b155bc1..d12e3a9a5356 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
@@ -346,14 +346,9 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) | |||
346 | if (new_index < 0) | 346 | if (new_index < 0) |
347 | new_index = skb_tx_hash(dev, skb); | 347 | new_index = skb_tx_hash(dev, skb); |
348 | 348 | ||
349 | if (queue_index != new_index && sk) { | 349 | if (queue_index != new_index && sk && |
350 | struct dst_entry *dst = | 350 | rcu_access_pointer(sk->sk_dst_cache)) |
351 | rcu_dereference_check(sk->sk_dst_cache, 1); | 351 | sk_tx_queue_set(sk, queue_index); |
352 | |||
353 | if (dst && skb_dst(skb) == dst) | ||
354 | sk_tx_queue_set(sk, queue_index); | ||
355 | |||
356 | } | ||
357 | 352 | ||
358 | queue_index = new_index; | 353 | queue_index = new_index; |
359 | } | 354 | } |
diff --git a/net/core/scm.c b/net/core/scm.c index 03795d0147f2..b4da80b1cc07 100644 --- a/net/core/scm.c +++ b/net/core/scm.c | |||
@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds) | |||
54 | return -EINVAL; | 54 | return -EINVAL; |
55 | 55 | ||
56 | if ((creds->pid == task_tgid_vnr(current) || | 56 | if ((creds->pid == task_tgid_vnr(current) || |
57 | ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) && | 57 | ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) && |
58 | ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || | 58 | ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || |
59 | uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && | 59 | uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && |
60 | ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || | 60 | ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 4bcabf3ab4ca..9ee17e3d11c3 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -211,14 +211,6 @@ static inline int ip_finish_output2(struct sk_buff *skb) | |||
211 | return -EINVAL; | 211 | return -EINVAL; |
212 | } | 212 | } |
213 | 213 | ||
214 | static inline int ip_skb_dst_mtu(struct sk_buff *skb) | ||
215 | { | ||
216 | struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL; | ||
217 | |||
218 | return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ? | ||
219 | skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); | ||
220 | } | ||
221 | |||
222 | static int ip_finish_output(struct sk_buff *skb) | 214 | static int ip_finish_output(struct sk_buff *skb) |
223 | { | 215 | { |
224 | #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) | 216 | #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 51fc2a1dcdd3..b3ac3c3f6219 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -190,15 +190,14 @@ static int ipip_rcv(struct sk_buff *skb) | |||
190 | struct ip_tunnel *tunnel; | 190 | struct ip_tunnel *tunnel; |
191 | const struct iphdr *iph; | 191 | const struct iphdr *iph; |
192 | 192 | ||
193 | if (iptunnel_pull_header(skb, 0, tpi.proto)) | ||
194 | goto drop; | ||
195 | |||
196 | iph = ip_hdr(skb); | 193 | iph = ip_hdr(skb); |
197 | tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, | 194 | tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, |
198 | iph->saddr, iph->daddr, 0); | 195 | iph->saddr, iph->daddr, 0); |
199 | if (tunnel) { | 196 | if (tunnel) { |
200 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | 197 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
201 | goto drop; | 198 | goto drop; |
199 | if (iptunnel_pull_header(skb, 0, tpi.proto)) | ||
200 | goto drop; | ||
202 | return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); | 201 | return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); |
203 | } | 202 | } |
204 | 203 | ||
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index dd44e0ab600c..61e60d67adca 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -571,7 +571,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
571 | flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, | 571 | flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, |
572 | RT_SCOPE_UNIVERSE, | 572 | RT_SCOPE_UNIVERSE, |
573 | inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, | 573 | inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, |
574 | inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP, | 574 | inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP | |
575 | (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), | ||
575 | daddr, saddr, 0, 0); | 576 | daddr, saddr, 0, 0); |
576 | 577 | ||
577 | if (!inet->hdrincl) { | 578 | if (!inet->hdrincl) { |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 28af45abe062..3ca2139a130b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -3535,7 +3535,10 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr | |||
3535 | ++ptr; | 3535 | ++ptr; |
3536 | tp->rx_opt.rcv_tsval = ntohl(*ptr); | 3536 | tp->rx_opt.rcv_tsval = ntohl(*ptr); |
3537 | ++ptr; | 3537 | ++ptr; |
3538 | tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; | 3538 | if (*ptr) |
3539 | tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; | ||
3540 | else | ||
3541 | tp->rx_opt.rcv_tsecr = 0; | ||
3539 | return true; | 3542 | return true; |
3540 | } | 3543 | } |
3541 | return false; | 3544 | return false; |
@@ -3560,7 +3563,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb, | |||
3560 | } | 3563 | } |
3561 | 3564 | ||
3562 | tcp_parse_options(skb, &tp->rx_opt, 1, NULL); | 3565 | tcp_parse_options(skb, &tp->rx_opt, 1, NULL); |
3563 | if (tp->rx_opt.saw_tstamp) | 3566 | if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) |
3564 | tp->rx_opt.rcv_tsecr -= tp->tsoffset; | 3567 | tp->rx_opt.rcv_tsecr -= tp->tsoffset; |
3565 | 3568 | ||
3566 | return true; | 3569 | return true; |
@@ -5316,7 +5319,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
5316 | int saved_clamp = tp->rx_opt.mss_clamp; | 5319 | int saved_clamp = tp->rx_opt.mss_clamp; |
5317 | 5320 | ||
5318 | tcp_parse_options(skb, &tp->rx_opt, 0, &foc); | 5321 | tcp_parse_options(skb, &tp->rx_opt, 0, &foc); |
5319 | if (tp->rx_opt.saw_tstamp) | 5322 | if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) |
5320 | tp->rx_opt.rcv_tsecr -= tp->tsoffset; | 5323 | tp->rx_opt.rcv_tsecr -= tp->tsoffset; |
5321 | 5324 | ||
5322 | if (th->ack) { | 5325 | if (th->ack) { |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 92fde8d1aa82..170737a9d56d 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2670,7 +2670,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2670 | int tcp_header_size; | 2670 | int tcp_header_size; |
2671 | int mss; | 2671 | int mss; |
2672 | 2672 | ||
2673 | skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC)); | 2673 | skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); |
2674 | if (unlikely(!skb)) { | 2674 | if (unlikely(!skb)) { |
2675 | dst_release(dst); | 2675 | dst_release(dst); |
2676 | return NULL; | 2676 | return NULL; |
@@ -2814,6 +2814,8 @@ void tcp_connect_init(struct sock *sk) | |||
2814 | 2814 | ||
2815 | if (likely(!tp->repair)) | 2815 | if (likely(!tp->repair)) |
2816 | tp->rcv_nxt = 0; | 2816 | tp->rcv_nxt = 0; |
2817 | else | ||
2818 | tp->rcv_tstamp = tcp_time_stamp; | ||
2817 | tp->rcv_wup = tp->rcv_nxt; | 2819 | tp->rcv_wup = tp->rcv_nxt; |
2818 | tp->copied_seq = tp->rcv_nxt; | 2820 | tp->copied_seq = tp->rcv_nxt; |
2819 | 2821 | ||
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 327a617d594c..baa0f63731fd 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -21,7 +21,6 @@ | |||
21 | static int xfrm4_tunnel_check_size(struct sk_buff *skb) | 21 | static int xfrm4_tunnel_check_size(struct sk_buff *skb) |
22 | { | 22 | { |
23 | int mtu, ret = 0; | 23 | int mtu, ret = 0; |
24 | struct dst_entry *dst; | ||
25 | 24 | ||
26 | if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) | 25 | if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) |
27 | goto out; | 26 | goto out; |
@@ -29,12 +28,10 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb) | |||
29 | if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) | 28 | if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) |
30 | goto out; | 29 | goto out; |
31 | 30 | ||
32 | dst = skb_dst(skb); | 31 | mtu = dst_mtu(skb_dst(skb)); |
33 | mtu = dst_mtu(dst); | ||
34 | if (skb->len > mtu) { | 32 | if (skb->len > mtu) { |
35 | if (skb->sk) | 33 | if (skb->sk) |
36 | ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr, | 34 | xfrm_local_error(skb, mtu); |
37 | inet_sk(skb->sk)->inet_dport, mtu); | ||
38 | else | 35 | else |
39 | icmp_send(skb, ICMP_DEST_UNREACH, | 36 | icmp_send(skb, ICMP_DEST_UNREACH, |
40 | ICMP_FRAG_NEEDED, htonl(mtu)); | 37 | ICMP_FRAG_NEEDED, htonl(mtu)); |
@@ -99,3 +96,12 @@ int xfrm4_output(struct sk_buff *skb) | |||
99 | x->outer_mode->afinfo->output_finish, | 96 | x->outer_mode->afinfo->output_finish, |
100 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 97 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
101 | } | 98 | } |
99 | |||
100 | void xfrm4_local_error(struct sk_buff *skb, u32 mtu) | ||
101 | { | ||
102 | struct iphdr *hdr; | ||
103 | |||
104 | hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); | ||
105 | ip_local_error(skb->sk, EMSGSIZE, hdr->daddr, | ||
106 | inet_sk(skb->sk)->inet_dport, mtu); | ||
107 | } | ||
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c index 9258e751baba..0b2a0641526a 100644 --- a/net/ipv4/xfrm4_state.c +++ b/net/ipv4/xfrm4_state.c | |||
@@ -83,6 +83,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = { | |||
83 | .extract_input = xfrm4_extract_input, | 83 | .extract_input = xfrm4_extract_input, |
84 | .extract_output = xfrm4_extract_output, | 84 | .extract_output = xfrm4_extract_output, |
85 | .transport_finish = xfrm4_transport_finish, | 85 | .transport_finish = xfrm4_transport_finish, |
86 | .local_error = xfrm4_local_error, | ||
86 | }; | 87 | }; |
87 | 88 | ||
88 | void __init xfrm4_state_init(void) | 89 | void __init xfrm4_state_init(void) |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index ecd60733e5e2..90747f1973fe 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -724,6 +724,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, | |||
724 | ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); | 724 | ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); |
725 | } | 725 | } |
726 | 726 | ||
727 | if (likely(!skb->encapsulation)) { | ||
728 | skb_reset_inner_headers(skb); | ||
729 | skb->encapsulation = 1; | ||
730 | } | ||
731 | |||
727 | skb_push(skb, gre_hlen); | 732 | skb_push(skb, gre_hlen); |
728 | skb_reset_network_header(skb); | 733 | skb_reset_network_header(skb); |
729 | skb_set_transport_header(skb, sizeof(*ipv6h)); | 734 | skb_set_transport_header(skb, sizeof(*ipv6h)); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 6e3ddf806ec2..e7ceb6c871d1 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -238,6 +238,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, | |||
238 | hdr->saddr = fl6->saddr; | 238 | hdr->saddr = fl6->saddr; |
239 | hdr->daddr = *first_hop; | 239 | hdr->daddr = *first_hop; |
240 | 240 | ||
241 | skb->protocol = htons(ETH_P_IPV6); | ||
241 | skb->priority = sk->sk_priority; | 242 | skb->priority = sk->sk_priority; |
242 | skb->mark = sk->sk_mark; | 243 | skb->mark = sk->sk_mark; |
243 | 244 | ||
@@ -1057,6 +1058,7 @@ static inline int ip6_ufo_append_data(struct sock *sk, | |||
1057 | /* initialize protocol header pointer */ | 1058 | /* initialize protocol header pointer */ |
1058 | skb->transport_header = skb->network_header + fragheaderlen; | 1059 | skb->transport_header = skb->network_header + fragheaderlen; |
1059 | 1060 | ||
1061 | skb->protocol = htons(ETH_P_IPV6); | ||
1060 | skb->ip_summed = CHECKSUM_PARTIAL; | 1062 | skb->ip_summed = CHECKSUM_PARTIAL; |
1061 | skb->csum = 0; | 1063 | skb->csum = 0; |
1062 | } | 1064 | } |
@@ -1359,6 +1361,7 @@ alloc_new_skb: | |||
1359 | /* | 1361 | /* |
1360 | * Fill in the control structures | 1362 | * Fill in the control structures |
1361 | */ | 1363 | */ |
1364 | skb->protocol = htons(ETH_P_IPV6); | ||
1362 | skb->ip_summed = CHECKSUM_NONE; | 1365 | skb->ip_summed = CHECKSUM_NONE; |
1363 | skb->csum = 0; | 1366 | skb->csum = 0; |
1364 | /* reserve for fragmentation and ipsec header */ | 1367 | /* reserve for fragmentation and ipsec header */ |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 1e55866cead7..46ba243605a3 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1027,6 +1027,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
1027 | init_tel_txopt(&opt, encap_limit); | 1027 | init_tel_txopt(&opt, encap_limit); |
1028 | ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); | 1028 | ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); |
1029 | } | 1029 | } |
1030 | |||
1031 | if (likely(!skb->encapsulation)) { | ||
1032 | skb_reset_inner_headers(skb); | ||
1033 | skb->encapsulation = 1; | ||
1034 | } | ||
1035 | |||
1030 | skb_push(skb, sizeof(struct ipv6hdr)); | 1036 | skb_push(skb, sizeof(struct ipv6hdr)); |
1031 | skb_reset_network_header(skb); | 1037 | skb_reset_network_header(skb); |
1032 | ipv6h = ipv6_hdr(skb); | 1038 | ipv6h = ipv6_hdr(skb); |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index c45f7a5c36e9..cdaed47ba932 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -628,6 +628,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, | |||
628 | goto error; | 628 | goto error; |
629 | skb_reserve(skb, hlen); | 629 | skb_reserve(skb, hlen); |
630 | 630 | ||
631 | skb->protocol = htons(ETH_P_IPV6); | ||
631 | skb->priority = sk->sk_priority; | 632 | skb->priority = sk->sk_priority; |
632 | skb->mark = sk->sk_mark; | 633 | skb->mark = sk->sk_mark; |
633 | skb_dst_set(skb, &rt->dst); | 634 | skb_dst_set(skb, &rt->dst); |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index a3437a4cd07e..21b25dd8466b 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -645,11 +645,7 @@ static int ipip_rcv(struct sk_buff *skb) | |||
645 | const struct iphdr *iph; | 645 | const struct iphdr *iph; |
646 | struct ip_tunnel *tunnel; | 646 | struct ip_tunnel *tunnel; |
647 | 647 | ||
648 | if (iptunnel_pull_header(skb, 0, tpi.proto)) | ||
649 | goto drop; | ||
650 | |||
651 | iph = ip_hdr(skb); | 648 | iph = ip_hdr(skb); |
652 | |||
653 | tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, | 649 | tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, |
654 | iph->saddr, iph->daddr); | 650 | iph->saddr, iph->daddr); |
655 | if (tunnel != NULL) { | 651 | if (tunnel != NULL) { |
@@ -659,6 +655,8 @@ static int ipip_rcv(struct sk_buff *skb) | |||
659 | 655 | ||
660 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | 656 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
661 | goto drop; | 657 | goto drop; |
658 | if (iptunnel_pull_header(skb, 0, tpi.proto)) | ||
659 | goto drop; | ||
662 | return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); | 660 | return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); |
663 | } | 661 | } |
664 | 662 | ||
@@ -888,6 +886,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
888 | ttl = iph6->hop_limit; | 886 | ttl = iph6->hop_limit; |
889 | tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); | 887 | tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); |
890 | 888 | ||
889 | if (likely(!skb->encapsulation)) { | ||
890 | skb_reset_inner_headers(skb); | ||
891 | skb->encapsulation = 1; | ||
892 | } | ||
893 | |||
891 | err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr, | 894 | err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr, |
892 | IPPROTO_IPV6, tos, ttl, df); | 895 | IPPROTO_IPV6, tos, ttl, df); |
893 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); | 896 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); |
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 8755a3079d0f..6cd625e37706 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c | |||
@@ -34,8 +34,10 @@ static int xfrm6_local_dontfrag(struct sk_buff *skb) | |||
34 | struct sock *sk = skb->sk; | 34 | struct sock *sk = skb->sk; |
35 | 35 | ||
36 | if (sk) { | 36 | if (sk) { |
37 | proto = sk->sk_protocol; | 37 | if (sk->sk_family != AF_INET6) |
38 | return 0; | ||
38 | 39 | ||
40 | proto = sk->sk_protocol; | ||
39 | if (proto == IPPROTO_UDP || proto == IPPROTO_RAW) | 41 | if (proto == IPPROTO_UDP || proto == IPPROTO_RAW) |
40 | return inet6_sk(sk)->dontfrag; | 42 | return inet6_sk(sk)->dontfrag; |
41 | } | 43 | } |
@@ -54,13 +56,15 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu) | |||
54 | ipv6_local_rxpmtu(sk, &fl6, mtu); | 56 | ipv6_local_rxpmtu(sk, &fl6, mtu); |
55 | } | 57 | } |
56 | 58 | ||
57 | static void xfrm6_local_error(struct sk_buff *skb, u32 mtu) | 59 | void xfrm6_local_error(struct sk_buff *skb, u32 mtu) |
58 | { | 60 | { |
59 | struct flowi6 fl6; | 61 | struct flowi6 fl6; |
62 | const struct ipv6hdr *hdr; | ||
60 | struct sock *sk = skb->sk; | 63 | struct sock *sk = skb->sk; |
61 | 64 | ||
65 | hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); | ||
62 | fl6.fl6_dport = inet_sk(sk)->inet_dport; | 66 | fl6.fl6_dport = inet_sk(sk)->inet_dport; |
63 | fl6.daddr = ipv6_hdr(skb)->daddr; | 67 | fl6.daddr = hdr->daddr; |
64 | 68 | ||
65 | ipv6_local_error(sk, EMSGSIZE, &fl6, mtu); | 69 | ipv6_local_error(sk, EMSGSIZE, &fl6, mtu); |
66 | } | 70 | } |
@@ -80,7 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb) | |||
80 | if (xfrm6_local_dontfrag(skb)) | 84 | if (xfrm6_local_dontfrag(skb)) |
81 | xfrm6_local_rxpmtu(skb, mtu); | 85 | xfrm6_local_rxpmtu(skb, mtu); |
82 | else if (skb->sk) | 86 | else if (skb->sk) |
83 | xfrm6_local_error(skb, mtu); | 87 | xfrm_local_error(skb, mtu); |
84 | else | 88 | else |
85 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 89 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
86 | ret = -EMSGSIZE; | 90 | ret = -EMSGSIZE; |
@@ -136,13 +140,18 @@ static int __xfrm6_output(struct sk_buff *skb) | |||
136 | { | 140 | { |
137 | struct dst_entry *dst = skb_dst(skb); | 141 | struct dst_entry *dst = skb_dst(skb); |
138 | struct xfrm_state *x = dst->xfrm; | 142 | struct xfrm_state *x = dst->xfrm; |
139 | int mtu = ip6_skb_dst_mtu(skb); | 143 | int mtu; |
144 | |||
145 | if (skb->protocol == htons(ETH_P_IPV6)) | ||
146 | mtu = ip6_skb_dst_mtu(skb); | ||
147 | else | ||
148 | mtu = dst_mtu(skb_dst(skb)); | ||
140 | 149 | ||
141 | if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { | 150 | if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { |
142 | xfrm6_local_rxpmtu(skb, mtu); | 151 | xfrm6_local_rxpmtu(skb, mtu); |
143 | return -EMSGSIZE; | 152 | return -EMSGSIZE; |
144 | } else if (!skb->local_df && skb->len > mtu && skb->sk) { | 153 | } else if (!skb->local_df && skb->len > mtu && skb->sk) { |
145 | xfrm6_local_error(skb, mtu); | 154 | xfrm_local_error(skb, mtu); |
146 | return -EMSGSIZE; | 155 | return -EMSGSIZE; |
147 | } | 156 | } |
148 | 157 | ||
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c index d8c70b8efc24..3fc970135fc6 100644 --- a/net/ipv6/xfrm6_state.c +++ b/net/ipv6/xfrm6_state.c | |||
@@ -183,6 +183,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = { | |||
183 | .extract_input = xfrm6_extract_input, | 183 | .extract_input = xfrm6_extract_input, |
184 | .extract_output = xfrm6_extract_output, | 184 | .extract_output = xfrm6_extract_output, |
185 | .transport_finish = xfrm6_transport_finish, | 185 | .transport_finish = xfrm6_transport_finish, |
186 | .local_error = xfrm6_local_error, | ||
186 | }; | 187 | }; |
187 | 188 | ||
188 | int __init xfrm6_state_init(void) | 189 | int __init xfrm6_state_init(void) |
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index ea7b9c2c7e66..2d45643c964e 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -36,7 +36,7 @@ | |||
36 | 36 | ||
37 | static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | 37 | static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, |
38 | const u8 *bssid, const int beacon_int, | 38 | const u8 *bssid, const int beacon_int, |
39 | struct ieee80211_channel *chan, | 39 | struct cfg80211_chan_def *req_chandef, |
40 | const u32 basic_rates, | 40 | const u32 basic_rates, |
41 | const u16 capability, u64 tsf, | 41 | const u16 capability, u64 tsf, |
42 | bool creator) | 42 | bool creator) |
@@ -51,6 +51,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
51 | u32 bss_change; | 51 | u32 bss_change; |
52 | u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; | 52 | u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; |
53 | struct cfg80211_chan_def chandef; | 53 | struct cfg80211_chan_def chandef; |
54 | struct ieee80211_channel *chan; | ||
54 | struct beacon_data *presp; | 55 | struct beacon_data *presp; |
55 | int frame_len; | 56 | int frame_len; |
56 | 57 | ||
@@ -81,7 +82,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
81 | 82 | ||
82 | sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; | 83 | sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; |
83 | 84 | ||
84 | chandef = ifibss->chandef; | 85 | /* make a copy of the chandef, it could be modified below. */ |
86 | chandef = *req_chandef; | ||
87 | chan = chandef.chan; | ||
85 | if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { | 88 | if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { |
86 | chandef.width = NL80211_CHAN_WIDTH_20; | 89 | chandef.width = NL80211_CHAN_WIDTH_20; |
87 | chandef.center_freq1 = chan->center_freq; | 90 | chandef.center_freq1 = chan->center_freq; |
@@ -259,10 +262,12 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
259 | struct cfg80211_bss *cbss = | 262 | struct cfg80211_bss *cbss = |
260 | container_of((void *)bss, struct cfg80211_bss, priv); | 263 | container_of((void *)bss, struct cfg80211_bss, priv); |
261 | struct ieee80211_supported_band *sband; | 264 | struct ieee80211_supported_band *sband; |
265 | struct cfg80211_chan_def chandef; | ||
262 | u32 basic_rates; | 266 | u32 basic_rates; |
263 | int i, j; | 267 | int i, j; |
264 | u16 beacon_int = cbss->beacon_interval; | 268 | u16 beacon_int = cbss->beacon_interval; |
265 | const struct cfg80211_bss_ies *ies; | 269 | const struct cfg80211_bss_ies *ies; |
270 | enum nl80211_channel_type chan_type; | ||
266 | u64 tsf; | 271 | u64 tsf; |
267 | 272 | ||
268 | sdata_assert_lock(sdata); | 273 | sdata_assert_lock(sdata); |
@@ -270,6 +275,26 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
270 | if (beacon_int < 10) | 275 | if (beacon_int < 10) |
271 | beacon_int = 10; | 276 | beacon_int = 10; |
272 | 277 | ||
278 | switch (sdata->u.ibss.chandef.width) { | ||
279 | case NL80211_CHAN_WIDTH_20_NOHT: | ||
280 | case NL80211_CHAN_WIDTH_20: | ||
281 | case NL80211_CHAN_WIDTH_40: | ||
282 | chan_type = cfg80211_get_chandef_type(&sdata->u.ibss.chandef); | ||
283 | cfg80211_chandef_create(&chandef, cbss->channel, chan_type); | ||
284 | break; | ||
285 | case NL80211_CHAN_WIDTH_5: | ||
286 | case NL80211_CHAN_WIDTH_10: | ||
287 | cfg80211_chandef_create(&chandef, cbss->channel, | ||
288 | NL80211_CHAN_WIDTH_20_NOHT); | ||
289 | chandef.width = sdata->u.ibss.chandef.width; | ||
290 | break; | ||
291 | default: | ||
292 | /* fall back to 20 MHz for unsupported modes */ | ||
293 | cfg80211_chandef_create(&chandef, cbss->channel, | ||
294 | NL80211_CHAN_WIDTH_20_NOHT); | ||
295 | break; | ||
296 | } | ||
297 | |||
273 | sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; | 298 | sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; |
274 | 299 | ||
275 | basic_rates = 0; | 300 | basic_rates = 0; |
@@ -294,7 +319,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
294 | 319 | ||
295 | __ieee80211_sta_join_ibss(sdata, cbss->bssid, | 320 | __ieee80211_sta_join_ibss(sdata, cbss->bssid, |
296 | beacon_int, | 321 | beacon_int, |
297 | cbss->channel, | 322 | &chandef, |
298 | basic_rates, | 323 | basic_rates, |
299 | cbss->capability, | 324 | cbss->capability, |
300 | tsf, false); | 325 | tsf, false); |
@@ -736,7 +761,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) | |||
736 | sdata->drop_unencrypted = 0; | 761 | sdata->drop_unencrypted = 0; |
737 | 762 | ||
738 | __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, | 763 | __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, |
739 | ifibss->chandef.chan, ifibss->basic_rates, | 764 | &ifibss->chandef, ifibss->basic_rates, |
740 | capability, 0, true); | 765 | capability, 0, true); |
741 | } | 766 | } |
742 | 767 | ||
@@ -1138,6 +1163,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) | |||
1138 | clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); | 1163 | clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); |
1139 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | | 1164 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | |
1140 | BSS_CHANGED_IBSS); | 1165 | BSS_CHANGED_IBSS); |
1166 | ieee80211_vif_release_channel(sdata); | ||
1141 | synchronize_rcu(); | 1167 | synchronize_rcu(); |
1142 | kfree(presp); | 1168 | kfree(presp); |
1143 | 1169 | ||
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index f5aed963b22e..f3bbea1eb9e7 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c | |||
@@ -828,6 +828,9 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, | |||
828 | if (sband->band != IEEE80211_BAND_2GHZ) | 828 | if (sband->band != IEEE80211_BAND_2GHZ) |
829 | return; | 829 | return; |
830 | 830 | ||
831 | if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES)) | ||
832 | return; | ||
833 | |||
831 | mi->cck_supported = 0; | 834 | mi->cck_supported = 0; |
832 | mi->cck_supported_short = 0; | 835 | mi->cck_supported_short = 0; |
833 | for (i = 0; i < 4; i++) { | 836 | for (i = 0; i < 4; i++) { |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 512718adb0d5..0c741cec4d0d 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -364,7 +364,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops) | |||
364 | EXPORT_SYMBOL(genl_unregister_ops); | 364 | EXPORT_SYMBOL(genl_unregister_ops); |
365 | 365 | ||
366 | /** | 366 | /** |
367 | * genl_register_family - register a generic netlink family | 367 | * __genl_register_family - register a generic netlink family |
368 | * @family: generic netlink family | 368 | * @family: generic netlink family |
369 | * | 369 | * |
370 | * Registers the specified family after validating it first. Only one | 370 | * Registers the specified family after validating it first. Only one |
@@ -374,7 +374,7 @@ EXPORT_SYMBOL(genl_unregister_ops); | |||
374 | * | 374 | * |
375 | * Return 0 on success or a negative error code. | 375 | * Return 0 on success or a negative error code. |
376 | */ | 376 | */ |
377 | int genl_register_family(struct genl_family *family) | 377 | int __genl_register_family(struct genl_family *family) |
378 | { | 378 | { |
379 | int err = -EINVAL; | 379 | int err = -EINVAL; |
380 | 380 | ||
@@ -430,10 +430,10 @@ errout_locked: | |||
430 | errout: | 430 | errout: |
431 | return err; | 431 | return err; |
432 | } | 432 | } |
433 | EXPORT_SYMBOL(genl_register_family); | 433 | EXPORT_SYMBOL(__genl_register_family); |
434 | 434 | ||
435 | /** | 435 | /** |
436 | * genl_register_family_with_ops - register a generic netlink family | 436 | * __genl_register_family_with_ops - register a generic netlink family |
437 | * @family: generic netlink family | 437 | * @family: generic netlink family |
438 | * @ops: operations to be registered | 438 | * @ops: operations to be registered |
439 | * @n_ops: number of elements to register | 439 | * @n_ops: number of elements to register |
@@ -457,12 +457,12 @@ EXPORT_SYMBOL(genl_register_family); | |||
457 | * | 457 | * |
458 | * Return 0 on success or a negative error code. | 458 | * Return 0 on success or a negative error code. |
459 | */ | 459 | */ |
460 | int genl_register_family_with_ops(struct genl_family *family, | 460 | int __genl_register_family_with_ops(struct genl_family *family, |
461 | struct genl_ops *ops, size_t n_ops) | 461 | struct genl_ops *ops, size_t n_ops) |
462 | { | 462 | { |
463 | int err, i; | 463 | int err, i; |
464 | 464 | ||
465 | err = genl_register_family(family); | 465 | err = __genl_register_family(family); |
466 | if (err) | 466 | if (err) |
467 | return err; | 467 | return err; |
468 | 468 | ||
@@ -476,7 +476,7 @@ err_out: | |||
476 | genl_unregister_family(family); | 476 | genl_unregister_family(family); |
477 | return err; | 477 | return err; |
478 | } | 478 | } |
479 | EXPORT_SYMBOL(genl_register_family_with_ops); | 479 | EXPORT_SYMBOL(__genl_register_family_with_ops); |
480 | 480 | ||
481 | /** | 481 | /** |
482 | * genl_unregister_family - unregister generic netlink family | 482 | * genl_unregister_family - unregister generic netlink family |
@@ -544,6 +544,30 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, | |||
544 | } | 544 | } |
545 | EXPORT_SYMBOL(genlmsg_put); | 545 | EXPORT_SYMBOL(genlmsg_put); |
546 | 546 | ||
547 | static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb) | ||
548 | { | ||
549 | struct genl_ops *ops = cb->data; | ||
550 | int rc; | ||
551 | |||
552 | genl_lock(); | ||
553 | rc = ops->dumpit(skb, cb); | ||
554 | genl_unlock(); | ||
555 | return rc; | ||
556 | } | ||
557 | |||
558 | static int genl_lock_done(struct netlink_callback *cb) | ||
559 | { | ||
560 | struct genl_ops *ops = cb->data; | ||
561 | int rc = 0; | ||
562 | |||
563 | if (ops->done) { | ||
564 | genl_lock(); | ||
565 | rc = ops->done(cb); | ||
566 | genl_unlock(); | ||
567 | } | ||
568 | return rc; | ||
569 | } | ||
570 | |||
547 | static int genl_family_rcv_msg(struct genl_family *family, | 571 | static int genl_family_rcv_msg(struct genl_family *family, |
548 | struct sk_buff *skb, | 572 | struct sk_buff *skb, |
549 | struct nlmsghdr *nlh) | 573 | struct nlmsghdr *nlh) |
@@ -572,15 +596,34 @@ static int genl_family_rcv_msg(struct genl_family *family, | |||
572 | return -EPERM; | 596 | return -EPERM; |
573 | 597 | ||
574 | if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { | 598 | if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { |
575 | struct netlink_dump_control c = { | 599 | int rc; |
576 | .dump = ops->dumpit, | ||
577 | .done = ops->done, | ||
578 | }; | ||
579 | 600 | ||
580 | if (ops->dumpit == NULL) | 601 | if (ops->dumpit == NULL) |
581 | return -EOPNOTSUPP; | 602 | return -EOPNOTSUPP; |
582 | 603 | ||
583 | return netlink_dump_start(net->genl_sock, skb, nlh, &c); | 604 | if (!family->parallel_ops) { |
605 | struct netlink_dump_control c = { | ||
606 | .module = family->module, | ||
607 | .data = ops, | ||
608 | .dump = genl_lock_dumpit, | ||
609 | .done = genl_lock_done, | ||
610 | }; | ||
611 | |||
612 | genl_unlock(); | ||
613 | rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c); | ||
614 | genl_lock(); | ||
615 | |||
616 | } else { | ||
617 | struct netlink_dump_control c = { | ||
618 | .module = family->module, | ||
619 | .dump = ops->dumpit, | ||
620 | .done = ops->done, | ||
621 | }; | ||
622 | |||
623 | rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c); | ||
624 | } | ||
625 | |||
626 | return rc; | ||
584 | } | 627 | } |
585 | 628 | ||
586 | if (ops->doit == NULL) | 629 | if (ops->doit == NULL) |
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 75edcfad6e26..1504bb11e4f3 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -207,10 +207,13 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base, | |||
207 | pgfrom_base -= copy; | 207 | pgfrom_base -= copy; |
208 | 208 | ||
209 | vto = kmap_atomic(*pgto); | 209 | vto = kmap_atomic(*pgto); |
210 | vfrom = kmap_atomic(*pgfrom); | 210 | if (*pgto != *pgfrom) { |
211 | memmove(vto + pgto_base, vfrom + pgfrom_base, copy); | 211 | vfrom = kmap_atomic(*pgfrom); |
212 | memcpy(vto + pgto_base, vfrom + pgfrom_base, copy); | ||
213 | kunmap_atomic(vfrom); | ||
214 | } else | ||
215 | memmove(vto + pgto_base, vto + pgfrom_base, copy); | ||
212 | flush_dcache_page(*pgto); | 216 | flush_dcache_page(*pgto); |
213 | kunmap_atomic(vfrom); | ||
214 | kunmap_atomic(vto); | 217 | kunmap_atomic(vto); |
215 | 218 | ||
216 | } while ((len -= copy) != 0); | 219 | } while ((len -= copy) != 0); |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index ce8249c76827..6cc7ddd2fb7c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1257,7 +1257,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf) | |||
1257 | /* Accept only ACK or NACK message */ | 1257 | /* Accept only ACK or NACK message */ |
1258 | if (unlikely(msg_errcode(msg))) { | 1258 | if (unlikely(msg_errcode(msg))) { |
1259 | sock->state = SS_DISCONNECTING; | 1259 | sock->state = SS_DISCONNECTING; |
1260 | sk->sk_err = -ECONNREFUSED; | 1260 | sk->sk_err = ECONNREFUSED; |
1261 | retval = TIPC_OK; | 1261 | retval = TIPC_OK; |
1262 | break; | 1262 | break; |
1263 | } | 1263 | } |
@@ -1268,7 +1268,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf) | |||
1268 | res = auto_connect(sock, msg); | 1268 | res = auto_connect(sock, msg); |
1269 | if (res) { | 1269 | if (res) { |
1270 | sock->state = SS_DISCONNECTING; | 1270 | sock->state = SS_DISCONNECTING; |
1271 | sk->sk_err = res; | 1271 | sk->sk_err = -res; |
1272 | retval = TIPC_OK; | 1272 | retval = TIPC_OK; |
1273 | break; | 1273 | break; |
1274 | } | 1274 | } |
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index eb4a84288648..3bb2cdc13b46 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -214,5 +214,26 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb) | |||
214 | return inner_mode->afinfo->extract_output(x, skb); | 214 | return inner_mode->afinfo->extract_output(x, skb); |
215 | } | 215 | } |
216 | 216 | ||
217 | void xfrm_local_error(struct sk_buff *skb, int mtu) | ||
218 | { | ||
219 | unsigned int proto; | ||
220 | struct xfrm_state_afinfo *afinfo; | ||
221 | |||
222 | if (skb->protocol == htons(ETH_P_IP)) | ||
223 | proto = AF_INET; | ||
224 | else if (skb->protocol == htons(ETH_P_IPV6)) | ||
225 | proto = AF_INET6; | ||
226 | else | ||
227 | return; | ||
228 | |||
229 | afinfo = xfrm_state_get_afinfo(proto); | ||
230 | if (!afinfo) | ||
231 | return; | ||
232 | |||
233 | afinfo->local_error(skb, mtu); | ||
234 | xfrm_state_put_afinfo(afinfo); | ||
235 | } | ||
236 | |||
217 | EXPORT_SYMBOL_GPL(xfrm_output); | 237 | EXPORT_SYMBOL_GPL(xfrm_output); |
218 | EXPORT_SYMBOL_GPL(xfrm_inner_extract_output); | 238 | EXPORT_SYMBOL_GPL(xfrm_inner_extract_output); |
239 | EXPORT_SYMBOL_GPL(xfrm_local_error); | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index e52cab3591dd..f77c371ea72b 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -320,10 +320,8 @@ static void xfrm_queue_purge(struct sk_buff_head *list) | |||
320 | { | 320 | { |
321 | struct sk_buff *skb; | 321 | struct sk_buff *skb; |
322 | 322 | ||
323 | while ((skb = skb_dequeue(list)) != NULL) { | 323 | while ((skb = skb_dequeue(list)) != NULL) |
324 | dev_put(skb->dev); | ||
325 | kfree_skb(skb); | 324 | kfree_skb(skb); |
326 | } | ||
327 | } | 325 | } |
328 | 326 | ||
329 | /* Rule must be locked. Release descentant resources, announce | 327 | /* Rule must be locked. Release descentant resources, announce |
@@ -1758,7 +1756,6 @@ static void xfrm_policy_queue_process(unsigned long arg) | |||
1758 | struct sk_buff *skb; | 1756 | struct sk_buff *skb; |
1759 | struct sock *sk; | 1757 | struct sock *sk; |
1760 | struct dst_entry *dst; | 1758 | struct dst_entry *dst; |
1761 | struct net_device *dev; | ||
1762 | struct xfrm_policy *pol = (struct xfrm_policy *)arg; | 1759 | struct xfrm_policy *pol = (struct xfrm_policy *)arg; |
1763 | struct xfrm_policy_queue *pq = &pol->polq; | 1760 | struct xfrm_policy_queue *pq = &pol->polq; |
1764 | struct flowi fl; | 1761 | struct flowi fl; |
@@ -1805,7 +1802,6 @@ static void xfrm_policy_queue_process(unsigned long arg) | |||
1805 | dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path, | 1802 | dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path, |
1806 | &fl, skb->sk, 0); | 1803 | &fl, skb->sk, 0); |
1807 | if (IS_ERR(dst)) { | 1804 | if (IS_ERR(dst)) { |
1808 | dev_put(skb->dev); | ||
1809 | kfree_skb(skb); | 1805 | kfree_skb(skb); |
1810 | continue; | 1806 | continue; |
1811 | } | 1807 | } |
@@ -1814,9 +1810,7 @@ static void xfrm_policy_queue_process(unsigned long arg) | |||
1814 | skb_dst_drop(skb); | 1810 | skb_dst_drop(skb); |
1815 | skb_dst_set(skb, dst); | 1811 | skb_dst_set(skb, dst); |
1816 | 1812 | ||
1817 | dev = skb->dev; | ||
1818 | err = dst_output(skb); | 1813 | err = dst_output(skb); |
1819 | dev_put(dev); | ||
1820 | } | 1814 | } |
1821 | 1815 | ||
1822 | return; | 1816 | return; |
@@ -1839,7 +1833,6 @@ static int xdst_queue_output(struct sk_buff *skb) | |||
1839 | } | 1833 | } |
1840 | 1834 | ||
1841 | skb_dst_force(skb); | 1835 | skb_dst_force(skb); |
1842 | dev_hold(skb->dev); | ||
1843 | 1836 | ||
1844 | spin_lock_bh(&pq->hold_queue.lock); | 1837 | spin_lock_bh(&pq->hold_queue.lock); |
1845 | 1838 | ||
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 78f66fa92449..54c0acd29468 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -39,9 +39,6 @@ static DEFINE_SPINLOCK(xfrm_state_lock); | |||
39 | 39 | ||
40 | static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; | 40 | static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; |
41 | 41 | ||
42 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); | ||
43 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); | ||
44 | |||
45 | static inline unsigned int xfrm_dst_hash(struct net *net, | 42 | static inline unsigned int xfrm_dst_hash(struct net *net, |
46 | const xfrm_address_t *daddr, | 43 | const xfrm_address_t *daddr, |
47 | const xfrm_address_t *saddr, | 44 | const xfrm_address_t *saddr, |
@@ -1860,7 +1857,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo) | |||
1860 | } | 1857 | } |
1861 | EXPORT_SYMBOL(xfrm_state_unregister_afinfo); | 1858 | EXPORT_SYMBOL(xfrm_state_unregister_afinfo); |
1862 | 1859 | ||
1863 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) | 1860 | struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) |
1864 | { | 1861 | { |
1865 | struct xfrm_state_afinfo *afinfo; | 1862 | struct xfrm_state_afinfo *afinfo; |
1866 | if (unlikely(family >= NPROTO)) | 1863 | if (unlikely(family >= NPROTO)) |
@@ -1872,7 +1869,7 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) | |||
1872 | return afinfo; | 1869 | return afinfo; |
1873 | } | 1870 | } |
1874 | 1871 | ||
1875 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) | 1872 | void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) |
1876 | { | 1873 | { |
1877 | rcu_read_unlock(); | 1874 | rcu_read_unlock(); |
1878 | } | 1875 | } |
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c index 103b33373fd4..6effe99bbb9c 100644 --- a/sound/isa/opti9xx/opti92x-ad1848.c +++ b/sound/isa/opti9xx/opti92x-ad1848.c | |||
@@ -173,11 +173,7 @@ MODULE_DEVICE_TABLE(pnp_card, snd_opti9xx_pnpids); | |||
173 | 173 | ||
174 | #endif /* CONFIG_PNP */ | 174 | #endif /* CONFIG_PNP */ |
175 | 175 | ||
176 | #ifdef OPTi93X | 176 | #define DEV_NAME KBUILD_MODNAME |
177 | #define DEV_NAME "opti93x" | ||
178 | #else | ||
179 | #define DEV_NAME "opti92x" | ||
180 | #endif | ||
181 | 177 | ||
182 | static char * snd_opti9xx_names[] = { | 178 | static char * snd_opti9xx_names[] = { |
183 | "unknown", | 179 | "unknown", |
@@ -1167,7 +1163,7 @@ static int snd_opti9xx_pnp_resume(struct pnp_card_link *pcard) | |||
1167 | 1163 | ||
1168 | static struct pnp_card_driver opti9xx_pnpc_driver = { | 1164 | static struct pnp_card_driver opti9xx_pnpc_driver = { |
1169 | .flags = PNP_DRIVER_RES_DISABLE, | 1165 | .flags = PNP_DRIVER_RES_DISABLE, |
1170 | .name = "opti9xx", | 1166 | .name = DEV_NAME, |
1171 | .id_table = snd_opti9xx_pnpids, | 1167 | .id_table = snd_opti9xx_pnpids, |
1172 | .probe = snd_opti9xx_pnp_probe, | 1168 | .probe = snd_opti9xx_pnp_probe, |
1173 | .remove = snd_opti9xx_pnp_remove, | 1169 | .remove = snd_opti9xx_pnp_remove, |
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 030ca8652a1c..9f3586276871 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -1781,6 +1781,9 @@ static int generic_hdmi_build_controls(struct hda_codec *codec) | |||
1781 | struct snd_pcm_chmap *chmap; | 1781 | struct snd_pcm_chmap *chmap; |
1782 | struct snd_kcontrol *kctl; | 1782 | struct snd_kcontrol *kctl; |
1783 | int i; | 1783 | int i; |
1784 | |||
1785 | if (!codec->pcm_info[pin_idx].pcm) | ||
1786 | break; | ||
1784 | err = snd_pcm_add_chmap_ctls(codec->pcm_info[pin_idx].pcm, | 1787 | err = snd_pcm_add_chmap_ctls(codec->pcm_info[pin_idx].pcm, |
1785 | SNDRV_PCM_STREAM_PLAYBACK, | 1788 | SNDRV_PCM_STREAM_PLAYBACK, |
1786 | NULL, 0, pin_idx, &chmap); | 1789 | NULL, 0, pin_idx, &chmap); |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index f303cd898515..389db4c2801b 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -4336,6 +4336,7 @@ static const struct hda_fixup alc662_fixups[] = { | |||
4336 | 4336 | ||
4337 | static const struct snd_pci_quirk alc662_fixup_tbl[] = { | 4337 | static const struct snd_pci_quirk alc662_fixup_tbl[] = { |
4338 | SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2), | 4338 | SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2), |
4339 | SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC), | ||
4339 | SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), | 4340 | SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), |
4340 | SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), | 4341 | SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), |
4341 | SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), | 4342 | SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), |