author	David S. Miller <davem@davemloft.net>	2014-01-18 03:55:41 -0500
committer	David S. Miller <davem@davemloft.net>	2014-01-18 03:55:41 -0500
commit	41804420586ab41049a14ab7ef04eaa2280b8647 (patch)
tree	f05b39b0c5f1ed229b073f7a993ef717f9f1a363
parent	7b1e46c5a265b142dd05ff3463fa3e0a1f4e4172 (diff)
parent	7d0d46da750a252371cb747b48ddda27d1047881 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
	net/ipv4/tcp_metrics.c

Overlapping changes between the "don't create two tcp metrics objects
with the same key" race fix in net and the addition of the destination
address in the lookup key in net-next.

Minor overlapping changes in bnx2x driver.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	MAINTAINERS	| 3
-rw-r--r--	Makefile	| 2
-rw-r--r--	arch/arm/kernel/devtree.c	| 2
-rw-r--r--	arch/arm/kernel/perf_event_cpu.c	| 2
-rw-r--r--	arch/arm/kernel/traps.c	| 5
-rw-r--r--	arch/arm/mach-highbank/highbank.c	| 1
-rw-r--r--	arch/arm/mach-omap2/omap4-common.c	| 1
-rw-r--r--	arch/arm/mm/init.c	| 2
-rw-r--r--	arch/arm/net/bpf_jit_32.c	| 6
-rw-r--r--	arch/arm64/include/asm/io.h	| 2
-rw-r--r--	arch/mips/include/asm/cacheops.h	| 2
-rw-r--r--	arch/mips/include/asm/r4kcache.h	| 51
-rw-r--r--	arch/mips/mm/c-r4k.c	| 11
-rw-r--r--	arch/parisc/include/uapi/asm/socket.h	| 2
-rw-r--r--	arch/powerpc/kernel/prom_init.c	| 22
-rw-r--r--	arch/powerpc/net/bpf_jit_comp.c	| 7
-rw-r--r--	arch/s390/net/bpf_jit_comp.c	| 29
-rw-r--r--	arch/sparc/net/bpf_jit_comp.c	| 17
-rw-r--r--	arch/x86/include/asm/fpu-internal.h	| 13
-rw-r--r--	arch/x86/kernel/entry_32.S	| 4
-rw-r--r--	arch/x86/kernel/entry_64.S	| 2
-rw-r--r--	arch/x86/kvm/lapic.c	| 2
-rw-r--r--	arch/x86/net/bpf_jit_comp.c	| 14
-rw-r--r--	arch/x86/vdso/vclock_gettime.c	| 8
-rw-r--r--	drivers/block/null_blk.c	| 10
-rw-r--r--	drivers/clocksource/cadence_ttc_timer.c	| 21
-rw-r--r--	drivers/gpu/drm/drm_modes.c	| 2
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	| 2
-rw-r--r--	drivers/gpu/drm/i915/intel_ddi.c	| 8
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	| 4
-rw-r--r--	drivers/gpu/drm/nouveau/core/include/subdev/i2c.h	| 2
-rw-r--r--	drivers/gpu/drm/nouveau/core/include/subdev/instmem.h	| 7
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/i2c/base.c	| 4
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/therm/ic.c	| 10
-rw-r--r--	drivers/gpu/drm/nouveau/dispnv04/dfp.c	| 2
-rw-r--r--	drivers/gpu/drm/nouveau/dispnv04/tvnv04.c	| 2
-rw-r--r--	drivers/hwmon/coretemp.c	| 2
-rw-r--r--	drivers/md/md.c	| 18
-rw-r--r--	drivers/md/md.h	| 3
-rw-r--r--	drivers/md/raid1.c	| 3
-rw-r--r--	drivers/md/raid10.c	| 12
-rw-r--r--	drivers/md/raid5.c	| 7
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c	| 30
-rw-r--r--	drivers/net/ethernet/emulex/benet/be_main.c	| 11
-rw-r--r--	drivers/net/ethernet/intel/e1000e/netdev.c	| 8
-rw-r--r--	drivers/net/ethernet/qlogic/qlge/qlge_main.c	| 2
-rw-r--r--	drivers/net/ethernet/via/via-rhine.c	| 1
-rw-r--r--	drivers/net/usb/dm9601.c	| 12
-rw-r--r--	fs/dcache.c	| 7
-rw-r--r--	fs/fs-writeback.c	| 15
-rw-r--r--	fs/namespace.c	| 2
-rw-r--r--	fs/nilfs2/segment.c	| 10
-rw-r--r--	include/linux/crash_dump.h	| 2
-rw-r--r--	include/linux/i2c.h	| 2
-rw-r--r--	include/linux/seqlock.h	| 27
-rw-r--r--	include/net/if_inet6.h	| 1
-rw-r--r--	kernel/fork.c	| 2
-rw-r--r--	kernel/sched/fair.c	| 2
-rw-r--r--	kernel/time/sched_clock.c	| 6
-rw-r--r--	lib/percpu_counter.c	| 4
-rw-r--r--	mm/huge_memory.c	| 6
-rw-r--r--	mm/util.c	| 5
-rw-r--r--	net/batman-adv/main.c	| 2
-rw-r--r--	net/core/filter.c	| 30
-rw-r--r--	net/ieee802154/nl-phy.c	| 6
-rw-r--r--	net/ipv4/ipmr.c	| 7
-rw-r--r--	net/ipv4/tcp_metrics.c	| 52
-rw-r--r--	net/ipv6/addrconf.c	| 38
-rw-r--r--	net/ipv6/ip6mr.c	| 7
-rw-r--r--	net/rds/ib_recv.c	| 7
-rw-r--r--	security/selinux/hooks.c	| 20
-rw-r--r--	security/selinux/include/objsec.h	| 5
72 files changed, 392 insertions, 266 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index b358a3f0cacd..a723219072d9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9230,6 +9230,7 @@ F: include/media/videobuf2-*
9230 9230
9231VIRTIO CONSOLE DRIVER 9231VIRTIO CONSOLE DRIVER
9232M: Amit Shah <amit.shah@redhat.com> 9232M: Amit Shah <amit.shah@redhat.com>
9233L: virtio-dev@lists.oasis-open.org
9233L: virtualization@lists.linux-foundation.org 9234L: virtualization@lists.linux-foundation.org
9234S: Maintained 9235S: Maintained
9235F: drivers/char/virtio_console.c 9236F: drivers/char/virtio_console.c
@@ -9239,6 +9240,7 @@ F: include/uapi/linux/virtio_console.h
9239VIRTIO CORE, NET AND BLOCK DRIVERS 9240VIRTIO CORE, NET AND BLOCK DRIVERS
9240M: Rusty Russell <rusty@rustcorp.com.au> 9241M: Rusty Russell <rusty@rustcorp.com.au>
9241M: "Michael S. Tsirkin" <mst@redhat.com> 9242M: "Michael S. Tsirkin" <mst@redhat.com>
9243L: virtio-dev@lists.oasis-open.org
9242L: virtualization@lists.linux-foundation.org 9244L: virtualization@lists.linux-foundation.org
9243S: Maintained 9245S: Maintained
9244F: drivers/virtio/ 9246F: drivers/virtio/
@@ -9251,6 +9253,7 @@ F: include/uapi/linux/virtio_*.h
9251VIRTIO HOST (VHOST) 9253VIRTIO HOST (VHOST)
9252M: "Michael S. Tsirkin" <mst@redhat.com> 9254M: "Michael S. Tsirkin" <mst@redhat.com>
9253L: kvm@vger.kernel.org 9255L: kvm@vger.kernel.org
9256L: virtio-dev@lists.oasis-open.org
9254L: virtualization@lists.linux-foundation.org 9257L: virtualization@lists.linux-foundation.org
9255L: netdev@vger.kernel.org 9258L: netdev@vger.kernel.org
9256S: Maintained 9259S: Maintained
diff --git a/Makefile b/Makefile
index ae1a55ad687c..eeec740776f3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 13 2PATCHLEVEL = 13
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc7 4EXTRAVERSION = -rc8
5NAME = One Giant Leap for Frogkind 5NAME = One Giant Leap for Frogkind
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 739c3dfc1da2..34d5fd585bbb 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -171,7 +171,7 @@ void __init arm_dt_init_cpu_maps(void)
171 171
172bool arch_match_cpu_phys_id(int cpu, u64 phys_id) 172bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
173{ 173{
174 return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu); 174 return phys_id == cpu_logical_map(cpu);
175} 175}
176 176
177static const void * __init arch_get_next_mach(const char *const **match) 177static const void * __init arch_get_next_mach(const char *const **match)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index d85055cd24ba..20d553c9f5e2 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -254,7 +254,7 @@ static int probe_current_pmu(struct arm_pmu *pmu)
254static int cpu_pmu_device_probe(struct platform_device *pdev) 254static int cpu_pmu_device_probe(struct platform_device *pdev)
255{ 255{
256 const struct of_device_id *of_id; 256 const struct of_device_id *of_id;
257 int (*init_fn)(struct arm_pmu *); 257 const int (*init_fn)(struct arm_pmu *);
258 struct device_node *node = pdev->dev.of_node; 258 struct device_node *node = pdev->dev.of_node;
259 struct arm_pmu *pmu; 259 struct arm_pmu *pmu;
260 int ret = -ENODEV; 260 int ret = -ENODEV;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6eda3bf85c52..4636d56af2db 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -431,9 +431,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
431 instr2 = __mem_to_opcode_thumb16(instr2); 431 instr2 = __mem_to_opcode_thumb16(instr2);
432 instr = __opcode_thumb32_compose(instr, instr2); 432 instr = __opcode_thumb32_compose(instr, instr2);
433 } 433 }
434 } else if (get_user(instr, (u32 __user *)pc)) { 434 } else {
435 if (get_user(instr, (u32 __user *)pc))
436 goto die_sig;
435 instr = __mem_to_opcode_arm(instr); 437 instr = __mem_to_opcode_arm(instr);
436 goto die_sig;
437 } 438 }
438 439
439 if (call_undef_hook(regs, instr) == 0) 440 if (call_undef_hook(regs, instr) == 0)
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index bd3bf66ce344..c7de89b263dd 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -53,6 +53,7 @@ static void __init highbank_scu_map_io(void)
53 53
54static void highbank_l2x0_disable(void) 54static void highbank_l2x0_disable(void)
55{ 55{
56 outer_flush_all();
56 /* Disable PL310 L2 Cache controller */ 57 /* Disable PL310 L2 Cache controller */
57 highbank_smc1(0x102, 0x0); 58 highbank_smc1(0x102, 0x0);
58} 59}
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index b39efd46abf9..c0ab9b26be3d 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -162,6 +162,7 @@ void __iomem *omap4_get_l2cache_base(void)
162 162
163static void omap4_l2x0_disable(void) 163static void omap4_l2x0_disable(void)
164{ 164{
165 outer_flush_all();
165 /* Disable PL310 L2 Cache controller */ 166 /* Disable PL310 L2 Cache controller */
166 omap_smc1(0x102, 0x0); 167 omap_smc1(0x102, 0x0);
167} 168}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 1f7b19a47060..3e8f106ee5fe 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
229#ifdef CONFIG_ZONE_DMA 229#ifdef CONFIG_ZONE_DMA
230 if (mdesc->dma_zone_size) { 230 if (mdesc->dma_zone_size) {
231 arm_dma_zone_size = mdesc->dma_zone_size; 231 arm_dma_zone_size = mdesc->dma_zone_size;
232 arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1; 232 arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
233 } else 233 } else
234 arm_dma_limit = 0xffffffff; 234 arm_dma_limit = 0xffffffff;
235 arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT; 235 arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 9ed155ad0f97..271b5e971568 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -641,10 +641,10 @@ load_ind:
641 emit(ARM_MUL(r_A, r_A, r_X), ctx); 641 emit(ARM_MUL(r_A, r_A, r_X), ctx);
642 break; 642 break;
643 case BPF_S_ALU_DIV_K: 643 case BPF_S_ALU_DIV_K:
644 /* current k == reciprocal_value(userspace k) */ 644 if (k == 1)
645 break;
645 emit_mov_i(r_scratch, k, ctx); 646 emit_mov_i(r_scratch, k, ctx);
646 /* A = top 32 bits of the product */ 647 emit_udiv(r_A, r_A, r_scratch, ctx);
647 emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
648 break; 648 break;
649 case BPF_S_ALU_DIV_X: 649 case BPF_S_ALU_DIV_X:
650 update_on_xread(ctx); 650 update_on_xread(ctx);
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 572769727227..4cc813eddacb 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -229,7 +229,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
229extern void __iounmap(volatile void __iomem *addr); 229extern void __iounmap(volatile void __iomem *addr);
230extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); 230extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
231 231
232#define PROT_DEFAULT (pgprot_default | PTE_DIRTY) 232#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
233#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) 233#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
234#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) 234#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
235#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) 235#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h
index c75025f27c20..06b9bc7ea14b 100644
--- a/arch/mips/include/asm/cacheops.h
+++ b/arch/mips/include/asm/cacheops.h
@@ -83,6 +83,6 @@
83/* 83/*
84 * Loongson2-specific cacheops 84 * Loongson2-specific cacheops
85 */ 85 */
86#define Hit_Invalidate_I_Loongson23 0x00 86#define Hit_Invalidate_I_Loongson2 0x00
87 87
88#endif /* __ASM_CACHEOPS_H */ 88#endif /* __ASM_CACHEOPS_H */
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 34d1a1917125..c84caddb8bde 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -165,7 +165,7 @@ static inline void flush_icache_line(unsigned long addr)
165 __iflush_prologue 165 __iflush_prologue
166 switch (boot_cpu_type()) { 166 switch (boot_cpu_type()) {
167 case CPU_LOONGSON2: 167 case CPU_LOONGSON2:
168 cache_op(Hit_Invalidate_I_Loongson23, addr); 168 cache_op(Hit_Invalidate_I_Loongson2, addr);
169 break; 169 break;
170 170
171 default: 171 default:
@@ -219,7 +219,7 @@ static inline void protected_flush_icache_line(unsigned long addr)
219{ 219{
220 switch (boot_cpu_type()) { 220 switch (boot_cpu_type()) {
221 case CPU_LOONGSON2: 221 case CPU_LOONGSON2:
222 protected_cache_op(Hit_Invalidate_I_Loongson23, addr); 222 protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
223 break; 223 break;
224 224
225 default: 225 default:
@@ -357,8 +357,8 @@ static inline void invalidate_tcache_page(unsigned long addr)
357 "i" (op)); 357 "i" (op));
358 358
359/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */ 359/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
360#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \ 360#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
361static inline void blast_##pfx##cache##lsize(void) \ 361static inline void extra##blast_##pfx##cache##lsize(void) \
362{ \ 362{ \
363 unsigned long start = INDEX_BASE; \ 363 unsigned long start = INDEX_BASE; \
364 unsigned long end = start + current_cpu_data.desc.waysize; \ 364 unsigned long end = start + current_cpu_data.desc.waysize; \
@@ -376,7 +376,7 @@ static inline void blast_##pfx##cache##lsize(void) \
376 __##pfx##flush_epilogue \ 376 __##pfx##flush_epilogue \
377} \ 377} \
378 \ 378 \
379static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ 379static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
380{ \ 380{ \
381 unsigned long start = page; \ 381 unsigned long start = page; \
382 unsigned long end = page + PAGE_SIZE; \ 382 unsigned long end = page + PAGE_SIZE; \
@@ -391,7 +391,7 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
391 __##pfx##flush_epilogue \ 391 __##pfx##flush_epilogue \
392} \ 392} \
393 \ 393 \
394static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ 394static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
395{ \ 395{ \
396 unsigned long indexmask = current_cpu_data.desc.waysize - 1; \ 396 unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
397 unsigned long start = INDEX_BASE + (page & indexmask); \ 397 unsigned long start = INDEX_BASE + (page & indexmask); \
@@ -410,23 +410,24 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page)
410 __##pfx##flush_epilogue \ 410 __##pfx##flush_epilogue \
411} 411}
412 412
413__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16) 413__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
414__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16) 414__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
415__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16) 415__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
416__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32) 416__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
417__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32) 417__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
418__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32) 418__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
419__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64) 419__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
420__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64) 420__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
421__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) 421__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
422__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) 422__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
423 423__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
424__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16) 424
425__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32) 425__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
426__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16) 426__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
427__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32) 427__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
428__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64) 428__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
429__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128) 429__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
430__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
430 431
431/* build blast_xxx_range, protected_blast_xxx_range */ 432/* build blast_xxx_range, protected_blast_xxx_range */
432#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \ 433#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
@@ -452,8 +453,8 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
452__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, ) 453__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
453__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, ) 454__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
454__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, ) 455__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
455__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \ 456__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
456 protected_, loongson23_) 457 protected_, loongson2_)
457__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , ) 458__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
458__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , ) 459__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
459/* blast_inv_dcache_range */ 460/* blast_inv_dcache_range */
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 62ffd20ea869..49e572d879e1 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -237,6 +237,8 @@ static void r4k_blast_icache_page_setup(void)
237 r4k_blast_icache_page = (void *)cache_noop; 237 r4k_blast_icache_page = (void *)cache_noop;
238 else if (ic_lsize == 16) 238 else if (ic_lsize == 16)
239 r4k_blast_icache_page = blast_icache16_page; 239 r4k_blast_icache_page = blast_icache16_page;
240 else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
241 r4k_blast_icache_page = loongson2_blast_icache32_page;
240 else if (ic_lsize == 32) 242 else if (ic_lsize == 32)
241 r4k_blast_icache_page = blast_icache32_page; 243 r4k_blast_icache_page = blast_icache32_page;
242 else if (ic_lsize == 64) 244 else if (ic_lsize == 64)
@@ -261,6 +263,9 @@ static void r4k_blast_icache_page_indexed_setup(void)
261 else if (TX49XX_ICACHE_INDEX_INV_WAR) 263 else if (TX49XX_ICACHE_INDEX_INV_WAR)
262 r4k_blast_icache_page_indexed = 264 r4k_blast_icache_page_indexed =
263 tx49_blast_icache32_page_indexed; 265 tx49_blast_icache32_page_indexed;
266 else if (current_cpu_type() == CPU_LOONGSON2)
267 r4k_blast_icache_page_indexed =
268 loongson2_blast_icache32_page_indexed;
264 else 269 else
265 r4k_blast_icache_page_indexed = 270 r4k_blast_icache_page_indexed =
266 blast_icache32_page_indexed; 271 blast_icache32_page_indexed;
@@ -284,6 +289,8 @@ static void r4k_blast_icache_setup(void)
284 r4k_blast_icache = blast_r4600_v1_icache32; 289 r4k_blast_icache = blast_r4600_v1_icache32;
285 else if (TX49XX_ICACHE_INDEX_INV_WAR) 290 else if (TX49XX_ICACHE_INDEX_INV_WAR)
286 r4k_blast_icache = tx49_blast_icache32; 291 r4k_blast_icache = tx49_blast_icache32;
292 else if (current_cpu_type() == CPU_LOONGSON2)
293 r4k_blast_icache = loongson2_blast_icache32;
287 else 294 else
288 r4k_blast_icache = blast_icache32; 295 r4k_blast_icache = blast_icache32;
289 } else if (ic_lsize == 64) 296 } else if (ic_lsize == 64)
@@ -580,11 +587,11 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
580 else { 587 else {
581 switch (boot_cpu_type()) { 588 switch (boot_cpu_type()) {
582 case CPU_LOONGSON2: 589 case CPU_LOONGSON2:
583 protected_blast_icache_range(start, end); 590 protected_loongson2_blast_icache_range(start, end);
584 break; 591 break;
585 592
586 default: 593 default:
587 protected_loongson23_blast_icache_range(start, end); 594 protected_blast_icache_range(start, end);
588 break; 595 break;
589 } 596 }
590 } 597 }
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index f33113a6141e..70b3674dac4e 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -75,6 +75,6 @@
75 75
76#define SO_BUSY_POLL 0x4027 76#define SO_BUSY_POLL 0x4027
77 77
78#define SO_MAX_PACING_RATE 0x4048 78#define SO_MAX_PACING_RATE 0x4028
79 79
80#endif /* _UAPI_ASM_SOCKET_H */ 80#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index cb64a6e1dc51..078145acf7fb 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1986,19 +1986,23 @@ static void __init prom_init_stdout(void)
1986 /* Get the full OF pathname of the stdout device */ 1986 /* Get the full OF pathname of the stdout device */
1987 memset(path, 0, 256); 1987 memset(path, 0, 256);
1988 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); 1988 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
1989 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
1990 val = cpu_to_be32(stdout_node);
1991 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
1992 &val, sizeof(val));
1993 prom_printf("OF stdout device is: %s\n", of_stdout_device); 1989 prom_printf("OF stdout device is: %s\n", of_stdout_device);
1994 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", 1990 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
1995 path, strlen(path) + 1); 1991 path, strlen(path) + 1);
1996 1992
1997 /* If it's a display, note it */ 1993 /* instance-to-package fails on PA-Semi */
1998 memset(type, 0, sizeof(type)); 1994 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
1999 prom_getprop(stdout_node, "device_type", type, sizeof(type)); 1995 if (stdout_node != PROM_ERROR) {
2000 if (strcmp(type, "display") == 0) 1996 val = cpu_to_be32(stdout_node);
2001 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); 1997 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
1998 &val, sizeof(val));
1999
2000 /* If it's a display, note it */
2001 memset(type, 0, sizeof(type));
2002 prom_getprop(stdout_node, "device_type", type, sizeof(type));
2003 if (strcmp(type, "display") == 0)
2004 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2005 }
2002} 2006}
2003 2007
2004static int __init prom_find_machine_type(void) 2008static int __init prom_find_machine_type(void)
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index ac3c2a10dafd..555034f8505e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -223,10 +223,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
223 } 223 }
224 PPC_DIVWU(r_A, r_A, r_X); 224 PPC_DIVWU(r_A, r_A, r_X);
225 break; 225 break;
226 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ 226 case BPF_S_ALU_DIV_K: /* A /= K */
227 if (K == 1)
228 break;
227 PPC_LI32(r_scratch1, K); 229 PPC_LI32(r_scratch1, K);
228 /* Top 32 bits of 64bit result -> A */ 230 PPC_DIVWU(r_A, r_A, r_scratch1);
229 PPC_MULHWU(r_A, r_A, r_scratch1);
230 break; 231 break;
231 case BPF_S_ALU_AND_X: 232 case BPF_S_ALU_AND_X:
232 ctx->seen |= SEEN_XREG; 233 ctx->seen |= SEEN_XREG;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 16871da37371..708d60e40066 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -368,14 +368,16 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
368 EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg)); 368 EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
369 /* lhi %r4,0 */ 369 /* lhi %r4,0 */
370 EMIT4(0xa7480000); 370 EMIT4(0xa7480000);
371 /* dr %r4,%r12 */ 371 /* dlr %r4,%r12 */
372 EMIT2(0x1d4c); 372 EMIT4(0xb997004c);
373 break; 373 break;
374 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */ 374 case BPF_S_ALU_DIV_K: /* A /= K */
375 /* m %r4,<d(K)>(%r13) */ 375 if (K == 1)
376 EMIT4_DISP(0x5c40d000, EMIT_CONST(K)); 376 break;
377 /* lr %r5,%r4 */ 377 /* lhi %r4,0 */
378 EMIT2(0x1854); 378 EMIT4(0xa7480000);
379 /* dl %r4,<d(K)>(%r13) */
380 EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
379 break; 381 break;
380 case BPF_S_ALU_MOD_X: /* A %= X */ 382 case BPF_S_ALU_MOD_X: /* A %= X */
381 jit->seen |= SEEN_XREG | SEEN_RET0; 383 jit->seen |= SEEN_XREG | SEEN_RET0;
@@ -385,16 +387,21 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
385 EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg)); 387 EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
386 /* lhi %r4,0 */ 388 /* lhi %r4,0 */
387 EMIT4(0xa7480000); 389 EMIT4(0xa7480000);
388 /* dr %r4,%r12 */ 390 /* dlr %r4,%r12 */
389 EMIT2(0x1d4c); 391 EMIT4(0xb997004c);
390 /* lr %r5,%r4 */ 392 /* lr %r5,%r4 */
391 EMIT2(0x1854); 393 EMIT2(0x1854);
392 break; 394 break;
393 case BPF_S_ALU_MOD_K: /* A %= K */ 395 case BPF_S_ALU_MOD_K: /* A %= K */
396 if (K == 1) {
397 /* lhi %r5,0 */
398 EMIT4(0xa7580000);
399 break;
400 }
394 /* lhi %r4,0 */ 401 /* lhi %r4,0 */
395 EMIT4(0xa7480000); 402 EMIT4(0xa7480000);
396 /* d %r4,<d(K)>(%r13) */ 403 /* dl %r4,<d(K)>(%r13) */
397 EMIT4_DISP(0x5d40d000, EMIT_CONST(K)); 404 EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
398 /* lr %r5,%r4 */ 405 /* lr %r5,%r4 */
399 EMIT2(0x1854); 406 EMIT2(0x1854);
400 break; 407 break;
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 218b6b23c378..01fe9946d388 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -497,9 +497,20 @@ void bpf_jit_compile(struct sk_filter *fp)
497 case BPF_S_ALU_MUL_K: /* A *= K */ 497 case BPF_S_ALU_MUL_K: /* A *= K */
498 emit_alu_K(MUL, K); 498 emit_alu_K(MUL, K);
499 break; 499 break;
500 case BPF_S_ALU_DIV_K: /* A /= K */ 500 case BPF_S_ALU_DIV_K: /* A /= K with K != 0*/
501 emit_alu_K(MUL, K); 501 if (K == 1)
502 emit_read_y(r_A); 502 break;
503 emit_write_y(G0);
504#ifdef CONFIG_SPARC32
505 /* The Sparc v8 architecture requires
506 * three instructions between a %y
507 * register write and the first use.
508 */
509 emit_nop();
510 emit_nop();
511 emit_nop();
512#endif
513 emit_alu_K(DIV, K);
503 break; 514 break;
504 case BPF_S_ALU_DIV_X: /* A /= X; */ 515 case BPF_S_ALU_DIV_X: /* A /= X; */
505 emit_cmpi(r_X, 0); 516 emit_cmpi(r_X, 0);
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index c49a613c6452..cea1c76d49bf 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -293,12 +293,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
293 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception 293 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
294 is pending. Clear the x87 state here by setting it to fixed 294 is pending. Clear the x87 state here by setting it to fixed
295 values. "m" is a random variable that should be in L1 */ 295 values. "m" is a random variable that should be in L1 */
296 alternative_input( 296 if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
297 ASM_NOP8 ASM_NOP2, 297 asm volatile(
298 "emms\n\t" /* clear stack tags */ 298 "fnclex\n\t"
299 "fildl %P[addr]", /* set F?P to defined value */ 299 "emms\n\t"
300 X86_FEATURE_FXSAVE_LEAK, 300 "fildl %P[addr]" /* set F?P to defined value */
301 [addr] "m" (tsk->thread.fpu.has_fpu)); 301 : : [addr] "m" (tsk->thread.fpu.has_fpu));
302 }
302 303
303 return fpu_restore_checking(&tsk->thread.fpu); 304 return fpu_restore_checking(&tsk->thread.fpu);
304} 305}
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 51e2988c5728..a2a4f4697889 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1082,7 +1082,7 @@ ENTRY(ftrace_caller)
1082 pushl $0 /* Pass NULL as regs pointer */ 1082 pushl $0 /* Pass NULL as regs pointer */
1083 movl 4*4(%esp), %eax 1083 movl 4*4(%esp), %eax
1084 movl 0x4(%ebp), %edx 1084 movl 0x4(%ebp), %edx
1085 leal function_trace_op, %ecx 1085 movl function_trace_op, %ecx
1086 subl $MCOUNT_INSN_SIZE, %eax 1086 subl $MCOUNT_INSN_SIZE, %eax
1087 1087
1088.globl ftrace_call 1088.globl ftrace_call
@@ -1140,7 +1140,7 @@ ENTRY(ftrace_regs_caller)
1140 movl 12*4(%esp), %eax /* Load ip (1st parameter) */ 1140 movl 12*4(%esp), %eax /* Load ip (1st parameter) */
1141 subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ 1141 subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
1142 movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ 1142 movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
1143 leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ 1143 movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
1144 pushl %esp /* Save pt_regs as 4th parameter */ 1144 pushl %esp /* Save pt_regs as 4th parameter */
1145 1145
1146GLOBAL(ftrace_regs_call) 1146GLOBAL(ftrace_regs_call)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e21b0785a85b..1e96c3628bf2 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -88,7 +88,7 @@ END(function_hook)
88 MCOUNT_SAVE_FRAME \skip 88 MCOUNT_SAVE_FRAME \skip
89 89
90 /* Load the ftrace_ops into the 3rd parameter */ 90 /* Load the ftrace_ops into the 3rd parameter */
91 leaq function_trace_op, %rdx 91 movq function_trace_op(%rip), %rdx
92 92
93 /* Load ip into the first parameter */ 93 /* Load ip into the first parameter */
94 movq RIP(%rsp), %rdi 94 movq RIP(%rsp), %rdi
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 1673940cf9c3..775702f649ca 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1355,7 +1355,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
1355 vcpu->arch.apic_base = value; 1355 vcpu->arch.apic_base = value;
1356 1356
1357 /* update jump label if enable bit changes */ 1357 /* update jump label if enable bit changes */
1358 if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) { 1358 if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
1359 if (value & MSR_IA32_APICBASE_ENABLE) 1359 if (value & MSR_IA32_APICBASE_ENABLE)
1360 static_key_slow_dec_deferred(&apic_hw_disabled); 1360 static_key_slow_dec_deferred(&apic_hw_disabled);
1361 else 1361 else
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 26328e800869..4ed75dd81d05 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -359,15 +359,21 @@ void bpf_jit_compile(struct sk_filter *fp)
359 EMIT2(0x89, 0xd0); /* mov %edx,%eax */ 359 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
360 break; 360 break;
361 case BPF_S_ALU_MOD_K: /* A %= K; */ 361 case BPF_S_ALU_MOD_K: /* A %= K; */
362 if (K == 1) {
363 CLEAR_A();
364 break;
365 }
362 EMIT2(0x31, 0xd2); /* xor %edx,%edx */ 366 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
363 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ 367 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
364 EMIT2(0xf7, 0xf1); /* div %ecx */ 368 EMIT2(0xf7, 0xf1); /* div %ecx */
365 EMIT2(0x89, 0xd0); /* mov %edx,%eax */ 369 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
366 break; 370 break;
367 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ 371 case BPF_S_ALU_DIV_K: /* A /= K */
368 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */ 372 if (K == 1)
369 EMIT(K, 4); 373 break;
370 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */ 374 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
375 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
376 EMIT2(0xf7, 0xf1); /* div %ecx */
371 break; 377 break;
372 case BPF_S_ALU_AND_X: 378 case BPF_S_ALU_AND_X:
373 seen |= SEEN_XREG; 379 seen |= SEEN_XREG;
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 2ada505067cc..eb5d7a56f8d4 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -178,7 +178,7 @@ notrace static int __always_inline do_realtime(struct timespec *ts)
178 178
179 ts->tv_nsec = 0; 179 ts->tv_nsec = 0;
180 do { 180 do {
181 seq = read_seqcount_begin_no_lockdep(&gtod->seq); 181 seq = raw_read_seqcount_begin(&gtod->seq);
182 mode = gtod->clock.vclock_mode; 182 mode = gtod->clock.vclock_mode;
183 ts->tv_sec = gtod->wall_time_sec; 183 ts->tv_sec = gtod->wall_time_sec;
184 ns = gtod->wall_time_snsec; 184 ns = gtod->wall_time_snsec;
@@ -198,7 +198,7 @@ notrace static int do_monotonic(struct timespec *ts)
198 198
199 ts->tv_nsec = 0; 199 ts->tv_nsec = 0;
200 do { 200 do {
201 seq = read_seqcount_begin_no_lockdep(&gtod->seq); 201 seq = raw_read_seqcount_begin(&gtod->seq);
202 mode = gtod->clock.vclock_mode; 202 mode = gtod->clock.vclock_mode;
203 ts->tv_sec = gtod->monotonic_time_sec; 203 ts->tv_sec = gtod->monotonic_time_sec;
204 ns = gtod->monotonic_time_snsec; 204 ns = gtod->monotonic_time_snsec;
@@ -214,7 +214,7 @@ notrace static int do_realtime_coarse(struct timespec *ts)
214{ 214{
215 unsigned long seq; 215 unsigned long seq;
216 do { 216 do {
217 seq = read_seqcount_begin_no_lockdep(&gtod->seq); 217 seq = raw_read_seqcount_begin(&gtod->seq);
218 ts->tv_sec = gtod->wall_time_coarse.tv_sec; 218 ts->tv_sec = gtod->wall_time_coarse.tv_sec;
219 ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; 219 ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
220 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); 220 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@@ -225,7 +225,7 @@ notrace static int do_monotonic_coarse(struct timespec *ts)
225{ 225{
226 unsigned long seq; 226 unsigned long seq;
227 do { 227 do {
228 seq = read_seqcount_begin_no_lockdep(&gtod->seq); 228 seq = raw_read_seqcount_begin(&gtod->seq);
229 ts->tv_sec = gtod->monotonic_time_coarse.tv_sec; 229 ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
230 ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec; 230 ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
231 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); 231 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a2e69d26266d..83a598ebb65a 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -425,10 +425,7 @@ static void null_del_dev(struct nullb *nullb)
425 list_del_init(&nullb->list); 425 list_del_init(&nullb->list);
426 426
427 del_gendisk(nullb->disk); 427 del_gendisk(nullb->disk);
428 if (queue_mode == NULL_Q_MQ) 428 blk_cleanup_queue(nullb->q);
429 blk_mq_free_queue(nullb->q);
430 else
431 blk_cleanup_queue(nullb->q);
432 put_disk(nullb->disk); 429 put_disk(nullb->disk);
433 kfree(nullb); 430 kfree(nullb);
434} 431}
@@ -578,10 +575,7 @@ static int null_add_dev(void)
578 disk = nullb->disk = alloc_disk_node(1, home_node); 575 disk = nullb->disk = alloc_disk_node(1, home_node);
579 if (!disk) { 576 if (!disk) {
580queue_fail: 577queue_fail:
581 if (queue_mode == NULL_Q_MQ) 578 blk_cleanup_queue(nullb->q);
582 blk_mq_free_queue(nullb->q);
583 else
584 blk_cleanup_queue(nullb->q);
585 cleanup_queues(nullb); 579 cleanup_queues(nullb);
586err: 580err:
587 kfree(nullb); 581 kfree(nullb);
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index b2bb3a4bc205..a92350b55d32 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -67,11 +67,13 @@
67 * struct ttc_timer - This definition defines local timer structure 67 * struct ttc_timer - This definition defines local timer structure
68 * 68 *
69 * @base_addr: Base address of timer 69 * @base_addr: Base address of timer
70 * @freq: Timer input clock frequency
70 * @clk: Associated clock source 71 * @clk: Associated clock source
71 * @clk_rate_change_nb Notifier block for clock rate changes 72 * @clk_rate_change_nb Notifier block for clock rate changes
72 */ 73 */
73struct ttc_timer { 74struct ttc_timer {
74 void __iomem *base_addr; 75 void __iomem *base_addr;
76 unsigned long freq;
75 struct clk *clk; 77 struct clk *clk;
76 struct notifier_block clk_rate_change_nb; 78 struct notifier_block clk_rate_change_nb;
77}; 79};
@@ -196,9 +198,8 @@ static void ttc_set_mode(enum clock_event_mode mode,
196 198
197 switch (mode) { 199 switch (mode) {
198 case CLOCK_EVT_MODE_PERIODIC: 200 case CLOCK_EVT_MODE_PERIODIC:
199 ttc_set_interval(timer, 201 ttc_set_interval(timer, DIV_ROUND_CLOSEST(ttce->ttc.freq,
200 DIV_ROUND_CLOSEST(clk_get_rate(ttce->ttc.clk), 202 PRESCALE * HZ));
201 PRESCALE * HZ));
202 break; 203 break;
203 case CLOCK_EVT_MODE_ONESHOT: 204 case CLOCK_EVT_MODE_ONESHOT:
204 case CLOCK_EVT_MODE_UNUSED: 205 case CLOCK_EVT_MODE_UNUSED:
@@ -273,6 +274,8 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
273 return; 274 return;
274 } 275 }
275 276
277 ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
278
276 ttccs->ttc.clk_rate_change_nb.notifier_call = 279 ttccs->ttc.clk_rate_change_nb.notifier_call =
277 ttc_rate_change_clocksource_cb; 280 ttc_rate_change_clocksource_cb;
278 ttccs->ttc.clk_rate_change_nb.next = NULL; 281 ttccs->ttc.clk_rate_change_nb.next = NULL;
@@ -298,16 +301,14 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
298 __raw_writel(CNT_CNTRL_RESET, 301 __raw_writel(CNT_CNTRL_RESET,
299 ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); 302 ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
300 303
301 err = clocksource_register_hz(&ttccs->cs, 304 err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
302 clk_get_rate(ttccs->ttc.clk) / PRESCALE);
303 if (WARN_ON(err)) { 305 if (WARN_ON(err)) {
304 kfree(ttccs); 306 kfree(ttccs);
305 return; 307 return;
306 } 308 }
307 309
308 ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET; 310 ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
309 setup_sched_clock(ttc_sched_clock_read, 16, 311 setup_sched_clock(ttc_sched_clock_read, 16, ttccs->ttc.freq / PRESCALE);
310 clk_get_rate(ttccs->ttc.clk) / PRESCALE);
311} 312}
312 313
313static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, 314static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
@@ -334,6 +335,9 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
334 ndata->new_rate / PRESCALE); 335 ndata->new_rate / PRESCALE);
335 local_irq_restore(flags); 336 local_irq_restore(flags);
336 337
338 /* update cached frequency */
339 ttc->freq = ndata->new_rate;
340
337 /* fall through */ 341 /* fall through */
338 } 342 }
339 case PRE_RATE_CHANGE: 343 case PRE_RATE_CHANGE:
@@ -367,6 +371,7 @@ static void __init ttc_setup_clockevent(struct clk *clk,
367 if (clk_notifier_register(ttcce->ttc.clk, 371 if (clk_notifier_register(ttcce->ttc.clk,
368 &ttcce->ttc.clk_rate_change_nb)) 372 &ttcce->ttc.clk_rate_change_nb))
369 pr_warn("Unable to register clock notifier.\n"); 373 pr_warn("Unable to register clock notifier.\n");
374 ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
370 375
371 ttcce->ttc.base_addr = base; 376 ttcce->ttc.base_addr = base;
372 ttcce->ce.name = "ttc_clockevent"; 377 ttcce->ce.name = "ttc_clockevent";
@@ -396,7 +401,7 @@ static void __init ttc_setup_clockevent(struct clk *clk,
396 } 401 }
397 402
398 clockevents_config_and_register(&ttcce->ce, 403 clockevents_config_and_register(&ttcce->ce,
399 clk_get_rate(ttcce->ttc.clk) / PRESCALE, 1, 0xfffe); 404 ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
400} 405}
401 406
402/** 407/**
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 85071a1c4547..b0733153dfd2 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1041,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
1041 /* if equal delete the probed mode */ 1041 /* if equal delete the probed mode */
1042 mode->status = pmode->status; 1042 mode->status = pmode->status;
1043 /* Merge type bits together */ 1043 /* Merge type bits together */
1044 mode->type = pmode->type; 1044 mode->type |= pmode->type;
1045 list_del(&pmode->head); 1045 list_del(&pmode->head);
1046 drm_mode_destroy(connector->dev, pmode); 1046 drm_mode_destroy(connector->dev, pmode);
1047 break; 1047 break;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5d1dedc02f15..f13d5edc39d5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2713,6 +2713,8 @@ static void gen8_irq_preinstall(struct drm_device *dev)
2713#undef GEN8_IRQ_INIT_NDX 2713#undef GEN8_IRQ_INIT_NDX
2714 2714
2715 POSTING_READ(GEN8_PCU_IIR); 2715 POSTING_READ(GEN8_PCU_IIR);
2716
2717 ibx_irq_preinstall(dev);
2716} 2718}
2717 2719
2718static void ibx_hpd_irq_setup(struct drm_device *dev) 2720static void ibx_hpd_irq_setup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 526c8ded16b0..b69dc3e66c16 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1057,12 +1057,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
1057 enum pipe pipe; 1057 enum pipe pipe;
1058 struct intel_crtc *intel_crtc; 1058 struct intel_crtc *intel_crtc;
1059 1059
1060 dev_priv->ddi_plls.spll_refcount = 0;
1061 dev_priv->ddi_plls.wrpll1_refcount = 0;
1062 dev_priv->ddi_plls.wrpll2_refcount = 0;
1063
1060 for_each_pipe(pipe) { 1064 for_each_pipe(pipe) {
1061 intel_crtc = 1065 intel_crtc =
1062 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 1066 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1063 1067
1064 if (!intel_crtc->active) 1068 if (!intel_crtc->active) {
1069 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
1065 continue; 1070 continue;
1071 }
1066 1072
1067 intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, 1073 intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
1068 pipe); 1074 pipe);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 769b864465a9..2bde35d34eb9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11053,10 +11053,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
11053 11053
11054 intel_setup_overlay(dev); 11054 intel_setup_overlay(dev);
11055 11055
11056 drm_modeset_lock_all(dev); 11056 mutex_lock(&dev->mode_config.mutex);
11057 drm_mode_config_reset(dev); 11057 drm_mode_config_reset(dev);
11058 intel_modeset_setup_hw_state(dev, false); 11058 intel_modeset_setup_hw_state(dev, false);
11059 drm_modeset_unlock_all(dev); 11059 mutex_unlock(&dev->mode_config.mutex);
11060} 11060}
11061 11061
11062void intel_modeset_cleanup(struct drm_device *dev) 11062void intel_modeset_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 9fa5da723871..7f50a858b16f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -73,7 +73,7 @@ struct nouveau_i2c {
73 int (*identify)(struct nouveau_i2c *, int index, 73 int (*identify)(struct nouveau_i2c *, int index,
74 const char *what, struct nouveau_i2c_board_info *, 74 const char *what, struct nouveau_i2c_board_info *,
75 bool (*match)(struct nouveau_i2c_port *, 75 bool (*match)(struct nouveau_i2c_port *,
76 struct i2c_board_info *)); 76 struct i2c_board_info *, void *), void *);
77 struct list_head ports; 77 struct list_head ports;
78}; 78};
79 79
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
index ec7a54e91a08..4aca33887aaa 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -50,6 +50,13 @@ struct nouveau_instmem {
50static inline struct nouveau_instmem * 50static inline struct nouveau_instmem *
51nouveau_instmem(void *obj) 51nouveau_instmem(void *obj)
52{ 52{
53 /* nv04/nv40 impls need to create objects in their constructor,
54 * which is before the subdev pointer is valid
55 */
56 if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
57 nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
58 return obj;
59
53 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM]; 60 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
54} 61}
55 62
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 041fd5edaebf..c33c03d2f4af 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -197,7 +197,7 @@ static int
197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, 197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
198 struct nouveau_i2c_board_info *info, 198 struct nouveau_i2c_board_info *info,
199 bool (*match)(struct nouveau_i2c_port *, 199 bool (*match)(struct nouveau_i2c_port *,
200 struct i2c_board_info *)) 200 struct i2c_board_info *, void *), void *data)
201{ 201{
202 struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index); 202 struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index);
203 int i; 203 int i;
@@ -221,7 +221,7 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
221 } 221 }
222 222
223 if (nv_probe_i2c(port, info[i].dev.addr) && 223 if (nv_probe_i2c(port, info[i].dev.addr) &&
224 (!match || match(port, &info[i].dev))) { 224 (!match || match(port, &info[i].dev, data))) {
225 nv_info(i2c, "detected %s: %s\n", what, 225 nv_info(i2c, "detected %s: %s\n", what,
226 info[i].dev.type); 226 info[i].dev.type);
227 return i; 227 return i;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index e44ed7b93c6d..7610fc5f8fa2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -29,9 +29,9 @@
29 29
30static bool 30static bool
31probe_monitoring_device(struct nouveau_i2c_port *i2c, 31probe_monitoring_device(struct nouveau_i2c_port *i2c,
32 struct i2c_board_info *info) 32 struct i2c_board_info *info, void *data)
33{ 33{
34 struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); 34 struct nouveau_therm_priv *priv = data;
35 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 35 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
36 struct i2c_client *client; 36 struct i2c_client *client;
37 37
@@ -96,7 +96,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
96 }; 96 };
97 97
98 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 98 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
99 board, probe_monitoring_device); 99 board, probe_monitoring_device, therm);
100 if (priv->ic) 100 if (priv->ic)
101 return; 101 return;
102 } 102 }
@@ -108,7 +108,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
108 }; 108 };
109 109
110 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 110 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
111 board, probe_monitoring_device); 111 board, probe_monitoring_device, therm);
112 if (priv->ic) 112 if (priv->ic)
113 return; 113 return;
114 } 114 }
@@ -117,5 +117,5 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
117 device. Let's try our static list. 117 device. Let's try our static list.
118 */ 118 */
119 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 119 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
120 nv_board_infos, probe_monitoring_device); 120 nv_board_infos, probe_monitoring_device, therm);
121} 121}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 936a71c59080..7fdc51e2a571 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -643,7 +643,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
643 get_tmds_slave(encoder)) 643 get_tmds_slave(encoder))
644 return; 644 return;
645 645
646 type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL); 646 type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL);
647 if (type < 0) 647 if (type < 0)
648 return; 648 return;
649 649
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index cc4b208ce546..244822df8ffc 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -59,7 +59,7 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
59 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 59 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
60 60
61 return i2c->identify(i2c, i2c_index, "TV encoder", 61 return i2c->identify(i2c, i2c_index, "TV encoder",
62 nv04_tv_encoder_info, NULL); 62 nv04_tv_encoder_info, NULL, NULL);
63} 63}
64 64
65 65
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 78be66176840..942509892895 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
52 52
53#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ 53#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
54#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */ 54#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
55#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ 55#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
56#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ 56#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
57#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) 57#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
58#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) 58#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 21f4d7ff0da2..369d919bdafe 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1077,6 +1077,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1077 rdev->raid_disk = -1; 1077 rdev->raid_disk = -1;
1078 clear_bit(Faulty, &rdev->flags); 1078 clear_bit(Faulty, &rdev->flags);
1079 clear_bit(In_sync, &rdev->flags); 1079 clear_bit(In_sync, &rdev->flags);
1080 clear_bit(Bitmap_sync, &rdev->flags);
1080 clear_bit(WriteMostly, &rdev->flags); 1081 clear_bit(WriteMostly, &rdev->flags);
1081 1082
1082 if (mddev->raid_disks == 0) { 1083 if (mddev->raid_disks == 0) {
@@ -1155,6 +1156,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1155 */ 1156 */
1156 if (ev1 < mddev->bitmap->events_cleared) 1157 if (ev1 < mddev->bitmap->events_cleared)
1157 return 0; 1158 return 0;
1159 if (ev1 < mddev->events)
1160 set_bit(Bitmap_sync, &rdev->flags);
1158 } else { 1161 } else {
1159 if (ev1 < mddev->events) 1162 if (ev1 < mddev->events)
1160 /* just a hot-add of a new device, leave raid_disk at -1 */ 1163 /* just a hot-add of a new device, leave raid_disk at -1 */
@@ -1563,6 +1566,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1563 rdev->raid_disk = -1; 1566 rdev->raid_disk = -1;
1564 clear_bit(Faulty, &rdev->flags); 1567 clear_bit(Faulty, &rdev->flags);
1565 clear_bit(In_sync, &rdev->flags); 1568 clear_bit(In_sync, &rdev->flags);
1569 clear_bit(Bitmap_sync, &rdev->flags);
1566 clear_bit(WriteMostly, &rdev->flags); 1570 clear_bit(WriteMostly, &rdev->flags);
1567 1571
1568 if (mddev->raid_disks == 0) { 1572 if (mddev->raid_disks == 0) {
@@ -1645,6 +1649,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1645 */ 1649 */
1646 if (ev1 < mddev->bitmap->events_cleared) 1650 if (ev1 < mddev->bitmap->events_cleared)
1647 return 0; 1651 return 0;
1652 if (ev1 < mddev->events)
1653 set_bit(Bitmap_sync, &rdev->flags);
1648 } else { 1654 } else {
1649 if (ev1 < mddev->events) 1655 if (ev1 < mddev->events)
1650 /* just a hot-add of a new device, leave raid_disk at -1 */ 1656 /* just a hot-add of a new device, leave raid_disk at -1 */
@@ -2788,6 +2794,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2788 else 2794 else
2789 rdev->saved_raid_disk = -1; 2795 rdev->saved_raid_disk = -1;
2790 clear_bit(In_sync, &rdev->flags); 2796 clear_bit(In_sync, &rdev->flags);
2797 clear_bit(Bitmap_sync, &rdev->flags);
2791 err = rdev->mddev->pers-> 2798 err = rdev->mddev->pers->
2792 hot_add_disk(rdev->mddev, rdev); 2799 hot_add_disk(rdev->mddev, rdev);
2793 if (err) { 2800 if (err) {
@@ -5760,6 +5767,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
5760 info->raid_disk < mddev->raid_disks) { 5767 info->raid_disk < mddev->raid_disks) {
5761 rdev->raid_disk = info->raid_disk; 5768 rdev->raid_disk = info->raid_disk;
5762 set_bit(In_sync, &rdev->flags); 5769 set_bit(In_sync, &rdev->flags);
5770 clear_bit(Bitmap_sync, &rdev->flags);
5763 } else 5771 } else
5764 rdev->raid_disk = -1; 5772 rdev->raid_disk = -1;
5765 } else 5773 } else
@@ -7706,7 +7714,8 @@ static int remove_and_add_spares(struct mddev *mddev,
7706 if (test_bit(Faulty, &rdev->flags)) 7714 if (test_bit(Faulty, &rdev->flags))
7707 continue; 7715 continue;
7708 if (mddev->ro && 7716 if (mddev->ro &&
7709 rdev->saved_raid_disk < 0) 7717 ! (rdev->saved_raid_disk >= 0 &&
7718 !test_bit(Bitmap_sync, &rdev->flags)))
7710 continue; 7719 continue;
7711 7720
7712 rdev->recovery_offset = 0; 7721 rdev->recovery_offset = 0;
@@ -7787,9 +7796,12 @@ void md_check_recovery(struct mddev *mddev)
7787 * As we only add devices that are already in-sync, 7796 * As we only add devices that are already in-sync,
7788 * we can activate the spares immediately. 7797 * we can activate the spares immediately.
7789 */ 7798 */
7790 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7791 remove_and_add_spares(mddev, NULL); 7799 remove_and_add_spares(mddev, NULL);
7792 mddev->pers->spare_active(mddev); 7800 /* There is no thread, but we need to call
7801 * ->spare_active and clear saved_raid_disk
7802 */
7803 md_reap_sync_thread(mddev);
7804 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7793 goto unlock; 7805 goto unlock;
7794 } 7806 }
7795 7807
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2f5cc8a7ef3e..0095ec84ffc7 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -129,6 +129,9 @@ struct md_rdev {
129enum flag_bits { 129enum flag_bits {
130 Faulty, /* device is known to have a fault */ 130 Faulty, /* device is known to have a fault */
131 In_sync, /* device is in_sync with rest of array */ 131 In_sync, /* device is in_sync with rest of array */
132 Bitmap_sync, /* ..actually, not quite In_sync. Need a
133 * bitmap-based recovery to get fully in sync
134 */
132 Unmerged, /* device is being added to array and should 135 Unmerged, /* device is being added to array and should
133 * be considerred for bvec_merge_fn but not 136 * be considerred for bvec_merge_fn but not
134 * yet for actual IO 137 * yet for actual IO
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1e5a540995e9..a49cfcc7a343 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -924,9 +924,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
924 conf->next_window_requests++; 924 conf->next_window_requests++;
925 else 925 else
926 conf->current_window_requests++; 926 conf->current_window_requests++;
927 }
928 if (bio->bi_sector >= conf->start_next_window)
929 sector = conf->start_next_window; 927 sector = conf->start_next_window;
928 }
930 } 929 }
931 930
932 conf->nr_pending++; 931 conf->nr_pending++;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c504e8389e69..06eeb99ea6fc 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1319,7 +1319,7 @@ read_again:
 		/* Could not read all from this device, so we will
 		 * need another r10_bio.
 		 */
-		sectors_handled = (r10_bio->sectors + max_sectors
+		sectors_handled = (r10_bio->sector + max_sectors
 				   - bio->bi_sector);
 		r10_bio->sectors = max_sectors;
 		spin_lock_irq(&conf->device_lock);
@@ -1327,7 +1327,7 @@ read_again:
 			bio->bi_phys_segments = 2;
 		else
 			bio->bi_phys_segments++;
-		spin_unlock(&conf->device_lock);
+		spin_unlock_irq(&conf->device_lock);
 		/* Cannot call generic_make_request directly
 		 * as that will be queued in __generic_make_request
 		 * and subsequent mempool_alloc might block
@@ -3218,10 +3218,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			if (j == conf->copies) {
 				/* Cannot recover, so abort the recovery or
 				 * record a bad block */
-				put_buf(r10_bio);
-				if (rb2)
-					atomic_dec(&rb2->remaining);
-				r10_bio = rb2;
 				if (any_working) {
 					/* problem is that there are bad blocks
 					 * on other device(s)
@@ -3253,6 +3249,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 						mirror->recovery_disabled
 							= mddev->recovery_disabled;
 				}
+				put_buf(r10_bio);
+				if (rb2)
+					atomic_dec(&rb2->remaining);
+				r10_bio = rb2;
 				break;
 			}
 		}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cc055da02e2a..cbb15716a5db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -687,7 +687,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
 		} else {
 			if (!test_bit(STRIPE_HANDLE, &sh->state))
 				atomic_inc(&conf->active_stripes);
-			BUG_ON(list_empty(&sh->lru));
+			BUG_ON(list_empty(&sh->lru) &&
+			       !test_bit(STRIPE_EXPANDING, &sh->state));
 			list_del_init(&sh->lru);
 			if (sh->group) {
 				sh->group->stripes_cnt--;
@@ -3608,7 +3609,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 			 */
 			set_bit(R5_Insync, &dev->flags);

-		if (rdev && test_bit(R5_WriteError, &dev->flags)) {
+		if (test_bit(R5_WriteError, &dev->flags)) {
 			/* This flag does not apply to '.replacement'
 			 * only to .rdev, so make sure to check that*/
 			struct md_rdev *rdev2 = rcu_dereference(
@@ -3621,7 +3622,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 			} else
 				clear_bit(R5_WriteError, &dev->flags);
 		}
-		if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
+		if (test_bit(R5_MadeGood, &dev->flags)) {
 			/* This flag does not apply to '.replacement'
 			 * only to .rdev, so make sure to check that*/
 			struct md_rdev *rdev2 = rcu_dereference(
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index cf17b660b4ee..e118a3ec62bc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13083,26 +13083,26 @@ static void __bnx2x_remove(struct pci_dev *pdev,
 	}

 	bnx2x_disable_pcie_error_reporting(bp);
+	if (remove_netdev) {
+		if (bp->regview)
+			iounmap(bp->regview);

-	if (bp->regview)
-		iounmap(bp->regview);
-
-	/* for vf doorbells are part of the regview and were unmapped along with
-	 * it. FW is only loaded by PF.
-	 */
-	if (IS_PF(bp)) {
-		if (bp->doorbells)
-			iounmap(bp->doorbells);
+		/* For vfs, doorbells are part of the regview and were unmapped
+		 * along with it. FW is only loaded by PF.
+		 */
+		if (IS_PF(bp)) {
+			if (bp->doorbells)
+				iounmap(bp->doorbells);

 			bnx2x_release_firmware(bp);
 		}
 		bnx2x_free_mem_bp(bp);

-	if (remove_netdev)
 		free_netdev(dev);

 		if (atomic_read(&pdev->enable_cnt) == 1)
 			pci_release_regions(pdev);
+	}

 	pci_disable_device(pdev);
 }
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6d22d6f439e3..4dc96394912d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1767,6 +1767,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
 	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
 	struct be_queue_info *rxq = &rxo->q;
 	struct page *pagep = NULL;
+	struct device *dev = &adapter->pdev->dev;
 	struct be_eth_rx_d *rxd;
 	u64 page_dmaaddr = 0, frag_dmaaddr;
 	u32 posted, page_offset = 0;
@@ -1779,9 +1780,15 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
 				rx_stats(rxo)->rx_post_fail++;
 				break;
 			}
-			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
-						    0, adapter->big_page_size,
+			page_dmaaddr = dma_map_page(dev, pagep, 0,
+						    adapter->big_page_size,
 						    DMA_FROM_DEVICE);
+			if (dma_mapping_error(dev, page_dmaaddr)) {
+				put_page(pagep);
+				pagep = NULL;
+				rx_stats(rxo)->rx_post_fail++;
+				break;
+			}
 			page_info->page_offset = 0;
 		} else {
 			get_page(pagep);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d6570b2d5a6b..6d91933c4cdd 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6184,7 +6184,7 @@ static int __e1000_resume(struct pci_dev *pdev)
 	return 0;
 }

-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int e1000_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -6203,7 +6203,7 @@ static int e1000_resume(struct device *dev)

 	return __e1000_resume(pdev);
 }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */

 #ifdef CONFIG_PM_RUNTIME
 static int e1000_runtime_suspend(struct device *dev)
@@ -7025,13 +7025,11 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
 };
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

-#ifdef CONFIG_PM
 static const struct dev_pm_ops e1000_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
 	SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
 			   e1000_idle)
 };
-#endif

 /* PCI Device API Driver */
 static struct pci_driver e1000_driver = {
@@ -7039,11 +7037,9 @@ static struct pci_driver e1000_driver = {
 	.id_table = e1000_pci_tbl,
 	.probe = e1000_probe,
 	.remove = e1000_remove,
-#ifdef CONFIG_PM
 	.driver = {
 		.pm = &e1000_pm_ops,
 	},
-#endif
 	.shutdown = e1000_shutdown,
 	.err_handler = &e1000_err_handler
 };
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 394c0a538e1c..ce2cfddbed50 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4764,6 +4764,8 @@ static int qlge_probe(struct pci_dev *pdev,
 		NETIF_F_RXCSUM;
 	ndev->features = ndev->hw_features;
 	ndev->vlan_features = ndev->hw_features;
+	/* vlan gets same features (except vlan filter) */
+	ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;

 	if (test_bit(QL_DMA64, &qdev->flags))
 		ndev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index cce6c4bc556a..ef312bc6b865 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1618,6 +1618,7 @@ static void rhine_reset_task(struct work_struct *work)
 		goto out_unlock;

 	napi_disable(&rp->napi);
+	netif_tx_disable(dev);
 	spin_lock_bh(&rp->lock);

 	/* clear all descriptors */
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 15e3f8e459f5..6e9c344c7a20 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -613,6 +613,18 @@ static const struct usb_device_id products[] = {
 	 USB_DEVICE(0x0a46, 0x9621),	/* DM9621A USB to Fast Ethernet Adapter */
 	 .driver_info = (unsigned long)&dm9601_info,
 	},
+	{
+	 USB_DEVICE(0x0a46, 0x9622),	/* DM9622 USB to Fast Ethernet Adapter */
+	 .driver_info = (unsigned long)&dm9601_info,
+	},
+	{
+	 USB_DEVICE(0x0a46, 0x0269),	/* DM962OA USB to Fast Ethernet Adapter */
+	 .driver_info = (unsigned long)&dm9601_info,
+	},
+	{
+	 USB_DEVICE(0x0a46, 0x1269),	/* DM9621A USB to Fast Ethernet Adapter */
+	 .driver_info = (unsigned long)&dm9601_info,
+	},
 	{},			// END
 };

diff --git a/fs/dcache.c b/fs/dcache.c
index 6055d61811d3..cb4a10690868 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3061,8 +3061,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
 	 * thus don't need to be hashed.  They also don't need a name until a
 	 * user wants to identify the object in /proc/pid/fd/.  The little hack
 	 * below allows us to generate a name for these objects on demand:
+	 *
+	 * Some pseudo inodes are mountable.  When they are mounted
+	 * path->dentry == path->mnt->mnt_root.  In that case don't call d_dname
+	 * and instead have d_path return the mounted path.
 	 */
-	if (path->dentry->d_op && path->dentry->d_op->d_dname)
+	if (path->dentry->d_op && path->dentry->d_op->d_dname &&
+	    (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
 		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

 	rcu_read_lock();
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 1f4a10ece2f1..e0259a163f98 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -516,13 +516,16 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
 	}
 	WARN_ON(inode->i_state & I_SYNC);
 	/*
-	 * Skip inode if it is clean. We don't want to mess with writeback
-	 * lists in this function since flusher thread may be doing for example
-	 * sync in parallel and if we move the inode, it could get skipped. So
-	 * here we make sure inode is on some writeback list and leave it there
-	 * unless we have completely cleaned the inode.
+	 * Skip inode if it is clean and we have no outstanding writeback in
+	 * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
+	 * function since flusher thread may be doing for example sync in
+	 * parallel and if we move the inode, it could get skipped. So here we
+	 * make sure inode is on some writeback list and leave it there unless
+	 * we have completely cleaned the inode.
 	 */
-	if (!(inode->i_state & I_DIRTY))
+	if (!(inode->i_state & I_DIRTY) &&
+	    (wbc->sync_mode != WB_SYNC_ALL ||
+	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
 		goto out;
 	inode->i_state |= I_SYNC;
 	spin_unlock(&inode->i_lock);
diff --git a/fs/namespace.c b/fs/namespace.c
index ac2ce8a766e1..be32ebccdeb1 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2886,7 +2886,7 @@ bool fs_fully_visible(struct file_system_type *type)
 			struct inode *inode = child->mnt_mountpoint->d_inode;
 			if (!S_ISDIR(inode->i_mode))
 				goto next;
-			if (inode->i_nlink != 2)
+			if (inode->i_nlink > 2)
 				goto next;
 		}
 		visible = true;
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 9f6b486b6c01..a1a191634abc 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1440,17 +1440,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,

 		nilfs_clear_logs(&sci->sc_segbufs);

-		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
-		if (unlikely(err))
-			return err;
-
 		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
 			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
 							sci->sc_freesegs,
 							sci->sc_nfreesegs,
 							NULL);
 			WARN_ON(err); /* do not happen */
+			sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
 		}
+
+		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
+		if (unlikely(err))
+			return err;
+
 		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
 		sci->sc_stage = prev_stage;
 	}
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index fe68a5a98583..7032518f8542 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -6,6 +6,8 @@
 #include <linux/proc_fs.h>
 #include <linux/elf.h>

+#include <asm/pgtable.h> /* for pgprot_t */
+
 #define ELFCORE_ADDR_MAX	(-1ULL)
 #define ELFCORE_ADDR_ERR	(-2ULL)

diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index eff50e062be8..d9c8dbd3373f 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -445,7 +445,7 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
 static inline struct i2c_adapter *
 i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
 {
-#if IS_ENABLED(I2C_MUX)
+#if IS_ENABLED(CONFIG_I2C_MUX)
 	struct device *parent = adapter->dev.parent;

 	if (parent != NULL && parent->type == &i2c_adapter_type)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index cf87a24c0f92..535f158977b9 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -117,15 +117,15 @@ repeat:
 }

 /**
- * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep
+ * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
  * @s: pointer to seqcount_t
  * Returns: count to be passed to read_seqcount_retry
  *
- * read_seqcount_begin_no_lockdep opens a read critical section of the given
+ * raw_read_seqcount_begin opens a read critical section of the given
  * seqcount, but without any lockdep checking. Validity of the critical
  * section is tested by checking read_seqcount_retry function.
  */
-static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
+static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
 {
 	unsigned ret = __read_seqcount_begin(s);
 	smp_rmb();
@@ -144,7 +144,7 @@ static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
 static inline unsigned read_seqcount_begin(const seqcount_t *s)
 {
 	seqcount_lockdep_reader_access(s);
-	return read_seqcount_begin_no_lockdep(s);
+	return raw_read_seqcount_begin(s);
 }

 /**
@@ -206,14 +206,26 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 }


+
+static inline void raw_write_seqcount_begin(seqcount_t *s)
+{
+	s->sequence++;
+	smp_wmb();
+}
+
+static inline void raw_write_seqcount_end(seqcount_t *s)
+{
+	smp_wmb();
+	s->sequence++;
+}
+
 /*
  * Sequence counter only version assumes that callers are using their
  * own mutexing.
  */
 static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
 {
-	s->sequence++;
-	smp_wmb();
+	raw_write_seqcount_begin(s);
 	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
 }

@@ -225,8 +237,7 @@ static inline void write_seqcount_begin(seqcount_t *s)
 static inline void write_seqcount_end(seqcount_t *s)
 {
 	seqcount_release(&s->dep_map, 1, _RET_IP_);
-	smp_wmb();
-	s->sequence++;
+	raw_write_seqcount_end(s);
 }

 /**
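The seqlock.h hunks above only factor the write-side sequence bump and barrier out into raw_write_seqcount_begin()/raw_write_seqcount_end() and rename the lockdep-free read helper; the read/retry protocol itself is unchanged, and kernel/time/sched_clock.c below is converted to the raw variants. As a rough illustration of that protocol only, here is a standalone userspace sketch; the helper names, the __atomic fences standing in for smp_wmb()/smp_rmb(), and the sample data are ours, not kernel code:

#include <stdio.h>

struct seqcount {
	unsigned sequence;
};

static void raw_write_begin(struct seqcount *s)
{
	s->sequence++;				/* odd: a write is in progress */
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* stands in for smp_wmb() */
}

static void raw_write_end(struct seqcount *s)
{
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* stands in for smp_wmb() */
	s->sequence++;				/* even again: data is consistent */
}

static unsigned raw_read_begin(const struct seqcount *s)
{
	unsigned ret = __atomic_load_n(&s->sequence, __ATOMIC_ACQUIRE);

	return ret & ~1U;	/* if a writer was mid-update, the retry below catches it */
}

static int read_retry(const struct seqcount *s, unsigned start)
{
	__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* stands in for smp_rmb() */
	return __atomic_load_n(&s->sequence, __ATOMIC_RELAXED) != start;
}

int main(void)
{
	struct seqcount seq = { 0 };
	unsigned long long ns = 0, cyc = 0, snap_ns, snap_cyc;
	unsigned start;

	raw_write_begin(&seq);		/* writer side, cf. update_sched_clock() below */
	ns = 1000;
	cyc = 42;
	raw_write_end(&seq);

	do {				/* reader side, cf. sched_clock() below */
		start = raw_read_begin(&seq);
		snap_ns = ns;
		snap_cyc = cyc;
	} while (read_retry(&seq, start));

	printf("ns=%llu cyc=%llu\n", snap_ns, snap_cyc);
	return 0;
}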
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index b58c36c1c3f6..9650a3ffd2d2 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -165,7 +165,6 @@ struct inet6_dev {
 	struct net_device	*dev;

 	struct list_head	addr_list;
-	int			valid_ll_addr_cnt;

 	struct ifmcaddr6	*mc_list;
 	struct ifmcaddr6	*mc_tomb;
diff --git a/kernel/fork.c b/kernel/fork.c
index 5721f0e3f2da..dfa736c98d17 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1172,7 +1172,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * do not allow it to share a thread group or signal handlers or
 	 * parent with the forking task.
 	 */
-	if (clone_flags & (CLONE_SIGHAND | CLONE_PARENT)) {
+	if (clone_flags & CLONE_SIGHAND) {
 		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
 		    (task_active_pid_ns(current) !=
 		     current->nsproxy->pid_ns_for_children))
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c7395d97e4cb..e64b0794060e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3923,7 +3923,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 {
 	struct sched_entity *se = tg->se[cpu];

-	if (!tg->parent || !wl)	/* the trivial, non-cgroup case */
+	if (!tg->parent)	/* the trivial, non-cgroup case */
 		return wl;

 	for_each_sched_entity(se) {
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 68b799375981..0abb36464281 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -74,7 +74,7 @@ unsigned long long notrace sched_clock(void)
 		return cd.epoch_ns;

 	do {
-		seq = read_seqcount_begin(&cd.seq);
+		seq = raw_read_seqcount_begin(&cd.seq);
 		epoch_cyc = cd.epoch_cyc;
 		epoch_ns = cd.epoch_ns;
 	} while (read_seqcount_retry(&cd.seq, seq));
@@ -99,10 +99,10 @@ static void notrace update_sched_clock(void)
 			  cd.mult, cd.shift);

 	raw_local_irq_save(flags);
-	write_seqcount_begin(&cd.seq);
+	raw_write_seqcount_begin(&cd.seq);
 	cd.epoch_ns = ns;
 	cd.epoch_cyc = cyc;
-	write_seqcount_end(&cd.seq);
+	raw_write_seqcount_end(&cd.seq);
 	raw_local_irq_restore(flags);
 }

diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 7473ee3b4ee7..8280a5dd1727 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -82,10 +82,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 		unsigned long flags;
 		raw_spin_lock_irqsave(&fbc->lock, flags);
 		fbc->count += count;
+		__this_cpu_sub(*fbc->counters, count - amount);
 		raw_spin_unlock_irqrestore(&fbc->lock, flags);
-		__this_cpu_write(*fbc->counters, 0);
 	} else {
-		__this_cpu_write(*fbc->counters, count);
+		this_cpu_add(*fbc->counters, amount);
 	}
 	preempt_enable();
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9c0b17295ba0..95d1acb0f3d2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1154,7 +1154,7 @@ alloc:
 		new_page = NULL;

 	if (unlikely(!new_page)) {
-		if (is_huge_zero_pmd(orig_pmd)) {
+		if (!page) {
 			ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
 					address, pmd, orig_pmd, haddr);
 		} else {
@@ -1181,7 +1181,7 @@ alloc:

 	count_vm_event(THP_FAULT_ALLOC);

-	if (is_huge_zero_pmd(orig_pmd))
+	if (!page)
 		clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
 	else
 		copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
@@ -1207,7 +1207,7 @@ alloc:
 		page_add_new_anon_rmap(new_page, vma, haddr);
 		set_pmd_at(mm, haddr, pmd, entry);
 		update_mmu_cache_pmd(vma, address, pmd);
-		if (is_huge_zero_pmd(orig_pmd)) {
+		if (!page) {
 			add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
 			put_huge_zero_page();
 		} else {
diff --git a/mm/util.c b/mm/util.c
index f7bc2096071c..808f375648e7 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -390,7 +390,10 @@ struct address_space *page_mapping(struct page *page)
 {
 	struct address_space *mapping = page->mapping;

-	VM_BUG_ON(PageSlab(page));
+	/* This happens if someone calls flush_dcache_page on slab page */
+	if (unlikely(PageSlab(page)))
+		return NULL;
+
 	if (unlikely(PageSwapCache(page))) {
 		swp_entry_t entry;

diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index e56b4d6a43b6..66ae135b9f27 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -275,7 +275,7 @@ int batadv_max_header_len(void)
 		     sizeof(struct batadv_coded_packet));
 #endif

-	return header_len;
+	return header_len + ETH_HLEN;
 }

 /**
diff --git a/net/core/filter.c b/net/core/filter.c
index 01b780856db2..ad30d626a5bd 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -36,7 +36,6 @@
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
 #include <linux/filter.h>
-#include <linux/reciprocal_div.h>
 #include <linux/ratelimit.h>
 #include <linux/seccomp.h>
 #include <linux/if_vlan.h>
@@ -166,7 +165,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
 			A /= X;
 			continue;
 		case BPF_S_ALU_DIV_K:
-			A = reciprocal_divide(A, K);
+			A /= K;
 			continue;
 		case BPF_S_ALU_MOD_X:
 			if (X == 0)
@@ -553,11 +552,6 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
 		/* Some instructions need special checks */
 		switch (code) {
 		case BPF_S_ALU_DIV_K:
-			/* check for division by zero */
-			if (ftest->k == 0)
-				return -EINVAL;
-			ftest->k = reciprocal_value(ftest->k);
-			break;
 		case BPF_S_ALU_MOD_K:
 			/* check for division by zero */
 			if (ftest->k == 0)
@@ -853,27 +847,7 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
 	to->code = decodes[code];
 	to->jt = filt->jt;
 	to->jf = filt->jf;
-
-	if (code == BPF_S_ALU_DIV_K) {
-		/*
-		 * When loaded this rule user gave us X, which was
-		 * translated into R = r(X). Now we calculate the
-		 * RR = r(R) and report it back. If next time this
-		 * value is loaded and RRR = r(RR) is calculated
-		 * then the R == RRR will be true.
-		 *
-		 * One exception. X == 1 translates into R == 0 and
-		 * we can't calculate RR out of it with r().
-		 */
-
-		if (filt->k == 0)
-			to->k = 1;
-		else
-			to->k = reciprocal_value(filt->k);
-
-		BUG_ON(reciprocal_value(to->k) != filt->k);
-	} else
-		to->k = filt->k;
+	to->k = filt->k;
 }

 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
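For context on the net/core/filter.c hunks above: BPF_S_ALU_DIV_K constants used to be pre-translated with reciprocal_value() at load time and executed with reciprocal_divide(), and the removed sk_decode_filter() comment already notes the corner case that X == 1 translates into R == 0. The following standalone demo shows that corner case; the two helpers are re-implemented here from memory as an approximation of the old lib/reciprocal_div code, so treat the exact formulas as our assumption rather than the authoritative kernel implementation:

#include <stdio.h>
#include <stdint.h>

static uint32_t reciprocal_value(uint32_t k)
{
	uint64_t val = (1ULL << 32) + (k - 1);

	return (uint32_t)(val / k);	/* truncates to 0 when k == 1 */
}

static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	uint32_t a = 12345, k = 1;
	uint32_t r = reciprocal_value(k);

	printf("A/K        = %u\n", a / k);				/* 12345 */
	printf("approx A/K = %u (R = %u)\n", reciprocal_divide(a, r), r);	/* 0 */
	return 0;
}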
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index d08c7a43dcd1..89b265aea151 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -221,8 +221,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)

 	if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
 		type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
-		if (type >= __IEEE802154_DEV_MAX)
-			return -EINVAL;
+		if (type >= __IEEE802154_DEV_MAX) {
+			rc = -EINVAL;
+			goto nla_put_failure;
+		}
 	}

 	dev = phy->add_iface(phy, devname, type);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 421a24934ffd..b9b3472975ba 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -157,9 +157,12 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
 static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
 			   struct mr_table **mrt)
 {
-	struct ipmr_result res;
-	struct fib_lookup_arg arg = { .result = &res, };
 	int err;
+	struct ipmr_result res;
+	struct fib_lookup_arg arg = {
+		.result = &res,
+		.flags = FIB_LOOKUP_NOREF,
+	};

 	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
 			       flowi4_to_flowi(flp4), 0, &arg);
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 699a42faab9c..fa950941de65 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -22,6 +22,10 @@

 int sysctl_tcp_nometrics_save __read_mostly;

+static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
+						   const struct inetpeer_addr *daddr,
+						   struct net *net, unsigned int hash);
+
 struct tcp_fastopen_metrics {
 	u16	mss;
 	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
@@ -131,17 +135,42 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
 	}
 }

+#define TCP_METRICS_TIMEOUT	(60 * 60 * HZ)
+
+static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
+{
+	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
+		tcpm_suck_dst(tm, dst, false);
+}
+
+#define TCP_METRICS_RECLAIM_DEPTH	5
+#define TCP_METRICS_RECLAIM_PTR	(struct tcp_metrics_block *) 0x1UL
+
 static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
 					  struct inetpeer_addr *saddr,
 					  struct inetpeer_addr *daddr,
-					  unsigned int hash,
-					  bool reclaim)
+					  unsigned int hash)
 {
 	struct tcp_metrics_block *tm;
 	struct net *net;
+	bool reclaim = false;

 	spin_lock_bh(&tcp_metrics_lock);
 	net = dev_net(dst->dev);
+
+	/* While waiting for the spin-lock the cache might have been populated
+	 * with this entry and so we have to check again.
+	 */
+	tm = __tcp_get_metrics(saddr, daddr, net, hash);
+	if (tm == TCP_METRICS_RECLAIM_PTR) {
+		reclaim = true;
+		tm = NULL;
+	}
+	if (tm) {
+		tcpm_check_stamp(tm, dst);
+		goto out_unlock;
+	}
+
 	if (unlikely(reclaim)) {
 		struct tcp_metrics_block *oldest;

@@ -172,17 +201,6 @@ out_unlock:
 	return tm;
 }

-#define TCP_METRICS_TIMEOUT	(60 * 60 * HZ)
-
-static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
-{
-	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
-		tcpm_suck_dst(tm, dst, false);
-}
-
-#define TCP_METRICS_RECLAIM_DEPTH	5
-#define TCP_METRICS_RECLAIM_PTR	(struct tcp_metrics_block *) 0x1UL
-
 static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
 {
 	if (tm)
@@ -295,7 +313,6 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
 	struct inetpeer_addr saddr, daddr;
 	unsigned int hash;
 	struct net *net;
-	bool reclaim;

 	saddr.family = sk->sk_family;
 	daddr.family = sk->sk_family;
@@ -320,13 +337,10 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
 	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

 	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
-	reclaim = false;
-	if (tm == TCP_METRICS_RECLAIM_PTR) {
-		reclaim = true;
+	if (tm == TCP_METRICS_RECLAIM_PTR)
 		tm = NULL;
-	}
 	if (!tm && create)
-		tm = tcpm_new(dst, &saddr, &daddr, hash, reclaim);
+		tm = tcpm_new(dst, &saddr, &daddr, hash);
 	else
 		tcpm_check_stamp(tm, dst);

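The tcpm_new() change above is a double-checked lookup: after the spinlock is taken, the metrics hash is searched again so that two racing callers cannot create two tcp metrics objects with the same key (the conflict called out in this merge). The sketch below is a generic userspace illustration of that pattern and is entirely ours: a toy string-keyed cache with a pthread mutex in place of the kernel spinlock, and an acquire/release-published, prepend-only list standing in for the RCU hash chain the kernel code uses:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	char key[32];
	struct entry *next;
};

static struct entry *head;			/* only ever prepended to, never freed */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *lookup(const char *key)
{
	struct entry *e = __atomic_load_n(&head, __ATOMIC_ACQUIRE);

	for (; e; e = e->next)
		if (!strcmp(e->key, key))
			return e;
	return NULL;
}

static struct entry *get_or_create(const char *key)
{
	struct entry *e = lookup(key);		/* lockless fast path */

	if (e)
		return e;

	pthread_mutex_lock(&lock);
	e = lookup(key);			/* re-check: another thread may have won the race */
	if (!e) {
		e = calloc(1, sizeof(*e));
		if (e) {
			strncpy(e->key, key, sizeof(e->key) - 1);
			e->next = head;
			__atomic_store_n(&head, e, __ATOMIC_RELEASE);	/* publish fully built node */
		}
	}
	pthread_mutex_unlock(&lock);
	return e;
}

int main(void)
{
	struct entry *a = get_or_create("192.0.2.1");
	struct entry *b = get_or_create("192.0.2.1");

	return (a && a == b) ? 0 : 1;		/* second call must find the first entry */
}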
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6913a82f4669..f91e107d5f88 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3233,6 +3233,22 @@ out:
 	in6_ifa_put(ifp);
 }

+/* ifp->idev must be at least read locked */
+static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
+{
+	struct inet6_ifaddr *ifpiter;
+	struct inet6_dev *idev = ifp->idev;
+
+	list_for_each_entry(ifpiter, &idev->addr_list, if_list) {
+		if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
+		    (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
+				       IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
+		    IFA_F_PERMANENT)
+			return false;
+	}
+	return true;
+}
+
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
 {
 	struct net_device *dev = ifp->idev->dev;
@@ -3252,14 +3268,11 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
 	 */

 	read_lock_bh(&ifp->idev->lock);
-	spin_lock(&ifp->lock);
-	send_mld = ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL &&
-		   ifp->idev->valid_ll_addr_cnt == 1;
+	send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
 	send_rs = send_mld &&
 		  ipv6_accept_ra(ifp->idev) &&
 		  ifp->idev->cnf.rtr_solicits > 0 &&
 		  (dev->flags&IFF_LOOPBACK) == 0;
-	spin_unlock(&ifp->lock);
 	read_unlock_bh(&ifp->idev->lock);

 	/* While dad is in progress mld report's source address is in6_addrany.
@@ -4598,19 +4611,6 @@ errout:
 		rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
 }

-static void update_valid_ll_addr_cnt(struct inet6_ifaddr *ifp, int count)
-{
-	write_lock_bh(&ifp->idev->lock);
-	spin_lock(&ifp->lock);
-	if (((ifp->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|
-			    IFA_F_DADFAILED)) == IFA_F_PERMANENT) &&
-	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL))
-		ifp->idev->valid_ll_addr_cnt += count;
-	WARN_ON(ifp->idev->valid_ll_addr_cnt < 0);
-	spin_unlock(&ifp->lock);
-	write_unlock_bh(&ifp->idev->lock);
-}
-
 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 {
 	struct net *net = dev_net(ifp->idev->dev);
@@ -4619,8 +4619,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)

 	switch (event) {
 	case RTM_NEWADDR:
-		update_valid_ll_addr_cnt(ifp, 1);
-
 		/*
 		 * If the address was optimistic
 		 * we inserted the route at the start of
@@ -4636,8 +4634,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 					      ifp->idev->dev, 0, 0);
 		break;
 	case RTM_DELADDR:
-		update_valid_ll_addr_cnt(ifp, -1);
-
 		if (ifp->idev->cnf.forwarding)
 			addrconf_leave_anycast(ifp);
 		addrconf_leave_solict(ifp->idev, &ifp->addr);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index f365310bfcca..0eb4038a4d63 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -141,9 +141,12 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
 			   struct mr6_table **mrt)
 {
-	struct ip6mr_result res;
-	struct fib_lookup_arg arg = { .result = &res, };
 	int err;
+	struct ip6mr_result res;
+	struct fib_lookup_arg arg = {
+		.result = &res,
+		.flags = FIB_LOOKUP_NOREF,
+	};

 	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
 			       flowi6_to_flowi(flp6), 0, &arg);
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 8eb9501e3d60..b7ebe23cdedf 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -421,8 +421,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
 				 struct rds_ib_refill_cache *cache)
 {
 	unsigned long flags;
-	struct list_head *old;
-	struct list_head __percpu *chpfirst;
+	struct list_head *old, *chpfirst;

 	local_irq_save(flags);

@@ -432,7 +431,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
 	else /* put on front */
 		list_add_tail(new_item, chpfirst);

-	__this_cpu_write(chpfirst, new_item);
+	__this_cpu_write(cache->percpu->first, new_item);
 	__this_cpu_inc(cache->percpu->count);

 	if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
@@ -452,7 +451,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
 	} while (old);


-	__this_cpu_write(chpfirst, NULL);
+	__this_cpu_write(cache->percpu->first, NULL);
 	__this_cpu_write(cache->percpu->count, 0);
 end:
 	local_irq_restore(flags);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 6625699f497c..57b0b49f4e6e 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -234,6 +234,14 @@ static int inode_alloc_security(struct inode *inode)
 	return 0;
 }

+static void inode_free_rcu(struct rcu_head *head)
+{
+	struct inode_security_struct *isec;
+
+	isec = container_of(head, struct inode_security_struct, rcu);
+	kmem_cache_free(sel_inode_cache, isec);
+}
+
 static void inode_free_security(struct inode *inode)
 {
 	struct inode_security_struct *isec = inode->i_security;
@@ -244,8 +252,16 @@ static void inode_free_security(struct inode *inode)
 		list_del_init(&isec->list);
 	spin_unlock(&sbsec->isec_lock);

-	inode->i_security = NULL;
-	kmem_cache_free(sel_inode_cache, isec);
+	/*
+	 * The inode may still be referenced in a path walk and
+	 * a call to selinux_inode_permission() can be made
+	 * after inode_free_security() is called. Ideally, the VFS
+	 * wouldn't do this, but fixing that is a much harder
+	 * job. For now, simply free the i_security via RCU, and
+	 * leave the current inode->i_security pointer intact.
+	 * The inode will be freed after the RCU grace period too.
+	 */
+	call_rcu(&isec->rcu, inode_free_rcu);
 }

 static int file_alloc_security(struct file *file)
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index b1dfe1049450..078e553f52f2 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -38,7 +38,10 @@ struct task_security_struct {

 struct inode_security_struct {
 	struct inode *inode;	/* back pointer to inode object */
-	struct list_head list;	/* list of inode_security_struct */
+	union {
+		struct list_head list;	/* list of inode_security_struct */
+		struct rcu_head rcu;	/* for freeing the inode_security_struct */
+	};
 	u32 task_sid;		/* SID of creating task */
 	u32 sid;		/* SID of this object */
 	u16 sclass;		/* security class of this object */