author     Ingo Molnar <mingo@kernel.org>  2016-12-02 05:13:44 -0500
committer  Ingo Molnar <mingo@kernel.org>  2016-12-02 05:13:44 -0500
commit     1b95b1a06cb27badb3e53329fb56af2a2113fd80 (patch)
tree       bbe52b10fde003833c980b0a30dd23b0e0855c87
parent     3cded41794818d788aa1dc028ede4a1c1222d937 (diff)
parent     1be5d4fa0af34fb7bafa205aeb59f5c7cc7a089d (diff)
Merge branch 'locking/urgent' into locking/core, to pick up dependent fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/tile/kernel/time.c | 4
-rw-r--r--  arch/x86/boot/compressed/Makefile | 5
-rw-r--r--  arch/x86/boot/cpu.c | 6
-rw-r--r--  arch/x86/events/amd/core.c | 8
-rw-r--r--  arch/x86/events/core.c | 10
-rw-r--r--  arch/x86/events/intel/ds.c | 35
-rw-r--r--  arch/x86/events/intel/uncore.c | 8
-rw-r--r--  arch/x86/events/intel/uncore_snb.c | 12
-rw-r--r--  arch/x86/events/perf_event.h | 2
-rw-r--r--  arch/x86/kernel/dumpstack.c | 2
-rw-r--r--  arch/x86/kernel/fpu/core.c | 16
-rw-r--r--  arch/x86/kernel/head_32.S | 9
-rw-r--r--  arch/x86/kernel/sysfb_simplefb.c | 39
-rw-r--r--  arch/x86/kernel/unwind_guess.c | 8
-rw-r--r--  arch/x86/mm/extable.c | 7
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/Makefile | 2
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c (renamed from arch/x86/platform/intel-mid/device_libs/platform_wdt.c) | 34
-rw-r--r--  crypto/algif_hash.c | 2
-rw-r--r--  crypto/scatterwalk.c | 4
-rw-r--r--  drivers/clk/berlin/bg2.c | 2
-rw-r--r--  drivers/clk/berlin/bg2q.c | 2
-rw-r--r--  drivers/clk/clk-efm32gg.c | 2
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 12
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 15
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 21
-rw-r--r--  drivers/thermal/intel_powerclamp.c | 9
-rw-r--r--  fs/nfs/callback.c | 2
-rw-r--r--  fs/nfs/nfs4_fs.h | 7
-rw-r--r--  fs/nfs/nfs4proc.c | 38
-rw-r--r--  fs/nfs/nfs4state.c | 1
-rw-r--r--  include/linux/sched.h | 2
-rw-r--r--  kernel/events/core.c | 13
-rw-r--r--  kernel/exit.c | 1
-rw-r--r--  kernel/locking/rtmutex.c | 68
-rw-r--r--  kernel/locking/rtmutex_common.h | 5
-rw-r--r--  kernel/sched/auto_group.c | 36
-rw-r--r--  lib/locking-selftest.c | 66
38 files changed, 367 insertions(+), 150 deletions(-)
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 178989e6d3e3..ea960d660917 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
 	 */
 unsigned long long sched_clock(void)
 {
-	return clocksource_cyc2ns(get_cycles(),
-				  sched_clock_mult, SCHED_CLOCK_SHIFT);
+	return mult_frac(get_cycles(),
+			 sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
 }
 
 int setup_profiling_timer(unsigned int multiplier)
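Why this fixes anything: clocksource_cyc2ns() computes (cyc * mult) >> shift, and the 64-bit product overflows once the absolute cycle count grows large; mult_frac() divides before multiplying, so it stays exact. Below is a minimal userspace sketch of the kernel's mult_frac() macro; the mult and divisor values are made-up stand-ins for sched_clock_mult and 1ULL << SCHED_CLOCK_SHIFT.

#include <stdint.h>
#include <stdio.h>

/* Userspace rendition of include/linux/kernel.h's mult_frac():
 * computes (x * mult) / div without overflowing the intermediate
 * product, by splitting x into quotient and remainder first. */
static uint64_t mult_frac(uint64_t x, uint64_t mult, uint64_t div)
{
	uint64_t q = x / div;
	uint64_t r = x % div;

	return q * mult + (r * mult) / div;
}

int main(void)
{
	uint64_t cycles = 1ULL << 62;	/* large absolute cycle count */
	uint64_t mult   = 1000;		/* hypothetical sched_clock_mult */
	uint64_t div    = 1ULL << 10;	/* hypothetical 1ULL << SCHED_CLOCK_SHIFT */

	printf("naive:     %llu\n",
	       (unsigned long long)((cycles * mult) / div));	/* wraps */
	printf("mult_frac: %llu\n",
	       (unsigned long long)mult_frac(cycles, mult, div));
	return 0;
}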
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 536ccfcc01c6..34d9e15857c3 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -40,8 +40,8 @@ GCOV_PROFILE := n
 UBSAN_SANITIZE :=n
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
-ifeq ($(CONFIG_RELOCATABLE),y)
-# If kernel is relocatable, build compressed kernel as PIE.
+# Compressed kernel should be built as PIE since it may be loaded at any
+# address by the bootloader.
 ifeq ($(CONFIG_X86_32),y)
 LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
 else
@@ -51,7 +51,6 @@ else
 LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
 	&& echo "-z noreloc-overflow -pie --no-dynamic-linker")
 endif
-endif
 LDFLAGS_vmlinux := -T
 
 hostprogs-y := mkpiggy
diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c
index 26240dde081e..4224ede43b4e 100644
--- a/arch/x86/boot/cpu.c
+++ b/arch/x86/boot/cpu.c
@@ -87,6 +87,12 @@ int validate_cpu(void)
 		return -1;
 	}
 
+	if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) &&
+	    !has_eflag(X86_EFLAGS_ID)) {
+		printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n");
+		return -1;
+	}
+
 	if (err_flags) {
 		puts("This kernel requires the following features "
 		     "not present on the CPU:\n");
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index f5f4b3fbbbc2..afb222b63cae 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -662,7 +662,13 @@ static int __init amd_core_pmu_init(void)
 		pr_cont("Fam15h ");
 		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
 		break;
-
+	case 0x17:
+		pr_cont("Fam17h ");
+		/*
+		 * In family 17h, there are no event constraints in the PMC hardware.
+		 * We fallback to using default amd_get_event_constraints.
+		 */
+		break;
 	default:
 		pr_err("core perfctr but no constraints; unknown hardware!\n");
 		return -ENODEV;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index d31735f37ed7..9d4bf3ab049e 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2352,7 +2352,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
 		frame.next_frame     = 0;
 		frame.return_address = 0;
 
-		if (!access_ok(VERIFY_READ, fp, 8))
+		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
 		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
@@ -2362,9 +2362,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
 		if (bytes != 0)
 			break;
 
-		if (!valid_user_frame(fp, sizeof(frame)))
-			break;
-
 		perf_callchain_store(entry, cs_base + frame.return_address);
 		fp = compat_ptr(ss_base + frame.next_frame);
 	}
@@ -2413,7 +2410,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 		frame.next_frame     = NULL;
 		frame.return_address = 0;
 
-		if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
+		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
 		bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
@@ -2423,9 +2420,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 		if (bytes != 0)
 			break;
 
-		if (!valid_user_frame(fp, sizeof(frame)))
-			break;
-
 		perf_callchain_store(entry, frame.return_address);
 		fp = (void __user *)frame.next_frame;
 	}
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 0319311dbdbb..be202390bbd3 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1108,20 +1108,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
 	}
 
 	/*
-	 * We use the interrupt regs as a base because the PEBS record
-	 * does not contain a full regs set, specifically it seems to
-	 * lack segment descriptors, which get used by things like
-	 * user_mode().
+	 * We use the interrupt regs as a base because the PEBS record does not
+	 * contain a full regs set, specifically it seems to lack segment
+	 * descriptors, which get used by things like user_mode().
 	 *
-	 * In the simple case fix up only the IP and BP,SP regs, for
-	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
-	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
+	 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
+	 *
+	 * We must however always use BP,SP from iregs for the unwinder to stay
+	 * sane; the record BP,SP can point into thin air when the record is
+	 * from a previous PMI context or an (I)RET happend between the record
+	 * and PMI.
 	 */
 	*regs = *iregs;
 	regs->flags = pebs->flags;
 	set_linear_ip(regs, pebs->ip);
-	regs->bp = pebs->bp;
-	regs->sp = pebs->sp;
 
 	if (sample_type & PERF_SAMPLE_REGS_INTR) {
 		regs->ax = pebs->ax;
@@ -1130,10 +1130,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
 		regs->dx = pebs->dx;
 		regs->si = pebs->si;
 		regs->di = pebs->di;
-		regs->bp = pebs->bp;
-		regs->sp = pebs->sp;
 
-		regs->flags = pebs->flags;
+		/*
+		 * Per the above; only set BP,SP if we don't need callchains.
+		 *
+		 * XXX: does this make sense?
+		 */
+		if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
+			regs->bp = pebs->bp;
+			regs->sp = pebs->sp;
+		}
+
+		/*
+		 * Preserve PERF_EFLAGS_VM from set_linear_ip().
+		 */
+		regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
 #ifndef CONFIG_X86_32
 		regs->r8 = pebs->r8;
 		regs->r9 = pebs->r9;
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index efca2685d876..dbaaf7dc8373 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -319,9 +319,9 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
  */
 static int uncore_pmu_event_init(struct perf_event *event);
 
-static bool is_uncore_event(struct perf_event *event)
+static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
 {
-	return event->pmu->event_init == uncore_pmu_event_init;
+	return &box->pmu->pmu == event->pmu;
 }
 
 static int
@@ -340,7 +340,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
 
 	n = box->n_events;
 
-	if (is_uncore_event(leader)) {
+	if (is_box_event(box, leader)) {
 		box->event_list[n] = leader;
 		n++;
 	}
@@ -349,7 +349,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
 		return n;
 
 	list_for_each_entry(event, &leader->sibling_list, group_entry) {
-		if (!is_uncore_event(event) ||
+		if (!is_box_event(box, event) ||
 		    event->state <= PERF_EVENT_STATE_OFF)
 			continue;
 
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 81195cca7eae..a3dcc12bef4a 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -490,24 +490,12 @@ static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
 
 	snb_uncore_imc_event_start(event, 0);
 
-	box->n_events++;
-
 	return 0;
 }
 
 static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
 {
-	struct intel_uncore_box *box = uncore_event_to_box(event);
-	int i;
-
 	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
-
-	for (i = 0; i < box->n_events; i++) {
-		if (event == box->event_list[i]) {
-			--box->n_events;
-			break;
-		}
-	}
 }
 
 int snb_pci2phy_map_init(int devid)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 5874d8de1f8d..a77ee026643d 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -113,7 +113,7 @@ struct debug_store {
  * Per register state.
  */
 struct er_account {
-	raw_spinlock_t		lock;	/* per-core: protect structure */
+	raw_spinlock_t      lock;	/* per-core: protect structure */
 	u64                 config;	/* extra MSR config */
 	u64                 reg;	/* extra MSR number */
 	atomic_t            ref;	/* reference count */
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 9b7cf5c28f5f..85f854b98a9d 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -112,7 +112,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 	for (; stack < stack_info.end; stack++) {
 		unsigned long real_addr;
 		int reliable = 0;
-		unsigned long addr = *stack;
+		unsigned long addr = READ_ONCE_NOCHECK(*stack);
 		unsigned long *ret_addr_p =
 			unwind_get_return_address_ptr(&state);
 
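Both this hunk and the unwind_guess.c one below exist because the stack dumper deliberately reads stack slots that KASAN considers out of bounds; READ_ONCE_NOCHECK() performs the load through an uninstrumented helper so no report fires. A userspace sketch of the underlying READ_ONCE() idiom (a single volatile load the compiler cannot tear or re-read); the _NOCHECK variant differs only in skipping sanitizer instrumentation:

#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static unsigned long stack_words[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };

int main(void)
{
	/* The volatile cast forces exactly one real load per slot. */
	for (unsigned long *p = stack_words; p < stack_words + 4; p++)
		printf("slot %td = %#lx\n", p - stack_words, READ_ONCE(*p));
	return 0;
}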
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 47004010ad5d..ebb4e95fbd74 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -521,14 +521,14 @@ void fpu__clear(struct fpu *fpu)
 {
 	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
 
-	if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
-		/* FPU state will be reallocated lazily at the first use. */
-		fpu__drop(fpu);
-	} else {
-		if (!fpu->fpstate_active) {
-			fpu__activate_curr(fpu);
-			user_fpu_begin();
-		}
+	fpu__drop(fpu);
+
+	/*
+	 * Make sure fpstate is cleared and initialized.
+	 */
+	if (static_cpu_has(X86_FEATURE_FPU)) {
+		fpu__activate_curr(fpu);
+		user_fpu_begin();
 		copy_init_fpstate_to_fpregs();
 	}
 }
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b6b2f0264af3..2dabea46f039 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -665,14 +665,17 @@ __PAGE_ALIGNED_BSS
 initial_pg_pmd:
 	.fill 1024*KPMDS,4,0
 #else
-ENTRY(initial_page_table)
+.globl initial_page_table
+initial_page_table:
 	.fill 1024,4,0
 #endif
 initial_pg_fixmap:
 	.fill 1024,4,0
-ENTRY(empty_zero_page)
+.globl empty_zero_page
+empty_zero_page:
 	.fill 4096,1,0
-ENTRY(swapper_pg_dir)
+.globl swapper_pg_dir
+swapper_pg_dir:
 	.fill 1024,4,0
 EXPORT_SYMBOL(empty_zero_page)
 
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
index 764a29f84de7..85195d447a92 100644
--- a/arch/x86/kernel/sysfb_simplefb.c
+++ b/arch/x86/kernel/sysfb_simplefb.c
@@ -66,13 +66,36 @@ __init int create_simplefb(const struct screen_info *si,
 {
 	struct platform_device *pd;
 	struct resource res;
-	unsigned long len;
+	u64 base, size;
+	u32 length;
 
-	/* don't use lfb_size as it may contain the whole VMEM instead of only
-	 * the part that is occupied by the framebuffer */
-	len = mode->height * mode->stride;
-	len = PAGE_ALIGN(len);
-	if (len > (u64)si->lfb_size << 16) {
+	/*
+	 * If the 64BIT_BASE capability is set, ext_lfb_base will contain the
+	 * upper half of the base address. Assemble the address, then make sure
+	 * it is valid and we can actually access it.
+	 */
+	base = si->lfb_base;
+	if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+		base |= (u64)si->ext_lfb_base << 32;
+	if (!base || (u64)(resource_size_t)base != base) {
+		printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Don't use lfb_size as IORESOURCE size, since it may contain the
+	 * entire VMEM, and thus require huge mappings. Use just the part we
+	 * need, that is, the part where the framebuffer is located. But verify
+	 * that it does not exceed the advertised VMEM.
+	 * Note that in case of VBE, the lfb_size is shifted by 16 bits for
+	 * historical reasons.
+	 */
+	size = si->lfb_size;
+	if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
+		size <<= 16;
+	length = mode->height * mode->stride;
+	length = PAGE_ALIGN(length);
+	if (length > size) {
 		printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
 		return -EINVAL;
 	}
@@ -81,8 +104,8 @@ __init int create_simplefb(const struct screen_info *si,
 	memset(&res, 0, sizeof(res));
 	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 	res.name = simplefb_resname;
-	res.start = si->lfb_base;
-	res.end = si->lfb_base + len - 1;
+	res.start = base;
+	res.end = res.start + length - 1;
 	if (res.end <= res.start)
 		return -EINVAL;
 
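The heart of the change is the base/size validation. A userspace sketch of just that logic, with struct screen_info fields flattened into locals and made-up values; VIDEO_CAPABILITY_64BIT_BASE mirrors the kernel constant, and resource_size_t is faked as 32-bit so the truncation check can be seen firing:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t resource_size_t;	/* pretend a 32-bit platform */

#define VIDEO_CAPABILITY_64BIT_BASE (1 << 1)

int main(void)
{
	uint32_t lfb_base = 0xe0000000, ext_lfb_base = 0x1;
	uint16_t capabilities = VIDEO_CAPABILITY_64BIT_BASE;

	/* Assemble the 64-bit address from the two 32-bit halves. */
	uint64_t base = lfb_base;
	if (capabilities & VIDEO_CAPABILITY_64BIT_BASE)
		base |= (uint64_t)ext_lfb_base << 32;

	/* The cast round-trip rejects addresses this platform can't map. */
	if (!base || (uint64_t)(resource_size_t)base != base)
		printf("sysfb: inaccessible VRAM base %#llx\n",
		       (unsigned long long)base);
	else
		printf("sysfb: usable VRAM base %#llx\n",
		       (unsigned long long)base);
	return 0;
}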
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c
index 2d721e533cf4..b80e8bf43cc6 100644
--- a/arch/x86/kernel/unwind_guess.c
+++ b/arch/x86/kernel/unwind_guess.c
@@ -7,11 +7,13 @@
 
 unsigned long unwind_get_return_address(struct unwind_state *state)
 {
+	unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
+
 	if (unwind_done(state))
 		return 0;
 
 	return ftrace_graph_ret_addr(state->task, &state->graph_idx,
-				     *state->sp, state->sp);
+				     addr, state->sp);
 }
 EXPORT_SYMBOL_GPL(unwind_get_return_address);
 
@@ -23,8 +25,10 @@ bool unwind_next_frame(struct unwind_state *state)
 		return false;
 
 	do {
+		unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
+
 		for (state->sp++; state->sp < info->end; state->sp++)
-			if (__kernel_text_address(*state->sp))
+			if (__kernel_text_address(addr))
 				return true;
 
 		state->sp = info->next_sp;
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 79ae939970d3..fcd06f7526de 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
 	if (early_recursion_flag > 2)
 		goto halt_loop;
 
-	if (regs->cs != __KERNEL_CS)
+	/*
+	 * Old CPUs leave the high bits of CS on the stack
+	 * undefined.  I'm not sure which CPUs do this, but at least
+	 * the 486 DX works this way.
+	 */
+	if ((regs->cs & 0xFFFF) != __KERNEL_CS)
 		goto fail;
 
 	/*
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index 429d08be7848..dd6cfa4ad3ac 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
 obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
 # MISC Devices
 obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
-obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index de734134bc8d..3f1f1c77d090 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -1,5 +1,5 @@
 /*
- * platform_wdt.c: Watchdog platform library file
+ * Intel Merrifield watchdog platform device library file
  *
  * (C) Copyright 2014 Intel Corporation
  * Author: David Cohen <david.a.cohen@linux.intel.com>
@@ -14,7 +14,9 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/platform_data/intel-mid_wdt.h>
+
 #include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
 #include <asm/io_apic.h>
 
 #define TANGIER_EXT_TIMER0_MSI 15
@@ -50,14 +52,34 @@ static struct intel_mid_wdt_pdata tangier_pdata = {
 	.probe = tangier_probe,
 };
 
-static int __init register_mid_wdt(void)
+static int wdt_scu_status_change(struct notifier_block *nb,
+				 unsigned long code, void *data)
 {
-	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
-		wdt_dev.dev.platform_data = &tangier_pdata;
-		return platform_device_register(&wdt_dev);
+	if (code == SCU_DOWN) {
+		platform_device_unregister(&wdt_dev);
+		return 0;
 	}
 
-	return -ENODEV;
+	return platform_device_register(&wdt_dev);
 }
 
+static struct notifier_block wdt_scu_notifier = {
+	.notifier_call	= wdt_scu_status_change,
+};
+
+static int __init register_mid_wdt(void)
+{
+	if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+		return -ENODEV;
+
+	wdt_dev.dev.platform_data = &tangier_pdata;
+
+	/*
+	 * We need to be sure that the SCU IPC is ready before watchdog device
+	 * can be registered:
+	 */
+	intel_scu_notifier_add(&wdt_scu_notifier);
+
+	return 0;
+}
+
 rootfs_initcall(register_mid_wdt);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 05e21b464433..d19b09cdf284 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -214,7 +214,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 
 	ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
 
-	if (!result) {
+	if (!result && !ctx->more) {
 		err = af_alg_wait_for_completion(
 			crypto_ahash_init(&ctx->req),
 			&ctx->completion);
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 52ce17a3dd63..c16c94f88733 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -68,10 +68,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
 
 	sg = scatterwalk_ffwd(tmp, sg, start);
 
-	if (sg_page(sg) == virt_to_page(buf) &&
-	    sg->offset == offset_in_page(buf))
-		return;
-
 	scatterwalk_start(&walk, sg);
 	scatterwalk_copychunks(buf, &walk, nbytes, out);
 	scatterwalk_done(&walk, out, 0);
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index edf3b96b3b73..1d99292e2039 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -685,7 +685,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
 	}
 
 	/* register clk-provider */
-	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
 
 	return;
 
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 0718e831475f..3b784b593afd 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -382,7 +382,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
 	}
 
 	/* register clk-provider */
-	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
 
 	return;
 
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
index 8802a2dd56ac..f674778fb3ac 100644
--- a/drivers/clk/clk-efm32gg.c
+++ b/drivers/clk/clk-efm32gg.c
@@ -82,6 +82,6 @@ static void __init efm32gg_cmu_init(struct device_node *np)
 	hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0",
 			"HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
 
-	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
 }
 CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 79596463e0d9..4a82a49cff5e 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -191,6 +191,8 @@ static struct clk_div_table axi_div_table[] = {
 static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu",
 			   0x050, 0, 3, axi_div_table, 0);
 
+#define SUN6I_A31_AHB1_REG	0x054
+
 static const char * const ahb1_parents[] = { "osc32k", "osc24M",
 					     "axi", "pll-periph" };
 
@@ -1230,6 +1232,16 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
 	val &= BIT(16);
 	writel(val, reg + SUN6I_A31_PLL_MIPI_REG);
 
+	/* Force AHB1 to PLL6 / 3 */
+	val = readl(reg + SUN6I_A31_AHB1_REG);
+	/* set PLL6 pre-div = 3 */
+	val &= ~GENMASK(7, 6);
+	val |= 0x2 << 6;
+	/* select PLL6 / pre-div */
+	val &= ~GENMASK(13, 12);
+	val |= 0x3 << 12;
+	writel(val, reg + SUN6I_A31_AHB1_REG);
+
 	sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
 
 	ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
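The AHB1 forcing above is a plain read-modify-write of two register fields. A userspace sketch of the pattern, with a GENMASK() work-alike and a made-up initial register value; the field positions (pre-div at bits 7:6, clock source mux at bits 13:12) are taken from the hunk:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

int main(void)
{
	uint32_t val = 0x00003040;	/* made-up AHB1 register contents */

	/* set PLL6 pre-div = 3 (field encoding 0x2) */
	val &= ~GENMASK(7, 6);
	val |= 0x2 << 6;

	/* select PLL6 / pre-div as the AHB1 source (mux index 0x3) */
	val &= ~GENMASK(13, 12);
	val |= 0x3 << 12;

	printf("AHB1 reg = %#010x\n", val);	/* prints 0x00003080 */
	return 0;
}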
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 838b22aa8b67..f2c9274b8bd5 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -373,7 +373,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req)
 	else
 		calcp = 3;
 
-	calcm = (req->parent_rate >> calcp) - 1;
+	calcm = (div >> calcp) - 1;
 
 	req->rate = (req->parent_rate >> calcp) / (calcm + 1);
 	req->m = calcm;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8aa769a2d919..91b70bc46e7f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4010,7 +4010,10 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
 		SAM_STAT_CHECK_CONDITION;
 }
 
-
+static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+{
+	return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+}
 
 /**
  * scsih_qcmd - main scsi request entry point
@@ -4038,6 +4041,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 	if (ioc->logging_level & MPT_DEBUG_SCSI)
 		scsi_print_command(scmd);
 
+	/*
+	 * Lock the device for any subsequent command until command is
+	 * done.
+	 */
+	if (ata_12_16_cmd(scmd))
+		scsi_internal_device_block(scmd->device);
+
 	sas_device_priv_data = scmd->device->hostdata;
 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
 		scmd->result = DID_NO_CONNECT << 16;
@@ -4613,6 +4623,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 	if (scmd == NULL)
 		return 1;
 
+	if (ata_12_16_cmd(scmd))
+		scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
+
 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 
 	if (mpi_reply == NULL) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 567fa080e261..56d6142852a5 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1456,15 +1456,20 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
 		sp = req->outstanding_cmds[cnt];
 		if (sp) {
-			/* Get a reference to the sp and drop the lock.
-			 * The reference ensures this sp->done() call
-			 * - and not the call in qla2xxx_eh_abort() -
-			 * ends the SCSI command (with result 'res').
+			/* Don't abort commands in adapter during EEH
+			 * recovery as it's not accessible/responding.
 			 */
-			sp_get(sp);
-			spin_unlock_irqrestore(&ha->hardware_lock, flags);
-			qla2xxx_eh_abort(GET_CMD_SP(sp));
-			spin_lock_irqsave(&ha->hardware_lock, flags);
+			if (!ha->flags.eeh_busy) {
+				/* Get a reference to the sp and drop the lock.
+				 * The reference ensures this sp->done() call
+				 * - and not the call in qla2xxx_eh_abort() -
+				 * ends the SCSI command (with result 'res').
+				 */
+				sp_get(sp);
+				spin_unlock_irqrestore(&ha->hardware_lock, flags);
+				qla2xxx_eh_abort(GET_CMD_SP(sp));
+				spin_lock_irqsave(&ha->hardware_lock, flags);
+			}
 			req->outstanding_cmds[cnt] = NULL;
 			sp->done(vha, sp, res);
 		}
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 7a223074df3d..afada655f861 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -669,9 +669,16 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
 	.set_cur_state = powerclamp_set_cur_state,
 };
 
+static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
+	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
+
 static int __init powerclamp_probe(void)
 {
-	if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+
+	if (!x86_match_cpu(intel_powerclamp_ids)) {
 		pr_err("CPU does not support MWAIT");
 		return -ENODEV;
 	}
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 532d8e242d4d..484bebc20bca 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -197,7 +197,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
 	}
 
 	ret = -EPROTONOSUPPORT;
-	if (minorversion == 0)
+	if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0)
 		ret = nfs4_callback_up_net(serv, net);
 	else if (xprt->ops->bc_up)
 		ret = xprt->ops->bc_up(serv, net);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 9b3a82abab07..1452177c822d 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -542,6 +542,13 @@ static inline bool nfs4_valid_open_stateid(const struct nfs4_state *state)
 	return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0;
 }
 
+static inline bool nfs4_state_match_open_stateid_other(const struct nfs4_state *state,
+		const nfs4_stateid *stateid)
+{
+	return test_bit(NFS_OPEN_STATE, &state->flags) &&
+		nfs4_stateid_match_other(&state->open_stateid, stateid);
+}
+
 #else
 
 #define nfs4_close_state(a, b) do { } while (0)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 7897826d7c51..241da19b7da4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1451,7 +1451,6 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
 }
 
 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
-		nfs4_stateid *arg_stateid,
 		nfs4_stateid *stateid, fmode_t fmode)
 {
 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
@@ -1469,10 +1468,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
 	}
 	if (stateid == NULL)
 		return;
-	/* Handle races with OPEN */
-	if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
-	    (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
-	    !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
+	/* Handle OPEN+OPEN_DOWNGRADE races */
+	if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
+	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
 		nfs_resync_open_stateid_locked(state);
 		return;
 	}
@@ -1486,7 +1484,9 @@ static void nfs_clear_open_stateid(struct nfs4_state *state,
 	nfs4_stateid *stateid, fmode_t fmode)
 {
 	write_seqlock(&state->seqlock);
-	nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
+	/* Ignore, if the CLOSE argment doesn't match the current stateid */
+	if (nfs4_state_match_open_stateid_other(state, arg_stateid))
+		nfs_clear_open_stateid_locked(state, stateid, fmode);
 	write_sequnlock(&state->seqlock);
 	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
 		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
@@ -2564,15 +2564,23 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
 static int nfs41_check_expired_locks(struct nfs4_state *state)
 {
 	int status, ret = NFS_OK;
-	struct nfs4_lock_state *lsp;
+	struct nfs4_lock_state *lsp, *prev = NULL;
 	struct nfs_server *server = NFS_SERVER(state->inode);
 
 	if (!test_bit(LK_STATE_IN_USE, &state->flags))
 		goto out;
+
+	spin_lock(&state->state_lock);
 	list_for_each_entry(lsp, &state->lock_states, ls_locks) {
 		if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
 			struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
 
+			atomic_inc(&lsp->ls_count);
+			spin_unlock(&state->state_lock);
+
+			nfs4_put_lock_state(prev);
+			prev = lsp;
+
 			status = nfs41_test_and_free_expired_stateid(server,
 					&lsp->ls_stateid,
 					cred);
@@ -2585,10 +2593,14 @@ static int nfs41_check_expired_locks(struct nfs4_state *state)
 				set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
 			} else if (status != NFS_OK) {
 				ret = status;
-				break;
+				nfs4_put_lock_state(prev);
+				goto out;
 			}
+			spin_lock(&state->state_lock);
 		}
-	};
+	}
+	spin_unlock(&state->state_lock);
+	nfs4_put_lock_state(prev);
 out:
 	return ret;
 }
@@ -3122,7 +3134,8 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
 	} else if (is_rdwr)
 		calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
 
-	if (!nfs4_valid_open_stateid(state))
+	if (!nfs4_valid_open_stateid(state) ||
+	    test_bit(NFS_OPEN_STATE, &state->flags) == 0)
 		call_close = 0;
 	spin_unlock(&state->owner->so_lock);
 
@@ -5569,6 +5582,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
 	switch (task->tk_status) {
 	case 0:
 		renew_lease(data->res.server, data->timestamp);
+		break;
 	case -NFS4ERR_ADMIN_REVOKED:
 	case -NFS4ERR_DELEG_REVOKED:
 	case -NFS4ERR_EXPIRED:
@@ -5579,8 +5593,6 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
 	case -NFS4ERR_OLD_STATEID:
 	case -NFS4ERR_STALE_STATEID:
 		task->tk_status = 0;
-		if (data->roc)
-			pnfs_roc_set_barrier(data->inode, data->roc_barrier);
 		break;
 	default:
 		if (nfs4_async_handle_error(task, data->res.server,
@@ -5590,6 +5602,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
 		}
 	}
 	data->rpc_status = task->tk_status;
+	if (data->roc && data->rpc_status == 0)
+		pnfs_roc_set_barrier(data->inode, data->roc_barrier);
 }
 
 static void nfs4_delegreturn_release(void *calldata)
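The one-line "break;" added to the success case in nfs4_delegreturn_done() is the classic missing-break fix: before it, a successful DELEGRETURN fell through into the revoked/expired stateid cases. A minimal demonstration of that control-flow bug class (hypothetical status codes, not the NFS ones):

#include <stdio.h>

enum { OK = 0, ERR_EXPIRED = -1 };

static void handle(int status, int fixed)
{
	switch (status) {
	case OK:
		printf("status %d: renew lease\n", status);
		if (fixed)
			break;		/* the fix: stop here on success */
		/* fall through */
	case ERR_EXPIRED:
		printf("status %d: treat stateid as revoked\n", status);
		break;
	}
}

int main(void)
{
	handle(OK, 0);	/* buggy: prints both messages */
	handle(OK, 1);	/* fixed: prints only the renew message */
	return 0;
}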
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5f4281ec5f72..0959c9661662 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1547,6 +1547,7 @@ restart:
 		ssleep(1);
 	case -NFS4ERR_ADMIN_REVOKED:
 	case -NFS4ERR_STALE_STATEID:
+	case -NFS4ERR_OLD_STATEID:
 	case -NFS4ERR_BAD_STATEID:
 	case -NFS4ERR_RECLAIM_BAD:
 	case -NFS4ERR_RECLAIM_CONFLICT:
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 37261afbf16a..8863bdf582d5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2571,6 +2571,7 @@ extern void sched_autogroup_create_attach(struct task_struct *p);
 extern void sched_autogroup_detach(struct task_struct *p);
 extern void sched_autogroup_fork(struct signal_struct *sig);
 extern void sched_autogroup_exit(struct signal_struct *sig);
+extern void sched_autogroup_exit_task(struct task_struct *p);
 #ifdef CONFIG_PROC_FS
 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
@@ -2580,6 +2581,7 @@ static inline void sched_autogroup_create_attach(struct task_struct *p) { }
 static inline void sched_autogroup_detach(struct task_struct *p) { }
 static inline void sched_autogroup_fork(struct signal_struct *sig) { }
 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+static inline void sched_autogroup_exit_task(struct task_struct *p) { }
 #endif
 
 extern int yield_to(struct task_struct *p, bool preempt);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0e292132efac..6ee1febdf6ff 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -902,6 +902,17 @@ list_update_cgroup_event(struct perf_event *event,
 	 * this will always be called from the right CPU.
 	 */
 	cpuctx = __get_cpu_context(ctx);
+
+	/* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */
+	if (perf_cgroup_from_task(current, ctx) != event->cgrp) {
+		/*
+		 * We are removing the last cpu event in this context.
+		 * If that event is not active in this cpu, cpuctx->cgrp
+		 * should've been cleared by perf_cgroup_switch.
+		 */
+		WARN_ON_ONCE(!add && cpuctx->cgrp);
+		return;
+	}
 	cpuctx->cgrp = add ? event->cgrp : NULL;
 }
 
@@ -8018,6 +8029,7 @@ restart:
  * if <size> is not specified, the range is treated as a single address.
  */
 enum {
+	IF_ACT_NONE = -1,
 	IF_ACT_FILTER,
 	IF_ACT_START,
 	IF_ACT_STOP,
@@ -8041,6 +8053,7 @@ static const match_table_t if_tokens = {
 	{ IF_SRC_KERNEL,	"%u/%u" },
 	{ IF_SRC_FILEADDR,	"%u@%s" },
 	{ IF_SRC_KERNELADDR,	"%u" },
+	{ IF_ACT_NONE,		NULL },
 };
 
 /*
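The { IF_ACT_NONE, NULL } entry matters because match_token()-style parsers walk the table until they hit a NULL pattern; without a terminator an unmatched string walks off the end of the array. A userspace analogue with hypothetical tokens (the kernel's match_table_t behaves the same way):

#include <stdio.h>
#include <string.h>

struct match_token {
	int token;
	const char *pattern;
};

enum { ACT_NONE = -1, ACT_FILTER, ACT_START, ACT_STOP };

static const struct match_token tokens[] = {
	{ ACT_FILTER,	"filter" },
	{ ACT_START,	"start" },
	{ ACT_STOP,	"stop" },
	{ ACT_NONE,	NULL },		/* terminator: stops the scan */
};

static int lookup(const char *s)
{
	const struct match_token *t;

	for (t = tokens; t->pattern; t++)	/* NULL pattern ends the loop */
		if (!strcmp(s, t->pattern))
			return t->token;
	return ACT_NONE;
}

int main(void)
{
	printf("start -> %d\n", lookup("start"));	/* 1 */
	printf("bogus -> %d\n", lookup("bogus"));	/* -1, no overrun */
	return 0;
}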
diff --git a/kernel/exit.c b/kernel/exit.c
index 9d68c45ebbe3..3076f3089919 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -836,6 +836,7 @@ void __noreturn do_exit(long code)
 	 */
 	perf_event_exit_task(tsk);
 
+	sched_autogroup_exit_task(tsk);
 	cgroup_exit(tsk);
 
 	/*
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index d8d7a153b711..6e6cab7ac12f 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -65,8 +65,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
 
 static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 {
-	if (!rt_mutex_has_waiters(lock))
-		clear_rt_mutex_waiters(lock);
+	unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+	if (rt_mutex_has_waiters(lock))
+		return;
+
+	/*
+	 * The rbtree has no waiters enqueued, now make sure that the
+	 * lock->owner still has the waiters bit set, otherwise the
+	 * following can happen:
+	 *
+	 * CPU 0	CPU 1		CPU2
+	 * l->owner=T1
+	 *		rt_mutex_lock(l)
+	 *		lock(l->lock)
+	 *		l->owner = T1 | HAS_WAITERS;
+	 *		enqueue(T2)
+	 *		boost()
+	 *		  unlock(l->lock)
+	 *		block()
+	 *
+	 *				rt_mutex_lock(l)
+	 *				lock(l->lock)
+	 *				l->owner = T1 | HAS_WAITERS;
+	 *				enqueue(T3)
+	 *				boost()
+	 *				  unlock(l->lock)
+	 *				block()
+	 *		signal(->T2)	signal(->T3)
+	 *		lock(l->lock)
+	 *		dequeue(T2)
+	 *		deboost()
+	 *		  unlock(l->lock)
+	 *				lock(l->lock)
+	 *				dequeue(T3)
+	 *				 ==> wait list is empty
+	 *				deboost()
+	 *				 unlock(l->lock)
+	 *		lock(l->lock)
+	 *		fixup_rt_mutex_waiters()
+	 *		  if (wait_list_empty(l) {
+	 *		    owner = l->owner & ~HAS_WAITERS;
+	 *		    l->owner = owner
+	 *		      ==> l->owner = T1
+	 *		  }
+	 *				lock(l->lock)
+	 * rt_mutex_unlock(l)		fixup_rt_mutex_waiters()
+	 *				  if (wait_list_empty(l) {
+	 *				    owner = l->owner & ~HAS_WAITERS;
+	 * cmpxchg(l->owner, T1, NULL)
+	 *  ===> Success (l->owner = NULL)
+	 *
+	 *				    l->owner = owner
+	 *				      ==> l->owner = T1
+	 *				  }
+	 *
+	 * With the check for the waiter bit in place T3 on CPU2 will not
+	 * overwrite. All tasks fiddling with the waiters bit are
+	 * serialized by l->lock, so nothing else can modify the waiters
+	 * bit. If the bit is set then nothing can change l->owner either
+	 * so the simple RMW is safe. The cmpxchg() will simply fail if it
+	 * happens in the middle of the RMW because the waiters bit is
+	 * still set.
+	 */
+	owner = READ_ONCE(*p);
+	if (owner & RT_MUTEX_HAS_WAITERS)
+		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
 /*
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 4f5f83c7d2d3..e317e1cbb3eb 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -75,8 +75,9 @@ task_top_pi_waiter(struct task_struct *p)
 
 static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
 {
-	return (struct task_struct *)
-		((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
+	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+
+	return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
 }
 
 /*
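Both rtmutex fixes revolve around the owner-word encoding: the owner task pointer and the "has waiters" flag share one machine word, with the flag in the low bit (task pointers are aligned, so the low bits are free). A userspace sketch of that tagging scheme; RT_MUTEX_HAS_WAITERS matches the kernel's value, the task struct is a stand-in:

#include <stdint.h>
#include <stdio.h>

#define RT_MUTEX_HAS_WAITERS	1UL

struct task { char name[8]; };

int main(void)
{
	static struct task t1 = { "T1" };	/* static => suitably aligned */
	uintptr_t owner;

	/* lock owned by T1 with waiters queued: pointer | flag */
	owner = (uintptr_t)&t1 | RT_MUTEX_HAS_WAITERS;

	/* rt_mutex_owner() analogue: mask the flag off to get the pointer */
	struct task *p = (struct task *)(owner & ~RT_MUTEX_HAS_WAITERS);
	printf("owner=%s waiters=%lu\n", p->name,
	       (unsigned long)(owner & RT_MUTEX_HAS_WAITERS));

	/* last waiter gone: the fixup clears only the flag, keeps the owner */
	owner &= ~RT_MUTEX_HAS_WAITERS;
	printf("owner=%s waiters=%lu\n", ((struct task *)owner)->name,
	       (unsigned long)(owner & RT_MUTEX_HAS_WAITERS));
	return 0;
}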
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index a5d966cb8891..f1c8fd566246 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -111,10 +111,13 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 {
 	if (tg != &root_task_group)
 		return false;
-
 	/*
-	 * We can only assume the task group can't go away on us if
-	 * autogroup_move_group() can see us on ->thread_group list.
+	 * If we race with autogroup_move_group() the caller can use the old
+	 * value of signal->autogroup but in this case sched_move_task() will
+	 * be called again before autogroup_kref_put().
+	 *
+	 * However, there is no way sched_autogroup_exit_task() could tell us
+	 * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case.
 	 */
 	if (p->flags & PF_EXITING)
 		return false;
@@ -122,6 +125,16 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 	return true;
 }
 
+void sched_autogroup_exit_task(struct task_struct *p)
+{
+	/*
+	 * We are going to call exit_notify() and autogroup_move_group() can't
+	 * see this thread after that: we can no longer use signal->autogroup.
+	 * See the PF_EXITING check in task_wants_autogroup().
+	 */
+	sched_move_task(p);
+}
+
 static void
 autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 {
@@ -138,13 +151,20 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 	}
 
 	p->signal->autogroup = autogroup_kref_get(ag);
-
-	if (!READ_ONCE(sysctl_sched_autogroup_enabled))
-		goto out;
-
+	/*
+	 * We can't avoid sched_move_task() after we changed signal->autogroup,
+	 * this process can already run with task_group() == prev->tg or we can
+	 * race with cgroup code which can read autogroup = prev under rq->lock.
+	 * In the latter case for_each_thread() can not miss a migrating thread,
+	 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
+	 * can't be removed from thread list, we hold ->siglock.
+	 *
+	 * If an exiting thread was already removed from thread list we rely on
+	 * sched_autogroup_exit_task().
+	 */
 	for_each_thread(p, t)
 		sched_move_task(t);
-out:
+
 	unlock_task_sighand(p, &flags);
 	autogroup_kref_put(prev);
 }
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 872a15a2a637..f3a217ea0388 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -980,23 +980,23 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 #ifndef CONFIG_PROVE_LOCKING
 	if (expected == FAILURE && debug_locks) {
 		expected_testcase_failures++;
-		printk("failed|");
+		pr_cont("failed|");
 	}
 	else
 #endif
 	if (debug_locks != expected) {
 		unexpected_testcase_failures++;
-		printk("FAILED|");
+		pr_cont("FAILED|");
 
 		dump_stack();
 	} else {
 		testcase_successes++;
-		printk(" ok  |");
+		pr_cont(" ok  |");
 	}
 	testcase_total++;
 
 	if (debug_locks_verbose)
-		printk(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
+		pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
 			lockclass_mask, debug_locks, expected);
 	/*
 	 * Some tests (e.g. double-unlock) might corrupt the preemption
@@ -1021,26 +1021,26 @@ static inline void print_testname(const char *testname)
 #define DO_TESTCASE_1(desc, name, nr)				\
 	print_testname(desc"/"#nr);				\
 	dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK);		\
-	printk("\n");
+	pr_cont("\n");
 
 #define DO_TESTCASE_1B(desc, name, nr)				\
 	print_testname(desc"/"#nr);				\
 	dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK);		\
-	printk("\n");
+	pr_cont("\n");
 
 #define DO_TESTCASE_3(desc, name, nr)				\
 	print_testname(desc"/"#nr);				\
 	dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN);	\
 	dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK);	\
 	dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK);	\
-	printk("\n");
+	pr_cont("\n");
 
 #define DO_TESTCASE_3RW(desc, name, nr)				\
 	print_testname(desc"/"#nr);				\
 	dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\
 	dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK);	\
 	dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK);	\
-	printk("\n");
+	pr_cont("\n");
 
 #define DO_TESTCASE_6(desc, name)				\
 	print_testname(desc);					\
@@ -1050,7 +1050,7 @@ static inline void print_testname(const char *testname)
 	dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX);		\
 	dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM);		\
 	dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM);		\
-	printk("\n");
+	pr_cont("\n");
 
 #define DO_TESTCASE_6_SUCCESS(desc, name)			\
 	print_testname(desc);					\
@@ -1060,7 +1060,7 @@ static inline void print_testname(const char *testname)
 	dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX);		\
 	dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM);		\
 	dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM);		\
-	printk("\n");
+	pr_cont("\n");
 
 /*
  * 'read' variant: rlocks must not trigger.
@@ -1073,7 +1073,7 @@ static inline void print_testname(const char *testname)
 	dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX);		\
 	dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM);		\
 	dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM);		\
-	printk("\n");
+	pr_cont("\n");
 
 #define DO_TESTCASE_2I(desc, name, nr)				\
 	DO_TESTCASE_1("hard-"desc, name##_hard, nr);		\
@@ -1726,25 +1726,25 @@ static void ww_tests(void)
 	dotest(ww_test_fail_acquire, SUCCESS, LOCKTYPE_WW);
 	dotest(ww_test_normal, SUCCESS, LOCKTYPE_WW);
 	dotest(ww_test_unneeded_slow, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("ww contexts mixing");
 	dotest(ww_test_two_contexts, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_diff_class, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("finishing ww context");
 	dotest(ww_test_context_done_twice, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_context_unlock_twice, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_context_fini_early, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_context_lock_after_done, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("locking mismatches");
 	dotest(ww_test_object_unlock_twice, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_object_lock_unbalanced, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_object_lock_stale_context, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("EDEADLK handling");
 	dotest(ww_test_edeadlk_normal, SUCCESS, LOCKTYPE_WW);
@@ -1757,11 +1757,11 @@ static void ww_tests(void)
 	dotest(ww_test_edeadlk_acquire_more_edeadlk_slow, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_edeadlk_acquire_wrong, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_edeadlk_acquire_wrong_slow, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("spinlock nest unlocked");
 	dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	printk("  -----------------------------------------------------\n");
 	printk("                                 |block | try  |context|\n");
@@ -1771,25 +1771,25 @@ static void ww_tests(void)
 	dotest(ww_test_context_block, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_context_try, SUCCESS, LOCKTYPE_WW);
 	dotest(ww_test_context_context, SUCCESS, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("try");
 	dotest(ww_test_try_block, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_try_try, SUCCESS, LOCKTYPE_WW);
 	dotest(ww_test_try_context, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("block");
 	dotest(ww_test_block_block, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_block_try, SUCCESS, LOCKTYPE_WW);
 	dotest(ww_test_block_context, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("spinlock");
 	dotest(ww_test_spin_block, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_spin_try, SUCCESS, LOCKTYPE_WW);
 	dotest(ww_test_spin_context, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 }
 
 void locking_selftest(void)
@@ -1829,32 +1829,32 @@ void locking_selftest(void)
 
 	printk("  --------------------------------------------------------------------------\n");
 	print_testname("recursive read-lock");
-	printk("             |");
+	pr_cont("             |");
 	dotest(rlock_AA1, SUCCESS, LOCKTYPE_RWLOCK);
-	printk("             |");
+	pr_cont("             |");
 	dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("recursive read-lock #2");
-	printk("             |");
+	pr_cont("             |");
 	dotest(rlock_AA1B, SUCCESS, LOCKTYPE_RWLOCK);
-	printk("             |");
+	pr_cont("             |");
 	dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("mixed read-write-lock");
-	printk("             |");
+	pr_cont("             |");
 	dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK);
-	printk("             |");
+	pr_cont("             |");
 	dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("mixed write-read-lock");
-	printk("             |");
+	pr_cont("             |");
 	dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK);
-	printk("             |");
+	pr_cont("             |");
 	dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM);
-	printk("\n");
+	pr_cont("\n");
 
 	printk("  --------------------------------------------------------------------------\n");
 