author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2013-10-15 15:44:33 -0400
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2013-10-15 15:44:33 -0400
commit    460aebac739df78b0e40a347934fdea377310577 (patch)
tree      6082a8c5b5ecee1b8c3437f337f0cefca930f75f
parent    4b0d3f0fde41a3c4454adb4d474618c23cfd4131 (diff)
parent    5d5a08003d3e678372e375d99c65a24e0d33d2f5 (diff)
Merge branches 'doc.2013.09.25b' and 'fixes.2013.09.23b' into HEAD
doc.2013.09.25b: Topic branch for documentation updates.
fixes.2013.09.23b: Topic branch for miscellaneous fixes.
-rw-r--r--   include/linux/rculist.h    23
-rw-r--r--   kernel/rcu.h                7
-rw-r--r--   kernel/rcupdate.c           2
-rw-r--r--   kernel/rcutiny.c           17
-rw-r--r--   kernel/rcutree.c           97
-rw-r--r--   kernel/rcutree_plugin.h    23
-rw-r--r--   mm/mlock.c                  1
7 files changed, 119 insertions, 51 deletions
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4106721c4e5e..45a0a9e81478 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -19,6 +19,21 @@
  */
 
 /*
+ * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
+ * @list: list to be initialized
+ *
+ * You should instead use INIT_LIST_HEAD() for normal initialization and
+ * cleanup tasks, when readers have no access to the list being initialized.
+ * However, if the list being initialized is visible to readers, you
+ * need to keep the compiler from being too mischievous.
+ */
+static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
+{
+	ACCESS_ONCE(list->next) = list;
+	ACCESS_ONCE(list->prev) = list;
+}
+
+/*
  * return the ->next pointer of a list_head in an rcu safe
  * way, we must not access it directly
  */
@@ -191,9 +206,13 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	if (list_empty(list))
 		return;
 
-	/* "first" and "last" tracking list, so initialize it. */
+	/*
+	 * "first" and "last" tracking list, so initialize it.  RCU readers
+	 * have access to this list, so we must use INIT_LIST_HEAD_RCU()
+	 * instead of INIT_LIST_HEAD().
+	 */
 
-	INIT_LIST_HEAD(list);
+	INIT_LIST_HEAD_RCU(list);
 
 	/*
 	 * At this point, the list body still points to the source list.
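
The point of INIT_LIST_HEAD_RCU() is that the two pointer stores stay single, untorn writes even when RCU readers may be traversing the list at the same time, which is exactly the situation in list_splice_init_rcu() above. A minimal sketch of the usage pattern follows; the shared_list structure, reset_shared_list() helper, and its lock are hypothetical and not part of this patch:

#include <linux/rculist.h>
#include <linux/spinlock.h>

struct shared_list {
	struct list_head head;		/* may be traversed by RCU readers */
	spinlock_t lock;		/* serializes updaters */
};

static void reset_shared_list(struct shared_list *s)
{
	spin_lock(&s->lock);
	/*
	 * Readers may still be walking s->head, so the pointer stores
	 * must not be torn or fused by the compiler: use
	 * INIT_LIST_HEAD_RCU() rather than plain INIT_LIST_HEAD().
	 */
	INIT_LIST_HEAD_RCU(&s->head);
	spin_unlock(&s->lock);
}
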
diff --git a/kernel/rcu.h b/kernel/rcu.h
index 77131966c4ad..7859a0a3951e 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@ -122,4 +122,11 @@ int rcu_jiffies_till_stall_check(void);
 
 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
 
+/*
+ * Strings used in tracepoints need to be exported via the
+ * tracing system such that tools like perf and trace-cmd can
+ * translate the string address pointers to actual text.
+ */
+#define TPS(x) tracepoint_string(x)
+
 #endif /* __LINUX_RCU_H */
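
This TPS() definition moves here from kernel/rcutree.c (see below) so that kernel/rcutiny.c can use it too. For reference, a hedged sketch of a call site, mirroring the rcutiny.c hunks that follow; the surrounding function is hypothetical and not part of this patch:

#include <linux/ftrace_event.h>		/* tracepoint_string() */
#include "rcu.h"			/* TPS() */
#include <trace/events/rcu.h>		/* trace_rcu_dyntick() */

static void example_trace_idle_entry(long long oldval, long long newval)
{
	/*
	 * tracepoint_string() emits "Start" into a dedicated section and
	 * registers it, so perf and trace-cmd can translate the recorded
	 * pointer back to the text "Start".
	 */
	trace_rcu_dyntick(TPS("Start"), oldval, newval);
}
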
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index b02a339836b4..3260a1074b48 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -298,7 +298,7 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
 #endif
 
 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
-int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
+static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
 
 module_param(rcu_cpu_stall_suppress, int, 0644);
 module_param(rcu_cpu_stall_timeout, int, 0644);
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 9ed6075dc562..e99eb5fb10af 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -35,6 +35,7 @@
 #include <linux/time.h>
 #include <linux/cpu.h>
 #include <linux/prefetch.h>
+#include <linux/ftrace_event.h>
 
 #ifdef CONFIG_RCU_TRACE
 #include <trace/events/rcu.h>
@@ -58,16 +59,17 @@ static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
 static void rcu_idle_enter_common(long long newval)
 {
 	if (newval) {
-		RCU_TRACE(trace_rcu_dyntick("--=",
+		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
 					    rcu_dynticks_nesting, newval));
 		rcu_dynticks_nesting = newval;
 		return;
 	}
-	RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting, newval));
+	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
+				    rcu_dynticks_nesting, newval));
 	if (!is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
-		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
+		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
 					    rcu_dynticks_nesting, newval));
 		ftrace_dump(DUMP_ALL);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
@@ -120,15 +122,15 @@ EXPORT_SYMBOL_GPL(rcu_irq_exit);
 static void rcu_idle_exit_common(long long oldval)
 {
 	if (oldval) {
-		RCU_TRACE(trace_rcu_dyntick("++=",
+		RCU_TRACE(trace_rcu_dyntick(TPS("++="),
 					    oldval, rcu_dynticks_nesting));
 		return;
 	}
-	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
+	RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
 	if (!is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
-		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
+		RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
 					    oldval, rcu_dynticks_nesting));
 		ftrace_dump(DUMP_ALL);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
@@ -304,7 +306,8 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 		RCU_TRACE(cb_count++);
 	}
 	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
-	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
+	RCU_TRACE(trace_rcu_batch_end(rcp->name,
+				      cb_count, 0, need_resched(),
 				      is_idle_task(current),
 				      false));
 }
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 32618b3fe4e6..62aab5ceefe9 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -61,13 +61,6 @@
 
 #include "rcu.h"
 
-/*
- * Strings used in tracepoints need to be exported via the
- * tracing system such that tools like perf and trace-cmd can
- * translate the string address pointers to actual text.
- */
-#define TPS(x) tracepoint_string(x)
-
 /* Data structures. */
 
 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
@@ -222,7 +215,7 @@ void rcu_note_context_switch(int cpu)
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
-DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
 	.dynticks = ATOMIC_INIT(1),
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
@@ -371,7 +364,8 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 {
 	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
-		struct task_struct *idle = idle_task(smp_processor_id());
+		struct task_struct *idle __maybe_unused =
+			idle_task(smp_processor_id());
 
 		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
 		ftrace_dump(DUMP_ORIG);
@@ -407,7 +401,7 @@ static void rcu_eqs_enter(bool user)
 	long long oldval;
 	struct rcu_dynticks *rdtp;
 
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
 	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
@@ -435,7 +429,7 @@ void rcu_idle_enter(void)
 
 	local_irq_save(flags);
 	rcu_eqs_enter(false);
-	rcu_sysidle_enter(&__get_cpu_var(rcu_dynticks), 0);
+	rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -478,7 +472,7 @@ void rcu_irq_exit(void)
 	struct rcu_dynticks *rdtp;
 
 	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting--;
 	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
@@ -508,7 +502,8 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
 	rcu_cleanup_after_idle(smp_processor_id());
 	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
-		struct task_struct *idle = idle_task(smp_processor_id());
+		struct task_struct *idle __maybe_unused =
+			idle_task(smp_processor_id());
 
 		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
 				  oldval, rdtp->dynticks_nesting);
@@ -528,7 +523,7 @@ static void rcu_eqs_exit(bool user)
 	struct rcu_dynticks *rdtp;
 	long long oldval;
 
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(oldval < 0);
 	if (oldval & DYNTICK_TASK_NEST_MASK)
@@ -555,7 +550,7 @@ void rcu_idle_exit(void)
 
 	local_irq_save(flags);
 	rcu_eqs_exit(false);
-	rcu_sysidle_exit(&__get_cpu_var(rcu_dynticks), 0);
+	rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -599,7 +594,7 @@ void rcu_irq_enter(void)
 	long long oldval;
 
 	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting++;
 	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
@@ -620,7 +615,7 @@ void rcu_irq_enter(void)
  */
 void rcu_nmi_enter(void)
 {
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	if (rdtp->dynticks_nmi_nesting == 0 &&
 	    (atomic_read(&rdtp->dynticks) & 0x1))
@@ -642,7 +637,7 @@ void rcu_nmi_enter(void)
  */
 void rcu_nmi_exit(void)
 {
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	if (rdtp->dynticks_nmi_nesting == 0 ||
 	    --rdtp->dynticks_nmi_nesting != 0)
@@ -665,7 +660,7 @@ int rcu_is_cpu_idle(void)
 	int ret;
 
 	preempt_disable();
-	ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+	ret = (atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1) == 0;
 	preempt_enable();
 	return ret;
 }
@@ -703,7 +698,7 @@ bool rcu_lockdep_current_cpu_online(void)
 	if (in_nmi())
 		return 1;
 	preempt_disable();
-	rdp = &__get_cpu_var(rcu_sched_data);
+	rdp = this_cpu_ptr(&rcu_sched_data);
 	rnp = rdp->mynode;
 	ret = (rdp->grpmask & rnp->qsmaskinit) ||
 	      !rcu_scheduler_fully_active;
@@ -723,7 +718,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
  */
 static int rcu_is_cpu_rrupt_from_idle(void)
 {
-	return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
+	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
 }
 
 /*
@@ -802,8 +797,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
-	rsp->gp_start = jiffies;
-	rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
+	unsigned long j = ACCESS_ONCE(jiffies);
+
+	rsp->gp_start = j;
+	smp_wmb(); /* Record start time before stall time. */
+	rsp->jiffies_stall = j + rcu_jiffies_till_stall_check();
 }
 
 /*
@@ -932,17 +930,48 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
+	unsigned long completed;
+	unsigned long gpnum;
+	unsigned long gps;
 	unsigned long j;
 	unsigned long js;
 	struct rcu_node *rnp;
 
-	if (rcu_cpu_stall_suppress)
+	if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
 		return;
 	j = ACCESS_ONCE(jiffies);
+
+	/*
+	 * Lots of memory barriers to reject false positives.
+	 *
+	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
+	 * then rsp->gp_start, and finally rsp->completed.  These values
+	 * are updated in the opposite order with memory barriers (or
+	 * equivalent) during grace-period initialization and cleanup.
+	 * Now, a false positive can occur if we get an new value of
+	 * rsp->gp_start and a old value of rsp->jiffies_stall.  But given
+	 * the memory barriers, the only way that this can happen is if one
+	 * grace period ends and another starts between these two fetches.
+	 * Detect this by comparing rsp->completed with the previous fetch
+	 * from rsp->gpnum.
+	 *
+	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
+	 * and rsp->gp_start suffice to forestall false positives.
+	 */
+	gpnum = ACCESS_ONCE(rsp->gpnum);
+	smp_rmb(); /* Pick up ->gpnum first... */
 	js = ACCESS_ONCE(rsp->jiffies_stall);
+	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
+	gps = ACCESS_ONCE(rsp->gp_start);
+	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
+	completed = ACCESS_ONCE(rsp->completed);
+	if (ULONG_CMP_GE(completed, gpnum) ||
+	    ULONG_CMP_LT(j, js) ||
+	    ULONG_CMP_GE(gps, js))
+		return; /* No stall or GP completed since entering function. */
 	rnp = rdp->mynode;
 	if (rcu_gp_in_progress(rsp) &&
-	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
+	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
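
The comment block above relies on a write-side/read-side pairing: grace-period initialization records ->gp_start, then ->jiffies_stall, then bumps ->gpnum with smp_wmb() between the steps (see record_gp_stall_check_time() above and rcu_gp_init() in the next hunk), while check_cpu_stall() reads those fields in the opposite order with smp_rmb() in between. A reduced, self-contained sketch of that pattern follows; the variables and functions are hypothetical stand-ins, not kernel code:

#include <linux/compiler.h>	/* ACCESS_ONCE() */
#include <linux/kernel.h>	/* smp_wmb()/smp_rmb() via asm/barrier.h */

static unsigned long start_time;	/* stands in for ->gp_start */
static unsigned long stall_time;	/* stands in for ->jiffies_stall */

static void publisher(unsigned long j, unsigned long timeout)
{
	start_time = j;
	smp_wmb();	/* Order start_time before stall_time. */
	stall_time = j + timeout;
}

static int consumer(unsigned long j)
{
	unsigned long st, gs;

	st = ACCESS_ONCE(stall_time);
	smp_rmb();	/* Pairs with the publisher's smp_wmb(). */
	gs = ACCESS_ONCE(start_time);
	/*
	 * Because the loads are ordered against the paired stores, seeing
	 * the new stall_time guarantees seeing the matching (or a newer)
	 * start_time, so a stale start_time cannot trigger a false alarm.
	 */
	return gs <= st && st <= j;	/* purely illustrative check */
}
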
@@ -1315,9 +1344,10 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	}
 
 	/* Advance to a new grace period and initialize state. */
+	record_gp_stall_check_time(rsp);
+	smp_wmb(); /* Record GP times before starting GP. */
 	rsp->gpnum++;
 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
-	record_gp_stall_check_time(rsp);
 	raw_spin_unlock_irq(&rnp->lock);
 
 	/* Exclude any concurrent CPU-hotplug operations. */
@@ -1366,7 +1396,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 /*
  * Do one round of quiescent-state forcing.
  */
-int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
+static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 {
 	int fqs_state = fqs_state_in;
 	bool isidle = false;
@@ -1452,7 +1482,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	rdp = this_cpu_ptr(rsp->rda);
 	rcu_advance_cbs(rsp, rnp, rdp);  /* Reduce false positives below. */
 	if (cpu_needs_another_gp(rsp, rdp))
-		rsp->gp_flags = 1;
+		rsp->gp_flags = RCU_GP_FLAG_INIT;
 	raw_spin_unlock_irq(&rnp->lock);
 }
 
@@ -2725,10 +2755,13 @@ static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
 
 	for_each_rcu_flavor(rsp) {
 		rdp = per_cpu_ptr(rsp->rda, cpu);
-		if (rdp->qlen != rdp->qlen_lazy)
+		if (!rdp->nxtlist)
+			continue;
+		hc = true;
+		if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
 			al = false;
-		if (rdp->nxtlist)
-			hc = true;
+			break;
+		}
 	}
 	if (all_lazy)
 		*all_lazy = al;
@@ -3295,8 +3328,8 @@ void __init rcu_init(void)
 
 	rcu_bootup_announce();
 	rcu_init_geometry();
-	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
 	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
+	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
 	__rcu_init_preempt();
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
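
Many hunks in this file, and in kernel/rcutree_plugin.h below, replace &__get_cpu_var(x) with this_cpu_ptr(&x) and value reads with __this_cpu_read(x). A hedged sketch of the two accessor forms, using a hypothetical per-CPU variable that is not part of this patch:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, example_counter);	/* hypothetical per-CPU variable */

static int example_percpu_access(void)
{
	int *p;
	int v;

	preempt_disable();	/* pin this task to the current CPU */
	p = this_cpu_ptr(&example_counter);	/* was &__get_cpu_var(example_counter) */
	v = __this_cpu_read(example_counter);	/* was __get_cpu_var(example_counter) */
	*p = v + 1;
	preempt_enable();
	return v;
}
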
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 130c97b027f2..1855d66bf705 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -96,10 +96,15 @@ static void __init rcu_bootup_announce_oddness(void)
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
 #ifdef CONFIG_RCU_NOCB_CPU_ALL
 	pr_info("\tOffload RCU callbacks from all CPUs\n");
-	cpumask_setall(rcu_nocb_mask);
+	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
 	if (have_rcu_nocb_mask) {
+		if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
+			pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
+			cpumask_and(rcu_nocb_mask, cpu_possible_mask,
+				    rcu_nocb_mask);
+		}
 		cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
 		pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
 		if (rcu_nocb_poll)
@@ -660,7 +665,7 @@ static void rcu_preempt_check_callbacks(int cpu)
 
 static void rcu_preempt_do_callbacks(void)
 {
-	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
+	rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
 }
 
 #endif /* #ifdef CONFIG_RCU_BOOST */
@@ -1332,7 +1337,7 @@ static void invoke_rcu_callbacks_kthread(void)
  */
 static bool rcu_is_callbacks_kthread(void)
 {
-	return __get_cpu_var(rcu_cpu_kthread_task) == current;
+	return __this_cpu_read(rcu_cpu_kthread_task) == current;
 }
 
 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
@@ -1382,8 +1387,8 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 
 static void rcu_kthread_do_work(void)
 {
-	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
-	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
+	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
 	rcu_preempt_do_callbacks();
 }
 
@@ -1402,7 +1407,7 @@ static void rcu_cpu_kthread_park(unsigned int cpu)
 
 static int rcu_cpu_kthread_should_run(unsigned int cpu)
 {
-	return __get_cpu_var(rcu_cpu_has_work);
+	return __this_cpu_read(rcu_cpu_has_work);
 }
 
 /*
@@ -1412,8 +1417,8 @@ static int rcu_cpu_kthread_should_run(unsigned int cpu)
  */
 static void rcu_cpu_kthread(unsigned int cpu)
 {
-	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
-	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
+	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
+	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
 	int spincnt;
 
 	for (spincnt = 0; spincnt < 10; spincnt++) {
@@ -2108,7 +2113,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 
 	/* If we are not being polled and there is a kthread, awaken it ... */
 	t = ACCESS_ONCE(rdp->nocb_kthread);
-	if (rcu_nocb_poll | !t)
+	if (rcu_nocb_poll || !t)
 		return;
 	len = atomic_long_read(&rdp->nocb_q_count);
 	if (old_rhpp == &rdp->nocb_head) {
diff --git a/mm/mlock.c b/mm/mlock.c
index d63802663242..67ba6da7d0e3 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -736,6 +736,7 @@ static int do_mlockall(int flags)
 
 		/* Ignore errors */
 		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
+		cond_resched();
 	}
 out:
 	return 0;