Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	| 237
1 file changed, 55 insertions(+), 182 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 5271a020887e..7f3244c0df01 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -68,17 +68,21 @@ static void __init rcu_bootup_announce_oddness(void)
 	printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
 #endif
 #if NUM_RCU_LVL_4 != 0
-	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
+	printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
 #endif
+	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
+		printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
+	if (nr_cpu_ids != NR_CPUS)
+		printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
 }
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
-struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
+struct rcu_state rcu_preempt_state =
+	RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
-static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
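
The RCU_STATE_INITIALIZER() change threads the flavor's callback-queuing function (here call_rcu) into the rcu_state structure itself, so common code can reach it through the state pointer instead of taking it as a parameter; see the _rcu_barrier() hunk below. A minimal standalone sketch of the pattern; the structure, field, and macro names here are invented for illustration and are not the kernel's:

    /* Sketch only; the real rcu_state and RCU_STATE_INITIALIZER differ. */
    struct flavor_state {
            const char *name;
            void (*call)(void);     /* flavor's callback-queuing function */
    };

    #define FLAVOR_STATE_INITIALIZER(sname, cr) \
            { .name = #sname, .call = (cr) }

    static void call_rcu_stub(void) { }
    static struct flavor_state preempt_state =
            FLAVOR_STATE_INITIALIZER(rcu_preempt, call_rcu_stub);
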
@@ -153,7 +157,7 @@ static void rcu_preempt_qs(int cpu)
  *
  * Caller must disable preemption.
  */
-void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(int cpu)
 {
 	struct task_struct *t = current;
 	unsigned long flags;
@@ -164,7 +168,7 @@ void rcu_preempt_note_context_switch(void)
 	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
-		rdp = __this_cpu_ptr(rcu_preempt_state.rda);
+		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
 		rnp = rdp->mynode;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -228,23 +232,11 @@ void rcu_preempt_note_context_switch(void)
 	 * means that we continue to block the current grace period.
 	 */
 	local_irq_save(flags);
-	rcu_preempt_qs(smp_processor_id());
+	rcu_preempt_qs(cpu);
 	local_irq_restore(flags);
 }
 
 /*
- * Tree-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
-	current->rcu_read_lock_nesting++;
-	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
-/*
  * Check for preempted RCU readers blocking the current grace period
  * for the specified rcu_node structure.  If the caller needs a reliable
  * answer, it must hold the rcu_node's ->lock.
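
The __rcu_read_lock() implementation leaves this file entirely, presumably consolidated with the Tiny RCU version in common RCU code so that the tree-specific file no longer carries its own copy. For orientation, this is the kind of read-side critical section the primitive implements; the data structure and names below are illustrative, not from this file:

    /* Kernel-context sketch of a typical reader; names are invented. */
    struct my_data {
            int a;
    };
    static struct my_data __rcu *gp;    /* RCU-protected pointer */

    static int read_a(void)
    {
            struct my_data *p;
            int a = -1;

            rcu_read_lock();            /* resolves to __rcu_read_lock() */
            p = rcu_dereference(gp);
            if (p)
                    a = p->a;
            rcu_read_unlock();          /* may call rcu_read_unlock_special() */
            return a;
    }
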
@@ -310,7 +302,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
@@ -398,8 +390,9 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 						       rnp->grphi,
 						       !!rnp->gp_tasks);
 			rcu_report_unblock_qs_rnp(rnp, flags);
-		} else
+		} else {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		}
 
 #ifdef CONFIG_RCU_BOOST
 		/* Unboost if we were boosted. */
@@ -418,38 +411,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 	}
 }
 
-/*
- * Tree-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
-	struct task_struct *t = current;
-
-	if (t->rcu_read_lock_nesting != 1)
-		--t->rcu_read_lock_nesting;
-	else {
-		barrier();  /* critical section before exit code. */
-		t->rcu_read_lock_nesting = INT_MIN;
-		barrier();  /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-			rcu_read_unlock_special(t);
-		barrier();  /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
-	}
-#ifdef CONFIG_PROVE_LOCKING
-	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-	}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
 #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
 
 /*
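
The removal of __rcu_read_unlock() is the counterpart consolidation, and it explains why rcu_read_unlock_special() loses its static qualifier (and its forward declaration above): the relocated unlock path still needs to call it from outside this file. The INT_MIN trick in the removed body is worth noting: setting ->rcu_read_lock_nesting to INT_MIN while special processing runs keeps any nested rcu_read_lock()/rcu_read_unlock() from mistaking itself for the outermost unlock, and the PROVE_LOCKING check asserts that the counter is never caught in the invalid band between INT_MIN/2 and 0. A standalone restatement of that invariant:

    /* Standalone illustration of the nesting-counter invariant. */
    #include <assert.h>
    #include <limits.h>

    static void check_nesting(int nesting)
    {
            /*
             * Valid: nesting >= 0 (ordinary use) or nesting near INT_MIN
             * (outermost unlock in progress).  The mid-negative band
             * would indicate a corrupted counter.
             */
            assert(!(nesting < 0 && nesting > INT_MIN / 2));
    }
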
@@ -540,16 +501,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 }
 
 /*
- * Suppress preemptible RCU's CPU stall warnings by pushing the
- * time of the next stall-warning message comfortably far into the
- * future.
- */
-static void rcu_preempt_stall_reset(void)
-{
-	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
-}
-
-/*
  * Check that the list of blocked tasks for the newly completed grace
  * period is in fact empty.  It is a serious bug to complete a grace
  * period that still has RCU readers blocked!  This function must be
@@ -650,14 +601,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
- * Do CPU-offline processing for preemptible RCU.
- */
-static void rcu_preempt_cleanup_dead_cpu(int cpu)
-{
-	rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
-}
-
-/*
  * Check for a quiescent state from the current CPU.  When a task blocks,
  * the task is recorded in the corresponding CPU's rcu_node structure,
  * which is checked elsewhere.
@@ -677,15 +620,6 @@ static void rcu_preempt_check_callbacks(int cpu)
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
-/*
- * Process callbacks for preemptible RCU.
- */
-static void rcu_preempt_process_callbacks(void)
-{
-	__rcu_process_callbacks(&rcu_preempt_state,
-				&__get_cpu_var(rcu_preempt_data));
-}
-
 #ifdef CONFIG_RCU_BOOST
 
 static void rcu_preempt_do_callbacks(void)
@@ -824,9 +758,9 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 	int must_wait = 0;
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
-	if (list_empty(&rnp->blkd_tasks))
+	if (list_empty(&rnp->blkd_tasks)) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	else {
+	} else {
 		rnp->exp_tasks = rnp->blkd_tasks.next;
 		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
 		must_wait = 1;
@@ -870,9 +804,9 @@ void synchronize_rcu_expedited(void)
 	 * expedited grace period for us, just leave.
 	 */
 	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
-		if (trycount++ < 10)
+		if (trycount++ < 10) {
 			udelay(trycount * num_online_cpus());
-		else {
+		} else {
 			synchronize_rcu();
 			return;
 		}
@@ -917,51 +851,16 @@ mb_ret:
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
-/*
- * Check to see if there is any immediate preemptible-RCU-related work
- * to be done.
- */
-static int rcu_preempt_pending(int cpu)
-{
-	return __rcu_pending(&rcu_preempt_state,
-			     &per_cpu(rcu_preempt_data, cpu));
-}
-
-/*
- * Does preemptible RCU have callbacks on this CPU?
- */
-static int rcu_preempt_cpu_has_callbacks(int cpu)
-{
-	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
-}
-
 /**
  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
  */
 void rcu_barrier(void)
 {
-	_rcu_barrier(&rcu_preempt_state, call_rcu);
+	_rcu_barrier(&rcu_preempt_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
 /*
- * Initialize preemptible RCU's per-CPU data.
- */
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
-{
-	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
-}
-
-/*
- * Move preemptible RCU's callbacks from dying CPU to other online CPU
- * and record a quiescent state.
- */
-static void rcu_preempt_cleanup_dying_cpu(void)
-{
-	rcu_cleanup_dying_cpu(&rcu_preempt_state);
-}
-
-/*
  * Initialize preemptible RCU's state structures.
  */
 static void __init __rcu_init_preempt(void)
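
With call_rcu recorded in rcu_preempt_state at build time (the RCU_STATE_INITIALIZER hunk above), _rcu_barrier() can presumably fetch the queuing function from the state structure, so the second argument disappears. A runnable sketch of that shape; every name here is invented and the real _rcu_barrier() is considerably more involved:

    #include <stdio.h>

    struct head { int unused; };

    struct state {
            const char *name;
            void (*call)(struct head *h, void (*func)(struct head *h));
    };

    static void queue_cb(struct head *h, void (*func)(struct head *h))
    {
            func(h);        /* a real flavor would defer to a grace period */
    }

    static void barrier_cb(struct head *h) { (void)h; }

    static void barrier_sketch(struct state *sp)
    {
            struct head h;

            sp->call(&h, barrier_cb);   /* no function argument needed */
            printf("%s: barrier callback queued\n", sp->name);
    }

    int main(void)
    {
            struct state preempt = { "rcu_preempt", queue_cb };

            barrier_sketch(&preempt);
            return 0;
    }
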
@@ -1002,6 +901,14 @@ void rcu_force_quiescent_state(void)
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
 /*
+ * Because preemptible RCU does not exist, we never have to check for
+ * CPUs being in quiescent states.
+ */
+static void rcu_preempt_note_context_switch(int cpu)
+{
+}
+
+/*
  * Because preemptible RCU does not exist, there are never any preempted
  * RCU readers.
  */
@@ -1038,14 +945,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 }
 
 /*
- * Because preemptible RCU does not exist, there is no need to suppress
- * its CPU stall warnings.
- */
-static void rcu_preempt_stall_reset(void)
-{
-}
-
-/*
  * Because there is no preemptible RCU, there can be no readers blocked,
  * so there is no need to check for blocked tasks.  So check only for
  * bogus qsmask values.
@@ -1073,14 +972,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
- * Because preemptible RCU does not exist, it never needs CPU-offline
- * processing.
- */
-static void rcu_preempt_cleanup_dead_cpu(int cpu)
-{
-}
-
-/*
  * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
  */
@@ -1089,14 +980,6 @@ static void rcu_preempt_check_callbacks(int cpu)
 }
 
 /*
- * Because preemptible RCU does not exist, it never has any callbacks
- * to process.
- */
-static void rcu_preempt_process_callbacks(void)
-{
-}
-
-/*
  * Queue an RCU callback for lazy invocation after a grace period.
  * This will likely be later named something like "call_rcu_lazy()",
  * but this change will require some way of tagging the lazy RCU
@@ -1137,22 +1020,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
- * Because preemptible RCU does not exist, it never has any work to do.
- */
-static int rcu_preempt_pending(int cpu)
-{
-	return 0;
-}
-
-/*
- * Because preemptible RCU does not exist, it never has callbacks
- */
-static int rcu_preempt_cpu_has_callbacks(int cpu)
-{
-	return 0;
-}
-
-/*
  * Because preemptible RCU does not exist, rcu_barrier() is just
  * another name for rcu_barrier_sched().
  */
@@ -1163,21 +1030,6 @@ void rcu_barrier(void)
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
 /*
- * Because preemptible RCU does not exist, there is no per-CPU
- * data to initialize.
- */
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
-{
-}
-
-/*
- * Because there is no preemptible RCU, there is no cleanup to do.
- */
-static void rcu_preempt_cleanup_dying_cpu(void)
-{
-}
-
-/*
  * Because preemptible RCU does not exist, it need not be initialized.
  */
 static void __init __rcu_init_preempt(void)
@@ -1960,9 +1812,11 @@ static void rcu_idle_count_callbacks_posted(void)
  */
 #define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
 #define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
-#define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
+#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
 
+extern int tick_nohz_enabled;
+
 /*
  * Does the specified flavor of RCU have non-lazy callbacks pending on
  * the specified CPU?  Both RCU flavor and CPU are specified by the
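
RCU_IDLE_GP_DELAY drops from 6 to 4, presumably because the delay now serves as the alignment for round_up() (see the rcu_needs_cpu() and rcu_prepare_for_idle() hunks below), and the kernel's round_up() mask trick is only correct for power-of-two alignments:

    /* The power-of-two rounding identity that round_up() relies on: */
    #define ROUND_UP_P2(x, a)   (((x) + (a) - 1) & ~((a) - 1))
    /*
     * ROUND_UP_P2(1005, 4) == 1008.  With a == 6, ~(6 - 1) ends in
     * binary ...1010, not a contiguous run of ones, so the mask trick
     * would produce wrong results; hence the power-of-two constant.
     */
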
@@ -2039,10 +1893,13 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 		return 1;
 	}
 	/* Set up for the possibility that RCU will post a timer. */
-	if (rcu_cpu_has_nonlazy_callbacks(cpu))
-		*delta_jiffies = RCU_IDLE_GP_DELAY;
-	else
-		*delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+	if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+		*delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
+					  RCU_IDLE_GP_DELAY) - jiffies;
+	} else {
+		*delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
+		*delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
+	}
 	return 0;
 }
 
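
Rounding each idle CPU's wakeup to a multiple of RCU_IDLE_GP_DELAY (and, in the lazy case, to a round_jiffies() boundary) batches the timers, so CPUs going idle at slightly different times wake together instead of ticking independently. A standalone illustration with assumed jiffies values:

    #include <stdio.h>

    #define RCU_IDLE_GP_DELAY 4
    #define ROUND_UP_P2(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long j1 = 1001, j2 = 1003; /* two CPUs going idle */

            /* Both wakeups land on jiffy 1008, sharing one interrupt. */
            printf("%lu %lu\n",
                   ROUND_UP_P2(j1 + RCU_IDLE_GP_DELAY, RCU_IDLE_GP_DELAY),
                   ROUND_UP_P2(j2 + RCU_IDLE_GP_DELAY, RCU_IDLE_GP_DELAY));
            return 0;
    }
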
@@ -2101,6 +1958,7 @@ static void rcu_cleanup_after_idle(int cpu)
 
 	del_timer(&rdtp->idle_gp_timer);
 	trace_rcu_prep_idle("Cleanup after idle");
+	rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
 }
 
 /*
@@ -2126,6 +1984,18 @@ static void rcu_prepare_for_idle(int cpu)
 {
 	struct timer_list *tp;
 	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+	int tne;
+
+	/* Handle nohz enablement switches conservatively. */
+	tne = ACCESS_ONCE(tick_nohz_enabled);
+	if (tne != rdtp->tick_nohz_enabled_snap) {
+		if (rcu_cpu_has_callbacks(cpu))
+			invoke_rcu_core(); /* force nohz to see update. */
+		rdtp->tick_nohz_enabled_snap = tne;
+		return;
+	}
+	if (!tne)
+		return;
 
 	/*
 	 * If this is an idle re-entry, for example, due to use of
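
The snapshot-and-compare dance lets each CPU notice a runtime flip of the global tick_nohz_enabled knob: on a mismatch it takes the conservative path once (kicking the RCU core if callbacks are pending), records the new value, and only trusts the fast paths once the setting has been stable across visits. The generic pattern, with invented names:

    /* Generic sketch of per-CPU snapshotting of a global knob. */
    static void slow_safe_path(void) { /* e.g. invoke_rcu_core() */ }

    struct cpu_state {
            int knob_snap;      /* last observed value of the knob */
    };

    static void visit(struct cpu_state *cs, int knob_now, int work_pending)
    {
            if (knob_now != cs->knob_snap) {
                    if (work_pending)
                            slow_safe_path();
                    cs->knob_snap = knob_now;
                    return;             /* re-evaluate on the next visit */
            }
            if (!knob_now)
                    return;             /* feature off: nothing to prepare */
            /* fast path: setting stable and enabled */
    }
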
@@ -2179,10 +2049,11 @@ static void rcu_prepare_for_idle(int cpu)
 	if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
 		trace_rcu_prep_idle("Dyntick with callbacks");
 		rdtp->idle_gp_timer_expires =
-			jiffies + RCU_IDLE_GP_DELAY;
+			round_up(jiffies + RCU_IDLE_GP_DELAY,
+				 RCU_IDLE_GP_DELAY);
 	} else {
 		rdtp->idle_gp_timer_expires =
-			jiffies + RCU_IDLE_LAZY_GP_DELAY;
+			round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
 		trace_rcu_prep_idle("Dyntick with lazy callbacks");
 	}
 	tp = &rdtp->idle_gp_timer;
@@ -2223,8 +2094,9 @@ static void rcu_prepare_for_idle(int cpu)
 	if (rcu_cpu_has_callbacks(cpu)) {
 		trace_rcu_prep_idle("More callbacks");
 		invoke_rcu_core();
-	} else
+	} else {
 		trace_rcu_prep_idle("Callbacks drained");
+	}
 }
 
 /*
@@ -2261,6 +2133,7 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
+	*cp = '\0';
 }
 
 #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
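
The one-line *cp = '\0' matters because the stub's caller presumably formats the buffer into a stall-warning message unconditionally; without the terminator, the non-FAST_NO_HZ build would print uninitialized stack bytes. The assumed calling pattern, with illustrative names:

    /* Kernel-context sketch; buffer size and format are assumptions. */
    char buf[64];

    print_cpu_stall_fast_no_hz(buf, cpu);  /* must leave buf a valid string */
    printk(KERN_ERR "CPU %d: %s\n", cpu, buf);  /* printed either way */
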