Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--   kernel/rcutree_plugin.h   223
1 file changed, 44 insertions, 179 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3e4899459f3d..7f3244c0df01 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -68,17 +68,21 @@ static void __init rcu_bootup_announce_oddness(void)
 	printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
 #endif
 #if NUM_RCU_LVL_4 != 0
-	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
+	printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
 #endif
+	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
+		printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
+	if (nr_cpu_ids != NR_CPUS)
+		printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
 }
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
-struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
+struct rcu_state rcu_preempt_state =
+	RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
-static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -233,18 +237,6 @@ static void rcu_preempt_note_context_switch(int cpu)
 }
 
 /*
- * Tree-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
-	current->rcu_read_lock_nesting++;
-	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
-/*
  * Check for preempted RCU readers blocking the current grace period
  * for the specified rcu_node structure.  If the caller needs a reliable
  * answer, it must hold the rcu_node's ->lock.
@@ -310,7 +302,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
@@ -398,8 +390,9 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 					       rnp->grphi,
 					       !!rnp->gp_tasks);
 		rcu_report_unblock_qs_rnp(rnp, flags);
-	} else
+	} else {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	}
 
 #ifdef CONFIG_RCU_BOOST
 	/* Unboost if we were boosted. */
@@ -418,38 +411,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 	}
 }
 
-/*
- * Tree-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
-	struct task_struct *t = current;
-
-	if (t->rcu_read_lock_nesting != 1)
-		--t->rcu_read_lock_nesting;
-	else {
-		barrier();  /* critical section before exit code. */
-		t->rcu_read_lock_nesting = INT_MIN;
-		barrier();  /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-			rcu_read_unlock_special(t);
-		barrier();  /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
-	}
-#ifdef CONFIG_PROVE_LOCKING
-	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-	}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
 #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
 
 /*
@@ -540,16 +501,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 }
 
 /*
- * Suppress preemptible RCU's CPU stall warnings by pushing the
- * time of the next stall-warning message comfortably far into the
- * future.
- */
-static void rcu_preempt_stall_reset(void)
-{
-	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
-}
-
-/*
  * Check that the list of blocked tasks for the newly completed grace
  * period is in fact empty.  It is a serious bug to complete a grace
  * period that still has RCU readers blocked!  This function must be
@@ -650,14 +601,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
- * Do CPU-offline processing for preemptible RCU.
- */
-static void rcu_preempt_cleanup_dead_cpu(int cpu)
-{
-	rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
-}
-
-/*
  * Check for a quiescent state from the current CPU.  When a task blocks,
  * the task is recorded in the corresponding CPU's rcu_node structure,
  * which is checked elsewhere.
@@ -677,15 +620,6 @@ static void rcu_preempt_check_callbacks(int cpu)
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
-/*
- * Process callbacks for preemptible RCU.
- */
-static void rcu_preempt_process_callbacks(void)
-{
-	__rcu_process_callbacks(&rcu_preempt_state,
-				&__get_cpu_var(rcu_preempt_data));
-}
-
 #ifdef CONFIG_RCU_BOOST
 
 static void rcu_preempt_do_callbacks(void)
@@ -824,9 +758,9 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 	int must_wait = 0;
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
-	if (list_empty(&rnp->blkd_tasks))
+	if (list_empty(&rnp->blkd_tasks)) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	else {
+	} else {
 		rnp->exp_tasks = rnp->blkd_tasks.next;
 		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
 		must_wait = 1;
@@ -870,9 +804,9 @@ void synchronize_rcu_expedited(void)
 	 * expedited grace period for us, just leave.
 	 */
 	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
-		if (trycount++ < 10)
+		if (trycount++ < 10) {
 			udelay(trycount * num_online_cpus());
-		else {
+		} else {
 			synchronize_rcu();
 			return;
 		}
@@ -917,51 +851,16 @@ mb_ret:
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
-/*
- * Check to see if there is any immediate preemptible-RCU-related work
- * to be done.
- */
-static int rcu_preempt_pending(int cpu)
-{
-	return __rcu_pending(&rcu_preempt_state,
-			     &per_cpu(rcu_preempt_data, cpu));
-}
-
-/*
- * Does preemptible RCU have callbacks on this CPU?
- */
-static int rcu_preempt_cpu_has_callbacks(int cpu)
-{
-	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
-}
-
 /**
  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
  */
 void rcu_barrier(void)
 {
-	_rcu_barrier(&rcu_preempt_state, call_rcu);
+	_rcu_barrier(&rcu_preempt_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
 /*
- * Initialize preemptible RCU's per-CPU data.
- */
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
-{
-	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
-}
-
-/*
- * Move preemptible RCU's callbacks from dying CPU to other online CPU
- * and record a quiescent state.
- */
-static void rcu_preempt_cleanup_dying_cpu(void)
-{
-	rcu_cleanup_dying_cpu(&rcu_preempt_state);
-}
-
-/*
  * Initialize preemptible RCU's state structures.
  */
 static void __init __rcu_init_preempt(void)
@@ -1046,14 +945,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 }
 
 /*
- * Because preemptible RCU does not exist, there is no need to suppress
- * its CPU stall warnings.
- */
-static void rcu_preempt_stall_reset(void)
-{
-}
-
-/*
  * Because there is no preemptible RCU, there can be no readers blocked,
  * so there is no need to check for blocked tasks.  So check only for
  * bogus qsmask values.
@@ -1081,14 +972,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
- * Because preemptible RCU does not exist, it never needs CPU-offline
- * processing.
- */
-static void rcu_preempt_cleanup_dead_cpu(int cpu)
-{
-}
-
-/*
  * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
  */
@@ -1097,14 +980,6 @@ static void rcu_preempt_check_callbacks(int cpu)
 }
 
 /*
- * Because preemptible RCU does not exist, it never has any callbacks
- * to process.
- */
-static void rcu_preempt_process_callbacks(void)
-{
-}
-
-/*
  * Queue an RCU callback for lazy invocation after a grace period.
  * This will likely be later named something like "call_rcu_lazy()",
  * but this change will require some way of tagging the lazy RCU
@@ -1145,22 +1020,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
- * Because preemptible RCU does not exist, it never has any work to do.
- */
-static int rcu_preempt_pending(int cpu)
-{
-	return 0;
-}
-
-/*
- * Because preemptible RCU does not exist, it never has callbacks
- */
-static int rcu_preempt_cpu_has_callbacks(int cpu)
-{
-	return 0;
-}
-
-/*
  * Because preemptible RCU does not exist, rcu_barrier() is just
  * another name for rcu_barrier_sched().
  */
@@ -1171,21 +1030,6 @@ void rcu_barrier(void)
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
 /*
- * Because preemptible RCU does not exist, there is no per-CPU
- * data to initialize.
- */
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
-{
-}
-
-/*
- * Because there is no preemptible RCU, there is no cleanup to do.
- */
-static void rcu_preempt_cleanup_dying_cpu(void)
-{
-}
-
-/*
  * Because preemptible RCU does not exist, it need not be initialized.
  */
 static void __init __rcu_init_preempt(void)
@@ -1968,9 +1812,11 @@ static void rcu_idle_count_callbacks_posted(void)
  */
 #define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
 #define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
-#define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
+#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
 
+extern int tick_nohz_enabled;
+
 /*
  * Does the specified flavor of RCU have non-lazy callbacks pending on
  * the specified CPU?  Both RCU flavor and CPU are specified by the
@@ -2047,10 +1893,13 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 		return 1;
 	}
 	/* Set up for the possibility that RCU will post a timer. */
-	if (rcu_cpu_has_nonlazy_callbacks(cpu))
-		*delta_jiffies = RCU_IDLE_GP_DELAY;
-	else
-		*delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+	if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+		*delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
+					  RCU_IDLE_GP_DELAY) - jiffies;
+	} else {
+		*delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
+		*delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
+	}
 	return 0;
 }
 
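Note on the hunk above: rather than returning a fixed delay, rcu_needs_cpu() now rounds the requested wakeup to a multiple of RCU_IDLE_GP_DELAY (and to a whole second via round_jiffies() in the lazy case), so idle CPUs tend to ask for wakeups at the same jiffy instead of each picking its own. The stand-alone sketch below is illustrative only and is not part of the patch; the jiffies values are made up and the local helper merely stands in for the effect of the kernel's round_up() macro on these values.

/* Illustrative sketch, not from the patch: shows how rounding the
 * wakeup target to a multiple of RCU_IDLE_GP_DELAY batches idle-CPU
 * timer expirations onto shared jiffy boundaries. */
#include <stdio.h>

#define RCU_IDLE_GP_DELAY 4	/* same value the patch uses */

/* Same effect as the kernel's round_up() for these positive values. */
static unsigned long round_up_to(unsigned long x, unsigned long y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	/* Hypothetical jiffies counts at which four CPUs go idle. */
	unsigned long idle_at[] = { 1000, 1001, 1002, 1003 };
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long j = idle_at[i];
		unsigned long wake = round_up_to(j + RCU_IDLE_GP_DELAY,
						 RCU_IDLE_GP_DELAY);
		printf("CPU idles at jiffy %lu -> timer at jiffy %lu\n",
		       j, wake);
	}
	/* The targets collapse onto multiples of 4 (1004 and 1008)
	 * instead of four distinct jiffies 1004..1007, so the CPUs
	 * can be woken together. */
	return 0;
}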
@@ -2109,6 +1958,7 @@ static void rcu_cleanup_after_idle(int cpu)
 
 	del_timer(&rdtp->idle_gp_timer);
 	trace_rcu_prep_idle("Cleanup after idle");
+	rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
 }
 
 /*
@@ -2134,6 +1984,18 @@ static void rcu_prepare_for_idle(int cpu)
 {
 	struct timer_list *tp;
 	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+	int tne;
+
+	/* Handle nohz enablement switches conservatively. */
+	tne = ACCESS_ONCE(tick_nohz_enabled);
+	if (tne != rdtp->tick_nohz_enabled_snap) {
+		if (rcu_cpu_has_callbacks(cpu))
+			invoke_rcu_core(); /* force nohz to see update. */
+		rdtp->tick_nohz_enabled_snap = tne;
+		return;
+	}
+	if (!tne)
+		return;
 
 	/*
 	 * If this is an idle re-entry, for example, due to use of
@@ -2187,10 +2049,11 @@ static void rcu_prepare_for_idle(int cpu)
 	if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
 		trace_rcu_prep_idle("Dyntick with callbacks");
 		rdtp->idle_gp_timer_expires =
-			jiffies + RCU_IDLE_GP_DELAY;
+			round_up(jiffies + RCU_IDLE_GP_DELAY,
+				 RCU_IDLE_GP_DELAY);
 	} else {
 		rdtp->idle_gp_timer_expires =
-			jiffies + RCU_IDLE_LAZY_GP_DELAY;
+			round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
 		trace_rcu_prep_idle("Dyntick with lazy callbacks");
 	}
 	tp = &rdtp->idle_gp_timer;
@@ -2231,8 +2094,9 @@ static void rcu_prepare_for_idle(int cpu)
 	if (rcu_cpu_has_callbacks(cpu)) {
 		trace_rcu_prep_idle("More callbacks");
 		invoke_rcu_core();
-	} else
+	} else {
 		trace_rcu_prep_idle("Callbacks drained");
+	}
 }
 
 /*
@@ -2269,6 +2133,7 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
+	*cp = '\0';
 }
 
 #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
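Note on the rcu_prepare_for_idle() hunk above: the added code keeps a per-CPU snapshot of the global tick_nohz_enabled setting; when the setting changes at runtime, the CPU kicks RCU core processing and refreshes the snapshot instead of immediately trusting the new value. The sketch below is illustrative only and not from the patch; it is a user-space analogue of that snapshot-and-compare pattern, with hypothetical names and values.

/* Illustrative sketch, not from the patch: per-CPU snapshot of a
 * runtime-switchable global flag, reacting conservatively to changes. */
#include <stdio.h>

static int tick_nohz_enabled = 1;	/* stands in for the kernel global */
static int nohz_snap[2];		/* stands in for ->tick_nohz_enabled_snap */

static void prepare_for_idle(int cpu)
{
	int tne = tick_nohz_enabled;	/* single read, like ACCESS_ONCE() */

	if (tne != nohz_snap[cpu]) {
		/* Setting changed since last idle entry: record it and defer. */
		printf("cpu%d: nohz switched to %d, deferring idle work\n",
		       cpu, tne);
		nohz_snap[cpu] = tne;
		return;
	}
	if (!tne)
		return;	/* nohz off: tick keeps running, nothing special to do */
	printf("cpu%d: nohz stable, doing dyntick-idle preparation\n", cpu);
}

int main(void)
{
	prepare_for_idle(0);	/* first pass notices the 0 -> 1 "change" */
	prepare_for_idle(0);	/* second pass proceeds normally */
	tick_nohz_enabled = 0;	/* runtime switch */
	prepare_for_idle(0);	/* change noticed, deferred again */
	return 0;
}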
