Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	131
1 files changed, 96 insertions(+), 35 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 3ec8160fc75f..d4437345706f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,6 +46,7 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/time.h>
+#include <linux/kernel_stat.h>
 
 #include "rcutree.h"
 
@@ -53,8 +54,8 @@
 
 static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
 
-#define RCU_STATE_INITIALIZER(name) { \
-	.level = { &name.node[0] }, \
+#define RCU_STATE_INITIALIZER(structname) { \
+	.level = { &structname.node[0] }, \
 	.levelcnt = { \
 		NUM_RCU_LVL_0,  /* root of hierarchy. */ \
 		NUM_RCU_LVL_1, \
| @@ -65,13 +66,14 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; | |||
| 65 | .signaled = RCU_GP_IDLE, \ | 66 | .signaled = RCU_GP_IDLE, \ |
| 66 | .gpnum = -300, \ | 67 | .gpnum = -300, \ |
| 67 | .completed = -300, \ | 68 | .completed = -300, \ |
| 68 | .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&name.onofflock), \ | 69 | .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \ |
| 69 | .orphan_cbs_list = NULL, \ | 70 | .orphan_cbs_list = NULL, \ |
| 70 | .orphan_cbs_tail = &name.orphan_cbs_list, \ | 71 | .orphan_cbs_tail = &structname.orphan_cbs_list, \ |
| 71 | .orphan_qlen = 0, \ | 72 | .orphan_qlen = 0, \ |
| 72 | .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&name.fqslock), \ | 73 | .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \ |
| 73 | .n_force_qs = 0, \ | 74 | .n_force_qs = 0, \ |
| 74 | .n_force_qs_ngp = 0, \ | 75 | .n_force_qs_ngp = 0, \ |
| 76 | .name = #structname, \ | ||
| 75 | } | 77 | } |
| 76 | 78 | ||
| 77 | struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state); | 79 | struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state); |
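Renaming the macro parameter from name to structname frees the new .name field, which is filled in with the preprocessor's stringize operator so that each rcu_state carries a human-readable identifier ("rcu_sched_state", "rcu_bh_state") for the stall-warning messages changed later in this patch. A minimal sketch of the idiom, with hypothetical names:

    /* '#sym' turns the macro argument into a string literal at compile time. */
    struct demo { const char *name; };
    #define DEMO_INITIALIZER(sym) { .name = #sym }

    struct demo demo_state = DEMO_INITIALIZER(demo_state);
    /* demo_state.name now points to the string "demo_state". */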
| @@ -80,6 +82,9 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); | |||
| 80 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); | 82 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); |
| 81 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | 83 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); |
| 82 | 84 | ||
| 85 | int rcu_scheduler_active __read_mostly; | ||
| 86 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | ||
| 87 | |||
| 83 | /* | 88 | /* |
| 84 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s | 89 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s |
| 85 | * permit this function to be invoked without holding the root rcu_node | 90 | * permit this function to be invoked without holding the root rcu_node |
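rcu_scheduler_active is now defined and exported from rcutree.c; it flips to 1 in rcu_scheduler_starting() near the end of this patch and tells RCU that context switches can happen from now on. The EXPORT_SYMBOL_GPL suggests consumers that may be built as modules; a sketch of the kind of gate that keys off it (hypothetical helper, not part of this diff):

    /* Before the scheduler starts there is one CPU and no preemption,
     * so RCU checking machinery can treat everything as quiescent. */
    extern int rcu_scheduler_active;
    extern int debug_locks;

    static int demo_rcu_checking_enabled(void)	/* hypothetical name */
    {
    	return rcu_scheduler_active && debug_locks;
    }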
| @@ -97,25 +102,32 @@ static int rcu_gp_in_progress(struct rcu_state *rsp) | |||
| 97 | */ | 102 | */ |
| 98 | void rcu_sched_qs(int cpu) | 103 | void rcu_sched_qs(int cpu) |
| 99 | { | 104 | { |
| 100 | struct rcu_data *rdp; | 105 | struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu); |
| 101 | 106 | ||
| 102 | rdp = &per_cpu(rcu_sched_data, cpu); | ||
| 103 | rdp->passed_quiesc_completed = rdp->gpnum - 1; | 107 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
| 104 | barrier(); | 108 | barrier(); |
| 105 | rdp->passed_quiesc = 1; | 109 | rdp->passed_quiesc = 1; |
| 106 | rcu_preempt_note_context_switch(cpu); | ||
| 107 | } | 110 | } |
| 108 | 111 | ||
| 109 | void rcu_bh_qs(int cpu) | 112 | void rcu_bh_qs(int cpu) |
| 110 | { | 113 | { |
| 111 | struct rcu_data *rdp; | 114 | struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); |
| 112 | 115 | ||
| 113 | rdp = &per_cpu(rcu_bh_data, cpu); | ||
| 114 | rdp->passed_quiesc_completed = rdp->gpnum - 1; | 116 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
| 115 | barrier(); | 117 | barrier(); |
| 116 | rdp->passed_quiesc = 1; | 118 | rdp->passed_quiesc = 1; |
| 117 | } | 119 | } |
| 118 | 120 | ||
| 121 | /* | ||
| 122 | * Note a context switch. This is a quiescent state for RCU-sched, | ||
| 123 | * and requires special handling for preemptible RCU. | ||
| 124 | */ | ||
| 125 | void rcu_note_context_switch(int cpu) | ||
| 126 | { | ||
| 127 | rcu_sched_qs(cpu); | ||
| 128 | rcu_preempt_note_context_switch(cpu); | ||
| 129 | } | ||
| 130 | |||
| 119 | #ifdef CONFIG_NO_HZ | 131 | #ifdef CONFIG_NO_HZ |
| 120 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { | 132 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { |
| 121 | .dynticks_nesting = 1, | 133 | .dynticks_nesting = 1, |
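Previously the preemptible-RCU hook was buried inside rcu_sched_qs(); the new rcu_note_context_switch() makes the pairing explicit at a single entry point, so rcu_sched_qs() is once again purely "record a quiescent state". The intended caller is the scheduler's context-switch path; roughly (a sketch, not a literal quote of kernel/sched.c):

    asmlinkage void __sched schedule(void)
    {
    	int cpu = smp_processor_id();

    	rcu_note_context_switch(cpu);	/* QS for RCU-sched + preempt hook */
    	/* ... pick the next task and switch to it ... */
    }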
| @@ -438,6 +450,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | |||
| 438 | 450 | ||
| 439 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 451 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
| 440 | 452 | ||
| 453 | int rcu_cpu_stall_panicking __read_mostly; | ||
| 454 | |||
| 441 | static void record_gp_stall_check_time(struct rcu_state *rsp) | 455 | static void record_gp_stall_check_time(struct rcu_state *rsp) |
| 442 | { | 456 | { |
| 443 | rsp->gp_start = jiffies; | 457 | rsp->gp_start = jiffies; |
| @@ -470,7 +484,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
| 470 | 484 | ||
| 471 | /* OK, time to rat on our buddy... */ | 485 | /* OK, time to rat on our buddy... */ |
| 472 | 486 | ||
| 473 | printk(KERN_ERR "INFO: RCU detected CPU stalls:"); | 487 | printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {", |
| 488 | rsp->name); | ||
| 474 | rcu_for_each_leaf_node(rsp, rnp) { | 489 | rcu_for_each_leaf_node(rsp, rnp) { |
| 475 | raw_spin_lock_irqsave(&rnp->lock, flags); | 490 | raw_spin_lock_irqsave(&rnp->lock, flags); |
| 476 | rcu_print_task_stall(rnp); | 491 | rcu_print_task_stall(rnp); |
| @@ -481,7 +496,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
| 481 | if (rnp->qsmask & (1UL << cpu)) | 496 | if (rnp->qsmask & (1UL << cpu)) |
| 482 | printk(" %d", rnp->grplo + cpu); | 497 | printk(" %d", rnp->grplo + cpu); |
| 483 | } | 498 | } |
| 484 | printk(" (detected by %d, t=%ld jiffies)\n", | 499 | printk("} (detected by %d, t=%ld jiffies)\n", |
| 485 | smp_processor_id(), (long)(jiffies - rsp->gp_start)); | 500 | smp_processor_id(), (long)(jiffies - rsp->gp_start)); |
| 486 | trigger_all_cpu_backtrace(); | 501 | trigger_all_cpu_backtrace(); |
| 487 | 502 | ||
| @@ -497,8 +512,8 @@ static void print_cpu_stall(struct rcu_state *rsp) | |||
| 497 | unsigned long flags; | 512 | unsigned long flags; |
| 498 | struct rcu_node *rnp = rcu_get_root(rsp); | 513 | struct rcu_node *rnp = rcu_get_root(rsp); |
| 499 | 514 | ||
| 500 | printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n", | 515 | printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n", |
| 501 | smp_processor_id(), jiffies - rsp->gp_start); | 516 | rsp->name, smp_processor_id(), jiffies - rsp->gp_start); |
| 502 | trigger_all_cpu_backtrace(); | 517 | trigger_all_cpu_backtrace(); |
| 503 | 518 | ||
| 504 | raw_spin_lock_irqsave(&rnp->lock, flags); | 519 | raw_spin_lock_irqsave(&rnp->lock, flags); |
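With rsp->name in the format strings, stall warnings now say which RCU flavor stalled, and the CPU list is bracketed. Going by the printk formats above, the output would look roughly like this (CPU numbers and timings are illustrative, not from a real log):

    INFO: rcu_sched_state detected stalls on CPUs/tasks: { 3 5 } (detected by 1, t=10008 jiffies)
    INFO: rcu_bh_state detected stall on CPU 2 (t=10008 jiffies)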
| @@ -515,6 +530,8 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 515 | long delta; | 530 | long delta; |
| 516 | struct rcu_node *rnp; | 531 | struct rcu_node *rnp; |
| 517 | 532 | ||
| 533 | if (rcu_cpu_stall_panicking) | ||
| 534 | return; | ||
| 518 | delta = jiffies - rsp->jiffies_stall; | 535 | delta = jiffies - rsp->jiffies_stall; |
| 519 | rnp = rdp->mynode; | 536 | rnp = rdp->mynode; |
| 520 | if ((rnp->qsmask & rdp->grpmask) && delta >= 0) { | 537 | if ((rnp->qsmask & rdp->grpmask) && delta >= 0) { |
| @@ -529,6 +546,21 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 529 | } | 546 | } |
| 530 | } | 547 | } |
| 531 | 548 | ||
| 549 | static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) | ||
| 550 | { | ||
| 551 | rcu_cpu_stall_panicking = 1; | ||
| 552 | return NOTIFY_DONE; | ||
| 553 | } | ||
| 554 | |||
| 555 | static struct notifier_block rcu_panic_block = { | ||
| 556 | .notifier_call = rcu_panic, | ||
| 557 | }; | ||
| 558 | |||
| 559 | static void __init check_cpu_stall_init(void) | ||
| 560 | { | ||
| 561 | atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block); | ||
| 562 | } | ||
| 563 | |||
| 532 | #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 564 | #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
| 533 | 565 | ||
| 534 | static void record_gp_stall_check_time(struct rcu_state *rsp) | 566 | static void record_gp_stall_check_time(struct rcu_state *rsp) |
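The panic notifier sets rcu_cpu_stall_panicking, and check_cpu_stall() (previous hunk) bails out when it is set, so once a panic is in flight the stall detector cannot splat over the real oops output. Registering on panic_notifier_list with atomic_notifier_chain_register() is the stock kernel pattern for this; a self-contained sketch of the same shape, with hypothetical names:

    #include <linux/kernel.h>
    #include <linux/notifier.h>

    static int demo_quiet;	/* analogous to rcu_cpu_stall_panicking */

    static int demo_panic(struct notifier_block *this, unsigned long ev,
    			  void *ptr)
    {
    	demo_quiet = 1;		/* suppress noisy diagnostics from now on */
    	return NOTIFY_DONE;
    }

    static struct notifier_block demo_panic_block = {
    	.notifier_call = demo_panic,
    };

    static int __init demo_init(void)
    {
    	atomic_notifier_chain_register(&panic_notifier_list,
    				       &demo_panic_block);
    	return 0;
    }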
| @@ -539,6 +571,10 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 539 | { | 571 | { |
| 540 | } | 572 | } |
| 541 | 573 | ||
| 574 | static void __init check_cpu_stall_init(void) | ||
| 575 | { | ||
| 576 | } | ||
| 577 | |||
| 542 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 578 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
| 543 | 579 | ||
| 544 | /* | 580 | /* |
| @@ -1125,8 +1161,6 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 1125 | */ | 1161 | */ |
| 1126 | void rcu_check_callbacks(int cpu, int user) | 1162 | void rcu_check_callbacks(int cpu, int user) |
| 1127 | { | 1163 | { |
| 1128 | if (!rcu_pending(cpu)) | ||
| 1129 | return; /* if nothing for RCU to do. */ | ||
| 1130 | if (user || | 1164 | if (user || |
| 1131 | (idle_cpu(cpu) && rcu_scheduler_active && | 1165 | (idle_cpu(cpu) && rcu_scheduler_active && |
| 1132 | !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { | 1166 | !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { |
| @@ -1158,7 +1192,8 @@ void rcu_check_callbacks(int cpu, int user) | |||
| 1158 | rcu_bh_qs(cpu); | 1192 | rcu_bh_qs(cpu); |
| 1159 | } | 1193 | } |
| 1160 | rcu_preempt_check_callbacks(cpu); | 1194 | rcu_preempt_check_callbacks(cpu); |
| 1161 | raise_softirq(RCU_SOFTIRQ); | 1195 | if (rcu_pending(cpu)) |
| 1196 | raise_softirq(RCU_SOFTIRQ); | ||
| 1162 | } | 1197 | } |
| 1163 | 1198 | ||
| 1164 | #ifdef CONFIG_SMP | 1199 | #ifdef CONFIG_SMP |
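These two hunks move the rcu_pending() test: the early return is gone, so the quiescent-state bookkeeping now runs on every scheduling-clock tick, and only raising RCU_SOFTIRQ remains conditional. The point is that a CPU can owe the current grace period a quiescent state even when it has no other RCU work pending. Condensed, the function now reads:

    /* Condensed shape of rcu_check_callbacks() after this patch: */
    void rcu_check_callbacks(int cpu, int user)
    {
    	if (user /* || tick landed in the idle loop (conditions above) */) {
    		rcu_sched_qs(cpu);	/* QS for both rcu-sched and rcu-bh */
    		rcu_bh_qs(cpu);
    	} else if (!in_softirq()) {
    		rcu_bh_qs(cpu);		/* QS for rcu-bh only */
    	}
    	rcu_preempt_check_callbacks(cpu);
    	if (rcu_pending(cpu))		/* softirq only when there is work */
    		raise_softirq(RCU_SOFTIRQ);
    }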
| @@ -1236,11 +1271,11 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
| 1236 | break; /* grace period idle or initializing, ignore. */ | 1271 | break; /* grace period idle or initializing, ignore. */ |
| 1237 | 1272 | ||
| 1238 | case RCU_SAVE_DYNTICK: | 1273 | case RCU_SAVE_DYNTICK: |
| 1239 | |||
| 1240 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
| 1241 | if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) | 1274 | if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) |
| 1242 | break; /* So gcc recognizes the dead code. */ | 1275 | break; /* So gcc recognizes the dead code. */ |
| 1243 | 1276 | ||
| 1277 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
| 1278 | |||
| 1244 | /* Record dyntick-idle state. */ | 1279 | /* Record dyntick-idle state. */ |
| 1245 | force_qs_rnp(rsp, dyntick_save_progress_counter); | 1280 | force_qs_rnp(rsp, dyntick_save_progress_counter); |
| 1246 | raw_spin_lock(&rnp->lock); /* irqs already disabled */ | 1281 | raw_spin_lock(&rnp->lock); /* irqs already disabled */ |
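The unlock moves below the compile-time dead-code check. In a build where RCU_SIGNAL_INIT is not RCU_SAVE_DYNTICK, the old ordering dropped rnp->lock and then immediately took the break, so this case exited the switch with the lock already released even though it otherwise exits holding it (note the re-acquire after force_qs_rnp()); the unlock that follows the switch would then be unbalanced. The removed sequence, for reference:

    case RCU_SAVE_DYNTICK:
    	raw_spin_unlock(&rnp->lock);	/* old order: unlock first... */
    	if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
    		break;			/* ...then maybe bail: unbalanced */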
| @@ -1449,11 +1484,13 @@ void synchronize_sched(void) | |||
| 1449 | if (rcu_blocking_is_gp()) | 1484 | if (rcu_blocking_is_gp()) |
| 1450 | return; | 1485 | return; |
| 1451 | 1486 | ||
| 1487 | init_rcu_head_on_stack(&rcu.head); | ||
| 1452 | init_completion(&rcu.completion); | 1488 | init_completion(&rcu.completion); |
| 1453 | /* Will wake me after RCU finished. */ | 1489 | /* Will wake me after RCU finished. */ |
| 1454 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | 1490 | call_rcu_sched(&rcu.head, wakeme_after_rcu); |
| 1455 | /* Wait for it. */ | 1491 | /* Wait for it. */ |
| 1456 | wait_for_completion(&rcu.completion); | 1492 | wait_for_completion(&rcu.completion); |
| 1493 | destroy_rcu_head_on_stack(&rcu.head); | ||
| 1457 | } | 1494 | } |
| 1458 | EXPORT_SYMBOL_GPL(synchronize_sched); | 1495 | EXPORT_SYMBOL_GPL(synchronize_sched); |
| 1459 | 1496 | ||
| @@ -1473,11 +1510,13 @@ void synchronize_rcu_bh(void) | |||
| 1473 | if (rcu_blocking_is_gp()) | 1510 | if (rcu_blocking_is_gp()) |
| 1474 | return; | 1511 | return; |
| 1475 | 1512 | ||
| 1513 | init_rcu_head_on_stack(&rcu.head); | ||
| 1476 | init_completion(&rcu.completion); | 1514 | init_completion(&rcu.completion); |
| 1477 | /* Will wake me after RCU finished. */ | 1515 | /* Will wake me after RCU finished. */ |
| 1478 | call_rcu_bh(&rcu.head, wakeme_after_rcu); | 1516 | call_rcu_bh(&rcu.head, wakeme_after_rcu); |
| 1479 | /* Wait for it. */ | 1517 | /* Wait for it. */ |
| 1480 | wait_for_completion(&rcu.completion); | 1518 | wait_for_completion(&rcu.completion); |
| 1519 | destroy_rcu_head_on_stack(&rcu.head); | ||
| 1481 | } | 1520 | } |
| 1482 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | 1521 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); |
| 1483 | 1522 | ||
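Both synchronize_sched() and synchronize_rcu_bh() queue an rcu_head that lives on the caller's stack. The new init_rcu_head_on_stack()/destroy_rcu_head_on_stack() calls are no-ops unless CONFIG_DEBUG_OBJECTS_RCU_HEAD is enabled, in which case they tell the debug-objects machinery that the object is legitimately on the stack, so queuing it is not flagged and the tracking entry is torn down before the stack frame dies. The general pattern for any caller that sleeps on its own callback:

    /* Sketch: essentially what synchronize_sched() above does. */
    struct rcu_synchronize {
    	struct rcu_head head;
    	struct completion completion;
    };

    static void demo_wait_for_grace_period(void)	/* hypothetical caller */
    {
    	struct rcu_synchronize rcu;

    	init_rcu_head_on_stack(&rcu.head);	/* debug-objects: on-stack */
    	init_completion(&rcu.completion);
    	call_rcu_sched(&rcu.head, wakeme_after_rcu);
    	wait_for_completion(&rcu.completion);
    	destroy_rcu_head_on_stack(&rcu.head);	/* before the frame goes away */
    }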
| @@ -1498,8 +1537,20 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 1498 | check_cpu_stall(rsp, rdp); | 1537 | check_cpu_stall(rsp, rdp); |
| 1499 | 1538 | ||
| 1500 | /* Is the RCU core waiting for a quiescent state from this CPU? */ | 1539 | /* Is the RCU core waiting for a quiescent state from this CPU? */ |
| 1501 | if (rdp->qs_pending) { | 1540 | if (rdp->qs_pending && !rdp->passed_quiesc) { |
| 1541 | |||
| 1542 | /* | ||
| 1543 | * If force_quiescent_state() coming soon and this CPU | ||
| 1544 | * needs a quiescent state, and this is either RCU-sched | ||
| 1545 | * or RCU-bh, force a local reschedule. | ||
| 1546 | */ | ||
| 1502 | rdp->n_rp_qs_pending++; | 1547 | rdp->n_rp_qs_pending++; |
| 1548 | if (!rdp->preemptable && | ||
| 1549 | ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1, | ||
| 1550 | jiffies)) | ||
| 1551 | set_need_resched(); | ||
| 1552 | } else if (rdp->qs_pending && rdp->passed_quiesc) { | ||
| 1553 | rdp->n_rp_report_qs++; | ||
| 1503 | return 1; | 1554 | return 1; |
| 1504 | } | 1555 | } |
| 1505 | 1556 | ||
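__rcu_pending() now distinguishes "this CPU still owes a quiescent state" from "this CPU has one ready to report": only the latter returns 1 immediately, while the former nudges a non-preemptible flavor with set_need_resched() when force_quiescent_state() is about to run anyway. ULONG_CMP_LT() is the wraparound-safe comparison for jiffies-like counters; roughly, assuming the rcupdate.h definition of this era:

    /* Wrap-safe "a < b" for unsigned long counters such as jiffies:
     * when a precedes b, (a) - (b) wraps to a value above ULONG_MAX/2. */
    #define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

    /* With 32-bit unsigned long, ULONG_CMP_LT(0xfffffff0UL, 0x10UL) is
     * true: 0xfffffff0 is "before" 0x10 once the counter has wrapped,
     * even though a plain < would say otherwise. */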
| @@ -1767,6 +1818,21 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |||
| 1767 | } | 1818 | } |
| 1768 | 1819 | ||
| 1769 | /* | 1820 | /* |
| 1821 | * This function is invoked towards the end of the scheduler's initialization | ||
| 1822 | * process. Before this is called, the idle task might contain | ||
| 1823 | * RCU read-side critical sections (during which time, this idle | ||
| 1824 | * task is booting the system). After this function is called, the | ||
| 1825 | * idle tasks are prohibited from containing RCU read-side critical | ||
| 1826 | * sections. This function also enables RCU lockdep checking. | ||
| 1827 | */ | ||
| 1828 | void rcu_scheduler_starting(void) | ||
| 1829 | { | ||
| 1830 | WARN_ON(num_online_cpus() != 1); | ||
| 1831 | WARN_ON(nr_context_switches() > 0); | ||
| 1832 | rcu_scheduler_active = 1; | ||
| 1833 | } | ||
| 1834 | |||
| 1835 | /* | ||
| 1770 | * Compute the per-level fanout, either using the exact fanout specified | 1836 | * Compute the per-level fanout, either using the exact fanout specified |
| 1771 | * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT. | 1837 | * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT. |
| 1772 | */ | 1838 | */ |
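The two WARN_ONs pin down when this may run: exactly one CPU online and zero context switches so far; nr_context_switches() is why linux/kernel_stat.h joined the includes at the top of this patch. The flag it sets is precisely what the idle-CPU test in rcu_check_callbacks() (earlier hunk) consults: before the scheduler starts, the "idle" task is still booting the system and may sit inside RCU read-side critical sections, so ticks landing in it must not count as quiescent states:

    /* From rcu_check_callbacks() earlier in this patch: an idle-loop
     * tick is a quiescent state only once the scheduler is running. */
    if (user ||
        (idle_cpu(cpu) && rcu_scheduler_active &&
         !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
    	rcu_sched_qs(cpu);
    	rcu_bh_qs(cpu);
    }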
| @@ -1849,6 +1915,14 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
| 1849 | INIT_LIST_HEAD(&rnp->blocked_tasks[3]); | 1915 | INIT_LIST_HEAD(&rnp->blocked_tasks[3]); |
| 1850 | } | 1916 | } |
| 1851 | } | 1917 | } |
| 1918 | |||
| 1919 | rnp = rsp->level[NUM_RCU_LVLS - 1]; | ||
| 1920 | for_each_possible_cpu(i) { | ||
| 1921 | while (i > rnp->grphi) | ||
| 1922 | rnp++; | ||
| 1923 | rsp->rda[i]->mynode = rnp; | ||
| 1924 | rcu_boot_init_percpu_data(i, rsp); | ||
| 1925 | } | ||
| 1852 | } | 1926 | } |
| 1853 | 1927 | ||
| 1854 | /* | 1928 | /* |
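rcu_init_one() now ends by binding each possible CPU to its leaf rcu_node: the leaves sit contiguously on the last level of the tree, each covering a CPU range that ends at ->grphi, so a single pointer walks forward as the CPU number passes each leaf's upper bound. This is the per-CPU setup hoisted out of the RCU_INIT_FLAVOR() macro in the next hunk, and it is why that macro must now populate rsp->rda[] before calling rcu_init_one(). A toy model of the interval walk, with hypothetical types:

    /* Map item i to the bucket covering it, given buckets sorted into
     * contiguous, non-overlapping ranges (like rcu_node leaves). */
    struct bucket { int lo, hi; };

    static void bind_items(struct bucket *b, int nitems,
    		       const struct bucket **map)
    {
    	int i;

    	for (i = 0; i < nitems; i++) {
    		while (i > b->hi)
    			b++;		/* advance to the bucket covering i */
    		map[i] = b;
    	}
    }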
| @@ -1859,19 +1933,11 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
| 1859 | #define RCU_INIT_FLAVOR(rsp, rcu_data) \ | 1933 | #define RCU_INIT_FLAVOR(rsp, rcu_data) \ |
| 1860 | do { \ | 1934 | do { \ |
| 1861 | int i; \ | 1935 | int i; \ |
| 1862 | int j; \ | ||
| 1863 | struct rcu_node *rnp; \ | ||
| 1864 | \ | 1936 | \ |
| 1865 | rcu_init_one(rsp); \ | ||
| 1866 | rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ | ||
| 1867 | j = 0; \ | ||
| 1868 | for_each_possible_cpu(i) { \ | 1937 | for_each_possible_cpu(i) { \ |
| 1869 | if (i > rnp[j].grphi) \ | ||
| 1870 | j++; \ | ||
| 1871 | per_cpu(rcu_data, i).mynode = &rnp[j]; \ | ||
| 1872 | (rsp)->rda[i] = &per_cpu(rcu_data, i); \ | 1938 | (rsp)->rda[i] = &per_cpu(rcu_data, i); \ |
| 1873 | rcu_boot_init_percpu_data(i, rsp); \ | ||
| 1874 | } \ | 1939 | } \ |
| 1940 | rcu_init_one(rsp); \ | ||
| 1875 | } while (0) | 1941 | } while (0) |
| 1876 | 1942 | ||
| 1877 | void __init rcu_init(void) | 1943 | void __init rcu_init(void) |
| @@ -1879,12 +1945,6 @@ void __init rcu_init(void) | |||
| 1879 | int cpu; | 1945 | int cpu; |
| 1880 | 1946 | ||
| 1881 | rcu_bootup_announce(); | 1947 | rcu_bootup_announce(); |
| 1882 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
| 1883 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); | ||
| 1884 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
| 1885 | #if NUM_RCU_LVL_4 != 0 | ||
| 1886 | printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n"); | ||
| 1887 | #endif /* #if NUM_RCU_LVL_4 != 0 */ | ||
| 1888 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); | 1948 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); |
| 1889 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); | 1949 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); |
| 1890 | __rcu_init_preempt(); | 1950 | __rcu_init_preempt(); |
| @@ -1898,6 +1958,7 @@ void __init rcu_init(void) | |||
| 1898 | cpu_notifier(rcu_cpu_notify, 0); | 1958 | cpu_notifier(rcu_cpu_notify, 0); |
| 1899 | for_each_online_cpu(cpu) | 1959 | for_each_online_cpu(cpu) |
| 1900 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); | 1960 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); |
| 1961 | check_cpu_stall_init(); | ||
| 1901 | } | 1962 | } |
| 1902 | 1963 | ||
| 1903 | #include "rcutree_plugin.h" | 1964 | #include "rcutree_plugin.h" |
