Diffstat (limited to 'kernel/rcutree_plugin.h')
 -rw-r--r--  kernel/rcutree_plugin.h  47 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 30 insertions(+), 17 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 0e4f420245d9..71a4147473f9 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -57,7 +57,7 @@ static void __init rcu_bootup_announce_oddness(void)
 	printk(KERN_INFO
 	       "\tRCU-based detection of stalled CPUs is disabled.\n");
 #endif
-#ifndef CONFIG_RCU_CPU_STALL_VERBOSE
+#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
 	printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
 #endif
 #if NUM_RCU_LVL_4 != 0
@@ -154,7 +154,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
-		rdp = rcu_preempt_state.rda[cpu];
+		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
 		rnp = rdp->mynode;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
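
The rda field is dereferenced through per_cpu_ptr() here because it is now a __percpu pointer rather than a plain per-CPU array. For context, a minimal sketch of the dynamic per-CPU pattern this implies (the struct and variable names are illustrative, not from this file):

	/* Illustrative only: "my_rda" and "struct my_data" are hypothetical. */
	struct my_data {
		long counter;
	};

	static struct my_data __percpu *my_rda;

	static int __init my_rda_init(void)
	{
		int cpu;

		my_rda = alloc_percpu(struct my_data);	/* one instance per CPU */
		if (my_rda == NULL)
			return -ENOMEM;
		for_each_possible_cpu(cpu)
			per_cpu_ptr(my_rda, cpu)->counter = 0;	/* was: my_rda[cpu] */
		return 0;
	}
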
@@ -201,7 +201,7 @@ static void rcu_preempt_note_context_switch(int cpu)
  */
 void __rcu_read_lock(void)
 {
-	ACCESS_ONCE(current->rcu_read_lock_nesting)++;
+	current->rcu_read_lock_nesting++;
 	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -344,7 +344,9 @@ void __rcu_read_unlock(void)
 	struct task_struct *t = current;
 
 	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-	if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
+	--t->rcu_read_lock_nesting;
+	barrier();  /* decrement before load of ->rcu_read_unlock_special */
+	if (t->rcu_read_lock_nesting == 0 &&
 	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
 		rcu_read_unlock_special(t);
 #ifdef CONFIG_PROVE_LOCKING
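
The rewritten __rcu_read_unlock() separates the decrement from the test and puts a compiler barrier between them, so the compiler cannot reorder the nesting-count update past the load of ->rcu_read_unlock_special. A runnable userspace model of that ordering constraint (a sketch assuming only same-task compiler reordering matters; this is not the kernel implementation itself):

	#include <stdio.h>

	#define barrier() __asm__ __volatile__("" ::: "memory")

	struct task {
		int nesting;	/* models ->rcu_read_lock_nesting */
		int special;	/* models ->rcu_read_unlock_special */
	};

	static void model_read_unlock(struct task *t)
	{
		--t->nesting;
		barrier();	/* decrement before load of ->special */
		if (t->nesting == 0 && t->special)
			printf("slow path: clean up special state\n");
	}

	int main(void)
	{
		struct task t = { .nesting = 1, .special = 1 };
		model_read_unlock(&t);
		return 0;
	}
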
@@ -417,6 +419,16 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
 	}
 }
 
+/*
+ * Suppress preemptible RCU's CPU stall warnings by pushing the
+ * time of the next stall-warning message comfortably far into the
+ * future.
+ */
+static void rcu_preempt_stall_reset(void)
+{
+	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
+}
+
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 /*
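
The new rcu_preempt_stall_reset() works because the stall detector compares jiffies against ->jiffies_stall with wraparound-safe signed arithmetic, so a deadline of jiffies + ULONG_MAX / 2 stays "in the future" for roughly half the counter's range. A small userspace demonstration modeled on the kernel's time_after() macro (illustrative, not kernel code):

	#include <stdio.h>
	#include <limits.h>

	/* Same signed-difference comparison the kernel's time_after() uses. */
	#define time_after(a, b)	((long)((b) - (a)) < 0)

	int main(void)
	{
		unsigned long jiffies = 123456UL;
		unsigned long deadline = jiffies + ULONG_MAX / 2;

		/* Prints 0: no stall warning would fire now... */
		printf("%d\n", time_after(jiffies, deadline));
		/* ...and still 0 a million ticks later. */
		printf("%d\n", time_after(jiffies + 1000000UL, deadline));
		return 0;
	}
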
@@ -546,9 +558,11 @@ EXPORT_SYMBOL_GPL(call_rcu);
  *
  * Control will return to the caller some time after a full grace
  * period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
+ * read-side critical sections have completed. Note, however, that
+ * upon return from synchronize_rcu(), the caller might well be executing
+ * concurrently with new RCU read-side critical sections that began while
+ * synchronize_rcu() was waiting. RCU read-side critical sections are
+ * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
  */
 void synchronize_rcu(void)
 {
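
The expanded synchronize_rcu() comment distinguishes pre-existing readers (which are waited for) from new ones (which are not). That guarantee is exactly what the classic unlink-wait-free update pattern relies on, sketched here with hypothetical names (this helper is not part of the patch):

	struct foo {
		int data;
	};

	static struct foo *global_foo;	/* updates serialized by caller's lock */

	static void replace_foo(struct foo *new_f)
	{
		struct foo *old_f = global_foo;

		rcu_assign_pointer(global_foo, new_f);	/* new readers see new_f */
		synchronize_rcu();	/* waits only for readers that saw old_f */
		kfree(old_f);		/* now safe: no reader can hold old_f */
	}
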
@@ -771,7 +785,7 @@ static void rcu_preempt_send_cbs_to_orphanage(void)
  */
 static void __init __rcu_init_preempt(void)
 {
-	RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
+	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
 }
 
 /*
@@ -865,6 +879,14 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
 {
 }
 
+/*
+ * Because preemptible RCU does not exist, there is no need to suppress
+ * its CPU stall warnings.
+ */
+static void rcu_preempt_stall_reset(void)
+{
+}
+
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 /*
@@ -919,15 +941,6 @@ static void rcu_preempt_process_callbacks(void)
 }
 
 /*
- * In classic RCU, call_rcu() is just call_rcu_sched().
- */
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
-{
-	call_rcu_sched(head, func);
-}
-EXPORT_SYMBOL_GPL(call_rcu);
-
-/*
  * Wait for an rcu-preempt grace period, but make it happen quickly.
  * But because preemptable RCU does not exist, map to rcu-sched.
  */
