Diffstat (limited to 'kernel/rcutiny_plugin.h')

-rw-r--r--	kernel/rcutiny_plugin.h	77
1 file changed, 60 insertions(+), 17 deletions(-)
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 9cb1ae4aabdd..22ecea0dfb62 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -132,6 +132,7 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
 	RCU_TRACE(.rcb.name = "rcu_preempt")
 };
 
+static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(void);
 static void rcu_report_exp_done(void);
 
@@ -146,6 +147,11 @@ static int rcu_cpu_blocking_cur_gp(void)
 /*
  * Check for a running RCU reader.  Because there is only one CPU,
  * there can be but one running RCU reader at a time.  ;-)
+ *
+ * Returns zero if there are no running readers.  Returns a positive
+ * number if there is at least one reader within its RCU read-side
+ * critical section.  Returns a negative number if an outermost reader
+ * is in the midst of exiting from its RCU read-side critical section.
+ *
  */
 static int rcu_preempt_running_reader(void)
 {
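
[Not part of the patch] In this version of TINY_PREEMPT_RCU, rcu_preempt_running_reader() is believed to be nothing more than a read of the current task's nesting count, so the sign convention documented above falls directly out of __rcu_read_unlock() parking the counter at INT_MIN while the outermost reader exits (see the __rcu_read_unlock() hunk below). A minimal sketch under that assumption:

	static int rcu_preempt_running_reader(void)
	{
		/* >0: reader(s) running; 0: none; <0 (near INT_MIN):
		 * outermost reader is mid-exit in __rcu_read_unlock(). */
		return current->rcu_read_lock_nesting;
	}
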
@@ -307,7 +318,6 @@ static int rcu_boost(void)
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
 	t->rcu_boost_mutex = &mtx;
-	t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
 	raw_local_irq_restore(flags);
 	rt_mutex_lock(&mtx);
 	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
@@ -475,7 +485,7 @@ void rcu_preempt_note_context_switch(void)
 	unsigned long flags;
 
 	local_irq_save(flags); /* must exclude scheduler_tick(). */
-	if (rcu_preempt_running_reader() &&
+	if (rcu_preempt_running_reader() > 0 &&
 	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
@@ -494,6 +504,13 @@ void rcu_preempt_note_context_switch(void)
 		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
 		if (rcu_cpu_blocking_cur_gp())
 			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
+	} else if (rcu_preempt_running_reader() < 0 &&
+		   t->rcu_read_unlock_special) {
+		/*
+		 * Complete exit from RCU read-side critical section on
+		 * behalf of preempted instance of __rcu_read_unlock().
+		 */
+		rcu_read_unlock_special(t);
 	}
 
 	/*
@@ -526,12 +543,15 @@ EXPORT_SYMBOL_GPL(__rcu_read_lock);
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
 	unsigned long flags;
 	struct list_head *np;
+#ifdef CONFIG_RCU_BOOST
+	struct rt_mutex *rbmp = NULL;
+#endif /* #ifdef CONFIG_RCU_BOOST */
 	int special;
 
 	/*
@@ -552,7 +572,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
 		rcu_preempt_cpu_qs();
 
 	/* Hardware IRQ handlers cannot block. */
-	if (in_irq()) {
+	if (in_irq() || in_serving_softirq()) {
 		local_irq_restore(flags);
 		return;
 	}
@@ -597,10 +617,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
 	}
 #ifdef CONFIG_RCU_BOOST
 	/* Unboost self if was boosted. */
-	if (special & RCU_READ_UNLOCK_BOOSTED) {
-		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
-		rt_mutex_unlock(t->rcu_boost_mutex);
+	if (t->rcu_boost_mutex != NULL) {
+		rbmp = t->rcu_boost_mutex;
 		t->rcu_boost_mutex = NULL;
+		rt_mutex_unlock(rbmp);
 	}
 #endif /* #ifdef CONFIG_RCU_BOOST */
 	local_irq_restore(flags);
@@ -618,13 +638,22 @@ void __rcu_read_unlock(void)
 	struct task_struct *t = current;
 
 	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
-	--t->rcu_read_lock_nesting;
-	barrier();  /* decrement before load of ->rcu_read_unlock_special */
-	if (t->rcu_read_lock_nesting == 0 &&
-	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-		rcu_read_unlock_special(t);
+	if (t->rcu_read_lock_nesting != 1)
+		--t->rcu_read_lock_nesting;
+	else {
+		t->rcu_read_lock_nesting = INT_MIN;
+		barrier();  /* assign before ->rcu_read_unlock_special load */
+		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+			rcu_read_unlock_special(t);
+		barrier();  /* ->rcu_read_unlock_special load before assign */
+		t->rcu_read_lock_nesting = 0;
+	}
 #ifdef CONFIG_PROVE_LOCKING
-	WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
+	{
+		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+	}
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
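
[Not part of the patch] The INT_MIN handshake closes a race with scheduler-using irq handlers: previously, an interrupt landing between the decrement of ->rcu_read_lock_nesting and the ->rcu_read_unlock_special check could re-enter the unlock path on half-updated state. Parking the counter at INT_MIN marks "outermost reader, mid-exit"; rcu_preempt_running_reader() then reads negative, and rcu_preempt_note_context_switch() (hunk above) completes the exit on the preempted task's behalf. The reworked PROVE_LOCKING check tolerates the large-negative marker while still catching underflow; a sketch of that condition, with a hypothetical helper name:

	static bool nesting_is_sane(int rrln)	/* hypothetical */
	{
		if (rrln >= 0)
			return true;	/* no reader, or reader(s) running */
		if (rrln <= INT_MIN / 2)
			return true;	/* legitimate mid-exit marker */
		return false;		/* small negative: unbalanced rcu_read_unlock() */
	}
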
@@ -649,7 +678,7 @@ static void rcu_preempt_check_callbacks(void)
 	invoke_rcu_callbacks();
 	if (rcu_preempt_gp_in_progress() &&
 	    rcu_cpu_blocking_cur_gp() &&
-	    rcu_preempt_running_reader())
+	    rcu_preempt_running_reader() > 0)
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
@@ -706,6 +735,11 @@ EXPORT_SYMBOL_GPL(call_rcu);
  */
 void synchronize_rcu(void)
 {
+	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
+			   !lock_is_held(&rcu_lock_map) &&
+			   !lock_is_held(&rcu_sched_lock_map),
+			   "Illegal synchronize_rcu() in RCU read-side critical section");
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	if (!rcu_scheduler_active)
 		return;
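
[Not part of the patch] The new rcu_lockdep_assert() turns a silent hang into a diagnosable splat: synchronize_rcu() waits for a grace period, and a grace period cannot end while the caller itself is inside a read-side critical section. A minimal example of the bug it flags:

	void buggy(void)	/* illustrative only */
	{
		rcu_read_lock();
		synchronize_rcu();	/* waits on all readers, including us */
		rcu_read_unlock();	/* never reached: self-deadlock */
	}
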
@@ -882,7 +916,8 @@ static void rcu_preempt_process_callbacks(void)
 static void invoke_rcu_callbacks(void)
 {
 	have_rcu_kthread_work = 1;
-	wake_up(&rcu_kthread_wq);
+	if (rcu_kthread_task != NULL)
+		wake_up(&rcu_kthread_wq);
 }
 
 #ifdef CONFIG_RCU_TRACE
@@ -943,12 +978,16 @@ early_initcall(rcu_spawn_kthreads);
 
 #else /* #ifdef CONFIG_RCU_BOOST */
 
+/* Hold off callback invocation until early_initcall() time. */
+static int rcu_scheduler_fully_active __read_mostly;
+
 /*
  * Start up softirq processing of callbacks.
  */
 void invoke_rcu_callbacks(void)
 {
-	raise_softirq(RCU_SOFTIRQ);
+	if (rcu_scheduler_fully_active)
+		raise_softirq(RCU_SOFTIRQ);
 }
 
 #ifdef CONFIG_RCU_TRACE
@@ -963,10 +1002,14 @@ static bool rcu_is_callbacks_kthread(void)
 
 #endif /* #ifdef CONFIG_RCU_TRACE */
 
-void rcu_init(void)
+static int __init rcu_scheduler_really_started(void)
 {
+	rcu_scheduler_fully_active = 1;
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+	raise_softirq(RCU_SOFTIRQ);  /* Invoke any callbacks from early boot. */
+	return 0;
 }
+early_initcall(rcu_scheduler_really_started);
 
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
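
[Not part of the patch] The last three hunks are two instances of one boot-ordering pattern: callbacks may be posted before the scheduler can tolerate wakeups or softirqs, so invocation is gated (rcu_kthread_task != NULL under CONFIG_RCU_BOOST, rcu_scheduler_fully_active otherwise) until early_initcall() time, and any early requests are replayed when the gate opens. A generic sketch with hypothetical names:

	static int gate_open __read_mostly;	/* scheduler is fully up */
	static int pending;			/* work posted before the gate opened */

	static void do_kick(void)		/* stands in for wake_up()/raise_softirq() */
	{
		/* ... */
	}

	static void request_work(void)		/* may run arbitrarily early */
	{
		pending = 1;
		if (gate_open)
			do_kick();
	}

	static int __init open_gate(void)
	{
		gate_open = 1;
		if (pending)
			do_kick();		/* replay requests from early boot */
		return 0;
	}
	early_initcall(open_gate);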