path: root/kernel/rcutree_plugin.h
author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-03-02 16:15:15 -0500
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-05-06 02:16:59 -0400
commit    6cc68793e380bb51f447d8d02af873b7bc01f222 (patch)
tree      f2b66ace23acc0a31a08f136ad25659d6b51c4b1 /kernel/rcutree_plugin.h
parent    13491a0ee1ef862b6c842132b6eb9c5e721af5ad (diff)
rcu: fix spelling
The "preemptible" spelling is preferable. May as well fix it. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h | 62
1 file changed, 31 insertions(+), 31 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 11b27f377b8b..f629479d4b1f 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1,7 +1,7 @@
 /*
  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
  * Internal non-public definitions that provide either classic
- * or preemptable semantics.
+ * or preemptible semantics.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -75,7 +75,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp);
  */
 static void __init rcu_bootup_announce(void)
 {
-	printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n");
+	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
 	rcu_bootup_announce_oddness();
 }
 
@@ -108,7 +108,7 @@ void rcu_force_quiescent_state(void)
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
 /*
- * Record a preemptable-RCU quiescent state for the specified CPU. Note
+ * Record a preemptible-RCU quiescent state for the specified CPU. Note
  * that this just means that the task currently running on the CPU is
  * not in a quiescent state. There might be any number of tasks blocked
  * while in an RCU read-side critical section.
@@ -207,7 +207,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 }
 
 /*
- * Tree-preemptable RCU implementation for rcu_read_lock().
+ * Tree-preemptible RCU implementation for rcu_read_lock().
  * Just increment ->rcu_read_lock_nesting, shared state will be updated
  * if we block.
  */
@@ -376,7 +376,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
 }
 
 /*
- * Tree-preemptable RCU implementation for rcu_read_unlock().
+ * Tree-preemptible RCU implementation for rcu_read_unlock().
  * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
  * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
  * invoke rcu_read_unlock_special() to clean up after a context switch
@@ -565,7 +565,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 }
 
 /*
- * Do CPU-offline processing for preemptable RCU.
+ * Do CPU-offline processing for preemptible RCU.
  */
 static void rcu_preempt_offline_cpu(int cpu)
 {
@@ -594,7 +594,7 @@ static void rcu_preempt_check_callbacks(int cpu)
 }
 
 /*
- * Process callbacks for preemptable RCU.
+ * Process callbacks for preemptible RCU.
  */
 static void rcu_preempt_process_callbacks(void)
 {
@@ -603,7 +603,7 @@ static void rcu_preempt_process_callbacks(void)
 }
 
 /*
- * Queue a preemptable-RCU callback for invocation after a grace period.
+ * Queue a preemptible-RCU callback for invocation after a grace period.
  */
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 {
@@ -795,7 +795,7 @@ mb_ret:
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
 /*
- * Check to see if there is any immediate preemptable-RCU-related work
+ * Check to see if there is any immediate preemptible-RCU-related work
  * to be done.
  */
 static int rcu_preempt_pending(int cpu)
@@ -805,7 +805,7 @@ static int rcu_preempt_pending(int cpu)
 }
 
 /*
- * Does preemptable RCU need the CPU to stay out of dynticks mode?
+ * Does preemptible RCU need the CPU to stay out of dynticks mode?
  */
 static int rcu_preempt_needs_cpu(int cpu)
 {
@@ -822,7 +822,7 @@ void rcu_barrier(void)
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
 /*
- * Initialize preemptable RCU's per-CPU data.
+ * Initialize preemptible RCU's per-CPU data.
  */
 static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
 {
@@ -830,7 +830,7 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
 }
 
 /*
- * Move preemptable RCU's callbacks from dying CPU to other online CPU.
+ * Move preemptible RCU's callbacks from dying CPU to other online CPU.
  */
 static void rcu_preempt_send_cbs_to_online(void)
 {
@@ -838,7 +838,7 @@ static void rcu_preempt_send_cbs_to_online(void)
 }
 
 /*
- * Initialize preemptable RCU's state structures.
+ * Initialize preemptible RCU's state structures.
  */
 static void __init __rcu_init_preempt(void)
 {
@@ -846,7 +846,7 @@ static void __init __rcu_init_preempt(void)
 }
 
 /*
- * Check for a task exiting while in a preemptable-RCU read-side
+ * Check for a task exiting while in a preemptible-RCU read-side
  * critical section, clean up if so. No need to issue warnings,
  * as debug_check_no_locks_held() already does this if lockdep
  * is enabled.
@@ -894,7 +894,7 @@ void rcu_force_quiescent_state(void)
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
 /*
- * Because preemptable RCU does not exist, we never have to check for
+ * Because preemptible RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
  */
 static void rcu_preempt_note_context_switch(int cpu)
@@ -902,7 +902,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 }
 
 /*
- * Because preemptable RCU does not exist, there are never any preempted
+ * Because preemptible RCU does not exist, there are never any preempted
  * RCU readers.
  */
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
@@ -921,7 +921,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
- * Because preemptable RCU does not exist, we never have to check for
+ * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
@@ -929,7 +929,7 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 }
 
 /*
- * Because preemptable RCU does not exist, we never have to check for
+ * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
 static void rcu_print_task_stall(struct rcu_node *rnp)
@@ -945,7 +945,7 @@ static void rcu_preempt_stall_reset(void)
 }
 
 /*
- * Because there is no preemptable RCU, there can be no readers blocked,
+ * Because there is no preemptible RCU, there can be no readers blocked,
  * so there is no need to check for blocked tasks. So check only for
  * bogus qsmask values.
  */
@@ -957,7 +957,7 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
- * Because preemptable RCU does not exist, it never needs to migrate
+ * Because preemptible RCU does not exist, it never needs to migrate
  * tasks that were blocked within RCU read-side critical sections, and
  * such non-existent tasks cannot possibly have been blocking the current
  * grace period.
@@ -970,7 +970,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 }
 
 /*
- * Because preemptable RCU does not exist, it never needs CPU-offline
+ * Because preemptible RCU does not exist, it never needs CPU-offline
  * processing.
  */
 static void rcu_preempt_offline_cpu(int cpu)
@@ -980,7 +980,7 @@ static void rcu_preempt_offline_cpu(int cpu)
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
- * Because preemptable RCU does not exist, it never has any callbacks
+ * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
  */
 static void rcu_preempt_check_callbacks(int cpu)
@@ -988,7 +988,7 @@ static void rcu_preempt_check_callbacks(int cpu)
 }
 
 /*
- * Because preemptable RCU does not exist, it never has any callbacks
+ * Because preemptible RCU does not exist, it never has any callbacks
  * to process.
  */
 static void rcu_preempt_process_callbacks(void)
@@ -997,7 +997,7 @@ static void rcu_preempt_process_callbacks(void)
 
 /*
  * Wait for an rcu-preempt grace period, but make it happen quickly.
- * But because preemptable RCU does not exist, map to rcu-sched.
+ * But because preemptible RCU does not exist, map to rcu-sched.
  */
 void synchronize_rcu_expedited(void)
 {
@@ -1008,7 +1008,7 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
- * Because preemptable RCU does not exist, there is never any need to
+ * Because preemptible RCU does not exist, there is never any need to
  * report on tasks preempted in RCU read-side critical sections during
  * expedited RCU grace periods.
  */
@@ -1020,7 +1020,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
- * Because preemptable RCU does not exist, it never has any work to do.
+ * Because preemptible RCU does not exist, it never has any work to do.
  */
 static int rcu_preempt_pending(int cpu)
 {
@@ -1028,7 +1028,7 @@ static int rcu_preempt_pending(int cpu)
 }
 
 /*
- * Because preemptable RCU does not exist, it never needs any CPU.
+ * Because preemptible RCU does not exist, it never needs any CPU.
  */
 static int rcu_preempt_needs_cpu(int cpu)
 {
@@ -1036,7 +1036,7 @@ static int rcu_preempt_needs_cpu(int cpu)
 }
 
 /*
- * Because preemptable RCU does not exist, rcu_barrier() is just
+ * Because preemptible RCU does not exist, rcu_barrier() is just
  * another name for rcu_barrier_sched().
  */
 void rcu_barrier(void)
@@ -1046,7 +1046,7 @@ void rcu_barrier(void)
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
 /*
- * Because preemptable RCU does not exist, there is no per-CPU
+ * Because preemptible RCU does not exist, there is no per-CPU
  * data to initialize.
  */
 static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
@@ -1054,14 +1054,14 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
 }
 
 /*
- * Because there is no preemptable RCU, there are no callbacks to move.
+ * Because there is no preemptible RCU, there are no callbacks to move.
  */
 static void rcu_preempt_send_cbs_to_online(void)
 {
 }
 
 /*
- * Because preemptable RCU does not exist, it need not be initialized.
+ * Because preemptible RCU does not exist, it need not be initialized.
  */
 static void __init __rcu_init_preempt(void)
 {