Diffstat (limited to 'kernel/rcu/rcutorture.c')
 -rw-r--r--  kernel/rcu/rcutorture.c | 278
 1 file changed, 198 insertions(+), 80 deletions(-)
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 948a7693748e..240fa9094f83 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -49,11 +49,19 @@
 #include <linux/trace_clock.h>
 #include <asm/byteorder.h>
 #include <linux/torture.h>
+#include <linux/vmalloc.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
 
 
+torture_param(int, cbflood_inter_holdoff, HZ,
+	      "Holdoff between floods (jiffies)");
+torture_param(int, cbflood_intra_holdoff, 1,
+	      "Holdoff between bursts (jiffies)");
+torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
+torture_param(int, cbflood_n_per_burst, 20000,
+	      "# callbacks per burst in flood");
 torture_param(int, fqs_duration, 0,
 	      "Duration of fqs bursts (us), 0 to disable");
 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
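
The four cbflood_* knobs added above are declared with torture_param(), which creates a static variable and registers it as a read-only module parameter. As a hedged sketch only (the authoritative definition lives in include/linux/torture.h and may differ in detail), the macro expands roughly like this:

/* Approximate expansion of torture_param(); see include/linux/torture.h
 * for the real definition. */
#define torture_param(type, name, init, msg) \
	static type name = init; \
	module_param(name, type, 0444); \
	MODULE_PARM_DESC(name, msg)

With the defaults above, a flood of 3 bursts of 20000 callbacks starts every HZ jiffies (about once per second), with one jiffy between bursts.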
@@ -96,10 +104,12 @@ module_param(torture_type, charp, 0444);
 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
 
 static int nrealreaders;
+static int ncbflooders;
 static struct task_struct *writer_task;
 static struct task_struct **fakewriter_tasks;
 static struct task_struct **reader_tasks;
 static struct task_struct *stats_task;
+static struct task_struct **cbflood_task;
 static struct task_struct *fqs_task;
 static struct task_struct *boost_tasks[NR_CPUS];
 static struct task_struct *stall_task;
@@ -138,6 +148,7 @@ static long n_rcu_torture_boosts;
 static long n_rcu_torture_timers;
 static long n_barrier_attempts;
 static long n_barrier_successes;
+static atomic_long_t n_cbfloods;
 static struct list_head rcu_torture_removed;
 
 static int rcu_torture_writer_state;
@@ -157,9 +168,9 @@ static int rcu_torture_writer_state;
 #else
 #define RCUTORTURE_RUNNABLE_INIT 0
 #endif
-int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
-module_param(rcutorture_runnable, int, 0444);
-MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");
+static int torture_runnable = RCUTORTURE_RUNNABLE_INIT;
+module_param(torture_runnable, int, 0444);
+MODULE_PARM_DESC(torture_runnable, "Start rcutorture at boot");
 
 #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
 #define rcu_can_boost() 1
@@ -182,7 +193,7 @@ static u64 notrace rcu_trace_clock_local(void)
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
 static unsigned long boost_starttime;	/* jiffies of next boost test start. */
-DEFINE_MUTEX(boost_mutex);		/* protect setting boost_starttime */
+static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
 					/* and boost task create/destroy. */
 static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
 static bool barrier_phase;		/* Test phase. */
@@ -242,7 +253,7 @@ struct rcu_torture_ops {
 	void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
 	void (*cb_barrier)(void);
 	void (*fqs)(void);
-	void (*stats)(char *page);
+	void (*stats)(void);
 	int irq_capable;
 	int can_boost;
 	const char *name;
@@ -525,21 +536,21 @@ static void srcu_torture_barrier(void)
 	srcu_barrier(&srcu_ctl);
 }
 
-static void srcu_torture_stats(char *page)
+static void srcu_torture_stats(void)
 {
 	int cpu;
 	int idx = srcu_ctl.completed & 0x1;
 
-	page += sprintf(page, "%s%s per-CPU(idx=%d):",
+	pr_alert("%s%s per-CPU(idx=%d):",
 		 torture_type, TORTURE_FLAG, idx);
 	for_each_possible_cpu(cpu) {
 		long c0, c1;
 
 		c0 = (long)per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx];
 		c1 = (long)per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx];
-		page += sprintf(page, " %d(%ld,%ld)", cpu, c0, c1);
+		pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
 	}
-	sprintf(page, "\n");
+	pr_cont("\n");
 }
 
 static void srcu_torture_synchronize_expedited(void)
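
The srcu_torture_stats() conversion above is the pattern used throughout this patch: instead of sprintf()ing into a caller-supplied buffer, pr_alert() starts a new KERN_ALERT log line and pr_cont() appends to the most recently emitted line. A minimal sketch of that pattern (the function and variable names here are illustrative, not taken from the patch):

#include <linux/printk.h>

/* Start one KERN_ALERT line, then extend it piecewise with pr_cont(). */
static void demo_print_counts(const long *counts, int n)
{
	int i;

	pr_alert("demo counts:");
	for (i = 0; i < n; i++)
		pr_cont(" %ld", counts[i]);
	pr_cont("\n");
}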
@@ -601,6 +612,52 @@ static struct rcu_torture_ops sched_ops = {
 	.name		= "sched"
 };
 
+#ifdef CONFIG_TASKS_RCU
+
+/*
+ * Definitions for RCU-tasks torture testing.
+ */
+
+static int tasks_torture_read_lock(void)
+{
+	return 0;
+}
+
+static void tasks_torture_read_unlock(int idx)
+{
+}
+
+static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
+{
+	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
+}
+
+static struct rcu_torture_ops tasks_ops = {
+	.ttype		= RCU_TASKS_FLAVOR,
+	.init		= rcu_sync_torture_init,
+	.readlock	= tasks_torture_read_lock,
+	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
+	.readunlock	= tasks_torture_read_unlock,
+	.completed	= rcu_no_completed,
+	.deferred_free	= rcu_tasks_torture_deferred_free,
+	.sync		= synchronize_rcu_tasks,
+	.exp_sync	= synchronize_rcu_tasks,
+	.call		= call_rcu_tasks,
+	.cb_barrier	= rcu_barrier_tasks,
+	.fqs		= NULL,
+	.stats		= NULL,
+	.irq_capable	= 1,
+	.name		= "tasks"
+};
+
+#define RCUTORTURE_TASKS_OPS &tasks_ops,
+
+#else /* #ifdef CONFIG_TASKS_RCU */
+
+#define RCUTORTURE_TASKS_OPS
+
+#endif /* #else #ifdef CONFIG_TASKS_RCU */
+
 /*
  * RCU torture priority-boost testing.  Runs one real-time thread per
  * CPU for moderate bursts, repeatedly registering RCU callbacks and
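
RCUTORTURE_TASKS_OPS carries its own trailing comma so that the torture_ops[] initializer later in this patch compiles whether or not CONFIG_TASKS_RCU is enabled: with the option on it contributes "&tasks_ops," to the array, and with it off it expands to nothing. A self-contained illustration of the same trick, using made-up names (CONFIG_DEMO_FEATURE, demo_ops, and so on are not from the kernel):

struct demo_ops { const char *name; };

static struct demo_ops base_ops = { .name = "base" };

#ifdef CONFIG_DEMO_FEATURE			/* hypothetical option */
static struct demo_ops feature_ops = { .name = "feature" };
#define DEMO_FEATURE_OPS &feature_ops,		/* note the trailing comma */
#else
#define DEMO_FEATURE_OPS			/* expands to nothing */
#endif

static struct demo_ops *demo_ops_list[] = {
	&base_ops, DEMO_FEATURE_OPS		/* valid either way */
};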
@@ -667,7 +724,7 @@ static int rcu_torture_boost(void *arg)
 			}
 			call_rcu_time = jiffies;
 		}
-		cond_resched();
+		cond_resched_rcu_qs();
 		stutter_wait("rcu_torture_boost");
 		if (torture_must_stop())
 			goto checkwait;
@@ -707,6 +764,58 @@ checkwait: stutter_wait("rcu_torture_boost");
 	return 0;
 }
 
+static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
+{
+}
+
+/*
+ * RCU torture callback-flood kthread.  Repeatedly induces bursts of calls
+ * to call_rcu() or analogous, increasing the probability of occurrence
+ * of callback-overflow corner cases.
+ */
+static int
+rcu_torture_cbflood(void *arg)
+{
+	int err = 1;
+	int i;
+	int j;
+	struct rcu_head *rhp;
+
+	if (cbflood_n_per_burst > 0 &&
+	    cbflood_inter_holdoff > 0 &&
+	    cbflood_intra_holdoff > 0 &&
+	    cur_ops->call &&
+	    cur_ops->cb_barrier) {
+		rhp = vmalloc(sizeof(*rhp) *
+			      cbflood_n_burst * cbflood_n_per_burst);
+		err = !rhp;
+	}
+	if (err) {
+		VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
+		while (!torture_must_stop())
+			schedule_timeout_interruptible(HZ);
+		return 0;
+	}
+	VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
+	do {
+		schedule_timeout_interruptible(cbflood_inter_holdoff);
+		atomic_long_inc(&n_cbfloods);
+		WARN_ON(signal_pending(current));
+		for (i = 0; i < cbflood_n_burst; i++) {
+			for (j = 0; j < cbflood_n_per_burst; j++) {
+				cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
+					      rcu_torture_cbflood_cb);
+			}
+			schedule_timeout_interruptible(cbflood_intra_holdoff);
+			WARN_ON(signal_pending(current));
+		}
+		cur_ops->cb_barrier();
+		stutter_wait("rcu_torture_cbflood");
+	} while (!torture_must_stop());
+	torture_kthread_stopping("rcu_torture_cbflood");
+	return 0;
+}
+
 /*
  * RCU torture force-quiescent-state kthread.  Repeatedly induces
  * bursts of calls to force_quiescent_state(), increasing the probability
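
Each flooder vmalloc()s all of its rcu_head structures up front and reuses them on every pass, waiting on cur_ops->cb_barrier() so the previous flood's callbacks are guaranteed to have been invoked before the heads are posted again. A worked footprint calculation for the default parameters, under the assumption that struct rcu_head is two pointers (16 bytes) on a 64-bit kernel:

#include <stdio.h>

/* User-space arithmetic only; the 16-byte rcu_head size is an assumption. */
int main(void)
{
	const unsigned long n_burst = 3;		/* cbflood_n_burst default */
	const unsigned long n_per_burst = 20000;	/* cbflood_n_per_burst default */
	const unsigned long rcu_head_size = 16;		/* assumed sizeof(struct rcu_head) */

	printf("callbacks per flood: %lu\n", n_burst * n_per_burst);	/* 60000 */
	printf("vmalloc footprint:   %lu bytes\n",
	       n_burst * n_per_burst * rcu_head_size);			/* 960000, ~937 KiB */
	return 0;
}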
@@ -1019,7 +1128,7 @@ rcu_torture_reader(void *arg)
 		__this_cpu_inc(rcu_torture_batch[completed]);
 		preempt_enable();
 		cur_ops->readunlock(idx);
-		cond_resched();
+		cond_resched_rcu_qs();
 		stutter_wait("rcu_torture_reader");
 	} while (!torture_must_stop());
 	if (irqreader && cur_ops->irq_capable) {
@@ -1031,10 +1140,15 @@ rcu_torture_reader(void *arg)
 }
 
 /*
- * Create an RCU-torture statistics message in the specified buffer.
+ * Print torture statistics.  Caller must ensure that there is only
+ * one call to this function at a given time!!!  This is normally
+ * accomplished by relying on the module system to only have one copy
+ * of the module loaded, and then by giving the rcu_torture_stats
+ * kthread full control (or the init/cleanup functions when rcu_torture_stats
+ * thread is not running).
  */
 static void
-rcu_torture_printk(char *page)
+rcu_torture_stats_print(void)
 {
 	int cpu;
 	int i;
@@ -1052,55 +1166,61 @@ rcu_torture_printk(char *page)
 		if (pipesummary[i] != 0)
 			break;
 	}
-	page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
-	page += sprintf(page,
-		"rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
+
+	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
+	pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
 		rcu_torture_current,
 		rcu_torture_current_version,
 		list_empty(&rcu_torture_freelist),
 		atomic_read(&n_rcu_torture_alloc),
 		atomic_read(&n_rcu_torture_alloc_fail),
 		atomic_read(&n_rcu_torture_free));
-	page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
+	pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
 		atomic_read(&n_rcu_torture_mberror),
 		n_rcu_torture_boost_ktrerror,
 		n_rcu_torture_boost_rterror);
-	page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
+	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
 		n_rcu_torture_boost_failure,
 		n_rcu_torture_boosts,
 		n_rcu_torture_timers);
-	page = torture_onoff_stats(page);
-	page += sprintf(page, "barrier: %ld/%ld:%ld",
+	torture_onoff_stats();
+	pr_cont("barrier: %ld/%ld:%ld ",
 		n_barrier_successes,
 		n_barrier_attempts,
 		n_rcu_torture_barrier_error);
-	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
+	pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
+
+	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
 	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
 	    n_rcu_torture_barrier_error != 0 ||
 	    n_rcu_torture_boost_ktrerror != 0 ||
 	    n_rcu_torture_boost_rterror != 0 ||
 	    n_rcu_torture_boost_failure != 0 ||
 	    i > 1) {
-		page += sprintf(page, "!!! ");
+		pr_cont("%s", "!!! ");
 		atomic_inc(&n_rcu_torture_error);
 		WARN_ON_ONCE(1);
 	}
-	page += sprintf(page, "Reader Pipe: ");
+	pr_cont("Reader Pipe: ");
 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
-		page += sprintf(page, " %ld", pipesummary[i]);
-	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
-	page += sprintf(page, "Reader Batch: ");
+		pr_cont(" %ld", pipesummary[i]);
+	pr_cont("\n");
+
+	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
+	pr_cont("Reader Batch: ");
 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
-		page += sprintf(page, " %ld", batchsummary[i]);
-	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
-	page += sprintf(page, "Free-Block Circulation: ");
+		pr_cont(" %ld", batchsummary[i]);
+	pr_cont("\n");
+
+	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
+	pr_cont("Free-Block Circulation: ");
 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
-		page += sprintf(page, " %d",
-			atomic_read(&rcu_torture_wcount[i]));
+		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
 	}
-	page += sprintf(page, "\n");
+	pr_cont("\n");
+
 	if (cur_ops->stats)
-		cur_ops->stats(page);
+		cur_ops->stats();
 	if (rtcv_snap == rcu_torture_current_version &&
 	    rcu_torture_current != NULL) {
 		int __maybe_unused flags;
@@ -1109,10 +1229,9 @@ rcu_torture_printk(char *page)
 
 		rcutorture_get_gp_data(cur_ops->ttype,
 				       &flags, &gpnum, &completed);
-		page += sprintf(page,
-			"??? Writer stall state %d g%lu c%lu f%#x\n",
+		pr_alert("??? Writer stall state %d g%lu c%lu f%#x\n",
 			rcu_torture_writer_state,
 			gpnum, completed, flags);
 		show_rcu_gp_kthreads();
 		rcutorture_trace_dump();
 	}
@@ -1120,30 +1239,6 @@ rcu_torture_printk(char *page)
 }
 
 /*
- * Print torture statistics.  Caller must ensure that there is only
- * one call to this function at a given time!!!  This is normally
- * accomplished by relying on the module system to only have one copy
- * of the module loaded, and then by giving the rcu_torture_stats
- * kthread full control (or the init/cleanup functions when rcu_torture_stats
- * thread is not running).
- */
-static void
-rcu_torture_stats_print(void)
-{
-	int size = nr_cpu_ids * 200 + 8192;
-	char *buf;
-
-	buf = kmalloc(size, GFP_KERNEL);
-	if (!buf) {
-		pr_err("rcu-torture: Out of memory, need: %d", size);
-		return;
-	}
-	rcu_torture_printk(buf);
-	pr_alert("%s", buf);
-	kfree(buf);
-}
-
-/*
  * Periodically prints torture statistics, if periodic statistics printing
  * was specified via the stat_interval module parameter.
  */
@@ -1295,7 +1390,8 @@ static int rcu_torture_barrier_cbs(void *arg)
 		if (atomic_dec_and_test(&barrier_cbs_count))
 			wake_up(&barrier_wq);
 	} while (!torture_must_stop());
-	cur_ops->cb_barrier();
+	if (cur_ops->cb_barrier != NULL)
+		cur_ops->cb_barrier();
 	destroy_rcu_head_on_stack(&rcu);
 	torture_kthread_stopping("rcu_torture_barrier_cbs");
 	return 0;
@@ -1418,7 +1514,7 @@ rcu_torture_cleanup(void)
 	int i;
 
 	rcutorture_record_test_transition();
-	if (torture_cleanup()) {
+	if (torture_cleanup_begin()) {
 		if (cur_ops->cb_barrier != NULL)
 			cur_ops->cb_barrier();
 		return;
@@ -1447,6 +1543,8 @@ rcu_torture_cleanup(void)
 
 	torture_stop_kthread(rcu_torture_stats, stats_task);
 	torture_stop_kthread(rcu_torture_fqs, fqs_task);
+	for (i = 0; i < ncbflooders; i++)
+		torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
 	if ((test_boost == 1 && cur_ops->can_boost) ||
 	    test_boost == 2) {
 		unregister_cpu_notifier(&rcutorture_cpu_nb);
@@ -1468,6 +1566,7 @@ rcu_torture_cleanup(void)
 					 "End of test: RCU_HOTPLUG");
 	else
 		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
+	torture_cleanup_end();
 }
 
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
@@ -1534,9 +1633,10 @@ rcu_torture_init(void)
 	int firsterr = 0;
 	static struct rcu_torture_ops *torture_ops[] = {
 		&rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &sched_ops,
+		RCUTORTURE_TASKS_OPS
 	};
 
-	if (!torture_init_begin(torture_type, verbose, &rcutorture_runnable))
+	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
 		return -EBUSY;
 
 	/* Process args and tell the world that the torturer is on the job. */
@@ -1693,6 +1793,24 @@ rcu_torture_init(void)
 			goto unwind;
 	if (object_debug)
 		rcu_test_debug_objects();
+	if (cbflood_n_burst > 0) {
+		/* Create the cbflood threads */
+		ncbflooders = (num_online_cpus() + 3) / 4;
+		cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
+				       GFP_KERNEL);
+		if (!cbflood_task) {
+			VERBOSE_TOROUT_ERRSTRING("out of memory");
+			firsterr = -ENOMEM;
+			goto unwind;
+		}
+		for (i = 0; i < ncbflooders; i++) {
+			firsterr = torture_create_kthread(rcu_torture_cbflood,
+							  NULL,
+							  cbflood_task[i]);
+			if (firsterr)
+				goto unwind;
+		}
+	}
 	rcutorture_record_test_transition();
 	torture_init_end();
 	return 0;
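
For reference, the flooder-count heuristic in the hunk above works out to one callback-flood kthread per four online CPUs, rounded up. The helper below just restates that expression in plain C; it is not code from the patch:

/* (num_online_cpus() + 3) / 4: e.g. 1..4 CPUs -> 1 flooder, 5..8 -> 2, 16 -> 4. */
static inline int demo_ncbflooders(int online_cpus)
{
	return (online_cpus + 3) / 4;
}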