Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	479
1 file changed, 1 insertion(+), 478 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index abc8512ceb5f..ec77ec336f58 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -102,11 +102,6 @@ int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
 /* Number of rcu_nodes at specified level. */
 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
-/* panic() on RCU Stall sysctl. */
-int sysctl_panic_on_rcu_stall __read_mostly;
-/* Commandeer a sysrq key to dump RCU's tree. */
-static bool sysrq_rcu;
-module_param(sysrq_rcu, bool, 0444);
 
 /*
  * The rcu_scheduler_active variable is initialized to the value
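
[Note: the sysrq_rcu switch deleted above is an ordinary read-only module parameter. A minimal sketch of the same pattern follows; the my_flag name is hypothetical, and since tree.c is built in, the real parameter would presumably have been set at boot as rcutree.sysrq_rcu=1.]

    #include <linux/module.h>

    /* Mode 0444: read-only at runtime, so the value can only be set on
     * the kernel command line and read back under
     * /sys/module/<name>/parameters/. */
    static bool my_flag;
    module_param(my_flag, bool, 0444);
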
@@ -514,74 +509,6 @@ static const char *gp_state_getname(short gs)
 }
 
 /*
- * Show the state of the grace-period kthreads.
- */
-void show_rcu_gp_kthreads(void)
-{
-	int cpu;
-	unsigned long j;
-	unsigned long ja;
-	unsigned long jr;
-	unsigned long jw;
-	struct rcu_data *rdp;
-	struct rcu_node *rnp;
-
-	j = jiffies;
-	ja = j - READ_ONCE(rcu_state.gp_activity);
-	jr = j - READ_ONCE(rcu_state.gp_req_activity);
-	jw = j - READ_ONCE(rcu_state.gp_wake_time);
-	pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
-		rcu_state.name, gp_state_getname(rcu_state.gp_state),
-		rcu_state.gp_state,
-		rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
-		ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
-		(long)READ_ONCE(rcu_state.gp_seq),
-		(long)READ_ONCE(rcu_get_root()->gp_seq_needed),
-		READ_ONCE(rcu_state.gp_flags));
-	rcu_for_each_node_breadth_first(rnp) {
-		if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
-			continue;
-		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
-			rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
-			(long)rnp->gp_seq_needed);
-		if (!rcu_is_leaf_node(rnp))
-			continue;
-		for_each_leaf_node_possible_cpu(rnp, cpu) {
-			rdp = per_cpu_ptr(&rcu_data, cpu);
-			if (rdp->gpwrap ||
-			    ULONG_CMP_GE(rcu_state.gp_seq,
-					 rdp->gp_seq_needed))
-				continue;
-			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
-				cpu, (long)rdp->gp_seq_needed);
-		}
-	}
-	/* sched_show_task(rcu_state.gp_kthread); */
-}
-EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
-
-/* Dump grace-period-request information due to commandeered sysrq. */
-static void sysrq_show_rcu(int key)
-{
-	show_rcu_gp_kthreads();
-}
-
-static struct sysrq_key_op sysrq_rcudump_op = {
-	.handler = sysrq_show_rcu,
-	.help_msg = "show-rcu(y)",
-	.action_msg = "Show RCU tree",
-	.enable_mask = SYSRQ_ENABLE_DUMP,
-};
-
-static int __init rcu_sysrq_init(void)
-{
-	if (sysrq_rcu)
-		return register_sysrq_key('y', &sysrq_rcudump_op);
-	return 0;
-}
-early_initcall(rcu_sysrq_init);
-
-/*
  * Send along grace-period-related data for rcutorture diagnostics.
  */
 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
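
[Note: the sysrq plumbing removed above is an instance of the generic sysrq-key API from <linux/sysrq.h>. A self-contained sketch of that API as it existed at the time, using a hypothetical key 'x' and handler, would be roughly as follows; once registered, the handler fires on Alt-SysRq-x or "echo x > /proc/sysrq-trigger".]

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/sysrq.h>

    /* Called from the sysrq machinery when the key fires. */
    static void my_sysrq_show(int key)
    {
    	pr_info("sysrq '%c': dumping my state\n", key);
    }

    static struct sysrq_key_op my_sysrq_op = {
    	.handler	= my_sysrq_show,
    	.help_msg	= "show-mine(x)",
    	.action_msg	= "Show my state",
    	.enable_mask	= SYSRQ_ENABLE_DUMP,
    };

    static int __init my_sysrq_init(void)
    {
    	/* Keys are a scarce shared namespace; 'x' is illustrative only. */
    	return register_sysrq_key('x', &my_sysrq_op);
    }
    early_initcall(my_sysrq_init);
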
@@ -1035,27 +962,6 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
 }
 
 /*
- * Handler for the irq_work request posted when a grace period has
- * gone on for too long, but not yet long enough for an RCU CPU
- * stall warning. Set state appropriately, but just complain if
- * there is unexpected state on entry.
- */
-static void rcu_iw_handler(struct irq_work *iwp)
-{
-	struct rcu_data *rdp;
-	struct rcu_node *rnp;
-
-	rdp = container_of(iwp, struct rcu_data, rcu_iw);
-	rnp = rdp->mynode;
-	raw_spin_lock_rcu_node(rnp);
-	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
-		rdp->rcu_iw_gp_seq = rnp->gp_seq;
-		rdp->rcu_iw_pending = false;
-	}
-	raw_spin_unlock_rcu_node(rnp);
-}
-
-/*
  * Return true if the specified CPU has passed through a quiescent
  * state by virtue of being in or having passed through a dynticks
  * idle state since the last call to dyntick_save_progress_counter()
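
[Note: rcu_iw_handler() above is an irq_work callback: elsewhere in this file, RCU posts rdp->rcu_iw at a suspect CPU via irq_work_queue_on(), and the handler later runs on that CPU from hard-interrupt context, proving the CPU can still take interrupts. A minimal sketch of the irq_work API, with hypothetical names:]

    #include <linux/irq_work.h>
    #include <linux/kernel.h>

    static struct irq_work my_iw;

    /* Runs in hard-interrupt context on the CPU the work was queued on. */
    static void my_iw_handler(struct irq_work *iwp)
    {
    	pr_info("irq_work fired\n");
    }

    static void my_iw_post(void)
    {
    	init_irq_work(&my_iw, my_iw_handler);
    	irq_work_queue(&my_iw);	/* self-IPI; handler runs shortly */
    }
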
@@ -1168,295 +1074,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	return 0;
 }
 
-static void record_gp_stall_check_time(void)
-{
-	unsigned long j = jiffies;
-	unsigned long j1;
-
-	rcu_state.gp_start = j;
-	j1 = rcu_jiffies_till_stall_check();
-	/* Record ->gp_start before ->jiffies_stall. */
-	smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
-	rcu_state.jiffies_resched = j + j1 / 2;
-	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
-}
-
-/*
- * Complain about starvation of grace-period kthread.
- */
-static void rcu_check_gp_kthread_starvation(void)
-{
-	struct task_struct *gpk = rcu_state.gp_kthread;
-	unsigned long j;
-
-	j = jiffies - READ_ONCE(rcu_state.gp_activity);
-	if (j > 2 * HZ) {
-		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
-		       rcu_state.name, j,
-		       (long)rcu_seq_current(&rcu_state.gp_seq),
-		       READ_ONCE(rcu_state.gp_flags),
-		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
-		       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
-		if (gpk) {
-			pr_err("RCU grace-period kthread stack dump:\n");
-			sched_show_task(gpk);
-			wake_up_process(gpk);
-		}
-	}
-}
-
-/*
- * Dump stacks of all tasks running on stalled CPUs. First try using
- * NMIs, but fall back to manual remote stack tracing on architectures
- * that don't support NMI-based stack dumps. The NMI-triggered stack
- * traces are more accurate because they are printed by the target CPU.
- */
-static void rcu_dump_cpu_stacks(void)
-{
-	int cpu;
-	unsigned long flags;
-	struct rcu_node *rnp;
-
-	rcu_for_each_leaf_node(rnp) {
-		raw_spin_lock_irqsave_rcu_node(rnp, flags);
-		for_each_leaf_node_possible_cpu(rnp, cpu)
-			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
-				if (!trigger_single_cpu_backtrace(cpu))
-					dump_cpu_task(cpu);
-		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-	}
-}
-
-/*
- * If too much time has passed in the current grace period, and if
- * so configured, go kick the relevant kthreads.
- */
-static void rcu_stall_kick_kthreads(void)
-{
-	unsigned long j;
-
-	if (!rcu_kick_kthreads)
-		return;
-	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
-	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
-	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
-		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
-			  rcu_state.name);
-		rcu_ftrace_dump(DUMP_ALL);
-		wake_up_process(rcu_state.gp_kthread);
-		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
-	}
-}
-
-static void panic_on_rcu_stall(void)
-{
-	if (sysctl_panic_on_rcu_stall)
-		panic("RCU Stall\n");
-}
-
-static void print_other_cpu_stall(unsigned long gp_seq)
-{
-	int cpu;
-	unsigned long flags;
-	unsigned long gpa;
-	unsigned long j;
-	int ndetected = 0;
-	struct rcu_node *rnp = rcu_get_root();
-	long totqlen = 0;
-
-	/* Kick and suppress, if so configured. */
-	rcu_stall_kick_kthreads();
-	if (rcu_cpu_stall_suppress)
-		return;
-
-	/*
-	 * OK, time to rat on our buddy...
-	 * See Documentation/RCU/stallwarn.txt for info on how to debug
-	 * RCU CPU stall warnings.
-	 */
-	pr_err("INFO: %s detected stalls on CPUs/tasks:", rcu_state.name);
-	print_cpu_stall_info_begin();
-	rcu_for_each_leaf_node(rnp) {
-		raw_spin_lock_irqsave_rcu_node(rnp, flags);
-		ndetected += rcu_print_task_stall(rnp);
-		if (rnp->qsmask != 0) {
-			for_each_leaf_node_possible_cpu(rnp, cpu)
-				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
-					print_cpu_stall_info(cpu);
-					ndetected++;
-				}
-		}
-		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-	}
-
-	print_cpu_stall_info_end();
-	for_each_possible_cpu(cpu)
-		totqlen += rcu_get_n_cbs_cpu(cpu);
-	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
-		smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
-		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
-	if (ndetected) {
-		rcu_dump_cpu_stacks();
-
-		/* Complain about tasks blocking the grace period. */
-		rcu_print_detail_task_stall();
-	} else {
-		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
-			pr_err("INFO: Stall ended before state dump start\n");
-		} else {
-			j = jiffies;
-			gpa = READ_ONCE(rcu_state.gp_activity);
-			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
-			       rcu_state.name, j - gpa, j, gpa,
-			       READ_ONCE(jiffies_till_next_fqs),
-			       rcu_get_root()->qsmask);
-			/* In this case, the current CPU might be at fault. */
-			sched_show_task(current);
-		}
-	}
-	/* Rewrite if needed in case of slow consoles. */
-	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
-		WRITE_ONCE(rcu_state.jiffies_stall,
-			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-
-	rcu_check_gp_kthread_starvation();
-
-	panic_on_rcu_stall();
-
-	rcu_force_quiescent_state(); /* Kick them all. */
-}
-
-static void print_cpu_stall(void)
-{
-	int cpu;
-	unsigned long flags;
-	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
-	struct rcu_node *rnp = rcu_get_root();
-	long totqlen = 0;
-
-	/* Kick and suppress, if so configured. */
-	rcu_stall_kick_kthreads();
-	if (rcu_cpu_stall_suppress)
-		return;
-
-	/*
-	 * OK, time to rat on ourselves...
-	 * See Documentation/RCU/stallwarn.txt for info on how to debug
-	 * RCU CPU stall warnings.
-	 */
-	pr_err("INFO: %s self-detected stall on CPU", rcu_state.name);
-	print_cpu_stall_info_begin();
-	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
-	print_cpu_stall_info(smp_processor_id());
-	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
-	print_cpu_stall_info_end();
-	for_each_possible_cpu(cpu)
-		totqlen += rcu_get_n_cbs_cpu(cpu);
-	pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
-		jiffies - rcu_state.gp_start,
-		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
-
-	rcu_check_gp_kthread_starvation();
-
-	rcu_dump_cpu_stacks();
-
-	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	/* Rewrite if needed in case of slow consoles. */
-	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
-		WRITE_ONCE(rcu_state.jiffies_stall,
-			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
-	panic_on_rcu_stall();
-
-	/*
-	 * Attempt to revive the RCU machinery by forcing a context switch.
-	 *
-	 * A context switch would normally allow the RCU state machine to make
-	 * progress and it could be we're stuck in kernel space without context
-	 * switches for an entirely unreasonable amount of time.
-	 */
-	set_tsk_need_resched(current);
-	set_preempt_need_resched();
-}
-
-static void check_cpu_stall(struct rcu_data *rdp)
-{
-	unsigned long gs1;
-	unsigned long gs2;
-	unsigned long gps;
-	unsigned long j;
-	unsigned long jn;
-	unsigned long js;
-	struct rcu_node *rnp;
-
-	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
-	    !rcu_gp_in_progress())
-		return;
-	rcu_stall_kick_kthreads();
-	j = jiffies;
-
-	/*
-	 * Lots of memory barriers to reject false positives.
-	 *
-	 * The idea is to pick up rcu_state.gp_seq, then
-	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
-	 * another copy of rcu_state.gp_seq. These values are updated in
-	 * the opposite order with memory barriers (or equivalent) during
-	 * grace-period initialization and cleanup. Now, a false positive
-	 * can occur if we get a new value of rcu_state.gp_start and an old
-	 * value of rcu_state.jiffies_stall. But given the memory barriers,
-	 * the only way that this can happen is if one grace period ends
-	 * and another starts between these two fetches. This is detected
-	 * by comparing the second fetch of rcu_state.gp_seq with the
-	 * previous fetch from rcu_state.gp_seq.
-	 *
-	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
-	 * and rcu_state.gp_start suffice to forestall false positives.
-	 */
-	gs1 = READ_ONCE(rcu_state.gp_seq);
-	smp_rmb(); /* Pick up ->gp_seq first... */
-	js = READ_ONCE(rcu_state.jiffies_stall);
-	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
-	gps = READ_ONCE(rcu_state.gp_start);
-	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
-	gs2 = READ_ONCE(rcu_state.gp_seq);
-	if (gs1 != gs2 ||
-	    ULONG_CMP_LT(j, js) ||
-	    ULONG_CMP_GE(gps, js))
-		return; /* No stall or GP completed since entering function. */
-	rnp = rdp->mynode;
-	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
-	if (rcu_gp_in_progress() &&
-	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
-	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
-
-		/* We haven't checked in, so go dump stack. */
-		print_cpu_stall();
-
-	} else if (rcu_gp_in_progress() &&
-		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
-		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
-
-		/* They had a few time units to dump stack, so complain. */
-		print_other_cpu_stall(gs2);
-	}
-}
-
-/**
- * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
- *
- * Set the stall-warning timeout way off into the future, thus preventing
- * any RCU CPU stall-warning messages from appearing in the current set of
- * RCU grace periods.
- *
- * The caller must disable hard irqs.
- */
-void rcu_cpu_stall_reset(void)
-{
-	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
-}
-
 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 			      unsigned long gp_seq_req, const char *s)
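
[Note: throughout the deleted stall-detection code, jiffies and grace-period sequence numbers are compared with ULONG_CMP_LT()/ULONG_CMP_GE() rather than bare relational operators, so that counter wraparound cannot produce a false verdict. The macros, defined in include/linux/rcupdate.h, reduce to unsigned modular subtraction; a standalone demonstration:]

    #include <limits.h>
    #include <stdio.h>

    /* Macro bodies as in include/linux/rcupdate.h. */
    #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
    #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

    int main(void)
    {
    	unsigned long before_wrap = ULONG_MAX - 10;
    	unsigned long after_wrap = 5;	/* counter wrapped past zero */

    	/* A plain "<" would call after_wrap the smaller value;
    	 * modular subtraction gets the ordering right. */
    	printf("%d %d\n",
    	       ULONG_CMP_LT(before_wrap, after_wrap),	/* prints 1 */
    	       ULONG_CMP_GE(after_wrap, before_wrap));	/* prints 1 */
    	return 0;
    }
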
@@ -2635,101 +2252,6 @@ void rcu_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
-/*
- * This function checks for grace-period requests that fail to motivate
- * RCU to come out of its idle mode.
- */
-void
-rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
-			 const unsigned long gpssdelay)
-{
-	unsigned long flags;
-	unsigned long j;
-	struct rcu_node *rnp_root = rcu_get_root();
-	static atomic_t warned = ATOMIC_INIT(0);
-
-	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
-	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
-		return;
-	j = jiffies; /* Expensive access, and in common case don't get here. */
-	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
-	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
-	    atomic_read(&warned))
-		return;
-
-	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	j = jiffies;
-	if (rcu_gp_in_progress() ||
-	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
-	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
-	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
-	    atomic_read(&warned)) {
-		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-		return;
-	}
-	/* Hold onto the leaf lock to make others see warned==1. */
-
-	if (rnp_root != rnp)
-		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
-	j = jiffies;
-	if (rcu_gp_in_progress() ||
-	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
-	    time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
-	    time_before(j, rcu_state.gp_activity + gpssdelay) ||
-	    atomic_xchg(&warned, 1)) {
-		raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
-		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-		return;
-	}
-	WARN_ON(1);
-	if (rnp_root != rnp)
-		raw_spin_unlock_rcu_node(rnp_root);
-	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-	show_rcu_gp_kthreads();
-}
-
-/*
- * Do a forward-progress check for rcutorture. This is normally invoked
- * due to an OOM event. The argument "j" gives the time period during
- * which rcutorture would like progress to have been made.
- */
-void rcu_fwd_progress_check(unsigned long j)
-{
-	unsigned long cbs;
-	int cpu;
-	unsigned long max_cbs = 0;
-	int max_cpu = -1;
-	struct rcu_data *rdp;
-
-	if (rcu_gp_in_progress()) {
-		pr_info("%s: GP age %lu jiffies\n",
-			__func__, jiffies - rcu_state.gp_start);
-		show_rcu_gp_kthreads();
-	} else {
-		pr_info("%s: Last GP end %lu jiffies ago\n",
-			__func__, jiffies - rcu_state.gp_end);
-		preempt_disable();
-		rdp = this_cpu_ptr(&rcu_data);
-		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
-		preempt_enable();
-	}
-	for_each_possible_cpu(cpu) {
-		cbs = rcu_get_n_cbs_cpu(cpu);
-		if (!cbs)
-			continue;
-		if (max_cpu < 0)
-			pr_info("%s: callbacks", __func__);
-		pr_cont(" %d: %lu", cpu, cbs);
-		if (cbs <= max_cbs)
-			continue;
-		max_cbs = cbs;
-		max_cpu = cpu;
-	}
-	if (max_cpu >= 0)
-		pr_cont("\n");
-}
-EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
-
 /* Perform RCU core processing work for the current CPU. */
 static __latent_entropy void rcu_core(struct softirq_action *unused)
 {
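
[Note: rcu_check_gp_start_stall() above is a check/lock/recheck structure: the cheap disqualifying tests are repeated after every lock acquisition, and atomic_xchg(&warned, 1) elects exactly one caller to issue the warning, while later callers see warned != 0 and bail out on the cheap path. A userspace C11 sketch of the same idiom, all names hypothetical:]

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <threads.h>

    static atomic_int warned;
    static mtx_t lock;

    /* Hypothetical stand-in for the "has the condition resolved itself?"
     * rechecks; always false here so the warning path is reachable. */
    static bool condition_cleared(void) { return false; }

    static void maybe_warn_once(void)
    {
    	/* Cheap unlocked filter: most callers leave here. */
    	if (condition_cleared() || atomic_load(&warned))
    		return;
    	mtx_lock(&lock);
    	/* Recheck under the lock; atomic_exchange() names one winner. */
    	if (condition_cleared() || atomic_exchange(&warned, 1)) {
    		mtx_unlock(&lock);
    		return;
    	}
    	fprintf(stderr, "forward progress stalled; complaining once\n");
    	mtx_unlock(&lock);	/* complain while holding the lock, as above */
    }

    int main(void)
    {
    	mtx_init(&lock, mtx_plain);
    	maybe_warn_once();
    	maybe_warn_once();	/* no-op: warned is already set */
    	mtx_destroy(&lock);
    	return 0;
    }
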
@@ -3855,5 +3377,6 @@ void __init rcu_init(void)
 	srcu_init();
 }
 
+#include "tree_stall.h"
 #include "tree_exp.h"
 #include "tree_plugin.h"
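
[Note: the single added line is the point of this patch: tree_stall.h, like tree_exp.h and tree_plugin.h before it, is not a conventional header but a .c-style fragment textually included at the bottom of tree.c. The moved functions thus stay in one translation unit, can remain static, and keep access to tree.c's file-scope state while living in a separate file. The shape of the pattern, with hypothetical file names:]

    /* big.c */
    static int shared_state;	/* file-scope, visible to the fragment */

    /* ...all of big.c's own code... */

    #include "big_helpers.h"	/* included last, like tree_stall.h */

    /* big_helpers.h: function *definitions*, not declarations. They are
     * compiled as part of big.c, so they can stay static and still
     * touch shared_state. */
    static int helper_bump(void)
    {
    	return ++shared_state;
    }
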