-rw-r--r--  Documentation/filesystems/proc.txt   |   1
-rw-r--r--  include/linux/interrupt.h            |   1
-rw-r--r--  include/trace/events/irq.h           |   3
-rw-r--r--  kernel/rcutree.c                     | 398
-rw-r--r--  kernel/rcutree.h                     |  12
-rw-r--r--  kernel/rcutree_plugin.h              | 419
-rw-r--r--  kernel/rcutree_trace.c               |  32
-rw-r--r--  kernel/softirq.c                     |   2
-rw-r--r--  tools/perf/util/trace-event-parse.c  |   1
9 files changed, 466 insertions, 403 deletions
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index f48178024067..db3b1aba32a3 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -843,6 +843,7 @@ Provides counts of softirq handlers serviced since boot time, for each cpu.
 TASKLET:          0          0          0        290
 SCHED:        27035      26983      26971      26746
 HRTIMER:          0          0          0          0
+RCU:           1678       1769       2178       2250


 1.3 IDE devices in /proc/ide
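On a kernel with this patch applied the new row is directly visible; for example, running "grep RCU /proc/softirqs" on the four-CPU system sampled above would print a line of per-CPU counts such as "RCU: 1678 1769 2178 2250" (values are illustrative and vary from system to system and with uptime).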
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 6c12989839d9..f6efed0039ed 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -414,6 +414,7 @@ enum
 	TASKLET_SOFTIRQ,
 	SCHED_SOFTIRQ,
 	HRTIMER_SOFTIRQ,
+	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */

 	NR_SOFTIRQS
 };
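A softirq vector such as the new RCU_SOFTIRQ only becomes useful once a handler is registered for it and something raises it; both halves of that pattern appear in the kernel/rcutree.c hunks further down. As a minimal sketch of the pattern, built from the two calls this patch actually adds (illustrative only, not an additional hunk):

        /* Registered once at boot -- see the rcu_init() hunk below. */
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);

        /*
         * Mark the softirq pending on the current CPU; the handler then
         * runs at the next softirq execution point (irq_exit() or
         * ksoftirqd) -- see invoke_rcu_core() below.
         */
        raise_softirq(RCU_SOFTIRQ);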
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index ae045ca7d356..1c09820df585 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -20,7 +20,8 @@ struct softirq_action;
 			 softirq_name(BLOCK_IOPOLL),	\
 			 softirq_name(TASKLET),		\
 			 softirq_name(SCHED),		\
-			 softirq_name(HRTIMER))
+			 softirq_name(HRTIMER),		\
+			 softirq_name(RCU))

 /**
  * irq_handler_entry - called immediately before the irq action handler
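The list being extended here is what the softirq trace events hand to __print_symbolic(), so ftrace and perf output can show the string "RCU" instead of the raw vector number 9; each softirq_name(X) entry expands to roughly { X_SOFTIRQ, "X" }. The separate hard-coded table used by perf's builtin parser is updated in tools/perf/util/trace-event-parse.c at the end of this patch.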
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 89419ff92e99..7e59ffb3d0ba 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -87,6 +87,8 @@ static struct rcu_state *rcu_state;
| 87 | int rcu_scheduler_active __read_mostly; | 87 | int rcu_scheduler_active __read_mostly; |
| 88 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | 88 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); |
| 89 | 89 | ||
| 90 | #ifdef CONFIG_RCU_BOOST | ||
| 91 | |||
| 90 | /* | 92 | /* |
| 91 | * Control variables for per-CPU and per-rcu_node kthreads. These | 93 | * Control variables for per-CPU and per-rcu_node kthreads. These |
| 92 | * handle all flavors of RCU. | 94 | * handle all flavors of RCU. |
@@ -98,8 +100,11 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
| 98 | DEFINE_PER_CPU(char, rcu_cpu_has_work); | 100 | DEFINE_PER_CPU(char, rcu_cpu_has_work); |
| 99 | static char rcu_kthreads_spawnable; | 101 | static char rcu_kthreads_spawnable; |
| 100 | 102 | ||
| 103 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
| 104 | |||
| 101 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); | 105 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); |
| 102 | static void invoke_rcu_cpu_kthread(void); | 106 | static void invoke_rcu_core(void); |
| 107 | static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); | ||
| 103 | 108 | ||
| 104 | #define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */ | 109 | #define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */ |
| 105 | 110 | ||
@@ -1088,14 +1093,8 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
| 1088 | int need_report = 0; | 1093 | int need_report = 0; |
| 1089 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); | 1094 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
| 1090 | struct rcu_node *rnp; | 1095 | struct rcu_node *rnp; |
| 1091 | struct task_struct *t; | ||
| 1092 | 1096 | ||
| 1093 | /* Stop the CPU's kthread. */ | 1097 | rcu_stop_cpu_kthread(cpu); |
| 1094 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
| 1095 | if (t != NULL) { | ||
| 1096 | per_cpu(rcu_cpu_kthread_task, cpu) = NULL; | ||
| 1097 | kthread_stop(t); | ||
| 1098 | } | ||
| 1099 | 1098 | ||
| 1100 | /* Exclude any attempts to start a new grace period. */ | 1099 | /* Exclude any attempts to start a new grace period. */ |
| 1101 | raw_spin_lock_irqsave(&rsp->onofflock, flags); | 1100 | raw_spin_lock_irqsave(&rsp->onofflock, flags); |
@@ -1231,7 +1230,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
| 1231 | 1230 | ||
| 1232 | /* Re-raise the RCU softirq if there are callbacks remaining. */ | 1231 | /* Re-raise the RCU softirq if there are callbacks remaining. */ |
| 1233 | if (cpu_has_callbacks_ready_to_invoke(rdp)) | 1232 | if (cpu_has_callbacks_ready_to_invoke(rdp)) |
| 1234 | invoke_rcu_cpu_kthread(); | 1233 | invoke_rcu_core(); |
| 1235 | } | 1234 | } |
| 1236 | 1235 | ||
| 1237 | /* | 1236 | /* |
@@ -1277,7 +1276,7 @@ void rcu_check_callbacks(int cpu, int user)
| 1277 | } | 1276 | } |
| 1278 | rcu_preempt_check_callbacks(cpu); | 1277 | rcu_preempt_check_callbacks(cpu); |
| 1279 | if (rcu_pending(cpu)) | 1278 | if (rcu_pending(cpu)) |
| 1280 | invoke_rcu_cpu_kthread(); | 1279 | invoke_rcu_core(); |
| 1281 | } | 1280 | } |
| 1282 | 1281 | ||
| 1283 | #ifdef CONFIG_SMP | 1282 | #ifdef CONFIG_SMP |
@@ -1442,13 +1441,14 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
| 1442 | } | 1441 | } |
| 1443 | 1442 | ||
| 1444 | /* If there are callbacks ready, invoke them. */ | 1443 | /* If there are callbacks ready, invoke them. */ |
| 1445 | rcu_do_batch(rsp, rdp); | 1444 | if (cpu_has_callbacks_ready_to_invoke(rdp)) |
| 1445 | invoke_rcu_callbacks(rsp, rdp); | ||
| 1446 | } | 1446 | } |
| 1447 | 1447 | ||
| 1448 | /* | 1448 | /* |
| 1449 | * Do softirq processing for the current CPU. | 1449 | * Do softirq processing for the current CPU. |
| 1450 | */ | 1450 | */ |
| 1451 | static void rcu_process_callbacks(void) | 1451 | static void rcu_process_callbacks(struct softirq_action *unused) |
| 1452 | { | 1452 | { |
| 1453 | __rcu_process_callbacks(&rcu_sched_state, | 1453 | __rcu_process_callbacks(&rcu_sched_state, |
| 1454 | &__get_cpu_var(rcu_sched_data)); | 1454 | &__get_cpu_var(rcu_sched_data)); |
@@ -1465,342 +1465,20 @@ static void rcu_process_callbacks(void)
| 1465 | * the current CPU with interrupts disabled, the rcu_cpu_kthread_task | 1465 | * the current CPU with interrupts disabled, the rcu_cpu_kthread_task |
| 1466 | * cannot disappear out from under us. | 1466 | * cannot disappear out from under us. |
| 1467 | */ | 1467 | */ |
| 1468 | static void invoke_rcu_cpu_kthread(void) | 1468 | static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) |
| 1469 | { | ||
| 1470 | unsigned long flags; | ||
| 1471 | |||
| 1472 | local_irq_save(flags); | ||
| 1473 | __this_cpu_write(rcu_cpu_has_work, 1); | ||
| 1474 | if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) { | ||
| 1475 | local_irq_restore(flags); | ||
| 1476 | return; | ||
| 1477 | } | ||
| 1478 | wake_up_process(__this_cpu_read(rcu_cpu_kthread_task)); | ||
| 1479 | local_irq_restore(flags); | ||
| 1480 | } | ||
| 1481 | |||
| 1482 | /* | ||
| 1483 | * Wake up the specified per-rcu_node-structure kthread. | ||
| 1484 | * Because the per-rcu_node kthreads are immortal, we don't need | ||
| 1485 | * to do anything to keep them alive. | ||
| 1486 | */ | ||
| 1487 | static void invoke_rcu_node_kthread(struct rcu_node *rnp) | ||
| 1488 | { | ||
| 1489 | struct task_struct *t; | ||
| 1490 | |||
| 1491 | t = rnp->node_kthread_task; | ||
| 1492 | if (t != NULL) | ||
| 1493 | wake_up_process(t); | ||
| 1494 | } | ||
| 1495 | |||
| 1496 | /* | ||
| 1497 | * Set the specified CPU's kthread to run RT or not, as specified by | ||
| 1498 | * the to_rt argument. The CPU-hotplug locks are held, so the task | ||
| 1499 | * is not going away. | ||
| 1500 | */ | ||
| 1501 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | ||
| 1502 | { | ||
| 1503 | int policy; | ||
| 1504 | struct sched_param sp; | ||
| 1505 | struct task_struct *t; | ||
| 1506 | |||
| 1507 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
| 1508 | if (t == NULL) | ||
| 1509 | return; | ||
| 1510 | if (to_rt) { | ||
| 1511 | policy = SCHED_FIFO; | ||
| 1512 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
| 1513 | } else { | ||
| 1514 | policy = SCHED_NORMAL; | ||
| 1515 | sp.sched_priority = 0; | ||
| 1516 | } | ||
| 1517 | sched_setscheduler_nocheck(t, policy, &sp); | ||
| 1518 | } | ||
| 1519 | |||
| 1520 | /* | ||
| 1521 | * Timer handler to initiate the waking up of per-CPU kthreads that | ||
| 1522 | * have yielded the CPU due to excess numbers of RCU callbacks. | ||
| 1523 | * We wake up the per-rcu_node kthread, which in turn will wake up | ||
| 1524 | * the booster kthread. | ||
| 1525 | */ | ||
| 1526 | static void rcu_cpu_kthread_timer(unsigned long arg) | ||
| 1527 | { | ||
| 1528 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg); | ||
| 1529 | struct rcu_node *rnp = rdp->mynode; | ||
| 1530 | |||
| 1531 | atomic_or(rdp->grpmask, &rnp->wakemask); | ||
| 1532 | invoke_rcu_node_kthread(rnp); | ||
| 1533 | } | ||
| 1534 | |||
| 1535 | /* | ||
| 1536 | * Drop to non-real-time priority and yield, but only after posting a | ||
| 1537 | * timer that will cause us to regain our real-time priority if we | ||
| 1538 | * remain preempted. Either way, we restore our real-time priority | ||
| 1539 | * before returning. | ||
| 1540 | */ | ||
| 1541 | static void rcu_yield(void (*f)(unsigned long), unsigned long arg) | ||
| 1542 | { | ||
| 1543 | struct sched_param sp; | ||
| 1544 | struct timer_list yield_timer; | ||
| 1545 | |||
| 1546 | setup_timer_on_stack(&yield_timer, f, arg); | ||
| 1547 | mod_timer(&yield_timer, jiffies + 2); | ||
| 1548 | sp.sched_priority = 0; | ||
| 1549 | sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp); | ||
| 1550 | set_user_nice(current, 19); | ||
| 1551 | schedule(); | ||
| 1552 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
| 1553 | sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); | ||
| 1554 | del_timer(&yield_timer); | ||
| 1555 | } | ||
| 1556 | |||
| 1557 | /* | ||
| 1558 | * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU. | ||
| 1559 | * This can happen while the corresponding CPU is either coming online | ||
| 1560 | * or going offline. We cannot wait until the CPU is fully online | ||
| 1561 | * before starting the kthread, because the various notifier functions | ||
| 1562 | * can wait for RCU grace periods. So we park rcu_cpu_kthread() until | ||
| 1563 | * the corresponding CPU is online. | ||
| 1564 | * | ||
| 1565 | * Return 1 if the kthread needs to stop, 0 otherwise. | ||
| 1566 | * | ||
| 1567 | * Caller must disable bh. This function can momentarily enable it. | ||
| 1568 | */ | ||
| 1569 | static int rcu_cpu_kthread_should_stop(int cpu) | ||
| 1570 | { | ||
| 1571 | while (cpu_is_offline(cpu) || | ||
| 1572 | !cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu)) || | ||
| 1573 | smp_processor_id() != cpu) { | ||
| 1574 | if (kthread_should_stop()) | ||
| 1575 | return 1; | ||
| 1576 | per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; | ||
| 1577 | per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id(); | ||
| 1578 | local_bh_enable(); | ||
| 1579 | schedule_timeout_uninterruptible(1); | ||
| 1580 | if (!cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu))) | ||
| 1581 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | ||
| 1582 | local_bh_disable(); | ||
| 1583 | } | ||
| 1584 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | ||
| 1585 | return 0; | ||
| 1586 | } | ||
| 1587 | |||
| 1588 | /* | ||
| 1589 | * Per-CPU kernel thread that invokes RCU callbacks. This replaces the | ||
| 1590 | * earlier RCU softirq. | ||
| 1591 | */ | ||
| 1592 | static int rcu_cpu_kthread(void *arg) | ||
| 1593 | { | ||
| 1594 | int cpu = (int)(long)arg; | ||
| 1595 | unsigned long flags; | ||
| 1596 | int spincnt = 0; | ||
| 1597 | unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu); | ||
| 1598 | char work; | ||
| 1599 | char *workp = &per_cpu(rcu_cpu_has_work, cpu); | ||
| 1600 | |||
| 1601 | for (;;) { | ||
| 1602 | *statusp = RCU_KTHREAD_WAITING; | ||
| 1603 | rcu_wait(*workp != 0 || kthread_should_stop()); | ||
| 1604 | local_bh_disable(); | ||
| 1605 | if (rcu_cpu_kthread_should_stop(cpu)) { | ||
| 1606 | local_bh_enable(); | ||
| 1607 | break; | ||
| 1608 | } | ||
| 1609 | *statusp = RCU_KTHREAD_RUNNING; | ||
| 1610 | per_cpu(rcu_cpu_kthread_loops, cpu)++; | ||
| 1611 | local_irq_save(flags); | ||
| 1612 | work = *workp; | ||
| 1613 | *workp = 0; | ||
| 1614 | local_irq_restore(flags); | ||
| 1615 | if (work) | ||
| 1616 | rcu_process_callbacks(); | ||
| 1617 | local_bh_enable(); | ||
| 1618 | if (*workp != 0) | ||
| 1619 | spincnt++; | ||
| 1620 | else | ||
| 1621 | spincnt = 0; | ||
| 1622 | if (spincnt > 10) { | ||
| 1623 | *statusp = RCU_KTHREAD_YIELDING; | ||
| 1624 | rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu); | ||
| 1625 | spincnt = 0; | ||
| 1626 | } | ||
| 1627 | } | ||
| 1628 | *statusp = RCU_KTHREAD_STOPPED; | ||
| 1629 | return 0; | ||
| 1630 | } | ||
| 1631 | |||
| 1632 | /* | ||
| 1633 | * Spawn a per-CPU kthread, setting up affinity and priority. | ||
| 1634 | * Because the CPU hotplug lock is held, no other CPU will be attempting | ||
| 1635 | * to manipulate rcu_cpu_kthread_task. There might be another CPU | ||
| 1636 | * attempting to access it during boot, but the locking in kthread_bind() | ||
| 1637 | * will enforce sufficient ordering. | ||
| 1638 | */ | ||
| 1639 | static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) | ||
| 1640 | { | 1469 | { |
| 1641 | struct sched_param sp; | 1470 | if (likely(!rsp->boost)) { |
| 1642 | struct task_struct *t; | 1471 | rcu_do_batch(rsp, rdp); |
| 1643 | |||
| 1644 | if (!rcu_kthreads_spawnable || | ||
| 1645 | per_cpu(rcu_cpu_kthread_task, cpu) != NULL) | ||
| 1646 | return 0; | ||
| 1647 | t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu); | ||
| 1648 | if (IS_ERR(t)) | ||
| 1649 | return PTR_ERR(t); | ||
| 1650 | kthread_bind(t, cpu); | ||
| 1651 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | ||
| 1652 | WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); | ||
| 1653 | per_cpu(rcu_cpu_kthread_task, cpu) = t; | ||
| 1654 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
| 1655 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
| 1656 | return 0; | ||
| 1657 | } | ||
| 1658 | |||
| 1659 | /* | ||
| 1660 | * Per-rcu_node kthread, which is in charge of waking up the per-CPU | ||
| 1661 | * kthreads when needed. We ignore requests to wake up kthreads | ||
| 1662 | * for offline CPUs, which is OK because force_quiescent_state() | ||
| 1663 | * takes care of this case. | ||
| 1664 | */ | ||
| 1665 | static int rcu_node_kthread(void *arg) | ||
| 1666 | { | ||
| 1667 | int cpu; | ||
| 1668 | unsigned long flags; | ||
| 1669 | unsigned long mask; | ||
| 1670 | struct rcu_node *rnp = (struct rcu_node *)arg; | ||
| 1671 | struct sched_param sp; | ||
| 1672 | struct task_struct *t; | ||
| 1673 | |||
| 1674 | for (;;) { | ||
| 1675 | rnp->node_kthread_status = RCU_KTHREAD_WAITING; | ||
| 1676 | rcu_wait(atomic_read(&rnp->wakemask) != 0); | ||
| 1677 | rnp->node_kthread_status = RCU_KTHREAD_RUNNING; | ||
| 1678 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
| 1679 | mask = atomic_xchg(&rnp->wakemask, 0); | ||
| 1680 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ | ||
| 1681 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) { | ||
| 1682 | if ((mask & 0x1) == 0) | ||
| 1683 | continue; | ||
| 1684 | preempt_disable(); | ||
| 1685 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
| 1686 | if (!cpu_online(cpu) || t == NULL) { | ||
| 1687 | preempt_enable(); | ||
| 1688 | continue; | ||
| 1689 | } | ||
| 1690 | per_cpu(rcu_cpu_has_work, cpu) = 1; | ||
| 1691 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
| 1692 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
| 1693 | preempt_enable(); | ||
| 1694 | } | ||
| 1695 | } | ||
| 1696 | /* NOTREACHED */ | ||
| 1697 | rnp->node_kthread_status = RCU_KTHREAD_STOPPED; | ||
| 1698 | return 0; | ||
| 1699 | } | ||
| 1700 | |||
| 1701 | /* | ||
| 1702 | * Set the per-rcu_node kthread's affinity to cover all CPUs that are | ||
| 1703 | * served by the rcu_node in question. The CPU hotplug lock is still | ||
| 1704 | * held, so the value of rnp->qsmaskinit will be stable. | ||
| 1705 | * | ||
| 1706 | * We don't include outgoingcpu in the affinity set, use -1 if there is | ||
| 1707 | * no outgoing CPU. If there are no CPUs left in the affinity set, | ||
| 1708 | * this function allows the kthread to execute on any CPU. | ||
| 1709 | */ | ||
| 1710 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | ||
| 1711 | { | ||
| 1712 | cpumask_var_t cm; | ||
| 1713 | int cpu; | ||
| 1714 | unsigned long mask = rnp->qsmaskinit; | ||
| 1715 | |||
| 1716 | if (rnp->node_kthread_task == NULL) | ||
| 1717 | return; | ||
| 1718 | if (!alloc_cpumask_var(&cm, GFP_KERNEL)) | ||
| 1719 | return; | 1472 | return; |
| 1720 | cpumask_clear(cm); | ||
| 1721 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) | ||
| 1722 | if ((mask & 0x1) && cpu != outgoingcpu) | ||
| 1723 | cpumask_set_cpu(cpu, cm); | ||
| 1724 | if (cpumask_weight(cm) == 0) { | ||
| 1725 | cpumask_setall(cm); | ||
| 1726 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) | ||
| 1727 | cpumask_clear_cpu(cpu, cm); | ||
| 1728 | WARN_ON_ONCE(cpumask_weight(cm) == 0); | ||
| 1729 | } | 1473 | } |
| 1730 | set_cpus_allowed_ptr(rnp->node_kthread_task, cm); | 1474 | invoke_rcu_callbacks_kthread(); |
| 1731 | rcu_boost_kthread_setaffinity(rnp, cm); | ||
| 1732 | free_cpumask_var(cm); | ||
| 1733 | } | 1475 | } |
| 1734 | 1476 | ||
| 1735 | /* | 1477 | static void invoke_rcu_core(void) |
| 1736 | * Spawn a per-rcu_node kthread, setting priority and affinity. | ||
| 1737 | * Called during boot before online/offline can happen, or, if | ||
| 1738 | * during runtime, with the main CPU-hotplug locks held. So only | ||
| 1739 | * one of these can be executing at a time. | ||
| 1740 | */ | ||
| 1741 | static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, | ||
| 1742 | struct rcu_node *rnp) | ||
| 1743 | { | 1478 | { |
| 1744 | unsigned long flags; | 1479 | raise_softirq(RCU_SOFTIRQ); |
| 1745 | int rnp_index = rnp - &rsp->node[0]; | ||
| 1746 | struct sched_param sp; | ||
| 1747 | struct task_struct *t; | ||
| 1748 | |||
| 1749 | if (!rcu_kthreads_spawnable || | ||
| 1750 | rnp->qsmaskinit == 0) | ||
| 1751 | return 0; | ||
| 1752 | if (rnp->node_kthread_task == NULL) { | ||
| 1753 | t = kthread_create(rcu_node_kthread, (void *)rnp, | ||
| 1754 | "rcun%d", rnp_index); | ||
| 1755 | if (IS_ERR(t)) | ||
| 1756 | return PTR_ERR(t); | ||
| 1757 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
| 1758 | rnp->node_kthread_task = t; | ||
| 1759 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
| 1760 | sp.sched_priority = 99; | ||
| 1761 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
| 1762 | } | ||
| 1763 | return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); | ||
| 1764 | } | 1480 | } |
| 1765 | 1481 | ||
| 1766 | static void rcu_wake_one_boost_kthread(struct rcu_node *rnp); | ||
| 1767 | |||
| 1768 | /* | ||
| 1769 | * Spawn all kthreads -- called as soon as the scheduler is running. | ||
| 1770 | */ | ||
| 1771 | static int __init rcu_spawn_kthreads(void) | ||
| 1772 | { | ||
| 1773 | int cpu; | ||
| 1774 | struct rcu_node *rnp; | ||
| 1775 | struct task_struct *t; | ||
| 1776 | |||
| 1777 | rcu_kthreads_spawnable = 1; | ||
| 1778 | for_each_possible_cpu(cpu) { | ||
| 1779 | per_cpu(rcu_cpu_has_work, cpu) = 0; | ||
| 1780 | if (cpu_online(cpu)) { | ||
| 1781 | (void)rcu_spawn_one_cpu_kthread(cpu); | ||
| 1782 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
| 1783 | if (t) | ||
| 1784 | wake_up_process(t); | ||
| 1785 | } | ||
| 1786 | } | ||
| 1787 | rnp = rcu_get_root(rcu_state); | ||
| 1788 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
| 1789 | if (rnp->node_kthread_task) | ||
| 1790 | wake_up_process(rnp->node_kthread_task); | ||
| 1791 | if (NUM_RCU_NODES > 1) { | ||
| 1792 | rcu_for_each_leaf_node(rcu_state, rnp) { | ||
| 1793 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
| 1794 | t = rnp->node_kthread_task; | ||
| 1795 | if (t) | ||
| 1796 | wake_up_process(t); | ||
| 1797 | rcu_wake_one_boost_kthread(rnp); | ||
| 1798 | } | ||
| 1799 | } | ||
| 1800 | return 0; | ||
| 1801 | } | ||
| 1802 | early_initcall(rcu_spawn_kthreads); | ||
| 1803 | |||
| 1804 | static void | 1482 | static void |
| 1805 | __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | 1483 | __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), |
| 1806 | struct rcu_state *rsp) | 1484 | struct rcu_state *rsp) |
@@ -2207,44 +1885,6 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
| 2207 | rcu_preempt_init_percpu_data(cpu); | 1885 | rcu_preempt_init_percpu_data(cpu); |
| 2208 | } | 1886 | } |
| 2209 | 1887 | ||
| 2210 | static void __cpuinit rcu_prepare_kthreads(int cpu) | ||
| 2211 | { | ||
| 2212 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); | ||
| 2213 | struct rcu_node *rnp = rdp->mynode; | ||
| 2214 | |||
| 2215 | /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ | ||
| 2216 | if (rcu_kthreads_spawnable) { | ||
| 2217 | (void)rcu_spawn_one_cpu_kthread(cpu); | ||
| 2218 | if (rnp->node_kthread_task == NULL) | ||
| 2219 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
| 2220 | } | ||
| 2221 | } | ||
| 2222 | |||
| 2223 | /* | ||
| 2224 | * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state, | ||
| 2225 | * but the RCU threads are woken on demand, and if demand is low this | ||
| 2226 | * could be a while triggering the hung task watchdog. | ||
| 2227 | * | ||
| 2228 | * In order to avoid this, poke all tasks once the CPU is fully | ||
| 2229 | * up and running. | ||
| 2230 | */ | ||
| 2231 | static void __cpuinit rcu_online_kthreads(int cpu) | ||
| 2232 | { | ||
| 2233 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); | ||
| 2234 | struct rcu_node *rnp = rdp->mynode; | ||
| 2235 | struct task_struct *t; | ||
| 2236 | |||
| 2237 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
| 2238 | if (t) | ||
| 2239 | wake_up_process(t); | ||
| 2240 | |||
| 2241 | t = rnp->node_kthread_task; | ||
| 2242 | if (t) | ||
| 2243 | wake_up_process(t); | ||
| 2244 | |||
| 2245 | rcu_wake_one_boost_kthread(rnp); | ||
| 2246 | } | ||
| 2247 | |||
| 2248 | /* | 1888 | /* |
| 2249 | * Handle CPU online/offline notification events. | 1889 | * Handle CPU online/offline notification events. |
| 2250 | */ | 1890 | */ |
@@ -2262,7 +1902,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
| 2262 | rcu_prepare_kthreads(cpu); | 1902 | rcu_prepare_kthreads(cpu); |
| 2263 | break; | 1903 | break; |
| 2264 | case CPU_ONLINE: | 1904 | case CPU_ONLINE: |
| 2265 | rcu_online_kthreads(cpu); | ||
| 2266 | case CPU_DOWN_FAILED: | 1905 | case CPU_DOWN_FAILED: |
| 2267 | rcu_node_kthread_setaffinity(rnp, -1); | 1906 | rcu_node_kthread_setaffinity(rnp, -1); |
| 2268 | rcu_cpu_kthread_setrt(cpu, 1); | 1907 | rcu_cpu_kthread_setrt(cpu, 1); |
@@ -2410,6 +2049,7 @@ void __init rcu_init(void)
| 2410 | rcu_init_one(&rcu_sched_state, &rcu_sched_data); | 2049 | rcu_init_one(&rcu_sched_state, &rcu_sched_data); |
| 2411 | rcu_init_one(&rcu_bh_state, &rcu_bh_data); | 2050 | rcu_init_one(&rcu_bh_state, &rcu_bh_data); |
| 2412 | __rcu_init_preempt(); | 2051 | __rcu_init_preempt(); |
| 2052 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | ||
| 2413 | 2053 | ||
| 2414 | /* | 2054 | /* |
| 2415 | * We don't need protection against CPU-hotplug here because | 2055 | * We don't need protection against CPU-hotplug here because |
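Taken together, the kernel/rcutree.c hunks above move callback processing back onto the new RCU_SOFTIRQ vector and reduce the kthread machinery to the CONFIG_RCU_BOOST case. A condensed view of the resulting control flow, paraphrased from the hunks rather than quoted verbatim:

        rcu_check_callbacks() / rcu_do_batch() / rcu_needs_cpu()
            -> invoke_rcu_core()                   /* raise_softirq(RCU_SOFTIRQ) */

        rcu_process_callbacks()                    /* handler set up via open_softirq() in rcu_init() */
            -> __rcu_process_callbacks()
                -> invoke_rcu_callbacks()
                    -> rcu_do_batch()                    /* common case: rsp->boost not set */
                    -> invoke_rcu_callbacks_kthread()    /* CONFIG_RCU_BOOST=y and rsp->boost set */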
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 7b9a08b4aaea..01b2ccda26fb 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -369,6 +369,7 @@ struct rcu_state {
						/* period because */
						/* force_quiescent_state() */
						/* was running. */
+	u8 boost;				/* Subject to priority boost. */
 	unsigned long gpnum;			/* Current gp number. */
 	unsigned long completed;		/* # of last completed gp. */

@@ -426,6 +427,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
 				      unsigned long flags);
+static void rcu_stop_cpu_kthread(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static void rcu_print_task_stall(struct rcu_node *rnp);
@@ -450,11 +452,19 @@ static void rcu_preempt_send_cbs_to_online(void);
 static void __init __rcu_init_preempt(void);
 static void rcu_needs_cpu_flush(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
+static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
+static void invoke_rcu_callbacks_kthread(void);
+#ifdef CONFIG_RCU_BOOST
+static void rcu_preempt_do_callbacks(void);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
 					  cpumask_var_t cm);
-static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp,
 						 int rnp_index);
+static void invoke_rcu_node_kthread(struct rcu_node *rnp);
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
+static void __cpuinit rcu_prepare_kthreads(int cpu);

 #endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c8bff3099a89..14dc7dd00902 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -602,6 +602,15 @@ static void rcu_preempt_process_callbacks(void)
| 602 | &__get_cpu_var(rcu_preempt_data)); | 602 | &__get_cpu_var(rcu_preempt_data)); |
| 603 | } | 603 | } |
| 604 | 604 | ||
| 605 | #ifdef CONFIG_RCU_BOOST | ||
| 606 | |||
| 607 | static void rcu_preempt_do_callbacks(void) | ||
| 608 | { | ||
| 609 | rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data)); | ||
| 610 | } | ||
| 611 | |||
| 612 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
| 613 | |||
| 605 | /* | 614 | /* |
| 606 | * Queue a preemptible-RCU callback for invocation after a grace period. | 615 | * Queue a preemptible-RCU callback for invocation after a grace period. |
| 607 | */ | 616 | */ |
@@ -1249,6 +1258,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
| 1249 | } | 1258 | } |
| 1250 | 1259 | ||
| 1251 | /* | 1260 | /* |
| 1261 | * Wake up the per-CPU kthread to invoke RCU callbacks. | ||
| 1262 | */ | ||
| 1263 | static void invoke_rcu_callbacks_kthread(void) | ||
| 1264 | { | ||
| 1265 | unsigned long flags; | ||
| 1266 | |||
| 1267 | local_irq_save(flags); | ||
| 1268 | __this_cpu_write(rcu_cpu_has_work, 1); | ||
| 1269 | if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) { | ||
| 1270 | local_irq_restore(flags); | ||
| 1271 | return; | ||
| 1272 | } | ||
| 1273 | wake_up_process(__this_cpu_read(rcu_cpu_kthread_task)); | ||
| 1274 | local_irq_restore(flags); | ||
| 1275 | } | ||
| 1276 | |||
| 1277 | /* | ||
| 1252 | * Set the affinity of the boost kthread. The CPU-hotplug locks are | 1278 | * Set the affinity of the boost kthread. The CPU-hotplug locks are |
| 1253 | * held, so no one should be messing with the existence of the boost | 1279 | * held, so no one should be messing with the existence of the boost |
| 1254 | * kthread. | 1280 | * kthread. |
@@ -1288,6 +1314,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
| 1288 | 1314 | ||
| 1289 | if (&rcu_preempt_state != rsp) | 1315 | if (&rcu_preempt_state != rsp) |
| 1290 | return 0; | 1316 | return 0; |
| 1317 | rsp->boost = 1; | ||
| 1291 | if (rnp->boost_kthread_task != NULL) | 1318 | if (rnp->boost_kthread_task != NULL) |
| 1292 | return 0; | 1319 | return 0; |
| 1293 | t = kthread_create(rcu_boost_kthread, (void *)rnp, | 1320 | t = kthread_create(rcu_boost_kthread, (void *)rnp, |
@@ -1299,13 +1326,372 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
| 1299 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 1326 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
| 1300 | sp.sched_priority = RCU_KTHREAD_PRIO; | 1327 | sp.sched_priority = RCU_KTHREAD_PRIO; |
| 1301 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | 1328 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); |
| 1329 | wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ | ||
| 1302 | return 0; | 1330 | return 0; |
| 1303 | } | 1331 | } |
| 1304 | 1332 | ||
| 1305 | static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) | 1333 | #ifdef CONFIG_HOTPLUG_CPU |
| 1334 | |||
| 1335 | /* | ||
| 1336 | * Stop the RCU's per-CPU kthread when its CPU goes offline,. | ||
| 1337 | */ | ||
| 1338 | static void rcu_stop_cpu_kthread(int cpu) | ||
| 1306 | { | 1339 | { |
| 1307 | if (rnp->boost_kthread_task) | 1340 | struct task_struct *t; |
| 1308 | wake_up_process(rnp->boost_kthread_task); | 1341 | |
| 1342 | /* Stop the CPU's kthread. */ | ||
| 1343 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
| 1344 | if (t != NULL) { | ||
| 1345 | per_cpu(rcu_cpu_kthread_task, cpu) = NULL; | ||
| 1346 | kthread_stop(t); | ||
| 1347 | } | ||
| 1348 | } | ||
| 1349 | |||
| 1350 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
| 1351 | |||
| 1352 | static void rcu_kthread_do_work(void) | ||
| 1353 | { | ||
| 1354 | rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); | ||
| 1355 | rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); | ||
| 1356 | rcu_preempt_do_callbacks(); | ||
| 1357 | } | ||
| 1358 | |||
| 1359 | /* | ||
| 1360 | * Wake up the specified per-rcu_node-structure kthread. | ||
| 1361 | * Because the per-rcu_node kthreads are immortal, we don't need | ||
| 1362 | * to do anything to keep them alive. | ||
| 1363 | */ | ||
| 1364 | static void invoke_rcu_node_kthread(struct rcu_node *rnp) | ||
| 1365 | { | ||
| 1366 | struct task_struct *t; | ||
| 1367 | |||
| 1368 | t = rnp->node_kthread_task; | ||
| 1369 | if (t != NULL) | ||
| 1370 | wake_up_process(t); | ||
| 1371 | } | ||
| 1372 | |||
| 1373 | /* | ||
| 1374 | * Set the specified CPU's kthread to run RT or not, as specified by | ||
| 1375 | * the to_rt argument. The CPU-hotplug locks are held, so the task | ||
| 1376 | * is not going away. | ||
| 1377 | */ | ||
| 1378 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | ||
| 1379 | { | ||
| 1380 | int policy; | ||
| 1381 | struct sched_param sp; | ||
| 1382 | struct task_struct *t; | ||
| 1383 | |||
| 1384 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
| 1385 | if (t == NULL) | ||
| 1386 | return; | ||
| 1387 | if (to_rt) { | ||
| 1388 | policy = SCHED_FIFO; | ||
| 1389 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
| 1390 | } else { | ||
| 1391 | policy = SCHED_NORMAL; | ||
| 1392 | sp.sched_priority = 0; | ||
| 1393 | } | ||
| 1394 | sched_setscheduler_nocheck(t, policy, &sp); | ||
| 1395 | } | ||
| 1396 | |||
| 1397 | /* | ||
| 1398 | * Timer handler to initiate the waking up of per-CPU kthreads that | ||
| 1399 | * have yielded the CPU due to excess numbers of RCU callbacks. | ||
| 1400 | * We wake up the per-rcu_node kthread, which in turn will wake up | ||
| 1401 | * the booster kthread. | ||
| 1402 | */ | ||
| 1403 | static void rcu_cpu_kthread_timer(unsigned long arg) | ||
| 1404 | { | ||
| 1405 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg); | ||
| 1406 | struct rcu_node *rnp = rdp->mynode; | ||
| 1407 | |||
| 1408 | atomic_or(rdp->grpmask, &rnp->wakemask); | ||
| 1409 | invoke_rcu_node_kthread(rnp); | ||
| 1410 | } | ||
| 1411 | |||
| 1412 | /* | ||
| 1413 | * Drop to non-real-time priority and yield, but only after posting a | ||
| 1414 | * timer that will cause us to regain our real-time priority if we | ||
| 1415 | * remain preempted. Either way, we restore our real-time priority | ||
| 1416 | * before returning. | ||
| 1417 | */ | ||
| 1418 | static void rcu_yield(void (*f)(unsigned long), unsigned long arg) | ||
| 1419 | { | ||
| 1420 | struct sched_param sp; | ||
| 1421 | struct timer_list yield_timer; | ||
| 1422 | |||
| 1423 | setup_timer_on_stack(&yield_timer, f, arg); | ||
| 1424 | mod_timer(&yield_timer, jiffies + 2); | ||
| 1425 | sp.sched_priority = 0; | ||
| 1426 | sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp); | ||
| 1427 | set_user_nice(current, 19); | ||
| 1428 | schedule(); | ||
| 1429 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
| 1430 | sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); | ||
| 1431 | del_timer(&yield_timer); | ||
| 1432 | } | ||
| 1433 | |||
| 1434 | /* | ||
| 1435 | * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU. | ||
| 1436 | * This can happen while the corresponding CPU is either coming online | ||
| 1437 | * or going offline. We cannot wait until the CPU is fully online | ||
| 1438 | * before starting the kthread, because the various notifier functions | ||
| 1439 | * can wait for RCU grace periods. So we park rcu_cpu_kthread() until | ||
| 1440 | * the corresponding CPU is online. | ||
| 1441 | * | ||
| 1442 | * Return 1 if the kthread needs to stop, 0 otherwise. | ||
| 1443 | * | ||
| 1444 | * Caller must disable bh. This function can momentarily enable it. | ||
| 1445 | */ | ||
| 1446 | static int rcu_cpu_kthread_should_stop(int cpu) | ||
| 1447 | { | ||
| 1448 | while (cpu_is_offline(cpu) || | ||
| 1449 | !cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu)) || | ||
| 1450 | smp_processor_id() != cpu) { | ||
| 1451 | if (kthread_should_stop()) | ||
| 1452 | return 1; | ||
| 1453 | per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; | ||
| 1454 | per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id(); | ||
| 1455 | local_bh_enable(); | ||
| 1456 | schedule_timeout_uninterruptible(1); | ||
| 1457 | if (!cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu))) | ||
| 1458 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | ||
| 1459 | local_bh_disable(); | ||
| 1460 | } | ||
| 1461 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | ||
| 1462 | return 0; | ||
| 1463 | } | ||
| 1464 | |||
| 1465 | /* | ||
| 1466 | * Per-CPU kernel thread that invokes RCU callbacks. This replaces the | ||
| 1467 | * earlier RCU softirq. | ||
| 1468 | */ | ||
| 1469 | static int rcu_cpu_kthread(void *arg) | ||
| 1470 | { | ||
| 1471 | int cpu = (int)(long)arg; | ||
| 1472 | unsigned long flags; | ||
| 1473 | int spincnt = 0; | ||
| 1474 | unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu); | ||
| 1475 | char work; | ||
| 1476 | char *workp = &per_cpu(rcu_cpu_has_work, cpu); | ||
| 1477 | |||
| 1478 | for (;;) { | ||
| 1479 | *statusp = RCU_KTHREAD_WAITING; | ||
| 1480 | rcu_wait(*workp != 0 || kthread_should_stop()); | ||
| 1481 | local_bh_disable(); | ||
| 1482 | if (rcu_cpu_kthread_should_stop(cpu)) { | ||
| 1483 | local_bh_enable(); | ||
| 1484 | break; | ||
| 1485 | } | ||
| 1486 | *statusp = RCU_KTHREAD_RUNNING; | ||
| 1487 | per_cpu(rcu_cpu_kthread_loops, cpu)++; | ||
| 1488 | local_irq_save(flags); | ||
| 1489 | work = *workp; | ||
| 1490 | *workp = 0; | ||
| 1491 | local_irq_restore(flags); | ||
| 1492 | if (work) | ||
| 1493 | rcu_kthread_do_work(); | ||
| 1494 | local_bh_enable(); | ||
| 1495 | if (*workp != 0) | ||
| 1496 | spincnt++; | ||
| 1497 | else | ||
| 1498 | spincnt = 0; | ||
| 1499 | if (spincnt > 10) { | ||
| 1500 | *statusp = RCU_KTHREAD_YIELDING; | ||
| 1501 | rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu); | ||
| 1502 | spincnt = 0; | ||
| 1503 | } | ||
| 1504 | } | ||
| 1505 | *statusp = RCU_KTHREAD_STOPPED; | ||
| 1506 | return 0; | ||
| 1507 | } | ||
| 1508 | |||
| 1509 | /* | ||
| 1510 | * Spawn a per-CPU kthread, setting up affinity and priority. | ||
| 1511 | * Because the CPU hotplug lock is held, no other CPU will be attempting | ||
| 1512 | * to manipulate rcu_cpu_kthread_task. There might be another CPU | ||
| 1513 | * attempting to access it during boot, but the locking in kthread_bind() | ||
| 1514 | * will enforce sufficient ordering. | ||
| 1515 | * | ||
| 1516 | * Please note that we cannot simply refuse to wake up the per-CPU | ||
| 1517 | * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state, | ||
| 1518 | * which can result in softlockup complaints if the task ends up being | ||
| 1519 | * idle for more than a couple of minutes. | ||
| 1520 | * | ||
| 1521 | * However, please note also that we cannot bind the per-CPU kthread to its | ||
| 1522 | * CPU until that CPU is fully online. We also cannot wait until the | ||
| 1523 | * CPU is fully online before we create its per-CPU kthread, as this would | ||
| 1524 | * deadlock the system when CPU notifiers tried waiting for grace | ||
| 1525 | * periods. So we bind the per-CPU kthread to its CPU only if the CPU | ||
| 1526 | * is online. If its CPU is not yet fully online, then the code in | ||
| 1527 | * rcu_cpu_kthread() will wait until it is fully online, and then do | ||
| 1528 | * the binding. | ||
| 1529 | */ | ||
| 1530 | static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) | ||
| 1531 | { | ||
| 1532 | struct sched_param sp; | ||
| 1533 | struct task_struct *t; | ||
| 1534 | |||
| 1535 | if (!rcu_kthreads_spawnable || | ||
| 1536 | per_cpu(rcu_cpu_kthread_task, cpu) != NULL) | ||
| 1537 | return 0; | ||
| 1538 | t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu); | ||
| 1539 | if (IS_ERR(t)) | ||
| 1540 | return PTR_ERR(t); | ||
| 1541 | if (cpu_online(cpu)) | ||
| 1542 | kthread_bind(t, cpu); | ||
| 1543 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | ||
| 1544 | WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); | ||
| 1545 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
| 1546 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
| 1547 | per_cpu(rcu_cpu_kthread_task, cpu) = t; | ||
| 1548 | wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */ | ||
| 1549 | return 0; | ||
| 1550 | } | ||
| 1551 | |||
| 1552 | /* | ||
| 1553 | * Per-rcu_node kthread, which is in charge of waking up the per-CPU | ||
| 1554 | * kthreads when needed. We ignore requests to wake up kthreads | ||
| 1555 | * for offline CPUs, which is OK because force_quiescent_state() | ||
| 1556 | * takes care of this case. | ||
| 1557 | */ | ||
| 1558 | static int rcu_node_kthread(void *arg) | ||
| 1559 | { | ||
| 1560 | int cpu; | ||
| 1561 | unsigned long flags; | ||
| 1562 | unsigned long mask; | ||
| 1563 | struct rcu_node *rnp = (struct rcu_node *)arg; | ||
| 1564 | struct sched_param sp; | ||
| 1565 | struct task_struct *t; | ||
| 1566 | |||
| 1567 | for (;;) { | ||
| 1568 | rnp->node_kthread_status = RCU_KTHREAD_WAITING; | ||
| 1569 | rcu_wait(atomic_read(&rnp->wakemask) != 0); | ||
| 1570 | rnp->node_kthread_status = RCU_KTHREAD_RUNNING; | ||
| 1571 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
| 1572 | mask = atomic_xchg(&rnp->wakemask, 0); | ||
| 1573 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ | ||
| 1574 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) { | ||
| 1575 | if ((mask & 0x1) == 0) | ||
| 1576 | continue; | ||
| 1577 | preempt_disable(); | ||
| 1578 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
| 1579 | if (!cpu_online(cpu) || t == NULL) { | ||
| 1580 | preempt_enable(); | ||
| 1581 | continue; | ||
| 1582 | } | ||
| 1583 | per_cpu(rcu_cpu_has_work, cpu) = 1; | ||
| 1584 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
| 1585 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
| 1586 | preempt_enable(); | ||
| 1587 | } | ||
| 1588 | } | ||
| 1589 | /* NOTREACHED */ | ||
| 1590 | rnp->node_kthread_status = RCU_KTHREAD_STOPPED; | ||
| 1591 | return 0; | ||
| 1592 | } | ||
| 1593 | |||
| 1594 | /* | ||
| 1595 | * Set the per-rcu_node kthread's affinity to cover all CPUs that are | ||
| 1596 | * served by the rcu_node in question. The CPU hotplug lock is still | ||
| 1597 | * held, so the value of rnp->qsmaskinit will be stable. | ||
| 1598 | * | ||
| 1599 | * We don't include outgoingcpu in the affinity set, use -1 if there is | ||
| 1600 | * no outgoing CPU. If there are no CPUs left in the affinity set, | ||
| 1601 | * this function allows the kthread to execute on any CPU. | ||
| 1602 | */ | ||
| 1603 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | ||
| 1604 | { | ||
| 1605 | cpumask_var_t cm; | ||
| 1606 | int cpu; | ||
| 1607 | unsigned long mask = rnp->qsmaskinit; | ||
| 1608 | |||
| 1609 | if (rnp->node_kthread_task == NULL) | ||
| 1610 | return; | ||
| 1611 | if (!alloc_cpumask_var(&cm, GFP_KERNEL)) | ||
| 1612 | return; | ||
| 1613 | cpumask_clear(cm); | ||
| 1614 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) | ||
| 1615 | if ((mask & 0x1) && cpu != outgoingcpu) | ||
| 1616 | cpumask_set_cpu(cpu, cm); | ||
| 1617 | if (cpumask_weight(cm) == 0) { | ||
| 1618 | cpumask_setall(cm); | ||
| 1619 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) | ||
| 1620 | cpumask_clear_cpu(cpu, cm); | ||
| 1621 | WARN_ON_ONCE(cpumask_weight(cm) == 0); | ||
| 1622 | } | ||
| 1623 | set_cpus_allowed_ptr(rnp->node_kthread_task, cm); | ||
| 1624 | rcu_boost_kthread_setaffinity(rnp, cm); | ||
| 1625 | free_cpumask_var(cm); | ||
| 1626 | } | ||
| 1627 | |||
| 1628 | /* | ||
| 1629 | * Spawn a per-rcu_node kthread, setting priority and affinity. | ||
| 1630 | * Called during boot before online/offline can happen, or, if | ||
| 1631 | * during runtime, with the main CPU-hotplug locks held. So only | ||
| 1632 | * one of these can be executing at a time. | ||
| 1633 | */ | ||
| 1634 | static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, | ||
| 1635 | struct rcu_node *rnp) | ||
| 1636 | { | ||
| 1637 | unsigned long flags; | ||
| 1638 | int rnp_index = rnp - &rsp->node[0]; | ||
| 1639 | struct sched_param sp; | ||
| 1640 | struct task_struct *t; | ||
| 1641 | |||
| 1642 | if (!rcu_kthreads_spawnable || | ||
| 1643 | rnp->qsmaskinit == 0) | ||
| 1644 | return 0; | ||
| 1645 | if (rnp->node_kthread_task == NULL) { | ||
| 1646 | t = kthread_create(rcu_node_kthread, (void *)rnp, | ||
| 1647 | "rcun%d", rnp_index); | ||
| 1648 | if (IS_ERR(t)) | ||
| 1649 | return PTR_ERR(t); | ||
| 1650 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
| 1651 | rnp->node_kthread_task = t; | ||
| 1652 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
| 1653 | sp.sched_priority = 99; | ||
| 1654 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
| 1655 | wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ | ||
| 1656 | } | ||
| 1657 | return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); | ||
| 1658 | } | ||
| 1659 | |||
| 1660 | /* | ||
| 1661 | * Spawn all kthreads -- called as soon as the scheduler is running. | ||
| 1662 | */ | ||
| 1663 | static int __init rcu_spawn_kthreads(void) | ||
| 1664 | { | ||
| 1665 | int cpu; | ||
| 1666 | struct rcu_node *rnp; | ||
| 1667 | |||
| 1668 | rcu_kthreads_spawnable = 1; | ||
| 1669 | for_each_possible_cpu(cpu) { | ||
| 1670 | per_cpu(rcu_cpu_has_work, cpu) = 0; | ||
| 1671 | if (cpu_online(cpu)) | ||
| 1672 | (void)rcu_spawn_one_cpu_kthread(cpu); | ||
| 1673 | } | ||
| 1674 | rnp = rcu_get_root(rcu_state); | ||
| 1675 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
| 1676 | if (NUM_RCU_NODES > 1) { | ||
| 1677 | rcu_for_each_leaf_node(rcu_state, rnp) | ||
| 1678 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
| 1679 | } | ||
| 1680 | return 0; | ||
| 1681 | } | ||
| 1682 | early_initcall(rcu_spawn_kthreads); | ||
| 1683 | |||
| 1684 | static void __cpuinit rcu_prepare_kthreads(int cpu) | ||
| 1685 | { | ||
| 1686 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); | ||
| 1687 | struct rcu_node *rnp = rdp->mynode; | ||
| 1688 | |||
| 1689 | /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ | ||
| 1690 | if (rcu_kthreads_spawnable) { | ||
| 1691 | (void)rcu_spawn_one_cpu_kthread(cpu); | ||
| 1692 | if (rnp->node_kthread_task == NULL) | ||
| 1693 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
| 1694 | } | ||
| 1309 | } | 1695 | } |
| 1310 | 1696 | ||
| 1311 | #else /* #ifdef CONFIG_RCU_BOOST */ | 1697 | #else /* #ifdef CONFIG_RCU_BOOST */ |
@@ -1315,23 +1701,32 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
| 1315 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 1701 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
| 1316 | } | 1702 | } |
| 1317 | 1703 | ||
| 1318 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, | 1704 | static void invoke_rcu_callbacks_kthread(void) |
| 1319 | cpumask_var_t cm) | ||
| 1320 | { | 1705 | { |
| 1706 | WARN_ON_ONCE(1); | ||
| 1321 | } | 1707 | } |
| 1322 | 1708 | ||
| 1323 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) | 1709 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) |
| 1324 | { | 1710 | { |
| 1325 | } | 1711 | } |
| 1326 | 1712 | ||
| 1327 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | 1713 | #ifdef CONFIG_HOTPLUG_CPU |
| 1328 | struct rcu_node *rnp, | 1714 | |
| 1329 | int rnp_index) | 1715 | static void rcu_stop_cpu_kthread(int cpu) |
| 1716 | { | ||
| 1717 | } | ||
| 1718 | |||
| 1719 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
| 1720 | |||
| 1721 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | ||
| 1722 | { | ||
| 1723 | } | ||
| 1724 | |||
| 1725 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | ||
| 1330 | { | 1726 | { |
| 1331 | return 0; | ||
| 1332 | } | 1727 | } |
| 1333 | 1728 | ||
| 1334 | static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) | 1729 | static void __cpuinit rcu_prepare_kthreads(int cpu) |
| 1335 | { | 1730 | { |
| 1336 | } | 1731 | } |
| 1337 | 1732 | ||
@@ -1509,7 +1904,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
| 1509 | * | 1904 | * |
| 1510 | * Because it is not legal to invoke rcu_process_callbacks() with irqs | 1905 | * Because it is not legal to invoke rcu_process_callbacks() with irqs |
| 1511 | * disabled, we do one pass of force_quiescent_state(), then do a | 1906 | * disabled, we do one pass of force_quiescent_state(), then do a |
| 1512 | * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked | 1907 | * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked |
| 1513 | * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. | 1908 | * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. |
| 1514 | */ | 1909 | */ |
| 1515 | int rcu_needs_cpu(int cpu) | 1910 | int rcu_needs_cpu(int cpu) |
@@ -1560,7 +1955,7 @@ int rcu_needs_cpu(int cpu)
| 1560 | 1955 | ||
| 1561 | /* If RCU callbacks are still pending, RCU still needs this CPU. */ | 1956 | /* If RCU callbacks are still pending, RCU still needs this CPU. */ |
| 1562 | if (c) | 1957 | if (c) |
| 1563 | invoke_rcu_cpu_kthread(); | 1958 | invoke_rcu_core(); |
| 1564 | return c; | 1959 | return c; |
| 1565 | } | 1960 | } |
| 1566 | 1961 | ||
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 9678cc3650f5..4e144876dc68 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -46,6 +46,8 @@
| 46 | #define RCU_TREE_NONCORE | 46 | #define RCU_TREE_NONCORE |
| 47 | #include "rcutree.h" | 47 | #include "rcutree.h" |
| 48 | 48 | ||
| 49 | #ifdef CONFIG_RCU_BOOST | ||
| 50 | |||
| 49 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); | 51 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); |
| 50 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu); | 52 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu); |
| 51 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); | 53 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); |
@@ -58,6 +60,8 @@ static char convert_kthread_status(unsigned int kthread_status)
| 58 | return "SRWOY"[kthread_status]; | 60 | return "SRWOY"[kthread_status]; |
| 59 | } | 61 | } |
| 60 | 62 | ||
| 63 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
| 64 | |||
| 61 | static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) | 65 | static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) |
| 62 | { | 66 | { |
| 63 | if (!rdp->beenonline) | 67 | if (!rdp->beenonline) |
@@ -76,7 +80,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
| 76 | rdp->dynticks_fqs); | 80 | rdp->dynticks_fqs); |
| 77 | #endif /* #ifdef CONFIG_NO_HZ */ | 81 | #endif /* #ifdef CONFIG_NO_HZ */ |
| 78 | seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); | 82 | seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); |
| 79 | seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld", | 83 | seq_printf(m, " ql=%ld qs=%c%c%c%c", |
| 80 | rdp->qlen, | 84 | rdp->qlen, |
| 81 | ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != | 85 | ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != |
| 82 | rdp->nxttail[RCU_NEXT_TAIL]], | 86 | rdp->nxttail[RCU_NEXT_TAIL]], |
@@ -84,13 +88,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
| 84 | rdp->nxttail[RCU_NEXT_READY_TAIL]], | 88 | rdp->nxttail[RCU_NEXT_READY_TAIL]], |
| 85 | ".W"[rdp->nxttail[RCU_DONE_TAIL] != | 89 | ".W"[rdp->nxttail[RCU_DONE_TAIL] != |
| 86 | rdp->nxttail[RCU_WAIT_TAIL]], | 90 | rdp->nxttail[RCU_WAIT_TAIL]], |
| 87 | ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], | 91 | ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]); |
| 92 | #ifdef CONFIG_RCU_BOOST | ||
| 93 | seq_printf(m, " kt=%d/%c/%d ktl=%x", | ||
| 88 | per_cpu(rcu_cpu_has_work, rdp->cpu), | 94 | per_cpu(rcu_cpu_has_work, rdp->cpu), |
| 89 | convert_kthread_status(per_cpu(rcu_cpu_kthread_status, | 95 | convert_kthread_status(per_cpu(rcu_cpu_kthread_status, |
| 90 | rdp->cpu)), | 96 | rdp->cpu)), |
| 91 | per_cpu(rcu_cpu_kthread_cpu, rdp->cpu), | 97 | per_cpu(rcu_cpu_kthread_cpu, rdp->cpu), |
| 92 | per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff, | 98 | per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff); |
| 93 | rdp->blimit); | 99 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
| 100 | seq_printf(m, " b=%ld", rdp->blimit); | ||
| 94 | seq_printf(m, " ci=%lu co=%lu ca=%lu\n", | 101 | seq_printf(m, " ci=%lu co=%lu ca=%lu\n", |
| 95 | rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); | 102 | rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); |
| 96 | } | 103 | } |
@@ -147,18 +154,21 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
| 147 | rdp->dynticks_fqs); | 154 | rdp->dynticks_fqs); |
| 148 | #endif /* #ifdef CONFIG_NO_HZ */ | 155 | #endif /* #ifdef CONFIG_NO_HZ */ |
| 149 | seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); | 156 | seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); |
| 150 | seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen, | 157 | seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen, |
| 151 | ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != | 158 | ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != |
| 152 | rdp->nxttail[RCU_NEXT_TAIL]], | 159 | rdp->nxttail[RCU_NEXT_TAIL]], |
| 153 | ".R"[rdp->nxttail[RCU_WAIT_TAIL] != | 160 | ".R"[rdp->nxttail[RCU_WAIT_TAIL] != |
| 154 | rdp->nxttail[RCU_NEXT_READY_TAIL]], | 161 | rdp->nxttail[RCU_NEXT_READY_TAIL]], |
| 155 | ".W"[rdp->nxttail[RCU_DONE_TAIL] != | 162 | ".W"[rdp->nxttail[RCU_DONE_TAIL] != |
| 156 | rdp->nxttail[RCU_WAIT_TAIL]], | 163 | rdp->nxttail[RCU_WAIT_TAIL]], |
| 157 | ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], | 164 | ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]); |
| 165 | #ifdef CONFIG_RCU_BOOST | ||
| 166 | seq_printf(m, ",%d,\"%c\"", | ||
| 158 | per_cpu(rcu_cpu_has_work, rdp->cpu), | 167 | per_cpu(rcu_cpu_has_work, rdp->cpu), |
| 159 | convert_kthread_status(per_cpu(rcu_cpu_kthread_status, | 168 | convert_kthread_status(per_cpu(rcu_cpu_kthread_status, |
| 160 | rdp->cpu)), | 169 | rdp->cpu))); |
| 161 | rdp->blimit); | 170 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
| 171 | seq_printf(m, ",%ld", rdp->blimit); | ||
| 162 | seq_printf(m, ",%lu,%lu,%lu\n", | 172 | seq_printf(m, ",%lu,%lu,%lu\n", |
| 163 | rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); | 173 | rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); |
| 164 | } | 174 | } |
@@ -169,7 +179,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
| 169 | #ifdef CONFIG_NO_HZ | 179 | #ifdef CONFIG_NO_HZ |
| 170 | seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); | 180 | seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); |
| 171 | #endif /* #ifdef CONFIG_NO_HZ */ | 181 | #endif /* #ifdef CONFIG_NO_HZ */ |
| 172 | seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n"); | 182 | seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\""); |
| 183 | #ifdef CONFIG_RCU_BOOST | ||
| 184 | seq_puts(m, "\"kt\",\"ktl\""); | ||
| 185 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
| 186 | seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n"); | ||
| 173 | #ifdef CONFIG_TREE_PREEMPT_RCU | 187 | #ifdef CONFIG_TREE_PREEMPT_RCU |
| 174 | seq_puts(m, "\"rcu_preempt:\"\n"); | 188 | seq_puts(m, "\"rcu_preempt:\"\n"); |
| 175 | PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); | 189 | PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); |
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 13960170cad4..40cf63ddd4b3 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

 char *softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
-	"TASKLET", "SCHED", "HRTIMER"
+	"TASKLET", "SCHED", "HRTIMER", "RCU"
 };

 /*
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 1e88485c16a0..0a7ed5b5e281 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -2187,6 +2187,7 @@ static const struct flag flags[] = {
 	{ "TASKLET_SOFTIRQ", 6 },
 	{ "SCHED_SOFTIRQ", 7 },
 	{ "HRTIMER_SOFTIRQ", 8 },
+	{ "RCU_SOFTIRQ", 9 },

 	{ "HRTIMER_NORESTART", 0 },
 	{ "HRTIMER_RESTART", 1 },
