diff options
author | Avi Kivity <avi@qumranet.com> | 2007-07-26 07:40:43 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2007-07-26 07:40:43 -0400 |
commit | e107be36efb2a233833e8c9899039a370e4b2318 (patch) | |
tree | 655955aa81aefeff441c412adaafe9b51a00ff56 /kernel | |
parent | b47e8608a08766ef8121cd747d3aaf6c3dc22649 (diff) |
[PATCH] sched: arch preempt notifier mechanism
This adds a general mechanism whereby a task can request the scheduler to
notify it whenever it is preempted or scheduled back in. This allows the
task to swap any special-purpose registers like the fpu or Intel's VT
registers.
Signed-off-by: Avi Kivity <avi@qumranet.com>
[ mingo@elte.hu: fixes, cleanups ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/Kconfig.preempt | 3 | ||||
-rw-r--r-- | kernel/sched.c | 73 |
2 files changed, 74 insertions, 2 deletions
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index c64ce9c14207..6b066632e40c 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt | |||
@@ -63,3 +63,6 @@ config PREEMPT_BKL | |||
63 | Say Y here if you are building a kernel for a desktop system. | 63 | Say Y here if you are building a kernel for a desktop system. |
64 | Say N if you are unsure. | 64 | Say N if you are unsure. |
65 | 65 | ||
66 | config PREEMPT_NOTIFIERS | ||
67 | bool | ||
68 | |||
diff --git a/kernel/sched.c b/kernel/sched.c index 93cf241cfbe9..e901aa59f206 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -1592,6 +1592,10 @@ static void __sched_fork(struct task_struct *p) | |||
1592 | INIT_LIST_HEAD(&p->run_list); | 1592 | INIT_LIST_HEAD(&p->run_list); |
1593 | p->se.on_rq = 0; | 1593 | p->se.on_rq = 0; |
1594 | 1594 | ||
1595 | #ifdef CONFIG_PREEMPT_NOTIFIERS | ||
1596 | INIT_HLIST_HEAD(&p->preempt_notifiers); | ||
1597 | #endif | ||
1598 | |||
1595 | /* | 1599 | /* |
1596 | * We mark the process as running here, but have not actually | 1600 | * We mark the process as running here, but have not actually |
1597 | * inserted it onto the runqueue yet. This guarantees that | 1601 | * inserted it onto the runqueue yet. This guarantees that |
@@ -1673,6 +1677,63 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
1673 | task_rq_unlock(rq, &flags); | 1677 | task_rq_unlock(rq, &flags); |
1674 | } | 1678 | } |
1675 | 1679 | ||
1680 | #ifdef CONFIG_PREEMPT_NOTIFIERS | ||
1681 | |||
1682 | /** | ||
1683 | * preempt_notifier_register - tell me when current is being preempted | ||
1684 | * and rescheduled | ||
1685 | */ | ||
1686 | void preempt_notifier_register(struct preempt_notifier *notifier) | ||
1687 | { | ||
1688 | hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); | ||
1689 | } | ||
1690 | EXPORT_SYMBOL_GPL(preempt_notifier_register); | ||
1691 | |||
1692 | /** | ||
1693 | * preempt_notifier_unregister - no longer interested in preemption notifications | ||
1694 | * | ||
1695 | * This is safe to call from within a preemption notifier. | ||
1696 | */ | ||
1697 | void preempt_notifier_unregister(struct preempt_notifier *notifier) | ||
1698 | { | ||
1699 | hlist_del(¬ifier->link); | ||
1700 | } | ||
1701 | EXPORT_SYMBOL_GPL(preempt_notifier_unregister); | ||
1702 | |||
1703 | static void fire_sched_in_preempt_notifiers(struct task_struct *curr) | ||
1704 | { | ||
1705 | struct preempt_notifier *notifier; | ||
1706 | struct hlist_node *node; | ||
1707 | |||
1708 | hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) | ||
1709 | notifier->ops->sched_in(notifier, raw_smp_processor_id()); | ||
1710 | } | ||
1711 | |||
1712 | static void | ||
1713 | fire_sched_out_preempt_notifiers(struct task_struct *curr, | ||
1714 | struct task_struct *next) | ||
1715 | { | ||
1716 | struct preempt_notifier *notifier; | ||
1717 | struct hlist_node *node; | ||
1718 | |||
1719 | hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) | ||
1720 | notifier->ops->sched_out(notifier, next); | ||
1721 | } | ||
1722 | |||
1723 | #else | ||
1724 | |||
1725 | static void fire_sched_in_preempt_notifiers(struct task_struct *curr) | ||
1726 | { | ||
1727 | } | ||
1728 | |||
1729 | static void | ||
1730 | fire_sched_out_preempt_notifiers(struct task_struct *curr, | ||
1731 | struct task_struct *next) | ||
1732 | { | ||
1733 | } | ||
1734 | |||
1735 | #endif | ||
1736 | |||
1676 | /** | 1737 | /** |
1677 | * prepare_task_switch - prepare to switch tasks | 1738 | * prepare_task_switch - prepare to switch tasks |
1678 | * @rq: the runqueue preparing to switch | 1739 | * @rq: the runqueue preparing to switch |
@@ -1685,8 +1746,11 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
1685 | * prepare_task_switch sets up locking and calls architecture specific | 1746 | * prepare_task_switch sets up locking and calls architecture specific |
1686 | * hooks. | 1747 | * hooks. |
1687 | */ | 1748 | */ |
1688 | static inline void prepare_task_switch(struct rq *rq, struct task_struct *next) | 1749 | static inline void |
1750 | prepare_task_switch(struct rq *rq, struct task_struct *prev, | ||
1751 | struct task_struct *next) | ||
1689 | { | 1752 | { |
1753 | fire_sched_out_preempt_notifiers(prev, next); | ||
1690 | prepare_lock_switch(rq, next); | 1754 | prepare_lock_switch(rq, next); |
1691 | prepare_arch_switch(next); | 1755 | prepare_arch_switch(next); |
1692 | } | 1756 | } |
@@ -1728,6 +1792,7 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev) | |||
1728 | prev_state = prev->state; | 1792 | prev_state = prev->state; |
1729 | finish_arch_switch(prev); | 1793 | finish_arch_switch(prev); |
1730 | finish_lock_switch(rq, prev); | 1794 | finish_lock_switch(rq, prev); |
1795 | fire_sched_in_preempt_notifiers(current); | ||
1731 | if (mm) | 1796 | if (mm) |
1732 | mmdrop(mm); | 1797 | mmdrop(mm); |
1733 | if (unlikely(prev_state == TASK_DEAD)) { | 1798 | if (unlikely(prev_state == TASK_DEAD)) { |
@@ -1768,7 +1833,7 @@ context_switch(struct rq *rq, struct task_struct *prev, | |||
1768 | { | 1833 | { |
1769 | struct mm_struct *mm, *oldmm; | 1834 | struct mm_struct *mm, *oldmm; |
1770 | 1835 | ||
1771 | prepare_task_switch(rq, next); | 1836 | prepare_task_switch(rq, prev, next); |
1772 | mm = next->mm; | 1837 | mm = next->mm; |
1773 | oldmm = prev->active_mm; | 1838 | oldmm = prev->active_mm; |
1774 | /* | 1839 | /* |
@@ -6335,6 +6400,10 @@ void __init sched_init(void) | |||
6335 | 6400 | ||
6336 | set_load_weight(&init_task); | 6401 | set_load_weight(&init_task); |
6337 | 6402 | ||
6403 | #ifdef CONFIG_PREEMPT_NOTIFIERS | ||
6404 | INIT_HLIST_HEAD(&init_task.preempt_notifiers); | ||
6405 | #endif | ||
6406 | |||
6338 | #ifdef CONFIG_SMP | 6407 | #ifdef CONFIG_SMP |
6339 | nr_cpu_ids = highest_cpu + 1; | 6408 | nr_cpu_ids = highest_cpu + 1; |
6340 | open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL); | 6409 | open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL); |