aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/linux/preempt.h44
-rw-r--r--include/linux/sched.h5
-rw-r--r--kernel/Kconfig.preempt3
-rw-r--r--kernel/sched.c73
4 files changed, 123 insertions, 2 deletions
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index d0926d63406c..484988ed301e 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -8,6 +8,7 @@
8 8
9#include <linux/thread_info.h> 9#include <linux/thread_info.h>
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <linux/list.h>
11 12
12#ifdef CONFIG_DEBUG_PREEMPT 13#ifdef CONFIG_DEBUG_PREEMPT
13 extern void fastcall add_preempt_count(int val); 14 extern void fastcall add_preempt_count(int val);
@@ -60,4 +61,47 @@ do { \
60 61
61#endif 62#endif
62 63
64#ifdef CONFIG_PREEMPT_NOTIFIERS
65
66struct preempt_notifier;
67
68/**
69 * preempt_ops - notifiers called when a task is preempted and rescheduled
70 * @sched_in: we're about to be rescheduled:
71 * notifier: struct preempt_notifier for the task being scheduled
72 * cpu: cpu we're scheduled on
73 * @sched_out: we've just been preempted
74 * notifier: struct preempt_notifier for the task being preempted
75 * next: the task that's kicking us out
76 */
77struct preempt_ops {
78 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
79 void (*sched_out)(struct preempt_notifier *notifier,
80 struct task_struct *next);
81};
82
83/**
84 * preempt_notifier - key for installing preemption notifiers
85 * @link: internal use
86 * @ops: defines the notifier functions to be called
87 *
88 * Usually used in conjunction with container_of().
89 */
90struct preempt_notifier {
91 struct hlist_node link;
92 struct preempt_ops *ops;
93};
94
95void preempt_notifier_register(struct preempt_notifier *notifier);
96void preempt_notifier_unregister(struct preempt_notifier *notifier);
97
98static inline void preempt_notifier_init(struct preempt_notifier *notifier,
99 struct preempt_ops *ops)
100{
101 INIT_HLIST_NODE(&notifier->link);
102 notifier->ops = ops;
103}
104
105#endif
106
63#endif /* __LINUX_PREEMPT_H */ 107#endif /* __LINUX_PREEMPT_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7c61b50823fa..7a4de8768748 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -935,6 +935,11 @@ struct task_struct {
935 struct sched_class *sched_class; 935 struct sched_class *sched_class;
936 struct sched_entity se; 936 struct sched_entity se;
937 937
938#ifdef CONFIG_PREEMPT_NOTIFIERS
939 /* list of struct preempt_notifier: */
940 struct hlist_head preempt_notifiers;
941#endif
942
938 unsigned short ioprio; 943 unsigned short ioprio;
939#ifdef CONFIG_BLK_DEV_IO_TRACE 944#ifdef CONFIG_BLK_DEV_IO_TRACE
940 unsigned int btrace_seq; 945 unsigned int btrace_seq;
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index c64ce9c14207..6b066632e40c 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -63,3 +63,6 @@ config PREEMPT_BKL
63 Say Y here if you are building a kernel for a desktop system. 63 Say Y here if you are building a kernel for a desktop system.
64 Say N if you are unsure. 64 Say N if you are unsure.
65 65
66config PREEMPT_NOTIFIERS
67 bool
68
diff --git a/kernel/sched.c b/kernel/sched.c
index 93cf241cfbe9..e901aa59f206 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1592,6 +1592,10 @@ static void __sched_fork(struct task_struct *p)
1592 INIT_LIST_HEAD(&p->run_list); 1592 INIT_LIST_HEAD(&p->run_list);
1593 p->se.on_rq = 0; 1593 p->se.on_rq = 0;
1594 1594
1595#ifdef CONFIG_PREEMPT_NOTIFIERS
1596 INIT_HLIST_HEAD(&p->preempt_notifiers);
1597#endif
1598
1595 /* 1599 /*
1596 * We mark the process as running here, but have not actually 1600 * We mark the process as running here, but have not actually
1597 * inserted it onto the runqueue yet. This guarantees that 1601 * inserted it onto the runqueue yet. This guarantees that
@@ -1673,6 +1677,63 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
1673 task_rq_unlock(rq, &flags); 1677 task_rq_unlock(rq, &flags);
1674} 1678}
1675 1679
1680#ifdef CONFIG_PREEMPT_NOTIFIERS
1681
1682/**
 1683 * preempt_notifier_register - tell me when current is being preempted
1684 * and rescheduled
1685 */
1686void preempt_notifier_register(struct preempt_notifier *notifier)
1687{
1688 hlist_add_head(&notifier->link, &current->preempt_notifiers);
1689}
1690EXPORT_SYMBOL_GPL(preempt_notifier_register);
1691
1692/**
1693 * preempt_notifier_unregister - no longer interested in preemption notifications
1694 *
1695 * This is safe to call from within a preemption notifier.
1696 */
1697void preempt_notifier_unregister(struct preempt_notifier *notifier)
1698{
1699 hlist_del(&notifier->link);
1700}
1701EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
1702
1703static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1704{
1705 struct preempt_notifier *notifier;
1706 struct hlist_node *node;
1707
1708 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1709 notifier->ops->sched_in(notifier, raw_smp_processor_id());
1710}
1711
1712static void
1713fire_sched_out_preempt_notifiers(struct task_struct *curr,
1714 struct task_struct *next)
1715{
1716 struct preempt_notifier *notifier;
1717 struct hlist_node *node;
1718
1719 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1720 notifier->ops->sched_out(notifier, next);
1721}
1722
1723#else
1724
1725static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1726{
1727}
1728
1729static void
1730fire_sched_out_preempt_notifiers(struct task_struct *curr,
1731 struct task_struct *next)
1732{
1733}
1734
1735#endif
1736
1676/** 1737/**
1677 * prepare_task_switch - prepare to switch tasks 1738 * prepare_task_switch - prepare to switch tasks
1678 * @rq: the runqueue preparing to switch 1739 * @rq: the runqueue preparing to switch
@@ -1685,8 +1746,11 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
1685 * prepare_task_switch sets up locking and calls architecture specific 1746 * prepare_task_switch sets up locking and calls architecture specific
1686 * hooks. 1747 * hooks.
1687 */ 1748 */
1688static inline void prepare_task_switch(struct rq *rq, struct task_struct *next) 1749static inline void
1750prepare_task_switch(struct rq *rq, struct task_struct *prev,
1751 struct task_struct *next)
1689{ 1752{
1753 fire_sched_out_preempt_notifiers(prev, next);
1690 prepare_lock_switch(rq, next); 1754 prepare_lock_switch(rq, next);
1691 prepare_arch_switch(next); 1755 prepare_arch_switch(next);
1692} 1756}
@@ -1728,6 +1792,7 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
1728 prev_state = prev->state; 1792 prev_state = prev->state;
1729 finish_arch_switch(prev); 1793 finish_arch_switch(prev);
1730 finish_lock_switch(rq, prev); 1794 finish_lock_switch(rq, prev);
1795 fire_sched_in_preempt_notifiers(current);
1731 if (mm) 1796 if (mm)
1732 mmdrop(mm); 1797 mmdrop(mm);
1733 if (unlikely(prev_state == TASK_DEAD)) { 1798 if (unlikely(prev_state == TASK_DEAD)) {
@@ -1768,7 +1833,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
1768{ 1833{
1769 struct mm_struct *mm, *oldmm; 1834 struct mm_struct *mm, *oldmm;
1770 1835
1771 prepare_task_switch(rq, next); 1836 prepare_task_switch(rq, prev, next);
1772 mm = next->mm; 1837 mm = next->mm;
1773 oldmm = prev->active_mm; 1838 oldmm = prev->active_mm;
1774 /* 1839 /*
@@ -6335,6 +6400,10 @@ void __init sched_init(void)
6335 6400
6336 set_load_weight(&init_task); 6401 set_load_weight(&init_task);
6337 6402
6403#ifdef CONFIG_PREEMPT_NOTIFIERS
6404 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
6405#endif
6406
6338#ifdef CONFIG_SMP 6407#ifdef CONFIG_SMP
6339 nr_cpu_ids = highest_cpu + 1; 6408 nr_cpu_ids = highest_cpu + 1;
6340 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL); 6409 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);