author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-26 16:59:59 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-26 16:59:59 -0400
commit    257f49251c802c67030c11f63cee4ed7b50f6639 (patch)
tree      99fcefa267be1b2ae63b48bf2094157732ecece1 /include/linux
parent    293a032eb95f3c6c212c1541e94c14b111731313 (diff)
parent    e692ab53473c93c0d0820618c97aa74a62ab67da (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  [PATCH] sched: debug feature - make the sched-domains tree runtime-tweakable
  [PATCH] sched: add above_background_load() function
  [PATCH] sched: update Documentation/sched-stats.txt
  [PATCH] sched: mark sysrq_sched_debug_show() static
  [PATCH] sched: make cpu_clock() not use the rq clock
  [PATCH] sched: remove unused rq->load_balance_class
  [PATCH] sched: arch preempt notifier mechanism
  [PATCH] sched: increase SCHED_LOAD_SCALE_FUZZ
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/preempt.h  44
-rw-r--r--  include/linux/sched.h    23
2 files changed, 66 insertions(+), 1 deletion(-)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index d0926d63406c..484988ed301e 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -8,6 +8,7 @@
 
 #include <linux/thread_info.h>
 #include <linux/linkage.h>
+#include <linux/list.h>
 
 #ifdef CONFIG_DEBUG_PREEMPT
   extern void fastcall add_preempt_count(int val);
@@ -60,4 +61,47 @@ do { \
 
 #endif
 
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+
+struct preempt_notifier;
+
+/**
+ * preempt_ops - notifiers called when a task is preempted and rescheduled
+ * @sched_in: we're about to be rescheduled:
+ *    notifier: struct preempt_notifier for the task being scheduled
+ *    cpu:  cpu we're scheduled on
+ * @sched_out: we've just been preempted
+ *    notifier: struct preempt_notifier for the task being preempted
+ *    next: the task that's kicking us out
+ */
+struct preempt_ops {
+	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
+	void (*sched_out)(struct preempt_notifier *notifier,
+			  struct task_struct *next);
+};
+
+/**
+ * preempt_notifier - key for installing preemption notifiers
+ * @link: internal use
+ * @ops: defines the notifier functions to be called
+ *
+ * Usually used in conjunction with container_of().
+ */
+struct preempt_notifier {
+	struct hlist_node link;
+	struct preempt_ops *ops;
+};
+
+void preempt_notifier_register(struct preempt_notifier *notifier);
+void preempt_notifier_unregister(struct preempt_notifier *notifier);
+
+static inline void preempt_notifier_init(struct preempt_notifier *notifier,
+				     struct preempt_ops *ops)
+{
+	INIT_HLIST_NODE(&notifier->link);
+	notifier->ops = ops;
+}
+
+#endif
+
 #endif /* __LINUX_PREEMPT_H */
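For orientation, here is a minimal, hypothetical sketch of how a subsystem could consume the notifier API added above. The consumer structure and function names are invented for illustration; only struct preempt_ops, struct preempt_notifier, preempt_notifier_init()/register()/unregister() and the container_of() pattern come from the header itself. Registration applies to the calling task, and the callbacks run from scheduler context, so they must not sleep:

/* Hypothetical consumer of the preempt notifier API shown above. */
#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/sched.h>

struct my_vcpu {				/* hypothetical per-task state */
	int last_cpu;
	struct preempt_notifier pn;		/* embedded; recovered via container_of() */
};

static void my_sched_in(struct preempt_notifier *notifier, int cpu)
{
	struct my_vcpu *vcpu = container_of(notifier, struct my_vcpu, pn);

	vcpu->last_cpu = cpu;			/* we are being scheduled back in on 'cpu' */
}

static void my_sched_out(struct preempt_notifier *notifier,
			 struct task_struct *next)
{
	/* current task is being preempted; 'next' is taking the CPU */
}

static struct preempt_ops my_preempt_ops = {
	.sched_in  = my_sched_in,
	.sched_out = my_sched_out,
};

/* Called by the task that wants notifications about itself: */
static void my_attach(struct my_vcpu *vcpu)
{
	preempt_notifier_init(&vcpu->pn, &my_preempt_ops);
	preempt_notifier_register(&vcpu->pn);
}

static void my_detach(struct my_vcpu *vcpu)
{
	preempt_notifier_unregister(&vcpu->pn);
}

Embedding the preempt_notifier inside the consumer's own structure is what makes the container_of() usage mentioned in the kernel-doc work.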
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 33b9b4841ee7..2e490271acf6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -681,7 +681,7 @@ enum cpu_idle_type {
 #define SCHED_LOAD_SHIFT	10
 #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
 
-#define SCHED_LOAD_SCALE_FUZZ	(SCHED_LOAD_SCALE >> 5)
+#define SCHED_LOAD_SCALE_FUZZ	(SCHED_LOAD_SCALE >> 1)
 
 #ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
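For reference, SCHED_LOAD_SHIFT is 10, so SCHED_LOAD_SCALE is 1 << 10 = 1024; this hunk therefore raises SCHED_LOAD_SCALE_FUZZ from 1024 >> 5 = 32 to 1024 >> 1 = 512, i.e. from roughly 3% to 50% of a single nice-0 task's load.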
@@ -786,6 +786,22 @@ extern int partition_sched_domains(cpumask_t *partition1,
 
 #endif	/* CONFIG_SMP */
 
+/*
+ * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
+ * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
+ * task of nice 0 or enough lower priority tasks to bring up the
+ * weighted_cpuload
+ */
+static inline int above_background_load(void)
+{
+	unsigned long cpu;
+
+	for_each_online_cpu(cpu) {
+		if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
+			return 1;
+	}
+	return 0;
+}
 
 struct io_context;	/* See blkdev.h */
 struct cpuset;
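A possible caller is sketched below; the function is hypothetical and only above_background_load(), its return convention (1 if any online CPU carries at least one nice-0 task's worth of weighted load, 0 otherwise) and the header come from the hunk above:

/* Hypothetical caller; not part of this merge. */
#include <linux/sched.h>

static void maybe_do_housekeeping(void)
{
	/* Skip optional background work while any CPU is busy with
	 * at least a nice-0 task's worth of load. */
	if (above_background_load())
		return;

	/* ... deferrable, best-effort work would go here ... */
}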
@@ -935,6 +951,11 @@ struct task_struct {
 	struct sched_class *sched_class;
 	struct sched_entity se;
 
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+	/* list of struct preempt_notifier: */
+	struct hlist_head preempt_notifiers;
+#endif
+
 	unsigned short ioprio;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
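Tying the two files together: each task now carries an hlist of registered notifiers, and the scheduler walks that list on context switch. The function below is only an illustrative sketch of that firing step, assuming it runs on the task's own CPU; the real implementation lives in kernel/sched.c and is not part of these hunks. The struct and member names come from the headers above, and the 4-argument hlist_for_each_entry() matches the list API of this kernel era:

/* Illustrative sketch of the scheduler-side hook (see kernel/sched.c). */
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>

static void fire_sched_in_notifiers_sketch(struct task_struct *curr)
{
	struct preempt_notifier *notifier;
	struct hlist_node *node;

	/* Tell every notifier registered on 'curr' that it is running
	 * again, and on which CPU. */
	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());
}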