author		Andrea Bastoni <bastoni@cs.unc.edu>	2009-12-17 21:23:36 -0500
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-29 17:05:45 -0400
commit		4b38febbd59fd33542a343991262119eb9860f5e (patch)
tree		1af88a0d354abe344c2c2869631f76a1806d75c3 /include
parent		22763c5cf3690a681551162c15d34d935308c8d7 (diff)
[ported from 2008.3] Core LITMUS^RT infrastructure
Port the 2008.3 core LITMUS^RT infrastructure to Linux 2.6.32.

litmus_sched_class implements 4 new methods:

- prio_changed: void
- switched_to: void
- get_rr_interval: return infinity (i.e., 0)
- select_task_rq: return current cpu
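The litmus_sched_class itself lives outside include/, so it does not appear in this diff. For orientation, a rough sketch of what the four stubs listed above could look like; the prototypes are approximated from the 2.6.32 sched_class interface and should be treated as assumptions, not as the actual ported code:

	/* hypothetical illustration of the four stubs, not the actual file */
	static void prio_changed_litmus(struct rq *rq, struct task_struct *p,
					int oldprio, int running)
	{
		/* void: priorities are managed by the active plugin */
	}

	static void switched_to_litmus(struct rq *rq, struct task_struct *p,
				       int running)
	{
		/* void: admission happens via litmus_admit_task() instead */
	}

	static unsigned int get_rr_interval_litmus(struct rq *rq,
						   struct task_struct *p)
	{
		return 0;	/* "infinity": no round-robin timeslice */
	}

	static int select_task_rq_litmus(struct task_struct *p, int sd_flag,
					 int flags)
	{
		return smp_processor_id();	/* return current cpu */
	}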
Diffstat (limited to 'include')
-rw-r--r--	include/linux/sched.h		|   7
-rw-r--r--	include/litmus/feather_buffer.h	|  94
-rw-r--r--	include/litmus/feather_trace.h	|  36
-rw-r--r--	include/litmus/heap.h		|  77
-rw-r--r--	include/litmus/jobs.h		|   9
-rw-r--r--	include/litmus/litmus.h		| 177
-rw-r--r--	include/litmus/rt_param.h	| 175
-rw-r--r--	include/litmus/sched_plugin.h	| 159
-rw-r--r--	include/litmus/sched_trace.h	| 191
-rw-r--r--	include/litmus/trace.h		| 113
10 files changed, 1038 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 75e6e60bf583..bb046c0adf99 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -38,6 +38,7 @@
 #define SCHED_BATCH		3
 /* SCHED_ISO: reserved but not implemented yet */
 #define SCHED_IDLE		5
+#define SCHED_LITMUS		6
 /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
 #define SCHED_RESET_ON_FORK	0x40000000
 
@@ -94,6 +95,8 @@ struct sched_param {
 
 #include <asm/processor.h>
 
+#include <litmus/rt_param.h>
+
 struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
@@ -1505,6 +1508,10 @@ struct task_struct {
 	int make_it_fail;
 #endif
 	struct prop_local_single dirties;
+
+	/* LITMUS RT parameters and state */
+	struct rt_param rt_param;
+
 #ifdef CONFIG_LATENCYTOP
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
diff --git a/include/litmus/feather_buffer.h b/include/litmus/feather_buffer.h
new file mode 100644
index 000000000000..6c18277fdfc9
--- /dev/null
+++ b/include/litmus/feather_buffer.h
@@ -0,0 +1,94 @@
#ifndef _FEATHER_BUFFER_H_
#define _FEATHER_BUFFER_H_

/* requires UINT_MAX and memcpy */

#define SLOT_FREE	0
#define SLOT_BUSY	1
#define SLOT_READY	2

struct ft_buffer {
	unsigned int	slot_count;
	unsigned int	slot_size;

	int		free_count;
	unsigned int	write_idx;
	unsigned int	read_idx;

	char*		slots;
	void*		buffer_mem;
	unsigned int	failed_writes;
};

static inline int init_ft_buffer(struct ft_buffer* buf,
				 unsigned int slot_count,
				 unsigned int slot_size,
				 char* slots,
				 void* buffer_mem)
{
	int i = 0;
	if (!slot_count || UINT_MAX % slot_count != slot_count - 1) {
		/* The slot count must divide UINT_MAX + 1 so that when it
		 * wraps around, the index correctly points to 0.
		 */
		return 0;
	} else {
		buf->slot_count    = slot_count;
		buf->slot_size     = slot_size;
		buf->slots         = slots;
		buf->buffer_mem    = buffer_mem;
		buf->free_count    = slot_count;
		buf->write_idx     = 0;
		buf->read_idx      = 0;
		buf->failed_writes = 0;
		for (i = 0; i < slot_count; i++)
			buf->slots[i] = SLOT_FREE;
		return 1;
	}
}

static inline int ft_buffer_start_write(struct ft_buffer* buf, void **ptr)
{
	int free = fetch_and_dec(&buf->free_count);
	unsigned int idx;
	if (free <= 0) {
		fetch_and_inc(&buf->free_count);
		*ptr = 0;
		fetch_and_inc(&buf->failed_writes);
		return 0;
	} else {
		idx = fetch_and_inc((int*) &buf->write_idx) % buf->slot_count;
		buf->slots[idx] = SLOT_BUSY;
		*ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size;
		return 1;
	}
}

static inline void ft_buffer_finish_write(struct ft_buffer* buf, void *ptr)
{
	unsigned int idx = ((char*) ptr - (char*) buf->buffer_mem) / buf->slot_size;
	buf->slots[idx] = SLOT_READY;
}


/* exclusive reader access is assumed */
static inline int ft_buffer_read(struct ft_buffer* buf, void* dest)
{
	unsigned int idx;
	if (buf->free_count == buf->slot_count)
		/* nothing available */
		return 0;
	idx = buf->read_idx % buf->slot_count;
	if (buf->slots[idx] == SLOT_READY) {
		memcpy(dest, ((char*) buf->buffer_mem) + idx * buf->slot_size,
		       buf->slot_size);
		buf->slots[idx] = SLOT_FREE;
		buf->read_idx++;
		fetch_and_inc(&buf->free_count);
		return 1;
	} else
		return 0;
}


#endif
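struct ft_buffer is a lock-free ring of fixed-size slots: a writer reserves a slot by atomically decrementing free_count (rolling the reservation back, and counting a failed write, if no slot is left), fills it, and marks it SLOT_READY; the exclusive reader then consumes slots in order. A minimal usage sketch, assuming the fetch_and_inc()/fetch_and_dec() atomics that this header expects to be provided elsewhere, and a made-up record type:

	struct event { unsigned long id; };	/* hypothetical payload */

	static char slot_state[32];		/* one state byte per slot */
	static struct event slot_mem[32];	/* 32 divides UINT_MAX + 1 */
	static struct ft_buffer buf;

	void buffer_demo(void)
	{
		struct event *e;
		struct event out;

		init_ft_buffer(&buf, 32, sizeof(struct event),
			       slot_state, slot_mem);

		if (ft_buffer_start_write(&buf, (void**) &e)) {
			e->id = 42;			 /* fill the reserved slot */
			ft_buffer_finish_write(&buf, e); /* publish: SLOT_READY   */
		}

		if (ft_buffer_read(&buf, &out))		 /* copies slot into out   */
			/* process out.id */;
	}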
diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h
new file mode 100644
index 000000000000..3ac1ee5e0277
--- /dev/null
+++ b/include/litmus/feather_trace.h
@@ -0,0 +1,36 @@
#ifndef _FEATHER_TRACE_H_
#define _FEATHER_TRACE_H_


int ft_enable_event(unsigned long id);
int ft_disable_event(unsigned long id);
int ft_is_event_enabled(unsigned long id);
int ft_disable_all_events(void);

#ifndef __ARCH_HAS_FEATHER_TRACE
/* provide default implementation */

#define feather_callback

#define MAX_EVENTS 1024

extern int ft_events[MAX_EVENTS];

#define ft_event(id, callback) \
	if (ft_events[id]) callback();

#define ft_event0(id, callback) \
	if (ft_events[id]) callback(id);

#define ft_event1(id, callback, param) \
	if (ft_events[id]) callback(id, param);

#define ft_event2(id, callback, param, param2) \
	if (ft_events[id]) callback(id, param, param2);

#define ft_event3(id, callback, p, p2, p3) \
	if (ft_events[id]) callback(id, p, p2, p3);
#endif


#endif
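In this default implementation an event is simply a callback guarded by a global on/off array, so a disabled event costs one load and one branch (architectures with __ARCH_HAS_FEATHER_TRACE patch the call sites instead). A sketch with a made-up event id and callback:

	#define DEMO_EVENT 7	/* hypothetical id, must be < MAX_EVENTS */

	feather_callback void demo_callback(unsigned long id, unsigned long arg)
	{
		/* record (id, arg), e.g., into an ft_buffer */
	}

	void demo_emit(unsigned long arg)
	{
		/* expands to:
		 * if (ft_events[DEMO_EVENT]) demo_callback(DEMO_EVENT, arg); */
		ft_event1(DEMO_EVENT, demo_callback, arg);
	}

	/* events are off until explicitly enabled:
	 * ft_enable_event(DEMO_EVENT); */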
diff --git a/include/litmus/heap.h b/include/litmus/heap.h
new file mode 100644
index 000000000000..da959b0bec9c
--- /dev/null
+++ b/include/litmus/heap.h
@@ -0,0 +1,77 @@
/* heaps.h -- Binomial Heaps
 *
 * (c) 2008, 2009 Bjoern Brandenburg
 */

#ifndef HEAP_H
#define HEAP_H

#define NOT_IN_HEAP UINT_MAX

struct heap_node {
	struct heap_node*	parent;
	struct heap_node*	next;
	struct heap_node*	child;

	unsigned int		degree;
	void*			value;
	struct heap_node**	ref;
};

struct heap {
	struct heap_node*	head;
	/* We cache the minimum of the heap.
	 * This speeds up repeated peek operations.
	 */
	struct heap_node*	min;
};

typedef int (*heap_prio_t)(struct heap_node* a, struct heap_node* b);

void heap_init(struct heap* heap);
void heap_node_init(struct heap_node** ref_to_heap_node_ptr, void* value);

static inline int heap_node_in_heap(struct heap_node* h)
{
	return h->degree != NOT_IN_HEAP;
}

static inline int heap_empty(struct heap* heap)
{
	return heap->head == NULL && heap->min == NULL;
}

/* insert (and reinitialize) a node into the heap */
void heap_insert(heap_prio_t higher_prio,
		 struct heap* heap,
		 struct heap_node* node);

/* merge addition into target */
void heap_union(heap_prio_t higher_prio,
		struct heap* target,
		struct heap* addition);

struct heap_node* heap_peek(heap_prio_t higher_prio,
			    struct heap* heap);

struct heap_node* heap_take(heap_prio_t higher_prio,
			    struct heap* heap);

void heap_uncache_min(heap_prio_t higher_prio, struct heap* heap);
int heap_decrease(heap_prio_t higher_prio, struct heap_node* node);

void heap_delete(heap_prio_t higher_prio,
		 struct heap* heap,
		 struct heap_node* node);

/* allocate from memcache */
struct heap_node* heap_node_alloc(int gfp_flags);
void heap_node_free(struct heap_node* hn);

/* allocate a heap node for value and insert into the heap */
int heap_add(heap_prio_t higher_prio, struct heap* heap,
	     void* value, int gfp_flags);

void* heap_take_del(heap_prio_t higher_prio,
		    struct heap* heap);
#endif
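A heap_prio_t comparator returns nonzero if its first argument has higher priority. A sketch of an EDF-ordered ready heap over the task pointers stored in value, using the heap2task() and earlier_deadline() helpers that litmus.h (below) provides:

	static int edf_higher_prio(struct heap_node* a, struct heap_node* b)
	{
		/* nonzero iff a's task has the earlier deadline */
		return earlier_deadline(heap2task(a), heap2task(b));
	}

	static struct heap ready;

	void heap_demo(struct task_struct* t)
	{
		struct task_struct* next;

		heap_init(&ready);
		/* allocates a heap_node for t and inserts it */
		heap_add(edf_higher_prio, &ready, t, GFP_ATOMIC);

		/* remove the earliest-deadline task and free its node */
		next = heap_take_del(edf_higher_prio, &ready);
		(void) next;
	}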
diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h
new file mode 100644
index 000000000000..9bd361ef3943
--- /dev/null
+++ b/include/litmus/jobs.h
@@ -0,0 +1,9 @@
#ifndef __LITMUS_JOBS_H__
#define __LITMUS_JOBS_H__

void prepare_for_next_period(struct task_struct *t);
void release_at(struct task_struct *t, lt_t start);
long complete_job(void);

#endif
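These three calls back the periodic-job protocol: prepare_for_next_period() advances a task's release and deadline, release_at() sets its (re)release time, and complete_job() puts the caller to sleep until its next release. The user-space loop they enable looks roughly like this (the wrapper name is hypothetical):

	for (;;) {
		do_work();		/* consume this job's budget          */
		sleep_next_period();	/* hypothetical liblitmus wrapper for */
					/* the complete_job() syscall backend */
	}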
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
new file mode 100644
index 000000000000..380fcb8acb33
--- /dev/null
+++ b/include/litmus/litmus.h
@@ -0,0 +1,177 @@
/*
 * Constant definitions related to
 * scheduling policy.
 */

#ifndef _LINUX_LITMUS_H_
#define _LINUX_LITMUS_H_

#include <linux/jiffies.h>
#include <litmus/sched_trace.h>

extern atomic_t release_master_cpu;

extern atomic_t __log_seq_no;

#define TRACE(fmt, args...) \
	sched_trace_log_message("%d P%d: " fmt, atomic_add_return(1, &__log_seq_no), \
				raw_smp_processor_id(), ## args)

#define TRACE_TASK(t, fmt, args...) \
	TRACE("(%s/%d) " fmt, (t)->comm, (t)->pid, ##args)

#define TRACE_CUR(fmt, args...) \
	TRACE_TASK(current, fmt, ## args)

#define TRACE_BUG_ON(cond) \
	do { if (cond) TRACE("BUG_ON(%s) at %s:%d " \
			     "called from %p current=%s/%d state=%d " \
			     "flags=%x partition=%d cpu=%d rtflags=%d" \
			     " job=%u knp=%d timeslice=%u\n", \
			     #cond, __FILE__, __LINE__, __builtin_return_address(0), current->comm, \
			     current->pid, current->state, current->flags, \
			     get_partition(current), smp_processor_id(), get_rt_flags(current), \
			     current->rt_param.job_params.job_no, current->rt_param.kernel_np, \
			     current->rt.time_slice \
		); } while(0);


/* in_list - is a given list_head queued on some list?
 */
static inline int in_list(struct list_head* list)
{
	return !( /* case 1: deleted */
		  (list->next == LIST_POISON1 &&
		   list->prev == LIST_POISON2)
		  ||
		  /* case 2: initialized */
		  (list->next == list &&
		   list->prev == list)
		);
}

#define NO_CPU 0xffffffff

void litmus_fork(struct task_struct *tsk);
void litmus_exec(void);
/* clean up real-time state of a task */
void exit_litmus(struct task_struct *dead_tsk);

long litmus_admit_task(struct task_struct *tsk);
void litmus_exit_task(struct task_struct *tsk);

#define is_realtime(t)		((t)->policy == SCHED_LITMUS)
#define rt_transition_pending(t) \
	((t)->rt_param.transition_pending)

#define tsk_rt(t)		(&(t)->rt_param)

/* Realtime utility macros */
#define get_rt_flags(t)		(tsk_rt(t)->flags)
#define set_rt_flags(t,f)	(tsk_rt(t)->flags=(f))
#define get_exec_cost(t)	(tsk_rt(t)->task_params.exec_cost)
#define get_exec_time(t)	(tsk_rt(t)->job_params.exec_time)
#define get_rt_period(t)	(tsk_rt(t)->task_params.period)
#define get_rt_phase(t)		(tsk_rt(t)->task_params.phase)
#define get_partition(t)	(tsk_rt(t)->task_params.cpu)
#define get_deadline(t)		(tsk_rt(t)->job_params.deadline)
#define get_release(t)		(tsk_rt(t)->job_params.release)
#define get_class(t)		(tsk_rt(t)->task_params.cls)

inline static int budget_exhausted(struct task_struct* t)
{
	return get_exec_time(t) >= get_exec_cost(t);
}

#define is_hrt(t)	\
	(tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
#define is_srt(t)	\
	(tsk_rt(t)->task_params.cls == RT_CLASS_SOFT)
#define is_be(t)	\
	(tsk_rt(t)->task_params.cls == RT_CLASS_BEST_EFFORT)

/* Our notion of time within LITMUS: kernel monotonic time. */
static inline lt_t litmus_clock(void)
{
	return ktime_to_ns(ktime_get());
}

/* A macro to convert from nanoseconds to ktime_t. */
#define ns_to_ktime(t)		ktime_add_ns(ktime_set(0, 0), t)

#define get_domain(t)		(tsk_rt(t)->domain)

/* Honor the flag in the preempt_count variable that is set
 * when scheduling is in progress.
 */
#define is_running(t)			\
	((t)->state == TASK_RUNNING ||	\
	 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)

#define is_blocked(t)		\
	(!is_running(t))
#define is_released(t, now)	\
	(lt_before_eq(get_release(t), now))
#define is_tardy(t, now)	\
	(lt_before_eq(tsk_rt(t)->job_params.deadline, now))

/* real-time comparison macros */
#define earlier_deadline(a, b) (lt_before(\
	(a)->rt_param.job_params.deadline,\
	(b)->rt_param.job_params.deadline))
#define earlier_release(a, b)  (lt_before(\
	(a)->rt_param.job_params.release,\
	(b)->rt_param.job_params.release))

#define make_np(t) do { t->rt_param.kernel_np++; } while(0)
#define take_np(t) do { t->rt_param.kernel_np--; } while(0)

#ifdef CONFIG_SRP
void srp_ceiling_block(void);
#else
#define srp_ceiling_block() /* nothing */
#endif

#define heap2task(hn) ((struct task_struct*) hn->value)

static inline int is_np(struct task_struct *t)
{
	return tsk_rt(t)->kernel_np;
}

#define request_exit_np(t)

static inline int is_present(struct task_struct* t)
{
	return t && tsk_rt(t)->present;
}


/* make the unit explicit */
typedef unsigned long quanta_t;

enum round {
	FLOOR,
	CEIL
};


/* Tick period is used to convert ns-specified execution
 * costs and periods into tick-based equivalents.
 */
extern ktime_t tick_period;

static inline quanta_t time2quanta(lt_t time, enum round round)
{
	s64 quantum_length = ktime_to_ns(tick_period);

	if (do_div(time, quantum_length) && round == CEIL)
		time++;
	return (quanta_t) time;
}

/* By how much is cpu staggered behind CPU 0? */
u64 cpu_stagger_offset(int cpu);

#endif
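As one example of how these helpers compose, a plugin's tick handler might detect an overrunning job with the accessor macros above (the surrounding plugin context is hypothetical):

	static void demo_tick(struct task_struct* t)
	{
		if (is_realtime(t) && budget_exhausted(t))
			TRACE_TASK(t, "job %u out of budget at %llu\n",
				   tsk_rt(t)->job_params.job_no,
				   litmus_clock());
	}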
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
new file mode 100644
index 000000000000..c599f848d1ed
--- /dev/null
+++ b/include/litmus/rt_param.h
@@ -0,0 +1,175 @@
/*
 * Definitions of the real-time task parameters
 * used by the LITMUS^RT scheduling extensions.
 */
#ifndef _LINUX_RT_PARAM_H_
#define _LINUX_RT_PARAM_H_

/* Litmus time type. */
typedef unsigned long long lt_t;

static inline int lt_after(lt_t a, lt_t b)
{
	return ((long long) b) - ((long long) a) < 0;
}
#define lt_before(a, b) lt_after(b, a)

static inline int lt_after_eq(lt_t a, lt_t b)
{
	return ((long long) a) - ((long long) b) >= 0;
}
#define lt_before_eq(a, b) lt_after_eq(b, a)

/* different types of clients */
typedef enum {
	RT_CLASS_HARD,
	RT_CLASS_SOFT,
	RT_CLASS_BEST_EFFORT
} task_class_t;

struct rt_task {
	lt_t		exec_cost;
	lt_t		period;
	lt_t		phase;
	unsigned int	cpu;
	task_class_t	cls;
};

/* don't export internal data structures to user space (liblitmus) */
#ifdef __KERNEL__

struct _rt_domain;
struct heap_node;
struct release_heap;

struct rt_job {
	/* Time instant the job was or will be released. */
	lt_t	release;
	/* What is the current deadline? */
	lt_t	deadline;

	/* How much service has this job received so far? */
	lt_t	exec_time;

	/* Which job is this? This is used to let user space
	 * specify which job to wait for, which is important if jobs
	 * overrun. If we just call sys_sleep_next_period() then we
	 * will unintentionally miss jobs after an overrun.
	 *
	 * Increase this sequence number when a job is released.
	 */
	unsigned int	job_no;
};


struct pfair_param;

/* RT task parameters for scheduling extensions
 * These parameters are inherited during clone and therefore must
 * be explicitly set up before the task set is launched.
 */
struct rt_param {
	/* is the task sleeping? */
	unsigned int		flags:8;

	/* do we need to check for srp blocking? */
	unsigned int		srp_non_recurse:1;

	/* is the task present? (true if it can be scheduled) */
	unsigned int		present:1;

	/* user controlled parameters */
	struct rt_task		task_params;

	/* timing parameters */
	struct rt_job		job_params;

	/* Task representing the current "inherited" task
	 * priority, assigned by inherit_priority() and
	 * return_priority() in the scheduler plugins.
	 * May point to self if PI does not result in
	 * an increased task priority.
	 */
	struct task_struct*	inh_task;

	/* Don't just dereference this pointer in kernel space!
	 * It might very well point to junk or nothing at all.
	 * NULL indicates that the task has not requested any non-preemptable
	 * section support.
	 * Not inherited upon fork.
	 */
	short*			np_flag;

	/* re-use unused counter in plugins that don't need it */
	union {
		/* For the FMLP under PSN-EDF, it is required to make the task
		 * non-preemptive from kernel space. In order not to interfere with
		 * user space, this counter indicates the kernel space np setting.
		 * kernel_np > 0 => task is non-preemptive
		 */
		unsigned int	kernel_np;

		/* Used by GQ-EDF */
		unsigned int	last_cpu;
	};

	/* This field can be used by plugins to store where the task
	 * is currently scheduled. It is the responsibility of the
	 * plugin to avoid race conditions.
	 *
	 * This is used by GSN-EDF and PFAIR.
	 */
	volatile int		scheduled_on;

	/* Is the stack of the task currently in use? This is updated by
	 * the LITMUS core.
	 *
	 * Be careful to avoid deadlocks!
	 */
	volatile int		stack_in_use;

	/* This field can be used by plugins to store where the task
	 * is currently linked. It is the responsibility of the plugin
	 * to avoid race conditions.
	 *
	 * Used by GSN-EDF.
	 */
	volatile int		linked_on;

	/* PFAIR/PD^2 state. Allocated on demand. */
	struct pfair_param*	pfair;

	/* Fields saved before BE->RT transition.
	 */
	int old_policy;
	int old_prio;

	/* ready queue for this task */
	struct _rt_domain*	domain;

	/* heap element for this task
	 *
	 * Warning: Don't statically allocate this node. The heap
	 * implementation swaps these between tasks, thus after
	 * dequeuing from a heap you may end up with a different node
	 * than the one you had when enqueuing the task. For the same
	 * reason, don't obtain and store references to this node
	 * other than this pointer (which is updated by the heap
	 * implementation).
	 */
	struct heap_node*	heap_node;
	struct release_heap*	rel_heap;

	/* Used by rt_domain to queue task in release list.
	 */
	struct list_head	list;
};

/* Possible RT flags */
#define RT_F_RUNNING	0x00000000
#define RT_F_SLEEP	0x00000001
#define RT_F_EXIT_SEM	0x00000008

#endif

#endif
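struct rt_task is the user-visible half of this interface. A sketch of how user space (e.g., liblitmus) might fill it in before the task is switched to SCHED_LITMUS and vetted by litmus_admit_task(); the setter call is hypothetical, only the layout comes from this header:

	struct rt_task params = {
		.exec_cost = 10000000ULL,	/* 10 ms worst-case budget (ns) */
		.period    = 100000000ULL,	/* 100 ms period (ns)           */
		.phase     = 0,			/* released at time 0           */
		.cpu       = 0,			/* partition, where applicable  */
		.cls       = RT_CLASS_SOFT,
	};

	/* hypothetical: hand the parameters to the kernel, then switch the
	 * task's policy to SCHED_LITMUS */
	/* set_rt_task_param(gettid(), &params); */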
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
new file mode 100644
index 000000000000..94952f6ccbfa
--- /dev/null
+++ b/include/litmus/sched_plugin.h
@@ -0,0 +1,159 @@
/*
 * Definition of the scheduler plugin interface.
 *
 */
#ifndef _LINUX_SCHED_PLUGIN_H_
#define _LINUX_SCHED_PLUGIN_H_

#include <linux/sched.h>

/* struct for semaphore with priority inheritance */
struct pi_semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
	union {
		/* highest-prio holder/waiter */
		struct task_struct *task;
		struct task_struct* cpu_task[NR_CPUS];
	} hp;
	/* current lock holder */
	struct task_struct *holder;
};

/************************ setup/tear down ********************/

typedef long (*activate_plugin_t) (void);
typedef long (*deactivate_plugin_t) (void);



/********************* scheduler invocation ******************/

/* Plugin-specific realtime tick handler */
typedef void (*scheduler_tick_t) (struct task_struct *cur);
/* The main scheduling function: pick the next task to run. */
typedef struct task_struct* (*schedule_t)(struct task_struct * prev);
/* Clean up after the task switch has occurred.
 * This function is called after every (even non-rt) task switch.
 */
typedef void (*finish_switch_t)(struct task_struct *prev);


/********************* task state changes ********************/

/* Called to set up a new real-time task.
 * Release the first job, enqueue, etc.
 * Task may already be running.
 */
typedef void (*task_new_t) (struct task_struct *task,
			    int on_rq,
			    int running);

/* Called to re-introduce a task after blocking.
 * Can potentially be called multiple times.
 */
typedef void (*task_wake_up_t) (struct task_struct *task);
/* Called to notify the plugin of a blocking real-time task.
 * It will only be called for real-time tasks, and before schedule() is called. */
typedef void (*task_block_t) (struct task_struct *task);
/* Called when a real-time task exits or changes to a different scheduling
 * class.
 * Free any allocated resources.
 */
typedef void (*task_exit_t) (struct task_struct *);

/* Called when new_owner is released from the wait queue.
 * It should now inherit the priority from sem, _before_ it gets
 * re-added to any queue.
 */
typedef long (*inherit_priority_t) (struct pi_semaphore *sem,
				    struct task_struct *new_owner);

/* Called when the current task releases a semaphore from which it might
 * have inherited a priority.
 */
typedef long (*return_priority_t) (struct pi_semaphore *sem);

/* Called when a task tries to acquire a semaphore and fails. Check if its
 * priority is higher than that of the current holder.
 */
typedef long (*pi_block_t) (struct pi_semaphore *sem, struct task_struct *t);




/********************* sys call backends ********************/
/* This function causes the caller to sleep until the next release */
typedef long (*complete_job_t) (void);

typedef long (*admit_task_t)(struct task_struct* tsk);

typedef void (*release_at_t)(struct task_struct *t, lt_t start);

struct sched_plugin {
	struct list_head	list;
	/* basic info */
	char			*plugin_name;

	/* setup */
	activate_plugin_t	activate_plugin;
	deactivate_plugin_t	deactivate_plugin;

#ifdef CONFIG_SRP
	unsigned int		srp_active;
#endif

	/* scheduler invocation */
	scheduler_tick_t	tick;
	schedule_t		schedule;
	finish_switch_t		finish_switch;

	/* syscall backend */
	complete_job_t		complete_job;
	release_at_t		release_at;

	/* task state changes */
	admit_task_t		admit_task;

	task_new_t		task_new;
	task_wake_up_t		task_wake_up;
	task_block_t		task_block;
	task_exit_t		task_exit;

#ifdef CONFIG_FMLP
	/* priority inheritance */
	unsigned int		fmlp_active;
	inherit_priority_t	inherit_priority;
	return_priority_t	return_priority;
	pi_block_t		pi_block;
#endif
} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));


extern struct sched_plugin *litmus;

int register_sched_plugin(struct sched_plugin* plugin);
struct sched_plugin* find_sched_plugin(const char* name);
int print_sched_plugins(char* buf, int max);

static inline int srp_active(void)
{
#ifdef CONFIG_SRP
	return litmus->srp_active;
#else
	return 0;
#endif
}
static inline int fmlp_active(void)
{
#ifdef CONFIG_FMLP
	return litmus->fmlp_active;
#else
	return 0;
#endif
}

extern struct sched_plugin linux_sched_plugin;

#endif
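A plugin fills in the hooks it implements and registers itself; a deliberately minimal skeleton is sketched below (the bodies are placeholders, and real plugins such as GSN-EDF do considerably more):

	static struct task_struct* demo_schedule(struct task_struct* prev)
	{
		return NULL;	/* nothing to schedule: let Linux tasks run */
	}

	static long demo_admit_task(struct task_struct* tsk)
	{
		return 0;	/* accept everything; a real plugin validates
				 * tsk_rt(tsk)->task_params here */
	}

	static struct sched_plugin demo_plugin = {
		.plugin_name	= "DEMO",
		.schedule	= demo_schedule,
		.admit_task	= demo_admit_task,
	};

	static int __init init_demo(void)
	{
		return register_sched_plugin(&demo_plugin);
	}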
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
new file mode 100644
index 000000000000..afd0391d127b
--- /dev/null
+++ b/include/litmus/sched_trace.h
@@ -0,0 +1,191 @@
/* sched_trace.h -- record scheduler events to a byte stream for offline analysis.
 */
#ifndef _LINUX_SCHED_TRACE_H_
#define _LINUX_SCHED_TRACE_H_

/* all times in nanoseconds */

struct st_trace_header {
	u8	type;	/* Of what type is this record?  */
	u8	cpu;	/* On which CPU was it recorded? */
	u16	pid;	/* PID of the task.              */
	u32	job;	/* The job sequence number.      */
};

#define ST_NAME_LEN 16
struct st_name_data {
	char	cmd[ST_NAME_LEN]; /* The name of the executable of this process. */
};

struct st_param_data {		/* regular params */
	u32	wcet;
	u32	period;
	u32	phase;
	u8	partition;
	u8	__unused[3];
};

struct st_release_data {	/* A job was/is going to be released. */
	u64	release;	/* What's the release time?           */
	u64	deadline;	/* By when must it finish?            */
};

struct st_assigned_data {	/* A job was assigned to a CPU. */
	u64	when;
	u8	target;		/* Where should it execute?     */
	u8	__unused[3];
};

struct st_switch_to_data {	/* A process was switched to on a given CPU. */
	u64	when;		/* When did this occur?                      */
	u32	exec_time;	/* Time the current job has executed.        */
};

struct st_switch_away_data {	/* A process was switched away from on a given CPU. */
	u64	when;
	u64	exec_time;
};

struct st_completion_data {	/* A job completed. */
	u64	when;
	u8	forced:1;	/* Set to 1 if the job overran and the kernel
				 * advanced to the next job automatically;
				 * set to 0 otherwise.
				 */
	u8	__uflags:7;
	u8	__unused[3];
};

struct st_block_data {		/* A task blocks. */
	u64	when;
	u64	__unused;
};

struct st_resume_data {		/* A task resumes. */
	u64	when;
	u64	__unused;
};

struct st_sys_release_data {
	u64	when;
	u64	release;
};

#define DATA(x) struct st_ ## x ## _data x;

typedef enum {
	ST_NAME = 1,	/* Start at one, so that we can spot
			 * uninitialized records. */
	ST_PARAM,
	ST_RELEASE,
	ST_ASSIGNED,
	ST_SWITCH_TO,
	ST_SWITCH_AWAY,
	ST_COMPLETION,
	ST_BLOCK,
	ST_RESUME,
	ST_SYS_RELEASE,
} st_event_record_type_t;

struct st_event_record {
	struct st_trace_header hdr;
	union {
		u64 raw[2];

		DATA(name);
		DATA(param);
		DATA(release);
		DATA(assigned);
		DATA(switch_to);
		DATA(switch_away);
		DATA(completion);
		DATA(block);
		DATA(resume);
		DATA(sys_release);

	} data;
};

#undef DATA

#ifdef __KERNEL__

#include <linux/sched.h>
#include <litmus/feather_trace.h>

#ifdef CONFIG_SCHED_TASK_TRACE

#define SCHED_TRACE(id, callback, task) \
	ft_event1(id, callback, task)
#define SCHED_TRACE2(id, callback, task, xtra) \
	ft_event2(id, callback, task, xtra)

/* provide prototypes; needed on sparc64 */
#ifndef NO_TASK_TRACE_DECLS
feather_callback void do_sched_trace_task_name(unsigned long id,
					       struct task_struct* task);
feather_callback void do_sched_trace_task_param(unsigned long id,
						struct task_struct* task);
feather_callback void do_sched_trace_task_release(unsigned long id,
						  struct task_struct* task);
feather_callback void do_sched_trace_task_switch_to(unsigned long id,
						    struct task_struct* task);
feather_callback void do_sched_trace_task_switch_away(unsigned long id,
						      struct task_struct* task);
feather_callback void do_sched_trace_task_completion(unsigned long id,
						     struct task_struct* task,
						     unsigned long forced);
feather_callback void do_sched_trace_task_block(unsigned long id,
						struct task_struct* task);
feather_callback void do_sched_trace_task_resume(unsigned long id,
						 struct task_struct* task);
feather_callback void do_sched_trace_sys_release(unsigned long id,
						 lt_t* start);
#endif

#else

#define SCHED_TRACE(id, callback, task)        /* no tracing */
#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */

#endif


#define SCHED_TRACE_BASE_ID 500


#define sched_trace_task_name(t) \
	SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, do_sched_trace_task_name, t)
#define sched_trace_task_param(t) \
	SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, do_sched_trace_task_param, t)
#define sched_trace_task_release(t) \
	SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, do_sched_trace_task_release, t)
#define sched_trace_task_switch_to(t) \
	SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, do_sched_trace_task_switch_to, t)
#define sched_trace_task_switch_away(t) \
	SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t)
#define sched_trace_task_completion(t, forced) \
	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \
		     forced)
#define sched_trace_task_block(t) \
	SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t)
#define sched_trace_task_resume(t) \
	SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t)

#define sched_trace_sys_release(when) \
	SCHED_TRACE(SCHED_TRACE_BASE_ID + 9, do_sched_trace_sys_release, when)

#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */

#ifdef CONFIG_SCHED_DEBUG_TRACE
void sched_trace_log_message(const char* fmt, ...);
void dump_trace_buffer(int max);
#else

#define sched_trace_log_message(fmt, ...)

#endif

#endif /* __KERNEL__ */

#endif
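Each sched_trace_*() macro is a Feather-Trace event with a fixed id (SCHED_TRACE_BASE_ID + n), so plugins simply invoke them at the matching state change, and they compile away when CONFIG_SCHED_TASK_TRACE is off. A sketch (the surrounding release path is hypothetical):

	static void demo_job_release(struct task_struct* t)
	{
		/* emits an st_release record (id 503) carrying the job's
		 * release time and deadline when tracing is enabled */
		sched_trace_task_release(t);
	}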
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
new file mode 100644
index 000000000000..e8e0c7b6cc6a
--- /dev/null
+++ b/include/litmus/trace.h
@@ -0,0 +1,113 @@
#ifndef _SYS_TRACE_H_
#define _SYS_TRACE_H_

#ifdef CONFIG_SCHED_OVERHEAD_TRACE

#include <litmus/feather_trace.h>
#include <litmus/feather_buffer.h>


/*********************** TIMESTAMPS ************************/

enum task_type_marker {
	TSK_BE,
	TSK_RT,
	TSK_UNKNOWN
};

struct timestamp {
	uint64_t	timestamp;
	uint32_t	seq_no;
	uint8_t		cpu;
	uint8_t		event;
	uint8_t		task_type;
};

/* tracing callbacks */
feather_callback void save_timestamp(unsigned long event);
feather_callback void save_timestamp_def(unsigned long event, unsigned long type);
feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr);
feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu);


#define TIMESTAMP(id) ft_event0(id, save_timestamp)

#define DTIMESTAMP(id, def) ft_event1(id, save_timestamp_def, def)

#define TTIMESTAMP(id, task) \
	ft_event1(id, save_timestamp_task, (unsigned long) task)

#define CTIMESTAMP(id, cpu) \
	ft_event1(id, save_timestamp_cpu, cpu)

#else /* !CONFIG_SCHED_OVERHEAD_TRACE */

#define TIMESTAMP(id)        /* no tracing */

#define DTIMESTAMP(id, def)  /* no tracing */

#define TTIMESTAMP(id, task) /* no tracing */

#define CTIMESTAMP(id, cpu)  /* no tracing */

#endif


/* Convention for timestamps
 * =========================
 *
 * In order to process the trace files with a common tool, we use the following
 * convention to measure execution times: The end time id of a code segment is
 * always the next number after the start time event id.
 */

#define TS_SCHED_START		DTIMESTAMP(100, TSK_UNKNOWN) /* we only care about next */
#define TS_SCHED_END(t)		TTIMESTAMP(101, t)
#define TS_SCHED2_START(t)	TTIMESTAMP(102, t)
#define TS_SCHED2_END(t)	TTIMESTAMP(103, t)

#define TS_CXS_START(t)		TTIMESTAMP(104, t)
#define TS_CXS_END(t)		TTIMESTAMP(105, t)

#define TS_RELEASE_START	DTIMESTAMP(106, TSK_RT)
#define TS_RELEASE_END		DTIMESTAMP(107, TSK_RT)

#define TS_TICK_START(t)	TTIMESTAMP(110, t)
#define TS_TICK_END(t)		TTIMESTAMP(111, t)


#define TS_PLUGIN_SCHED_START	/* TIMESTAMP(120) */ /* currently unused */
#define TS_PLUGIN_SCHED_END	/* TIMESTAMP(121) */

#define TS_PLUGIN_TICK_START	/* TIMESTAMP(130) */
#define TS_PLUGIN_TICK_END	/* TIMESTAMP(131) */

#define TS_ENTER_NP_START	TIMESTAMP(140)
#define TS_ENTER_NP_END		TIMESTAMP(141)

#define TS_EXIT_NP_START	TIMESTAMP(150)
#define TS_EXIT_NP_END		TIMESTAMP(151)

#define TS_SRP_UP_START		TIMESTAMP(160)
#define TS_SRP_UP_END		TIMESTAMP(161)
#define TS_SRP_DOWN_START	TIMESTAMP(162)
#define TS_SRP_DOWN_END		TIMESTAMP(163)

#define TS_PI_UP_START		TIMESTAMP(170)
#define TS_PI_UP_END		TIMESTAMP(171)
#define TS_PI_DOWN_START	TIMESTAMP(172)
#define TS_PI_DOWN_END		TIMESTAMP(173)

#define TS_FIFO_UP_START	TIMESTAMP(180)
#define TS_FIFO_UP_END		TIMESTAMP(181)
#define TS_FIFO_DOWN_START	TIMESTAMP(182)
#define TS_FIFO_DOWN_END	TIMESTAMP(183)

#define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c)
#define TS_SEND_RESCHED_END	 DTIMESTAMP(191, TSK_UNKNOWN)


#endif /* !_SYS_TRACE_H_ */
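Because an end id is, by the convention above, always start id + 1, a post-processing tool can pair start/end samples generically and report the elapsed time as overhead. Instrumenting a measured section is then just a matter of bracketing it (the context-switch path shown is a hypothetical example):

	void demo_measured_section(struct task_struct* next)
	{
		TS_CXS_START(next);	/* timestamp event 104           */
		/* ... code whose overhead is being measured ... */
		TS_CXS_END(next);	/* timestamp event 105 = 104 + 1 */
	}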