path: root/include
Diffstat (limited to 'include')
-rw-r--r--  include/linux/completion.h        1
-rw-r--r--  include/linux/fs.h               21
-rw-r--r--  include/linux/hrtimer.h          32
-rw-r--r--  include/linux/sched.h            19
-rw-r--r--  include/linux/smp.h               5
-rw-r--r--  include/linux/tick.h              5
-rw-r--r--  include/litmus/bheap.h           77
-rw-r--r--  include/litmus/budget.h           8
-rw-r--r--  include/litmus/clustered.h       44
-rw-r--r--  include/litmus/debug_trace.h     37
-rw-r--r--  include/litmus/edf_common.h      25
-rw-r--r--  include/litmus/fdso.h            71
-rw-r--r--  include/litmus/feather_buffer.h  94
-rw-r--r--  include/litmus/feather_trace.h   65
-rw-r--r--  include/litmus/ftdev.h           52
-rw-r--r--  include/litmus/jobs.h             9
-rw-r--r--  include/litmus/litmus.h         246
-rw-r--r--  include/litmus/litmus_proc.h     25
-rw-r--r--  include/litmus/locking.h         28
-rw-r--r--  include/litmus/preempt.h        164
-rw-r--r--  include/litmus/rt_domain.h      182
-rw-r--r--  include/litmus/rt_param.h       203
-rw-r--r--  include/litmus/sched_plugin.h   111
-rw-r--r--  include/litmus/sched_trace.h    200
-rw-r--r--  include/litmus/srp.h             28
-rw-r--r--  include/litmus/trace.h          103
-rw-r--r--  include/litmus/unistd_32.h       21
-rw-r--r--  include/litmus/unistd_64.h       33
28 files changed, 1897 insertions(+), 12 deletions(-)
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 51494e6b5548..9d727271c9fe 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -90,6 +90,7 @@ extern bool completion_done(struct completion *x);
 
 extern void complete(struct completion *);
 extern void complete_all(struct completion *);
+extern void complete_n(struct completion *, int n);
 
 /**
  * INIT_COMPLETION - reinitialize a completion structure
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b5b979247863..8d5834bcb891 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -17,8 +17,8 @@
  * nr_file rlimit, so it's safe to set up a ridiculously high absolute
  * upper limit on files-per-process.
  *
  * Some programs (notably those using select()) may have to be
  * recompiled to take full advantage of the new limits..
  */
 
 /* Fixed constants first: */
@@ -172,7 +172,7 @@ struct inodes_stat_t {
 #define SEL_EX		4
 
 /* public flags for file_system_type */
 #define FS_REQUIRES_DEV 1
 #define FS_BINARY_MOUNTDATA 2
 #define FS_HAS_SUBTYPE 4
 #define FS_REVAL_DOT	16384	/* Check the paths ".", ".." for staleness */
@@ -480,7 +480,7 @@ struct iattr {
  */
 #include <linux/quota.h>
 
 /**
  * enum positive_aop_returns - aop return codes with specific semantics
  *
  * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
@@ -490,7 +490,7 @@ struct iattr {
  *			be a candidate for writeback again in the near
  *			future. Other callers must be careful to unlock
  *			the page if they get this return. Returned by
  *			writepage();
  *
  * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
  *			unlocked it and the page might have been truncated.
@@ -734,6 +734,7 @@ static inline int mapping_writably_mapped(struct address_space *mapping)
 
 struct posix_acl;
 #define ACL_NOT_CACHED ((void *)(-1))
+struct inode_obj_id_table;
 
 struct inode {
 	/* RCU path lookup touches following: */
@@ -807,6 +808,8 @@ struct inode {
 	struct posix_acl	*i_acl;
 	struct posix_acl	*i_default_acl;
 #endif
+	struct list_head	i_obj_list;
+	struct mutex		i_obj_mutex;
 	void			*i_private; /* fs or device private pointer */
 };
 
@@ -1032,10 +1035,10 @@ static inline int file_check_writeable(struct file *filp)
 
 #define MAX_NON_LFS	((1UL<<31) - 1)
 
 /* Page cache limit. The filesystems should put that into their s_maxbytes
    limits, otherwise bad things can happen in VM. */
 #if BITS_PER_LONG==32
 #define MAX_LFS_FILESIZE	(((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
 #elif BITS_PER_LONG==64
 #define MAX_LFS_FILESIZE	0x7fffffffffffffffUL
 #endif
@@ -2234,7 +2237,7 @@ extern void free_write_pipe(struct file *);
 
 extern int kernel_read(struct file *, loff_t, char *, unsigned long);
 extern struct file * open_exec(const char *);
 
 /* fs/dcache.c -- generic fs support functions */
 extern int is_subdir(struct dentry *, struct dentry *);
 extern int path_is_under(struct path *, struct path *);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index fd0dc30c9f15..d91bba539ca8 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -174,6 +174,7 @@ enum hrtimer_base_type {
  * @nr_hangs:		Total number of hrtimer interrupt hangs
  * @max_hang_time:	Maximum time spent in hrtimer_interrupt
  * @clock_base:		array of clock bases for this cpu
+ * @to_pull:		LITMUS^RT list of timers to be pulled on this cpu
  */
 struct hrtimer_cpu_base {
 	raw_spinlock_t			lock;
@@ -188,8 +189,32 @@ struct hrtimer_cpu_base {
 	ktime_t				max_hang_time;
 #endif
 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
+	struct list_head		to_pull;
 };
 
+#ifdef CONFIG_ARCH_HAS_SEND_PULL_TIMERS
+
+#define HRTIMER_START_ON_INACTIVE	0
+#define HRTIMER_START_ON_QUEUED		1
+
+/*
+ * struct hrtimer_start_on_info - save timer info on remote cpu
+ * @list:	list of hrtimer_start_on_info on remote cpu (to_pull)
+ * @timer:	timer to be triggered on remote cpu
+ * @time:	time event
+ * @mode:	timer mode
+ * @state:	activity flag
+ */
+struct hrtimer_start_on_info {
+	struct list_head	list;
+	struct hrtimer		*timer;
+	ktime_t			time;
+	enum hrtimer_mode	mode;
+	atomic_t		state;
+};
+
+#endif
+
 static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
 {
 	timer->node.expires = time;
@@ -355,6 +380,13 @@ __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 			 unsigned long delta_ns,
 			 const enum hrtimer_mode mode, int wakeup);
 
+#ifdef CONFIG_ARCH_HAS_SEND_PULL_TIMERS
+extern void hrtimer_start_on_info_init(struct hrtimer_start_on_info *info);
+extern int hrtimer_start_on(int cpu, struct hrtimer_start_on_info *info,
+			struct hrtimer *timer, ktime_t time,
+			const enum hrtimer_mode mode);
+#endif
+
 extern int hrtimer_cancel(struct hrtimer *timer);
 extern int hrtimer_try_to_cancel(struct hrtimer *timer);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 14a6c7b545de..9c990d13ae35 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -39,6 +39,7 @@
 #define SCHED_BATCH		3
 /* SCHED_ISO: reserved but not implemented yet */
 #define SCHED_IDLE		5
+#define SCHED_LITMUS		6
 /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
 #define SCHED_RESET_ON_FORK     0x40000000
 
@@ -93,6 +94,9 @@ struct sched_param {
 
 #include <asm/processor.h>
 
+#include <litmus/rt_param.h>
+#include <litmus/preempt.h>
+
 struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
@@ -1209,6 +1213,7 @@ struct sched_rt_entity {
 };
 
 struct rcu_node;
+struct od_table_entry;
 
 enum perf_event_task_context {
 	perf_invalid_context = -1,
@@ -1313,9 +1318,9 @@ struct task_struct {
 	unsigned long stack_canary;
 #endif
 
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
 	 * p->real_parent->pid)
 	 */
 	struct task_struct *real_parent; /* real parent process */
@@ -1526,6 +1531,13 @@ struct task_struct {
 	int make_it_fail;
 #endif
 	struct prop_local_single dirties;
+
+	/* LITMUS RT parameters and state */
+	struct rt_param rt_param;
+
+	/* references to PI semaphores, etc. */
+	struct od_table_entry *od_table;
+
 #ifdef CONFIG_LATENCYTOP
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
@@ -2136,7 +2148,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s
 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 
 	return ret;
 }
 
 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
 			      sigset_t *mask);
@@ -2446,6 +2458,7 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
 static inline void set_tsk_need_resched(struct task_struct *tsk)
 {
 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+	sched_state_will_schedule(tsk);
 }
 
 static inline void clear_tsk_need_resched(struct task_struct *tsk)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 8cc38d3bab0c..53b1beef27ad 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -82,6 +82,11 @@ int smp_call_function_any(const struct cpumask *mask,
 			  smp_call_func_t func, void *info, int wait);
 
 /*
+ * sends a 'pull timer' event to a remote CPU
+ */
+extern void smp_send_pull_timers(int cpu);
+
+/*
  * Generic and arch helpers
  */
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b232ccc0ee29..1e29bd5b18af 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -74,6 +74,11 @@ extern int tick_is_oneshot_available(void);
 extern struct tick_device *tick_get_device(int cpu);
 
 # ifdef CONFIG_HIGH_RES_TIMERS
+/* LITMUS^RT tick alignment */
+#define LINUX_DEFAULT_TICKS	0
+#define LITMUS_ALIGNED_TICKS	1
+#define LITMUS_STAGGERED_TICKS	2
+
 extern int tick_init_highres(void);
 extern int tick_program_event(ktime_t expires, int force);
 extern void tick_setup_sched_timer(void);
diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h
new file mode 100644
index 000000000000..cf4864a498d8
--- /dev/null
+++ b/include/litmus/bheap.h
@@ -0,0 +1,77 @@
1/* bheaps.h -- Binomial Heaps
2 *
3 * (c) 2008, 2009 Bjoern Brandenburg
4 */
5
6#ifndef BHEAP_H
7#define BHEAP_H
8
9#define NOT_IN_HEAP UINT_MAX
10
11struct bheap_node {
12 struct bheap_node* parent;
13 struct bheap_node* next;
14 struct bheap_node* child;
15
16 unsigned int degree;
17 void* value;
18 struct bheap_node** ref;
19};
20
21struct bheap {
22 struct bheap_node* head;
23 /* We cache the minimum of the heap.
24 * This speeds up repeated peek operations.
25 */
26 struct bheap_node* min;
27};
28
29typedef int (*bheap_prio_t)(struct bheap_node* a, struct bheap_node* b);
30
31void bheap_init(struct bheap* heap);
32void bheap_node_init(struct bheap_node** ref_to_bheap_node_ptr, void* value);
33
34static inline int bheap_node_in_heap(struct bheap_node* h)
35{
36 return h->degree != NOT_IN_HEAP;
37}
38
39static inline int bheap_empty(struct bheap* heap)
40{
41 return heap->head == NULL && heap->min == NULL;
42}
43
44/* insert (and reinitialize) a node into the heap */
45void bheap_insert(bheap_prio_t higher_prio,
46 struct bheap* heap,
47 struct bheap_node* node);
48
49/* merge addition into target */
50void bheap_union(bheap_prio_t higher_prio,
51 struct bheap* target,
52 struct bheap* addition);
53
54struct bheap_node* bheap_peek(bheap_prio_t higher_prio,
55 struct bheap* heap);
56
57struct bheap_node* bheap_take(bheap_prio_t higher_prio,
58 struct bheap* heap);
59
60void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap);
61int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node);
62
63void bheap_delete(bheap_prio_t higher_prio,
64 struct bheap* heap,
65 struct bheap_node* node);
66
67/* allocate from memcache */
68struct bheap_node* bheap_node_alloc(int gfp_flags);
69void bheap_node_free(struct bheap_node* hn);
70
71/* allocate a heap node for value and insert into the heap */
72int bheap_add(bheap_prio_t higher_prio, struct bheap* heap,
73 void* value, int gfp_flags);
74
75void* bheap_take_del(bheap_prio_t higher_prio,
76 struct bheap* heap);
77#endif
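A minimal usage sketch (not part of the patch) for the binomial-heap API above: struct demo_job, demo_prio() and demo() are invented for illustration; bheap_add() allocates the node from the memcache, and bheap_take() returns the highest-priority node, whose ->value is the pointer that was added.

static int demo_prio(struct bheap_node *a, struct bheap_node *b)
{
	struct demo_job *ja = a->value, *jb = b->value;
	return ja->deadline < jb->deadline;	/* earlier deadline = higher priority */
}

struct demo_job {
	unsigned long long deadline;
};

static void demo(struct demo_job *j1, struct demo_job *j2)
{
	struct bheap heap;
	struct bheap_node *hn;

	bheap_init(&heap);
	bheap_add(demo_prio, &heap, j1, GFP_ATOMIC);
	bheap_add(demo_prio, &heap, j2, GFP_ATOMIC);

	/* hn->value is whichever of j1/j2 has the earlier deadline */
	hn = bheap_take(demo_prio, &heap);
	(void) hn;
}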
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
new file mode 100644
index 000000000000..732530e63491
--- /dev/null
+++ b/include/litmus/budget.h
@@ -0,0 +1,8 @@
1#ifndef _LITMUS_BUDGET_H_
2#define _LITMUS_BUDGET_H_
3
4/* Update the per-processor enforcement timer (arm/reprogram/cancel) for
5 * the next task. */
6void update_enforcement_timer(struct task_struct* t);
7
8#endif
diff --git a/include/litmus/clustered.h b/include/litmus/clustered.h
new file mode 100644
index 000000000000..0c18dcb15e6c
--- /dev/null
+++ b/include/litmus/clustered.h
@@ -0,0 +1,44 @@
1#ifndef CLUSTERED_H
2#define CLUSTERED_H
3
4/* Which cache level should be used to group CPUs into clusters?
5 * GLOBAL_CLUSTER means that all CPUs form a single cluster (just like under
6 * global scheduling).
7 */
8enum cache_level {
9 GLOBAL_CLUSTER = 0,
10 L1_CLUSTER = 1,
11 L2_CLUSTER = 2,
12 L3_CLUSTER = 3
13};
14
15int parse_cache_level(const char *str, enum cache_level *level);
16const char* cache_level_name(enum cache_level level);
17
18/* expose a cache level in a /proc dir */
19struct proc_dir_entry* create_cluster_file(struct proc_dir_entry* parent,
20 enum cache_level* level);
21
22
23
24struct scheduling_cluster {
25 unsigned int id;
26 /* list of CPUs that are part of this cluster */
27 struct list_head cpus;
28};
29
30struct cluster_cpu {
31 unsigned int id; /* which CPU is this? */
32 struct list_head cluster_list; /* List of the CPUs in this cluster. */
33 struct scheduling_cluster* cluster; /* The cluster that this CPU belongs to. */
34};
35
36int get_cluster_size(enum cache_level level);
37
38int assign_cpus_to_clusters(enum cache_level level,
39 struct scheduling_cluster* clusters[],
40 unsigned int num_clusters,
41 struct cluster_cpu* cpus[],
42 unsigned int num_cpus);
43
44#endif
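An illustrative sketch (not part of the patch) of how a plugin might group four CPUs into L2-based clusters with the helpers above. The statically sized arrays and demo_* names are made up to keep the example short; a real plugin would size them from get_cluster_size() and the number of online CPUs.

static struct scheduling_cluster demo_clusters[2];
static struct cluster_cpu demo_cpus[4];
static struct scheduling_cluster *demo_cluster_ptrs[2] =
	{ &demo_clusters[0], &demo_clusters[1] };
static struct cluster_cpu *demo_cpu_ptrs[4] =
	{ &demo_cpus[0], &demo_cpus[1], &demo_cpus[2], &demo_cpus[3] };

static int demo_build_clusters(void)
{
	/* fills in ids, the per-cluster CPU lists, and the back-pointers */
	return assign_cpus_to_clusters(L2_CLUSTER,
				       demo_cluster_ptrs, 2,
				       demo_cpu_ptrs, 4);
}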
diff --git a/include/litmus/debug_trace.h b/include/litmus/debug_trace.h
new file mode 100644
index 000000000000..48d086d5a44c
--- /dev/null
+++ b/include/litmus/debug_trace.h
@@ -0,0 +1,37 @@
1#ifndef LITMUS_DEBUG_TRACE_H
2#define LITMUS_DEBUG_TRACE_H
3
4#ifdef CONFIG_SCHED_DEBUG_TRACE
5void sched_trace_log_message(const char* fmt, ...);
6void dump_trace_buffer(int max);
7#else
8
9#define sched_trace_log_message(fmt, ...)
10
11#endif
12
13extern atomic_t __log_seq_no;
14
15#ifdef CONFIG_SCHED_DEBUG_TRACE_CALLER
16#define TRACE_PREFIX "%d P%d [%s@%s:%d]: "
17#define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \
18 raw_smp_processor_id(), \
19 __FUNCTION__, __FILE__, __LINE__
20#else
21#define TRACE_PREFIX "%d P%d: "
22#define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \
23 raw_smp_processor_id()
24#endif
25
26#define TRACE(fmt, args...) \
27 sched_trace_log_message(TRACE_PREFIX fmt, \
28 TRACE_ARGS, ## args)
29
30#define TRACE_TASK(t, fmt, args...) \
31 TRACE("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \
32 (t)->rt_param.job_params.job_no, ##args)
33
34#define TRACE_CUR(fmt, args...) \
35 TRACE_TASK(current, fmt, ## args)
36
37#endif
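Illustrative usage (not part of the patch): the macros above expand to sched_trace_log_message() calls that prefix a sequence number and the CPU; demo_trace() is made up.

static void demo_trace(struct task_struct *t)
{
	TRACE("checking task %s/%d\n", t->comm, t->pid);
	TRACE_TASK(t, "became eligible\n");	/* adds comm/pid/job_no prefix */
	TRACE_CUR("done\n");			/* same, for 'current' */
}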
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
new file mode 100644
index 000000000000..bbaf22ea7f12
--- /dev/null
+++ b/include/litmus/edf_common.h
@@ -0,0 +1,25 @@
1/*
2 * EDF common data structures and utility functions shared by all EDF
3 * based scheduler plugins
4 */
5
6/* CLEANUP: Add comments and make it less messy.
7 *
8 */
9
10#ifndef __UNC_EDF_COMMON_H__
11#define __UNC_EDF_COMMON_H__
12
13#include <litmus/rt_domain.h>
14
15void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
16 release_jobs_t release);
17
18int edf_higher_prio(struct task_struct* first,
19 struct task_struct* second);
20
21int edf_ready_order(struct bheap_node* a, struct bheap_node* b);
22
23int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t);
24
25#endif
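A sketch (not part of the patch) of a plugin-side check with the EDF helpers above: if the head of the ready queue has higher priority than the currently scheduled task, trigger a local reschedule. demo_check_preempt() and its arguments are invented; litmus_reschedule_local() comes from litmus/preempt.h.

static void demo_check_preempt(rt_domain_t *demo_domain,
			       struct task_struct *demo_current)
{
	if (edf_preemption_needed(demo_domain, demo_current))
		litmus_reschedule_local();
}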
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
new file mode 100644
index 000000000000..caf2a1e6918c
--- /dev/null
+++ b/include/litmus/fdso.h
@@ -0,0 +1,71 @@
1/* fdso.h - file descriptor attached shared objects
2 *
3 * (c) 2007 B. Brandenburg, LITMUS^RT project
4 */
5
6#ifndef _LINUX_FDSO_H_
7#define _LINUX_FDSO_H_
8
9#include <linux/list.h>
10#include <asm/atomic.h>
11
12#include <linux/fs.h>
13#include <linux/slab.h>
14
15#define MAX_OBJECT_DESCRIPTORS 32
16
17typedef enum {
18 MIN_OBJ_TYPE = 0,
19
20 FMLP_SEM = 0,
21 SRP_SEM = 1,
22
23 MAX_OBJ_TYPE = 1
24} obj_type_t;
25
26struct inode_obj_id {
27 struct list_head list;
28 atomic_t count;
29 struct inode* inode;
30
31 obj_type_t type;
32 void* obj;
33 unsigned int id;
34};
35
36struct fdso_ops;
37
38struct od_table_entry {
39 unsigned int used;
40
41 struct inode_obj_id* obj;
42 const struct fdso_ops* class;
43};
44
45struct fdso_ops {
46 int (*create)(void** obj_ref, obj_type_t type, void* __user);
47 void (*destroy)(obj_type_t type, void*);
48 int (*open) (struct od_table_entry*, void* __user);
49 int (*close) (struct od_table_entry*);
50};
51
52/* translate a userspace supplied od into the raw table entry
53 * returns NULL if od is invalid
54 */
55struct od_table_entry* get_entry_for_od(int od);
56
57/* translate a userspace supplied od into the associated object
58 * returns NULL if od is invalid
59 */
60static inline void* od_lookup(int od, obj_type_t type)
61{
62 struct od_table_entry* e = get_entry_for_od(od);
63 return e && e->obj->type == type ? e->obj->obj : NULL;
64}
65
66#define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM))
67#define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM))
68#define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID))
69
70
71#endif
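Illustrative sketch (not part of the patch): translating a user-supplied object descriptor into an SRP semaphore via the lookup macro above. struct srp_semaphore is only forward-declared here; its definition lives elsewhere in the patch.

struct srp_semaphore;

static struct srp_semaphore *demo_get_srp_sem(int od)
{
	/* NULL if od is out of range, unused, or refers to another type */
	return lookup_srp_sem(od);
}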
diff --git a/include/litmus/feather_buffer.h b/include/litmus/feather_buffer.h
new file mode 100644
index 000000000000..6c18277fdfc9
--- /dev/null
+++ b/include/litmus/feather_buffer.h
@@ -0,0 +1,94 @@
1#ifndef _FEATHER_BUFFER_H_
2#define _FEATHER_BUFFER_H_
3
4/* requires UINT_MAX and memcpy */
5
6#define SLOT_FREE 0
7#define SLOT_BUSY 1
8#define SLOT_READY 2
9
10struct ft_buffer {
11 unsigned int slot_count;
12 unsigned int slot_size;
13
14 int free_count;
15 unsigned int write_idx;
16 unsigned int read_idx;
17
18 char* slots;
19 void* buffer_mem;
20 unsigned int failed_writes;
21};
22
23static inline int init_ft_buffer(struct ft_buffer* buf,
24 unsigned int slot_count,
25 unsigned int slot_size,
26 char* slots,
27 void* buffer_mem)
28{
29 int i = 0;
30 if (!slot_count || UINT_MAX % slot_count != slot_count - 1) {
31 /* The slot count must divide UINT_MAX + 1 so that when it
32 * wraps around the index correctly points to 0.
33 */
34 return 0;
35 } else {
36 buf->slot_count = slot_count;
37 buf->slot_size = slot_size;
38 buf->slots = slots;
39 buf->buffer_mem = buffer_mem;
40 buf->free_count = slot_count;
41 buf->write_idx = 0;
42 buf->read_idx = 0;
43 buf->failed_writes = 0;
44 for (i = 0; i < slot_count; i++)
45 buf->slots[i] = SLOT_FREE;
46 return 1;
47 }
48}
49
50static inline int ft_buffer_start_write(struct ft_buffer* buf, void **ptr)
51{
52 int free = fetch_and_dec(&buf->free_count);
53 unsigned int idx;
54 if (free <= 0) {
55 fetch_and_inc(&buf->free_count);
56 *ptr = 0;
57 fetch_and_inc(&buf->failed_writes);
58 return 0;
59 } else {
60 idx = fetch_and_inc((int*) &buf->write_idx) % buf->slot_count;
61 buf->slots[idx] = SLOT_BUSY;
62 *ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size;
63 return 1;
64 }
65}
66
67static inline void ft_buffer_finish_write(struct ft_buffer* buf, void *ptr)
68{
69 unsigned int idx = ((char*) ptr - (char*) buf->buffer_mem) / buf->slot_size;
70 buf->slots[idx] = SLOT_READY;
71}
72
73
74/* exclusive reader access is assumed */
75static inline int ft_buffer_read(struct ft_buffer* buf, void* dest)
76{
77 unsigned int idx;
78 if (buf->free_count == buf->slot_count)
79 /* nothing available */
80 return 0;
81 idx = buf->read_idx % buf->slot_count;
82 if (buf->slots[idx] == SLOT_READY) {
83 memcpy(dest, ((char*) buf->buffer_mem) + idx * buf->slot_size,
84 buf->slot_size);
85 buf->slots[idx] = SLOT_FREE;
86 buf->read_idx++;
87 fetch_and_inc(&buf->free_count);
88 return 1;
89 } else
90 return 0;
91}
92
93
94#endif
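A sketch (not part of the patch) of one writer/reader round trip through an ft_buffer. The slot count must be a power of two so that it divides UINT_MAX + 1; fetch_and_inc()/fetch_and_dec() are provided by litmus/feather_trace.h, and all demo_* names are made up.

#define DEMO_SLOTS	256
#define DEMO_SLOT_SIZE	sizeof(unsigned long long)

static char demo_slot_states[DEMO_SLOTS];
static unsigned long long demo_slot_mem[DEMO_SLOTS];
static struct ft_buffer demo_buf;

static void demo_ft_buffer(void)
{
	unsigned long long *slot, out;

	init_ft_buffer(&demo_buf, DEMO_SLOTS, DEMO_SLOT_SIZE,
		       demo_slot_states, demo_slot_mem);

	if (ft_buffer_start_write(&demo_buf, (void **) &slot)) {
		*slot = 42;				 /* fill the claimed slot */
		ft_buffer_finish_write(&demo_buf, slot); /* mark it SLOT_READY */
	}

	if (ft_buffer_read(&demo_buf, &out)) {
		/* out == 42; the slot is SLOT_FREE again */
	}
}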
diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h
new file mode 100644
index 000000000000..028dfb206fb0
--- /dev/null
+++ b/include/litmus/feather_trace.h
@@ -0,0 +1,65 @@
1#ifndef _FEATHER_TRACE_H_
2#define _FEATHER_TRACE_H_
3
4#include <asm/atomic.h>
5
6int ft_enable_event(unsigned long id);
7int ft_disable_event(unsigned long id);
8int ft_is_event_enabled(unsigned long id);
9int ft_disable_all_events(void);
10
11/* atomic_* functions are inline anyway */
12static inline int fetch_and_inc(int *val)
13{
14 return atomic_add_return(1, (atomic_t*) val) - 1;
15}
16
17static inline int fetch_and_dec(int *val)
18{
19 return atomic_sub_return(1, (atomic_t*) val) + 1;
20}
21
22/* Don't use rewriting implementation if kernel text pages are read-only.
23 * Ftrace gets around this by using the identity mapping, but that's more
24 * effort than is warranted right now for Feather-Trace.
25 * Eventually, it may make sense to replace Feather-Trace with ftrace.
26 */
27#if defined(CONFIG_ARCH_HAS_FEATHER_TRACE) && !defined(CONFIG_DEBUG_RODATA)
28
29#include <asm/feather_trace.h>
30
31#else /* !__ARCH_HAS_FEATHER_TRACE */
32
33/* provide default implementation */
34
35#include <asm/timex.h> /* for get_cycles() */
36
37static inline unsigned long long ft_timestamp(void)
38{
39 return get_cycles();
40}
41
42#define feather_callback
43
44#define MAX_EVENTS 1024
45
46extern int ft_events[MAX_EVENTS];
47
48#define ft_event(id, callback) \
49 if (ft_events[id]) callback();
50
51#define ft_event0(id, callback) \
52 if (ft_events[id]) callback(id);
53
54#define ft_event1(id, callback, param) \
55 if (ft_events[id]) callback(id, param);
56
57#define ft_event2(id, callback, param, param2) \
58 if (ft_events[id]) callback(id, param, param2);
59
60#define ft_event3(id, callback, p, p2, p3) \
61 if (ft_events[id]) callback(id, p, p2, p3);
62
63#endif /* __ARCH_HAS_FEATHER_TRACE */
64
65#endif
diff --git a/include/litmus/ftdev.h b/include/litmus/ftdev.h
new file mode 100644
index 000000000000..348387e9adf9
--- /dev/null
+++ b/include/litmus/ftdev.h
@@ -0,0 +1,52 @@
1#ifndef _LITMUS_FTDEV_H_
2#define _LITMUS_FTDEV_H_
3
4#include <litmus/feather_trace.h>
5#include <litmus/feather_buffer.h>
6#include <linux/mutex.h>
7#include <linux/cdev.h>
8
9#define FTDEV_ENABLE_CMD 0
10#define FTDEV_DISABLE_CMD 1
11
12struct ftdev;
13
14/* return 0 if buffer can be opened, otherwise -$REASON */
15typedef int (*ftdev_can_open_t)(struct ftdev* dev, unsigned int buf_no);
16/* return 0 on success, otherwise -$REASON */
17typedef int (*ftdev_alloc_t)(struct ftdev* dev, unsigned int buf_no);
18typedef void (*ftdev_free_t)(struct ftdev* dev, unsigned int buf_no);
19
20
21struct ftdev_event;
22
23struct ftdev_minor {
24 struct ft_buffer* buf;
25 unsigned int readers;
26 struct mutex lock;
27 /* FIXME: filter for authorized events */
28 struct ftdev_event* events;
29 struct device* device;
30};
31
32struct ftdev {
33 dev_t major;
34 struct cdev cdev;
35 struct class* class;
36 const char* name;
37 struct ftdev_minor* minor;
38 unsigned int minor_cnt;
39 ftdev_alloc_t alloc;
40 ftdev_free_t free;
41 ftdev_can_open_t can_open;
42};
43
44struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size);
45void free_ft_buffer(struct ft_buffer* buf);
46
47int ftdev_init( struct ftdev* ftdev, struct module* owner,
48 const int minor_cnt, const char* name);
49void ftdev_exit(struct ftdev* ftdev);
50int register_ftdev(struct ftdev* ftdev);
51
52#endif
diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h
new file mode 100644
index 000000000000..9bd361ef3943
--- /dev/null
+++ b/include/litmus/jobs.h
@@ -0,0 +1,9 @@
1#ifndef __LITMUS_JOBS_H__
2#define __LITMUS_JOBS_H__
3
4void prepare_for_next_period(struct task_struct *t);
5void release_at(struct task_struct *t, lt_t start);
6long complete_job(void);
7
8#endif
9
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
new file mode 100644
index 000000000000..e7769ca36ec0
--- /dev/null
+++ b/include/litmus/litmus.h
@@ -0,0 +1,246 @@
1/*
2 * Constant definitions related to
3 * scheduling policy.
4 */
5
6#ifndef _LINUX_LITMUS_H_
7#define _LINUX_LITMUS_H_
8
9#include <litmus/debug_trace.h>
10
11#ifdef CONFIG_RELEASE_MASTER
12extern atomic_t release_master_cpu;
13#endif
14
15/* in_list - is a given list_head queued on some list?
16 */
17static inline int in_list(struct list_head* list)
18{
19 return !( /* case 1: deleted */
20 (list->next == LIST_POISON1 &&
21 list->prev == LIST_POISON2)
22 ||
23 /* case 2: initialized */
24 (list->next == list &&
25 list->prev == list)
26 );
27}
28
29struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
30
31#define NO_CPU 0xffffffff
32
33void litmus_fork(struct task_struct *tsk);
34void litmus_exec(void);
35/* clean up real-time state of a task */
36void exit_litmus(struct task_struct *dead_tsk);
37
38long litmus_admit_task(struct task_struct *tsk);
39void litmus_exit_task(struct task_struct *tsk);
40
41#define is_realtime(t) ((t)->policy == SCHED_LITMUS)
42#define rt_transition_pending(t) \
43 ((t)->rt_param.transition_pending)
44
45#define tsk_rt(t) (&(t)->rt_param)
46
47/* Realtime utility macros */
48#define get_rt_flags(t) (tsk_rt(t)->flags)
49#define set_rt_flags(t,f) (tsk_rt(t)->flags=(f))
50#define get_exec_cost(t) (tsk_rt(t)->task_params.exec_cost)
51#define get_exec_time(t) (tsk_rt(t)->job_params.exec_time)
52#define get_rt_period(t) (tsk_rt(t)->task_params.period)
53#define get_rt_phase(t) (tsk_rt(t)->task_params.phase)
54#define get_partition(t) (tsk_rt(t)->task_params.cpu)
55#define get_deadline(t) (tsk_rt(t)->job_params.deadline)
56#define get_release(t) (tsk_rt(t)->job_params.release)
57#define get_class(t) (tsk_rt(t)->task_params.cls)
58
59#define is_priority_boosted(t) (tsk_rt(t)->priority_boosted)
60#define get_boost_start(t) (tsk_rt(t)->boost_start_time)
61
62inline static int budget_exhausted(struct task_struct* t)
63{
64 return get_exec_time(t) >= get_exec_cost(t);
65}
66
67inline static lt_t budget_remaining(struct task_struct* t)
68{
69 if (!budget_exhausted(t))
70 return get_exec_cost(t) - get_exec_time(t);
71 else
72 /* avoid overflow */
73 return 0;
74}
75
76#define budget_enforced(t) (tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT)
77
78#define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \
79 == PRECISE_ENFORCEMENT)
80
81#define is_hrt(t) \
82 (tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
83#define is_srt(t) \
84 (tsk_rt(t)->task_params.cls == RT_CLASS_SOFT)
85#define is_be(t) \
86 (tsk_rt(t)->task_params.cls == RT_CLASS_BEST_EFFORT)
87
88/* Our notion of time within LITMUS: kernel monotonic time. */
89static inline lt_t litmus_clock(void)
90{
91 return ktime_to_ns(ktime_get());
92}
93
94/* A macro to convert from nanoseconds to ktime_t. */
95#define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t)
96
97#define get_domain(t) (tsk_rt(t)->domain)
98
99/* Honor the flag in the preempt_count variable that is set
100 * when scheduling is in progress.
101 */
102#define is_running(t) \
103 ((t)->state == TASK_RUNNING || \
104 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
105
106#define is_blocked(t) \
107 (!is_running(t))
108#define is_released(t, now) \
109 (lt_before_eq(get_release(t), now))
110#define is_tardy(t, now) \
111 (lt_before_eq(tsk_rt(t)->job_params.deadline, now))
112
113/* real-time comparison macros */
114#define earlier_deadline(a, b) (lt_before(\
115 (a)->rt_param.job_params.deadline,\
116 (b)->rt_param.job_params.deadline))
117#define earlier_release(a, b) (lt_before(\
118 (a)->rt_param.job_params.release,\
119 (b)->rt_param.job_params.release))
120
121void preempt_if_preemptable(struct task_struct* t, int on_cpu);
122
123#ifdef CONFIG_LITMUS_LOCKING
124void srp_ceiling_block(void);
125#else
126#define srp_ceiling_block() /* nothing */
127#endif
128
129#define bheap2task(hn) ((struct task_struct*) hn->value)
130
131#ifdef CONFIG_NP_SECTION
132
133static inline int is_kernel_np(struct task_struct *t)
134{
135 return tsk_rt(t)->kernel_np;
136}
137
138static inline int is_user_np(struct task_struct *t)
139{
140 return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0;
141}
142
143static inline void request_exit_np(struct task_struct *t)
144{
145 if (is_user_np(t)) {
146 /* Set the flag that tells user space to call
147 * into the kernel at the end of a critical section. */
148 if (likely(tsk_rt(t)->ctrl_page)) {
149 TRACE_TASK(t, "setting delayed_preemption flag\n");
150 tsk_rt(t)->ctrl_page->delayed_preemption = 1;
151 }
152 }
153}
154
155static inline void clear_exit_np(struct task_struct *t)
156{
157 if (likely(tsk_rt(t)->ctrl_page))
158 tsk_rt(t)->ctrl_page->delayed_preemption = 0;
159}
160
161static inline void make_np(struct task_struct *t)
162{
163 tsk_rt(t)->kernel_np++;
164}
165
166/* Caller should check if preemption is necessary when
167 * the function return 0.
168 */
169static inline int take_np(struct task_struct *t)
170{
171 return --tsk_rt(t)->kernel_np;
172}
173
174#else
175
176static inline int is_kernel_np(struct task_struct* t)
177{
178 return 0;
179}
180
181static inline int is_user_np(struct task_struct* t)
182{
183 return 0;
184}
185
186static inline void request_exit_np(struct task_struct *t)
187{
188 /* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */
189 BUG();
190}
191
192static inline void clear_exit_np(struct task_struct* t)
193{
194}
195
196#endif
197
198static inline int is_np(struct task_struct *t)
199{
200#ifdef CONFIG_SCHED_DEBUG_TRACE
201 int kernel, user;
202 kernel = is_kernel_np(t);
203 user = is_user_np(t);
204 if (kernel || user)
205 TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
206
207 kernel, user);
208 return kernel || user;
209#else
210 return unlikely(is_kernel_np(t) || is_user_np(t));
211#endif
212}
213
214static inline int is_present(struct task_struct* t)
215{
216 return t && tsk_rt(t)->present;
217}
218
219
220/* make the unit explicit */
221typedef unsigned long quanta_t;
222
223enum round {
224 FLOOR,
225 CEIL
226};
227
228
229/* Tick period is used to convert ns-specified execution
230 * costs and periods into tick-based equivalents.
231 */
232extern ktime_t tick_period;
233
234static inline quanta_t time2quanta(lt_t time, enum round round)
235{
236 s64 quantum_length = ktime_to_ns(tick_period);
237
238 if (do_div(time, quantum_length) && round == CEIL)
239 time++;
240 return (quanta_t) time;
241}
242
243/* By how much is cpu staggered behind CPU 0? */
244u64 cpu_stagger_offset(int cpu);
245
246#endif
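A worked sketch (not part of the patch) of time2quanta() above, assuming a 1 ms tick_period: a 2.5 ms value has a non-zero remainder after do_div(), so FLOOR yields 2 quanta and CEIL yields 3. demo_time2quanta() is made up.

static void demo_time2quanta(void)
{
	lt_t budget = 2500000ULL;			/* 2.5 ms in ns */
	quanta_t lo = time2quanta(budget, FLOOR);	/* 2, assuming 1 ms ticks */
	quanta_t hi = time2quanta(budget, CEIL);	/* 3, assuming 1 ms ticks */
	(void) lo;
	(void) hi;
}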
diff --git a/include/litmus/litmus_proc.h b/include/litmus/litmus_proc.h
new file mode 100644
index 000000000000..6800e725d48c
--- /dev/null
+++ b/include/litmus/litmus_proc.h
@@ -0,0 +1,25 @@
1#include <litmus/sched_plugin.h>
2#include <linux/proc_fs.h>
3
4int __init init_litmus_proc(void);
5void exit_litmus_proc(void);
6
7/*
8 * On success, returns 0 and sets the pointer to the location of the new
9 * proc dir entry, otherwise returns an error code and sets pde to NULL.
10 */
11long make_plugin_proc_dir(struct sched_plugin* plugin,
12 struct proc_dir_entry** pde);
13
14/*
15 * Plugins should deallocate all child proc directory entries before
16 * calling this, to avoid memory leaks.
17 */
18void remove_plugin_proc_dir(struct sched_plugin* plugin);
19
20
21/* Copy at most size-1 bytes from ubuf into kbuf, null-terminate buf, and
22 * remove a '\n' if present. Returns the number of bytes that were read or
23 * -EFAULT. */
24int copy_and_chomp(char *kbuf, unsigned long ksize,
25 __user const char* ubuf, unsigned long ulength);
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
new file mode 100644
index 000000000000..4d7b870cb443
--- /dev/null
+++ b/include/litmus/locking.h
@@ -0,0 +1,28 @@
1#ifndef LITMUS_LOCKING_H
2#define LITMUS_LOCKING_H
3
4struct litmus_lock_ops;
5
6/* Generic base struct for LITMUS^RT userspace semaphores.
7 * This structure should be embedded in protocol-specific semaphores.
8 */
9struct litmus_lock {
10 struct litmus_lock_ops *ops;
11 int type;
12};
13
14struct litmus_lock_ops {
15 /* Current task tries to obtain / drop a reference to a lock.
16 * Optional methods, allowed by default. */
17 int (*open)(struct litmus_lock*, void* __user);
18 int (*close)(struct litmus_lock*);
19
20 /* Current tries to lock/unlock this lock (mandatory methods). */
21 int (*lock)(struct litmus_lock*);
22 int (*unlock)(struct litmus_lock*);
23
24 /* The lock is no longer being referenced (mandatory method). */
25 void (*deallocate)(struct litmus_lock*);
26};
27
28#endif
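A sketch (not part of the patch) of how a protocol-specific semaphore embeds struct litmus_lock, as the comment above suggests. struct demo_semaphore and its ops are invented; the FMLP and SRP implementations elsewhere in the patch follow the same shape.

struct demo_semaphore {
	struct litmus_lock	litmus_lock;	/* embedded base with ops table */
	struct task_struct	*owner;
};

static int demo_lock(struct litmus_lock *l)
{
	struct demo_semaphore *sem =
		container_of(l, struct demo_semaphore, litmus_lock);
	/* ... block until sem->owner is clear, then claim it ... */
	sem->owner = current;
	return 0;
}

static struct litmus_lock_ops demo_ops = {
	.lock = demo_lock,
	/* .unlock and .deallocate are mandatory as well */
};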
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
new file mode 100644
index 000000000000..260c6fe17986
--- /dev/null
+++ b/include/litmus/preempt.h
@@ -0,0 +1,164 @@
1#ifndef LITMUS_PREEMPT_H
2#define LITMUS_PREEMPT_H
3
4#include <linux/types.h>
5#include <linux/cache.h>
6#include <linux/percpu.h>
7#include <asm/atomic.h>
8
9#include <litmus/debug_trace.h>
10
11extern DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state);
12
13#ifdef CONFIG_DEBUG_KERNEL
14const char* sched_state_name(int s);
15#define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args)
16#else
17#define TRACE_STATE(fmt, args...) /* ignore */
18#endif
19
20#define VERIFY_SCHED_STATE(x) \
21 do { int __s = get_sched_state(); \
22 if ((__s & (x)) == 0) \
23 TRACE_STATE("INVALID s=0x%x (%s) not " \
24 "in 0x%x (%s) [%s]\n", \
25 __s, sched_state_name(__s), \
26 (x), #x, __FUNCTION__); \
27 } while (0);
28
29#define TRACE_SCHED_STATE_CHANGE(x, y, cpu) \
30 TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n", \
31 cpu, (x), sched_state_name(x), \
32 (y), sched_state_name(y))
33
34
35typedef enum scheduling_state {
36 TASK_SCHEDULED = (1 << 0), /* The currently scheduled task is the one that
37 * should be scheduled, and the processor does not
38 * plan to invoke schedule(). */
39 SHOULD_SCHEDULE = (1 << 1), /* A remote processor has determined that the
40 * processor should reschedule, but this has not
41 * been communicated yet (IPI still pending). */
42 WILL_SCHEDULE = (1 << 2), /* The processor has noticed that it has to
43 * reschedule and will do so shortly. */
44 TASK_PICKED = (1 << 3), /* The processor is currently executing schedule(),
45 * has selected a new task to schedule, but has not
46 * yet performed the actual context switch. */
47 PICKED_WRONG_TASK = (1 << 4), /* The processor has not yet performed the context
48 * switch, but a remote processor has already
49 * determined that a higher-priority task became
50 * eligible after the task was picked. */
51} sched_state_t;
52
53static inline sched_state_t get_sched_state_on(int cpu)
54{
55 return atomic_read(&per_cpu(resched_state, cpu));
56}
57
58static inline sched_state_t get_sched_state(void)
59{
60 return atomic_read(&__get_cpu_var(resched_state));
61}
62
63static inline int is_in_sched_state(int possible_states)
64{
65 return get_sched_state() & possible_states;
66}
67
68static inline int cpu_is_in_sched_state(int cpu, int possible_states)
69{
70 return get_sched_state_on(cpu) & possible_states;
71}
72
73static inline void set_sched_state(sched_state_t s)
74{
75 TRACE_SCHED_STATE_CHANGE(get_sched_state(), s, smp_processor_id());
76 atomic_set(&__get_cpu_var(resched_state), s);
77}
78
79static inline int sched_state_transition(sched_state_t from, sched_state_t to)
80{
81 sched_state_t old_state;
82
83 old_state = atomic_cmpxchg(&__get_cpu_var(resched_state), from, to);
84 if (old_state == from) {
85 TRACE_SCHED_STATE_CHANGE(from, to, smp_processor_id());
86 return 1;
87 } else
88 return 0;
89}
90
91static inline int sched_state_transition_on(int cpu,
92 sched_state_t from,
93 sched_state_t to)
94{
95 sched_state_t old_state;
96
97 old_state = atomic_cmpxchg(&per_cpu(resched_state, cpu), from, to);
98 if (old_state == from) {
99 TRACE_SCHED_STATE_CHANGE(from, to, cpu);
100 return 1;
101 } else
102 return 0;
103}
104
105/* Plugins must call this function after they have decided which job to
106 * schedule next. IMPORTANT: this function must be called while still holding
107 * the lock that is used to serialize scheduling decisions.
108 *
109 * (Ideally, we would like to use runqueue locks for this purpose, but that
110 * would lead to deadlocks with the migration code.)
111 */
112static inline void sched_state_task_picked(void)
113{
114 VERIFY_SCHED_STATE(WILL_SCHEDULE);
115
116	/* WILL_SCHEDULE has only a local transition => simple store is ok */
117 set_sched_state(TASK_PICKED);
118}
119
120static inline void sched_state_entered_schedule(void)
121{
122 /* Update state for the case that we entered schedule() not due to
123 * set_tsk_need_resched() */
124 set_sched_state(WILL_SCHEDULE);
125}
126
127/* Called by schedule() to check if the scheduling decision is still valid
128 * after a context switch. Returns 1 if the CPU needs to reschedule. */
129static inline int sched_state_validate_switch(void)
130{
131 int left_state_ok = 0;
132
133 VERIFY_SCHED_STATE(PICKED_WRONG_TASK | TASK_PICKED);
134
135 if (is_in_sched_state(TASK_PICKED)) {
136 /* Might be good; let's try to transition out of this
137 * state. This must be done atomically since remote processors
138 * may try to change the state, too. */
139 left_state_ok = sched_state_transition(TASK_PICKED, TASK_SCHEDULED);
140 }
141
142 if (!left_state_ok) {
143 /* We raced with a higher-priority task arrival => not
144 * valid. The CPU needs to reschedule. */
145 set_sched_state(WILL_SCHEDULE);
146 return 1;
147 } else
148 return 0;
149}
150
151/* State transition events. See litmus/preempt.c for details. */
152void sched_state_will_schedule(struct task_struct* tsk);
153void sched_state_ipi(void);
154/* Cause a CPU (remote or local) to reschedule. */
155void litmus_reschedule(int cpu);
156void litmus_reschedule_local(void);
157
158#ifdef CONFIG_DEBUG_KERNEL
159void sched_state_plugin_check(void);
160#else
161#define sched_state_plugin_check() /* no check */
162#endif
163
164#endif
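A sketch (not part of the patch) of the intended call sequence around one scheduling decision. Per the comments above, sched_state_entered_schedule() and sched_state_validate_switch() belong to the schedule() path itself, while sched_state_task_picked() is what a plugin calls, under its own scheduling lock, once it has selected the next job; demo_schedule_once() is made up.

static void demo_schedule_once(void)
{
	sched_state_entered_schedule();		/* -> WILL_SCHEDULE */

	/* plugin, while holding its scheduling lock: next = pick_next_job(); */
	sched_state_task_picked();		/* WILL_SCHEDULE -> TASK_PICKED */

	if (sched_state_validate_switch()) {
		/* a higher-priority job arrived in between: schedule() again */
		return;
	}
	/* otherwise the state is now TASK_SCHEDULED and the pick stands */
}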
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
new file mode 100644
index 000000000000..ac249292e866
--- /dev/null
+++ b/include/litmus/rt_domain.h
@@ -0,0 +1,182 @@
1/* CLEANUP: Add comments and make it less messy.
2 *
3 */
4
5#ifndef __UNC_RT_DOMAIN_H__
6#define __UNC_RT_DOMAIN_H__
7
8#include <litmus/bheap.h>
9
10#define RELEASE_QUEUE_SLOTS 127 /* prime */
11
12struct _rt_domain;
13
14typedef int (*check_resched_needed_t)(struct _rt_domain *rt);
15typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks);
16
17struct release_queue {
18 /* each slot maintains a list of release heaps sorted
19 * by release time */
20 struct list_head slot[RELEASE_QUEUE_SLOTS];
21};
22
23typedef struct _rt_domain {
24 /* runnable rt tasks are in here */
25 raw_spinlock_t ready_lock;
26 struct bheap ready_queue;
27
28 /* real-time tasks waiting for release are in here */
29 raw_spinlock_t release_lock;
30 struct release_queue release_queue;
31
32#ifdef CONFIG_RELEASE_MASTER
33 int release_master;
34#endif
35
36 /* for moving tasks to the release queue */
37 raw_spinlock_t tobe_lock;
38 struct list_head tobe_released;
39
40 /* how do we check if we need to kick another CPU? */
41 check_resched_needed_t check_resched;
42
43 /* how do we release jobs? */
44 release_jobs_t release_jobs;
45
46 /* how are tasks ordered in the ready queue? */
47 bheap_prio_t order;
48} rt_domain_t;
49
50struct release_heap {
51 /* list_head for per-time-slot list */
52 struct list_head list;
53 lt_t release_time;
54 /* all tasks to be released at release_time */
55 struct bheap heap;
56 /* used to trigger the release */
57 struct hrtimer timer;
58
59#ifdef CONFIG_RELEASE_MASTER
60 /* used to delegate releases */
61 struct hrtimer_start_on_info info;
62#endif
63 /* required for the timer callback */
64 rt_domain_t* dom;
65};
66
67
68static inline struct task_struct* __next_ready(rt_domain_t* rt)
69{
70 struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue);
71 if (hn)
72 return bheap2task(hn);
73 else
74 return NULL;
75}
76
77void rt_domain_init(rt_domain_t *rt, bheap_prio_t order,
78 check_resched_needed_t check,
79			release_jobs_t release);
80
81void __add_ready(rt_domain_t* rt, struct task_struct *new);
82void __merge_ready(rt_domain_t* rt, struct bheap *tasks);
83void __add_release(rt_domain_t* rt, struct task_struct *task);
84
85static inline struct task_struct* __take_ready(rt_domain_t* rt)
86{
87 struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue);
88 if (hn)
89 return bheap2task(hn);
90 else
91 return NULL;
92}
93
94static inline struct task_struct* __peek_ready(rt_domain_t* rt)
95{
96 struct bheap_node* hn = bheap_peek(rt->order, &rt->ready_queue);
97 if (hn)
98 return bheap2task(hn);
99 else
100 return NULL;
101}
102
103static inline int is_queued(struct task_struct *t)
104{
105 BUG_ON(!tsk_rt(t)->heap_node);
106 return bheap_node_in_heap(tsk_rt(t)->heap_node);
107}
108
109static inline void remove(rt_domain_t* rt, struct task_struct *t)
110{
111 bheap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
112}
113
114static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
115{
116 unsigned long flags;
117 /* first we need the write lock for rt_ready_queue */
118 raw_spin_lock_irqsave(&rt->ready_lock, flags);
119 __add_ready(rt, new);
120 raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
121}
122
123static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
124{
125 unsigned long flags;
126 raw_spin_lock_irqsave(&rt->ready_lock, flags);
127 __merge_ready(rt, tasks);
128 raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
129}
130
131static inline struct task_struct* take_ready(rt_domain_t* rt)
132{
133 unsigned long flags;
134 struct task_struct* ret;
135 /* first we need the write lock for rt_ready_queue */
136 raw_spin_lock_irqsave(&rt->ready_lock, flags);
137 ret = __take_ready(rt);
138 raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
139 return ret;
140}
141
142
143static inline void add_release(rt_domain_t* rt, struct task_struct *task)
144{
145 unsigned long flags;
146 raw_spin_lock_irqsave(&rt->tobe_lock, flags);
147 __add_release(rt, task);
148 raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
149}
150
151#ifdef CONFIG_RELEASE_MASTER
152void __add_release_on(rt_domain_t* rt, struct task_struct *task,
153 int target_cpu);
154
155static inline void add_release_on(rt_domain_t* rt,
156 struct task_struct *task,
157 int target_cpu)
158{
159 unsigned long flags;
160 raw_spin_lock_irqsave(&rt->tobe_lock, flags);
161 __add_release_on(rt, task, target_cpu);
162 raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
163}
164#endif
165
166static inline int __jobs_pending(rt_domain_t* rt)
167{
168 return !bheap_empty(&rt->ready_queue);
169}
170
171static inline int jobs_pending(rt_domain_t* rt)
172{
173 unsigned long flags;
174 int ret;
175 /* first we need the write lock for rt_ready_queue */
176 raw_spin_lock_irqsave(&rt->ready_lock, flags);
177 ret = !bheap_empty(&rt->ready_queue);
178 raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
179 return ret;
180}
181
182#endif
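A sketch (not part of the patch) of setting up an EDF-ordered rt_domain and moving a job through it. edf_ready_order() comes from litmus/edf_common.h; demo_resched(), demo_release() and demo_domain_use() are invented stand-ins for plugin callbacks (a real plugin takes its own lock and checks for preemptions in release_jobs).

static int demo_resched(rt_domain_t *rt)
{
	litmus_reschedule_local();	/* plugin-specific in practice */
	return 1;
}

static void demo_release(rt_domain_t *rt, struct bheap *tasks)
{
	__merge_ready(rt, tasks);	/* move released jobs to the ready queue */
}

static rt_domain_t demo_domain;

static void demo_domain_use(struct task_struct *t)
{
	struct task_struct *next;

	rt_domain_init(&demo_domain, edf_ready_order,
		       demo_resched, demo_release);

	add_ready(&demo_domain, t);	 /* takes ready_lock internally */
	next = take_ready(&demo_domain); /* highest-priority queued job, or NULL */
	(void) next;
}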
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
new file mode 100644
index 000000000000..5de422c742f6
--- /dev/null
+++ b/include/litmus/rt_param.h
@@ -0,0 +1,203 @@
1/*
2 * Definition of the scheduler plugin interface.
3 *
4 */
5#ifndef _LINUX_RT_PARAM_H_
6#define _LINUX_RT_PARAM_H_
7
8/* Litmus time type. */
9typedef unsigned long long lt_t;
10
11static inline int lt_after(lt_t a, lt_t b)
12{
13 return ((long long) b) - ((long long) a) < 0;
14}
15#define lt_before(a, b) lt_after(b, a)
16
17static inline int lt_after_eq(lt_t a, lt_t b)
18{
19 return ((long long) a) - ((long long) b) >= 0;
20}
21#define lt_before_eq(a, b) lt_after_eq(b, a)
22
23/* different types of clients */
24typedef enum {
25 RT_CLASS_HARD,
26 RT_CLASS_SOFT,
27 RT_CLASS_BEST_EFFORT
28} task_class_t;
29
30typedef enum {
31 NO_ENFORCEMENT, /* job may overrun unhindered */
32 QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */
33 PRECISE_ENFORCEMENT /* NOT IMPLEMENTED - enforced with hrtimers */
34} budget_policy_t;
35
36struct rt_task {
37 lt_t exec_cost;
38 lt_t period;
39 lt_t phase;
40 unsigned int cpu;
41 task_class_t cls;
42 budget_policy_t budget_policy; /* ignored by pfair */
43};
44
45/* The definition of the data that is shared between the kernel and real-time
46 * tasks via a shared page (see litmus/ctrldev.c).
47 *
48 * WARNING: User space can write to this, so don't trust
49 * the correctness of the fields!
50 *
51 * This serves two purposes: to enable efficient signaling
52 * of non-preemptive sections (user->kernel) and
53 * delayed preemptions (kernel->user), and to export
54 * some real-time relevant statistics such as preemption and
55 * migration data to user space. We can't use a device to export
56 * statistics because we want to avoid system call overhead when
57 * determining preemption/migration overheads.
58 */
59struct control_page {
60 /* Is the task currently in a non-preemptive section? */
61 int np_flag;
62 /* Should the task call into the kernel when it leaves
63 * its non-preemptive section? */
64 int delayed_preemption;
65
66 /* to be extended */
67};
68
69/* don't export internal data structures to user space (liblitmus) */
70#ifdef __KERNEL__
71
72struct _rt_domain;
73struct bheap_node;
74struct release_heap;
75
76struct rt_job {
77	/* Time instant the job was or will be released. */
78 lt_t release;
79 /* What is the current deadline? */
80 lt_t deadline;
81
82 /* How much service has this job received so far? */
83 lt_t exec_time;
84
85 /* Which job is this. This is used to let user space
86 * specify which job to wait for, which is important if jobs
87 * overrun. If we just call sys_sleep_next_period() then we
88 * will unintentionally miss jobs after an overrun.
89 *
90 * Increase this sequence number when a job is released.
91 */
92 unsigned int job_no;
93};
94
95struct pfair_param;
96
97/* RT task parameters for scheduling extensions
98 * These parameters are inherited during clone and therefore must
99 * be explicitly set up before the task set is launched.
100 */
101struct rt_param {
102 /* is the task sleeping? */
103 unsigned int flags:8;
104
105 /* do we need to check for srp blocking? */
106 unsigned int srp_non_recurse:1;
107
108 /* is the task present? (true if it can be scheduled) */
109 unsigned int present:1;
110
111#ifdef CONFIG_LITMUS_LOCKING
112 /* Is the task being priority-boosted by a locking protocol? */
113 unsigned int priority_boosted:1;
114 /* If so, when did this start? */
115 lt_t boost_start_time;
116#endif
117
118 /* user controlled parameters */
119 struct rt_task task_params;
120
121 /* timing parameters */
122 struct rt_job job_params;
123
124 /* task representing the current "inherited" task
125 * priority, assigned by inherit_priority and
126 * return priority in the scheduler plugins.
127 * could point to self if PI does not result in
128 * an increased task priority.
129 */
130 struct task_struct* inh_task;
131
132#ifdef CONFIG_NP_SECTION
133 /* For the FMLP under PSN-EDF, it is required to make the task
134 * non-preemptive from kernel space. In order not to interfere with
135 * user space, this counter indicates the kernel space np setting.
136 * kernel_np > 0 => task is non-preemptive
137 */
138 unsigned int kernel_np;
139#endif
140
141 /* This field can be used by plugins to store where the task
142 * is currently scheduled. It is the responsibility of the
143 * plugin to avoid race conditions.
144 *
145	 * This is used by GSN-EDF and PFAIR.
146 */
147 volatile int scheduled_on;
148
149 /* Is the stack of the task currently in use? This is updated by
150 * the LITMUS core.
151 *
152 * Be careful to avoid deadlocks!
153 */
154 volatile int stack_in_use;
155
156 /* This field can be used by plugins to store where the task
157 * is currently linked. It is the responsibility of the plugin
158 * to avoid race conditions.
159 *
160 * Used by GSN-EDF.
161 */
162 volatile int linked_on;
163
164 /* PFAIR/PD^2 state. Allocated on demand. */
165 struct pfair_param* pfair;
166
167 /* Fields saved before BE->RT transition.
168 */
169 int old_policy;
170 int old_prio;
171
172 /* ready queue for this task */
173 struct _rt_domain* domain;
174
175 /* heap element for this task
176 *
177 * Warning: Don't statically allocate this node. The heap
178 * implementation swaps these between tasks, thus after
179 * dequeuing from a heap you may end up with a different node
180	 * than the one you had when enqueuing the task. For the same
181 * reason, don't obtain and store references to this node
182 * other than this pointer (which is updated by the heap
183 * implementation).
184 */
185 struct bheap_node* heap_node;
186 struct release_heap* rel_heap;
187
188 /* Used by rt_domain to queue task in release list.
189 */
190 struct list_head list;
191
192 /* Pointer to the page shared between userspace and kernel. */
193 struct control_page * ctrl_page;
194};
195
196/* Possible RT flags */
197#define RT_F_RUNNING 0x00000000
198#define RT_F_SLEEP 0x00000001
199#define RT_F_EXIT_SEM 0x00000008
200
201#endif
202
203#endif
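An illustrative sketch (not part of the patch) of filling in struct rt_task as user space (e.g. liblitmus) would before admitting a task. The values and demo_fill_params() are made up: a 10 ms budget every 100 ms on CPU 0 under quantum-based enforcement.

static void demo_fill_params(struct rt_task *tp)
{
	tp->exec_cost     = 10000000ULL;	/* 10 ms, in ns */
	tp->period        = 100000000ULL;	/* 100 ms, in ns */
	tp->phase         = 0;
	tp->cpu           = 0;
	tp->cls           = RT_CLASS_SOFT;
	tp->budget_policy = QUANTUM_ENFORCEMENT;
}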
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
new file mode 100644
index 000000000000..6e7cabdddae8
--- /dev/null
+++ b/include/litmus/sched_plugin.h
@@ -0,0 +1,111 @@
1/*
2 * Definition of the scheduler plugin interface.
3 *
4 */
5#ifndef _LINUX_SCHED_PLUGIN_H_
6#define _LINUX_SCHED_PLUGIN_H_
7
8#include <linux/sched.h>
9
10#ifdef CONFIG_LITMUS_LOCKING
11#include <litmus/locking.h>
12#endif
13
14/************************ setup/tear down ********************/
15
16typedef long (*activate_plugin_t) (void);
17typedef long (*deactivate_plugin_t) (void);
18
19
20
21/********************* scheduler invocation ******************/
22
23/* Plugin-specific realtime tick handler */
24typedef void (*scheduler_tick_t) (struct task_struct *cur);
25/* Plugin's scheduling decision function */
26typedef struct task_struct* (*schedule_t)(struct task_struct * prev);
27/* Clean up after the task switch has occurred.
28 * This function is called after every (even non-rt) task switch.
29 */
30typedef void (*finish_switch_t)(struct task_struct *prev);
31
32
33/********************* task state changes ********************/
34
35/* Called to setup a new real-time task.
36 * Release the first job, enqueue, etc.
37 * Task may already be running.
38 */
39typedef void (*task_new_t) (struct task_struct *task,
40 int on_rq,
41 int running);
42
43/* Called to re-introduce a task after blocking.
44 * Can potentially be called multiple times.
45 */
46typedef void (*task_wake_up_t) (struct task_struct *task);
47/* called to notify the plugin of a blocking real-time task
48 * it will only be called for real-time tasks and before schedule is called */
49typedef void (*task_block_t) (struct task_struct *task);
50/* Called when a real-time task exits or changes to a different scheduling
51 * class.
52 * Free any allocated resources
53 */
54typedef void (*task_exit_t) (struct task_struct *);
55
56/* Called when the current task attempts to create a new lock of a given
57 * protocol type. */
58typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type,
59 void* __user config);
60
61
62/********************* sys call backends ********************/
63/* This function causes the caller to sleep until the next release */
64typedef long (*complete_job_t) (void);
65
66typedef long (*admit_task_t)(struct task_struct* tsk);
67
68typedef void (*release_at_t)(struct task_struct *t, lt_t start);
69
70struct sched_plugin {
71 struct list_head list;
72 /* basic info */
73 char *plugin_name;
74
75 /* setup */
76 activate_plugin_t activate_plugin;
77 deactivate_plugin_t deactivate_plugin;
78
79 /* scheduler invocation */
80 scheduler_tick_t tick;
81 schedule_t schedule;
82 finish_switch_t finish_switch;
83
84 /* syscall backend */
85 complete_job_t complete_job;
86 release_at_t release_at;
87
88 /* task state changes */
89 admit_task_t admit_task;
90
91 task_new_t task_new;
92 task_wake_up_t task_wake_up;
93 task_block_t task_block;
94 task_exit_t task_exit;
95
96#ifdef CONFIG_LITMUS_LOCKING
97 /* locking protocols */
98 allocate_lock_t allocate_lock;
99#endif
100} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
101
102
103extern struct sched_plugin *litmus;
104
105int register_sched_plugin(struct sched_plugin* plugin);
106struct sched_plugin* find_sched_plugin(const char* name);
107int print_sched_plugins(char* buf, int max);
108
109extern struct sched_plugin linux_sched_plugin;
110
111#endif
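A sketch (not part of the patch) of the minimal shape of a plugin definition and its registration. The demo_* names are invented; a real plugin fills in the scheduling and task-state hooks it needs.

static long demo_admit_task(struct task_struct *tsk)
{
	return 0;	/* accept every task */
}

static struct sched_plugin demo_plugin = {
	.plugin_name = "DEMO",
	.admit_task  = demo_admit_task,
	/* .schedule, .task_new, .task_wake_up, ... as required */
};

static int __init demo_plugin_init(void)
{
	return register_sched_plugin(&demo_plugin);
}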
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
new file mode 100644
index 000000000000..7ca34cb13881
--- /dev/null
+++ b/include/litmus/sched_trace.h
@@ -0,0 +1,200 @@
1/*
2 * sched_trace.h -- record scheduler events to a byte stream for offline analysis.
3 */
4#ifndef _LINUX_SCHED_TRACE_H_
5#define _LINUX_SCHED_TRACE_H_
6
7/* all times in nanoseconds */
8
9struct st_trace_header {
10 u8 type; /* Of what type is this record? */
11 u8 cpu; /* On which CPU was it recorded? */
12 u16 pid; /* PID of the task. */
13 u32 job; /* The job sequence number. */
14};
15
16#define ST_NAME_LEN 16
17struct st_name_data {
18 char cmd[ST_NAME_LEN];/* The name of the executable of this process. */
19};
20
21struct st_param_data { /* regular params */
22 u32 wcet;
23 u32 period;
24 u32 phase;
25 u8 partition;
26 u8 class;
27 u8 __unused[2];
28};
29
30struct st_release_data { /* A job was/is going to be released. */
31 u64 release; /* What's the release time? */
32 u64 deadline; /* By when must it finish? */
33};
34
35struct st_assigned_data { /* A job was assigned to a CPU. */
36 u64 when;
37 u8 target; /* Where should it execute? */
38 u8 __unused[7];
39};
40
41struct st_switch_to_data { /* A process was switched to on a given CPU. */
42 u64 when; /* When did this occur? */
43 u32 exec_time; /* Time the current job has executed. */
44 u8 __unused[4];
45
46};
47
48struct st_switch_away_data { /* A process was switched away from on a given CPU. */
49 u64 when;
50 u64 exec_time;
51};
52
53struct st_completion_data { /* A job completed. */
54 u64 when;
55 u8 forced:1; /* Set to 1 if job overran and kernel advanced to the
56 * next task automatically; set to 0 otherwise.
57 */
58 u8 __uflags:7;
59 u8 __unused[7];
60};
61
62struct st_block_data { /* A task blocks. */
63 u64 when;
64 u64 __unused;
65};
66
67struct st_resume_data { /* A task resumes. */
68 u64 when;
69 u64 __unused;
70};
71
72struct st_action_data { /* A plugin-defined action was recorded. */
73 u64 when;
74 u8 action;
75 u8 __unused[7];
76};
77
78struct st_sys_release_data { /* A synchronous task-system release occurred. */
79 u64 when;
80 u64 release;
81};
82
83#define DATA(x) struct st_ ## x ## _data x;
84
85typedef enum {
86 ST_NAME = 1, /* Start at one, so that we can spot
87 * uninitialized records. */
88 ST_PARAM,
89 ST_RELEASE,
90 ST_ASSIGNED,
91 ST_SWITCH_TO,
92 ST_SWITCH_AWAY,
93 ST_COMPLETION,
94 ST_BLOCK,
95 ST_RESUME,
96 ST_ACTION,
97 ST_SYS_RELEASE
98} st_event_record_type_t;
99
100struct st_event_record {
101 struct st_trace_header hdr;
102 union {
103 u64 raw[2];
104
105 DATA(name);
106 DATA(param);
107 DATA(release);
108 DATA(assigned);
109 DATA(switch_to);
110 DATA(switch_away);
111 DATA(completion);
112 DATA(block);
113 DATA(resume);
114 DATA(action);
115 DATA(sys_release);
116 } data;
117};
118
119#undef DATA
120
121#ifdef __KERNEL__
122
123#include <linux/sched.h>
124#include <litmus/feather_trace.h>
125
126#ifdef CONFIG_SCHED_TASK_TRACE
127
128#define SCHED_TRACE(id, callback, task) \
129 ft_event1(id, callback, task)
130#define SCHED_TRACE2(id, callback, task, xtra) \
131 ft_event2(id, callback, task, xtra)
132
133/* provide prototypes; needed on sparc64 */
134#ifndef NO_TASK_TRACE_DECLS
135feather_callback void do_sched_trace_task_name(unsigned long id,
136 struct task_struct* task);
137feather_callback void do_sched_trace_task_param(unsigned long id,
138 struct task_struct* task);
139feather_callback void do_sched_trace_task_release(unsigned long id,
140 struct task_struct* task);
141feather_callback void do_sched_trace_task_switch_to(unsigned long id,
142 struct task_struct* task);
143feather_callback void do_sched_trace_task_switch_away(unsigned long id,
144 struct task_struct* task);
145feather_callback void do_sched_trace_task_completion(unsigned long id,
146 struct task_struct* task,
147 unsigned long forced);
148feather_callback void do_sched_trace_task_block(unsigned long id,
149 struct task_struct* task);
150feather_callback void do_sched_trace_task_resume(unsigned long id,
151 struct task_struct* task);
152feather_callback void do_sched_trace_action(unsigned long id,
153 struct task_struct* task,
154 unsigned long action);
155feather_callback void do_sched_trace_sys_release(unsigned long id,
156 lt_t* start);
157
158#endif
159
160#else
161
162#define SCHED_TRACE(id, callback, task) /* no tracing */
163#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */
164
165#endif
166
167
168#define SCHED_TRACE_BASE_ID 500
169
170
171#define sched_trace_task_name(t) \
172 SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, do_sched_trace_task_name, t)
173#define sched_trace_task_param(t) \
174 SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, do_sched_trace_task_param, t)
175#define sched_trace_task_release(t) \
176 SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, do_sched_trace_task_release, t)
177#define sched_trace_task_switch_to(t) \
178 SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, do_sched_trace_task_switch_to, t)
179#define sched_trace_task_switch_away(t) \
180 SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t)
181#define sched_trace_task_completion(t, forced) \
182 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \
183 (unsigned long) forced)
184#define sched_trace_task_block(t) \
185 SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t)
186#define sched_trace_task_resume(t) \
187 SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t)
188#define sched_trace_action(t, action) \
189 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9, do_sched_trace_action, t, \
190		(unsigned long) action)
191/* when is a pointer; it does not need an explicit cast to unsigned long */
192#define sched_trace_sys_release(when) \
193 SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when)
194
195
196#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
197
198#endif /* __KERNEL__ */
199
200#endif
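As an illustration of how these hooks are meant to be used, the sketch below shows a plugin-side job-completion path emitting trace records. The helper demo_job_completion() and the surrounding logic are hypothetical; only the sched_trace_* macros and the record layout come from this header.

/* Each sched_trace_* invocation emits one fixed-size st_event_record:
 * an 8-byte header (type, cpu, pid, job) plus a 16-byte payload. */
static void demo_job_completion(struct task_struct *t, int forced)
{
	/* Record that the current job finished; forced != 0 means the job
	 * overran and the kernel advanced to the next job automatically. */
	sched_trace_task_completion(t, forced);

	/* ... advance the task to its next job here (not shown) ... */

	/* Record the new job's release time and deadline. */
	sched_trace_task_release(t);
}

When CONFIG_SCHED_TASK_TRACE is disabled, the macros expand to nothing, so such call sites compile away.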
diff --git a/include/litmus/srp.h b/include/litmus/srp.h
new file mode 100644
index 000000000000..c9a4552b2bf3
--- /dev/null
+++ b/include/litmus/srp.h
@@ -0,0 +1,28 @@
1#ifndef LITMUS_SRP_H
2#define LITMUS_SRP_H
3
4struct srp_semaphore;
5
6struct srp_priority {
7 struct list_head list;
8 unsigned int priority;
9 pid_t pid;
10};
11#define list2prio(l) list_entry(l, struct srp_priority, list)
12
13/* struct for uniprocessor SRP "semaphore" */
14struct srp_semaphore {
15 struct litmus_lock litmus_lock;
16 struct srp_priority ceiling;
17 struct task_struct* owner;
18 int cpu; /* cpu associated with this "semaphore" and resource */
19};
20
21/* Map a task to its SRP preemption level. */
22typedef unsigned int (*srp_prioritization_t)(struct task_struct* t);
23/* Must be set by each plugin that uses SRP. */
24extern srp_prioritization_t get_srp_prio;
25
26struct srp_semaphore* allocate_srp_semaphore(void);
27
28#endif
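A brief sketch of how a plugin is expected to wire up the get_srp_prio hook at activation time. The mapping below is a placeholder: in a real plugin the preemption level would be derived from a task parameter such as its period or relative deadline, and whether smaller or larger values rank higher is decided by the plugin and the SRP implementation, not by this header.

#include <litmus/locking.h>
#include <litmus/srp.h>

/* Hypothetical mapping from task to SRP preemption level. */
static unsigned int demo_srp_prio(struct task_struct *t)
{
	(void) t;
	return 1;	/* placeholder; derive from a real task parameter */
}

static long demo_srp_activate_plugin(void)
{
	/* Every plugin that supports SRP must point get_srp_prio at its
	 * own mapping before SRP semaphores are allocated and used. */
	get_srp_prio = demo_srp_prio;
	return 0;
}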
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
new file mode 100644
index 000000000000..05f487263f28
--- /dev/null
+++ b/include/litmus/trace.h
@@ -0,0 +1,103 @@
1#ifndef _SYS_TRACE_H_
2#define _SYS_TRACE_H_
3
4#ifdef CONFIG_SCHED_OVERHEAD_TRACE
5
6#include <litmus/feather_trace.h>
7#include <litmus/feather_buffer.h>
8
9
10/*********************** TIMESTAMPS ************************/
11
12enum task_type_marker {
13 TSK_BE,
14 TSK_RT,
15 TSK_UNKNOWN
16};
17
18struct timestamp {
19 uint64_t timestamp;
20 uint32_t seq_no;
21 uint8_t cpu;
22 uint8_t event;
23 uint8_t task_type;
24};
25
26/* tracing callbacks */
27feather_callback void save_timestamp(unsigned long event);
28feather_callback void save_timestamp_def(unsigned long event, unsigned long type);
29feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr);
30feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu);
31
32
33#define TIMESTAMP(id) ft_event0(id, save_timestamp)
34
35#define DTIMESTAMP(id, def) ft_event1(id, save_timestamp_def, (unsigned long) def)
36
37#define TTIMESTAMP(id, task) \
38 ft_event1(id, save_timestamp_task, (unsigned long) task)
39
40#define CTIMESTAMP(id, cpu) \
41 ft_event1(id, save_timestamp_cpu, (unsigned long) cpu)
42
43#else /* !CONFIG_SCHED_OVERHEAD_TRACE */
44
45#define TIMESTAMP(id) /* no tracing */
46
47#define DTIMESTAMP(id, def) /* no tracing */
48
49#define TTIMESTAMP(id, task) /* no tracing */
50
51#define CTIMESTAMP(id, cpu) /* no tracing */
52
53#endif
54
55
56/* Convention for timestamps
57 * =========================
58 *
59 * In order to process the trace files with a common tool, we use the following
60 * convention to measure execution times: the event id that marks the end of a
61 * code segment is always the start event id plus one.
62 */
63
64#define TS_SCHED_START DTIMESTAMP(100, TSK_UNKNOWN) /* the task type is
65 * unknown here; only
66 * the next task
67 * matters */
68#define TS_SCHED_END(t) TTIMESTAMP(101, t)
69#define TS_SCHED2_START(t) TTIMESTAMP(102, t)
70#define TS_SCHED2_END(t) TTIMESTAMP(103, t)
71
72#define TS_CXS_START(t) TTIMESTAMP(104, t)
73#define TS_CXS_END(t) TTIMESTAMP(105, t)
74
75#define TS_RELEASE_START DTIMESTAMP(106, TSK_RT)
76#define TS_RELEASE_END DTIMESTAMP(107, TSK_RT)
77
78#define TS_TICK_START(t) TTIMESTAMP(110, t)
79#define TS_TICK_END(t) TTIMESTAMP(111, t)
80
81
82#define TS_PLUGIN_SCHED_START /* TIMESTAMP(120) */ /* currently unused */
83#define TS_PLUGIN_SCHED_END /* TIMESTAMP(121) */
84
85#define TS_PLUGIN_TICK_START /* TIMESTAMP(130) */
86#define TS_PLUGIN_TICK_END /* TIMESTAMP(131) */
87
88#define TS_ENTER_NP_START TIMESTAMP(140)
89#define TS_ENTER_NP_END TIMESTAMP(141)
90
91#define TS_EXIT_NP_START TIMESTAMP(150)
92#define TS_EXIT_NP_END TIMESTAMP(151)
93
94#define TS_LOCK_START TIMESTAMP(170)
95#define TS_LOCK_END TIMESTAMP(171)
96#define TS_UNLOCK_START TIMESTAMP(172)
97#define TS_UNLOCK_END TIMESTAMP(173)
98
99#define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c)
100#define TS_SEND_RESCHED_END DTIMESTAMP(191, TSK_UNKNOWN)
101
102
103#endif /* !_SYS_TRACE_H_ */
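To show how the start/end pairs above are intended to be used, here is a sketch of a lock-acquisition path instrumented with TS_LOCK_START/TS_LOCK_END. demo_do_lock() is a hypothetical helper; the timestamps themselves (event ids 170 and 171) come from this header and follow the start-id/end-id convention described above.

#include <litmus/locking.h>
#include <litmus/trace.h>

static long demo_litmus_lock(struct litmus_lock *l)
{
	long err;

	TS_LOCK_START;		/* event 170: begin measuring the lock overhead */
	err = demo_do_lock(l);	/* hypothetical protocol-specific work being measured */
	TS_LOCK_END;		/* event 171 = 170 + 1, so tools can pair the two */

	return err;
}

With CONFIG_SCHED_OVERHEAD_TRACE disabled, both macros expand to nothing, so the instrumentation costs nothing in such builds.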
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
new file mode 100644
index 000000000000..94264c27d9ac
--- /dev/null
+++ b/include/litmus/unistd_32.h
@@ -0,0 +1,21 @@
1/*
2 * included from arch/x86/include/asm/unistd_32.h
3 *
4 * LITMUS^RT syscalls with "relative" numbers
5 */
6#define __LSC(x) (__NR_LITMUS + x)
7
8#define __NR_set_rt_task_param __LSC(0)
9#define __NR_get_rt_task_param __LSC(1)
10#define __NR_complete_job __LSC(2)
11#define __NR_od_open __LSC(3)
12#define __NR_od_close __LSC(4)
13#define __NR_litmus_lock __LSC(5)
14#define __NR_litmus_unlock __LSC(6)
15#define __NR_query_job_no __LSC(7)
16#define __NR_wait_for_job_release __LSC(8)
17#define __NR_wait_for_ts_release __LSC(9)
18#define __NR_release_ts __LSC(10)
19#define __NR_null_call __LSC(11)
20
21#define NR_litmus_syscalls 12
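As a userspace-side sketch (not part of this patch), the relative numbers above can be turned into raw syscall invocations as follows. DEMO_NR_LITMUS is a placeholder that must match the architecture's actual __NR_LITMUS, which is defined in arch/x86/include/asm/unistd_32.h rather than here; 338 is not an authoritative value.

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>

#define DEMO_NR_LITMUS		338	/* placeholder; must equal __NR_LITMUS */
#define DEMO_NR_complete_job	(DEMO_NR_LITMUS + 2)	/* __LSC(2) above */

/* Sleep until the next job release, i.e. invoke the complete_job backend. */
static inline long demo_complete_job(void)
{
	return syscall(DEMO_NR_complete_job);
}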
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
new file mode 100644
index 000000000000..d5ced0d2642c
--- /dev/null
+++ b/include/litmus/unistd_64.h
@@ -0,0 +1,33 @@
1/*
2 * included from arch/x86/include/asm/unistd_64.h
3 *
4 * LITMUS^RT syscalls with "relative" numbers
5 */
6#define __LSC(x) (__NR_LITMUS + x)
7
8#define __NR_set_rt_task_param __LSC(0)
9__SYSCALL(__NR_set_rt_task_param, sys_set_rt_task_param)
10#define __NR_get_rt_task_param __LSC(1)
11__SYSCALL(__NR_get_rt_task_param, sys_get_rt_task_param)
12#define __NR_complete_job __LSC(2)
13__SYSCALL(__NR_complete_job, sys_complete_job)
14#define __NR_od_open __LSC(3)
15__SYSCALL(__NR_od_open, sys_od_open)
16#define __NR_od_close __LSC(4)
17__SYSCALL(__NR_od_close, sys_od_close)
18#define __NR_litmus_lock __LSC(5)
19__SYSCALL(__NR_litmus_lock, sys_litmus_lock)
20#define __NR_litmus_unlock __LSC(6)
21__SYSCALL(__NR_litmus_unlock, sys_litmus_unlock)
22#define __NR_query_job_no __LSC(7)
23__SYSCALL(__NR_query_job_no, sys_query_job_no)
24#define __NR_wait_for_job_release __LSC(8)
25__SYSCALL(__NR_wait_for_job_release, sys_wait_for_job_release)
26#define __NR_wait_for_ts_release __LSC(9)
27__SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
28#define __NR_release_ts __LSC(10)
29__SYSCALL(__NR_release_ts, sys_release_ts)
30#define __NR_null_call __LSC(11)
31__SYSCALL(__NR_null_call, sys_null_call)
32
33#define NR_litmus_syscalls 12
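Finally, a sketch of the pattern for extending the table: a hypothetical 13th syscall (__NR_get_job_no / sys_get_job_no, which does not exist in this patch) would need matching additions in both headers, with the 64-bit variant additionally emitting the __SYSCALL() table entry, and NR_litmus_syscalls bumped in both.

/* unistd_32.h (hypothetical addition) */
#define __NR_get_job_no		__LSC(12)
#define NR_litmus_syscalls	13

/* unistd_64.h (hypothetical addition) */
#define __NR_get_job_no		__LSC(12)
__SYSCALL(__NR_get_job_no, sys_get_job_no)
#define NR_litmus_syscalls	13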