authorJeremy Erickson <jerickso@cs.unc.edu>2012-08-30 21:01:47 -0400
committerJeremy Erickson <jerickso@cs.unc.edu>2012-08-30 21:01:47 -0400
commitb1e1fea67bca3796d5f9133a92c300ec4fa93a4f (patch)
tree5cc1336e1fe1d6f93b1067e73e43381dd20db690 /include
parentf6f94e2ab1b33f0082ac22d71f66385a60d8157f (diff)
Bjoern's Dissertation Code with Priority Donation (wip-splitting-omlp-jerickso)
Diffstat (limited to 'include')
-rw-r--r--include/linux/completion.h1
-rw-r--r--include/linux/fs.h21
-rw-r--r--include/linux/hrtimer.h32
-rw-r--r--include/linux/sched.h19
-rw-r--r--include/linux/smp.h5
-rw-r--r--include/linux/tick.h5
-rw-r--r--include/litmus/bheap.h77
-rw-r--r--include/litmus/budget.h8
-rw-r--r--include/litmus/clustered.h44
-rw-r--r--include/litmus/debug_trace.h37
-rw-r--r--include/litmus/edf_common.h33
-rw-r--r--include/litmus/fdso.h77
-rw-r--r--include/litmus/feather_buffer.h94
-rw-r--r--include/litmus/feather_trace.h65
-rw-r--r--include/litmus/fp_common.h105
-rw-r--r--include/litmus/ftdev.h55
-rw-r--r--include/litmus/jobs.h9
-rw-r--r--include/litmus/litmus.h292
-rw-r--r--include/litmus/litmus_proc.h25
-rw-r--r--include/litmus/locking.h28
-rw-r--r--include/litmus/preempt.h165
-rw-r--r--include/litmus/rt_domain.h182
-rw-r--r--include/litmus/rt_param.h228
-rw-r--r--include/litmus/sched_plugin.h117
-rw-r--r--include/litmus/sched_plugin.h.rej22
-rw-r--r--include/litmus/sched_trace.h200
-rw-r--r--include/litmus/srp.h28
-rw-r--r--include/litmus/trace.h129
-rw-r--r--include/litmus/unistd_32.h21
-rw-r--r--include/litmus/unistd_64.h33
-rw-r--r--include/litmus/wait.h57
31 files changed, 2202 insertions, 12 deletions
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 51e3145196f6..c63950e8a863 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -90,6 +90,7 @@ extern bool completion_done(struct completion *x);
 
 extern void complete(struct completion *);
 extern void complete_all(struct completion *);
+extern void complete_n(struct completion *, int n);
 
 /**
  * INIT_COMPLETION: - reinitialize a completion structure
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 63d069bd80b7..29a672458d27 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -16,8 +16,8 @@
  * nr_file rlimit, so it's safe to set up a ridiculously high absolute
  * upper limit on files-per-process.
  *
  * Some programs (notably those using select()) may have to be
  * recompiled to take full advantage of the new limits..
  */
 
 /* Fixed constants first: */
@@ -172,7 +172,7 @@ struct inodes_stat_t {
 #define SEL_EX		4
 
 /* public flags for file_system_type */
 #define FS_REQUIRES_DEV		1
 #define FS_BINARY_MOUNTDATA	2
 #define FS_HAS_SUBTYPE		4
 #define FS_REVAL_DOT		16384	/* Check the paths ".", ".." for staleness */
@@ -470,7 +470,7 @@ struct iattr {
  */
 #include <linux/quota.h>
 
 /**
  * enum positive_aop_returns - aop return codes with specific semantics
  *
  * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
@@ -480,7 +480,7 @@ struct iattr {
  * 			be a candidate for writeback again in the near
  * 			future. Other callers must be careful to unlock
  * 			the page if they get this return. Returned by
  * 			writepage();
  *
  * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
  * 			unlocked it and the page might have been truncated.
@@ -721,6 +721,7 @@ static inline int mapping_writably_mapped(struct address_space *mapping)
 
 struct posix_acl;
 #define ACL_NOT_CACHED ((void *)(-1))
+struct inode_obj_id_table;
 
 struct inode {
 	struct hlist_node	i_hash;
@@ -784,6 +785,8 @@ struct inode {
 	struct posix_acl	*i_acl;
 	struct posix_acl	*i_default_acl;
 #endif
+	struct list_head	i_obj_list;
+	struct mutex		i_obj_mutex;
 	void			*i_private; /* fs or device private pointer */
 };
 
@@ -997,10 +1000,10 @@ static inline int file_check_writeable(struct file *filp)
 
 #define MAX_NON_LFS	((1UL<<31) - 1)
 
 /* Page cache limit. The filesystems should put that into their s_maxbytes
    limits, otherwise bad things can happen in VM. */
 #if BITS_PER_LONG==32
 #define MAX_LFS_FILESIZE	(((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
 #elif BITS_PER_LONG==64
 #define MAX_LFS_FILESIZE 	0x7fffffffffffffffUL
 #endif
@@ -2145,7 +2148,7 @@ extern int may_open(struct path *, int, int);
 
 extern int kernel_read(struct file *, loff_t, char *, unsigned long);
 extern struct file * open_exec(const char *);
 
 /* fs/dcache.c -- generic fs support functions */
 extern int is_subdir(struct dentry *, struct dentry *);
 extern int path_is_under(struct path *, struct path *);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index fd0c1b857d3d..76da541c1f66 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -167,6 +167,7 @@ struct hrtimer_clock_base {
  * @nr_retries:		Total number of hrtimer interrupt retries
  * @nr_hangs:		Total number of hrtimer interrupt hangs
  * @max_hang_time:	Maximum time spent in hrtimer_interrupt
+ * @to_pull:		LITMUS^RT list of timers to be pulled on this cpu
  */
 struct hrtimer_cpu_base {
 	raw_spinlock_t			lock;
@@ -180,8 +181,32 @@ struct hrtimer_cpu_base {
 	unsigned long			nr_hangs;
 	ktime_t				max_hang_time;
 #endif
+	struct list_head		to_pull;
 };
 
+#ifdef CONFIG_ARCH_HAS_SEND_PULL_TIMERS
+
+#define HRTIMER_START_ON_INACTIVE	0
+#define HRTIMER_START_ON_QUEUED		1
+
+/*
+ * struct hrtimer_start_on_info - save timer info on remote cpu
+ * @list:	list of hrtimer_start_on_info on remote cpu (to_pull)
+ * @timer:	timer to be triggered on remote cpu
+ * @time:	time event
+ * @mode:	timer mode
+ * @state:	activity flag
+ */
+struct hrtimer_start_on_info {
+	struct list_head	list;
+	struct hrtimer		*timer;
+	ktime_t			time;
+	enum hrtimer_mode	mode;
+	atomic_t		state;
+};
+
+#endif
+
 static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
 {
 	timer->_expires = time;
@@ -348,6 +373,13 @@ __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 			unsigned long delta_ns,
 			const enum hrtimer_mode mode, int wakeup);
 
+#ifdef CONFIG_ARCH_HAS_SEND_PULL_TIMERS
+extern void hrtimer_start_on_info_init(struct hrtimer_start_on_info *info);
+extern int hrtimer_start_on(int cpu, struct hrtimer_start_on_info *info,
+			struct hrtimer *timer, ktime_t time,
+			const enum hrtimer_mode mode);
+#endif
+
 extern int hrtimer_cancel(struct hrtimer *timer);
 extern int hrtimer_try_to_cancel(struct hrtimer *timer);
 
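
Note (not part of the patch): hrtimer_start_on() lets one CPU arm a timer that should fire on another CPU. The request is recorded in an hrtimer_start_on_info, queued on the remote CPU's to_pull list, and delivered via smp_send_pull_timers(). The following is a minimal sketch of a caller, only meaningful under CONFIG_ARCH_HAS_SEND_PULL_TIMERS; the callback, target CPU, and release time are placeholders.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;
static struct hrtimer_start_on_info demo_info;

static enum hrtimer_restart demo_fire(struct hrtimer *t)
{
	/* e.g., release queued jobs on this CPU */
	return HRTIMER_NORESTART;
}

static void demo_arm_on(int cpu, ktime_t release_time)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	demo_timer.function = demo_fire;

	/* demo_info must stay valid until the remote CPU has pulled the timer */
	hrtimer_start_on_info_init(&demo_info);
	hrtimer_start_on(cpu, &demo_info, &demo_timer, release_time,
			 HRTIMER_MODE_ABS);
}
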
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1e2a6db2d7dd..c9ac4fc837ba 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -38,6 +38,7 @@
 #define SCHED_BATCH		3
 /* SCHED_ISO: reserved but not implemented yet */
 #define SCHED_IDLE		5
+#define SCHED_LITMUS		6
 /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
 #define SCHED_RESET_ON_FORK     0x40000000
 
@@ -94,6 +95,9 @@ struct sched_param {
 
 #include <asm/processor.h>
 
+#include <litmus/rt_param.h>
+#include <litmus/preempt.h>
+
 struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
@@ -1159,6 +1163,7 @@ struct sched_rt_entity {
 };
 
 struct rcu_node;
+struct od_table_entry;
 
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
@@ -1243,9 +1248,9 @@ struct task_struct {
 	unsigned long stack_canary;
 #endif
 
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
 	 * p->real_parent->pid)
 	 */
 	struct task_struct *real_parent; /* real parent process */
@@ -1453,6 +1458,13 @@ struct task_struct {
 	int make_it_fail;
 #endif
 	struct prop_local_single dirties;
+
+	/* LITMUS RT parameters and state */
+	struct rt_param rt_param;
+
+	/* references to PI semaphores, etc. */
+	struct od_table_entry *od_table;
+
 #ifdef CONFIG_LATENCYTOP
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
@@ -2014,7 +2026,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s
 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 
 	return ret;
 }
 
 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
 			      sigset_t *mask);
@@ -2290,6 +2302,7 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
 static inline void set_tsk_need_resched(struct task_struct *tsk)
 {
 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+	sched_state_will_schedule(tsk);
 }
 
 static inline void clear_tsk_need_resched(struct task_struct *tsk)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index cfa2d20e35f1..f86d40768e7f 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -80,6 +80,11 @@ int smp_call_function_any(const struct cpumask *mask,
 			  void (*func)(void *info), void *info, int wait);
 
 /*
+ * sends a 'pull timer' event to a remote CPU
+ */
+extern void smp_send_pull_timers(int cpu);
+
+/*
  * Generic and arch helpers
  */
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b232ccc0ee29..1e29bd5b18af 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -74,6 +74,11 @@ extern int tick_is_oneshot_available(void);
 extern struct tick_device *tick_get_device(int cpu);
 
 # ifdef CONFIG_HIGH_RES_TIMERS
+/* LITMUS^RT tick alignment */
+#define LINUX_DEFAULT_TICKS	0
+#define LITMUS_ALIGNED_TICKS	1
+#define LITMUS_STAGGERED_TICKS	2
+
 extern int tick_init_highres(void);
 extern int tick_program_event(ktime_t expires, int force);
 extern void tick_setup_sched_timer(void);
diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h
new file mode 100644
index 000000000000..cf4864a498d8
--- /dev/null
+++ b/include/litmus/bheap.h
@@ -0,0 +1,77 @@
1/* bheap.h -- Binomial Heaps
2 *
3 * (c) 2008, 2009 Bjoern Brandenburg
4 */
5
6#ifndef BHEAP_H
7#define BHEAP_H
8
9#define NOT_IN_HEAP UINT_MAX
10
11struct bheap_node {
12 struct bheap_node* parent;
13 struct bheap_node* next;
14 struct bheap_node* child;
15
16 unsigned int degree;
17 void* value;
18 struct bheap_node** ref;
19};
20
21struct bheap {
22 struct bheap_node* head;
23 /* We cache the minimum of the heap.
24 * This speeds up repeated peek operations.
25 */
26 struct bheap_node* min;
27};
28
29typedef int (*bheap_prio_t)(struct bheap_node* a, struct bheap_node* b);
30
31void bheap_init(struct bheap* heap);
32void bheap_node_init(struct bheap_node** ref_to_bheap_node_ptr, void* value);
33
34static inline int bheap_node_in_heap(struct bheap_node* h)
35{
36 return h->degree != NOT_IN_HEAP;
37}
38
39static inline int bheap_empty(struct bheap* heap)
40{
41 return heap->head == NULL && heap->min == NULL;
42}
43
44/* insert (and reinitialize) a node into the heap */
45void bheap_insert(bheap_prio_t higher_prio,
46 struct bheap* heap,
47 struct bheap_node* node);
48
49/* merge addition into target */
50void bheap_union(bheap_prio_t higher_prio,
51 struct bheap* target,
52 struct bheap* addition);
53
54struct bheap_node* bheap_peek(bheap_prio_t higher_prio,
55 struct bheap* heap);
56
57struct bheap_node* bheap_take(bheap_prio_t higher_prio,
58 struct bheap* heap);
59
60void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap);
61int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node);
62
63void bheap_delete(bheap_prio_t higher_prio,
64 struct bheap* heap,
65 struct bheap_node* node);
66
67/* allocate from memcache */
68struct bheap_node* bheap_node_alloc(int gfp_flags);
69void bheap_node_free(struct bheap_node* hn);
70
71/* allocate a heap node for value and insert into the heap */
72int bheap_add(bheap_prio_t higher_prio, struct bheap* heap,
73 void* value, int gfp_flags);
74
75void* bheap_take_del(bheap_prio_t higher_prio,
76 struct bheap* heap);
77#endif
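
Note (not part of the patch): the heap is generic; ordering is supplied per call through a bheap_prio_t comparator that returns non-zero when its first argument has higher priority, and the cached minimum makes repeated peeks cheap. A small, hypothetical in-kernel sketch follows, storing integer keys in the void* payload; it assumes bheap_take_del() returns the payload of the extracted minimum and frees the node.

#include <linux/gfp.h>
#include <litmus/bheap.h>

static int demo_min_order(struct bheap_node *a, struct bheap_node *b)
{
	/* smaller payload value == higher priority */
	return (unsigned long) a->value < (unsigned long) b->value;
}

static void demo_bheap(void)
{
	struct bheap heap;
	void *min;

	bheap_init(&heap);
	/* bheap_add() allocates a node from the memcache and inserts it */
	bheap_add(demo_min_order, &heap, (void *) 42UL, GFP_ATOMIC);
	bheap_add(demo_min_order, &heap, (void *) 7UL, GFP_ATOMIC);

	/* extracts the minimum (the node holding 7) and frees its node */
	min = bheap_take_del(demo_min_order, &heap);
	(void) min;
}
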
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
new file mode 100644
index 000000000000..732530e63491
--- /dev/null
+++ b/include/litmus/budget.h
@@ -0,0 +1,8 @@
1#ifndef _LITMUS_BUDGET_H_
2#define _LITMUS_BUDGET_H_
3
4/* Update the per-processor enforcement timer (arm/reprogram/cancel) for
5 * the next task. */
6void update_enforcement_timer(struct task_struct* t);
7
8#endif
diff --git a/include/litmus/clustered.h b/include/litmus/clustered.h
new file mode 100644
index 000000000000..0c18dcb15e6c
--- /dev/null
+++ b/include/litmus/clustered.h
@@ -0,0 +1,44 @@
1#ifndef CLUSTERED_H
2#define CLUSTERED_H
3
4/* Which cache level should be used to group CPUs into clusters?
5 * GLOBAL_CLUSTER means that all CPUs form a single cluster (just like under
6 * global scheduling).
7 */
8enum cache_level {
9 GLOBAL_CLUSTER = 0,
10 L1_CLUSTER = 1,
11 L2_CLUSTER = 2,
12 L3_CLUSTER = 3
13};
14
15int parse_cache_level(const char *str, enum cache_level *level);
16const char* cache_level_name(enum cache_level level);
17
18/* expose a cache level in a /proc dir */
19struct proc_dir_entry* create_cluster_file(struct proc_dir_entry* parent,
20 enum cache_level* level);
21
22
23
24struct scheduling_cluster {
25 unsigned int id;
26 /* list of CPUs that are part of this cluster */
27 struct list_head cpus;
28};
29
30struct cluster_cpu {
31 unsigned int id; /* which CPU is this? */
32 struct list_head cluster_list; /* List of the CPUs in this cluster. */
33 struct scheduling_cluster* cluster; /* The cluster that this CPU belongs to. */
34};
35
36int get_cluster_size(enum cache_level level);
37
38int assign_cpus_to_clusters(enum cache_level level,
39 struct scheduling_cluster* clusters[],
40 unsigned int num_clusters,
41 struct cluster_cpu* cpus[],
42 unsigned int num_cpus);
43
44#endif
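
Note (not part of the patch): assign_cpus_to_clusters() expects the caller to have allocated both the cluster descriptors and the per-CPU entries; it then fills in the ids and links each cluster_cpu into its cluster's cpus list for the chosen cache level. A hypothetical setup sketch, assuming get_cluster_size() reports the number of CPUs sharing the given cache level; allocation failures are ignored for brevity.

#include <linux/errno.h>
#include <linux/slab.h>
#include <litmus/clustered.h>

static int demo_setup_clusters(unsigned int num_cpus, enum cache_level level)
{
	int cluster_size = get_cluster_size(level);
	unsigned int num_clusters, i;
	struct scheduling_cluster **clusters;
	struct cluster_cpu **cpus;

	if (cluster_size <= 0)
		return -EINVAL;
	num_clusters = num_cpus / cluster_size;

	clusters = kmalloc(num_clusters * sizeof(*clusters), GFP_KERNEL);
	cpus = kmalloc(num_cpus * sizeof(*cpus), GFP_KERNEL);
	for (i = 0; i < num_clusters; i++)
		clusters[i] = kzalloc(sizeof(**clusters), GFP_KERNEL);
	for (i = 0; i < num_cpus; i++)
		cpus[i] = kzalloc(sizeof(**cpus), GFP_KERNEL);

	/* fills in ids and links each cluster_cpu into its cluster */
	return assign_cpus_to_clusters(level, clusters, num_clusters,
				       cpus, num_cpus);
}
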
diff --git a/include/litmus/debug_trace.h b/include/litmus/debug_trace.h
new file mode 100644
index 000000000000..48d086d5a44c
--- /dev/null
+++ b/include/litmus/debug_trace.h
@@ -0,0 +1,37 @@
1#ifndef LITMUS_DEBUG_TRACE_H
2#define LITMUS_DEBUG_TRACE_H
3
4#ifdef CONFIG_SCHED_DEBUG_TRACE
5void sched_trace_log_message(const char* fmt, ...);
6void dump_trace_buffer(int max);
7#else
8
9#define sched_trace_log_message(fmt, ...)
10
11#endif
12
13extern atomic_t __log_seq_no;
14
15#ifdef CONFIG_SCHED_DEBUG_TRACE_CALLER
16#define TRACE_PREFIX "%d P%d [%s@%s:%d]: "
17#define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \
18 raw_smp_processor_id(), \
19 __FUNCTION__, __FILE__, __LINE__
20#else
21#define TRACE_PREFIX "%d P%d: "
22#define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \
23 raw_smp_processor_id()
24#endif
25
26#define TRACE(fmt, args...) \
27 sched_trace_log_message(TRACE_PREFIX fmt, \
28 TRACE_ARGS, ## args)
29
30#define TRACE_TASK(t, fmt, args...) \
31 TRACE("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \
32 (t)->rt_param.job_params.job_no, ##args)
33
34#define TRACE_CUR(fmt, args...) \
35 TRACE_TASK(current, fmt, ## args)
36
37#endif
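
Note (not part of the patch): all three macros funnel into sched_trace_log_message() and compile away when CONFIG_SCHED_DEBUG_TRACE is off; TRACE_TASK() additionally prints the task's comm, PID, and current job number. A brief usage sketch:

#include <linux/sched.h>
#include <litmus/debug_trace.h>

static void demo_trace(struct task_struct *t, int cpu)
{
	TRACE("demo: rescheduling P%d\n", cpu);
	TRACE_TASK(t, "selected to run on P%d\n", cpu);
	TRACE_CUR("called from %s\n", __FUNCTION__);
}
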
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
new file mode 100644
index 000000000000..2c4266f77c03
--- /dev/null
+++ b/include/litmus/edf_common.h
@@ -0,0 +1,33 @@
1/*
2 * EDF common data structures and utility functions shared by all EDF
3 * based scheduler plugins
4 */
5
6/* CLEANUP: Add comments and make it less messy.
7 *
8 */
9
10#ifndef __UNC_EDF_COMMON_H__
11#define __UNC_EDF_COMMON_H__
12
13#include <litmus/rt_domain.h>
14
15void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
16 release_jobs_t release);
17
18int edf_higher_prio(struct task_struct* first,
19 struct task_struct* second);
20
21#ifdef CONFIG_LITMUS_LOCKING
22/* priority comparison without priority inheritance */
23int edf_higher_base_prio(struct task_struct* first,
24 struct task_struct* second);
25
26int edf_pending_order(struct bheap_node* a, struct bheap_node* b);
27#endif
28
29int edf_ready_order(struct bheap_node* a, struct bheap_node* b);
30
31int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t);
32
33#endif
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
new file mode 100644
index 000000000000..d1ee0d1142d8
--- /dev/null
+++ b/include/litmus/fdso.h
@@ -0,0 +1,77 @@
1/* fdso.h - file descriptor attached shared objects
2 *
3 * (c) 2007--2011 B. Brandenburg, LITMUS^RT project
4 */
5
6#ifndef _LINUX_FDSO_H_
7#define _LINUX_FDSO_H_
8
9#include <linux/list.h>
10#include <asm/atomic.h>
11
12#include <linux/fs.h>
13#include <linux/slab.h>
14
15#define MAX_OBJECT_DESCRIPTORS 85
16
17typedef enum {
18 MIN_OBJ_TYPE = 0,
19
20 FMLP_SEM = 0,
21 SRP_SEM = 1,
22
23 MPCP_SEM = 2,
24 MPCP_VS_SEM = 3,
25 DPCP_SEM = 4,
26
27 OMLP_SEM = 5,
28
29 MAX_OBJ_TYPE = 5
30} obj_type_t;
31
32struct inode_obj_id {
33 struct list_head list;
34 atomic_t count;
35 struct inode* inode;
36
37 obj_type_t type;
38 void* obj;
39 unsigned int id;
40};
41
42struct fdso_ops;
43
44struct od_table_entry {
45 unsigned int used;
46
47 struct inode_obj_id* obj;
48 const struct fdso_ops* class;
49};
50
51struct fdso_ops {
52 int (*create)(void** obj_ref, obj_type_t type, void* __user);
53 void (*destroy)(obj_type_t type, void*);
54 int (*open) (struct od_table_entry*, void* __user);
55 int (*close) (struct od_table_entry*);
56};
57
58/* translate a userspace supplied od into the raw table entry
59 * returns NULL if od is invalid
60 */
61struct od_table_entry* get_entry_for_od(int od);
62
63/* translate a userspace supplied od into the associated object
64 * returns NULL if od is invalid
65 */
66static inline void* od_lookup(int od, obj_type_t type)
67{
68 struct od_table_entry* e = get_entry_for_od(od);
69 return e && e->obj->type == type ? e->obj->obj : NULL;
70}
71
72#define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM))
73#define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM))
74#define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID))
75
76
77#endif
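
Note (not part of the patch): user space refers to shared objects by object descriptor (od); get_entry_for_od() and od_lookup() hand back the attached object only if the stored type matches, so a syscall handler gets a type-confusion check for free. A sketch of that pattern using only the declarations above:

#include <linux/errno.h>
#include <litmus/fdso.h>

static int demo_check_od(int od)
{
	struct od_table_entry *entry = get_entry_for_od(od);

	if (!entry || entry->obj->type != SRP_SEM)
		return -EINVAL;	/* bad od, or od refers to a non-SRP object */
	/* entry->obj->obj points to the protocol-specific semaphore */
	return 0;
}
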
diff --git a/include/litmus/feather_buffer.h b/include/litmus/feather_buffer.h
new file mode 100644
index 000000000000..6c18277fdfc9
--- /dev/null
+++ b/include/litmus/feather_buffer.h
@@ -0,0 +1,94 @@
1#ifndef _FEATHER_BUFFER_H_
2#define _FEATHER_BUFFER_H_
3
4/* requires UINT_MAX and memcpy */
5
6#define SLOT_FREE 0
7#define SLOT_BUSY 1
8#define SLOT_READY 2
9
10struct ft_buffer {
11 unsigned int slot_count;
12 unsigned int slot_size;
13
14 int free_count;
15 unsigned int write_idx;
16 unsigned int read_idx;
17
18 char* slots;
19 void* buffer_mem;
20 unsigned int failed_writes;
21};
22
23static inline int init_ft_buffer(struct ft_buffer* buf,
24 unsigned int slot_count,
25 unsigned int slot_size,
26 char* slots,
27 void* buffer_mem)
28{
29 int i = 0;
30 if (!slot_count || UINT_MAX % slot_count != slot_count - 1) {
31		/* The slot count must divide UINT_MAX + 1 so that when it
32 * wraps around the index correctly points to 0.
33 */
34 return 0;
35 } else {
36 buf->slot_count = slot_count;
37 buf->slot_size = slot_size;
38 buf->slots = slots;
39 buf->buffer_mem = buffer_mem;
40 buf->free_count = slot_count;
41 buf->write_idx = 0;
42 buf->read_idx = 0;
43 buf->failed_writes = 0;
44 for (i = 0; i < slot_count; i++)
45 buf->slots[i] = SLOT_FREE;
46 return 1;
47 }
48}
49
50static inline int ft_buffer_start_write(struct ft_buffer* buf, void **ptr)
51{
52 int free = fetch_and_dec(&buf->free_count);
53 unsigned int idx;
54 if (free <= 0) {
55 fetch_and_inc(&buf->free_count);
56 *ptr = 0;
57 fetch_and_inc(&buf->failed_writes);
58 return 0;
59 } else {
60 idx = fetch_and_inc((int*) &buf->write_idx) % buf->slot_count;
61 buf->slots[idx] = SLOT_BUSY;
62 *ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size;
63 return 1;
64 }
65}
66
67static inline void ft_buffer_finish_write(struct ft_buffer* buf, void *ptr)
68{
69 unsigned int idx = ((char*) ptr - (char*) buf->buffer_mem) / buf->slot_size;
70 buf->slots[idx] = SLOT_READY;
71}
72
73
74/* exclusive reader access is assumed */
75static inline int ft_buffer_read(struct ft_buffer* buf, void* dest)
76{
77 unsigned int idx;
78 if (buf->free_count == buf->slot_count)
79 /* nothing available */
80 return 0;
81 idx = buf->read_idx % buf->slot_count;
82 if (buf->slots[idx] == SLOT_READY) {
83 memcpy(dest, ((char*) buf->buffer_mem) + idx * buf->slot_size,
84 buf->slot_size);
85 buf->slots[idx] = SLOT_FREE;
86 buf->read_idx++;
87 fetch_and_inc(&buf->free_count);
88 return 1;
89 } else
90 return 0;
91}
92
93
94#endif
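
Note (not part of the patch): ft_buffer is a lock-free, single-reader trace buffer. Writers reserve a slot with ft_buffer_start_write(), fill it, and publish it with ft_buffer_finish_write(); the exclusive reader drains SLOT_READY entries in order. A hypothetical setup with eight 16-byte slots follows; the slot count must divide UINT_MAX + 1, i.e., be a power of two.

#include <linux/string.h>
#include <litmus/feather_trace.h>	/* fetch_and_inc()/fetch_and_dec() */
#include <litmus/feather_buffer.h>

#define DEMO_SLOTS	8
#define DEMO_SLOT_SIZE	16

static char demo_slot_flags[DEMO_SLOTS];
static char demo_slot_mem[DEMO_SLOTS * DEMO_SLOT_SIZE];
static struct ft_buffer demo_buf;

static void demo_buf_init(void)
{
	init_ft_buffer(&demo_buf, DEMO_SLOTS, DEMO_SLOT_SIZE,
		       demo_slot_flags, demo_slot_mem);
}

static void demo_buf_write(const char *record)	/* DEMO_SLOT_SIZE bytes */
{
	void *slot;

	if (ft_buffer_start_write(&demo_buf, &slot)) {
		memcpy(slot, record, DEMO_SLOT_SIZE);
		ft_buffer_finish_write(&demo_buf, slot);
	}
	/* otherwise the buffer was full and failed_writes was incremented */
}

static int demo_buf_read(char *dest)		/* DEMO_SLOT_SIZE bytes */
{
	return ft_buffer_read(&demo_buf, dest);
}
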
diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h
new file mode 100644
index 000000000000..028dfb206fb0
--- /dev/null
+++ b/include/litmus/feather_trace.h
@@ -0,0 +1,65 @@
1#ifndef _FEATHER_TRACE_H_
2#define _FEATHER_TRACE_H_
3
4#include <asm/atomic.h>
5
6int ft_enable_event(unsigned long id);
7int ft_disable_event(unsigned long id);
8int ft_is_event_enabled(unsigned long id);
9int ft_disable_all_events(void);
10
11/* atomic_* functions are inline anyway */
12static inline int fetch_and_inc(int *val)
13{
14 return atomic_add_return(1, (atomic_t*) val) - 1;
15}
16
17static inline int fetch_and_dec(int *val)
18{
19 return atomic_sub_return(1, (atomic_t*) val) + 1;
20}
21
22/* Don't use rewriting implementation if kernel text pages are read-only.
23 * Ftrace gets around this by using the identity mapping, but that's more
24 * effort than is warranted right now for Feather-Trace.
25 * Eventually, it may make sense to replace Feather-Trace with ftrace.
26 */
27#if defined(CONFIG_ARCH_HAS_FEATHER_TRACE) && !defined(CONFIG_DEBUG_RODATA)
28
29#include <asm/feather_trace.h>
30
31#else /* !__ARCH_HAS_FEATHER_TRACE */
32
33/* provide default implementation */
34
35#include <asm/timex.h> /* for get_cycles() */
36
37static inline unsigned long long ft_timestamp(void)
38{
39 return get_cycles();
40}
41
42#define feather_callback
43
44#define MAX_EVENTS 1024
45
46extern int ft_events[MAX_EVENTS];
47
48#define ft_event(id, callback) \
49 if (ft_events[id]) callback();
50
51#define ft_event0(id, callback) \
52 if (ft_events[id]) callback(id);
53
54#define ft_event1(id, callback, param) \
55 if (ft_events[id]) callback(id, param);
56
57#define ft_event2(id, callback, param, param2) \
58 if (ft_events[id]) callback(id, param, param2);
59
60#define ft_event3(id, callback, p, p2, p3) \
61 if (ft_events[id]) callback(id, p, p2, p3);
62
63#endif /* __ARCH_HAS_FEATHER_TRACE */
64
65#endif
diff --git a/include/litmus/fp_common.h b/include/litmus/fp_common.h
new file mode 100644
index 000000000000..dd1f7bf1e347
--- /dev/null
+++ b/include/litmus/fp_common.h
@@ -0,0 +1,105 @@
1/* Fixed-priority scheduler support.
2 */
3
4#ifndef __FP_COMMON_H__
5#define __FP_COMMON_H__
6
7#include <litmus/rt_domain.h>
8
9#include <asm/bitops.h>
10
11
12void fp_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
13 release_jobs_t release);
14
15int fp_higher_prio(struct task_struct* first,
16 struct task_struct* second);
17
18int fp_ready_order(struct bheap_node* a, struct bheap_node* b);
19
20#define FP_PRIO_BIT_WORDS (LITMUS_MAX_PRIORITY / BITS_PER_LONG)
21
22#if (LITMUS_MAX_PRIORITY % BITS_PER_LONG)
23#error LITMUS_MAX_PRIORITY must be a multiple of BITS_PER_LONG
24#endif
25
26/* bitmask-indexed priority queue */
27struct fp_prio_queue {
28 unsigned long bitmask[FP_PRIO_BIT_WORDS];
29 struct bheap queue[LITMUS_MAX_PRIORITY];
30};
31
32void fp_prio_queue_init(struct fp_prio_queue* q);
33
34static inline void fpq_set(struct fp_prio_queue* q, unsigned int index)
35{
36 unsigned long *word = q->bitmask + (index / BITS_PER_LONG);
37 __set_bit(index % BITS_PER_LONG, word);
38}
39
40static inline void fpq_clear(struct fp_prio_queue* q, unsigned int index)
41{
42 unsigned long *word = q->bitmask + (index / BITS_PER_LONG);
43 __clear_bit(index % BITS_PER_LONG, word);
44}
45
46static inline unsigned int fpq_find(struct fp_prio_queue* q)
47{
48 int i;
49
50 /* loop optimizer should unroll this */
51 for (i = 0; i < FP_PRIO_BIT_WORDS; i++)
52 if (q->bitmask[i])
53 return __ffs(q->bitmask[i]) + i * BITS_PER_LONG;
54
55 return LITMUS_MAX_PRIORITY; /* nothing found */
56}
57
58static inline void fp_prio_add(struct fp_prio_queue* q, struct task_struct* t, unsigned int index)
59{
60
61 BUG_ON(bheap_node_in_heap(tsk_rt(t)->heap_node));
62
63 fpq_set(q, index);
64 bheap_insert(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node);
65}
66
67static inline void fp_prio_remove(struct fp_prio_queue* q, struct task_struct* t, unsigned int index)
68{
69 BUG_ON(!is_queued(t));
70
71 bheap_delete(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node);
72 if (likely(bheap_empty(&q->queue[index])))
73 fpq_clear(q, index);
74}
75
76static inline struct task_struct* fp_prio_peek(struct fp_prio_queue* q)
77{
78 unsigned int idx = fpq_find(q);
79 struct bheap_node* hn;
80
81 if (idx < LITMUS_MAX_PRIORITY) {
82 hn = bheap_peek(fp_ready_order, &q->queue[idx]);
83 return bheap2task(hn);
84 } else
85 return NULL;
86}
87
88static inline struct task_struct* fp_prio_take(struct fp_prio_queue* q)
89{
90 unsigned int idx = fpq_find(q);
91 struct bheap_node* hn;
92
93 if (idx < LITMUS_MAX_PRIORITY) {
94 hn = bheap_take(fp_ready_order, &q->queue[idx]);
95 if (likely(bheap_empty(&q->queue[idx])))
96 fpq_clear(q, idx);
97 return bheap2task(hn);
98 } else
99 return NULL;
100}
101
102int fp_preemption_needed(struct fp_prio_queue* q, struct task_struct *t);
103
104
105#endif
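
Note (not part of the patch): fpq_find() locates the highest-priority non-empty bucket by scanning the bitmask words. For example, with BITS_PER_LONG == 64, a task enqueued at priority index 70 sets bit 70 % 64 = 6 in word 70 / 64 = 1, so once word 0 is empty fpq_find() returns __ffs(bitmask[1]) + 64 = 70. The sketch below shows the matching add/take pair:

#include <litmus/fp_common.h>

static struct fp_prio_queue demo_q;

static void demo_fp(struct task_struct *t, unsigned int prio_idx)
{
	struct task_struct *next;

	fp_prio_queue_init(&demo_q);

	/* marks bucket prio_idx non-empty and queues t's heap node there */
	fp_prio_add(&demo_q, t, prio_idx);

	/* fpq_find() + bheap_take(); clears the bit if the bucket drains */
	next = fp_prio_take(&demo_q);	/* == t */
	(void) next;
}
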
diff --git a/include/litmus/ftdev.h b/include/litmus/ftdev.h
new file mode 100644
index 000000000000..0b959874dd70
--- /dev/null
+++ b/include/litmus/ftdev.h
@@ -0,0 +1,55 @@
1#ifndef _LITMUS_FTDEV_H_
2#define _LITMUS_FTDEV_H_
3
4#include <litmus/feather_trace.h>
5#include <litmus/feather_buffer.h>
6#include <linux/mutex.h>
7#include <linux/cdev.h>
8
9#define FTDEV_ENABLE_CMD 0
10#define FTDEV_DISABLE_CMD 1
11
12struct ftdev;
13
14/* return 0 if buffer can be opened, otherwise -$REASON */
15typedef int (*ftdev_can_open_t)(struct ftdev* dev, unsigned int buf_no);
16/* return 0 on success, otherwise -$REASON */
17typedef int (*ftdev_alloc_t)(struct ftdev* dev, unsigned int buf_no);
18typedef void (*ftdev_free_t)(struct ftdev* dev, unsigned int buf_no);
19/* Let devices handle writes from userspace. No synchronization provided. */
20typedef ssize_t (*ftdev_write_t)(struct ft_buffer* buf, size_t len, const char __user *from);
21
22struct ftdev_event;
23
24struct ftdev_minor {
25 struct ft_buffer* buf;
26 unsigned int readers;
27 struct mutex lock;
28 /* FIXME: filter for authorized events */
29 struct ftdev_event* events;
30 struct device* device;
31 struct ftdev* ftdev;
32};
33
34struct ftdev {
35 dev_t major;
36 struct cdev cdev;
37 struct class* class;
38 const char* name;
39 struct ftdev_minor* minor;
40 unsigned int minor_cnt;
41 ftdev_alloc_t alloc;
42 ftdev_free_t free;
43 ftdev_can_open_t can_open;
44 ftdev_write_t write;
45};
46
47struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size);
48void free_ft_buffer(struct ft_buffer* buf);
49
50int ftdev_init( struct ftdev* ftdev, struct module* owner,
51 const int minor_cnt, const char* name);
52void ftdev_exit(struct ftdev* ftdev);
53int register_ftdev(struct ftdev* ftdev);
54
55#endif
diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h
new file mode 100644
index 000000000000..9bd361ef3943
--- /dev/null
+++ b/include/litmus/jobs.h
@@ -0,0 +1,9 @@
1#ifndef __LITMUS_JOBS_H__
2#define __LITMUS_JOBS_H__
3
4void prepare_for_next_period(struct task_struct *t);
5void release_at(struct task_struct *t, lt_t start);
6long complete_job(void);
7
8#endif
9
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
new file mode 100644
index 000000000000..31ac72eddef7
--- /dev/null
+++ b/include/litmus/litmus.h
@@ -0,0 +1,292 @@
1/*
2 * Constant definitions related to
3 * scheduling policy.
4 */
5
6#ifndef _LINUX_LITMUS_H_
7#define _LINUX_LITMUS_H_
8
9#include <litmus/debug_trace.h>
10
11#ifdef CONFIG_RELEASE_MASTER
12extern atomic_t release_master_cpu;
13#endif
14
15/* in_list - is a given list_head queued on some list?
16 */
17static inline int in_list(struct list_head* list)
18{
19 return !( /* case 1: deleted */
20 (list->next == LIST_POISON1 &&
21 list->prev == LIST_POISON2)
22 ||
23 /* case 2: initialized */
24 (list->next == list &&
25 list->prev == list)
26 );
27}
28
29#define NO_CPU 0xffffffff
30
31void litmus_fork(struct task_struct *tsk);
32void litmus_exec(void);
33/* clean up real-time state of a task */
34void exit_litmus(struct task_struct *dead_tsk);
35
36long litmus_admit_task(struct task_struct *tsk);
37void litmus_exit_task(struct task_struct *tsk);
38
39#define is_realtime(t) ((t)->policy == SCHED_LITMUS)
40#define rt_transition_pending(t) \
41 ((t)->rt_param.transition_pending)
42
43#define tsk_rt(t) (&(t)->rt_param)
44
45/* Realtime utility macros */
46#define get_rt_flags(t) (tsk_rt(t)->flags)
47#define set_rt_flags(t,f) (tsk_rt(t)->flags=(f))
48#define get_exec_cost(t) (tsk_rt(t)->task_params.exec_cost)
49#define get_exec_time(t) (tsk_rt(t)->job_params.exec_time)
50#define get_rt_period(t) (tsk_rt(t)->task_params.period)
51#define get_rt_phase(t) (tsk_rt(t)->task_params.phase)
52#define get_partition(t) (tsk_rt(t)->task_params.cpu)
53#define get_priority(t) (tsk_rt(t)->task_params.priority)
54#define get_deadline(t) (tsk_rt(t)->job_params.deadline)
55#define get_release(t) (tsk_rt(t)->job_params.release)
56#define get_class(t) (tsk_rt(t)->task_params.cls)
57
58#define is_priority_boosted(t) (tsk_rt(t)->priority_boosted)
59#define get_boost_start(t) (tsk_rt(t)->boost_start_time)
60
61inline static int budget_exhausted(struct task_struct* t)
62{
63 return get_exec_time(t) >= get_exec_cost(t);
64}
65
66inline static lt_t budget_remaining(struct task_struct* t)
67{
68 if (!budget_exhausted(t))
69 return get_exec_cost(t) - get_exec_time(t);
70 else
71 /* avoid overflow */
72 return 0;
73}
74
75#define budget_enforced(t) (tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT)
76
77#define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \
78 == PRECISE_ENFORCEMENT)
79
80#define is_hrt(t) \
81 (tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
82#define is_srt(t) \
83 (tsk_rt(t)->task_params.cls == RT_CLASS_SOFT)
84#define is_be(t) \
85 (tsk_rt(t)->task_params.cls == RT_CLASS_BEST_EFFORT)
86
87/* Our notion of time within LITMUS: kernel monotonic time. */
88static inline lt_t litmus_clock(void)
89{
90 return ktime_to_ns(ktime_get());
91}
92
93/* A macro to convert from nanoseconds to ktime_t. */
94#define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t)
95
96#define get_domain(t) (tsk_rt(t)->domain)
97
98/* Honor the flag in the preempt_count variable that is set
99 * when scheduling is in progress.
100 */
101#define is_running(t) \
102 ((t)->state == TASK_RUNNING || \
103 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
104
105#define is_blocked(t) \
106 (!is_running(t))
107#define is_released(t, now) \
108 (lt_before_eq(get_release(t), now))
109#define is_tardy(t, now) \
110 (lt_before_eq(tsk_rt(t)->job_params.deadline, now))
111
112/* real-time comparison macros */
113#define earlier_deadline(a, b) (lt_before(\
114 (a)->rt_param.job_params.deadline,\
115 (b)->rt_param.job_params.deadline))
116#define earlier_release(a, b) (lt_before(\
117 (a)->rt_param.job_params.release,\
118 (b)->rt_param.job_params.release))
119
120void preempt_if_preemptable(struct task_struct* t, int on_cpu);
121
122#ifdef CONFIG_LITMUS_LOCKING
123void srp_ceiling_block(void);
124#else
125#define srp_ceiling_block() /* nothing */
126#endif
127
128#define bheap2task(hn) ((struct task_struct*) hn->value)
129
130static inline struct control_page* get_control_page(struct task_struct *t)
131{
132 return tsk_rt(t)->ctrl_page;
133}
134
135static inline int has_control_page(struct task_struct* t)
136{
137 return tsk_rt(t)->ctrl_page != NULL;
138}
139
140#ifdef CONFIG_NP_SECTION
141
142static inline int is_kernel_np(struct task_struct *t)
143{
144 return tsk_rt(t)->kernel_np;
145}
146
147static inline int is_user_np(struct task_struct *t)
148{
149 return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0;
150}
151
152static inline void request_exit_np(struct task_struct *t)
153{
154 if (is_user_np(t)) {
155 /* Set the flag that tells user space to call
156 * into the kernel at the end of a critical section. */
157 if (likely(tsk_rt(t)->ctrl_page)) {
158 TRACE_TASK(t, "setting delayed_preemption flag\n");
159 tsk_rt(t)->ctrl_page->sched.np.preempt = 1;
160 }
161 }
162}
163
164static inline void make_np(struct task_struct *t)
165{
166 tsk_rt(t)->kernel_np++;
167}
168
169/* Caller should check if preemption is necessary when
170 * the function returns 0.
171 */
172static inline int take_np(struct task_struct *t)
173{
174 return --tsk_rt(t)->kernel_np;
175}
176
177/* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */
178static inline int request_exit_np_atomic(struct task_struct *t)
179{
180 union np_flag old, new;
181 int ok;
182
183 if (tsk_rt(t)->ctrl_page) {
184 old.raw = tsk_rt(t)->ctrl_page->sched.raw;
185 if (old.np.flag == 0) {
186 /* no longer non-preemptive */
187 return 0;
188 } else if (old.np.preempt) {
189 /* already set, nothing for us to do */
190 TRACE_TASK(t, "not setting np.preempt flag again\n");
191 return 1;
192 } else {
193 /* non preemptive and flag not set */
194 new.raw = old.raw;
195 new.np.preempt = 1;
196 /* if we get old back, then we atomically set the flag */
197 ok = cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
198 /* If we raced with a concurrent change, then so be
199 * it. Deliver it by IPI. We don't want an unbounded
200 * retry loop here since tasks might exploit that to
201 * keep the kernel busy indefinitely. */
202 TRACE_TASK(t, "request_exit_np => %d\n", ok);
203 return ok;
204 }
205 } else
206 return 0;
207}
208
209#else
210
211static inline int is_kernel_np(struct task_struct* t)
212{
213 return 0;
214}
215
216static inline int is_user_np(struct task_struct* t)
217{
218 return 0;
219}
220
221static inline void request_exit_np(struct task_struct *t)
222{
223 /* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */
224 BUG();
225}
226
227static inline int request_exit_np_atomic(struct task_struct *t)
228{
229 return 0;
230}
231
232#endif
233
234static inline void clear_exit_np(struct task_struct *t)
235{
236 if (likely(tsk_rt(t)->ctrl_page))
237 tsk_rt(t)->ctrl_page->sched.np.preempt = 0;
238}
239
240static inline int is_np(struct task_struct *t)
241{
242#ifdef CONFIG_SCHED_DEBUG_TRACE
243 int kernel, user;
244 kernel = is_kernel_np(t);
245 user = is_user_np(t);
246 if (kernel || user)
247 TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
248
249 kernel, user);
250 return kernel || user;
251#else
252 return unlikely(is_kernel_np(t) || is_user_np(t));
253#endif
254}
255
256static inline int is_present(struct task_struct* t)
257{
258 return t && tsk_rt(t)->present;
259}
260
261
262/* make the unit explicit */
263typedef unsigned long quanta_t;
264
265enum round {
266 FLOOR,
267 CEIL
268};
269
270
271/* Tick period is used to convert ns-specified execution
272 * costs and periods into tick-based equivalents.
273 */
274extern ktime_t tick_period;
275
276static inline quanta_t time2quanta(lt_t time, enum round round)
277{
278 s64 quantum_length = ktime_to_ns(tick_period);
279
280 if (do_div(time, quantum_length) && round == CEIL)
281 time++;
282 return (quanta_t) time;
283}
284
285/* By how much is cpu staggered behind CPU 0? */
286u64 cpu_stagger_offset(int cpu);
287
288#define TS_SYSCALL_IN_START \
289 if (has_control_page(current)) \
290 __TS_SYSCALL_IN_START(&get_control_page(current)->ts_syscall_start);
291
292#endif
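
Two notes on the helpers above (not part of the patch). time2quanta() rounds a nanosecond value to scheduling quanta: with a 1 ms tick (tick_period = 1,000,000 ns), time2quanta(2500000, FLOOR) yields 2 and time2quanta(2500000, CEIL) yields 3, because do_div() leaves a non-zero remainder. The budget macros combine as in this hedged sketch, which uses litmus_reschedule_local() from litmus/preempt.h:

#include <litmus/litmus.h>
#include <litmus/preempt.h>

static void demo_budget_check(struct task_struct *t)
{
	if (budget_enforced(t) && budget_exhausted(t))
		/* ask the local CPU to pick someone else */
		litmus_reschedule_local();
	else
		TRACE_TASK(t, "%llu ns of budget remaining\n",
			   budget_remaining(t));
}
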
diff --git a/include/litmus/litmus_proc.h b/include/litmus/litmus_proc.h
new file mode 100644
index 000000000000..6800e725d48c
--- /dev/null
+++ b/include/litmus/litmus_proc.h
@@ -0,0 +1,25 @@
1#include <litmus/sched_plugin.h>
2#include <linux/proc_fs.h>
3
4int __init init_litmus_proc(void);
5void exit_litmus_proc(void);
6
7/*
8 * On success, returns 0 and sets the pointer to the location of the new
9 * proc dir entry, otherwise returns an error code and sets pde to NULL.
10 */
11long make_plugin_proc_dir(struct sched_plugin* plugin,
12 struct proc_dir_entry** pde);
13
14/*
15 * Plugins should deallocate all child proc directory entries before
16 * calling this, to avoid memory leaks.
17 */
18void remove_plugin_proc_dir(struct sched_plugin* plugin);
19
20
21/* Copy at most size-1 bytes from ubuf into kbuf, null-terminate buf, and
22 * remove a '\n' if present. Returns the number of bytes that were read or
23 * -EFAULT. */
24int copy_and_chomp(char *kbuf, unsigned long ksize,
25 __user const char* ubuf, unsigned long ulength);
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
new file mode 100644
index 000000000000..4d7b870cb443
--- /dev/null
+++ b/include/litmus/locking.h
@@ -0,0 +1,28 @@
1#ifndef LITMUS_LOCKING_H
2#define LITMUS_LOCKING_H
3
4struct litmus_lock_ops;
5
6/* Generic base struct for LITMUS^RT userspace semaphores.
7 * This structure should be embedded in protocol-specific semaphores.
8 */
9struct litmus_lock {
10 struct litmus_lock_ops *ops;
11 int type;
12};
13
14struct litmus_lock_ops {
15 /* Current task tries to obtain / drop a reference to a lock.
16 * Optional methods, allowed by default. */
17 int (*open)(struct litmus_lock*, void* __user);
18 int (*close)(struct litmus_lock*);
19
20 /* Current tries to lock/unlock this lock (mandatory methods). */
21 int (*lock)(struct litmus_lock*);
22 int (*unlock)(struct litmus_lock*);
23
24 /* The lock is no longer being referenced (mandatory method). */
25 void (*deallocate)(struct litmus_lock*);
26};
27
28#endif
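
Note (not part of the patch): a protocol-specific semaphore is expected to embed struct litmus_lock so the generic ops table can dispatch on it; inside a callback, container_of() recovers the outer structure. A minimal sketch with hypothetical demo_* names:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <litmus/locking.h>

struct demo_semaphore {
	struct litmus_lock	litmus_lock;	/* generic part */
	struct task_struct	*owner;		/* protocol-specific state */
};

static int demo_lock(struct litmus_lock *l)
{
	struct demo_semaphore *sem =
		container_of(l, struct demo_semaphore, litmus_lock);
	/* ... acquire or block according to the protocol ... */
	(void) sem;
	return 0;
}

static int demo_unlock(struct litmus_lock *l)
{
	return 0;
}

static void demo_deallocate(struct litmus_lock *l)
{
	kfree(container_of(l, struct demo_semaphore, litmus_lock));
}

static struct litmus_lock_ops demo_ops = {
	.lock		= demo_lock,
	.unlock		= demo_unlock,
	.deallocate	= demo_deallocate,
};
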
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
new file mode 100644
index 000000000000..f3cf29ad87ee
--- /dev/null
+++ b/include/litmus/preempt.h
@@ -0,0 +1,165 @@
1#ifndef LITMUS_PREEMPT_H
2#define LITMUS_PREEMPT_H
3
4#include <linux/types.h>
5#include <linux/cache.h>
6#include <linux/percpu.h>
7#include <asm/atomic.h>
8
9#include <litmus/debug_trace.h>
10
11extern DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state);
12
13//#ifdef CONFIG_DEBUG_KERNEL
14#if 0
15const char* sched_state_name(int s);
16#define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args)
17#else
18#define TRACE_STATE(fmt, args...) /* ignore */
19#endif
20
21#define VERIFY_SCHED_STATE(x) \
22 do { int __s = get_sched_state(); \
23 if ((__s & (x)) == 0) \
24 TRACE_STATE("INVALID s=0x%x (%s) not " \
25 "in 0x%x (%s) [%s]\n", \
26 __s, sched_state_name(__s), \
27 (x), #x, __FUNCTION__); \
28 } while (0);
29
30#define TRACE_SCHED_STATE_CHANGE(x, y, cpu) \
31 TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n", \
32 cpu, (x), sched_state_name(x), \
33 (y), sched_state_name(y))
34
35
36typedef enum scheduling_state {
37 TASK_SCHEDULED = (1 << 0), /* The currently scheduled task is the one that
38 * should be scheduled, and the processor does not
39 * plan to invoke schedule(). */
40 SHOULD_SCHEDULE = (1 << 1), /* A remote processor has determined that the
41 * processor should reschedule, but this has not
42 * been communicated yet (IPI still pending). */
43 WILL_SCHEDULE = (1 << 2), /* The processor has noticed that it has to
44 * reschedule and will do so shortly. */
45 TASK_PICKED = (1 << 3), /* The processor is currently executing schedule(),
46 * has selected a new task to schedule, but has not
47 * yet performed the actual context switch. */
48 PICKED_WRONG_TASK = (1 << 4), /* The processor has not yet performed the context
49 * switch, but a remote processor has already
50 * determined that a higher-priority task became
51 * eligible after the task was picked. */
52} sched_state_t;
53
54static inline sched_state_t get_sched_state_on(int cpu)
55{
56 return atomic_read(&per_cpu(resched_state, cpu));
57}
58
59static inline sched_state_t get_sched_state(void)
60{
61 return atomic_read(&__get_cpu_var(resched_state));
62}
63
64static inline int is_in_sched_state(int possible_states)
65{
66 return get_sched_state() & possible_states;
67}
68
69static inline int cpu_is_in_sched_state(int cpu, int possible_states)
70{
71 return get_sched_state_on(cpu) & possible_states;
72}
73
74static inline void set_sched_state(sched_state_t s)
75{
76 TRACE_SCHED_STATE_CHANGE(get_sched_state(), s, smp_processor_id());
77 atomic_set(&__get_cpu_var(resched_state), s);
78}
79
80static inline int sched_state_transition(sched_state_t from, sched_state_t to)
81{
82 sched_state_t old_state;
83
84 old_state = atomic_cmpxchg(&__get_cpu_var(resched_state), from, to);
85 if (old_state == from) {
86 TRACE_SCHED_STATE_CHANGE(from, to, smp_processor_id());
87 return 1;
88 } else
89 return 0;
90}
91
92static inline int sched_state_transition_on(int cpu,
93 sched_state_t from,
94 sched_state_t to)
95{
96 sched_state_t old_state;
97
98 old_state = atomic_cmpxchg(&per_cpu(resched_state, cpu), from, to);
99 if (old_state == from) {
100 TRACE_SCHED_STATE_CHANGE(from, to, cpu);
101 return 1;
102 } else
103 return 0;
104}
105
106/* Plugins must call this function after they have decided which job to
107 * schedule next. IMPORTANT: this function must be called while still holding
108 * the lock that is used to serialize scheduling decisions.
109 *
110 * (Ideally, we would like to use runqueue locks for this purpose, but that
111 * would lead to deadlocks with the migration code.)
112 */
113static inline void sched_state_task_picked(void)
114{
115 VERIFY_SCHED_STATE(WILL_SCHEDULE);
116
117	/* WILL_SCHEDULE has only a local transition => simple store is ok */
118 set_sched_state(TASK_PICKED);
119}
120
121static inline void sched_state_entered_schedule(void)
122{
123 /* Update state for the case that we entered schedule() not due to
124 * set_tsk_need_resched() */
125 set_sched_state(WILL_SCHEDULE);
126}
127
128/* Called by schedule() to check if the scheduling decision is still valid
129 * after a context switch. Returns 1 if the CPU needs to reschedule. */
130static inline int sched_state_validate_switch(void)
131{
132 int left_state_ok = 0;
133
134 VERIFY_SCHED_STATE(PICKED_WRONG_TASK | TASK_PICKED);
135
136 if (is_in_sched_state(TASK_PICKED)) {
137 /* Might be good; let's try to transition out of this
138 * state. This must be done atomically since remote processors
139 * may try to change the state, too. */
140 left_state_ok = sched_state_transition(TASK_PICKED, TASK_SCHEDULED);
141 }
142
143 if (!left_state_ok) {
144 /* We raced with a higher-priority task arrival => not
145 * valid. The CPU needs to reschedule. */
146 set_sched_state(WILL_SCHEDULE);
147 return 1;
148 } else
149 return 0;
150}
151
152/* State transition events. See litmus/preempt.c for details. */
153void sched_state_will_schedule(struct task_struct* tsk);
154void sched_state_ipi(void);
155/* Cause a CPU (remote or local) to reschedule. */
156void litmus_reschedule(int cpu);
157void litmus_reschedule_local(void);
158
159#ifdef CONFIG_DEBUG_KERNEL
160void sched_state_plugin_check(void);
161#else
162#define sched_state_plugin_check() /* no check */
163#endif
164
165#endif
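
Note (not part of the patch): the state machine above exists so that a remote "please reschedule" request arriving between picking a task and context-switching to it is not lost. The plugin calls sched_state_task_picked() while still holding its scheduling lock, and the core later calls sched_state_validate_switch() to detect the race and loop back into schedule(). From a plugin's perspective the contract is small, as in this sketch; demo_lock and demo_pick_next_job() are placeholders.

#include <linux/spinlock.h>
#include <litmus/preempt.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

static struct task_struct *demo_pick_next_job(void);	/* plugin-specific */

static struct task_struct *demo_schedule(struct task_struct *prev)
{
	struct task_struct *next;

	raw_spin_lock(&demo_lock);
	next = demo_pick_next_job();
	/* must be called while the scheduling lock is still held */
	sched_state_task_picked();
	raw_spin_unlock(&demo_lock);

	return next;
}
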
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
new file mode 100644
index 000000000000..ac249292e866
--- /dev/null
+++ b/include/litmus/rt_domain.h
@@ -0,0 +1,182 @@
1/* CLEANUP: Add comments and make it less messy.
2 *
3 */
4
5#ifndef __UNC_RT_DOMAIN_H__
6#define __UNC_RT_DOMAIN_H__
7
8#include <litmus/bheap.h>
9
10#define RELEASE_QUEUE_SLOTS 127 /* prime */
11
12struct _rt_domain;
13
14typedef int (*check_resched_needed_t)(struct _rt_domain *rt);
15typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks);
16
17struct release_queue {
18 /* each slot maintains a list of release heaps sorted
19 * by release time */
20 struct list_head slot[RELEASE_QUEUE_SLOTS];
21};
22
23typedef struct _rt_domain {
24 /* runnable rt tasks are in here */
25 raw_spinlock_t ready_lock;
26 struct bheap ready_queue;
27
28 /* real-time tasks waiting for release are in here */
29 raw_spinlock_t release_lock;
30 struct release_queue release_queue;
31
32#ifdef CONFIG_RELEASE_MASTER
33 int release_master;
34#endif
35
36 /* for moving tasks to the release queue */
37 raw_spinlock_t tobe_lock;
38 struct list_head tobe_released;
39
40 /* how do we check if we need to kick another CPU? */
41 check_resched_needed_t check_resched;
42
43 /* how do we release jobs? */
44 release_jobs_t release_jobs;
45
46 /* how are tasks ordered in the ready queue? */
47 bheap_prio_t order;
48} rt_domain_t;
49
50struct release_heap {
51 /* list_head for per-time-slot list */
52 struct list_head list;
53 lt_t release_time;
54 /* all tasks to be released at release_time */
55 struct bheap heap;
56 /* used to trigger the release */
57 struct hrtimer timer;
58
59#ifdef CONFIG_RELEASE_MASTER
60 /* used to delegate releases */
61 struct hrtimer_start_on_info info;
62#endif
63 /* required for the timer callback */
64 rt_domain_t* dom;
65};
66
67
68static inline struct task_struct* __next_ready(rt_domain_t* rt)
69{
70 struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue);
71 if (hn)
72 return bheap2task(hn);
73 else
74 return NULL;
75}
76
77void rt_domain_init(rt_domain_t *rt, bheap_prio_t order,
78 check_resched_needed_t check,
79		    release_jobs_t release);
80
81void __add_ready(rt_domain_t* rt, struct task_struct *new);
82void __merge_ready(rt_domain_t* rt, struct bheap *tasks);
83void __add_release(rt_domain_t* rt, struct task_struct *task);
84
85static inline struct task_struct* __take_ready(rt_domain_t* rt)
86{
87 struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue);
88 if (hn)
89 return bheap2task(hn);
90 else
91 return NULL;
92}
93
94static inline struct task_struct* __peek_ready(rt_domain_t* rt)
95{
96 struct bheap_node* hn = bheap_peek(rt->order, &rt->ready_queue);
97 if (hn)
98 return bheap2task(hn);
99 else
100 return NULL;
101}
102
103static inline int is_queued(struct task_struct *t)
104{
105 BUG_ON(!tsk_rt(t)->heap_node);
106 return bheap_node_in_heap(tsk_rt(t)->heap_node);
107}
108
109static inline void remove(rt_domain_t* rt, struct task_struct *t)
110{
111 bheap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
112}
113
114static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
115{
116 unsigned long flags;
117 /* first we need the write lock for rt_ready_queue */
118 raw_spin_lock_irqsave(&rt->ready_lock, flags);
119 __add_ready(rt, new);
120 raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
121}
122
123static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
124{
125 unsigned long flags;
126 raw_spin_lock_irqsave(&rt->ready_lock, flags);
127 __merge_ready(rt, tasks);
128 raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
129}
130
131static inline struct task_struct* take_ready(rt_domain_t* rt)
132{
133 unsigned long flags;
134 struct task_struct* ret;
135 /* first we need the write lock for rt_ready_queue */
136 raw_spin_lock_irqsave(&rt->ready_lock, flags);
137 ret = __take_ready(rt);
138 raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
139 return ret;
140}
141
142
143static inline void add_release(rt_domain_t* rt, struct task_struct *task)
144{
145 unsigned long flags;
146 raw_spin_lock_irqsave(&rt->tobe_lock, flags);
147 __add_release(rt, task);
148 raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
149}
150
151#ifdef CONFIG_RELEASE_MASTER
152void __add_release_on(rt_domain_t* rt, struct task_struct *task,
153 int target_cpu);
154
155static inline void add_release_on(rt_domain_t* rt,
156 struct task_struct *task,
157 int target_cpu)
158{
159 unsigned long flags;
160 raw_spin_lock_irqsave(&rt->tobe_lock, flags);
161 __add_release_on(rt, task, target_cpu);
162 raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
163}
164#endif
165
166static inline int __jobs_pending(rt_domain_t* rt)
167{
168 return !bheap_empty(&rt->ready_queue);
169}
170
171static inline int jobs_pending(rt_domain_t* rt)
172{
173 unsigned long flags;
174 int ret;
175 /* first we need the write lock for rt_ready_queue */
176 raw_spin_lock_irqsave(&rt->ready_lock, flags);
177 ret = !bheap_empty(&rt->ready_queue);
178 raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
179 return ret;
180}
181
182#endif
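
Note (not part of the patch): an rt_domain_t bundles a ready queue, a release queue, and the two policy callbacks; plugins normally initialize it through a wrapper such as edf_domain_init() (declared earlier in this diff) and then use the locked add_ready()/take_ready() helpers. A hedged sketch, assuming a NULL release callback selects the default release behavior:

#include <litmus/rt_domain.h>
#include <litmus/edf_common.h>

static rt_domain_t demo_domain;

static int demo_check_resched(rt_domain_t *rt)
{
	/* kick a CPU if the new queue head should preempt it */
	return 0;
}

static void demo_domain_init(void)
{
	edf_domain_init(&demo_domain, demo_check_resched, NULL);
}

static struct task_struct *demo_pull_next(void)
{
	/* takes and releases ready_lock internally */
	return take_ready(&demo_domain);
}
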
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
new file mode 100644
index 000000000000..a23ce1524051
--- /dev/null
+++ b/include/litmus/rt_param.h
@@ -0,0 +1,228 @@
1/*
2 * Definition of the scheduler plugin interface.
3 *
4 */
5#ifndef _LINUX_RT_PARAM_H_
6#define _LINUX_RT_PARAM_H_
7
8/* Litmus time type. */
9typedef unsigned long long lt_t;
10
11static inline int lt_after(lt_t a, lt_t b)
12{
13 return ((long long) b) - ((long long) a) < 0;
14}
15#define lt_before(a, b) lt_after(b, a)
16
17static inline int lt_after_eq(lt_t a, lt_t b)
18{
19 return ((long long) a) - ((long long) b) >= 0;
20}
21#define lt_before_eq(a, b) lt_after_eq(b, a)
22
23/* different types of clients */
24typedef enum {
25 RT_CLASS_HARD,
26 RT_CLASS_SOFT,
27 RT_CLASS_BEST_EFFORT
28} task_class_t;
29
30typedef enum {
31 NO_ENFORCEMENT, /* job may overrun unhindered */
32 QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */
33 PRECISE_ENFORCEMENT /* NOT IMPLEMENTED - enforced with hrtimers */
34} budget_policy_t;
35
36#define LITMUS_MAX_PRIORITY 512
37
38struct rt_task {
39 lt_t exec_cost;
40 lt_t period;
41 lt_t phase;
42 unsigned int cpu;
43 unsigned int priority;
44 task_class_t cls;
45 budget_policy_t budget_policy; /* ignored by pfair */
46};
47
48union np_flag {
49 uint32_t raw;
50 struct {
51 /* Is the task currently in a non-preemptive section? */
52 uint32_t flag:31;
53 /* Should the task call into the scheduler? */
54 uint32_t preempt:1;
55 } np;
56};
57
58/* The definition of the data that is shared between the kernel and real-time
59 * tasks via a shared page (see litmus/ctrldev.c).
60 *
61 * WARNING: User space can write to this, so don't trust
62 * the correctness of the fields!
63 *
64 * This serves two purposes: to enable efficient signaling
65 * of non-preemptive sections (user->kernel) and
66 * delayed preemptions (kernel->user), and to export
67 * some real-time relevant statistics such as preemption and
68 * migration data to user space. We can't use a device to export
69 * statistics because we want to avoid system call overhead when
70 * determining preemption/migration overheads.
71 */
72struct control_page {
73 volatile union np_flag sched;
74
75 /* locking overhead tracing: time stamp prior to system call */
76 uint64_t ts_syscall_start; /* Feather-Trace cycles */
77
78 /* to be extended */
79};
80
81/* don't export internal data structures to user space (liblitmus) */
82#ifdef __KERNEL__
83
84struct _rt_domain;
85struct bheap_node;
86struct release_heap;
87
88struct rt_job {
89	/* Time instant the job was or will be released.  */
90 lt_t release;
91 /* What is the current deadline? */
92 lt_t deadline;
93
94 /* How much service has this job received so far? */
95 lt_t exec_time;
96
97 /* Which job is this. This is used to let user space
98 * specify which job to wait for, which is important if jobs
99 * overrun. If we just call sys_sleep_next_period() then we
100 * will unintentionally miss jobs after an overrun.
101 *
102 * Increase this sequence number when a job is released.
103 */
104 unsigned int job_no;
105};
106
107struct pfair_param;
108
109/* RT task parameters for scheduling extensions
110 * These parameters are inherited during clone and therefore must
111 * be explicitly set up before the task set is launched.
112 */
113struct rt_param {
114 /* is the task sleeping? */
115 unsigned int flags:8;
116
117 /* do we need to check for srp blocking? */
118 unsigned int srp_non_recurse:1;
119
120 /* is the task present? (true if it can be scheduled) */
121 unsigned int present:1;
122
123#ifdef CONFIG_LITMUS_LOCKING
124 /* Is the task being priority-boosted by a locking protocol? */
125 unsigned int priority_boosted:1;
126 /* If so, when did this start? */
127 lt_t boost_start_time;
128#endif
129
130 /* user controlled parameters */
131 struct rt_task task_params;
132
133 /* timing parameters */
134 struct rt_job job_params;
135
136 /* Task representing the current "inherited" task
137  * priority, assigned by inherit_priority and
138  * return_priority in the scheduler plugins.
139  * Could point to self if PI does not result in
140 * an increased task priority.
141 */
142 struct task_struct* inh_task;
143
144#ifdef CONFIG_NP_SECTION
145 /* For the FMLP under PSN-EDF, it is required to make the task
146 * non-preemptive from kernel space. In order not to interfere with
147 * user space, this counter indicates the kernel space np setting.
148 * kernel_np > 0 => task is non-preemptive
149 */
150 unsigned int kernel_np;
151#endif
152
153 /* This field can be used by plugins to store where the task
154 * is currently scheduled. It is the responsibility of the
155 * plugin to avoid race conditions.
156 *
157  * This is used by GSN-EDF and PFAIR.
158 */
159 volatile int scheduled_on;
160
161 /* Is the stack of the task currently in use? This is updated by
162 * the LITMUS core.
163 *
164 * Be careful to avoid deadlocks!
165 */
166 volatile int stack_in_use;
167
168 /* This field can be used by plugins to store where the task
169 * is currently linked. It is the responsibility of the plugin
170 * to avoid race conditions.
171 *
172 * Used by GSN-EDF.
173 */
174 volatile int linked_on;
175
176 /* PFAIR/PD^2 state. Allocated on demand. */
177 struct pfair_param* pfair;
178
179 /* Fields saved before BE->RT transition.
180 */
181 int old_policy;
182 int old_prio;
183
184 /* ready queue for this task */
185 struct _rt_domain* domain;
186
187 /* heap element for this task
188 *
189 * Warning: Don't statically allocate this node. The heap
190 * implementation swaps these between tasks, thus after
191 * dequeuing from a heap you may end up with a different node
192  * than the one you had when enqueuing the task. For the same
193 * reason, don't obtain and store references to this node
194 * other than this pointer (which is updated by the heap
195 * implementation).
196 */
197 struct bheap_node* heap_node;
198 struct release_heap* rel_heap;
199
200#ifdef CONFIG_LITMUS_LOCKING
201 /* task in heap of pending jobs -- used by C-EDF for priority donation */
202 struct bheap_node* pending_node;
204 /* is the job in a critical section or on a wait queue? */
204 unsigned int request_incomplete;
205 /* is the job currently a donor? */
206 unsigned int is_donor;
207 /* is this job suspended, waiting to become eligible? */
208 unsigned int waiting_eligible;
209
210 int pending_on;
211#endif
212
213 /* Used by rt_domain to queue task in release list.
214 */
215 struct list_head list;
216
217 /* Pointer to the page shared between userspace and kernel. */
218 struct control_page * ctrl_page;
219};
220
221/* Possible RT flags */
222#define RT_F_RUNNING 0x00000000
223#define RT_F_SLEEP 0x00000001
224#define RT_F_EXIT_SEM 0x00000008
225
226#endif
227
228#endif
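
A common pattern enabled by inh_task is resolving the task whose parameters currently determine scheduling priority. The following sketch is not part of this patch and assumes the tsk_rt() accessor from litmus.h:

static inline struct task_struct *effective_prio_task(struct task_struct *t)
{
	/* If a locking protocol raised our priority, scheduling decisions
	 * are made with the parameters of the inherited task. */
	return tsk_rt(t)->inh_task ? tsk_rt(t)->inh_task : t;
}
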
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
new file mode 100644
index 000000000000..b5d1ae7bc3b6
--- /dev/null
+++ b/include/litmus/sched_plugin.h
@@ -0,0 +1,117 @@
1/*
2 * Definition of the scheduler plugin interface.
3 *
4 */
5#ifndef _LINUX_SCHED_PLUGIN_H_
6#define _LINUX_SCHED_PLUGIN_H_
7
8#include <linux/sched.h>
9
10#ifdef CONFIG_LITMUS_LOCKING
11#include <litmus/locking.h>
12#endif
13
14/************************ setup/tear down ********************/
15
16typedef long (*activate_plugin_t) (void);
17typedef long (*deactivate_plugin_t) (void);
18
19
20
21/********************* scheduler invocation ******************/
22
23/* Plugin-specific realtime tick handler */
24typedef void (*scheduler_tick_t) (struct task_struct *cur);
25/* Main scheduling decision function: pick the next task to run. */
26typedef struct task_struct* (*schedule_t)(struct task_struct * prev);
27/* Clean up after the task switch has occurred.
28 * This function is called after every (even non-rt) task switch.
29 */
30typedef void (*finish_switch_t)(struct task_struct *prev);
31
32
33/********************* task state changes ********************/
34
35/* Called to set up a new real-time task.
36 * Release the first job, enqueue, etc.
37 * Task may already be running.
38 */
39typedef void (*task_new_t) (struct task_struct *task,
40 int on_rq,
41 int running);
42
43/* Called to re-introduce a task after blocking.
44 * Can potentially be called multiple times.
45 */
46typedef void (*task_wake_up_t) (struct task_struct *task);
47/* Called to notify the plugin of a blocking real-time task.
48 * It will only be called for real-time tasks and before schedule() is called. */
49typedef void (*task_block_t) (struct task_struct *task);
50/* Called when a real-time task exits or changes to a different scheduling
51 * class.
52 * Free any allocated resources
53 */
54typedef void (*task_exit_t) (struct task_struct *);
55
56/* Called early, before the caller holds the runqueue lock. */
57typedef void (*pre_setsched_t) (struct task_struct *, int policy);
58
59
60/* Called when the current task attempts to create a new lock of a given
61 * protocol type. */
62typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type,
63 void* __user config);
64
65
66/********************* sys call backends ********************/
67/* This function causes the caller to sleep until the next release */
68typedef long (*complete_job_t) (void);
69
70typedef long (*admit_task_t)(struct task_struct* tsk);
71
72typedef void (*release_at_t)(struct task_struct *t, lt_t start);
73
74struct sched_plugin {
75 struct list_head list;
76 /* basic info */
77 char *plugin_name;
78
79 /* setup */
80 activate_plugin_t activate_plugin;
81 deactivate_plugin_t deactivate_plugin;
82
83 /* scheduler invocation */
84 scheduler_tick_t tick;
85 schedule_t schedule;
86 finish_switch_t finish_switch;
87
88 /* syscall backend */
89 complete_job_t complete_job;
90 release_at_t release_at;
91
92 /* task state changes */
93 admit_task_t admit_task;
94
95 task_new_t task_new;
96 task_wake_up_t task_wake_up;
97 task_block_t task_block;
98 task_exit_t task_exit;
99
100 pre_setsched_t pre_setsched;
101
102#ifdef CONFIG_LITMUS_LOCKING
103 /* locking protocols */
104 allocate_lock_t allocate_lock;
105#endif
106} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
107
108
109extern struct sched_plugin *litmus;
110
111int register_sched_plugin(struct sched_plugin* plugin);
112struct sched_plugin* find_sched_plugin(const char* name);
113int print_sched_plugins(char* buf, int max);
114
115extern struct sched_plugin linux_sched_plugin;
116
117#endif
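
For orientation, a minimal plugin skeleton might be registered as sketched below. This is not part of the patch; the demo_* names are hypothetical, and it assumes the core tolerates callbacks left NULL (the real plugins fill in all mandatory hooks).

#include <linux/module.h>
#include <litmus/sched_plugin.h>

static long demo_activate_plugin(void)
{
	return 0;	/* nothing to set up */
}

static struct task_struct* demo_schedule(struct task_struct *prev)
{
	return NULL;	/* never picks a real-time task; Linux schedules as usual */
}

static struct sched_plugin demo_plugin = {
	.plugin_name     = "DEMO",
	.activate_plugin = demo_activate_plugin,
	.schedule        = demo_schedule,
};

static int __init init_demo_plugin(void)
{
	return register_sched_plugin(&demo_plugin);
}
module_init(init_demo_plugin);
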
diff --git a/include/litmus/sched_plugin.h.rej b/include/litmus/sched_plugin.h.rej
new file mode 100644
index 000000000000..47e0c27c5061
--- /dev/null
+++ b/include/litmus/sched_plugin.h.rej
@@ -0,0 +1,22 @@
1--- include/litmus/sched_plugin.h
2+++ include/litmus/sched_plugin.h
3@@ -53,6 +53,10 @@
4 */
5 typedef void (*task_exit_t) (struct task_struct *);
6
7+/* called early before the caller holds the runqueue lock */
8+typedef void (*pre_setsched_t) (struct task_struct *, int policy);
9+
10+
11 /* Called when the current task attempts to create a new lock of a given
12 * protocol type. */
13 typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type,
14@@ -93,6 +97,8 @@
15 task_block_t task_block;
16 task_exit_t task_exit;
17
18+ pre_setsched_t pre_setsched;
19+
20 #ifdef CONFIG_LITMUS_LOCKING
21 /* locking protocols */
22 allocate_lock_t allocate_lock;
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
new file mode 100644
index 000000000000..7ca34cb13881
--- /dev/null
+++ b/include/litmus/sched_trace.h
@@ -0,0 +1,200 @@
1/*
2 * sched_trace.h -- record scheduler events to a byte stream for offline analysis.
3 */
4#ifndef _LINUX_SCHED_TRACE_H_
5#define _LINUX_SCHED_TRACE_H_
6
7/* all times in nanoseconds */
8
9struct st_trace_header {
10 u8 type; /* Of what type is this record? */
11 u8 cpu; /* On which CPU was it recorded? */
12 u16 pid; /* PID of the task. */
13 u32 job; /* The job sequence number. */
14};
15
16#define ST_NAME_LEN 16
17struct st_name_data {
18 char cmd[ST_NAME_LEN]; /* The name of the executable of this process. */
19};
20
21struct st_param_data { /* regular params */
22 u32 wcet;
23 u32 period;
24 u32 phase;
25 u8 partition;
26 u8 class;
27 u8 __unused[2];
28};
29
30struct st_release_data { /* A job was/is going to be released. */
31 u64 release; /* What's the release time? */
32 u64 deadline; /* By when must it finish? */
33};
34
35struct st_assigned_data { /* A job was assigned to a CPU. */
36 u64 when;
37 u8 target; /* Where should it execute? */
38 u8 __unused[7];
39};
40
41struct st_switch_to_data { /* A process was switched to on a given CPU. */
42 u64 when; /* When did this occur? */
43 u32 exec_time; /* Time the current job has executed. */
44 u8 __unused[4];
45
46};
47
48struct st_switch_away_data { /* A process was switched away from on a given CPU. */
49 u64 when;
50 u64 exec_time;
51};
52
53struct st_completion_data { /* A job completed. */
54 u64 when;
55 u8 forced:1; /* Set to 1 if job overran and kernel advanced to the
56 * next task automatically; set to 0 otherwise.
57 */
58 u8 __uflags:7;
59 u8 __unused[7];
60};
61
62struct st_block_data { /* A task blocks. */
63 u64 when;
64 u64 __unused;
65};
66
67struct st_resume_data { /* A task resumes. */
68 u64 when;
69 u64 __unused;
70};
71
72struct st_action_data {
73 u64 when;
74 u8 action;
75 u8 __unused[7];
76};
77
78struct st_sys_release_data {
79 u64 when;
80 u64 release;
81};
82
83#define DATA(x) struct st_ ## x ## _data x;
84
85typedef enum {
86 ST_NAME = 1, /* Start at one, so that we can spot
87 * uninitialized records. */
88 ST_PARAM,
89 ST_RELEASE,
90 ST_ASSIGNED,
91 ST_SWITCH_TO,
92 ST_SWITCH_AWAY,
93 ST_COMPLETION,
94 ST_BLOCK,
95 ST_RESUME,
96 ST_ACTION,
97 ST_SYS_RELEASE
98} st_event_record_type_t;
99
100struct st_event_record {
101 struct st_trace_header hdr;
102 union {
103 u64 raw[2];
104
105 DATA(name);
106 DATA(param);
107 DATA(release);
108 DATA(assigned);
109 DATA(switch_to);
110 DATA(switch_away);
111 DATA(completion);
112 DATA(block);
113 DATA(resume);
114 DATA(action);
115 DATA(sys_release);
116 } data;
117};
118
119#undef DATA
120
121#ifdef __KERNEL__
122
123#include <linux/sched.h>
124#include <litmus/feather_trace.h>
125
126#ifdef CONFIG_SCHED_TASK_TRACE
127
128#define SCHED_TRACE(id, callback, task) \
129 ft_event1(id, callback, task)
130#define SCHED_TRACE2(id, callback, task, xtra) \
131 ft_event2(id, callback, task, xtra)
132
133/* provide prototypes; needed on sparc64 */
134#ifndef NO_TASK_TRACE_DECLS
135feather_callback void do_sched_trace_task_name(unsigned long id,
136 struct task_struct* task);
137feather_callback void do_sched_trace_task_param(unsigned long id,
138 struct task_struct* task);
139feather_callback void do_sched_trace_task_release(unsigned long id,
140 struct task_struct* task);
141feather_callback void do_sched_trace_task_switch_to(unsigned long id,
142 struct task_struct* task);
143feather_callback void do_sched_trace_task_switch_away(unsigned long id,
144 struct task_struct* task);
145feather_callback void do_sched_trace_task_completion(unsigned long id,
146 struct task_struct* task,
147 unsigned long forced);
148feather_callback void do_sched_trace_task_block(unsigned long id,
149 struct task_struct* task);
150feather_callback void do_sched_trace_task_resume(unsigned long id,
151 struct task_struct* task);
152feather_callback void do_sched_trace_action(unsigned long id,
153 struct task_struct* task,
154 unsigned long action);
155feather_callback void do_sched_trace_sys_release(unsigned long id,
156 lt_t* start);
157
158#endif
159
160#else
161
162#define SCHED_TRACE(id, callback, task) /* no tracing */
163#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */
164
165#endif
166
167
168#define SCHED_TRACE_BASE_ID 500
169
170
171#define sched_trace_task_name(t) \
172 SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, do_sched_trace_task_name, t)
173#define sched_trace_task_param(t) \
174 SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, do_sched_trace_task_param, t)
175#define sched_trace_task_release(t) \
176 SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, do_sched_trace_task_release, t)
177#define sched_trace_task_switch_to(t) \
178 SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, do_sched_trace_task_switch_to, t)
179#define sched_trace_task_switch_away(t) \
180 SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t)
181#define sched_trace_task_completion(t, forced) \
182 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \
183 (unsigned long) forced)
184#define sched_trace_task_block(t) \
185 SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t)
186#define sched_trace_task_resume(t) \
187 SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t)
188#define sched_trace_action(t, action) \
189 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9, do_sched_trace_action, t, \
190 (unsigned long) action)
191/* 'when' is a pointer; it does not need an explicit cast to unsigned long */
192#define sched_trace_sys_release(when) \
193 SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when)
194
195
196#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
197
198#endif /* __KERNEL__ */
199
200#endif
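
As a usage sketch (the demo_* hooks are hypothetical, chosen to mirror the callback names in sched_plugin.h), a plugin would typically emit these records from the corresponding scheduling events:

#include <litmus/sched_trace.h>

static void demo_task_new(struct task_struct *t, int on_rq, int running)
{
	sched_trace_task_name(t);	/* ST_NAME: executable name, once per task */
	sched_trace_task_param(t);	/* ST_PARAM: WCET, period, partition, class */
	sched_trace_task_release(t);	/* ST_RELEASE: first release and deadline */
}

static void demo_job_completion(struct task_struct *t, int forced)
{
	sched_trace_task_completion(t, forced);	/* ST_COMPLETION, forced = budget overrun */
}
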
diff --git a/include/litmus/srp.h b/include/litmus/srp.h
new file mode 100644
index 000000000000..c9a4552b2bf3
--- /dev/null
+++ b/include/litmus/srp.h
@@ -0,0 +1,28 @@
1#ifndef LITMUS_SRP_H
2#define LITMUS_SRP_H
3
4struct srp_semaphore;
5
6struct srp_priority {
7 struct list_head list;
8 unsigned int priority;
9 pid_t pid;
10};
11#define list2prio(l) list_entry(l, struct srp_priority, list)
12
13/* struct for uniprocessor SRP "semaphore" */
14struct srp_semaphore {
15 struct litmus_lock litmus_lock;
16 struct srp_priority ceiling;
17 struct task_struct* owner;
18 int cpu; /* cpu associated with this "semaphore" and resource */
19};
20
21/* map a task to its SRP preemption level priority */
22typedef unsigned int (*srp_prioritization_t)(struct task_struct* t);
23/* Must be updated by each plugin that uses SRP. */
24extern srp_prioritization_t get_srp_prio;
25
26struct srp_semaphore* allocate_srp_semaphore(void);
27
28#endif
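
The wiring expected of a plugin that supports SRP is sketched below; the concrete preemption-level mapping (here, hypothetically, the task's period, using the assumed tsk_rt() accessor from litmus.h) is plugin-specific and not defined by this header.

static unsigned int demo_srp_prio(struct task_struct *t)
{
	/* hypothetical preemption-level mapping derived from the period */
	return (unsigned int) tsk_rt(t)->task_params.period;
}

static long demo_srp_activate_plugin(void)
{
	get_srp_prio = demo_srp_prio;	/* must be set by every plugin that uses SRP */
	return 0;
}
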
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
new file mode 100644
index 000000000000..d6829c416912
--- /dev/null
+++ b/include/litmus/trace.h
@@ -0,0 +1,129 @@
1#ifndef _SYS_TRACE_H_
2#define _SYS_TRACE_H_
3
4#ifdef CONFIG_SCHED_OVERHEAD_TRACE
5
6#include <litmus/feather_trace.h>
7#include <litmus/feather_buffer.h>
8
9
10/*********************** TIMESTAMPS ************************/
11
12enum task_type_marker {
13 TSK_BE,
14 TSK_RT,
15 TSK_UNKNOWN
16};
17
18struct timestamp {
19 uint64_t timestamp;
20 uint32_t seq_no;
21 uint8_t cpu;
22 uint8_t event;
23 uint8_t task_type;
24};
25
26/* tracing callbacks */
27feather_callback void save_timestamp(unsigned long event);
28feather_callback void save_timestamp_def(unsigned long event, unsigned long type);
29feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr);
30feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu);
31feather_callback void save_task_latency(unsigned long event, unsigned long when_ptr);
32feather_callback void save_timestamp_time(unsigned long event, unsigned long time_ptr);
33
34#define TIMESTAMP(id) ft_event0(id, save_timestamp)
35
36#define DTIMESTAMP(id, def) ft_event1(id, save_timestamp_def, (unsigned long) def)
37
38#define TTIMESTAMP(id, task) \
39 ft_event1(id, save_timestamp_task, (unsigned long) task)
40
41#define CTIMESTAMP(id, cpu) \
42 ft_event1(id, save_timestamp_cpu, (unsigned long) cpu)
43
44#define LTIMESTAMP(id, task) \
45 ft_event1(id, save_task_latency, (unsigned long) task)
46
47#define TIMESTAMP_TIME(id, time_ptr) \
48 ft_event1(id, save_timestamp_time, (unsigned long) time_ptr)
49
50#define TIMESTAMP_PID(id) ft_event0(id, save_timestamp_pid)
51
52#else /* !CONFIG_SCHED_OVERHEAD_TRACE */
53
54#define TIMESTAMP(id) /* no tracing */
55
56#define DTIMESTAMP(id, def) /* no tracing */
57
58#define TTIMESTAMP(id, task) /* no tracing */
59
60#define CTIMESTAMP(id, cpu) /* no tracing */
61
62#define LTIMESTAMP(id, when_ptr) /* no tracing */
63
64#define TIMESTAMP_TIME(id, time_ptr) /* no tracing */
65
66#define TIMESTAMP_PID(id) /* no tracing */
67
68#endif
69
70
71/* Convention for timestamps
72 * =========================
73 *
74 * In order to process the trace files with a common tool, we use the following
 75 * convention to measure execution times: the event id that marks the end of a
 76 * code segment is always the start event id plus one.
77 */
78
79#define __TS_SYSCALL_IN_START(p) TIMESTAMP_TIME(10, p)
80#define TS_SYSCALL_IN_END TIMESTAMP_PID(11)
81
82#define TS_SYSCALL_OUT_START TIMESTAMP_PID(20)
83#define TS_SYSCALL_OUT_END TIMESTAMP_PID(21)
84
85#define TS_LOCK_START TIMESTAMP_PID(30)
86#define TS_LOCK_END TIMESTAMP_PID(31)
87
88#define TS_LOCK_SUSPEND TIMESTAMP_PID(38)
89#define TS_LOCK_RESUME TIMESTAMP_PID(39)
90
91#define TS_UNLOCK_START TIMESTAMP_PID(40)
92#define TS_UNLOCK_END TIMESTAMP_PID(41)
93
94#define TS_SCHED_START DTIMESTAMP(100, TSK_UNKNOWN) /* we only
95 * care
96 * about
97 * next */
98#define TS_SCHED_END(t) TTIMESTAMP(101, t)
99#define TS_SCHED2_START(t) TTIMESTAMP(102, t)
100#define TS_SCHED2_END(t) TTIMESTAMP(103, t)
101
102#define TS_CXS_START(t) TTIMESTAMP(104, t)
103#define TS_CXS_END(t) TTIMESTAMP(105, t)
104
105#define TS_RELEASE_START DTIMESTAMP(106, TSK_RT)
106#define TS_RELEASE_END DTIMESTAMP(107, TSK_RT)
107
108#define TS_TICK_START(t) TTIMESTAMP(110, t)
109#define TS_TICK_END(t) TTIMESTAMP(111, t)
110
111
112#define TS_PLUGIN_SCHED_START /* TIMESTAMP(120) */ /* currently unused */
113#define TS_PLUGIN_SCHED_END /* TIMESTAMP(121) */
114
115#define TS_PLUGIN_TICK_START /* TIMESTAMP(130) */
116#define TS_PLUGIN_TICK_END /* TIMESTAMP(131) */
117
118#define TS_ENTER_NP_START TIMESTAMP(140)
119#define TS_ENTER_NP_END TIMESTAMP(141)
120
121#define TS_EXIT_NP_START TIMESTAMP(150)
122#define TS_EXIT_NP_END TIMESTAMP(151)
123
124#define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c)
125#define TS_SEND_RESCHED_END DTIMESTAMP(191, TSK_UNKNOWN)
126
127#define TS_RELEASE_LATENCY(when) LTIMESTAMP(208, &(when))
128
129#endif /* !_SYS_TRACE_H_ */
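
Following the start/end convention above, a measured code path brackets itself with the matching event pair. A sketch, in which do_litmus_lock() is a hypothetical helper and only the bracketing is the point:

asmlinkage long sys_litmus_lock_sketch(int lock_od)
{
	long err;

	TS_LOCK_START;			/* event 30, tagged with the caller's PID */
	err = do_litmus_lock(lock_od);	/* hypothetical: acquire the lock object */
	TS_LOCK_END;			/* event 31 = 30 + 1, matched up offline */
	return err;
}
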
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
new file mode 100644
index 000000000000..94264c27d9ac
--- /dev/null
+++ b/include/litmus/unistd_32.h
@@ -0,0 +1,21 @@
1/*
2 * included from arch/x86/include/asm/unistd_32.h
3 *
4 * LITMUS^RT syscalls with "relative" numbers
5 */
6#define __LSC(x) (__NR_LITMUS + x)
7
8#define __NR_set_rt_task_param __LSC(0)
9#define __NR_get_rt_task_param __LSC(1)
10#define __NR_complete_job __LSC(2)
11#define __NR_od_open __LSC(3)
12#define __NR_od_close __LSC(4)
13#define __NR_litmus_lock __LSC(5)
14#define __NR_litmus_unlock __LSC(6)
15#define __NR_query_job_no __LSC(7)
16#define __NR_wait_for_job_release __LSC(8)
17#define __NR_wait_for_ts_release __LSC(9)
18#define __NR_release_ts __LSC(10)
19#define __NR_null_call __LSC(11)
20
21#define NR_litmus_syscalls 12
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
new file mode 100644
index 000000000000..d5ced0d2642c
--- /dev/null
+++ b/include/litmus/unistd_64.h
@@ -0,0 +1,33 @@
1/*
2 * included from arch/x86/include/asm/unistd_64.h
3 *
4 * LITMUS^RT syscalls with "relative" numbers
5 */
6#define __LSC(x) (__NR_LITMUS + x)
7
8#define __NR_set_rt_task_param __LSC(0)
9__SYSCALL(__NR_set_rt_task_param, sys_set_rt_task_param)
10#define __NR_get_rt_task_param __LSC(1)
11__SYSCALL(__NR_get_rt_task_param, sys_get_rt_task_param)
12#define __NR_complete_job __LSC(2)
13__SYSCALL(__NR_complete_job, sys_complete_job)
14#define __NR_od_open __LSC(3)
15__SYSCALL(__NR_od_open, sys_od_open)
16#define __NR_od_close __LSC(4)
17__SYSCALL(__NR_od_close, sys_od_close)
18#define __NR_litmus_lock __LSC(5)
19__SYSCALL(__NR_litmus_lock, sys_litmus_lock)
20#define __NR_litmus_unlock __LSC(6)
21__SYSCALL(__NR_litmus_unlock, sys_litmus_unlock)
22#define __NR_query_job_no __LSC(7)
23__SYSCALL(__NR_query_job_no, sys_query_job_no)
24#define __NR_wait_for_job_release __LSC(8)
25__SYSCALL(__NR_wait_for_job_release, sys_wait_for_job_release)
26#define __NR_wait_for_ts_release __LSC(9)
27__SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
28#define __NR_release_ts __LSC(10)
29__SYSCALL(__NR_release_ts, sys_release_ts)
30#define __NR_null_call __LSC(11)
31__SYSCALL(__NR_null_call, sys_null_call)
32
33#define NR_litmus_syscalls 12
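
From user space, these numbers are reached through the generic syscall() interface. A sketch, assuming __NR_complete_job is visible via the installed kernel headers:

#include <unistd.h>
#include <sys/syscall.h>

/* Sleep until the next job release (sys_complete_job takes no arguments). */
static inline long complete_job(void)
{
	return syscall(__NR_complete_job);
}
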
diff --git a/include/litmus/wait.h b/include/litmus/wait.h
new file mode 100644
index 000000000000..ce1347c355f8
--- /dev/null
+++ b/include/litmus/wait.h
@@ -0,0 +1,57 @@
1#ifndef _LITMUS_WAIT_H_
2#define _LITMUS_WAIT_H_
3
4struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
5
6/* wrap a regular wait_queue_t entry */
7struct __prio_wait_queue {
8 wait_queue_t wq;
9
10 /* some priority point */
11 lt_t priority;
12 /* break ties in priority by lower tie_breaker */
13 unsigned int tie_breaker;
14};
15
16typedef struct __prio_wait_queue prio_wait_queue_t;
17
18static inline void init_prio_waitqueue_entry(prio_wait_queue_t *pwq,
19 struct task_struct* t,
20 lt_t priority)
21{
22 init_waitqueue_entry(&pwq->wq, t);
23 pwq->priority = priority;
24 pwq->tie_breaker = 0;
25}
26
27static inline void init_prio_waitqueue_entry_tie(prio_wait_queue_t *pwq,
28 struct task_struct* t,
29 lt_t priority,
30 unsigned int tie_breaker)
31{
32 init_waitqueue_entry(&pwq->wq, t);
33 pwq->priority = priority;
34 pwq->tie_breaker = tie_breaker;
35}
36
37unsigned int __add_wait_queue_prio_exclusive(
38 wait_queue_head_t* head,
39 prio_wait_queue_t *new);
40
41static inline unsigned int add_wait_queue_prio_exclusive(
42 wait_queue_head_t* head,
43 prio_wait_queue_t *new)
44{
45 unsigned long flags;
46 unsigned int passed;
47
48 spin_lock_irqsave(&head->lock, flags);
49 passed = __add_wait_queue_prio_exclusive(head, new);
50
51 spin_unlock_irqrestore(&head->lock, flags);
52
53 return passed;
54}
55
56
57#endif
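
A locking protocol might use these helpers to block the caller in priority order roughly as follows; the sketch is not taken from this patch, the choice of priority point is up to the protocol, and the waker is assumed to dequeue the entry (e.g., via __waitqueue_remove_first()).

static void demo_block_on_wait_queue(wait_queue_head_t *wq, lt_t prio_point)
{
	prio_wait_queue_t wait;

	init_prio_waitqueue_entry(&wait, current, prio_point);
	set_task_state(current, TASK_UNINTERRUPTIBLE);
	add_wait_queue_prio_exclusive(wq, &wait);	/* ordered by priority, FIFO on ties */
	schedule();					/* suspend until the owner wakes us */
}
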