author    Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-29 23:35:01 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-29 23:35:01 -0400
commit    6ffc1fee98c4b995eb3a0285f4f8fb467cb0306e (patch)
tree      69a05892a41e7f7400fa598ee0bdf8027c8f0fd6 /include
parent    e40152ee1e1c7a63f4777791863215e3faa37a86 (diff)
parent    7c1ff4c544dd650cceff3cd69a04bcba60856678 (diff)
Merge branch 'master' into wip-merge-2.6.34
Simple merge between master and 2.6.34 with conflicts resolved. This commit
does not compile; the following main problems are still unresolved:

 - spinlock -> raw_spinlock API changes
 - kfifo API changes
 - sched_class API changes

Conflicts:
	Makefile
	arch/x86/include/asm/hw_irq.h
	arch/x86/include/asm/unistd_32.h
	arch/x86/kernel/syscall_table_32.S
	include/linux/hrtimer.h
	kernel/sched.c
	kernel/sched_fair.c
Diffstat (limited to 'include')
-rw-r--r--	include/linux/completion.h	1
-rw-r--r--	include/linux/fs.h	21
-rw-r--r--	include/linux/hrtimer.h	25
-rw-r--r--	include/linux/sched.h	17
-rw-r--r--	include/linux/smp.h	5
-rw-r--r--	include/linux/tick.h	5
-rw-r--r--	include/litmus/bheap.h	77
-rw-r--r--	include/litmus/edf_common.h	27
-rw-r--r--	include/litmus/fdso.h	69
-rw-r--r--	include/litmus/feather_buffer.h	94
-rw-r--r--	include/litmus/feather_trace.h	49
-rw-r--r--	include/litmus/ftdev.h	49
-rw-r--r--	include/litmus/jobs.h	9
-rw-r--r--	include/litmus/litmus.h	252
-rw-r--r--	include/litmus/rt_domain.h	162
-rw-r--r--	include/litmus/rt_param.h	189
-rw-r--r--	include/litmus/sched_plugin.h	162
-rw-r--r--	include/litmus/sched_trace.h	192
-rw-r--r--	include/litmus/trace.h	113
-rw-r--r--	include/litmus/unistd_32.h	23
-rw-r--r--	include/litmus/unistd_64.h	37
21 files changed, 1566 insertions, 12 deletions
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 4a6b604ef7e4..258bec13d424 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -88,6 +88,7 @@ extern bool completion_done(struct completion *x);
 
 extern void complete(struct completion *);
 extern void complete_all(struct completion *);
+extern void complete_n(struct completion *, int n);
 
 /**
  * INIT_COMPLETION: - reinitialize a completion structure
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 44f35aea2f1f..894918440bc8 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -15,8 +15,8 @@
  * nr_file rlimit, so it's safe to set up a ridiculously high absolute
  * upper limit on files-per-process.
  *
  * Some programs (notably those using select()) may have to be
  * recompiled to take full advantage of the new limits..
  */
 
 /* Fixed constants first: */
@@ -173,7 +173,7 @@ struct inodes_stat_t {
 #define SEL_EX 4
 
 /* public flags for file_system_type */
 #define FS_REQUIRES_DEV 1
 #define FS_BINARY_MOUNTDATA 2
 #define FS_HAS_SUBTYPE 4
 #define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */
@@ -471,7 +471,7 @@ struct iattr {
  */
 #include <linux/quota.h>
 
 /**
  * enum positive_aop_returns - aop return codes with specific semantics
  *
  * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
@@ -481,7 +481,7 @@ struct iattr {
  * be a candidate for writeback again in the near
  * future. Other callers must be careful to unlock
  * the page if they get this return. Returned by
  * writepage();
  *
  * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
  * unlocked it and the page might have been truncated.
@@ -720,6 +720,7 @@ static inline int mapping_writably_mapped(struct address_space *mapping)
 
 struct posix_acl;
 #define ACL_NOT_CACHED ((void *)(-1))
+struct inode_obj_id_table;
 
 struct inode {
 	struct hlist_node i_hash;
@@ -788,6 +789,8 @@ struct inode {
 	struct posix_acl *i_acl;
 	struct posix_acl *i_default_acl;
 #endif
+	struct list_head i_obj_list;
+	struct mutex i_obj_mutex;
 	void *i_private; /* fs or device private pointer */
 };
 
@@ -1000,10 +1003,10 @@ static inline int file_check_writeable(struct file *filp)
 
 #define MAX_NON_LFS ((1UL<<31) - 1)
 
 /* Page cache limit. The filesystems should put that into their s_maxbytes
    limits, otherwise bad things can happen in VM. */
 #if BITS_PER_LONG==32
 #define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
 #elif BITS_PER_LONG==64
 #define MAX_LFS_FILESIZE 0x7fffffffffffffffUL
 #endif
@@ -2129,7 +2132,7 @@ extern int may_open(struct path *, int, int);
 
 extern int kernel_read(struct file *, loff_t, char *, unsigned long);
 extern struct file * open_exec(const char *);
 
 /* fs/dcache.c -- generic fs support functions */
 extern int is_subdir(struct dentry *, struct dentry *);
 extern int path_is_under(struct path *, struct path *);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 5d86fb2309d2..b34823755ee4 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -167,6 +167,7 @@ struct hrtimer_clock_base {
  * @nr_retries: Total number of hrtimer interrupt retries
  * @nr_hangs: Total number of hrtimer interrupt hangs
  * @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @to_pull: LITMUS^RT list of timers to be pulled on this cpu
  */
 struct hrtimer_cpu_base {
 	raw_spinlock_t lock;
@@ -180,6 +181,26 @@ struct hrtimer_cpu_base {
 	unsigned long nr_hangs;
 	ktime_t max_hang_time;
 #endif
+	struct list_head to_pull;
+};
+
+#define HRTIMER_START_ON_INACTIVE 0
+#define HRTIMER_START_ON_QUEUED 1
+
+/*
+ * struct hrtimer_start_on_info - save timer info on remote cpu
+ * @list: list of hrtimer_start_on_info on remote cpu (to_pull)
+ * @timer: timer to be triggered on remote cpu
+ * @time: time event
+ * @mode: timer mode
+ * @state: activity flag
+ */
+struct hrtimer_start_on_info {
+	struct list_head list;
+	struct hrtimer *timer;
+	ktime_t time;
+	enum hrtimer_mode mode;
+	atomic_t state;
 };
 
 static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
@@ -348,6 +369,10 @@ __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 			unsigned long delta_ns,
 			const enum hrtimer_mode mode, int wakeup);
 
+extern int hrtimer_start_on(int cpu, struct hrtimer_start_on_info *info,
+			struct hrtimer *timer, ktime_t time,
+			const enum hrtimer_mode mode);
+
 extern int hrtimer_cancel(struct hrtimer *timer);
 extern int hrtimer_try_to_cancel(struct hrtimer *timer);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2b7b81df78b3..225347d97d47 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -38,6 +38,7 @@
 #define SCHED_BATCH 3
 /* SCHED_ISO: reserved but not implemented yet */
 #define SCHED_IDLE 5
+#define SCHED_LITMUS 6
 /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
 #define SCHED_RESET_ON_FORK 0x40000000
 
@@ -94,6 +95,8 @@ struct sched_param {
 
 #include <asm/processor.h>
 
+#include <litmus/rt_param.h>
+
 struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
@@ -1166,6 +1169,7 @@ struct sched_rt_entity {
 };
 
 struct rcu_node;
+struct od_table_entry;
 
 struct task_struct {
 	volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
@@ -1250,9 +1254,9 @@ struct task_struct {
 	unsigned long stack_canary;
 #endif
 
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively. (p->father can be replaced with
 	 * p->real_parent->pid)
 	 */
 	struct task_struct *real_parent; /* real parent process */
@@ -1464,6 +1468,13 @@ struct task_struct {
 	int make_it_fail;
 #endif
 	struct prop_local_single dirties;
+
+	/* LITMUS RT parameters and state */
+	struct rt_param rt_param;
+
+	/* references to PI semaphores, etc. */
+	struct od_table_entry *od_table;
+
 #ifdef CONFIG_LATENCYTOP
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
@@ -2018,7 +2029,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s
 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 
 	return ret;
 }
 
 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
 				sigset_t *mask);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index cfa2d20e35f1..f86d40768e7f 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -80,6 +80,11 @@ int smp_call_function_any(const struct cpumask *mask,
 			  void (*func)(void *info), void *info, int wait);
 
 /*
+ * sends a 'pull timer' event to a remote CPU
+ */
+extern void smp_send_pull_timers(int cpu);
+
+/*
  * Generic and arch helpers
  */
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
diff --git a/include/linux/tick.h b/include/linux/tick.h
index d2ae79e21be3..25d0cf41d3fd 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -73,6 +73,11 @@ extern int tick_is_oneshot_available(void);
 extern struct tick_device *tick_get_device(int cpu);
 
 # ifdef CONFIG_HIGH_RES_TIMERS
+/* LITMUS^RT tick alignment */
+#define LINUX_DEFAULT_TICKS 0
+#define LITMUS_ALIGNED_TICKS 1
+#define LITMUS_STAGGERED_TICKS 2
+
 extern int tick_init_highres(void);
 extern int tick_program_event(ktime_t expires, int force);
 extern void tick_setup_sched_timer(void);
diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h
new file mode 100644
index 000000000000..cf4864a498d8
--- /dev/null
+++ b/include/litmus/bheap.h
@@ -0,0 +1,77 @@
+/* bheaps.h -- Binomial Heaps
+ *
+ * (c) 2008, 2009 Bjoern Brandenburg
+ */
+
+#ifndef BHEAP_H
+#define BHEAP_H
+
+#define NOT_IN_HEAP UINT_MAX
+
+struct bheap_node {
+	struct bheap_node* parent;
+	struct bheap_node* next;
+	struct bheap_node* child;
+
+	unsigned int degree;
+	void* value;
+	struct bheap_node** ref;
+};
+
+struct bheap {
+	struct bheap_node* head;
+	/* We cache the minimum of the heap.
+	 * This speeds up repeated peek operations.
+	 */
+	struct bheap_node* min;
+};
+
+typedef int (*bheap_prio_t)(struct bheap_node* a, struct bheap_node* b);
+
+void bheap_init(struct bheap* heap);
+void bheap_node_init(struct bheap_node** ref_to_bheap_node_ptr, void* value);
+
+static inline int bheap_node_in_heap(struct bheap_node* h)
+{
+	return h->degree != NOT_IN_HEAP;
+}
+
+static inline int bheap_empty(struct bheap* heap)
+{
+	return heap->head == NULL && heap->min == NULL;
+}
+
+/* insert (and reinitialize) a node into the heap */
+void bheap_insert(bheap_prio_t higher_prio,
+		  struct bheap* heap,
+		  struct bheap_node* node);
+
+/* merge addition into target */
+void bheap_union(bheap_prio_t higher_prio,
+		 struct bheap* target,
+		 struct bheap* addition);
+
+struct bheap_node* bheap_peek(bheap_prio_t higher_prio,
+			      struct bheap* heap);
+
+struct bheap_node* bheap_take(bheap_prio_t higher_prio,
+			      struct bheap* heap);
+
+void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap);
+int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node);
+
+void bheap_delete(bheap_prio_t higher_prio,
+		  struct bheap* heap,
+		  struct bheap_node* node);
+
+/* allocate from memcache */
+struct bheap_node* bheap_node_alloc(int gfp_flags);
+void bheap_node_free(struct bheap_node* hn);
+
+/* allocate a heap node for value and insert into the heap */
+int bheap_add(bheap_prio_t higher_prio, struct bheap* heap,
+	      void* value, int gfp_flags);
+
+void* bheap_take_del(bheap_prio_t higher_prio,
+		     struct bheap* heap);
+#endif
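
For orientation, here is a minimal usage sketch of the heap API declared above (illustrative only, not part of the patch). The ordering callback, the use of plain ints as node values, and the demo function names are assumptions made for the example; in the scheduler the values are task_structs and the callback is a priority comparison such as edf_ready_order().

#include <linux/slab.h>		/* GFP_KERNEL */
#include <litmus/bheap.h>

/* Hypothetical ordering callback: a node wins if the int it carries is
 * smaller (think "earlier deadline"). */
static int int_higher_prio(struct bheap_node *a, struct bheap_node *b)
{
	return *(int *) a->value < *(int *) b->value;
}

static void bheap_usage_sketch(void)
{
	struct bheap heap;
	struct bheap_node *n;
	static int keys[3] = { 30, 10, 20 };
	int i;

	bheap_init(&heap);
	for (i = 0; i < 3; i++) {
		n = bheap_node_alloc(GFP_KERNEL);
		if (!n)
			return;
		/* bind the node to its value; n->ref is kept up to date */
		bheap_node_init(&n, &keys[i]);
		bheap_insert(int_higher_prio, &heap, n);
	}

	/* pops the node carrying 10 first, since it compares as higher prio */
	n = bheap_take(int_higher_prio, &heap);
	if (n)
		bheap_node_free(n);
}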
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
new file mode 100644
index 000000000000..80d4321cc87e
--- /dev/null
+++ b/include/litmus/edf_common.h
@@ -0,0 +1,27 @@
+/*
+ * EDF common data structures and utility functions shared by all EDF
+ * based scheduler plugins
+ */
+
+/* CLEANUP: Add comments and make it less messy.
+ *
+ */
+
+#ifndef __UNC_EDF_COMMON_H__
+#define __UNC_EDF_COMMON_H__
+
+#include <litmus/rt_domain.h>
+
+void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
+		     release_jobs_t release);
+
+int edf_higher_prio(struct task_struct* first,
+		    struct task_struct* second);
+
+int edf_ready_order(struct bheap_node* a, struct bheap_node* b);
+
+int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t);
+
+int edf_set_hp_task(struct pi_semaphore *sem);
+int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu);
+#endif
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
new file mode 100644
index 000000000000..286e10f86de0
--- /dev/null
+++ b/include/litmus/fdso.h
@@ -0,0 +1,69 @@
+/* fdso.h - file descriptor attached shared objects
+ *
+ * (c) 2007 B. Brandenburg, LITMUS^RT project
+ */
+
+#ifndef _LINUX_FDSO_H_
+#define _LINUX_FDSO_H_
+
+#include <linux/list.h>
+#include <asm/atomic.h>
+
+#include <linux/fs.h>
+
+#define MAX_OBJECT_DESCRIPTORS 32
+
+typedef enum {
+	MIN_OBJ_TYPE = 0,
+
+	FMLP_SEM = 0,
+	SRP_SEM = 1,
+
+	MAX_OBJ_TYPE = 1
+} obj_type_t;
+
+struct inode_obj_id {
+	struct list_head list;
+	atomic_t count;
+	struct inode* inode;
+
+	obj_type_t type;
+	void* obj;
+	unsigned int id;
+};
+
+
+struct od_table_entry {
+	unsigned int used;
+
+	struct inode_obj_id* obj;
+	void* extra;
+};
+
+struct fdso_ops {
+	void* (*create) (void);
+	void (*destroy)(void*);
+	int (*open) (struct od_table_entry*, void* __user);
+	int (*close) (struct od_table_entry*);
+};
+
+/* translate a userspace supplied od into the raw table entry
+ * returns NULL if od is invalid
+ */
+struct od_table_entry* __od_lookup(int od);
+
+/* translate a userspace supplied od into the associated object
+ * returns NULL if od is invalid
+ */
+static inline void* od_lookup(int od, obj_type_t type)
+{
+	struct od_table_entry* e = __od_lookup(od);
+	return e && e->obj->type == type ? e->obj->obj : NULL;
+}
+
+#define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM))
+#define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM))
+#define lookup_ics(od)     ((struct ics*) od_lookup(od, ICS_ID))
+
+
+#endif
diff --git a/include/litmus/feather_buffer.h b/include/litmus/feather_buffer.h
new file mode 100644
index 000000000000..6c18277fdfc9
--- /dev/null
+++ b/include/litmus/feather_buffer.h
@@ -0,0 +1,94 @@
+#ifndef _FEATHER_BUFFER_H_
+#define _FEATHER_BUFFER_H_
+
+/* requires UINT_MAX and memcpy */
+
+#define SLOT_FREE	0
+#define SLOT_BUSY	1
+#define SLOT_READY	2
+
+struct ft_buffer {
+	unsigned int	slot_count;
+	unsigned int	slot_size;
+
+	int		free_count;
+	unsigned int	write_idx;
+	unsigned int	read_idx;
+
+	char*		slots;
+	void*		buffer_mem;
+	unsigned int	failed_writes;
+};
+
+static inline int init_ft_buffer(struct ft_buffer* buf,
+				 unsigned int slot_count,
+				 unsigned int slot_size,
+				 char* slots,
+				 void* buffer_mem)
+{
+	int i = 0;
+	if (!slot_count || UINT_MAX % slot_count != slot_count - 1) {
+		/* The slot count must divide UINT_MAX + 1 so that when it
+		 * wraps around the index correctly points to 0.
+		 */
+		return 0;
+	} else {
+		buf->slot_count    = slot_count;
+		buf->slot_size     = slot_size;
+		buf->slots         = slots;
+		buf->buffer_mem    = buffer_mem;
+		buf->free_count    = slot_count;
+		buf->write_idx     = 0;
+		buf->read_idx      = 0;
+		buf->failed_writes = 0;
+		for (i = 0; i < slot_count; i++)
+			buf->slots[i] = SLOT_FREE;
+		return 1;
+	}
+}
+
+static inline int ft_buffer_start_write(struct ft_buffer* buf, void **ptr)
+{
+	int free = fetch_and_dec(&buf->free_count);
+	unsigned int idx;
+	if (free <= 0) {
+		fetch_and_inc(&buf->free_count);
+		*ptr = 0;
+		fetch_and_inc(&buf->failed_writes);
+		return 0;
+	} else {
+		idx = fetch_and_inc((int*) &buf->write_idx) % buf->slot_count;
+		buf->slots[idx] = SLOT_BUSY;
+		*ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size;
+		return 1;
+	}
+}
+
+static inline void ft_buffer_finish_write(struct ft_buffer* buf, void *ptr)
+{
+	unsigned int idx = ((char*) ptr - (char*) buf->buffer_mem) / buf->slot_size;
+	buf->slots[idx] = SLOT_READY;
+}
+
+
+/* exclusive reader access is assumed */
+static inline int ft_buffer_read(struct ft_buffer* buf, void* dest)
+{
+	unsigned int idx;
+	if (buf->free_count == buf->slot_count)
+		/* nothing available */
+		return 0;
+	idx = buf->read_idx % buf->slot_count;
+	if (buf->slots[idx] == SLOT_READY) {
+		memcpy(dest, ((char*) buf->buffer_mem) + idx * buf->slot_size,
+		       buf->slot_size);
+		buf->slots[idx] = SLOT_FREE;
+		buf->read_idx++;
+		fetch_and_inc(&buf->free_count);
+		return 1;
+	} else
+		return 0;
+}
+
+
+#endif
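
As a rough illustration of the intended write/read protocol (a sketch, not part of the patch): writers reserve a slot with ft_buffer_start_write(), fill it, and publish it with ft_buffer_finish_write(); a single reader drains ready slots with ft_buffer_read(). The record layout, the 256-slot sizing (256 divides UINT_MAX + 1, as init_ft_buffer() requires), and all demo_* names are assumptions made for the example.

#include <linux/types.h>
#include <litmus/feather_trace.h>	/* fetch_and_inc()/fetch_and_dec() */
#include <litmus/feather_buffer.h>

/* Hypothetical fixed-size record stored in the buffer. */
struct demo_sample {
	u64 when;
	u64 value;
};

#define DEMO_SLOTS 256

static struct ft_buffer demo_buf;
static char demo_slot_states[DEMO_SLOTS];
static struct demo_sample demo_storage[DEMO_SLOTS];

static int demo_buf_setup(void)
{
	return init_ft_buffer(&demo_buf, DEMO_SLOTS, sizeof(struct demo_sample),
			      demo_slot_states, demo_storage);
}

/* Writer side: may run concurrently; the record is dropped under overload. */
static void demo_record(u64 when, u64 value)
{
	struct demo_sample *rec;

	if (ft_buffer_start_write(&demo_buf, (void **) &rec)) {
		rec->when = when;
		rec->value = value;
		ft_buffer_finish_write(&demo_buf, rec);
	}
}

/* Reader side: exclusive reader assumed; returns 1 if a record was copied. */
static int demo_drain_one(struct demo_sample *out)
{
	return ft_buffer_read(&demo_buf, out);
}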
diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h
new file mode 100644
index 000000000000..7d27e763406f
--- /dev/null
+++ b/include/litmus/feather_trace.h
@@ -0,0 +1,49 @@
+#ifndef _FEATHER_TRACE_H_
+#define _FEATHER_TRACE_H_
+
+#include <asm/atomic.h>
+#include <asm/feather_trace.h>
+
+int ft_enable_event(unsigned long id);
+int ft_disable_event(unsigned long id);
+int ft_is_event_enabled(unsigned long id);
+int ft_disable_all_events(void);
+
+/* atomic_* functions are inline anyway */
+static inline int fetch_and_inc(int *val)
+{
+	return atomic_add_return(1, (atomic_t*) val) - 1;
+}
+
+static inline int fetch_and_dec(int *val)
+{
+	return atomic_sub_return(1, (atomic_t*) val) + 1;
+}
+
+#ifndef __ARCH_HAS_FEATHER_TRACE
+/* provide default implementation */
+
+#define feather_callback
+
+#define MAX_EVENTS 1024
+
+extern int ft_events[MAX_EVENTS];
+
+#define ft_event(id, callback) \
+	if (ft_events[id]) callback();
+
+#define ft_event0(id, callback) \
+	if (ft_events[id]) callback(id);
+
+#define ft_event1(id, callback, param) \
+	if (ft_events[id]) callback(id, param);
+
+#define ft_event2(id, callback, param, param2) \
+	if (ft_events[id]) callback(id, param, param2);
+
+#define ft_event3(id, callback, p, p2, p3) \
+	if (ft_events[id]) callback(id, p, p2, p3);
+
+#endif
+
+#endif
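
To make the default (non-architecture-specific) trace-point path above concrete, a hypothetical event might look as follows (illustrative sketch, not part of the patch). The event id 42 and the demo_* names are invented for the example; an event only fires after ft_enable_event() has enabled it.

#include <litmus/feather_trace.h>

#define DEMO_EVENT 42	/* hypothetical id, must be < MAX_EVENTS */

/* callback invoked only while the event is enabled */
feather_callback void demo_callback(unsigned long id, unsigned long arg)
{
	/* a real consumer would log (id, arg) into an ft_buffer here */
}

static void demo_trace_point(unsigned long arg)
{
	/* with the default implementation this expands to:
	 *   if (ft_events[DEMO_EVENT]) demo_callback(DEMO_EVENT, arg);
	 */
	ft_event1(DEMO_EVENT, demo_callback, arg);
}

static void demo_enable(void)
{
	ft_enable_event(DEMO_EVENT);
}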
diff --git a/include/litmus/ftdev.h b/include/litmus/ftdev.h
new file mode 100644
index 000000000000..7697b4616699
--- /dev/null
+++ b/include/litmus/ftdev.h
@@ -0,0 +1,49 @@
+#ifndef _LITMUS_FTDEV_H_
+#define	_LITMUS_FTDEV_H_
+
+#include <litmus/feather_trace.h>
+#include <litmus/feather_buffer.h>
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+
+#define MAX_FTDEV_MINORS NR_CPUS
+
+#define FTDEV_ENABLE_CMD	0
+#define FTDEV_DISABLE_CMD	1
+
+struct ftdev;
+
+/* return 0 if buffer can be opened, otherwise -$REASON */
+typedef int (*ftdev_can_open_t)(struct ftdev* dev, unsigned int buf_no);
+/* return 0 on success, otherwise -$REASON */
+typedef int (*ftdev_alloc_t)(struct ftdev* dev, unsigned int buf_no);
+typedef void (*ftdev_free_t)(struct ftdev* dev, unsigned int buf_no);
+
+
+struct ftdev_event;
+
+struct ftdev_minor {
+	struct ft_buffer* buf;
+	unsigned int readers;
+	struct mutex lock;
+	/* FIXME: filter for authorized events */
+	struct ftdev_event* events;
+};
+
+struct ftdev {
+	struct cdev cdev;
+	/* FIXME: don't waste memory, allocate dynamically */
+	struct ftdev_minor minor[MAX_FTDEV_MINORS];
+	unsigned int minor_cnt;
+	ftdev_alloc_t alloc;
+	ftdev_free_t free;
+	ftdev_can_open_t can_open;
+};
+
+struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size);
+void free_ft_buffer(struct ft_buffer* buf);
+
+void ftdev_init(struct ftdev* ftdev, struct module* owner);
+int register_ftdev(struct ftdev* ftdev, const char* name, int major);
+
+#endif
diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h
new file mode 100644
index 000000000000..9bd361ef3943
--- /dev/null
+++ b/include/litmus/jobs.h
@@ -0,0 +1,9 @@
+#ifndef __LITMUS_JOBS_H__
+#define __LITMUS_JOBS_H__
+
+void prepare_for_next_period(struct task_struct *t);
+void release_at(struct task_struct *t, lt_t start);
+long complete_job(void);
+
+#endif
+
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
new file mode 100644
index 000000000000..62107e659c12
--- /dev/null
+++ b/include/litmus/litmus.h
@@ -0,0 +1,252 @@
+/*
+ * Constant definitions related to
+ * scheduling policy.
+ */
+
+#ifndef _LINUX_LITMUS_H_
+#define _LINUX_LITMUS_H_
+
+#include <linux/jiffies.h>
+#include <litmus/sched_trace.h>
+
+extern atomic_t release_master_cpu;
+
+extern atomic_t __log_seq_no;
+
+#define TRACE(fmt, args...) \
+	sched_trace_log_message("%d P%d: " fmt, atomic_add_return(1, &__log_seq_no), \
+				raw_smp_processor_id(), ## args)
+
+#define TRACE_TASK(t, fmt, args...) \
+	TRACE("(%s/%d) " fmt, (t)->comm, (t)->pid, ##args)
+
+#define TRACE_CUR(fmt, args...) \
+	TRACE_TASK(current, fmt, ## args)
+
+#define TRACE_BUG_ON(cond) \
+	do { if (cond) TRACE("BUG_ON(%s) at %s:%d " \
+			     "called from %p current=%s/%d state=%d " \
+			     "flags=%x partition=%d cpu=%d rtflags=%d"\
+			     " job=%u timeslice=%u\n", \
+			     #cond, __FILE__, __LINE__, __builtin_return_address(0), current->comm, \
+			     current->pid, current->state, current->flags, \
+			     get_partition(current), smp_processor_id(), get_rt_flags(current), \
+			     current->rt_param.job_params.job_no, \
+			     current->rt.time_slice\
+			     ); } while(0);
+
+
+/* in_list - is a given list_head queued on some list?
+ */
+static inline int in_list(struct list_head* list)
+{
+	return !( /* case 1: deleted */
+		 (list->next == LIST_POISON1 &&
+		  list->prev == LIST_POISON2)
+		 ||
+		 /* case 2: initialized */
+		 (list->next == list &&
+		  list->prev == list)
+		);
+}
+
+#define NO_CPU 0xffffffff
+
+void litmus_fork(struct task_struct *tsk);
+void litmus_exec(void);
+/* clean up real-time state of a task */
+void exit_litmus(struct task_struct *dead_tsk);
+
+long litmus_admit_task(struct task_struct *tsk);
+void litmus_exit_task(struct task_struct *tsk);
+
+#define is_realtime(t)		((t)->policy == SCHED_LITMUS)
+#define rt_transition_pending(t) \
+	((t)->rt_param.transition_pending)
+
+#define tsk_rt(t)		(&(t)->rt_param)
+
+/* Realtime utility macros */
+#define get_rt_flags(t)		(tsk_rt(t)->flags)
+#define set_rt_flags(t,f)	(tsk_rt(t)->flags=(f))
+#define get_exec_cost(t)	(tsk_rt(t)->task_params.exec_cost)
+#define get_exec_time(t)	(tsk_rt(t)->job_params.exec_time)
+#define get_rt_period(t)	(tsk_rt(t)->task_params.period)
+#define get_rt_phase(t)		(tsk_rt(t)->task_params.phase)
+#define get_partition(t)	(tsk_rt(t)->task_params.cpu)
+#define get_deadline(t)		(tsk_rt(t)->job_params.deadline)
+#define get_release(t)		(tsk_rt(t)->job_params.release)
+#define get_class(t)		(tsk_rt(t)->task_params.cls)
+
+inline static int budget_exhausted(struct task_struct* t)
+{
+	return get_exec_time(t) >= get_exec_cost(t);
+}
+
+
+#define is_hrt(t)     		\
+	(tsk_rt(t)->task_params.class == RT_CLASS_HARD)
+#define is_srt(t)     		\
+	(tsk_rt(t)->task_params.class == RT_CLASS_SOFT)
+#define is_be(t)      		\
+	(tsk_rt(t)->task_params.class == RT_CLASS_BEST_EFFORT)
+
+/* Our notion of time within LITMUS: kernel monotonic time. */
+static inline lt_t litmus_clock(void)
+{
+	return ktime_to_ns(ktime_get());
+}
+
+/* A macro to convert from nanoseconds to ktime_t. */
+#define ns_to_ktime(t)		ktime_add_ns(ktime_set(0, 0), t)
+
+#define get_domain(t) (tsk_rt(t)->domain)
+
+/* Honor the flag in the preempt_count variable that is set
+ * when scheduling is in progress.
+ */
+#define is_running(t) 			\
+	((t)->state == TASK_RUNNING || 	\
+	 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
+
+#define is_blocked(t)       \
+	(!is_running(t))
+#define is_released(t, now)	\
+	(lt_before_eq(get_release(t), now))
+#define is_tardy(t, now)    \
+	(lt_before_eq(tsk_rt(t)->job_params.deadline, now))
+
+/* real-time comparison macros */
+#define earlier_deadline(a, b) (lt_before(\
+	(a)->rt_param.job_params.deadline,\
+	(b)->rt_param.job_params.deadline))
+#define earlier_release(a, b)  (lt_before(\
+	(a)->rt_param.job_params.release,\
+	(b)->rt_param.job_params.release))
+
+void preempt_if_preemptable(struct task_struct* t, int on_cpu);
+
+#ifdef CONFIG_SRP
+void srp_ceiling_block(void);
+#else
+#define srp_ceiling_block() /* nothing */
+#endif
+
+#define bheap2task(hn) ((struct task_struct*) hn->value)
+
+#ifdef CONFIG_NP_SECTION
+
+static inline int is_kernel_np(struct task_struct *t)
+{
+	return tsk_rt(t)->kernel_np;
+}
+
+static inline int is_user_np(struct task_struct *t)
+{
+	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0;
+}
+
+static inline void request_exit_np(struct task_struct *t)
+{
+	if (is_user_np(t)) {
+		/* Set the flag that tells user space to call
+		 * into the kernel at the end of a critical section. */
+		if (likely(tsk_rt(t)->ctrl_page)) {
+			TRACE_TASK(t, "setting delayed_preemption flag\n");
+			tsk_rt(t)->ctrl_page->delayed_preemption = 1;
+		}
+	}
+}
+
+static inline void clear_exit_np(struct task_struct *t)
+{
+	if (likely(tsk_rt(t)->ctrl_page))
+		tsk_rt(t)->ctrl_page->delayed_preemption = 0;
+}
+
+static inline void make_np(struct task_struct *t)
+{
+	tsk_rt(t)->kernel_np++;
+}
+
+/* Caller should check if preemption is necessary when
+ * the function returns 0.
+ */
+static inline int take_np(struct task_struct *t)
+{
+	return --tsk_rt(t)->kernel_np;
+}
+
+#else
+
+static inline int is_kernel_np(struct task_struct* t)
+{
+	return 0;
+}
+
+static inline int is_user_np(struct task_struct* t)
+{
+	return 0;
+}
+
+static inline void request_exit_np(struct task_struct *t)
+{
+	/* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */
+	BUG();
+}
+
+static inline void clear_exit_np(struct task_struct* t)
+{
+}
+
+#endif
+
+static inline int is_np(struct task_struct *t)
+{
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	int kernel, user;
+	kernel = is_kernel_np(t);
+	user   = is_user_np(t);
+	if (kernel || user)
+		TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
+
+			   kernel, user);
+	return kernel || user;
+#else
+	return unlikely(is_kernel_np(t) || is_user_np(t));
+#endif
+}
+
+static inline int is_present(struct task_struct* t)
+{
+	return t && tsk_rt(t)->present;
+}
+
+
+/* make the unit explicit */
+typedef unsigned long quanta_t;
+
+enum round {
+	FLOOR,
+	CEIL
+};
+
+
+/* Tick period is used to convert ns-specified execution
+ * costs and periods into tick-based equivalents.
+ */
+extern ktime_t tick_period;
+
+static inline quanta_t time2quanta(lt_t time, enum round round)
+{
+	s64  quantum_length = ktime_to_ns(tick_period);
+
+	if (do_div(time, quantum_length) && round == CEIL)
+		time++;
+	return (quanta_t) time;
+}
+
+/* By how much is cpu staggered behind CPU 0? */
+u64 cpu_stagger_offset(int cpu);
+
+#endif
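
As one worked example of the helpers above (a sketch; the helper name is made up, not from the patch): a task's remaining budget can be converted into scheduler quanta with time2quanta(). With a 1 ms tick_period, 2.5 ms of remaining budget rounds up to 3 quanta under CEIL.

#include <litmus/litmus.h>

/* Hypothetical helper: how many quanta of budget does task t still have? */
static quanta_t remaining_budget_quanta(struct task_struct *t)
{
	lt_t left;

	if (budget_exhausted(t))
		return 0;

	left = get_exec_cost(t) - get_exec_time(t);
	/* round up so a partially used quantum still counts */
	return time2quanta(left, CEIL);
}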
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
new file mode 100644
index 000000000000..b452be1d2256
--- /dev/null
+++ b/include/litmus/rt_domain.h
@@ -0,0 +1,162 @@
+/* CLEANUP: Add comments and make it less messy.
+ *
+ */
+
+#ifndef __UNC_RT_DOMAIN_H__
+#define __UNC_RT_DOMAIN_H__
+
+#include <litmus/bheap.h>
+
+#define RELEASE_QUEUE_SLOTS 127 /* prime */
+
+struct _rt_domain;
+
+typedef int (*check_resched_needed_t)(struct _rt_domain *rt);
+typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks);
+
+struct release_queue {
+	/* each slot maintains a list of release heaps sorted
+	 * by release time */
+	struct list_head slot[RELEASE_QUEUE_SLOTS];
+};
+
+typedef struct _rt_domain {
+	/* runnable rt tasks are in here */
+	spinlock_t			ready_lock;
+	struct bheap			ready_queue;
+
+	/* real-time tasks waiting for release are in here */
+	spinlock_t			release_lock;
+	struct release_queue		release_queue;
+	int				release_master;
+
+	/* for moving tasks to the release queue */
+	spinlock_t			tobe_lock;
+	struct list_head		tobe_released;
+
+	/* how do we check if we need to kick another CPU? */
+	check_resched_needed_t		check_resched;
+
+	/* how do we release jobs? */
+	release_jobs_t			release_jobs;
+
+	/* how are tasks ordered in the ready queue? */
+	bheap_prio_t			order;
+} rt_domain_t;
+
+struct release_heap {
+	/* list_head for per-time-slot list */
+	struct list_head		list;
+	lt_t				release_time;
+	/* all tasks to be released at release_time */
+	struct bheap			heap;
+	/* used to trigger the release */
+	struct hrtimer			timer;
+	/* used to delegate releases */
+	struct hrtimer_start_on_info	info;
+	/* required for the timer callback */
+	rt_domain_t*			dom;
+};
+
+
+static inline struct task_struct* __next_ready(rt_domain_t* rt)
+{
+	struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue);
+	if (hn)
+		return bheap2task(hn);
+	else
+		return NULL;
+}
+
+void rt_domain_init(rt_domain_t *rt, bheap_prio_t order,
+		    check_resched_needed_t check,
+		    release_jobs_t release);
+
+void __add_ready(rt_domain_t* rt, struct task_struct *new);
+void __merge_ready(rt_domain_t* rt, struct bheap *tasks);
+void __add_release(rt_domain_t* rt, struct task_struct *task);
+
+static inline struct task_struct* __take_ready(rt_domain_t* rt)
+{
+	struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue);
+	if (hn)
+		return bheap2task(hn);
+	else
+		return NULL;
+}
+
+static inline struct task_struct* __peek_ready(rt_domain_t* rt)
+{
+	struct bheap_node* hn = bheap_peek(rt->order, &rt->ready_queue);
+	if (hn)
+		return bheap2task(hn);
+	else
+		return NULL;
+}
+
+static inline int is_queued(struct task_struct *t)
+{
+	BUG_ON(!tsk_rt(t)->heap_node);
+	return bheap_node_in_heap(tsk_rt(t)->heap_node);
+}
+
+static inline void remove(rt_domain_t* rt, struct task_struct *t)
+{
+	bheap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
+}
+
+static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
+{
+	unsigned long flags;
+	/* first we need the write lock for rt_ready_queue */
+	spin_lock_irqsave(&rt->ready_lock, flags);
+	__add_ready(rt, new);
+	spin_unlock_irqrestore(&rt->ready_lock, flags);
+}
+
+static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&rt->ready_lock, flags);
+	__merge_ready(rt, tasks);
+	spin_unlock_irqrestore(&rt->ready_lock, flags);
+}
+
+static inline struct task_struct* take_ready(rt_domain_t* rt)
+{
+	unsigned long flags;
+	struct task_struct* ret;
+	/* first we need the write lock for rt_ready_queue */
+	spin_lock_irqsave(&rt->ready_lock, flags);
+	ret = __take_ready(rt);
+	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	return ret;
+}
+
+
+static inline void add_release(rt_domain_t* rt, struct task_struct *task)
+{
+	unsigned long flags;
+	/* first we need the write lock for rt_ready_queue */
+	spin_lock_irqsave(&rt->tobe_lock, flags);
+	__add_release(rt, task);
+	spin_unlock_irqrestore(&rt->tobe_lock, flags);
+}
+
+static inline int __jobs_pending(rt_domain_t* rt)
+{
+	return !bheap_empty(&rt->ready_queue);
+}
+
+static inline int jobs_pending(rt_domain_t* rt)
+{
+	unsigned long flags;
+	int ret;
+	/* first we need the write lock for rt_ready_queue */
+	spin_lock_irqsave(&rt->ready_lock, flags);
+	ret = !bheap_empty(&rt->ready_queue);
+	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	return ret;
+}
+
+#endif
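
To sketch how a plugin might wire this domain abstraction together with the EDF helpers declared earlier in this patch (illustrative only; the demo_* names, the trivial resched hook, and the release handler shown are assumptions made for the example):

#include <litmus/rt_domain.h>
#include <litmus/edf_common.h>

static rt_domain_t demo_domain;

/* Hypothetical resched hook: a real plugin compares priorities and kicks a
 * CPU; this sketch just reports that no rescheduling is needed. */
static int demo_check_resched(rt_domain_t *rt)
{
	return 0;
}

/* Release handler: move all newly released jobs into the ready queue. */
static void demo_release_jobs(rt_domain_t *rt, struct bheap *tasks)
{
	merge_ready(rt, tasks);	/* takes ready_lock internally */
}

static void demo_domain_setup(void)
{
	/* order the ready queue by EDF via edf_ready_order() */
	rt_domain_init(&demo_domain, edf_ready_order,
		       demo_check_resched, demo_release_jobs);
}

static struct task_struct *demo_pick_next(void)
{
	/* take_ready() locks the domain and pops the earliest-deadline task */
	return take_ready(&demo_domain);
}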
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
new file mode 100644
index 000000000000..5b94d1a8eea7
--- /dev/null
+++ b/include/litmus/rt_param.h
@@ -0,0 +1,189 @@
+/*
+ * Definition of the scheduler plugin interface.
+ *
+ */
+#ifndef _LINUX_RT_PARAM_H_
+#define _LINUX_RT_PARAM_H_
+
+/* Litmus time type. */
+typedef unsigned long long lt_t;
+
+static inline int lt_after(lt_t a, lt_t b)
+{
+	return ((long long) b) - ((long long) a) < 0;
+}
+#define lt_before(a, b) lt_after(b, a)
+
+static inline int lt_after_eq(lt_t a, lt_t b)
+{
+	return ((long long) a) - ((long long) b) >= 0;
+}
+#define lt_before_eq(a, b) lt_after_eq(b, a)
+
+/* different types of clients */
+typedef enum {
+	RT_CLASS_HARD,
+	RT_CLASS_SOFT,
+	RT_CLASS_BEST_EFFORT
+} task_class_t;
+
+struct rt_task {
+	lt_t		exec_cost;
+	lt_t		period;
+	lt_t		phase;
+	unsigned int	cpu;
+	task_class_t	cls;
+};
+
+/* The definition of the data that is shared between the kernel and real-time
+ * tasks via a shared page (see litmus/ctrldev.c).
+ *
+ * WARNING: User space can write to this, so don't trust
+ * the correctness of the fields!
+ *
+ * This serves two purposes: to enable efficient signaling
+ * of non-preemptive sections (user->kernel) and
+ * delayed preemptions (kernel->user), and to export
+ * some real-time relevant statistics such as preemption and
+ * migration data to user space. We can't use a device to export
+ * statistics because we want to avoid system call overhead when
+ * determining preemption/migration overheads.
+ */
+struct control_page {
+	/* Is the task currently in a non-preemptive section? */
+	int np_flag;
+	/* Should the task call into the kernel when it leaves
+	 * its non-preemptive section? */
+	int delayed_preemption;
+
+	/* to be extended */
+};
+
+/* don't export internal data structures to user space (liblitmus) */
+#ifdef __KERNEL__
+
+struct _rt_domain;
+struct bheap_node;
+struct release_heap;
+
+struct rt_job {
+	/* Time instant the job was or will be released. */
+	lt_t	release;
+	/* What is the current deadline? */
+	lt_t	deadline;
+
+	/* How much service has this job received so far? */
+	lt_t	exec_time;
+
+	/* Which job is this. This is used to let user space
+	 * specify which job to wait for, which is important if jobs
+	 * overrun. If we just call sys_sleep_next_period() then we
+	 * will unintentionally miss jobs after an overrun.
+	 *
+	 * Increase this sequence number when a job is released.
+	 */
+	unsigned int    job_no;
+};
+
+struct pfair_param;
+
+/* RT task parameters for scheduling extensions
+ * These parameters are inherited during clone and therefore must
+ * be explicitly set up before the task set is launched.
+ */
+struct rt_param {
+	/* is the task sleeping? */
+	unsigned int 		flags:8;
+
+	/* do we need to check for srp blocking? */
+	unsigned int		srp_non_recurse:1;
+
+	/* is the task present? (true if it can be scheduled) */
+	unsigned int		present:1;
+
+	/* user controlled parameters */
+	struct rt_task 		task_params;
+
+	/* timing parameters */
+	struct rt_job 		job_params;
+
+	/* task representing the current "inherited" task
+	 * priority, assigned by inherit_priority and
+	 * return priority in the scheduler plugins.
+	 * could point to self if PI does not result in
+	 * an increased task priority.
+	 */
+	 struct task_struct*	inh_task;
+
+#ifdef CONFIG_NP_SECTION
+	/* For the FMLP under PSN-EDF, it is required to make the task
+	 * non-preemptive from kernel space. In order not to interfere with
+	 * user space, this counter indicates the kernel space np setting.
+	 * kernel_np > 0 => task is non-preemptive
+	 */
+	unsigned int	kernel_np;
+#endif
+
+	/* This field can be used by plugins to store where the task
+	 * is currently scheduled. It is the responsibility of the
+	 * plugin to avoid race conditions.
+	 *
+	 * This is used by GSN-EDF and PFAIR.
+	 */
+	volatile int		scheduled_on;
+
+	/* Is the stack of the task currently in use? This is updated by
+	 * the LITMUS core.
+	 *
+	 * Be careful to avoid deadlocks!
+	 */
+	volatile int		stack_in_use;
+
+	/* This field can be used by plugins to store where the task
+	 * is currently linked. It is the responsibility of the plugin
+	 * to avoid race conditions.
+	 *
+	 * Used by GSN-EDF.
+	 */
+	volatile int		linked_on;
+
+	/* PFAIR/PD^2 state. Allocated on demand. */
+	struct pfair_param*	pfair;
+
+	/* Fields saved before BE->RT transition.
+	 */
+	int old_policy;
+	int old_prio;
+
+	/* ready queue for this task */
+	struct _rt_domain* domain;
+
+	/* heap element for this task
+	 *
+	 * Warning: Don't statically allocate this node. The heap
+	 *          implementation swaps these between tasks, thus after
+	 *          dequeuing from a heap you may end up with a different node
+	 *          than the one you had when enqueuing the task. For the same
+	 *          reason, don't obtain and store references to this node
+	 *          other than this pointer (which is updated by the heap
+	 *          implementation).
+	 */
+	struct bheap_node*	heap_node;
+	struct release_heap*	rel_heap;
+
+	/* Used by rt_domain to queue task in release list.
+	 */
+	struct list_head list;
+
+	/* Pointer to the page shared between userspace and kernel. */
+	struct control_page * ctrl_page;
+};
+
+/* Possible RT flags */
+#define RT_F_RUNNING		0x00000000
+#define RT_F_SLEEP		0x00000001
+#define RT_F_EXIT_SEM		0x00000008
+
+#endif
+
+#endif
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
new file mode 100644
index 000000000000..9c1c9f28ba79
--- /dev/null
+++ b/include/litmus/sched_plugin.h
@@ -0,0 +1,162 @@
+/*
+ * Definition of the scheduler plugin interface.
+ *
+ */
+#ifndef _LINUX_SCHED_PLUGIN_H_
+#define _LINUX_SCHED_PLUGIN_H_
+
+#include <linux/sched.h>
+
+/* struct for semaphore with priority inheritance */
+struct pi_semaphore {
+	atomic_t count;
+	int sleepers;
+	wait_queue_head_t wait;
+	struct {
+		/* highest-prio holder/waiter */
+		struct task_struct *task;
+		struct task_struct* cpu_task[NR_CPUS];
+	} hp;
+	/* current lock holder */
+	struct task_struct *holder;
+};
+
+/************************ setup/tear down ********************/
+
+typedef long (*activate_plugin_t) (void);
+typedef long (*deactivate_plugin_t) (void);
+
+
+
+/********************* scheduler invocation ******************/
+
+/* Plugin-specific realtime tick handler */
+typedef void (*scheduler_tick_t) (struct task_struct *cur);
+/* Novell make sched decision function */
+typedef struct task_struct* (*schedule_t)(struct task_struct * prev);
+/* Clean up after the task switch has occurred.
+ * This function is called after every (even non-rt) task switch.
+ */
+typedef void (*finish_switch_t)(struct task_struct *prev);
+
+
+/********************* task state changes ********************/
+
+/* Called to setup a new real-time task.
+ * Release the first job, enqueue, etc.
+ * Task may already be running.
+ */
+typedef void (*task_new_t) (struct task_struct *task,
+			    int on_rq,
+			    int running);
+
+/* Called to re-introduce a task after blocking.
+ * Can potentially be called multiple times.
+ */
+typedef void (*task_wake_up_t) (struct task_struct *task);
+/* called to notify the plugin of a blocking real-time task
+ * it will only be called for real-time tasks and before schedule is called */
+typedef void (*task_block_t)  (struct task_struct *task);
+/* Called when a real-time task exits or changes to a different scheduling
+ * class.
+ * Free any allocated resources
+ */
+typedef void (*task_exit_t)    (struct task_struct *);
+
+/* Called when the new_owner is released from the wait queue
+ * it should now inherit the priority from sem, _before_ it gets readded
+ * to any queue
+ */
+typedef long (*inherit_priority_t) (struct pi_semaphore *sem,
+				    struct task_struct *new_owner);
+
+/* Called when the current task releases a semaphore where it might have
+ * inherited a priority from
+ */
+typedef long (*return_priority_t) (struct pi_semaphore *sem);
+
+/* Called when a task tries to acquire a semaphore and fails. Check if its
+ * priority is higher than that of the current holder.
+ */
+typedef long (*pi_block_t) (struct pi_semaphore *sem, struct task_struct *t);
+
+
+
+
+/********************* sys call backends  ********************/
+/* This function causes the caller to sleep until the next release */
+typedef long (*complete_job_t) (void);
+
+typedef long (*admit_task_t)(struct task_struct* tsk);
+
+typedef void (*release_at_t)(struct task_struct *t, lt_t start);
+
+struct sched_plugin {
+	struct list_head	list;
+	/* 	basic info 		*/
+	char 			*plugin_name;
+
+	/*	setup			*/
+	activate_plugin_t	activate_plugin;
+	deactivate_plugin_t	deactivate_plugin;
+
+#ifdef CONFIG_SRP
+	unsigned int		srp_active;
+#endif
+
+	/* 	scheduler invocation 	*/
+	scheduler_tick_t        tick;
+	schedule_t 		schedule;
+	finish_switch_t 	finish_switch;
+
+	/*	syscall backend 	*/
+	complete_job_t 		complete_job;
+	release_at_t		release_at;
+
+	/*	task state changes 	*/
+	admit_task_t		admit_task;
+
+        task_new_t 		task_new;
+	task_wake_up_t		task_wake_up;
+	task_block_t		task_block;
+	task_exit_t 		task_exit;
+
+#ifdef CONFIG_FMLP
+	/*	priority inheritance 	*/
+	unsigned int		fmlp_active;
+	inherit_priority_t	inherit_priority;
+	return_priority_t	return_priority;
+	pi_block_t		pi_block;
+#endif
+} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+
+
+extern struct sched_plugin *litmus;
+
+/* cluster size: cache_index = 2 L2, cache_index = 3 L3 */
+extern int cluster_cache_index;
+
+int register_sched_plugin(struct sched_plugin* plugin);
+struct sched_plugin* find_sched_plugin(const char* name);
+int print_sched_plugins(char* buf, int max);
+
+static inline int srp_active(void)
+{
+#ifdef CONFIG_SRP
+	return litmus->srp_active;
+#else
+	return 0;
+#endif
+}
+static inline int fmlp_active(void)
+{
+#ifdef CONFIG_FMLP
+	return litmus->fmlp_active;
+#else
+	return 0;
+#endif
+}
+
+extern struct sched_plugin linux_sched_plugin;
+
+#endif
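
For context, registering a plugin against this interface might look roughly like the following sketch (not part of the patch). The plugin name, the two callbacks shown, and the assumption that hooks left NULL are tolerated or given defaults by the plugin core are all illustrative.

#include <litmus/sched_plugin.h>

static long demo_activate_plugin(void)
{
	return 0;	/* nothing to set up in this sketch */
}

static long demo_admit_task(struct task_struct *tsk)
{
	return 0;	/* accept every task; a real plugin validates rt_param */
}

static struct sched_plugin demo_plugin = {
	.plugin_name	 = "DEMO",
	.activate_plugin = demo_activate_plugin,
	.admit_task	 = demo_admit_task,
	/* assumption: remaining hooks may stay NULL / fall back to defaults */
};

static int __init demo_plugin_init(void)
{
	return register_sched_plugin(&demo_plugin);
}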
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
new file mode 100644
index 000000000000..e1b0c9712b5f
--- /dev/null
+++ b/include/litmus/sched_trace.h
@@ -0,0 +1,192 @@
+/*
+ * sched_trace.h -- record scheduler events to a byte stream for offline analysis.
+ */
+#ifndef _LINUX_SCHED_TRACE_H_
+#define _LINUX_SCHED_TRACE_H_
+
+/* all times in nanoseconds */
+
+struct st_trace_header {
+	u8	type;		/* Of what type is this record?  */
+	u8	cpu;		/* On which CPU was it recorded? */
+	u16	pid;		/* PID of the task.              */
+	u32	job;		/* The job sequence number.      */
+};
+
+#define ST_NAME_LEN 16
+struct st_name_data {
+	char	cmd[ST_NAME_LEN];/* The name of the executable of this process. */
+};
+
+struct st_param_data {		/* regular params */
+	u32	wcet;
+	u32	period;
+	u32	phase;
+	u8	partition;
+	u8	__unused[3];
+};
+
+struct st_release_data {	/* A job was/is going to be released. */
+	u64	release;	/* What's the release time?        */
+	u64	deadline;	/* By when must it finish?         */
+};
+
+struct st_assigned_data {	/* A job was assigned to a CPU.    */
+	u64	when;
+	u8	target;		/* Where should it execute?        */
+	u8	__unused[3];
+};
+
+struct st_switch_to_data {	/* A process was switched to on a given CPU.   */
+	u64	when;		/* When did this occur?                        */
+	u32	exec_time;	/* Time the current job has executed.          */
+
+};
+
+struct st_switch_away_data {	/* A process was switched away from on a given CPU. */
+	u64	when;
+	u64	exec_time;
+};
+
+struct st_completion_data {	/* A job completed. */
+	u64	when;
+	u8	forced:1; 	/* Set to 1 if job overran and kernel advanced to the
+				 * next task automatically; set to 0 otherwise.
+				 */
+	u8	__uflags:7;
+	u8	__unused[3];
+};
+
+struct st_block_data {		/* A task blocks. */
+	u64	when;
+	u64	__unused;
+};
+
+struct st_resume_data {		/* A task resumes. */
+	u64	when;
+	u64	__unused;
+};
+
+struct st_sys_release_data {
+	u64	when;
+	u64	release;
+};
+
+#define DATA(x) struct st_ ## x ## _data x;
+
+typedef enum {
+        ST_NAME = 1,		/* Start at one, so that we can spot
+				 * uninitialized records. */
+	ST_PARAM,
+	ST_RELEASE,
+	ST_ASSIGNED,
+	ST_SWITCH_TO,
+	ST_SWITCH_AWAY,
+	ST_COMPLETION,
+	ST_BLOCK,
+	ST_RESUME,
+	ST_SYS_RELEASE,
+} st_event_record_type_t;
+
+struct st_event_record {
+	struct st_trace_header hdr;
+	union {
+		u64 raw[2];
+
+		DATA(name);
+		DATA(param);
+		DATA(release);
+		DATA(assigned);
+		DATA(switch_to);
+		DATA(switch_away);
+		DATA(completion);
+		DATA(block);
+		DATA(resume);
+		DATA(sys_release);
+
+	} data;
+};
+
+#undef DATA
+
+#ifdef __KERNEL__
+
+#include <linux/sched.h>
+#include <litmus/feather_trace.h>
+
+#ifdef CONFIG_SCHED_TASK_TRACE
+
+#define SCHED_TRACE(id, callback, task) \
+	ft_event1(id, callback, task)
+#define SCHED_TRACE2(id, callback, task, xtra) \
+	ft_event2(id, callback, task, xtra)
+
+/* provide prototypes; needed on sparc64 */
+#ifndef NO_TASK_TRACE_DECLS
+feather_callback void do_sched_trace_task_name(unsigned long id,
+					       struct task_struct* task);
+feather_callback void do_sched_trace_task_param(unsigned long id,
+						struct task_struct* task);
+feather_callback void do_sched_trace_task_release(unsigned long id,
+						  struct task_struct* task);
+feather_callback void do_sched_trace_task_switch_to(unsigned long id,
+						    struct task_struct* task);
+feather_callback void do_sched_trace_task_switch_away(unsigned long id,
+						      struct task_struct* task);
+feather_callback void do_sched_trace_task_completion(unsigned long id,
+						     struct task_struct* task,
+						     unsigned long forced);
+feather_callback void do_sched_trace_task_block(unsigned long id,
+						struct task_struct* task);
+feather_callback void do_sched_trace_task_resume(unsigned long id,
+						 struct task_struct* task);
+feather_callback void do_sched_trace_sys_release(unsigned long id,
+						 lt_t* start);
+#endif
+
+#else
+
+#define SCHED_TRACE(id, callback, task)        /* no tracing */
+#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */
+
+#endif
+
+
+#define SCHED_TRACE_BASE_ID 500
+
+
+#define sched_trace_task_name(t) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, do_sched_trace_task_name, t)
+#define sched_trace_task_param(t) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, do_sched_trace_task_param, t)
+#define sched_trace_task_release(t) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, do_sched_trace_task_release, t)
+#define sched_trace_task_switch_to(t) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, do_sched_trace_task_switch_to, t)
+#define sched_trace_task_switch_away(t) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t)
+#define sched_trace_task_completion(t, forced) \
+	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \
+		     (unsigned long) forced)
+#define sched_trace_task_block(t) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t)
+#define sched_trace_task_resume(t) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t)
+/* when is a pointer, it does not need an explicit cast to unsigned long */
+#define sched_trace_sys_release(when) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 9, do_sched_trace_sys_release, when)
+
+#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
+
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+void sched_trace_log_message(const char* fmt, ...);
+void dump_trace_buffer(int max);
+#else
+
+#define sched_trace_log_message(fmt, ...)
+
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
new file mode 100644
index 000000000000..b32c71180774
--- /dev/null
+++ b/include/litmus/trace.h
@@ -0,0 +1,113 @@
+#ifndef _SYS_TRACE_H_
+#define	_SYS_TRACE_H_
+
+#ifdef CONFIG_SCHED_OVERHEAD_TRACE
+
+#include <litmus/feather_trace.h>
+#include <litmus/feather_buffer.h>
+
+
+/*********************** TIMESTAMPS ************************/
+
+enum task_type_marker {
+	TSK_BE,
+	TSK_RT,
+	TSK_UNKNOWN
+};
+
+struct timestamp {
+	uint64_t		timestamp;
+	uint32_t		seq_no;
+	uint8_t			cpu;
+	uint8_t			event;
+	uint8_t			task_type;
+};
+
+/* tracing callbacks */
+feather_callback void save_timestamp(unsigned long event);
+feather_callback void save_timestamp_def(unsigned long event, unsigned long type);
+feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr);
+feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu);
+
+
+#define TIMESTAMP(id) ft_event0(id, save_timestamp)
+
+#define DTIMESTAMP(id, def)  ft_event1(id, save_timestamp_def, (unsigned long) def)
+
+#define TTIMESTAMP(id, task) \
+	ft_event1(id, save_timestamp_task, (unsigned long) task)
+
+#define CTIMESTAMP(id, cpu) \
+	ft_event1(id, save_timestamp_cpu, (unsigned long) cpu)
+
+#else /* !CONFIG_SCHED_OVERHEAD_TRACE */
+
+#define TIMESTAMP(id)        /* no tracing */
+
+#define DTIMESTAMP(id, def)  /* no tracing */
+
+#define TTIMESTAMP(id, task) /* no tracing */
+
+#define CTIMESTAMP(id, cpu)  /* no tracing */
+
+#endif
+
+
+/* Convention for timestamps
+ * =========================
+ *
+ * In order to process the trace files with a common tool, we use the following
+ * convention to measure execution times: The end time id of a code segment is
+ * always the next number after the start time event id.
+ */
+
+#define TS_SCHED_START			DTIMESTAMP(100, TSK_UNKNOWN) /* we only
+								      * care
+								      * about
+								      * next */
+#define TS_SCHED_END(t)			TTIMESTAMP(101, t)
+#define TS_SCHED2_START(t) 		TTIMESTAMP(102, t)
+#define TS_SCHED2_END(t)       		TTIMESTAMP(103, t)
+
+#define TS_CXS_START(t)			TTIMESTAMP(104, t)
+#define TS_CXS_END(t)			TTIMESTAMP(105, t)
+
+#define TS_RELEASE_START		DTIMESTAMP(106, TSK_RT)
+#define TS_RELEASE_END			DTIMESTAMP(107, TSK_RT)
+
+#define TS_TICK_START(t)		TTIMESTAMP(110, t)
+#define TS_TICK_END(t) 			TTIMESTAMP(111, t)
+
+
+#define TS_PLUGIN_SCHED_START		/* TIMESTAMP(120) */  /* currently unused */
+#define TS_PLUGIN_SCHED_END		/* TIMESTAMP(121) */
+
+#define TS_PLUGIN_TICK_START		/* TIMESTAMP(130) */
+#define TS_PLUGIN_TICK_END		/* TIMESTAMP(131) */
+
+#define TS_ENTER_NP_START		TIMESTAMP(140)
+#define TS_ENTER_NP_END			TIMESTAMP(141)
+
+#define TS_EXIT_NP_START		TIMESTAMP(150)
+#define TS_EXIT_NP_END			TIMESTAMP(151)
+
+#define TS_SRP_UP_START			TIMESTAMP(160)
+#define TS_SRP_UP_END			TIMESTAMP(161)
+#define TS_SRP_DOWN_START		TIMESTAMP(162)
+#define TS_SRP_DOWN_END			TIMESTAMP(163)
+
+#define TS_PI_UP_START			TIMESTAMP(170)
+#define TS_PI_UP_END			TIMESTAMP(171)
+#define TS_PI_DOWN_START		TIMESTAMP(172)
+#define TS_PI_DOWN_END			TIMESTAMP(173)
+
+#define TS_FIFO_UP_START		TIMESTAMP(180)
+#define TS_FIFO_UP_END			TIMESTAMP(181)
+#define TS_FIFO_DOWN_START		TIMESTAMP(182)
+#define TS_FIFO_DOWN_END		TIMESTAMP(183)
+
+#define TS_SEND_RESCHED_START(c)	CTIMESTAMP(190, c)
+#define TS_SEND_RESCHED_END		DTIMESTAMP(191, TSK_UNKNOWN)
+
+
+#endif /* !_SYS_TRACE_H_ */
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
new file mode 100644
index 000000000000..dbddc6523f8e
--- /dev/null
+++ b/include/litmus/unistd_32.h
@@ -0,0 +1,23 @@
+/*
+ * included from arch/x86/include/asm/unistd_32.h
+ *
+ * LITMUS^RT syscalls with "relative" numbers
+ */
+#define __LSC(x) (__NR_LITMUS + x)
+
+#define __NR_set_rt_task_param	__LSC(0)
+#define __NR_get_rt_task_param	__LSC(1)
+#define __NR_complete_job	__LSC(2)
+#define __NR_od_open		__LSC(3)
+#define __NR_od_close		__LSC(4)
+#define __NR_fmlp_down		__LSC(5)
+#define __NR_fmlp_up		__LSC(6)
+#define __NR_srp_down		__LSC(7)
+#define __NR_srp_up		__LSC(8)
+#define __NR_query_job_no	__LSC(9)
+#define __NR_wait_for_job_release __LSC(10)
+#define __NR_wait_for_ts_release __LSC(11)
+#define __NR_release_ts		__LSC(12)
+#define __NR_null_call		__LSC(13)
+
+#define NR_litmus_syscalls 14
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
new file mode 100644
index 000000000000..f0618e75348d
--- /dev/null
+++ b/include/litmus/unistd_64.h
@@ -0,0 +1,37 @@
+/*
+ * included from arch/x86/include/asm/unistd_64.h
+ *
+ * LITMUS^RT syscalls with "relative" numbers
+ */
+#define __LSC(x) (__NR_LITMUS + x)
+
+#define __NR_set_rt_task_param			__LSC(0)
+__SYSCALL(__NR_set_rt_task_param, sys_set_rt_task_param)
+#define __NR_get_rt_task_param			__LSC(1)
+__SYSCALL(__NR_get_rt_task_param, sys_get_rt_task_param)
+#define __NR_complete_job	  		__LSC(2)
+__SYSCALL(__NR_complete_job, sys_complete_job)
+#define __NR_od_open				__LSC(3)
+__SYSCALL(__NR_od_open, sys_od_open)
+#define __NR_od_close				__LSC(4)
+__SYSCALL(__NR_od_close, sys_od_close)
+#define __NR_fmlp_down				__LSC(5)
+__SYSCALL(__NR_fmlp_down, sys_fmlp_down)
+#define __NR_fmlp_up				__LSC(6)
+__SYSCALL(__NR_fmlp_up, sys_fmlp_up)
+#define __NR_srp_down				__LSC(7)
+__SYSCALL(__NR_srp_down, sys_srp_down)
+#define __NR_srp_up				__LSC(8)
+__SYSCALL(__NR_srp_up, sys_srp_up)
+#define __NR_query_job_no			__LSC(9)
+__SYSCALL(__NR_query_job_no, sys_query_job_no)
+#define __NR_wait_for_job_release		__LSC(10)
+__SYSCALL(__NR_wait_for_job_release, sys_wait_for_job_release)
+#define __NR_wait_for_ts_release		__LSC(11)
+__SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
+#define __NR_release_ts				__LSC(12)
+__SYSCALL(__NR_release_ts, sys_release_ts)
+#define __NR_null_call				__LSC(13)
+__SYSCALL(__NR_null_call, sys_null_call)
+
+#define NR_litmus_syscalls 14