Diffstat (limited to 'include/litmus')
-rw-r--r--  include/litmus/affinity.h      |  80
-rw-r--r--  include/litmus/bheap.h         |  77
-rw-r--r--  include/litmus/binheap.h       | 205
-rw-r--r--  include/litmus/budget.h        |  36
-rw-r--r--  include/litmus/clustered.h     |  44
-rw-r--r--  include/litmus/edf_common.h    |  25
-rw-r--r--  include/litmus/fdso.h          |  78
-rw-r--r--  include/litmus/fp_common.h     | 105
-rw-r--r--  include/litmus/fpmath.h        | 147
-rw-r--r--  include/litmus/jobs.h          |  10
-rw-r--r--  include/litmus/litmus.h        | 268
-rw-r--r--  include/litmus/litmus_proc.h   |  63
-rw-r--r--  include/litmus/locking.h       |  28
-rw-r--r--  include/litmus/preempt.h       | 164
-rw-r--r--  include/litmus/rt_domain.h     | 182
-rw-r--r--  include/litmus/rt_param.h      |  12
-rw-r--r--  include/litmus/sched_plugin.h  | 128
-rw-r--r--  include/litmus/srp.h           |  28
-rw-r--r--  include/litmus/unistd_32.h     |  21
-rw-r--r--  include/litmus/unistd_64.h     |  33
-rw-r--r--  include/litmus/wait.h          |  57
21 files changed, 1785 insertions(+), 6 deletions(-)
diff --git a/include/litmus/affinity.h b/include/litmus/affinity.h
new file mode 100644
index 000000000000..ca2e442eb547
--- /dev/null
+++ b/include/litmus/affinity.h
@@ -0,0 +1,80 @@
1#ifndef __LITMUS_AFFINITY_H
2#define __LITMUS_AFFINITY_H
3
4#include <linux/cpumask.h>
5
6/*
7 L1 (instr) = depth 0
8 L1 (data) = depth 1
9 L2 = depth 2
10 L3 = depth 3
11 */
12#define NUM_CACHE_LEVELS 4
13
14struct neighborhood
15{
16 unsigned int size[NUM_CACHE_LEVELS];
17 cpumask_var_t neighbors[NUM_CACHE_LEVELS];
18};
19
20/* topology info is stored redundantly in a big array for fast lookups */
21extern struct neighborhood neigh_info[NR_CPUS];
22
23void init_topology(void); /* called by Litmus module's _init_litmus() */
24
25/* Works like:
26void get_nearest_available_cpu(
27 cpu_entry_t **nearest,
28 cpu_entry_t *start,
29 cpu_entry_t *entries,
30 int release_master)
31
32Set release_master = NO_CPU for no Release Master.
33
34We use a macro here to exploit the fact that C-EDF and G-EDF
35have similar structures for their cpu_entry_t structs, even though
36they do not share a common base-struct. The macro allows us to
37avoid code duplication.
38
39TODO: Factor out the job-to-processor linking from C/G-EDF into
40a reusable "processor mapping". (See B.B.'s RTSS'09 paper &
41dissertation.)
42 */
43#define get_nearest_available_cpu(nearest, start, entries, release_master) \
44{ \
45 (nearest) = NULL; \
46 if (!(start)->linked) { \
47 (nearest) = (start); \
48 } else { \
49 int __level; \
50 int __cpu; \
51 int __release_master = ((release_master) == NO_CPU) ? -1 : (release_master); \
52 struct neighborhood *__neighbors = &neigh_info[(start)->cpu]; \
53 \
54 for (__level = 0; (__level < NUM_CACHE_LEVELS) && !(nearest); ++__level) { \
55 if (__neighbors->size[__level] > 1) { \
56 for_each_cpu(__cpu, __neighbors->neighbors[__level]) { \
57 if (__cpu != __release_master) { \
58 cpu_entry_t *__entry = &per_cpu((entries), __cpu); \
59 if (!__entry->linked) { \
60 (nearest) = __entry; \
61 break; \
62 } \
63 } \
64 } \
65 } else if (__neighbors->size[__level] == 0) { \
66 break; \
67 } \
68 } \
69 } \
70 \
71 if ((nearest)) { \
72 TRACE("P%d is closest available CPU to P%d\n", \
73 (nearest)->cpu, (start)->cpu); \
74 } else { \
75 TRACE("Could not find an available CPU close to P%d\n", \
76 (start)->cpu); \
77 } \
78}
79
80#endif
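
As a rough usage sketch (not part of this patch): a G-EDF-style plugin would invoke the macro roughly as follows. cpu_entry_t, gsnedf_cpu_entries, and find_target() are hypothetical plugin-internal names; the macro only requires that the entries have ->linked and ->cpu fields and live in a per-CPU variable.

static cpu_entry_t* find_target(cpu_entry_t* preferred)
{
	cpu_entry_t* target = NULL;

	/* search outward through the cache levels; NO_CPU = no release master */
	get_nearest_available_cpu(target, preferred, gsnedf_cpu_entries, NO_CPU);

	/* fall back to the preferred CPU if nothing nearby is unlinked */
	return target ? target : preferred;
}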
diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h
new file mode 100644
index 000000000000..cf4864a498d8
--- /dev/null
+++ b/include/litmus/bheap.h
@@ -0,0 +1,77 @@
1/* bheap.h -- Binomial Heaps
2 *
3 * (c) 2008, 2009 Bjoern Brandenburg
4 */
5
6#ifndef BHEAP_H
7#define BHEAP_H
8
9#define NOT_IN_HEAP UINT_MAX
10
11struct bheap_node {
12 struct bheap_node* parent;
13 struct bheap_node* next;
14 struct bheap_node* child;
15
16 unsigned int degree;
17 void* value;
18 struct bheap_node** ref;
19};
20
21struct bheap {
22 struct bheap_node* head;
23 /* We cache the minimum of the heap.
24 * This speeds up repeated peek operations.
25 */
26 struct bheap_node* min;
27};
28
29typedef int (*bheap_prio_t)(struct bheap_node* a, struct bheap_node* b);
30
31void bheap_init(struct bheap* heap);
32void bheap_node_init(struct bheap_node** ref_to_bheap_node_ptr, void* value);
33
34static inline int bheap_node_in_heap(struct bheap_node* h)
35{
36 return h->degree != NOT_IN_HEAP;
37}
38
39static inline int bheap_empty(struct bheap* heap)
40{
41 return heap->head == NULL && heap->min == NULL;
42}
43
44/* insert (and reinitialize) a node into the heap */
45void bheap_insert(bheap_prio_t higher_prio,
46 struct bheap* heap,
47 struct bheap_node* node);
48
49/* merge addition into target */
50void bheap_union(bheap_prio_t higher_prio,
51 struct bheap* target,
52 struct bheap* addition);
53
54struct bheap_node* bheap_peek(bheap_prio_t higher_prio,
55 struct bheap* heap);
56
57struct bheap_node* bheap_take(bheap_prio_t higher_prio,
58 struct bheap* heap);
59
60void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap);
61int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node);
62
63void bheap_delete(bheap_prio_t higher_prio,
64 struct bheap* heap,
65 struct bheap_node* node);
66
67/* allocate from memcache */
68struct bheap_node* bheap_node_alloc(int gfp_flags);
69void bheap_node_free(struct bheap_node* hn);
70
71/* allocate a heap node for value and insert into the heap */
72int bheap_add(bheap_prio_t higher_prio, struct bheap* heap,
73 void* value, int gfp_flags);
74
75void* bheap_take_del(bheap_prio_t higher_prio,
76 struct bheap* heap);
77#endif
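
A rough sketch of how a caller drives this interface (illustrative only; struct my_item, my_order(), and the GFP_ATOMIC-based allocation are assumptions, and error handling is omitted): a bheap_prio_t comparator orders nodes by their value pointers, nodes come from the memcache, and peek/take operate in priority order.

struct my_item {
	int key;
	struct bheap_node* hn; /* back-reference to this item's heap node */
};

/* "higher priority" here means smaller key, i.e., min-heap behavior */
static int my_order(struct bheap_node* a, struct bheap_node* b)
{
	struct my_item* x = a->value;
	struct my_item* y = b->value;
	return x->key < y->key;
}

static void my_enqueue(struct bheap* heap, struct my_item* item)
{
	item->hn = bheap_node_alloc(GFP_ATOMIC);
	bheap_node_init(&item->hn, item);
	bheap_insert(my_order, heap, item->hn);
}

static struct my_item* my_dequeue(struct bheap* heap)
{
	/* the cached minimum makes repeated peeks cheap; take removes it */
	struct bheap_node* hn = bheap_take(my_order, heap);
	return hn ? hn->value : NULL;
}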
diff --git a/include/litmus/binheap.h b/include/litmus/binheap.h
new file mode 100644
index 000000000000..1cf364701da8
--- /dev/null
+++ b/include/litmus/binheap.h
@@ -0,0 +1,205 @@
1#ifndef LITMUS_BINARY_HEAP_H
2#define LITMUS_BINARY_HEAP_H
3
4#include <linux/kernel.h>
5
6/**
7 * Simple binary heap with add, arbitrary delete, delete_root, and top
8 * operations.
9 *
10 * Style meant to conform with list.h.
11 *
12 * Motivation: Linux's prio_heap.h is of fixed size. Litmus's binomial
13 * heap may be overkill (and perhaps not general enough) for some applications.
14 *
15 * Note: In order to make node swaps fast, a node inserted with a data pointer
16 * may not always hold said data pointer. This is similar to the binomial heap
17 * implementation. This does make node deletion tricky since we have to
18 * (1) locate the node that holds the data pointer to delete, and (2) the
19 * node that was originally inserted with said data pointer. These have to be
20 * coalesced into a single node before removal (see usage of
21 * __binheap_safe_swap()). We have to track node references to accomplish this.
22 */
23
24struct binheap_node {
25 void *data;
26 struct binheap_node *parent;
27 struct binheap_node *left;
28 struct binheap_node *right;
29
30 /* pointer to binheap_node that holds *data for which this binheap_node
31 * was originally inserted. (*data "owns" this node)
32 */
33 struct binheap_node *ref;
34 struct binheap_node **ref_ptr;
35};
36
37/**
38 * Signature of comparator function. Assumed 'less-than' (min-heap).
39 * Pass in 'greater-than' for max-heap.
40 *
41 * TODO: Consider macro-based implementation that allows comparator to be
42 * inlined (similar to Linux red/black tree) for greater efficiency.
43 */
44typedef int (*binheap_order_t)(struct binheap_node *a,
45 struct binheap_node *b);
46
47
48struct binheap {
49 struct binheap_node *root;
50
51 /* pointer to node to take next inserted child */
52 struct binheap_node *next;
53
54 /* pointer to last node in complete binary tree */
55 struct binheap_node *last;
56
57 /* comparator function pointer */
58 binheap_order_t compare;
59};
60
61
62/* Initialized heap nodes not in a heap have parent
63 * set to BINHEAP_POISON.
64 */
65#define BINHEAP_POISON ((void*)(0xdeadbeef))
66
67
68/**
69 * binheap_entry - get the struct for this heap node.
70 * Only valid when called upon heap nodes other than the root handle.
71 * @ptr: the heap node.
72 * @type: the type of struct pointed to by binheap_node::data.
73 * @member: unused.
74 */
75#define binheap_entry(ptr, type, member) \
76((type *)((ptr)->data))
77
78/**
79 * binheap_node_container - get the struct that contains this node.
80 * Only valid when called upon heap nodes other than the root handle.
81 * @ptr: the heap node.
82 * @type: the type of struct the node is embedded in.
83 * @member: the name of the binheap_struct within the (type) struct.
84 */
85#define binheap_node_container(ptr, type, member) \
86container_of((ptr), type, member)
87
88/**
89 * binheap_top_entry - get the struct for the node at the top of the heap.
90 * Only valid when called upon the heap handle node.
91 * @ptr: the special heap-handle node.
92 * @type: the type of the struct the head is embedded in.
93 * @member: the name of the binheap_struct within the (type) struct.
94 */
95#define binheap_top_entry(ptr, type, member) \
96binheap_entry((ptr)->root, type, member)
97
98/**
99 * binheap_delete_root - remove the root element from the heap.
100 * @handle: handle to the heap.
101 * @type: the type of the struct the head is embedded in.
102 * @member: the name of the binheap_struct within the (type) struct.
103 */
104#define binheap_delete_root(handle, type, member) \
105__binheap_delete_root((handle), &((type *)((handle)->root->data))->member)
106
107/**
108 * binheap_delete - remove an arbitrary element from the heap.
109 * @to_delete: pointer to node to be removed.
110 * @handle: handle to the heap.
111 */
112#define binheap_delete(to_delete, handle) \
113__binheap_delete((to_delete), (handle))
114
115/**
116 * binheap_add - insert an element to the heap
117 * @new_node: node to add.
118 * @handle: handle to the heap.
119 * @type: the type of the struct the head is embedded in.
120 * @member: the name of the binheap_struct within the (type) struct.
121 */
122#define binheap_add(new_node, handle, type, member) \
123__binheap_add((new_node), (handle), container_of((new_node), type, member))
124
125/**
126 * binheap_decrease - re-eval the position of a node (based upon its
127 * original data pointer).
128 * @handle: handle to the heap.
129 * @orig_node: node that was associated with the data pointer
130 * (whose value has changed) when said pointer was
131 * added to the heap.
132 */
133#define binheap_decrease(orig_node, handle) \
134__binheap_decrease((orig_node), (handle))
135
136#define BINHEAP_NODE_INIT() { NULL, BINHEAP_POISON, NULL, NULL, NULL, NULL }
137
138#define BINHEAP_NODE(name) \
139 struct binheap_node name = BINHEAP_NODE_INIT()
140
141
142static inline void INIT_BINHEAP_NODE(struct binheap_node *n)
143{
144 n->data = NULL;
145 n->parent = BINHEAP_POISON;
146 n->left = NULL;
147 n->right = NULL;
148 n->ref = NULL;
149 n->ref_ptr = NULL;
150}
151
152static inline void INIT_BINHEAP_HANDLE(struct binheap *handle,
153 binheap_order_t compare)
154{
155 handle->root = NULL;
156 handle->next = NULL;
157 handle->last = NULL;
158 handle->compare = compare;
159}
160
161/* Returns true if binheap is empty. */
162static inline int binheap_empty(struct binheap *handle)
163{
164 return(handle->root == NULL);
165}
166
167/* Returns true if binheap node is in a heap. */
168static inline int binheap_is_in_heap(struct binheap_node *node)
169{
170 return (node->parent != BINHEAP_POISON);
171}
172
173/* Returns true if binheap node is in given heap. */
174int binheap_is_in_this_heap(struct binheap_node *node, struct binheap* heap);
175
176/* Add a node to a heap */
177void __binheap_add(struct binheap_node *new_node,
178 struct binheap *handle,
179 void *data);
180
181/**
182 * Removes the root node from the heap. The node is removed after coalescing
183 * the binheap_node with its original data pointer at the root of the tree.
184 *
185 * The 'last' node in the tree is then swapped up to the root and bubbled
186 * down.
187 */
188void __binheap_delete_root(struct binheap *handle,
189 struct binheap_node *container);
190
191/**
192 * Delete an arbitrary node. Bubble node to delete up to the root,
193 * and then delete the root.
194 */
195void __binheap_delete(struct binheap_node *node_to_delete,
196 struct binheap *handle);
197
198/**
199 * Bubble up a node whose pointer has decreased in value.
200 */
201void __binheap_decrease(struct binheap_node *orig_node,
202 struct binheap *handle);
203
204
205#endif
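
A rough sketch of the intended embedding pattern (struct my_job, earlier_dl(), and the helpers are illustrative, not part of the patch): the binheap_node lives inside the element, the comparator recovers the element via binheap_entry(), and the *_entry macros are used for everything except the heap handle itself.

struct my_job {
	unsigned long long deadline;
	struct binheap_node node;
};

/* 'less-than' comparator => min-heap ordered by deadline */
static int earlier_dl(struct binheap_node* a, struct binheap_node* b)
{
	struct my_job* x = binheap_entry(a, struct my_job, node);
	struct my_job* y = binheap_entry(b, struct my_job, node);
	return x->deadline < y->deadline;
}

/* during setup: INIT_BINHEAP_HANDLE(&my_heap, earlier_dl); */

static void my_add(struct binheap* handle, struct my_job* j)
{
	INIT_BINHEAP_NODE(&j->node);
	binheap_add(&j->node, handle, struct my_job, node);
}

static struct my_job* my_pop(struct binheap* handle)
{
	struct my_job* j = NULL;

	if (!binheap_empty(handle)) {
		j = binheap_top_entry(handle, struct my_job, node);
		binheap_delete_root(handle, struct my_job, node);
	}
	return j;
}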
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
new file mode 100644
index 000000000000..bd2d5c964f92
--- /dev/null
+++ b/include/litmus/budget.h
@@ -0,0 +1,36 @@
1#ifndef _LITMUS_BUDGET_H_
2#define _LITMUS_BUDGET_H_
3
4/* Update the per-processor enforcement timer (arm/reprogram/cancel) for
5 * the next task. */
6void update_enforcement_timer(struct task_struct* t);
7
8inline static int budget_exhausted(struct task_struct* t)
9{
10 return get_exec_time(t) >= get_exec_cost(t);
11}
12
13inline static lt_t budget_remaining(struct task_struct* t)
14{
15 if (!budget_exhausted(t))
16 return get_exec_cost(t) - get_exec_time(t);
17 else
18 /* avoid overflow */
19 return 0;
20}
21
22#define budget_enforced(t) (tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT)
23
24#define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \
25 == PRECISE_ENFORCEMENT)
26
27static inline int requeue_preempted_job(struct task_struct* t)
28{
29 /* Add task to ready queue only if not subject to budget enforcement or
30 * if the job has budget remaining. t may be NULL.
31 */
32 return t && !is_completed(t) &&
33 (!budget_exhausted(t) || !budget_enforced(t));
34}
35
36#endif
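
A rough sketch of how a plugin typically combines these helpers at a preemption point (on_preemption() and the edf domain are stand-ins; __add_ready() comes from rt_domain.h):

static void on_preemption(rt_domain_t* edf,
			  struct task_struct* preempted,
			  struct task_struct* next)
{
	/* put the preempted job back only if enforcement permits it;
	 * requeue_preempted_job() tolerates preempted == NULL */
	if (requeue_preempted_job(preempted))
		__add_ready(edf, preempted);

	/* arm/reprogram/cancel the enforcement timer for whatever runs
	 * next; this only has an effect under PRECISE_ENFORCEMENT */
	update_enforcement_timer(next);
}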
diff --git a/include/litmus/clustered.h b/include/litmus/clustered.h
new file mode 100644
index 000000000000..0c18dcb15e6c
--- /dev/null
+++ b/include/litmus/clustered.h
@@ -0,0 +1,44 @@
1#ifndef CLUSTERED_H
2#define CLUSTERED_H
3
4/* Which cache level should be used to group CPUs into clusters?
5 * GLOBAL_CLUSTER means that all CPUs form a single cluster (just like under
6 * global scheduling).
7 */
8enum cache_level {
9 GLOBAL_CLUSTER = 0,
10 L1_CLUSTER = 1,
11 L2_CLUSTER = 2,
12 L3_CLUSTER = 3
13};
14
15int parse_cache_level(const char *str, enum cache_level *level);
16const char* cache_level_name(enum cache_level level);
17
18/* expose a cache level in a /proc dir */
19struct proc_dir_entry* create_cluster_file(struct proc_dir_entry* parent,
20 enum cache_level* level);
21
22
23
24struct scheduling_cluster {
25 unsigned int id;
26 /* list of CPUs that are part of this cluster */
27 struct list_head cpus;
28};
29
30struct cluster_cpu {
31 unsigned int id; /* which CPU is this? */
32 struct list_head cluster_list; /* List of the CPUs in this cluster. */
33 struct scheduling_cluster* cluster; /* The cluster that this CPU belongs to. */
34};
35
36int get_cluster_size(enum cache_level level);
37
38int assign_cpus_to_clusters(enum cache_level level,
39 struct scheduling_cluster* clusters[],
40 unsigned int num_clusters,
41 struct cluster_cpu* cpus[],
42 unsigned int num_cpus);
43
44#endif
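
A rough sketch of wiring up L2-based clusters (hypothetical setup code; real plugins such as C-EDF size and allocate these arrays dynamically and do proper error handling):

static struct scheduling_cluster cluster_store[NR_CPUS];
static struct cluster_cpu cpu_store[NR_CPUS];
static struct scheduling_cluster* clusters[NR_CPUS];
static struct cluster_cpu* cpus[NR_CPUS];

static int setup_l2_clusters(void)
{
	int i, cluster_size, num_clusters, num_cpus = num_online_cpus();

	cluster_size = get_cluster_size(L2_CLUSTER);
	if (cluster_size <= 0)
		return -EINVAL;
	num_clusters = num_cpus / cluster_size;
	if (!num_clusters)
		num_clusters = 1;

	for (i = 0; i < num_clusters; i++)
		clusters[i] = &cluster_store[i];
	for (i = 0; i < num_cpus; i++)
		cpus[i] = &cpu_store[i];

	return assign_cpus_to_clusters(L2_CLUSTER, clusters, num_clusters,
				       cpus, num_cpus);
}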
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
new file mode 100644
index 000000000000..bbaf22ea7f12
--- /dev/null
+++ b/include/litmus/edf_common.h
@@ -0,0 +1,25 @@
1/*
2 * EDF common data structures and utility functions shared by all EDF
3 * based scheduler plugins
4 */
5
6/* CLEANUP: Add comments and make it less messy.
7 *
8 */
9
10#ifndef __UNC_EDF_COMMON_H__
11#define __UNC_EDF_COMMON_H__
12
13#include <litmus/rt_domain.h>
14
15void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
16 release_jobs_t release);
17
18int edf_higher_prio(struct task_struct* first,
19 struct task_struct* second);
20
21int edf_ready_order(struct bheap_node* a, struct bheap_node* b);
22
23int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t);
24
25#endif
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
new file mode 100644
index 000000000000..fd9b30dbfb34
--- /dev/null
+++ b/include/litmus/fdso.h
@@ -0,0 +1,78 @@
1/* fdso.h - file descriptor attached shared objects
2 *
3 * (c) 2007 B. Brandenburg, LITMUS^RT project
4 */
5
6#ifndef _LINUX_FDSO_H_
7#define _LINUX_FDSO_H_
8
9#include <linux/list.h>
10#include <asm/atomic.h>
11
12#include <linux/fs.h>
13#include <linux/slab.h>
14
15#define MAX_OBJECT_DESCRIPTORS 85
16
17typedef enum {
18 MIN_OBJ_TYPE = 0,
19
20 FMLP_SEM = 0,
21 SRP_SEM = 1,
22
23 MPCP_SEM = 2,
24 MPCP_VS_SEM = 3,
25 DPCP_SEM = 4,
26 PCP_SEM = 5,
27
28 DFLP_SEM = 6,
29
30 MAX_OBJ_TYPE = 6
31} obj_type_t;
32
33struct inode_obj_id {
34 struct list_head list;
35 atomic_t count;
36 struct inode* inode;
37
38 obj_type_t type;
39 void* obj;
40 unsigned int id;
41};
42
43struct fdso_ops;
44
45struct od_table_entry {
46 unsigned int used;
47
48 struct inode_obj_id* obj;
49 const struct fdso_ops* class;
50};
51
52struct fdso_ops {
53 int (*create)(void** obj_ref, obj_type_t type, void* __user);
54 void (*destroy)(obj_type_t type, void*);
55 int (*open) (struct od_table_entry*, void* __user);
56 int (*close) (struct od_table_entry*);
57};
58
59/* translate a userspace supplied od into the raw table entry
60 * returns NULL if od is invalid
61 */
62struct od_table_entry* get_entry_for_od(int od);
63
64/* translate a userspace supplied od into the associated object
65 * returns NULL if od is invalid
66 */
67static inline void* od_lookup(int od, obj_type_t type)
68{
69 struct od_table_entry* e = get_entry_for_od(od);
70 return e && e->obj->type == type ? e->obj->obj : NULL;
71}
72
73#define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM))
74#define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM))
75#define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID))
76
77
78#endif
diff --git a/include/litmus/fp_common.h b/include/litmus/fp_common.h
new file mode 100644
index 000000000000..19356c0fa6c1
--- /dev/null
+++ b/include/litmus/fp_common.h
@@ -0,0 +1,105 @@
1/* Fixed-priority scheduler support.
2 */
3
4#ifndef __FP_COMMON_H__
5#define __FP_COMMON_H__
6
7#include <litmus/rt_domain.h>
8
9#include <asm/bitops.h>
10
11
12void fp_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
13 release_jobs_t release);
14
15int fp_higher_prio(struct task_struct* first,
16 struct task_struct* second);
17
18int fp_ready_order(struct bheap_node* a, struct bheap_node* b);
19
20#define FP_PRIO_BIT_WORDS (LITMUS_MAX_PRIORITY / BITS_PER_LONG)
21
22#if (LITMUS_MAX_PRIORITY % BITS_PER_LONG)
23#error LITMUS_MAX_PRIORITY must be a multiple of BITS_PER_LONG
24#endif
25
26/* bitmask-indexed priority queue */
27struct fp_prio_queue {
28 unsigned long bitmask[FP_PRIO_BIT_WORDS];
29 struct bheap queue[LITMUS_MAX_PRIORITY];
30};
31
32void fp_prio_queue_init(struct fp_prio_queue* q);
33
34static inline void fpq_set(struct fp_prio_queue* q, unsigned int index)
35{
36 unsigned long *word = q->bitmask + (index / BITS_PER_LONG);
37 __set_bit(index % BITS_PER_LONG, word);
38}
39
40static inline void fpq_clear(struct fp_prio_queue* q, unsigned int index)
41{
42 unsigned long *word = q->bitmask + (index / BITS_PER_LONG);
43 __clear_bit(index % BITS_PER_LONG, word);
44}
45
46static inline unsigned int fpq_find(struct fp_prio_queue* q)
47{
48 int i;
49
50 /* loop optimizer should unroll this */
51 for (i = 0; i < FP_PRIO_BIT_WORDS; i++)
52 if (q->bitmask[i])
53 return __ffs(q->bitmask[i]) + i * BITS_PER_LONG;
54
55 return LITMUS_MAX_PRIORITY; /* nothing found */
56}
57
58static inline void fp_prio_add(struct fp_prio_queue* q, struct task_struct* t, unsigned int index)
59{
60 BUG_ON(index >= LITMUS_MAX_PRIORITY);
61 BUG_ON(bheap_node_in_heap(tsk_rt(t)->heap_node));
62
63 fpq_set(q, index);
64 bheap_insert(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node);
65}
66
67static inline void fp_prio_remove(struct fp_prio_queue* q, struct task_struct* t, unsigned int index)
68{
69 BUG_ON(!is_queued(t));
70
71 bheap_delete(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node);
72 if (likely(bheap_empty(&q->queue[index])))
73 fpq_clear(q, index);
74}
75
76static inline struct task_struct* fp_prio_peek(struct fp_prio_queue* q)
77{
78 unsigned int idx = fpq_find(q);
79 struct bheap_node* hn;
80
81 if (idx < LITMUS_MAX_PRIORITY) {
82 hn = bheap_peek(fp_ready_order, &q->queue[idx]);
83 return bheap2task(hn);
84 } else
85 return NULL;
86}
87
88static inline struct task_struct* fp_prio_take(struct fp_prio_queue* q)
89{
90 unsigned int idx = fpq_find(q);
91 struct bheap_node* hn;
92
93 if (idx < LITMUS_MAX_PRIORITY) {
94 hn = bheap_take(fp_ready_order, &q->queue[idx]);
95 if (likely(bheap_empty(&q->queue[idx])))
96 fpq_clear(q, idx);
97 return bheap2task(hn);
98 } else
99 return NULL;
100}
101
102int fp_preemption_needed(struct fp_prio_queue* q, struct task_struct *t);
103
104
105#endif
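
A rough usage sketch (my_job_arrival(), my_pick_next(), and the single static queue are illustrative; a partitioned plugin keeps one fp_prio_queue per CPU): tasks are indexed by their priority, and the bitmask lets the take/peek path find the highest-priority non-empty bucket by scanning at most FP_PRIO_BIT_WORDS words.

static struct fp_prio_queue ready;

static void my_job_arrival(struct task_struct* t)
{
	/* get_priority() (litmus.h) yields the task's priority index */
	fp_prio_add(&ready, t, get_priority(t));
}

static struct task_struct* my_pick_next(struct task_struct* scheduled)
{
	/* preempt only if a queued job beats the currently scheduled one */
	if (!scheduled || fp_preemption_needed(&ready, scheduled))
		return fp_prio_take(&ready);
	return scheduled;
}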
diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h
new file mode 100644
index 000000000000..642de98542c8
--- /dev/null
+++ b/include/litmus/fpmath.h
@@ -0,0 +1,147 @@
1#ifndef __FP_MATH_H__
2#define __FP_MATH_H__
3
4#include <linux/math64.h>
5
6#ifndef __KERNEL__
7#include <stdint.h>
8#define abs(x) (((x) < 0) ? -(x) : (x))
9#endif
10
11// Use 64-bit because we want to track things at the nanosecond scale.
12// This can lead to very large numbers.
13typedef int64_t fpbuf_t;
14typedef struct
15{
16 fpbuf_t val;
17} fp_t;
18
19#define FP_SHIFT 10
20#define ROUND_BIT (FP_SHIFT - 1)
21
22#define _fp(x) ((fp_t) {x})
23
24#ifdef __KERNEL__
25static const fp_t LITMUS_FP_ZERO = {.val = 0};
26static const fp_t LITMUS_FP_ONE = {.val = (1 << FP_SHIFT)};
27#endif
28
29static inline fp_t FP(fpbuf_t x)
30{
31 return _fp(((fpbuf_t) x) << FP_SHIFT);
32}
33
34/* divide two integers to obtain a fixed point value */
35static inline fp_t _frac(fpbuf_t a, fpbuf_t b)
36{
37 return _fp(div64_s64(FP(a).val, (b)));
38}
39
40static inline fpbuf_t _point(fp_t x)
41{
42 return (x.val % (1 << FP_SHIFT));
43
44}
45
46#define fp2str(x) x.val
47/*(x.val >> FP_SHIFT), (x.val % (1 << FP_SHIFT)) */
48#define _FP_ "%ld/1024"
49
50static inline fpbuf_t _floor(fp_t x)
51{
52 return x.val >> FP_SHIFT;
53}
54
55/* FIXME: negative rounding */
56static inline fpbuf_t _round(fp_t x)
57{
58 return _floor(x) + ((x.val >> ROUND_BIT) & 1);
59}
60
61/* multiply two fixed point values */
62static inline fp_t _mul(fp_t a, fp_t b)
63{
64 return _fp((a.val * b.val) >> FP_SHIFT);
65}
66
67static inline fp_t _div(fp_t a, fp_t b)
68{
69#if !defined(__KERNEL__) && !defined(unlikely)
70#define unlikely(x) (x)
71#define DO_UNDEF_UNLIKELY
72#endif
73 /* try not to overflow */
74 if (unlikely( a.val > (2l << ((sizeof(fpbuf_t)*8) - FP_SHIFT)) ))
75 return _fp((a.val / b.val) << FP_SHIFT);
76 else
77 return _fp((a.val << FP_SHIFT) / b.val);
78#ifdef DO_UNDEF_UNLIKELY
79#undef unlikely
80#undef DO_UNDEF_UNLIKELY
81#endif
82}
83
84static inline fp_t _add(fp_t a, fp_t b)
85{
86 return _fp(a.val + b.val);
87}
88
89static inline fp_t _sub(fp_t a, fp_t b)
90{
91 return _fp(a.val - b.val);
92}
93
94static inline fp_t _neg(fp_t x)
95{
96 return _fp(-x.val);
97}
98
99static inline fp_t _abs(fp_t x)
100{
101 return _fp(abs(x.val));
102}
103
104/* works the same as casting float/double to integer */
105static inline fpbuf_t _fp_to_integer(fp_t x)
106{
107 return _floor(_abs(x)) * ((x.val > 0) ? 1 : -1);
108}
109
110static inline fp_t _integer_to_fp(fpbuf_t x)
111{
112 return _frac(x,1);
113}
114
115static inline int _leq(fp_t a, fp_t b)
116{
117 return a.val <= b.val;
118}
119
120static inline int _geq(fp_t a, fp_t b)
121{
122 return a.val >= b.val;
123}
124
125static inline int _lt(fp_t a, fp_t b)
126{
127 return a.val < b.val;
128}
129
130static inline int _gt(fp_t a, fp_t b)
131{
132 return a.val > b.val;
133}
134
135static inline int _eq(fp_t a, fp_t b)
136{
137 return a.val == b.val;
138}
139
140static inline fp_t _max(fp_t a, fp_t b)
141{
142 if (a.val < b.val)
143 return b;
144 else
145 return a;
146}
147#endif
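
A rough sketch of the arithmetic in use (task_utilization() and admit_on_cpu() are illustrative helpers, not part of the patch): with FP_SHIFT = 10, _frac(1, 3) is stored as 341/1024 and _frac(1, 4) as 256/1024, so their sum is 597/1024, i.e. roughly 0.583.

static fp_t task_utilization(lt_t exec_cost, lt_t period)
{
	/* u = C / T as a fixed-point fraction */
	return _frac(exec_cost, period);
}

static int admit_on_cpu(fp_t cpu_load, fp_t new_task_util)
{
	/* admit only if the summed utilization stays at or below 1.0 */
	return _leq(_add(cpu_load, new_task_util), FP(1));
}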
diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h
new file mode 100644
index 000000000000..24771dfaebf8
--- /dev/null
+++ b/include/litmus/jobs.h
@@ -0,0 +1,10 @@
1#ifndef __LITMUS_JOBS_H__
2#define __LITMUS_JOBS_H__
3
4void prepare_for_next_period(struct task_struct *t);
5void release_at(struct task_struct *t, lt_t start);
6
7long default_wait_for_release_at(lt_t release_time);
8long complete_job(void);
9
10#endif
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index c87863c9b231..0519831f6878 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -6,7 +6,49 @@
6#ifndef _LINUX_LITMUS_H_
7#define _LINUX_LITMUS_H_
8
9#include <litmus/debug_trace.h>
10
11#ifdef CONFIG_RELEASE_MASTER
12extern atomic_t release_master_cpu;
13#endif
14
15/* in_list - is a given list_head queued on some list?
16 */
17static inline int in_list(struct list_head* list)
18{
19 return !( /* case 1: deleted */
20 (list->next == LIST_POISON1 &&
21 list->prev == LIST_POISON2)
22 ||
23 /* case 2: initialized */
24 (list->next == list &&
25 list->prev == list)
26 );
27}
28
29struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
30
31#define NO_CPU 0xffffffff
32
33void litmus_fork(struct task_struct *tsk);
34void litmus_exec(void);
35/* clean up real-time state of a task */
36void litmus_clear_state(struct task_struct *dead_tsk);
37void exit_litmus(struct task_struct *dead_tsk);
38
39/* Prevent the plugin from being switched-out from underneath a code
40 * path. Might sleep, so may be called only from non-atomic context. */
41void litmus_plugin_switch_disable(void);
42void litmus_plugin_switch_enable(void);
43
44long litmus_admit_task(struct task_struct *tsk);
45void litmus_exit_task(struct task_struct *tsk);
46void litmus_dealloc(struct task_struct *tsk);
47void litmus_do_exit(struct task_struct *tsk);
48
49#define is_realtime(t) ((t)->policy == SCHED_LITMUS)
50#define rt_transition_pending(t) \
51 ((t)->rt_param.transition_pending)
52
53#define tsk_rt(t) (&(t)->rt_param)
54
@@ -28,6 +70,7 @@
70#define get_partition(t) (tsk_rt(t)->task_params.cpu)
71#define get_priority(t) (tsk_rt(t)->task_params.priority)
72#define get_class(t) (tsk_rt(t)->task_params.cls)
73#define get_release_policy(t) (tsk_rt(t)->task_params.release_policy)
74
75/* job_param macros */
76#define get_exec_time(t) (tsk_rt(t)->job_params.exec_time)
@@ -35,6 +78,15 @@
78#define get_release(t) (tsk_rt(t)->job_params.release)
79#define get_lateness(t) (tsk_rt(t)->job_params.lateness)
80
81/* release policy macros */
82#define is_periodic(t) (get_release_policy(t) == TASK_PERIODIC)
83#define is_sporadic(t) (get_release_policy(t) == TASK_SPORADIC)
84#ifdef CONFIG_ALLOW_EARLY_RELEASE
85#define is_early_releasing(t) (get_release_policy(t) == TASK_EARLY)
86#else
87#define is_early_releasing(t) (0)
88#endif
89
90#define is_hrt(t) \
91 (tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
92#define is_srt(t) \
@@ -48,6 +100,196 @@ static inline lt_t litmus_clock(void)
100 return ktime_to_ns(ktime_get());
101}
102
103/* A macro to convert from nanoseconds to ktime_t. */
104#define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t)
105
106#define get_domain(t) (tsk_rt(t)->domain)
107
108/* Honor the flag in the preempt_count variable that is set
109 * when scheduling is in progress.
110 */
111#define is_running(t) \
112 ((t)->state == TASK_RUNNING || \
113 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
114
115#define is_blocked(t) \
116 (!is_running(t))
117#define is_released(t, now) \
118 (lt_before_eq(get_release(t), now))
119#define is_tardy(t, now) \
120 (lt_before_eq(tsk_rt(t)->job_params.deadline, now))
121
122/* real-time comparison macros */
123#define earlier_deadline(a, b) (lt_before(\
124 (a)->rt_param.job_params.deadline,\
125 (b)->rt_param.job_params.deadline))
126#define earlier_release(a, b) (lt_before(\
127 (a)->rt_param.job_params.release,\
128 (b)->rt_param.job_params.release))
129
130void preempt_if_preemptable(struct task_struct* t, int on_cpu);
131
132#ifdef CONFIG_LITMUS_LOCKING
133void srp_ceiling_block(void);
134#else
135#define srp_ceiling_block() /* nothing */
136#endif
137
138#define bheap2task(hn) ((struct task_struct*) hn->value)
139
140#ifdef CONFIG_NP_SECTION
141
142static inline int is_kernel_np(struct task_struct *t)
143{
144 return tsk_rt(t)->kernel_np;
145}
146
147static inline int is_user_np(struct task_struct *t)
148{
149 return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0;
150}
151
152static inline void request_exit_np(struct task_struct *t)
153{
154 if (is_user_np(t)) {
155 /* Set the flag that tells user space to call
156 * into the kernel at the end of a critical section. */
157 if (likely(tsk_rt(t)->ctrl_page)) {
158 TRACE_TASK(t, "setting delayed_preemption flag\n");
159 tsk_rt(t)->ctrl_page->sched.np.preempt = 1;
160 }
161 }
162}
163
164static inline void make_np(struct task_struct *t)
165{
166 tsk_rt(t)->kernel_np++;
167}
168
169/* Caller should check if preemption is necessary when
170 * the function returns 0.
171 */
172static inline int take_np(struct task_struct *t)
173{
174 return --tsk_rt(t)->kernel_np;
175}
176
177/* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */
178static inline int request_exit_np_atomic(struct task_struct *t)
179{
180 union np_flag old, new;
181
182 if (tsk_rt(t)->ctrl_page) {
183 old.raw = tsk_rt(t)->ctrl_page->sched.raw;
184 if (old.np.flag == 0) {
185 /* no longer non-preemptive */
186 return 0;
187 } else if (old.np.preempt) {
188 /* already set, nothing for us to do */
189 return 1;
190 } else {
191 /* non preemptive and flag not set */
192 new.raw = old.raw;
193 new.np.preempt = 1;
194 /* if we get old back, then we atomically set the flag */
195 return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
196 /* If we raced with a concurrent change, then so be
197 * it. Deliver it by IPI. We don't want an unbounded
198 * retry loop here since tasks might exploit that to
199 * keep the kernel busy indefinitely. */
200 }
201 } else
202 return 0;
203}
204
205#else
206
207static inline int is_kernel_np(struct task_struct* t)
208{
209 return 0;
210}
211
212static inline int is_user_np(struct task_struct* t)
213{
214 return 0;
215}
216
217static inline void request_exit_np(struct task_struct *t)
218{
219 /* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */
220 BUG();
221}
222
223static inline int request_exit_np_atomic(struct task_struct *t)
224{
225 return 0;
226}
227
228#endif
229
230static inline void clear_exit_np(struct task_struct *t)
231{
232 if (likely(tsk_rt(t)->ctrl_page))
233 tsk_rt(t)->ctrl_page->sched.np.preempt = 0;
234}
235
236static inline int is_np(struct task_struct *t)
237{
238#ifdef CONFIG_SCHED_DEBUG_TRACE
239 int kernel, user;
240 kernel = is_kernel_np(t);
241 user = is_user_np(t);
242 if (kernel || user)
243 TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
244
245 kernel, user);
246 return kernel || user;
247#else
248 return unlikely(is_kernel_np(t) || is_user_np(t));
249#endif
250}
251
252static inline int is_present(struct task_struct* t)
253{
254 return t && tsk_rt(t)->present;
255}
256
257static inline int is_completed(struct task_struct* t)
258{
259 return t && tsk_rt(t)->completed;
260}
261
262
263/* Used to convert ns-specified execution costs and periods into
264 * integral quanta equivalents.
265 */
266#define LITMUS_QUANTUM_LENGTH_NS (CONFIG_LITMUS_QUANTUM_LENGTH_US * 1000ULL)
267
268/* make the unit explicit */
269typedef unsigned long quanta_t;
270
271enum round {
272 FLOOR,
273 CEIL
274};
275
276static inline quanta_t time2quanta(lt_t time, enum round round)
277{
278 s64 quantum_length = LITMUS_QUANTUM_LENGTH_NS;
279
280 if (do_div(time, quantum_length) && round == CEIL)
281 time++;
282 return (quanta_t) time;
283}
284
285static inline lt_t quanta2time(quanta_t quanta)
286{
287 return quanta * LITMUS_QUANTUM_LENGTH_NS;
288}
289
290/* By how much is cpu staggered behind CPU 0? */
291u64 cpu_stagger_offset(int cpu);
292
293static inline struct control_page* get_control_page(struct task_struct *t)
294{
295 return tsk_rt(t)->ctrl_page;
@@ -58,4 +300,30 @@ static inline int has_control_page(struct task_struct* t)
300 return tsk_rt(t)->ctrl_page != NULL;
301}
302
303
304#ifdef CONFIG_SCHED_OVERHEAD_TRACE
305
306#define TS_SYSCALL_IN_START \
307 if (has_control_page(current)) { \
308 __TS_SYSCALL_IN_START(&get_control_page(current)->ts_syscall_start); \
309 }
310
311#define TS_SYSCALL_IN_END \
312 if (has_control_page(current)) { \
313 unsigned long flags; \
314 uint64_t irqs; \
315 local_irq_save(flags); \
316 irqs = get_control_page(current)->irq_count - \
317 get_control_page(current)->irq_syscall_start; \
318 __TS_SYSCALL_IN_END(&irqs); \
319 local_irq_restore(flags); \
320 }
321
322#else
323
324#define TS_SYSCALL_IN_START
325#define TS_SYSCALL_IN_END
326
327#endif
328
329#endif
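
A rough sketch of the quantum helpers introduced above (remaining_quanta() is an illustrative helper; budget_remaining() comes from litmus/budget.h): rounding up with CEIL ensures a job is never short-changed when its leftover budget is expressed in quanta.

static quanta_t remaining_quanta(struct task_struct* t)
{
	return time2quanta(budget_remaining(t), CEIL);
}

static lt_t remaining_quanta_as_ns(struct task_struct* t)
{
	/* back to nanoseconds: quanta * LITMUS_QUANTUM_LENGTH_NS */
	return quanta2time(remaining_quanta(t));
}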
diff --git a/include/litmus/litmus_proc.h b/include/litmus/litmus_proc.h
new file mode 100644
index 000000000000..a5db24c03ec0
--- /dev/null
+++ b/include/litmus/litmus_proc.h
@@ -0,0 +1,63 @@
1#include <litmus/sched_plugin.h>
2#include <linux/proc_fs.h>
3
4int __init init_litmus_proc(void);
5void exit_litmus_proc(void);
6
7struct cd_mapping
8{
9 int id;
10 cpumask_var_t mask;
11 struct proc_dir_entry *proc_file;
12};
13
14struct domain_proc_info
15{
16 int num_cpus;
17 int num_domains;
18
19 struct cd_mapping *cpu_to_domains;
20 struct cd_mapping *domain_to_cpus;
21};
22
23/*
24 * On success, returns 0 and sets the pointer to the location of the new
25 * proc dir entry, otherwise returns an error code and sets pde to NULL.
26 */
27long make_plugin_proc_dir(struct sched_plugin* plugin,
28 struct proc_dir_entry** pde);
29
30/*
31 * Plugins should deallocate all child proc directory entries before
32 * calling this, to avoid memory leaks.
33 */
34void remove_plugin_proc_dir(struct sched_plugin* plugin);
35
36/*
37 * Setup the CPU <-> sched domain mappings in proc
38 */
39long activate_domain_proc(struct domain_proc_info* map);
40
41/*
42 * Remove the CPU <-> sched domain mappings from proc
43 */
44long deactivate_domain_proc(void);
45
46/*
47 * Alloc memory for the mapping
48 * Note: Does not set up proc files. Use make_sched_domain_maps for that.
49 */
50long init_domain_proc_info(struct domain_proc_info* map,
51 int num_cpus, int num_domains);
52
53/*
54 * Free memory of the mapping
55 * Note: Does not clean up proc files. Use deactivate_domain_proc for that.
56 */
57void destroy_domain_proc_info(struct domain_proc_info* map);
58
59/* Copy at most ksize-1 bytes from ubuf into kbuf, null-terminate kbuf, and
60 * remove a '\n' if present. Returns the number of bytes that were read or
61 * -EFAULT. */
62int copy_and_chomp(char *kbuf, unsigned long ksize,
63 __user const char* ubuf, unsigned long ulength);
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
new file mode 100644
index 000000000000..4d7b870cb443
--- /dev/null
+++ b/include/litmus/locking.h
@@ -0,0 +1,28 @@
1#ifndef LITMUS_LOCKING_H
2#define LITMUS_LOCKING_H
3
4struct litmus_lock_ops;
5
6/* Generic base struct for LITMUS^RT userspace semaphores.
7 * This structure should be embedded in protocol-specific semaphores.
8 */
9struct litmus_lock {
10 struct litmus_lock_ops *ops;
11 int type;
12};
13
14struct litmus_lock_ops {
15 /* Current task tries to obtain / drop a reference to a lock.
16 * Optional methods, allowed by default. */
17 int (*open)(struct litmus_lock*, void* __user);
18 int (*close)(struct litmus_lock*);
19
20 /* Current tries to lock/unlock this lock (mandatory methods). */
21 int (*lock)(struct litmus_lock*);
22 int (*unlock)(struct litmus_lock*);
23
24 /* The lock is no longer being referenced (mandatory method). */
25 void (*deallocate)(struct litmus_lock*);
26};
27
28#endif
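
A rough sketch of a protocol-specific semaphore built on this base struct (the FIFO semaphore, its fields, and the omitted blocking/wake-up logic are assumptions, not part of the patch): the generic struct litmus_lock is embedded, container_of() recovers the outer type, and the mandatory ops are filled in.

struct my_fifo_sem {
	struct litmus_lock litmus_lock; /* embedded generic base */
	struct task_struct* owner;
};

static int my_fifo_lock(struct litmus_lock* l)
{
	struct my_fifo_sem* sem =
		container_of(l, struct my_fifo_sem, litmus_lock);
	/* ... block until the semaphore is free, then record ownership ... */
	sem->owner = current;
	return 0;
}

static int my_fifo_unlock(struct litmus_lock* l)
{
	struct my_fifo_sem* sem =
		container_of(l, struct my_fifo_sem, litmus_lock);
	sem->owner = NULL;
	/* ... wake up the next waiter ... */
	return 0;
}

static void my_fifo_free(struct litmus_lock* l)
{
	kfree(container_of(l, struct my_fifo_sem, litmus_lock));
}

static struct litmus_lock_ops my_fifo_ops = {
	/* lock/unlock/deallocate are mandatory; open/close default to allowed */
	.lock       = my_fifo_lock,
	.unlock     = my_fifo_unlock,
	.deallocate = my_fifo_free,
};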
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
new file mode 100644
index 000000000000..4fd108a45333
--- /dev/null
+++ b/include/litmus/preempt.h
@@ -0,0 +1,164 @@
1#ifndef LITMUS_PREEMPT_H
2#define LITMUS_PREEMPT_H
3
4#include <linux/types.h>
5#include <linux/cache.h>
6#include <linux/percpu.h>
7#include <asm/atomic.h>
8
9#include <litmus/debug_trace.h>
10
11DECLARE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state);
12
13#ifdef CONFIG_PREEMPT_STATE_TRACE
14const char* sched_state_name(int s);
15#define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args)
16#else
17#define TRACE_STATE(fmt, args...) /* ignore */
18#endif
19
20#define VERIFY_SCHED_STATE(x) \
21 do { int __s = get_sched_state(); \
22 if ((__s & (x)) == 0) \
23 TRACE_STATE("INVALID s=0x%x (%s) not " \
24 "in 0x%x (%s) [%s]\n", \
25 __s, sched_state_name(__s), \
26 (x), #x, __FUNCTION__); \
27 } while (0);
28
29#define TRACE_SCHED_STATE_CHANGE(x, y, cpu) \
30 TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n", \
31 cpu, (x), sched_state_name(x), \
32 (y), sched_state_name(y))
33
34
35typedef enum scheduling_state {
36 TASK_SCHEDULED = (1 << 0), /* The currently scheduled task is the one that
37 * should be scheduled, and the processor does not
38 * plan to invoke schedule(). */
39 SHOULD_SCHEDULE = (1 << 1), /* A remote processor has determined that the
40 * processor should reschedule, but this has not
41 * been communicated yet (IPI still pending). */
42 WILL_SCHEDULE = (1 << 2), /* The processor has noticed that it has to
43 * reschedule and will do so shortly. */
44 TASK_PICKED = (1 << 3), /* The processor is currently executing schedule(),
45 * has selected a new task to schedule, but has not
46 * yet performed the actual context switch. */
47 PICKED_WRONG_TASK = (1 << 4), /* The processor has not yet performed the context
48 * switch, but a remote processor has already
49 * determined that a higher-priority task became
50 * eligible after the task was picked. */
51} sched_state_t;
52
53static inline sched_state_t get_sched_state_on(int cpu)
54{
55 return atomic_read(&per_cpu(resched_state, cpu));
56}
57
58static inline sched_state_t get_sched_state(void)
59{
60 return atomic_read(&__get_cpu_var(resched_state));
61}
62
63static inline int is_in_sched_state(int possible_states)
64{
65 return get_sched_state() & possible_states;
66}
67
68static inline int cpu_is_in_sched_state(int cpu, int possible_states)
69{
70 return get_sched_state_on(cpu) & possible_states;
71}
72
73static inline void set_sched_state(sched_state_t s)
74{
75 TRACE_SCHED_STATE_CHANGE(get_sched_state(), s, smp_processor_id());
76 atomic_set(&__get_cpu_var(resched_state), s);
77}
78
79static inline int sched_state_transition(sched_state_t from, sched_state_t to)
80{
81 sched_state_t old_state;
82
83 old_state = atomic_cmpxchg(&__get_cpu_var(resched_state), from, to);
84 if (old_state == from) {
85 TRACE_SCHED_STATE_CHANGE(from, to, smp_processor_id());
86 return 1;
87 } else
88 return 0;
89}
90
91static inline int sched_state_transition_on(int cpu,
92 sched_state_t from,
93 sched_state_t to)
94{
95 sched_state_t old_state;
96
97 old_state = atomic_cmpxchg(&per_cpu(resched_state, cpu), from, to);
98 if (old_state == from) {
99 TRACE_SCHED_STATE_CHANGE(from, to, cpu);
100 return 1;
101 } else
102 return 0;
103}
104
105/* Plugins must call this function after they have decided which job to
106 * schedule next. IMPORTANT: this function must be called while still holding
107 * the lock that is used to serialize scheduling decisions.
108 *
109 * (Ideally, we would like to use runqueue locks for this purpose, but that
110 * would lead to deadlocks with the migration code.)
111 */
112static inline void sched_state_task_picked(void)
113{
114 VERIFY_SCHED_STATE(WILL_SCHEDULE);
115
116 /* WILL_SCHEDULE has only a local transition => simple store is ok */
117 set_sched_state(TASK_PICKED);
118}
119
120static inline void sched_state_entered_schedule(void)
121{
122 /* Update state for the case that we entered schedule() not due to
123 * set_tsk_need_resched() */
124 set_sched_state(WILL_SCHEDULE);
125}
126
127/* Called by schedule() to check if the scheduling decision is still valid
128 * after a context switch. Returns 1 if the CPU needs to reschedule. */
129static inline int sched_state_validate_switch(void)
130{
131 int left_state_ok = 0;
132
133 VERIFY_SCHED_STATE(PICKED_WRONG_TASK | TASK_PICKED);
134
135 if (is_in_sched_state(TASK_PICKED)) {
136 /* Might be good; let's try to transition out of this
137 * state. This must be done atomically since remote processors
138 * may try to change the state, too. */
139 left_state_ok = sched_state_transition(TASK_PICKED, TASK_SCHEDULED);
140 }
141
142 if (!left_state_ok) {
143 /* We raced with a higher-priority task arrival => not
144 * valid. The CPU needs to reschedule. */
145 set_sched_state(WILL_SCHEDULE);
146 return 1;
147 } else
148 return 0;
149}
150
151/* State transition events. See litmus/preempt.c for details. */
152void sched_state_will_schedule(struct task_struct* tsk);
153void sched_state_ipi(void);
154/* Cause a CPU (remote or local) to reschedule. */
155void litmus_reschedule(int cpu);
156void litmus_reschedule_local(void);
157
158#ifdef CONFIG_DEBUG_KERNEL
159void sched_state_plugin_check(void);
160#else
161#define sched_state_plugin_check() /* no check */
162#endif
163
164#endif
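
A rough sketch of the two call sites the state machine expects from a plugin (the lock, pick_next_job(), and the function names are stand-ins): sched_state_task_picked() is invoked while the scheduling decision is still serialized, and litmus_reschedule() is used to poke a remote CPU that should re-run its scheduler.

static DEFINE_RAW_SPINLOCK(my_plugin_lock);
static struct task_struct* pick_next_job(void);

static struct task_struct* my_plugin_schedule(struct task_struct* prev)
{
	struct task_struct* next;

	raw_spin_lock(&my_plugin_lock);
	next = pick_next_job();
	/* must happen before the serializing lock is dropped */
	sched_state_task_picked();
	raw_spin_unlock(&my_plugin_lock);

	return next;
}

/* e.g., after linking a newly released job to a remote CPU: */
static void kick_cpu(int cpu)
{
	litmus_reschedule(cpu);
}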
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
new file mode 100644
index 000000000000..ac249292e866
--- /dev/null
+++ b/include/litmus/rt_domain.h
@@ -0,0 +1,182 @@
1/* CLEANUP: Add comments and make it less messy.
2 *
3 */
4
5#ifndef __UNC_RT_DOMAIN_H__
6#define __UNC_RT_DOMAIN_H__
7
8#include <litmus/bheap.h>
9
10#define RELEASE_QUEUE_SLOTS 127 /* prime */
11
12struct _rt_domain;
13
14typedef int (*check_resched_needed_t)(struct _rt_domain *rt);
15typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks);
16
17struct release_queue {
18 /* each slot maintains a list of release heaps sorted
19 * by release time */
20 struct list_head slot[RELEASE_QUEUE_SLOTS];
21};
22
23typedef struct _rt_domain {
24 /* runnable rt tasks are in here */
25 raw_spinlock_t ready_lock;
26 struct bheap ready_queue;
27
28 /* real-time tasks waiting for release are in here */
29 raw_spinlock_t release_lock;
30 struct release_queue release_queue;
31
32#ifdef CONFIG_RELEASE_MASTER
33 int release_master;
34#endif
35
36 /* for moving tasks to the release queue */
37 raw_spinlock_t tobe_lock;
38 struct list_head tobe_released;
39
40 /* how do we check if we need to kick another CPU? */
41 check_resched_needed_t check_resched;
42
43 /* how do we release jobs? */
44 release_jobs_t release_jobs;
45
46 /* how are tasks ordered in the ready queue? */
47 bheap_prio_t order;
48} rt_domain_t;
49
50struct release_heap {
51 /* list_head for per-time-slot list */
52 struct list_head list;
53 lt_t release_time;
54 /* all tasks to be released at release_time */
55 struct bheap heap;
56 /* used to trigger the release */
57 struct hrtimer timer;
58
59#ifdef CONFIG_RELEASE_MASTER
60 /* used to delegate releases */
61 struct hrtimer_start_on_info info;
62#endif
63 /* required for the timer callback */
64 rt_domain_t* dom;
65};
66
67
68static inline struct task_struct* __next_ready(rt_domain_t* rt)
69{
70 struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue);
71 if (hn)
72 return bheap2task(hn);
73 else
74 return NULL;
75}
76
77void rt_domain_init(rt_domain_t *rt, bheap_prio_t order,
78 check_resched_needed_t check,
79 release_jobs_t release);
80
81void __add_ready(rt_domain_t* rt, struct task_struct *new);
82void __merge_ready(rt_domain_t* rt, struct bheap *tasks);
83void __add_release(rt_domain_t* rt, struct task_struct *task);
84
85static inline struct task_struct* __take_ready(rt_domain_t* rt)
86{
87 struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue);
88 if (hn)
89 return bheap2task(hn);
90 else
91 return NULL;
92}
93
94static inline struct task_struct* __peek_ready(rt_domain_t* rt)
95{
96 struct bheap_node* hn = bheap_peek(rt->order, &rt->ready_queue);
97 if (hn)
98 return bheap2task(hn);
99 else
100 return NULL;
101}
102
103static inline int is_queued(struct task_struct *t)
104{
105 BUG_ON(!tsk_rt(t)->heap_node);
106 return bheap_node_in_heap(tsk_rt(t)->heap_node);
107}
108
109static inline void remove(rt_domain_t* rt, struct task_struct *t)
110{
111 bheap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
112}
113
114static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
115{
116 unsigned long flags;
117 /* first we need the write lock for rt_ready_queue */
118 raw_spin_lock_irqsave(&rt->ready_lock, flags);
119 __add_ready(rt, new);
120 raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
121}
122
123static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
124{
125 unsigned long flags;
126 raw_spin_lock_irqsave(&rt->ready_lock, flags);
127 __merge_ready(rt, tasks);
128 raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
129}
130
131static inline struct task_struct* take_ready(rt_domain_t* rt)
132{
133 unsigned long flags;
134 struct task_struct* ret;
135 /* first we need the write lock for rt_ready_queue */
136 raw_spin_lock_irqsave(&rt->ready_lock, flags);
137 ret = __take_ready(rt);
138 raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
139 return ret;
140}
141
142
143static inline void add_release(rt_domain_t* rt, struct task_struct *task)
144{
145 unsigned long flags;
146 raw_spin_lock_irqsave(&rt->tobe_lock, flags);
147 __add_release(rt, task);
148 raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
149}
150
151#ifdef CONFIG_RELEASE_MASTER
152void __add_release_on(rt_domain_t* rt, struct task_struct *task,
153 int target_cpu);
154
155static inline void add_release_on(rt_domain_t* rt,
156 struct task_struct *task,
157 int target_cpu)
158{
159 unsigned long flags;
160 raw_spin_lock_irqsave(&rt->tobe_lock, flags);
161 __add_release_on(rt, task, target_cpu);
162 raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
163}
164#endif
165
166static inline int __jobs_pending(rt_domain_t* rt)
167{
168 return !bheap_empty(&rt->ready_queue);
169}
170
171static inline int jobs_pending(rt_domain_t* rt)
172{
173 unsigned long flags;
174 int ret;
175 /* first we need the write lock for rt_ready_queue */
176 raw_spin_lock_irqsave(&rt->ready_lock, flags);
177 ret = !bheap_empty(&rt->ready_queue);
178 raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
179 return ret;
180}
181
182#endif
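
A rough sketch of wiring a domain to EDF (my_check_resched(), my_release_jobs(), and my_domain_setup() are stand-ins; edf_domain_init() and edf_ready_order come from litmus/edf_common.h): the domain stores the ordering plus the two callbacks, and releases are merged into the ready queue under rt->ready_lock.

static rt_domain_t my_domain;

static int my_check_resched(rt_domain_t* rt)
{
	/* a real plugin compares the new queue head against the currently
	 * linked job; this sketch simply asks the local CPU to reschedule */
	litmus_reschedule_local();
	return 1;
}

static void my_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
	merge_ready(rt, tasks); /* takes rt->ready_lock internally */
	my_check_resched(rt);
}

static void my_domain_setup(void)
{
	edf_domain_init(&my_domain, my_check_resched, my_release_jobs);
}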
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index ce76faa9c6d7..e26535b32bd1 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -84,12 +84,12 @@ struct rt_task {
84}; 84};
85 85
86union np_flag { 86union np_flag {
87 uint64_t raw; 87 uint32_t raw;
88 struct { 88 struct {
89 /* Is the task currently in a non-preemptive section? */ 89 /* Is the task currently in a non-preemptive section? */
90 uint64_t flag:31; 90 uint32_t flag:31;
91 /* Should the task call into the scheduler? */ 91 /* Should the task call into the scheduler? */
92 uint64_t preempt:1; 92 uint32_t preempt:1;
93 } np; 93 } np;
94}; 94};
95 95
@@ -110,10 +110,10 @@ union np_flag {
110struct control_page { 110struct control_page {
111 /* This flag is used by userspace to communicate non-preempive 111 /* This flag is used by userspace to communicate non-preempive
112 * sections. */ 112 * sections. */
113 volatile union np_flag sched; 113 volatile __attribute__ ((aligned (8))) union np_flag sched;
114 114
115 volatile uint64_t irq_count; /* Incremented by the kernel each time an IRQ is 115 /* Incremented by the kernel each time an IRQ is handled. */
116 * handled. */ 116 volatile __attribute__ ((aligned (8))) uint64_t irq_count;
117 117
118 /* Locking overhead tracing: userspace records here the time stamp 118 /* Locking overhead tracing: userspace records here the time stamp
119 * and IRQ counter prior to starting the system call. */ 119 * and IRQ counter prior to starting the system call. */
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
new file mode 100644
index 000000000000..0ccccd6ae1af
--- /dev/null
+++ b/include/litmus/sched_plugin.h
@@ -0,0 +1,128 @@
1/*
2 * Definition of the scheduler plugin interface.
3 *
4 */
5#ifndef _LINUX_SCHED_PLUGIN_H_
6#define _LINUX_SCHED_PLUGIN_H_
7
8#include <linux/sched.h>
9
10#ifdef CONFIG_LITMUS_LOCKING
11#include <litmus/locking.h>
12#endif
13
14/************************ setup/tear down ********************/
15
16typedef long (*activate_plugin_t) (void);
17typedef long (*deactivate_plugin_t) (void);
18
19struct domain_proc_info;
20typedef long (*get_domain_proc_info_t) (struct domain_proc_info **info);
21
22
23/********************* scheduler invocation ******************/
24/* The main scheduling function, called to select the next task to dispatch. */
25typedef struct task_struct* (*schedule_t)(struct task_struct * prev);
26/* Clean up after the task switch has occurred.
27 * This function is called after every (even non-rt) task switch.
28 */
29typedef void (*finish_switch_t)(struct task_struct *prev);
30
31
32/********************* task state changes ********************/
33
34/* Called to setup a new real-time task.
35 * Release the first job, enqueue, etc.
36 * Task may already be running.
37 */
38typedef void (*task_new_t) (struct task_struct *task,
39 int on_rq,
40 int running);
41
42/* Called to re-introduce a task after blocking.
43 * Can potentially be called multiple times.
44 */
45typedef void (*task_wake_up_t) (struct task_struct *task);
46/* Called to notify the plugin of a blocking real-time task.
47 * It will only be called for real-time tasks and before schedule() is called. */
48typedef void (*task_block_t) (struct task_struct *task);
49/* Called when a real-time task exits or changes to a different scheduling
50 * class.
51 * Free any allocated resources
52 */
53typedef void (*task_exit_t) (struct task_struct *);
54
55/* task_exit() is called with interrupts disabled and runqueue locks held, and
56 * thus cannot block or spin. task_cleanup() is called sometime later
57 * without any locks being held.
58 */
59typedef void (*task_cleanup_t) (struct task_struct *);
60
61#ifdef CONFIG_LITMUS_LOCKING
62/* Called when the current task attempts to create a new lock of a given
63 * protocol type. */
64typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type,
65 void* __user config);
66#endif
67
68
69/********************* sys call backends ********************/
70/* This function causes the caller to sleep until the next release */
71typedef long (*complete_job_t) (void);
72
73typedef long (*admit_task_t)(struct task_struct* tsk);
74
75typedef long (*wait_for_release_at_t)(lt_t release_time);
76
77/* Informs the plugin when a synchronous release takes place. */
78typedef void (*synchronous_release_at_t)(lt_t time_zero);
79
80/************************ misc routines ***********************/
81
82
83struct sched_plugin {
84 struct list_head list;
85 /* basic info */
86 char *plugin_name;
87
88 /* setup */
89 activate_plugin_t activate_plugin;
90 deactivate_plugin_t deactivate_plugin;
91 get_domain_proc_info_t get_domain_proc_info;
92
93 /* scheduler invocation */
94 schedule_t schedule;
95 finish_switch_t finish_switch;
96
97 /* syscall backend */
98 complete_job_t complete_job;
99 wait_for_release_at_t wait_for_release_at;
100 synchronous_release_at_t synchronous_release_at;
101
102 /* task state changes */
103 admit_task_t admit_task;
104
105 task_new_t task_new;
106 task_wake_up_t task_wake_up;
107 task_block_t task_block;
108
109 task_exit_t task_exit;
110 task_cleanup_t task_cleanup;
111
112#ifdef CONFIG_LITMUS_LOCKING
113 /* locking protocols */
114 allocate_lock_t allocate_lock;
115#endif
116} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
117
118
119extern struct sched_plugin *litmus;
120
121int register_sched_plugin(struct sched_plugin* plugin);
122struct sched_plugin* find_sched_plugin(const char* name);
123void print_sched_plugins(struct seq_file *m);
124
125
126extern struct sched_plugin linux_sched_plugin;
127
128#endif
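
A rough sketch of the minimum wiring a plugin provides (the DEMO plugin and its trivial callbacks are hypothetical; complete_job() is the generic backend declared in litmus/jobs.h): fill in a struct sched_plugin and hand it to register_sched_plugin() during initialization.

static struct task_struct* demo_schedule(struct task_struct* prev)
{
	/* a real plugin picks a job while holding its scheduling lock */
	sched_state_task_picked();
	return NULL; /* yield the CPU to the background Linux scheduler */
}

static long demo_admit_task(struct task_struct* tsk)
{
	return 0; /* accept every task */
}

static struct sched_plugin demo_plugin = {
	.plugin_name  = "DEMO",
	.schedule     = demo_schedule,
	.admit_task   = demo_admit_task,
	.complete_job = complete_job,
};

static int __init init_demo(void)
{
	return register_sched_plugin(&demo_plugin);
}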
diff --git a/include/litmus/srp.h b/include/litmus/srp.h
new file mode 100644
index 000000000000..c9a4552b2bf3
--- /dev/null
+++ b/include/litmus/srp.h
@@ -0,0 +1,28 @@
1#ifndef LITMUS_SRP_H
2#define LITMUS_SRP_H
3
4struct srp_semaphore;
5
6struct srp_priority {
7 struct list_head list;
8 unsigned int priority;
9 pid_t pid;
10};
11#define list2prio(l) list_entry(l, struct srp_priority, list)
12
13/* struct for uniprocessor SRP "semaphore" */
14struct srp_semaphore {
15 struct litmus_lock litmus_lock;
16 struct srp_priority ceiling;
17 struct task_struct* owner;
18 int cpu; /* cpu associated with this "semaphore" and resource */
19};
20
21/* map a task to its SRP preemption level priority */
22typedef unsigned int (*srp_prioritization_t)(struct task_struct* t);
23/* Must be updated by each plugin that uses SRP.*/
24extern srp_prioritization_t get_srp_prio;
25
26struct srp_semaphore* allocate_srp_semaphore(void);
27
28#endif
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
new file mode 100644
index 000000000000..94264c27d9ac
--- /dev/null
+++ b/include/litmus/unistd_32.h
@@ -0,0 +1,21 @@
1/*
2 * included from arch/x86/include/asm/unistd_32.h
3 *
4 * LITMUS^RT syscalls with "relative" numbers
5 */
6#define __LSC(x) (__NR_LITMUS + x)
7
8#define __NR_set_rt_task_param __LSC(0)
9#define __NR_get_rt_task_param __LSC(1)
10#define __NR_complete_job __LSC(2)
11#define __NR_od_open __LSC(3)
12#define __NR_od_close __LSC(4)
13#define __NR_litmus_lock __LSC(5)
14#define __NR_litmus_unlock __LSC(6)
15#define __NR_query_job_no __LSC(7)
16#define __NR_wait_for_job_release __LSC(8)
17#define __NR_wait_for_ts_release __LSC(9)
18#define __NR_release_ts __LSC(10)
19#define __NR_null_call __LSC(11)
20
21#define NR_litmus_syscalls 12
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
new file mode 100644
index 000000000000..d5ced0d2642c
--- /dev/null
+++ b/include/litmus/unistd_64.h
@@ -0,0 +1,33 @@
1/*
2 * included from arch/x86/include/asm/unistd_64.h
3 *
4 * LITMUS^RT syscalls with "relative" numbers
5 */
6#define __LSC(x) (__NR_LITMUS + x)
7
8#define __NR_set_rt_task_param __LSC(0)
9__SYSCALL(__NR_set_rt_task_param, sys_set_rt_task_param)
10#define __NR_get_rt_task_param __LSC(1)
11__SYSCALL(__NR_get_rt_task_param, sys_get_rt_task_param)
12#define __NR_complete_job __LSC(2)
13__SYSCALL(__NR_complete_job, sys_complete_job)
14#define __NR_od_open __LSC(3)
15__SYSCALL(__NR_od_open, sys_od_open)
16#define __NR_od_close __LSC(4)
17__SYSCALL(__NR_od_close, sys_od_close)
18#define __NR_litmus_lock __LSC(5)
19__SYSCALL(__NR_litmus_lock, sys_litmus_lock)
20#define __NR_litmus_unlock __LSC(6)
21__SYSCALL(__NR_litmus_unlock, sys_litmus_unlock)
22#define __NR_query_job_no __LSC(7)
23__SYSCALL(__NR_query_job_no, sys_query_job_no)
24#define __NR_wait_for_job_release __LSC(8)
25__SYSCALL(__NR_wait_for_job_release, sys_wait_for_job_release)
26#define __NR_wait_for_ts_release __LSC(9)
27__SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
28#define __NR_release_ts __LSC(10)
29__SYSCALL(__NR_release_ts, sys_release_ts)
30#define __NR_null_call __LSC(11)
31__SYSCALL(__NR_null_call, sys_null_call)
32
33#define NR_litmus_syscalls 12
diff --git a/include/litmus/wait.h b/include/litmus/wait.h
new file mode 100644
index 000000000000..ce1347c355f8
--- /dev/null
+++ b/include/litmus/wait.h
@@ -0,0 +1,57 @@
1#ifndef _LITMUS_WAIT_H_
2#define _LITMUS_WAIT_H_
3
4struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
5
6/* wrap regular wait_queue_t head */
7struct __prio_wait_queue {
8 wait_queue_t wq;
9
10 /* some priority point */
11 lt_t priority;
12 /* break ties in priority by lower tie_breaker */
13 unsigned int tie_breaker;
14};
15
16typedef struct __prio_wait_queue prio_wait_queue_t;
17
18static inline void init_prio_waitqueue_entry(prio_wait_queue_t *pwq,
19 struct task_struct* t,
20 lt_t priority)
21{
22 init_waitqueue_entry(&pwq->wq, t);
23 pwq->priority = priority;
24 pwq->tie_breaker = 0;
25}
26
27static inline void init_prio_waitqueue_entry_tie(prio_wait_queue_t *pwq,
28 struct task_struct* t,
29 lt_t priority,
30 unsigned int tie_breaker)
31{
32 init_waitqueue_entry(&pwq->wq, t);
33 pwq->priority = priority;
34 pwq->tie_breaker = tie_breaker;
35}
36
37unsigned int __add_wait_queue_prio_exclusive(
38 wait_queue_head_t* head,
39 prio_wait_queue_t *new);
40
41static inline unsigned int add_wait_queue_prio_exclusive(
42 wait_queue_head_t* head,
43 prio_wait_queue_t *new)
44{
45 unsigned long flags;
46 unsigned int passed;
47
48 spin_lock_irqsave(&head->lock, flags);
49 passed = __add_wait_queue_prio_exclusive(head, new);
50
51 spin_unlock_irqrestore(&head->lock, flags);
52
53 return passed;
54}
55
56
57#endif
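
A rough sketch of suspending on a priority-ordered wait queue, e.g. inside a locking protocol's lock() method (the helper and its caller-supplied lock handling are illustrative):

static void wait_in_priority_order(wait_queue_head_t* wq, lt_t prio)
{
	prio_wait_queue_t pwq;

	init_prio_waitqueue_entry(&pwq, current, prio);
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue_prio_exclusive(wq, &pwq);

	/* ... release the lock protecting the shared resource, then ... */
	schedule();
}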