author     Bjoern Brandenburg <bbb@mpi-sws.org>  2015-08-09 07:18:48 -0400
committer  Bjoern Brandenburg <bbb@mpi-sws.org>  2015-08-09 06:21:18 -0400
commit     8e048c798adaabef530a1526f7ce8c6c3cd3475e (patch)
tree       5a96b3eaeaafecec1bf08ba71a9d0084d39d46eb /include/litmus
parent     bd175e94795774908317a861a883761b75750e35 (diff)
Add LITMUS^RT core implementation
This patch adds the core of LITMUS^RT:
- library functionality (heaps, rt_domain, prioritization, etc.)
- budget enforcement logic
- job management
- system call backends
- virtual devices (control page, etc.)
- scheduler plugin API (and dummy plugin)
This code compiles, but is not yet integrated with the rest of Linux.
Diffstat (limited to 'include/litmus')
-rw-r--r--  include/litmus/affinity.h      |  52
-rw-r--r--  include/litmus/bheap.h         |  77
-rw-r--r--  include/litmus/binheap.h       | 205
-rw-r--r--  include/litmus/budget.h        |  36
-rw-r--r--  include/litmus/ceiling.h       |  36
-rw-r--r--  include/litmus/clustered.h     |  46
-rw-r--r--  include/litmus/edf_common.h    |  25
-rw-r--r--  include/litmus/fdso.h          |  78
-rw-r--r--  include/litmus/fp_common.h     | 105
-rw-r--r--  include/litmus/fpmath.h        | 147
-rw-r--r--  include/litmus/jobs.h          |  10
-rw-r--r--  include/litmus/litmus.h        | 261
-rw-r--r--  include/litmus/litmus_proc.h   |  63
-rw-r--r--  include/litmus/locking.h       |  28
-rw-r--r--  include/litmus/preempt.h       | 162
-rw-r--r--  include/litmus/rt_domain.h     | 182
-rw-r--r--  include/litmus/rt_param.h      |  15
-rw-r--r--  include/litmus/sched_plugin.h  | 128
-rw-r--r--  include/litmus/srp.h           |  28
-rw-r--r--  include/litmus/unistd_32.h     |  21
-rw-r--r--  include/litmus/unistd_64.h     |  33
-rw-r--r--  include/litmus/wait.h          |  57
22 files changed, 1786 insertions(+), 9 deletions(-)
diff --git a/include/litmus/affinity.h b/include/litmus/affinity.h
new file mode 100644
index 000000000000..4d7c618c8175
--- /dev/null
+++ b/include/litmus/affinity.h
@@ -0,0 +1,52 @@
1 | #ifndef __LITMUS_AFFINITY_H | ||
2 | #define __LITMUS_AFFINITY_H | ||
3 | |||
4 | #include <linux/cpumask.h> | ||
5 | |||
6 | /* Works like: | ||
7 | void get_nearest_available_cpu( | ||
8 | cpu_entry_t **nearest, | ||
9 | cpu_entry_t *start, | ||
10 | cpu_entry_t *entries, | ||
11 | int release_master, | ||
12 | cpumask_var_t cpus_to_test) | ||
13 | |||
14 | Set release_master = NO_CPU for no Release Master. | ||
15 | |||
16 | We use a macro here to exploit the fact that C-EDF and G-EDF | ||
17 | have similar structures for their cpu_entry_t structs, even though | ||
18 | they do not share a common base-struct. The macro allows us to | ||
19 | avoid code duplication. | ||
20 | |||
21 | */ | ||
22 | #define get_nearest_available_cpu(nearest, start, entries, release_master, cpus_to_test) \ | ||
23 | { \ | ||
24 | (nearest) = NULL; \ | ||
25 | if (!(start)->linked && likely((start)->cpu != (release_master))) { \ | ||
26 | (nearest) = (start); \ | ||
27 | } else { \ | ||
28 | int __cpu; \ | ||
29 | \ | ||
30 | /* FIXME: get rid of the iteration with a bitmask + AND */ \ | ||
31 | for_each_cpu(__cpu, cpus_to_test) { \ | ||
32 | if (likely(__cpu != release_master)) { \ | ||
33 | cpu_entry_t *__entry = &per_cpu((entries), __cpu); \ | ||
34 | if (cpus_share_cache((start)->cpu, __entry->cpu) \ | ||
35 | && !__entry->linked) { \ | ||
36 | (nearest) = __entry; \ | ||
37 | break; \ | ||
38 | } \ | ||
39 | } \ | ||
40 | } \ | ||
41 | } \ | ||
42 | \ | ||
43 | if ((nearest)) { \ | ||
44 | TRACE("P%d is closest available CPU to P%d\n", \ | ||
45 | (nearest)->cpu, (start)->cpu); \ | ||
46 | } else { \ | ||
47 | TRACE("Could not find an available CPU close to P%d\n", \ | ||
48 | (start)->cpu); \ | ||
49 | } \ | ||
50 | } | ||
51 | |||
52 | #endif | ||
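For illustration, a minimal sketch of how a global plugin might invoke this macro. The cpu_entry_t type, the gsnedf_cpu_entries per-CPU variable, and the linked field are plugin-side assumptions (as in G-EDF), not part of affinity.h:

/* Sketch only: cpu_entry_t and gsnedf_cpu_entries are assumed to be defined
 * by the plugin; affinity.h itself provides just the macro. */
static cpu_entry_t* find_nearby_idle_cpu(cpu_entry_t* start)
{
	cpu_entry_t* nearest;

	/* consider all online CPUs; NO_CPU disables the release-master check */
	get_nearest_available_cpu(nearest, start, gsnedf_cpu_entries,
				  NO_CPU, cpu_online_mask);

	return nearest; /* may be NULL if no cache-local CPU is available */
}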
diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h
new file mode 100644
index 000000000000..cf4864a498d8
--- /dev/null
+++ b/include/litmus/bheap.h
@@ -0,0 +1,77 @@
1 | /* bheap.h -- Binomial Heaps | ||
2 | * | ||
3 | * (c) 2008, 2009 Bjoern Brandenburg | ||
4 | */ | ||
5 | |||
6 | #ifndef BHEAP_H | ||
7 | #define BHEAP_H | ||
8 | |||
9 | #define NOT_IN_HEAP UINT_MAX | ||
10 | |||
11 | struct bheap_node { | ||
12 | struct bheap_node* parent; | ||
13 | struct bheap_node* next; | ||
14 | struct bheap_node* child; | ||
15 | |||
16 | unsigned int degree; | ||
17 | void* value; | ||
18 | struct bheap_node** ref; | ||
19 | }; | ||
20 | |||
21 | struct bheap { | ||
22 | struct bheap_node* head; | ||
23 | /* We cache the minimum of the heap. | ||
24 | * This speeds up repeated peek operations. | ||
25 | */ | ||
26 | struct bheap_node* min; | ||
27 | }; | ||
28 | |||
29 | typedef int (*bheap_prio_t)(struct bheap_node* a, struct bheap_node* b); | ||
30 | |||
31 | void bheap_init(struct bheap* heap); | ||
32 | void bheap_node_init(struct bheap_node** ref_to_bheap_node_ptr, void* value); | ||
33 | |||
34 | static inline int bheap_node_in_heap(struct bheap_node* h) | ||
35 | { | ||
36 | return h->degree != NOT_IN_HEAP; | ||
37 | } | ||
38 | |||
39 | static inline int bheap_empty(struct bheap* heap) | ||
40 | { | ||
41 | return heap->head == NULL && heap->min == NULL; | ||
42 | } | ||
43 | |||
44 | /* insert (and reinitialize) a node into the heap */ | ||
45 | void bheap_insert(bheap_prio_t higher_prio, | ||
46 | struct bheap* heap, | ||
47 | struct bheap_node* node); | ||
48 | |||
49 | /* merge addition into target */ | ||
50 | void bheap_union(bheap_prio_t higher_prio, | ||
51 | struct bheap* target, | ||
52 | struct bheap* addition); | ||
53 | |||
54 | struct bheap_node* bheap_peek(bheap_prio_t higher_prio, | ||
55 | struct bheap* heap); | ||
56 | |||
57 | struct bheap_node* bheap_take(bheap_prio_t higher_prio, | ||
58 | struct bheap* heap); | ||
59 | |||
60 | void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap); | ||
61 | int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node); | ||
62 | |||
63 | void bheap_delete(bheap_prio_t higher_prio, | ||
64 | struct bheap* heap, | ||
65 | struct bheap_node* node); | ||
66 | |||
67 | /* allocate from memcache */ | ||
68 | struct bheap_node* bheap_node_alloc(int gfp_flags); | ||
69 | void bheap_node_free(struct bheap_node* hn); | ||
70 | |||
71 | /* allocate a heap node for value and insert into the heap */ | ||
72 | int bheap_add(bheap_prio_t higher_prio, struct bheap* heap, | ||
73 | void* value, int gfp_flags); | ||
74 | |||
75 | void* bheap_take_del(bheap_prio_t higher_prio, | ||
76 | struct bheap* heap); | ||
77 | #endif | ||
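As a usage illustration (not part of the patch), the API can maintain a min-ordered heap of items keyed on an integer; struct item and the helpers below are assumptions:

/* Sketch: a heap of items ordered by an integer key. */
struct item {
	int key;
	struct bheap_node* hn;	/* obtained via bheap_node_alloc() */
};

/* comparator: non-zero iff a should be dequeued before b (smallest key wins) */
static int item_higher_prio(struct bheap_node* a, struct bheap_node* b)
{
	return ((struct item*) a->value)->key < ((struct item*) b->value)->key;
}

static void item_enqueue(struct bheap* heap, struct item* it)
{
	bheap_node_init(&it->hn, it);	/* bind the node to its item */
	bheap_insert(item_higher_prio, heap, it->hn);
}

static struct item* item_dequeue(struct bheap* heap)
{
	struct bheap_node* hn = bheap_take(item_higher_prio, heap);
	return hn ? (struct item*) hn->value : NULL;
}

The same comparator must be passed to every operation, since the heap itself stores no ordering function.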
diff --git a/include/litmus/binheap.h b/include/litmus/binheap.h
new file mode 100644
index 000000000000..1cf364701da8
--- /dev/null
+++ b/include/litmus/binheap.h
@@ -0,0 +1,205 @@
1 | #ifndef LITMUS_BINARY_HEAP_H | ||
2 | #define LITMUS_BINARY_HEAP_H | ||
3 | |||
4 | #include <linux/kernel.h> | ||
5 | |||
6 | /** | ||
7 | * Simple binary heap with add, arbitrary delete, delete_root, and top | ||
8 | * operations. | ||
9 | * | ||
10 | * Style meant to conform with list.h. | ||
11 | * | ||
12 | * Motivation: Linux's prio_heap.h is of fixed size. Litmus's binomial | ||
13 | * heap may be overkill (and perhaps not general enough) for some applications. | ||
14 | * | ||
15 | * Note: In order to make node swaps fast, a node inserted with a data pointer | ||
16 | * may not always hold said data pointer. This is similar to the binomial heap | ||
17 | * implementation. This does make node deletion tricky since we have to | ||
18 | * (1) locate the node that holds the data pointer to delete, and (2) the | ||
19 | * node that was originally inserted with said data pointer. These have to be | ||
20 | * coalesced into a single node before removal (see usage of | ||
21 | * __binheap_safe_swap()). We have to track node references to accomplish this. | ||
22 | */ | ||
23 | |||
24 | struct binheap_node { | ||
25 | void *data; | ||
26 | struct binheap_node *parent; | ||
27 | struct binheap_node *left; | ||
28 | struct binheap_node *right; | ||
29 | |||
30 | /* pointer to binheap_node that holds *data for which this binheap_node | ||
31 | * was originally inserted. (*data "owns" this node) | ||
32 | */ | ||
33 | struct binheap_node *ref; | ||
34 | struct binheap_node **ref_ptr; | ||
35 | }; | ||
36 | |||
37 | /** | ||
38 | * Signature of comparator function. Assumes 'less-than' (min-heap). | ||
39 | * Pass in 'greater-than' for max-heap. | ||
40 | * | ||
41 | * TODO: Consider macro-based implementation that allows comparator to be | ||
42 | * inlined (similar to Linux red/black tree) for greater efficiency. | ||
43 | */ | ||
44 | typedef int (*binheap_order_t)(struct binheap_node *a, | ||
45 | struct binheap_node *b); | ||
46 | |||
47 | |||
48 | struct binheap { | ||
49 | struct binheap_node *root; | ||
50 | |||
51 | /* pointer to node to take next inserted child */ | ||
52 | struct binheap_node *next; | ||
53 | |||
54 | /* pointer to last node in complete binary tree */ | ||
55 | struct binheap_node *last; | ||
56 | |||
57 | /* comparator function pointer */ | ||
58 | binheap_order_t compare; | ||
59 | }; | ||
60 | |||
61 | |||
62 | /* Initialized heap nodes not in a heap have parent | ||
63 | * set to BINHEAP_POISON. | ||
64 | */ | ||
65 | #define BINHEAP_POISON ((void*)(0xdeadbeef)) | ||
66 | |||
67 | |||
68 | /** | ||
69 | * binheap_entry - get the struct for this heap node. | ||
70 | * Only valid when called upon heap nodes other than the root handle. | ||
71 | * @ptr: the heap node. | ||
72 | * @type: the type of struct pointed to by binheap_node::data. | ||
73 | * @member: unused. | ||
74 | */ | ||
75 | #define binheap_entry(ptr, type, member) \ | ||
76 | ((type *)((ptr)->data)) | ||
77 | |||
78 | /** | ||
79 | * binheap_node_container - get the struct that contains this node. | ||
80 | * Only valid when called upon heap nodes other than the root handle. | ||
81 | * @ptr: the heap node. | ||
82 | * @type: the type of struct the node is embedded in. | ||
83 | * @member: the name of the binheap_struct within the (type) struct. | ||
84 | */ | ||
85 | #define binheap_node_container(ptr, type, member) \ | ||
86 | container_of((ptr), type, member) | ||
87 | |||
88 | /** | ||
89 | * binheap_top_entry - get the struct for the node at the top of the heap. | ||
90 | * Only valid when called upon the heap handle node. | ||
91 | * @ptr: the special heap-handle node. | ||
92 | * @type: the type of the struct the head is embedded in. | ||
93 | * @member: the name of the binheap_struct within the (type) struct. | ||
94 | */ | ||
95 | #define binheap_top_entry(ptr, type, member) \ | ||
96 | binheap_entry((ptr)->root, type, member) | ||
97 | |||
98 | /** | ||
99 | * binheap_delete_root - remove the root element from the heap. | ||
100 | * @handle: handle to the heap. | ||
101 | * @type: the type of the struct the head is embedded in. | ||
102 | * @member: the name of the binheap_struct within the (type) struct. | ||
103 | */ | ||
104 | #define binheap_delete_root(handle, type, member) \ | ||
105 | __binheap_delete_root((handle), &((type *)((handle)->root->data))->member) | ||
106 | |||
107 | /** | ||
108 | * binheap_delete - remove an arbitrary element from the heap. | ||
109 | * @to_delete: pointer to node to be removed. | ||
110 | * @handle: handle to the heap. | ||
111 | */ | ||
112 | #define binheap_delete(to_delete, handle) \ | ||
113 | __binheap_delete((to_delete), (handle)) | ||
114 | |||
115 | /** | ||
116 | * binheap_add - insert an element into the heap. | ||
117 | * @new_node: node to add. | ||
118 | * @handle: handle to the heap. | ||
119 | * @type: the type of the struct the head is embedded in. | ||
120 | * @member: the name of the binheap_struct within the (type) struct. | ||
121 | */ | ||
122 | #define binheap_add(new_node, handle, type, member) \ | ||
123 | __binheap_add((new_node), (handle), container_of((new_node), type, member)) | ||
124 | |||
125 | /** | ||
126 | * binheap_decrease - re-eval the position of a node (based upon its | ||
127 | * original data pointer). | ||
128 | * @handle: handle to the heap. | ||
129 | * @orig_node: node that was associated with the data pointer | ||
130 | * (whose value has changed) when said pointer was | ||
131 | * added to the heap. | ||
132 | */ | ||
133 | #define binheap_decrease(orig_node, handle) \ | ||
134 | __binheap_decrease((orig_node), (handle)) | ||
135 | |||
136 | #define BINHEAP_NODE_INIT() { NULL, BINHEAP_POISON, NULL, NULL, NULL, NULL } | ||
137 | |||
138 | #define BINHEAP_NODE(name) \ | ||
139 | struct binheap_node name = BINHEAP_NODE_INIT() | ||
140 | |||
141 | |||
142 | static inline void INIT_BINHEAP_NODE(struct binheap_node *n) | ||
143 | { | ||
144 | n->data = NULL; | ||
145 | n->parent = BINHEAP_POISON; | ||
146 | n->left = NULL; | ||
147 | n->right = NULL; | ||
148 | n->ref = NULL; | ||
149 | n->ref_ptr = NULL; | ||
150 | } | ||
151 | |||
152 | static inline void INIT_BINHEAP_HANDLE(struct binheap *handle, | ||
153 | binheap_order_t compare) | ||
154 | { | ||
155 | handle->root = NULL; | ||
156 | handle->next = NULL; | ||
157 | handle->last = NULL; | ||
158 | handle->compare = compare; | ||
159 | } | ||
160 | |||
161 | /* Returns true if binheap is empty. */ | ||
162 | static inline int binheap_empty(struct binheap *handle) | ||
163 | { | ||
164 | return(handle->root == NULL); | ||
165 | } | ||
166 | |||
167 | /* Returns true if binheap node is in a heap. */ | ||
168 | static inline int binheap_is_in_heap(struct binheap_node *node) | ||
169 | { | ||
170 | return (node->parent != BINHEAP_POISON); | ||
171 | } | ||
172 | |||
173 | /* Returns true if binheap node is in given heap. */ | ||
174 | int binheap_is_in_this_heap(struct binheap_node *node, struct binheap* heap); | ||
175 | |||
176 | /* Add a node to a heap */ | ||
177 | void __binheap_add(struct binheap_node *new_node, | ||
178 | struct binheap *handle, | ||
179 | void *data); | ||
180 | |||
181 | /** | ||
182 | * Removes the root node from the heap. The node is removed after coalescing | ||
183 | * the binheap_node with its original data pointer at the root of the tree. | ||
184 | * | ||
185 | * The 'last' node in the tree is then swapped up to the root and bubbled | ||
186 | * down. | ||
187 | */ | ||
188 | void __binheap_delete_root(struct binheap *handle, | ||
189 | struct binheap_node *container); | ||
190 | |||
191 | /** | ||
192 | * Delete an arbitrary node. Bubble node to delete up to the root, | ||
193 | * and then delete to root. | ||
194 | */ | ||
195 | void __binheap_delete(struct binheap_node *node_to_delete, | ||
196 | struct binheap *handle); | ||
197 | |||
198 | /** | ||
199 | * Bubble up a node whose key has decreased in value. | ||
200 | */ | ||
201 | void __binheap_decrease(struct binheap_node *orig_node, | ||
202 | struct binheap *handle); | ||
203 | |||
204 | |||
205 | #endif | ||
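For illustration, a minimal sketch of the intended embedding pattern; struct job and its deadline field are assumptions, not part of the patch:

/* Sketch: a min-heap of jobs ordered by deadline. */
struct job {
	unsigned long long deadline;
	struct binheap_node node;
};

static int job_earlier_deadline(struct binheap_node* a, struct binheap_node* b)
{
	struct job* ja = binheap_entry(a, struct job, node);
	struct job* jb = binheap_entry(b, struct job, node);
	return ja->deadline < jb->deadline;	/* 'less-than' => min-heap */
}

static void demo_binheap_usage(struct binheap* heap, struct job* j)
{
	INIT_BINHEAP_HANDLE(heap, job_earlier_deadline);
	INIT_BINHEAP_NODE(&j->node);
	binheap_add(&j->node, heap, struct job, node);

	if (!binheap_empty(heap)) {
		struct job* earliest =
			binheap_top_entry(heap, struct job, node);
		binheap_delete_root(heap, struct job, node);
		/* earliest now refers to the job with the smallest deadline */
		(void) earliest;
	}
}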
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
new file mode 100644
index 000000000000..bd2d5c964f92
--- /dev/null
+++ b/include/litmus/budget.h
@@ -0,0 +1,36 @@
1 | #ifndef _LITMUS_BUDGET_H_ | ||
2 | #define _LITMUS_BUDGET_H_ | ||
3 | |||
4 | /* Update the per-processor enforcement timer (arm/reprogram/cancel) for | ||
5 | * the next task. */ | ||
6 | void update_enforcement_timer(struct task_struct* t); | ||
7 | |||
8 | static inline int budget_exhausted(struct task_struct* t) | ||
9 | { | ||
10 | return get_exec_time(t) >= get_exec_cost(t); | ||
11 | } | ||
12 | |||
13 | static inline lt_t budget_remaining(struct task_struct* t) | ||
14 | { | ||
15 | if (!budget_exhausted(t)) | ||
16 | return get_exec_cost(t) - get_exec_time(t); | ||
17 | else | ||
18 | /* avoid overflow */ | ||
19 | return 0; | ||
20 | } | ||
21 | |||
22 | #define budget_enforced(t) (tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT) | ||
23 | |||
24 | #define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \ | ||
25 | == PRECISE_ENFORCEMENT) | ||
26 | |||
27 | static inline int requeue_preempted_job(struct task_struct* t) | ||
28 | { | ||
29 | /* Add task to ready queue only if not subject to budget enforcement or | ||
30 | * if the job has budget remaining. t may be NULL. | ||
31 | */ | ||
32 | return t && !is_completed(t) && | ||
33 | (!budget_exhausted(t) || !budget_enforced(t)); | ||
34 | } | ||
35 | |||
36 | #endif | ||
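For illustration, a sketch of how a plugin might combine these helpers when a job is preempted; the rt_domain_t parameter and __add_ready() come from rt_domain.h later in this patch, and the calling context is an assumption:

/* Sketch: decide whether a preempted job goes back on the ready queue. */
static void demo_requeue_after_preemption(rt_domain_t* dom,
					  struct task_struct* prev)
{
	if (requeue_preempted_job(prev))
		/* not enforced, or budget left: let it compete for the CPU */
		__add_ready(dom, prev);
	/* otherwise the job stays off the ready queue until its budget is
	 * replenished (typically at its next release) */
}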
diff --git a/include/litmus/ceiling.h b/include/litmus/ceiling.h
new file mode 100644
index 000000000000..f3d3889315f7
--- /dev/null
+++ b/include/litmus/ceiling.h
@@ -0,0 +1,36 @@
1 | #ifndef _LITMUS_CEILING_H_ | ||
2 | #define _LITMUS_CEILING_H_ | ||
3 | |||
4 | #ifdef CONFIG_LITMUS_LOCKING | ||
5 | |||
6 | void __srp_ceiling_block(struct task_struct *cur); | ||
7 | |||
8 | DECLARE_PER_CPU(int, srp_objects_in_use); | ||
9 | |||
10 | /* assumes preemptions off */ | ||
11 | static inline void srp_ceiling_block(void) | ||
12 | { | ||
13 | struct task_struct *tsk = current; | ||
14 | |||
15 | /* Only applies to real-time tasks. */ | ||
16 | if (!is_realtime(tsk)) | ||
17 | return; | ||
18 | |||
19 | /* Bail out early if there aren't any SRP resources around. */ | ||
20 | if (likely(!raw_cpu_read(srp_objects_in_use))) | ||
21 | return; | ||
22 | |||
23 | /* Avoid recursive ceiling blocking. */ | ||
24 | if (unlikely(tsk->rt_param.srp_non_recurse)) | ||
25 | return; | ||
26 | |||
27 | /* must take slow path */ | ||
28 | __srp_ceiling_block(tsk); | ||
29 | } | ||
30 | |||
31 | #else | ||
32 | #define srp_ceiling_block() /* nothing */ | ||
33 | #endif | ||
34 | |||
35 | |||
36 | #endif \ No newline at end of file | ||
diff --git a/include/litmus/clustered.h b/include/litmus/clustered.h
new file mode 100644
index 000000000000..fc7f0f87966e
--- /dev/null
+++ b/include/litmus/clustered.h
@@ -0,0 +1,46 @@
1 | #ifndef CLUSTERED_H | ||
2 | #define CLUSTERED_H | ||
3 | |||
4 | /* Which cache level should be used to group CPUs into clusters? | ||
5 | * GLOBAL_CLUSTER means that all CPUs form a single cluster (just like under | ||
6 | * global scheduling). | ||
7 | */ | ||
8 | enum cache_level { | ||
9 | GLOBAL_CLUSTER = 0, | ||
10 | L1_CLUSTER = 1, | ||
11 | L2_CLUSTER = 2, | ||
12 | L3_CLUSTER = 3 | ||
13 | }; | ||
14 | |||
15 | int parse_cache_level(const char *str, enum cache_level *level); | ||
16 | const char* cache_level_name(enum cache_level level); | ||
17 | |||
18 | /* expose a cache level in a /proc dir */ | ||
19 | struct proc_dir_entry* create_cluster_file(struct proc_dir_entry* parent, | ||
20 | enum cache_level* level); | ||
21 | |||
22 | |||
23 | |||
24 | struct scheduling_cluster { | ||
25 | unsigned int id; | ||
26 | /* list of CPUs that are part of this cluster */ | ||
27 | struct list_head cpus; | ||
28 | }; | ||
29 | |||
30 | struct cluster_cpu { | ||
31 | unsigned int id; /* which CPU is this? */ | ||
32 | struct list_head cluster_list; /* List of the CPUs in this cluster. */ | ||
33 | struct scheduling_cluster* cluster; /* The cluster that this CPU belongs to. */ | ||
34 | }; | ||
35 | |||
36 | int get_cluster_size(enum cache_level level); | ||
37 | |||
38 | int assign_cpus_to_clusters(enum cache_level level, | ||
39 | struct scheduling_cluster* clusters[], | ||
40 | unsigned int num_clusters, | ||
41 | struct cluster_cpu* cpus[], | ||
42 | unsigned int num_cpus); | ||
43 | |||
44 | int get_shared_cpu_map(cpumask_var_t mask, unsigned int cpu, unsigned int index); | ||
45 | |||
46 | #endif | ||
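For illustration, a sketch of building clusters at the L2 level; allocation of the clusters[] and cpus[] arrays is plugin-specific and assumed to have happened already:

/* Sketch: group the online CPUs into clusters that share an L2 cache. */
static int demo_setup_l2_clusters(struct scheduling_cluster* clusters[],
				  struct cluster_cpu* cpus[],
				  unsigned int num_online)
{
	int cluster_size = get_cluster_size(L2_CLUSTER);
	unsigned int num_clusters;

	if (cluster_size <= 0)
		return -EINVAL;

	num_clusters = DIV_ROUND_UP(num_online, cluster_size);

	/* link each cluster_cpu to its scheduling_cluster and populate the
	 * per-cluster CPU lists */
	return assign_cpus_to_clusters(L2_CLUSTER, clusters, num_clusters,
				       cpus, num_online);
}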
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
new file mode 100644
index 000000000000..bbaf22ea7f12
--- /dev/null
+++ b/include/litmus/edf_common.h
@@ -0,0 +1,25 @@
1 | /* | ||
2 | * EDF common data structures and utility functions shared by all EDF | ||
3 | * based scheduler plugins | ||
4 | */ | ||
5 | |||
6 | /* CLEANUP: Add comments and make it less messy. | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #ifndef __UNC_EDF_COMMON_H__ | ||
11 | #define __UNC_EDF_COMMON_H__ | ||
12 | |||
13 | #include <litmus/rt_domain.h> | ||
14 | |||
15 | void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
16 | release_jobs_t release); | ||
17 | |||
18 | int edf_higher_prio(struct task_struct* first, | ||
19 | struct task_struct* second); | ||
20 | |||
21 | int edf_ready_order(struct bheap_node* a, struct bheap_node* b); | ||
22 | |||
23 | int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t); | ||
24 | |||
25 | #endif | ||
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
new file mode 100644
index 000000000000..fd9b30dbfb34
--- /dev/null
+++ b/include/litmus/fdso.h
@@ -0,0 +1,78 @@
1 | /* fdso.h - file descriptor attached shared objects | ||
2 | * | ||
3 | * (c) 2007 B. Brandenburg, LITMUS^RT project | ||
4 | */ | ||
5 | |||
6 | #ifndef _LINUX_FDSO_H_ | ||
7 | #define _LINUX_FDSO_H_ | ||
8 | |||
9 | #include <linux/list.h> | ||
10 | #include <asm/atomic.h> | ||
11 | |||
12 | #include <linux/fs.h> | ||
13 | #include <linux/slab.h> | ||
14 | |||
15 | #define MAX_OBJECT_DESCRIPTORS 85 | ||
16 | |||
17 | typedef enum { | ||
18 | MIN_OBJ_TYPE = 0, | ||
19 | |||
20 | FMLP_SEM = 0, | ||
21 | SRP_SEM = 1, | ||
22 | |||
23 | MPCP_SEM = 2, | ||
24 | MPCP_VS_SEM = 3, | ||
25 | DPCP_SEM = 4, | ||
26 | PCP_SEM = 5, | ||
27 | |||
28 | DFLP_SEM = 6, | ||
29 | |||
30 | MAX_OBJ_TYPE = 6 | ||
31 | } obj_type_t; | ||
32 | |||
33 | struct inode_obj_id { | ||
34 | struct list_head list; | ||
35 | atomic_t count; | ||
36 | struct inode* inode; | ||
37 | |||
38 | obj_type_t type; | ||
39 | void* obj; | ||
40 | unsigned int id; | ||
41 | }; | ||
42 | |||
43 | struct fdso_ops; | ||
44 | |||
45 | struct od_table_entry { | ||
46 | unsigned int used; | ||
47 | |||
48 | struct inode_obj_id* obj; | ||
49 | const struct fdso_ops* class; | ||
50 | }; | ||
51 | |||
52 | struct fdso_ops { | ||
53 | int (*create)(void** obj_ref, obj_type_t type, void* __user); | ||
54 | void (*destroy)(obj_type_t type, void*); | ||
55 | int (*open) (struct od_table_entry*, void* __user); | ||
56 | int (*close) (struct od_table_entry*); | ||
57 | }; | ||
58 | |||
59 | /* translate a userspace supplied od into the raw table entry | ||
60 | * returns NULL if od is invalid | ||
61 | */ | ||
62 | struct od_table_entry* get_entry_for_od(int od); | ||
63 | |||
64 | /* translate a userspace supplied od into the associated object | ||
65 | * returns NULL if od is invalid | ||
66 | */ | ||
67 | static inline void* od_lookup(int od, obj_type_t type) | ||
68 | { | ||
69 | struct od_table_entry* e = get_entry_for_od(od); | ||
70 | return e && e->obj->type == type ? e->obj->obj : NULL; | ||
71 | } | ||
72 | |||
73 | #define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM)) | ||
74 | #define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) | ||
75 | #define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) | ||
76 | |||
77 | |||
78 | #endif | ||
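For illustration, a sketch of how a system-call backend might resolve a user-supplied object descriptor; struct srp_semaphore is defined elsewhere in the patch, and the error handling shown here is an assumption:

/* Sketch: translate an object descriptor into an SRP semaphore. */
static long demo_resolve_srp_od(int od)
{
	struct srp_semaphore* sem = lookup_srp_sem(od);

	if (!sem)
		return -EINVAL;	/* invalid od, or an od of a different type */

	/* ... protocol-specific operation on sem ... */
	return 0;
}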
diff --git a/include/litmus/fp_common.h b/include/litmus/fp_common.h
new file mode 100644
index 000000000000..19356c0fa6c1
--- /dev/null
+++ b/include/litmus/fp_common.h
@@ -0,0 +1,105 @@
1 | /* Fixed-priority scheduler support. | ||
2 | */ | ||
3 | |||
4 | #ifndef __FP_COMMON_H__ | ||
5 | #define __FP_COMMON_H__ | ||
6 | |||
7 | #include <litmus/rt_domain.h> | ||
8 | |||
9 | #include <asm/bitops.h> | ||
10 | |||
11 | |||
12 | void fp_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
13 | release_jobs_t release); | ||
14 | |||
15 | int fp_higher_prio(struct task_struct* first, | ||
16 | struct task_struct* second); | ||
17 | |||
18 | int fp_ready_order(struct bheap_node* a, struct bheap_node* b); | ||
19 | |||
20 | #define FP_PRIO_BIT_WORDS (LITMUS_MAX_PRIORITY / BITS_PER_LONG) | ||
21 | |||
22 | #if (LITMUS_MAX_PRIORITY % BITS_PER_LONG) | ||
23 | #error LITMUS_MAX_PRIORITY must be a multiple of BITS_PER_LONG | ||
24 | #endif | ||
25 | |||
26 | /* bitmask-indexed priority queue */ | ||
27 | struct fp_prio_queue { | ||
28 | unsigned long bitmask[FP_PRIO_BIT_WORDS]; | ||
29 | struct bheap queue[LITMUS_MAX_PRIORITY]; | ||
30 | }; | ||
31 | |||
32 | void fp_prio_queue_init(struct fp_prio_queue* q); | ||
33 | |||
34 | static inline void fpq_set(struct fp_prio_queue* q, unsigned int index) | ||
35 | { | ||
36 | unsigned long *word = q->bitmask + (index / BITS_PER_LONG); | ||
37 | __set_bit(index % BITS_PER_LONG, word); | ||
38 | } | ||
39 | |||
40 | static inline void fpq_clear(struct fp_prio_queue* q, unsigned int index) | ||
41 | { | ||
42 | unsigned long *word = q->bitmask + (index / BITS_PER_LONG); | ||
43 | __clear_bit(index % BITS_PER_LONG, word); | ||
44 | } | ||
45 | |||
46 | static inline unsigned int fpq_find(struct fp_prio_queue* q) | ||
47 | { | ||
48 | int i; | ||
49 | |||
50 | /* loop optimizer should unroll this */ | ||
51 | for (i = 0; i < FP_PRIO_BIT_WORDS; i++) | ||
52 | if (q->bitmask[i]) | ||
53 | return __ffs(q->bitmask[i]) + i * BITS_PER_LONG; | ||
54 | |||
55 | return LITMUS_MAX_PRIORITY; /* nothing found */ | ||
56 | } | ||
57 | |||
58 | static inline void fp_prio_add(struct fp_prio_queue* q, struct task_struct* t, unsigned int index) | ||
59 | { | ||
60 | BUG_ON(index >= LITMUS_MAX_PRIORITY); | ||
61 | BUG_ON(bheap_node_in_heap(tsk_rt(t)->heap_node)); | ||
62 | |||
63 | fpq_set(q, index); | ||
64 | bheap_insert(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node); | ||
65 | } | ||
66 | |||
67 | static inline void fp_prio_remove(struct fp_prio_queue* q, struct task_struct* t, unsigned int index) | ||
68 | { | ||
69 | BUG_ON(!is_queued(t)); | ||
70 | |||
71 | bheap_delete(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node); | ||
72 | if (likely(bheap_empty(&q->queue[index]))) | ||
73 | fpq_clear(q, index); | ||
74 | } | ||
75 | |||
76 | static inline struct task_struct* fp_prio_peek(struct fp_prio_queue* q) | ||
77 | { | ||
78 | unsigned int idx = fpq_find(q); | ||
79 | struct bheap_node* hn; | ||
80 | |||
81 | if (idx < LITMUS_MAX_PRIORITY) { | ||
82 | hn = bheap_peek(fp_ready_order, &q->queue[idx]); | ||
83 | return bheap2task(hn); | ||
84 | } else | ||
85 | return NULL; | ||
86 | } | ||
87 | |||
88 | static inline struct task_struct* fp_prio_take(struct fp_prio_queue* q) | ||
89 | { | ||
90 | unsigned int idx = fpq_find(q); | ||
91 | struct bheap_node* hn; | ||
92 | |||
93 | if (idx < LITMUS_MAX_PRIORITY) { | ||
94 | hn = bheap_take(fp_ready_order, &q->queue[idx]); | ||
95 | if (likely(bheap_empty(&q->queue[idx]))) | ||
96 | fpq_clear(q, idx); | ||
97 | return bheap2task(hn); | ||
98 | } else | ||
99 | return NULL; | ||
100 | } | ||
101 | |||
102 | int fp_preemption_needed(struct fp_prio_queue* q, struct task_struct *t); | ||
103 | |||
104 | |||
105 | #endif | ||
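The bitmask is what makes fp_prio_peek()/fp_prio_take() cheap: one bit per non-empty bucket, so the highest-priority bucket is found with at most FP_PRIO_BIT_WORDS word tests plus a single find-first-set. A standalone user-space illustration of the same indexing, assuming 512 priorities and 64-bit longs (the real bound comes from LITMUS_MAX_PRIORITY):

#include <stdio.h>

#define MAX_PRIO 512
#define WORD_BITS 64
#define WORDS (MAX_PRIO / WORD_BITS)

int main(void)
{
	unsigned long bitmask[WORDS] = { 0 };
	unsigned int prio = 70;
	int i;

	/* fpq_set(): mark bucket 70 as non-empty (word 1, bit 6) */
	bitmask[prio / WORD_BITS] |= 1UL << (prio % WORD_BITS);

	/* fpq_find(): lowest set bit of the first non-zero word;
	 * __builtin_ctzl plays the role of the kernel's __ffs() */
	for (i = 0; i < WORDS; i++)
		if (bitmask[i]) {
			printf("highest-priority bucket: %d\n",
			       __builtin_ctzl(bitmask[i]) + i * WORD_BITS);
			break;
		}
	return 0;	/* prints 70 */
}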
diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h
new file mode 100644
index 000000000000..642de98542c8
--- /dev/null
+++ b/include/litmus/fpmath.h
@@ -0,0 +1,147 @@
1 | #ifndef __FP_MATH_H__ | ||
2 | #define __FP_MATH_H__ | ||
3 | |||
4 | #include <linux/math64.h> | ||
5 | |||
6 | #ifndef __KERNEL__ | ||
7 | #include <stdint.h> | ||
8 | #define abs(x) (((x) < 0) ? -(x) : (x)) | ||
9 | #endif | ||
10 | |||
11 | // Use 64-bit because we want to track things at the nanosecond scale. | ||
12 | // This can lead to very large numbers. | ||
13 | typedef int64_t fpbuf_t; | ||
14 | typedef struct | ||
15 | { | ||
16 | fpbuf_t val; | ||
17 | } fp_t; | ||
18 | |||
19 | #define FP_SHIFT 10 | ||
20 | #define ROUND_BIT (FP_SHIFT - 1) | ||
21 | |||
22 | #define _fp(x) ((fp_t) {x}) | ||
23 | |||
24 | #ifdef __KERNEL__ | ||
25 | static const fp_t LITMUS_FP_ZERO = {.val = 0}; | ||
26 | static const fp_t LITMUS_FP_ONE = {.val = (1 << FP_SHIFT)}; | ||
27 | #endif | ||
28 | |||
29 | static inline fp_t FP(fpbuf_t x) | ||
30 | { | ||
31 | return _fp(((fpbuf_t) x) << FP_SHIFT); | ||
32 | } | ||
33 | |||
34 | /* divide two integers to obtain a fixed point value */ | ||
35 | static inline fp_t _frac(fpbuf_t a, fpbuf_t b) | ||
36 | { | ||
37 | return _fp(div64_s64(FP(a).val, (b))); | ||
38 | } | ||
39 | |||
40 | static inline fpbuf_t _point(fp_t x) | ||
41 | { | ||
42 | return (x.val % (1 << FP_SHIFT)); | ||
43 | |||
44 | } | ||
45 | |||
46 | #define fp2str(x) x.val | ||
47 | /*(x.val >> FP_SHIFT), (x.val % (1 << FP_SHIFT)) */ | ||
48 | #define _FP_ "%ld/1024" | ||
49 | |||
50 | static inline fpbuf_t _floor(fp_t x) | ||
51 | { | ||
52 | return x.val >> FP_SHIFT; | ||
53 | } | ||
54 | |||
55 | /* FIXME: negative rounding */ | ||
56 | static inline fpbuf_t _round(fp_t x) | ||
57 | { | ||
58 | return _floor(x) + ((x.val >> ROUND_BIT) & 1); | ||
59 | } | ||
60 | |||
61 | /* multiply two fixed point values */ | ||
62 | static inline fp_t _mul(fp_t a, fp_t b) | ||
63 | { | ||
64 | return _fp((a.val * b.val) >> FP_SHIFT); | ||
65 | } | ||
66 | |||
67 | static inline fp_t _div(fp_t a, fp_t b) | ||
68 | { | ||
69 | #if !defined(__KERNEL__) && !defined(unlikely) | ||
70 | #define unlikely(x) (x) | ||
71 | #define DO_UNDEF_UNLIKELY | ||
72 | #endif | ||
73 | /* try not to overflow */ | ||
74 | if (unlikely( a.val > (2l << ((sizeof(fpbuf_t)*8) - FP_SHIFT)) )) | ||
75 | return _fp((a.val / b.val) << FP_SHIFT); | ||
76 | else | ||
77 | return _fp((a.val << FP_SHIFT) / b.val); | ||
78 | #ifdef DO_UNDEF_UNLIKELY | ||
79 | #undef unlikely | ||
80 | #undef DO_UNDEF_UNLIKELY | ||
81 | #endif | ||
82 | } | ||
83 | |||
84 | static inline fp_t _add(fp_t a, fp_t b) | ||
85 | { | ||
86 | return _fp(a.val + b.val); | ||
87 | } | ||
88 | |||
89 | static inline fp_t _sub(fp_t a, fp_t b) | ||
90 | { | ||
91 | return _fp(a.val - b.val); | ||
92 | } | ||
93 | |||
94 | static inline fp_t _neg(fp_t x) | ||
95 | { | ||
96 | return _fp(-x.val); | ||
97 | } | ||
98 | |||
99 | static inline fp_t _abs(fp_t x) | ||
100 | { | ||
101 | return _fp(abs(x.val)); | ||
102 | } | ||
103 | |||
104 | /* works the same as casting float/double to integer */ | ||
105 | static inline fpbuf_t _fp_to_integer(fp_t x) | ||
106 | { | ||
107 | return _floor(_abs(x)) * ((x.val > 0) ? 1 : -1); | ||
108 | } | ||
109 | |||
110 | static inline fp_t _integer_to_fp(fpbuf_t x) | ||
111 | { | ||
112 | return _frac(x,1); | ||
113 | } | ||
114 | |||
115 | static inline int _leq(fp_t a, fp_t b) | ||
116 | { | ||
117 | return a.val <= b.val; | ||
118 | } | ||
119 | |||
120 | static inline int _geq(fp_t a, fp_t b) | ||
121 | { | ||
122 | return a.val >= b.val; | ||
123 | } | ||
124 | |||
125 | static inline int _lt(fp_t a, fp_t b) | ||
126 | { | ||
127 | return a.val < b.val; | ||
128 | } | ||
129 | |||
130 | static inline int _gt(fp_t a, fp_t b) | ||
131 | { | ||
132 | return a.val > b.val; | ||
133 | } | ||
134 | |||
135 | static inline int _eq(fp_t a, fp_t b) | ||
136 | { | ||
137 | return a.val == b.val; | ||
138 | } | ||
139 | |||
140 | static inline fp_t _max(fp_t a, fp_t b) | ||
141 | { | ||
142 | if (a.val < b.val) | ||
143 | return b; | ||
144 | else | ||
145 | return a; | ||
146 | } | ||
147 | #endif | ||
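For a concrete feel of the Q-format above (FP_SHIFT = 10, so 1024 represents 1.0), a standalone user-space illustration of the same arithmetic; the numbers are examples only:

#include <stdio.h>
#include <stdint.h>

#define FP_SHIFT 10

int main(void)
{
	int64_t three_halves = (3 << FP_SHIFT) / 2;	/* _frac(3, 2) -> 1536 */
	int64_t half = (1 << FP_SHIFT) / 2;		/* _frac(1, 2) ->  512 */
	int64_t product = (three_halves * half) >> FP_SHIFT;	/* _mul */

	/* prints "1.5 * 0.5 = 768/1024 = 0.750" */
	printf("1.5 * 0.5 = %lld/1024 = %.3f\n",
	       (long long) product, product / 1024.0);
	return 0;
}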
diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h
new file mode 100644
index 000000000000..24771dfaebf8
--- /dev/null
+++ b/include/litmus/jobs.h
@@ -0,0 +1,10 @@
1 | #ifndef __LITMUS_JOBS_H__ | ||
2 | #define __LITMUS_JOBS_H__ | ||
3 | |||
4 | void prepare_for_next_period(struct task_struct *t); | ||
5 | void release_at(struct task_struct *t, lt_t start); | ||
6 | |||
7 | long default_wait_for_release_at(lt_t release_time); | ||
8 | long complete_job(void); | ||
9 | |||
10 | #endif | ||
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index c87863c9b231..a6eb534ee0fa 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -6,7 +6,50 @@
6 | #ifndef _LINUX_LITMUS_H_ | 6 | #ifndef _LINUX_LITMUS_H_ |
7 | #define _LINUX_LITMUS_H_ | 7 | #define _LINUX_LITMUS_H_ |
8 | 8 | ||
9 | #include <litmus/debug_trace.h> | ||
10 | |||
11 | #ifdef CONFIG_RELEASE_MASTER | ||
12 | extern atomic_t release_master_cpu; | ||
13 | #endif | ||
14 | |||
15 | /* in_list - is a given list_head queued on some list? | ||
16 | */ | ||
17 | static inline int in_list(struct list_head* list) | ||
18 | { | ||
19 | return !( /* case 1: deleted */ | ||
20 | (list->next == LIST_POISON1 && | ||
21 | list->prev == LIST_POISON2) | ||
22 | || | ||
23 | /* case 2: initialized */ | ||
24 | (list->next == list && | ||
25 | list->prev == list) | ||
26 | ); | ||
27 | } | ||
28 | |||
29 | struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq); | ||
30 | |||
31 | #define NO_CPU 0xffffffff | ||
32 | |||
33 | void litmus_fork(struct task_struct *tsk); | ||
34 | void litmus_exec(void); | ||
35 | /* clean up real-time state of a task */ | ||
36 | void litmus_clear_state(struct task_struct *dead_tsk); | ||
37 | void exit_litmus(struct task_struct *dead_tsk); | ||
38 | |||
39 | /* Prevent the plugin from being switched-out from underneath a code | ||
40 | * path. Might sleep, so may be called only from non-atomic context. */ | ||
41 | void litmus_plugin_switch_disable(void); | ||
42 | void litmus_plugin_switch_enable(void); | ||
43 | |||
44 | long litmus_admit_task(struct task_struct *tsk); | ||
45 | void litmus_exit_task(struct task_struct *tsk); | ||
46 | void litmus_dealloc(struct task_struct *tsk); | ||
47 | void litmus_do_exit(struct task_struct *tsk); | ||
48 | int litmus_be_migrate_to(int cpu); | ||
49 | |||
9 | #define is_realtime(t) ((t)->policy == SCHED_LITMUS) | 50 | #define is_realtime(t) ((t)->policy == SCHED_LITMUS) |
51 | #define rt_transition_pending(t) \ | ||
52 | ((t)->rt_param.transition_pending) | ||
10 | 53 | ||
11 | #define tsk_rt(t) (&(t)->rt_param) | 54 | #define tsk_rt(t) (&(t)->rt_param) |
12 | 55 | ||
@@ -28,6 +71,7 @@
28 | #define get_partition(t) (tsk_rt(t)->task_params.cpu) | 71 | #define get_partition(t) (tsk_rt(t)->task_params.cpu) |
29 | #define get_priority(t) (tsk_rt(t)->task_params.priority) | 72 | #define get_priority(t) (tsk_rt(t)->task_params.priority) |
30 | #define get_class(t) (tsk_rt(t)->task_params.cls) | 73 | #define get_class(t) (tsk_rt(t)->task_params.cls) |
74 | #define get_release_policy(t) (tsk_rt(t)->task_params.release_policy) | ||
31 | 75 | ||
32 | /* job_param macros */ | 76 | /* job_param macros */ |
33 | #define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) | 77 | #define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) |
@@ -35,6 +79,15 @@
35 | #define get_release(t) (tsk_rt(t)->job_params.release) | 79 | #define get_release(t) (tsk_rt(t)->job_params.release) |
36 | #define get_lateness(t) (tsk_rt(t)->job_params.lateness) | 80 | #define get_lateness(t) (tsk_rt(t)->job_params.lateness) |
37 | 81 | ||
82 | /* release policy macros */ | ||
83 | #define is_periodic(t) (get_release_policy(t) == TASK_PERIODIC) | ||
84 | #define is_sporadic(t) (get_release_policy(t) == TASK_SPORADIC) | ||
85 | #ifdef CONFIG_ALLOW_EARLY_RELEASE | ||
86 | #define is_early_releasing(t) (get_release_policy(t) == TASK_EARLY) | ||
87 | #else | ||
88 | #define is_early_releasing(t) (0) | ||
89 | #endif | ||
90 | |||
38 | #define is_hrt(t) \ | 91 | #define is_hrt(t) \ |
39 | (tsk_rt(t)->task_params.cls == RT_CLASS_HARD) | 92 | (tsk_rt(t)->task_params.cls == RT_CLASS_HARD) |
40 | #define is_srt(t) \ | 93 | #define is_srt(t) \ |
@@ -48,6 +101,188 @@ static inline lt_t litmus_clock(void)
48 | return ktime_to_ns(ktime_get()); | 101 | return ktime_to_ns(ktime_get()); |
49 | } | 102 | } |
50 | 103 | ||
104 | /* A macro to convert from nanoseconds to ktime_t. */ | ||
105 | #define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t) | ||
106 | |||
107 | #define get_domain(t) (tsk_rt(t)->domain) | ||
108 | |||
109 | /* Honor the flag in the preempt_count variable that is set | ||
110 | * when scheduling is in progress. | ||
111 | */ | ||
112 | #define is_current_running() \ | ||
113 | ((current)->state == TASK_RUNNING || \ | ||
114 | preempt_count() & PREEMPT_ACTIVE) | ||
115 | |||
116 | #define is_released(t, now) \ | ||
117 | (lt_before_eq(get_release(t), now)) | ||
118 | #define is_tardy(t, now) \ | ||
119 | (lt_before_eq(tsk_rt(t)->job_params.deadline, now)) | ||
120 | |||
121 | /* real-time comparison macros */ | ||
122 | #define earlier_deadline(a, b) (lt_before(\ | ||
123 | (a)->rt_param.job_params.deadline,\ | ||
124 | (b)->rt_param.job_params.deadline)) | ||
125 | #define earlier_release(a, b) (lt_before(\ | ||
126 | (a)->rt_param.job_params.release,\ | ||
127 | (b)->rt_param.job_params.release)) | ||
128 | |||
129 | void preempt_if_preemptable(struct task_struct* t, int on_cpu); | ||
130 | |||
131 | #define bheap2task(hn) ((struct task_struct*) hn->value) | ||
132 | |||
133 | #ifdef CONFIG_NP_SECTION | ||
134 | |||
135 | static inline int is_kernel_np(struct task_struct *t) | ||
136 | { | ||
137 | return tsk_rt(t)->kernel_np; | ||
138 | } | ||
139 | |||
140 | static inline int is_user_np(struct task_struct *t) | ||
141 | { | ||
142 | return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0; | ||
143 | } | ||
144 | |||
145 | static inline void request_exit_np(struct task_struct *t) | ||
146 | { | ||
147 | if (is_user_np(t)) { | ||
148 | /* Set the flag that tells user space to call | ||
149 | * into the kernel at the end of a critical section. */ | ||
150 | if (likely(tsk_rt(t)->ctrl_page)) { | ||
151 | TRACE_TASK(t, "setting delayed_preemption flag\n"); | ||
152 | tsk_rt(t)->ctrl_page->sched.np.preempt = 1; | ||
153 | } | ||
154 | } | ||
155 | } | ||
156 | |||
157 | static inline void make_np(struct task_struct *t) | ||
158 | { | ||
159 | tsk_rt(t)->kernel_np++; | ||
160 | } | ||
161 | |||
162 | /* Caller should check if preemption is necessary when | ||
163 | * the function returns 0. | ||
164 | */ | ||
165 | static inline int take_np(struct task_struct *t) | ||
166 | { | ||
167 | return --tsk_rt(t)->kernel_np; | ||
168 | } | ||
169 | |||
170 | /* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */ | ||
171 | static inline int request_exit_np_atomic(struct task_struct *t) | ||
172 | { | ||
173 | union np_flag old, new; | ||
174 | |||
175 | if (tsk_rt(t)->ctrl_page) { | ||
176 | old.raw = tsk_rt(t)->ctrl_page->sched.raw; | ||
177 | if (old.np.flag == 0) { | ||
178 | /* no longer non-preemptive */ | ||
179 | return 0; | ||
180 | } else if (old.np.preempt) { | ||
181 | /* already set, nothing for us to do */ | ||
182 | return 1; | ||
183 | } else { | ||
184 | /* non-preemptive and flag not set */ | ||
185 | new.raw = old.raw; | ||
186 | new.np.preempt = 1; | ||
187 | /* if we get old back, then we atomically set the flag */ | ||
188 | return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw; | ||
189 | /* If we raced with a concurrent change, then so be | ||
190 | * it. Deliver it by IPI. We don't want an unbounded | ||
191 | * retry loop here since tasks might exploit that to | ||
192 | * keep the kernel busy indefinitely. */ | ||
193 | } | ||
194 | } else | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | #else | ||
199 | |||
200 | static inline int is_kernel_np(struct task_struct* t) | ||
201 | { | ||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | static inline int is_user_np(struct task_struct* t) | ||
206 | { | ||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | static inline void request_exit_np(struct task_struct *t) | ||
211 | { | ||
212 | /* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */ | ||
213 | BUG(); | ||
214 | } | ||
215 | |||
216 | static inline int request_exit_np_atomic(struct task_struct *t) | ||
217 | { | ||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | #endif | ||
222 | |||
223 | static inline void clear_exit_np(struct task_struct *t) | ||
224 | { | ||
225 | if (likely(tsk_rt(t)->ctrl_page)) | ||
226 | tsk_rt(t)->ctrl_page->sched.np.preempt = 0; | ||
227 | } | ||
228 | |||
229 | static inline int is_np(struct task_struct *t) | ||
230 | { | ||
231 | #ifdef CONFIG_SCHED_DEBUG_TRACE | ||
232 | int kernel, user; | ||
233 | kernel = is_kernel_np(t); | ||
234 | user = is_user_np(t); | ||
235 | if (kernel || user) | ||
236 | TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n", | ||
237 | |||
238 | kernel, user); | ||
239 | return kernel || user; | ||
240 | #else | ||
241 | return unlikely(is_kernel_np(t) || is_user_np(t)); | ||
242 | #endif | ||
243 | } | ||
244 | |||
245 | static inline int is_present(struct task_struct* t) | ||
246 | { | ||
247 | return t && tsk_rt(t)->present; | ||
248 | } | ||
249 | |||
250 | static inline int is_completed(struct task_struct* t) | ||
251 | { | ||
252 | return t && tsk_rt(t)->completed; | ||
253 | } | ||
254 | |||
255 | |||
256 | /* Used to convert ns-specified execution costs and periods into | ||
257 | * integral quanta equivalents. | ||
258 | */ | ||
259 | #define LITMUS_QUANTUM_LENGTH_NS (CONFIG_LITMUS_QUANTUM_LENGTH_US * 1000ULL) | ||
260 | |||
261 | /* make the unit explicit */ | ||
262 | typedef unsigned long quanta_t; | ||
263 | |||
264 | enum round { | ||
265 | FLOOR, | ||
266 | CEIL | ||
267 | }; | ||
268 | |||
269 | static inline quanta_t time2quanta(lt_t time, enum round round) | ||
270 | { | ||
271 | s64 quantum_length = LITMUS_QUANTUM_LENGTH_NS; | ||
272 | |||
273 | if (do_div(time, quantum_length) && round == CEIL) | ||
274 | time++; | ||
275 | return (quanta_t) time; | ||
276 | } | ||
277 | |||
278 | static inline lt_t quanta2time(quanta_t quanta) | ||
279 | { | ||
280 | return quanta * LITMUS_QUANTUM_LENGTH_NS; | ||
281 | } | ||
282 | |||
283 | /* By how much is cpu staggered behind CPU 0? */ | ||
284 | u64 cpu_stagger_offset(int cpu); | ||
285 | |||
51 | static inline struct control_page* get_control_page(struct task_struct *t) | 286 | static inline struct control_page* get_control_page(struct task_struct *t) |
52 | { | 287 | { |
53 | return tsk_rt(t)->ctrl_page; | 288 | return tsk_rt(t)->ctrl_page; |
@@ -58,4 +293,30 @@ static inline int has_control_page(struct task_struct* t)
58 | return tsk_rt(t)->ctrl_page != NULL; | 293 | return tsk_rt(t)->ctrl_page != NULL; |
59 | } | 294 | } |
60 | 295 | ||
296 | |||
297 | #ifdef CONFIG_SCHED_OVERHEAD_TRACE | ||
298 | |||
299 | #define TS_SYSCALL_IN_START \ | ||
300 | if (has_control_page(current)) { \ | ||
301 | __TS_SYSCALL_IN_START(&get_control_page(current)->ts_syscall_start); \ | ||
302 | } | ||
303 | |||
304 | #define TS_SYSCALL_IN_END \ | ||
305 | if (has_control_page(current)) { \ | ||
306 | unsigned long flags; \ | ||
307 | uint64_t irqs; \ | ||
308 | local_irq_save(flags); \ | ||
309 | irqs = get_control_page(current)->irq_count - \ | ||
310 | get_control_page(current)->irq_syscall_start; \ | ||
311 | __TS_SYSCALL_IN_END(&irqs); \ | ||
312 | local_irq_restore(flags); \ | ||
313 | } | ||
314 | |||
315 | #else | ||
316 | |||
317 | #define TS_SYSCALL_IN_START | ||
318 | #define TS_SYSCALL_IN_END | ||
319 | |||
320 | #endif | ||
321 | |||
61 | #endif | 322 | #endif |
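The time2quanta()/quanta2time() pair is easiest to see with numbers. Assuming a 1 ms quantum (CONFIG_LITMUS_QUANTUM_LENGTH_US = 1000 is an assumption here; the real value is a Kconfig option), a 2.5 ms interval spans 3 quanta when rounding up and 2 when rounding down. A user-space sketch that mirrors the do_div() logic:

#include <stdio.h>
#include <stdint.h>

#define QUANTUM_NS 1000000ULL	/* assumed 1 ms quantum */

static unsigned long demo_time2quanta(uint64_t time_ns, int ceil)
{
	uint64_t q = time_ns / QUANTUM_NS;

	/* the kernel version uses do_div(), which yields the remainder */
	if (ceil && (time_ns % QUANTUM_NS))
		q++;
	return (unsigned long) q;
}

int main(void)
{
	printf("CEIL:  %lu\n", demo_time2quanta(2500000, 1));	/* 3 */
	printf("FLOOR: %lu\n", demo_time2quanta(2500000, 0));	/* 2 */
	return 0;
}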
diff --git a/include/litmus/litmus_proc.h b/include/litmus/litmus_proc.h
new file mode 100644
index 000000000000..a5db24c03ec0
--- /dev/null
+++ b/include/litmus/litmus_proc.h
@@ -0,0 +1,63 @@
1 | #include <litmus/sched_plugin.h> | ||
2 | #include <linux/proc_fs.h> | ||
3 | |||
4 | int __init init_litmus_proc(void); | ||
5 | void exit_litmus_proc(void); | ||
6 | |||
7 | struct cd_mapping | ||
8 | { | ||
9 | int id; | ||
10 | cpumask_var_t mask; | ||
11 | struct proc_dir_entry *proc_file; | ||
12 | }; | ||
13 | |||
14 | struct domain_proc_info | ||
15 | { | ||
16 | int num_cpus; | ||
17 | int num_domains; | ||
18 | |||
19 | struct cd_mapping *cpu_to_domains; | ||
20 | struct cd_mapping *domain_to_cpus; | ||
21 | }; | ||
22 | |||
23 | /* | ||
24 | * On success, returns 0 and sets the pointer to the location of the new | ||
25 | * proc dir entry, otherwise returns an error code and sets pde to NULL. | ||
26 | */ | ||
27 | long make_plugin_proc_dir(struct sched_plugin* plugin, | ||
28 | struct proc_dir_entry** pde); | ||
29 | |||
30 | /* | ||
31 | * Plugins should deallocate all child proc directory entries before | ||
32 | * calling this, to avoid memory leaks. | ||
33 | */ | ||
34 | void remove_plugin_proc_dir(struct sched_plugin* plugin); | ||
35 | |||
36 | /* | ||
37 | * Setup the CPU <-> sched domain mappings in proc | ||
38 | */ | ||
39 | long activate_domain_proc(struct domain_proc_info* map); | ||
40 | |||
41 | /* | ||
42 | * Remove the CPU <-> sched domain mappings from proc | ||
43 | */ | ||
44 | long deactivate_domain_proc(void); | ||
45 | |||
46 | /* | ||
47 | * Alloc memory for the mapping | ||
48 | * Note: Does not set up proc files. Use make_sched_domain_maps for that. | ||
49 | */ | ||
50 | long init_domain_proc_info(struct domain_proc_info* map, | ||
51 | int num_cpus, int num_domains); | ||
52 | |||
53 | /* | ||
54 | * Free memory of the mapping | ||
55 | * Note: Does not clean up proc files. Use deactivate_domain_proc for that. | ||
56 | */ | ||
57 | void destroy_domain_proc_info(struct domain_proc_info* map); | ||
58 | |||
59 | /* Copy at most ksize-1 bytes from ubuf into kbuf, null-terminate kbuf, and | ||
60 | * remove a '\n' if present. Returns the number of bytes that were read or | ||
61 | * -EFAULT. */ | ||
62 | int copy_and_chomp(char *kbuf, unsigned long ksize, | ||
63 | __user const char* ubuf, unsigned long ulength); | ||
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
new file mode 100644
index 000000000000..4d7b870cb443
--- /dev/null
+++ b/include/litmus/locking.h
@@ -0,0 +1,28 @@
1 | #ifndef LITMUS_LOCKING_H | ||
2 | #define LITMUS_LOCKING_H | ||
3 | |||
4 | struct litmus_lock_ops; | ||
5 | |||
6 | /* Generic base struct for LITMUS^RT userspace semaphores. | ||
7 | * This structure should be embedded in protocol-specific semaphores. | ||
8 | */ | ||
9 | struct litmus_lock { | ||
10 | struct litmus_lock_ops *ops; | ||
11 | int type; | ||
12 | }; | ||
13 | |||
14 | struct litmus_lock_ops { | ||
15 | /* Current task tries to obtain / drop a reference to a lock. | ||
16 | * Optional methods, allowed by default. */ | ||
17 | int (*open)(struct litmus_lock*, void* __user); | ||
18 | int (*close)(struct litmus_lock*); | ||
19 | |||
20 | /* Current tries to lock/unlock this lock (mandatory methods). */ | ||
21 | int (*lock)(struct litmus_lock*); | ||
22 | int (*unlock)(struct litmus_lock*); | ||
23 | |||
24 | /* The lock is no longer being referenced (mandatory method). */ | ||
25 | void (*deallocate)(struct litmus_lock*); | ||
26 | }; | ||
27 | |||
28 | #endif | ||
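For illustration, the embedding pattern this base struct is designed for; the FIFO semaphore below is hypothetical, not part of the patch:

/* Sketch: a protocol-specific semaphore wraps struct litmus_lock and recovers
 * its own type inside the mandatory callbacks. */
struct demo_fifo_semaphore {
	struct litmus_lock litmus_lock;	/* generic part */
	/* protocol-specific state would follow (owner, wait queue, ...) */
};

static inline struct demo_fifo_semaphore* demo_from_lock(struct litmus_lock* l)
{
	return container_of(l, struct demo_fifo_semaphore, litmus_lock);
}

static int demo_fifo_lock(struct litmus_lock* l)
{
	struct demo_fifo_semaphore* sem = demo_from_lock(l);

	/* ... block until the semaphore is available ... */
	(void) sem;
	return 0;
}

static struct litmus_lock_ops demo_fifo_ops = {
	.lock = demo_fifo_lock,
	/* .unlock and .deallocate are mandatory and would be filled in
	 * analogously; .open/.close may be omitted */
};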
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
new file mode 100644
index 000000000000..bdf5d8e52344
--- /dev/null
+++ b/include/litmus/preempt.h
@@ -0,0 +1,162 @@
1 | #ifndef LITMUS_PREEMPT_H | ||
2 | #define LITMUS_PREEMPT_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/cache.h> | ||
6 | #include <linux/percpu.h> | ||
7 | #include <asm/atomic.h> | ||
8 | |||
9 | #include <litmus/debug_trace.h> | ||
10 | |||
11 | DECLARE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state); | ||
12 | |||
13 | #ifdef CONFIG_PREEMPT_STATE_TRACE | ||
14 | const char* sched_state_name(int s); | ||
15 | #define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args) | ||
16 | #else | ||
17 | #define TRACE_STATE(fmt, args...) /* ignore */ | ||
18 | #endif | ||
19 | |||
20 | #define VERIFY_SCHED_STATE(x) \ | ||
21 | do { int __s = get_sched_state(); \ | ||
22 | if ((__s & (x)) == 0) \ | ||
23 | TRACE_STATE("INVALID s=0x%x (%s) not " \ | ||
24 | "in 0x%x (%s) [%s]\n", \ | ||
25 | __s, sched_state_name(__s), \ | ||
26 | (x), #x, __FUNCTION__); \ | ||
27 | } while (0); | ||
28 | |||
29 | #define TRACE_SCHED_STATE_CHANGE(x, y, cpu) \ | ||
30 | TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n", \ | ||
31 | cpu, (x), sched_state_name(x), \ | ||
32 | (y), sched_state_name(y)) | ||
33 | |||
34 | |||
35 | typedef enum scheduling_state { | ||
36 | TASK_SCHEDULED = (1 << 0), /* The currently scheduled task is the one that | ||
37 | * should be scheduled, and the processor does not | ||
38 | * plan to invoke schedule(). */ | ||
39 | SHOULD_SCHEDULE = (1 << 1), /* A remote processor has determined that the | ||
40 | * processor should reschedule, but this has not | ||
41 | * been communicated yet (IPI still pending). */ | ||
42 | WILL_SCHEDULE = (1 << 2), /* The processor has noticed that it has to | ||
43 | * reschedule and will do so shortly. */ | ||
44 | TASK_PICKED = (1 << 3), /* The processor is currently executing schedule(), | ||
45 | * has selected a new task to schedule, but has not | ||
46 | * yet performed the actual context switch. */ | ||
47 | PICKED_WRONG_TASK = (1 << 4), /* The processor has not yet performed the context | ||
48 | * switch, but a remote processor has already | ||
49 | * determined that a higher-priority task became | ||
50 | * eligible after the task was picked. */ | ||
51 | } sched_state_t; | ||
52 | |||
53 | static inline sched_state_t get_sched_state_on(int cpu) | ||
54 | { | ||
55 | return atomic_read(&per_cpu(resched_state, cpu)); | ||
56 | } | ||
57 | |||
58 | static inline sched_state_t get_sched_state(void) | ||
59 | { | ||
60 | return atomic_read(this_cpu_ptr(&resched_state)); | ||
61 | } | ||
62 | |||
63 | static inline int is_in_sched_state(int possible_states) | ||
64 | { | ||
65 | return get_sched_state() & possible_states; | ||
66 | } | ||
67 | |||
68 | static inline int cpu_is_in_sched_state(int cpu, int possible_states) | ||
69 | { | ||
70 | return get_sched_state_on(cpu) & possible_states; | ||
71 | } | ||
72 | |||
73 | static inline void set_sched_state(sched_state_t s) | ||
74 | { | ||
75 | TRACE_SCHED_STATE_CHANGE(get_sched_state(), s, smp_processor_id()); | ||
76 | atomic_set(this_cpu_ptr(&resched_state), s); | ||
77 | } | ||
78 | |||
79 | static inline int sched_state_transition(sched_state_t from, sched_state_t to) | ||
80 | { | ||
81 | sched_state_t old_state; | ||
82 | |||
83 | old_state = atomic_cmpxchg(this_cpu_ptr(&resched_state), from, to); | ||
84 | if (old_state == from) { | ||
85 | TRACE_SCHED_STATE_CHANGE(from, to, smp_processor_id()); | ||
86 | return 1; | ||
87 | } else | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static inline int sched_state_transition_on(int cpu, | ||
92 | sched_state_t from, | ||
93 | sched_state_t to) | ||
94 | { | ||
95 | sched_state_t old_state; | ||
96 | |||
97 | old_state = atomic_cmpxchg(&per_cpu(resched_state, cpu), from, to); | ||
98 | if (old_state == from) { | ||
99 | TRACE_SCHED_STATE_CHANGE(from, to, cpu); | ||
100 | return 1; | ||
101 | } else | ||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | /* Plugins must call this function after they have decided which job to | ||
106 | * schedule next. IMPORTANT: this function must be called while still holding | ||
107 | * the lock that is used to serialize scheduling decisions. | ||
108 | * | ||
109 | * (Ideally, we would like to use runqueue locks for this purpose, but that | ||
110 | * would lead to deadlocks with the migration code.) | ||
111 | */ | ||
112 | static inline void sched_state_task_picked(void) | ||
113 | { | ||
114 | VERIFY_SCHED_STATE(WILL_SCHEDULE); | ||
115 | |||
116 | /* WILL_SCHEDULE has only a local transition => simple store is ok */ | ||
117 | set_sched_state(TASK_PICKED); | ||
118 | } | ||
119 | |||
120 | static inline void sched_state_entered_schedule(void) | ||
121 | { | ||
122 | /* Update state for the case that we entered schedule() not due to | ||
123 | * set_tsk_need_resched() */ | ||
124 | set_sched_state(WILL_SCHEDULE); | ||
125 | } | ||
126 | |||
127 | /* Called by schedule() to check if the scheduling decision is still valid | ||
128 | * after a context switch. Returns 1 if the CPU needs to reschedule. */ | ||
129 | static inline int sched_state_validate_switch(void) | ||
130 | { | ||
131 | int decision_ok = 0; | ||
132 | |||
133 | VERIFY_SCHED_STATE(PICKED_WRONG_TASK | TASK_PICKED | WILL_SCHEDULE); | ||
134 | |||
135 | if (is_in_sched_state(TASK_PICKED)) { | ||
136 | /* Might be good; let's try to transition out of this | ||
137 | * state. This must be done atomically since remote processors | ||
138 | * may try to change the state, too. */ | ||
139 | decision_ok = sched_state_transition(TASK_PICKED, TASK_SCHEDULED); | ||
140 | } | ||
141 | |||
142 | if (!decision_ok) | ||
143 | TRACE_STATE("validation failed (%s)\n", | ||
144 | sched_state_name(get_sched_state())); | ||
145 | |||
146 | return !decision_ok; | ||
147 | } | ||
148 | |||
149 | /* State transition events. See litmus/preempt.c for details. */ | ||
150 | void sched_state_will_schedule(struct task_struct* tsk); | ||
151 | void sched_state_ipi(void); | ||
152 | /* Cause a CPU (remote or local) to reschedule. */ | ||
153 | void litmus_reschedule(int cpu); | ||
154 | void litmus_reschedule_local(void); | ||
155 | |||
156 | #ifdef CONFIG_DEBUG_KERNEL | ||
157 | void sched_state_plugin_check(void); | ||
158 | #else | ||
159 | #define sched_state_plugin_check() /* no check */ | ||
160 | #endif | ||
161 | |||
162 | #endif | ||
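For illustration, where the hooks typically sit in a plugin's scheduling path; the lock, the helper, and the callback shape below are assumptions, not the plugin API itself:

/* Sketch: the decision is made and TASK_PICKED is entered while still holding
 * the lock that serializes scheduling decisions. */
static DEFINE_RAW_SPINLOCK(demo_lock);
static struct task_struct* demo_pick_next_job(void);	/* assumed helper */

static struct task_struct* demo_schedule(struct task_struct* prev)
{
	struct task_struct* next;

	raw_spin_lock(&demo_lock);
	next = demo_pick_next_job();
	sched_state_task_picked();	/* WILL_SCHEDULE -> TASK_PICKED */
	raw_spin_unlock(&demo_lock);

	/* After the context switch, sched_state_validate_switch() reports
	 * whether a remote CPU moved this CPU to PICKED_WRONG_TASK, in which
	 * case the caller simply schedules again. */
	return next;
}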
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
new file mode 100644
index 000000000000..ac249292e866
--- /dev/null
+++ b/include/litmus/rt_domain.h
@@ -0,0 +1,182 @@
1 | /* CLEANUP: Add comments and make it less messy. | ||
2 | * | ||
3 | */ | ||
4 | |||
5 | #ifndef __UNC_RT_DOMAIN_H__ | ||
6 | #define __UNC_RT_DOMAIN_H__ | ||
7 | |||
8 | #include <litmus/bheap.h> | ||
9 | |||
10 | #define RELEASE_QUEUE_SLOTS 127 /* prime */ | ||
11 | |||
12 | struct _rt_domain; | ||
13 | |||
14 | typedef int (*check_resched_needed_t)(struct _rt_domain *rt); | ||
15 | typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks); | ||
16 | |||
17 | struct release_queue { | ||
18 | /* each slot maintains a list of release heaps sorted | ||
19 | * by release time */ | ||
20 | struct list_head slot[RELEASE_QUEUE_SLOTS]; | ||
21 | }; | ||
22 | |||
23 | typedef struct _rt_domain { | ||
24 | /* runnable rt tasks are in here */ | ||
25 | raw_spinlock_t ready_lock; | ||
26 | struct bheap ready_queue; | ||
27 | |||
28 | /* real-time tasks waiting for release are in here */ | ||
29 | raw_spinlock_t release_lock; | ||
30 | struct release_queue release_queue; | ||
31 | |||
32 | #ifdef CONFIG_RELEASE_MASTER | ||
33 | int release_master; | ||
34 | #endif | ||
35 | |||
36 | /* for moving tasks to the release queue */ | ||
37 | raw_spinlock_t tobe_lock; | ||
38 | struct list_head tobe_released; | ||
39 | |||
40 | /* how do we check if we need to kick another CPU? */ | ||
41 | check_resched_needed_t check_resched; | ||
42 | |||
43 | /* how do we release jobs? */ | ||
44 | release_jobs_t release_jobs; | ||
45 | |||
46 | /* how are tasks ordered in the ready queue? */ | ||
47 | bheap_prio_t order; | ||
48 | } rt_domain_t; | ||
49 | |||
50 | struct release_heap { | ||
51 | /* list_head for per-time-slot list */ | ||
52 | struct list_head list; | ||
53 | lt_t release_time; | ||
54 | /* all tasks to be released at release_time */ | ||
55 | struct bheap heap; | ||
56 | /* used to trigger the release */ | ||
57 | struct hrtimer timer; | ||
58 | |||
59 | #ifdef CONFIG_RELEASE_MASTER | ||
60 | /* used to delegate releases */ | ||
61 | struct hrtimer_start_on_info info; | ||
62 | #endif | ||
63 | /* required for the timer callback */ | ||
64 | rt_domain_t* dom; | ||
65 | }; | ||
66 | |||
67 | |||
68 | static inline struct task_struct* __next_ready(rt_domain_t* rt) | ||
69 | { | ||
70 | struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue); | ||
71 | if (hn) | ||
72 | return bheap2task(hn); | ||
73 | else | ||
74 | return NULL; | ||
75 | } | ||
76 | |||
77 | void rt_domain_init(rt_domain_t *rt, bheap_prio_t order, | ||
78 | check_resched_needed_t check, | ||
79 | release_jobs_t release); | ||
80 | |||
81 | void __add_ready(rt_domain_t* rt, struct task_struct *new); | ||
82 | void __merge_ready(rt_domain_t* rt, struct bheap *tasks); | ||
83 | void __add_release(rt_domain_t* rt, struct task_struct *task); | ||
84 | |||
85 | static inline struct task_struct* __take_ready(rt_domain_t* rt) | ||
86 | { | ||
87 | struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue); | ||
88 | if (hn) | ||
89 | return bheap2task(hn); | ||
90 | else | ||
91 | return NULL; | ||
92 | } | ||
93 | |||
94 | static inline struct task_struct* __peek_ready(rt_domain_t* rt) | ||
95 | { | ||
96 | struct bheap_node* hn = bheap_peek(rt->order, &rt->ready_queue); | ||
97 | if (hn) | ||
98 | return bheap2task(hn); | ||
99 | else | ||
100 | return NULL; | ||
101 | } | ||
102 | |||
103 | static inline int is_queued(struct task_struct *t) | ||
104 | { | ||
105 | BUG_ON(!tsk_rt(t)->heap_node); | ||
106 | return bheap_node_in_heap(tsk_rt(t)->heap_node); | ||
107 | } | ||
108 | |||
109 | static inline void remove(rt_domain_t* rt, struct task_struct *t) | ||
110 | { | ||
111 | bheap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node); | ||
112 | } | ||
113 | |||
114 | static inline void add_ready(rt_domain_t* rt, struct task_struct *new) | ||
115 | { | ||
116 | unsigned long flags; | ||
117 | /* we need the lock protecting the ready queue */ | ||
118 | raw_spin_lock_irqsave(&rt->ready_lock, flags); | ||
119 | __add_ready(rt, new); | ||
120 | raw_spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
121 | } | ||
122 | |||
123 | static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks) | ||
124 | { | ||
125 | unsigned long flags; | ||
126 | raw_spin_lock_irqsave(&rt->ready_lock, flags); | ||
127 | __merge_ready(rt, tasks); | ||
128 | raw_spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
129 | } | ||
130 | |||
131 | static inline struct task_struct* take_ready(rt_domain_t* rt) | ||
132 | { | ||
133 | unsigned long flags; | ||
134 | struct task_struct* ret; | ||
135 | /* we need the lock protecting the ready queue */ | ||
136 | raw_spin_lock_irqsave(&rt->ready_lock, flags); | ||
137 | ret = __take_ready(rt); | ||
138 | raw_spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
139 | return ret; | ||
140 | } | ||
141 | |||
142 | |||
143 | static inline void add_release(rt_domain_t* rt, struct task_struct *task) | ||
144 | { | ||
145 | unsigned long flags; | ||
146 | raw_spin_lock_irqsave(&rt->tobe_lock, flags); | ||
147 | __add_release(rt, task); | ||
148 | raw_spin_unlock_irqrestore(&rt->tobe_lock, flags); | ||
149 | } | ||
150 | |||
151 | #ifdef CONFIG_RELEASE_MASTER | ||
152 | void __add_release_on(rt_domain_t* rt, struct task_struct *task, | ||
153 | int target_cpu); | ||
154 | |||
155 | static inline void add_release_on(rt_domain_t* rt, | ||
156 | struct task_struct *task, | ||
157 | int target_cpu) | ||
158 | { | ||
159 | unsigned long flags; | ||
160 | raw_spin_lock_irqsave(&rt->tobe_lock, flags); | ||
161 | __add_release_on(rt, task, target_cpu); | ||
162 | raw_spin_unlock_irqrestore(&rt->tobe_lock, flags); | ||
163 | } | ||
164 | #endif | ||
165 | |||
166 | static inline int __jobs_pending(rt_domain_t* rt) | ||
167 | { | ||
168 | return !bheap_empty(&rt->ready_queue); | ||
169 | } | ||
170 | |||
171 | static inline int jobs_pending(rt_domain_t* rt) | ||
172 | { | ||
173 | unsigned long flags; | ||
174 | int ret; | ||
175 | /* we need the lock protecting the ready queue */ | ||
176 | raw_spin_lock_irqsave(&rt->ready_lock, flags); | ||
177 | ret = !bheap_empty(&rt->ready_queue); | ||
178 | raw_spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
179 | return ret; | ||
180 | } | ||
181 | |||
182 | #endif | ||
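To illustrate how the rt_domain API above fits together, here is a hypothetical plugin-side sketch (not part of this patch). It assumes edf_ready_order() from include/litmus/edf_common.h as the bheap ordering; the my_* names are invented and the callback bodies are placeholders.

#include <litmus/rt_domain.h>
#include <litmus/edf_common.h>

static rt_domain_t my_domain;

/* Called by the rt_domain code when the ready queue changes; a real
 * plugin would check here whether a preemption must be triggered. */
static int my_check_resched(rt_domain_t *rt)
{
	return 0;
}

/* Called from the release path with a whole batch of newly released
 * jobs; merge them into the ready queue in one operation (assumes the
 * caller holds rt->ready_lock, hence the lock-free __merge_ready()). */
static void my_release_jobs(rt_domain_t *rt, struct bheap *tasks)
{
	__merge_ready(rt, tasks);
}

static void my_domain_setup(void)
{
	rt_domain_init(&my_domain, edf_ready_order,
	               my_check_resched, my_release_jobs);
}

/* E.g., in the plugin's schedule() callback: take_ready() acquires
 * ready_lock internally; use __take_ready() if it is already held. */
static struct task_struct *my_pick_next_job(void)
{
	return take_ready(&my_domain);
}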
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index ce76faa9c6d7..7b9a90965c25 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
@@ -84,12 +84,12 @@ struct rt_task { | |||
84 | }; | 84 | }; |
85 | 85 | ||
86 | union np_flag { | 86 | union np_flag { |
87 | uint64_t raw; | 87 | uint32_t raw; |
88 | struct { | 88 | struct { |
89 | /* Is the task currently in a non-preemptive section? */ | 89 | /* Is the task currently in a non-preemptive section? */ |
90 | uint64_t flag:31; | 90 | uint32_t flag:31; |
91 | /* Should the task call into the scheduler? */ | 91 | /* Should the task call into the scheduler? */ |
92 | uint64_t preempt:1; | 92 | uint32_t preempt:1; |
93 | } np; | 93 | } np; |
94 | }; | 94 | }; |
95 | 95 | ||
@@ -110,10 +110,10 @@ union np_flag { | |||
110 | struct control_page { | 110 | struct control_page { |
111 | /* This flag is used by userspace to communicate non-preemptive | 111 | /* This flag is used by userspace to communicate non-preemptive
112 | * sections. */ | 112 | * sections. */ |
113 | volatile union np_flag sched; | 113 | volatile __attribute__ ((aligned (8))) union np_flag sched; |
114 | 114 | ||
115 | volatile uint64_t irq_count; /* Incremented by the kernel each time an IRQ is | 115 | /* Incremented by the kernel each time an IRQ is handled. */ |
116 | * handled. */ | 116 | volatile __attribute__ ((aligned (8))) uint64_t irq_count; |
117 | 117 | ||
118 | /* Locking overhead tracing: userspace records here the time stamp | 118 | /* Locking overhead tracing: userspace records here the time stamp |
119 | * and IRQ counter prior to starting the system call. */ | 119 | * and IRQ counter prior to starting the system call. */ |
@@ -171,9 +171,6 @@ struct pfair_param; | |||
171 | * be explicitly set up before the task set is launched. | 171 | * be explicitly set up before the task set is launched. |
172 | */ | 172 | */ |
173 | struct rt_param { | 173 | struct rt_param { |
174 | /* Generic flags available for plugin-internal use. */ | ||
175 | unsigned int flags:8; | ||
176 | |||
177 | /* do we need to check for srp blocking? */ | 174 | /* do we need to check for srp blocking? */ |
178 | unsigned int srp_non_recurse:1; | 175 | unsigned int srp_non_recurse:1; |
179 | 176 | ||
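The changes above shrink np_flag to 32 bits and force 8-byte alignment of the control-page fields so that the layout is identical for 32-bit and 64-bit userspace. For illustration, a simplified (non-nesting) userspace wrapper for the non-preemptive-section protocol might look as follows; it assumes the struct control_page definition above is visible and that ctrl points at the task's mapped control page (liblitmus provides the real, nesting-aware implementation and the mapping).

#include <sched.h>

static void np_enter(volatile struct control_page *ctrl)
{
	ctrl->sched.np.flag = 1;    /* announce a non-preemptive section */
	__sync_synchronize();       /* publish before starting the critical work */
}

static void np_exit(volatile struct control_page *ctrl)
{
	ctrl->sched.np.flag = 0;
	__sync_synchronize();
	if (ctrl->sched.np.preempt) /* the kernel deferred a preemption */
		sched_yield();      /* let the scheduler run now */
}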
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h new file mode 100644 index 000000000000..0ccccd6ae1af --- /dev/null +++ b/include/litmus/sched_plugin.h | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Definition of the scheduler plugin interface. | ||
3 | * | ||
4 | */ | ||
5 | #ifndef _LINUX_SCHED_PLUGIN_H_ | ||
6 | #define _LINUX_SCHED_PLUGIN_H_ | ||
7 | |||
8 | #include <linux/sched.h> | ||
9 | |||
10 | #ifdef CONFIG_LITMUS_LOCKING | ||
11 | #include <litmus/locking.h> | ||
12 | #endif | ||
13 | |||
14 | /************************ setup/tear down ********************/ | ||
15 | |||
16 | typedef long (*activate_plugin_t) (void); | ||
17 | typedef long (*deactivate_plugin_t) (void); | ||
18 | |||
19 | struct domain_proc_info; | ||
20 | typedef long (*get_domain_proc_info_t) (struct domain_proc_info **info); | ||
21 | |||
22 | |||
23 | /********************* scheduler invocation ******************/ | ||
24 | /* The main scheduling function, called to select the next task to dispatch. */ | ||
25 | typedef struct task_struct* (*schedule_t)(struct task_struct * prev); | ||
26 | /* Clean up after the task switch has occurred. | ||
27 | * This function is called after every (even non-rt) task switch. | ||
28 | */ | ||
29 | typedef void (*finish_switch_t)(struct task_struct *prev); | ||
30 | |||
31 | |||
32 | /********************* task state changes ********************/ | ||
33 | |||
34 | /* Called to set up a new real-time task. | ||
35 | * Release the first job, enqueue, etc. | ||
36 | * Task may already be running. | ||
37 | */ | ||
38 | typedef void (*task_new_t) (struct task_struct *task, | ||
39 | int on_rq, | ||
40 | int running); | ||
41 | |||
42 | /* Called to re-introduce a task after blocking. | ||
43 | * Can potentially be called multiple times. | ||
44 | */ | ||
45 | typedef void (*task_wake_up_t) (struct task_struct *task); | ||
46 | /* Called to notify the plugin of a blocking real-time task. It will | ||
47 | * only be called for real-time tasks and before schedule() is called. */ | ||
48 | typedef void (*task_block_t) (struct task_struct *task); | ||
49 | /* Called when a real-time task exits or changes to a different scheduling | ||
50 | * class. | ||
51 | * Free any allocated resources. | ||
52 | */ | ||
53 | typedef void (*task_exit_t) (struct task_struct *); | ||
54 | |||
55 | /* task_exit() is called with interrupts disabled and runqueue locks held, and | ||
56 | * thus cannot block or spin. task_cleanup() is called sometime later | ||
57 | * without any locks being held. | ||
58 | */ | ||
59 | typedef void (*task_cleanup_t) (struct task_struct *); | ||
60 | |||
61 | #ifdef CONFIG_LITMUS_LOCKING | ||
62 | /* Called when the current task attempts to create a new lock of a given | ||
63 | * protocol type. */ | ||
64 | typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type, | ||
65 | void* __user config); | ||
66 | #endif | ||
67 | |||
68 | |||
69 | /********************* sys call backends ********************/ | ||
70 | /* This function causes the caller to sleep until the next release */ | ||
71 | typedef long (*complete_job_t) (void); | ||
72 | |||
73 | typedef long (*admit_task_t)(struct task_struct* tsk); | ||
74 | |||
75 | typedef long (*wait_for_release_at_t)(lt_t release_time); | ||
76 | |||
77 | /* Informs the plugin when a synchronous release takes place. */ | ||
78 | typedef void (*synchronous_release_at_t)(lt_t time_zero); | ||
79 | |||
80 | /************************ misc routines ***********************/ | ||
81 | |||
82 | |||
83 | struct sched_plugin { | ||
84 | struct list_head list; | ||
85 | /* basic info */ | ||
86 | char *plugin_name; | ||
87 | |||
88 | /* setup */ | ||
89 | activate_plugin_t activate_plugin; | ||
90 | deactivate_plugin_t deactivate_plugin; | ||
91 | get_domain_proc_info_t get_domain_proc_info; | ||
92 | |||
93 | /* scheduler invocation */ | ||
94 | schedule_t schedule; | ||
95 | finish_switch_t finish_switch; | ||
96 | |||
97 | /* syscall backend */ | ||
98 | complete_job_t complete_job; | ||
99 | wait_for_release_at_t wait_for_release_at; | ||
100 | synchronous_release_at_t synchronous_release_at; | ||
101 | |||
102 | /* task state changes */ | ||
103 | admit_task_t admit_task; | ||
104 | |||
105 | task_new_t task_new; | ||
106 | task_wake_up_t task_wake_up; | ||
107 | task_block_t task_block; | ||
108 | |||
109 | task_exit_t task_exit; | ||
110 | task_cleanup_t task_cleanup; | ||
111 | |||
112 | #ifdef CONFIG_LITMUS_LOCKING | ||
113 | /* locking protocols */ | ||
114 | allocate_lock_t allocate_lock; | ||
115 | #endif | ||
116 | } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); | ||
117 | |||
118 | |||
119 | extern struct sched_plugin *litmus; | ||
120 | |||
121 | int register_sched_plugin(struct sched_plugin* plugin); | ||
122 | struct sched_plugin* find_sched_plugin(const char* name); | ||
123 | void print_sched_plugins(struct seq_file *m); | ||
124 | |||
125 | |||
126 | extern struct sched_plugin linux_sched_plugin; | ||
127 | |||
128 | #endif | ||
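As a concrete illustration of the interface, a hypothetical stub plugin only fills in the callbacks it cares about and registers itself; the sketch assumes register_sched_plugin() tolerates unset hooks by installing defaults, as the dummy plugin code in this patch suggests. Names and bodies below are invented for the example.

#include <linux/errno.h>
#include <linux/module.h>
#include <litmus/sched_plugin.h>

static struct task_struct *demo_schedule(struct task_struct *prev)
{
	return NULL;        /* never picks a task: defer to Linux scheduling */
}

static long demo_admit_task(struct task_struct *tsk)
{
	return -EINVAL;     /* reject every real-time task in this stub */
}

static struct sched_plugin demo_plugin = {
	.plugin_name = "DEMO",
	.schedule    = demo_schedule,
	.admit_task  = demo_admit_task,
};

static int __init init_demo_plugin(void)
{
	return register_sched_plugin(&demo_plugin);
}
module_init(init_demo_plugin);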
diff --git a/include/litmus/srp.h b/include/litmus/srp.h new file mode 100644 index 000000000000..c9a4552b2bf3 --- /dev/null +++ b/include/litmus/srp.h | |||
@@ -0,0 +1,28 @@ | |||
1 | #ifndef LITMUS_SRP_H | ||
2 | #define LITMUS_SRP_H | ||
3 | |||
4 | struct srp_semaphore; | ||
5 | |||
6 | struct srp_priority { | ||
7 | struct list_head list; | ||
8 | unsigned int priority; | ||
9 | pid_t pid; | ||
10 | }; | ||
11 | #define list2prio(l) list_entry(l, struct srp_priority, list) | ||
12 | |||
13 | /* struct for uniprocessor SRP "semaphore" */ | ||
14 | struct srp_semaphore { | ||
15 | struct litmus_lock litmus_lock; | ||
16 | struct srp_priority ceiling; | ||
17 | struct task_struct* owner; | ||
18 | int cpu; /* cpu associated with this "semaphore" and resource */ | ||
19 | }; | ||
20 | |||
21 | /* map a task to its SRP preemption level priority */ | ||
22 | typedef unsigned int (*srp_prioritization_t)(struct task_struct* t); | ||
23 | /* Must be updated by each plugin that uses SRP. */ | ||
24 | extern srp_prioritization_t get_srp_prio; | ||
25 | |||
26 | struct srp_semaphore* allocate_srp_semaphore(void); | ||
27 | |||
28 | #endif | ||
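A plugin that supports SRP must install get_srp_prio. As a hypothetical sketch, a partitioned fixed-priority plugin could map each task to its preemption level from its activate_plugin() callback; the demo_* names are invented, and the field access assumes the fixed-priority task parameters exposed via rt_param.h.

#include <litmus/litmus.h>
#include <litmus/srp.h>

/* map a task to its SRP preemption level: here, its fixed priority */
static unsigned int demo_srp_prio(struct task_struct *t)
{
	return tsk_rt(t)->task_params.priority;
}

static long demo_activate_plugin(void)
{
	get_srp_prio = demo_srp_prio;
	return 0;
}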
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h new file mode 100644 index 000000000000..94264c27d9ac --- /dev/null +++ b/include/litmus/unistd_32.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * included from arch/x86/include/asm/unistd_32.h | ||
3 | * | ||
4 | * LITMUS^RT syscalls with "relative" numbers | ||
5 | */ | ||
6 | #define __LSC(x) (__NR_LITMUS + x) | ||
7 | |||
8 | #define __NR_set_rt_task_param __LSC(0) | ||
9 | #define __NR_get_rt_task_param __LSC(1) | ||
10 | #define __NR_complete_job __LSC(2) | ||
11 | #define __NR_od_open __LSC(3) | ||
12 | #define __NR_od_close __LSC(4) | ||
13 | #define __NR_litmus_lock __LSC(5) | ||
14 | #define __NR_litmus_unlock __LSC(6) | ||
15 | #define __NR_query_job_no __LSC(7) | ||
16 | #define __NR_wait_for_job_release __LSC(8) | ||
17 | #define __NR_wait_for_ts_release __LSC(9) | ||
18 | #define __NR_release_ts __LSC(10) | ||
19 | #define __NR_null_call __LSC(11) | ||
20 | |||
21 | #define NR_litmus_syscalls 12 | ||
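Since the numbers are relative, userspace needs the base __NR_LITMUS (assigned wherever this header is included) to place a call. liblitmus hides this detail; a raw invocation of the cheapest call could look like the following sketch, where the base value is purely illustrative.

#include <unistd.h>
#include <sys/syscall.h>

#define __NR_LITMUS    325                    /* illustrative base only */
#define __NR_null_call (__NR_LITMUS + 11)     /* __LSC(11), as above */

int main(void)
{
	/* sys_null_call is meant for overhead measurements; passing a
	 * NULL timestamp pointer merely exercises the syscall path. */
	return (int) syscall(__NR_null_call, (void *) 0);
}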
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h new file mode 100644 index 000000000000..d5ced0d2642c --- /dev/null +++ b/include/litmus/unistd_64.h | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * included from arch/x86/include/asm/unistd_64.h | ||
3 | * | ||
4 | * LITMUS^RT syscalls with "relative" numbers | ||
5 | */ | ||
6 | #define __LSC(x) (__NR_LITMUS + x) | ||
7 | |||
8 | #define __NR_set_rt_task_param __LSC(0) | ||
9 | __SYSCALL(__NR_set_rt_task_param, sys_set_rt_task_param) | ||
10 | #define __NR_get_rt_task_param __LSC(1) | ||
11 | __SYSCALL(__NR_get_rt_task_param, sys_get_rt_task_param) | ||
12 | #define __NR_complete_job __LSC(2) | ||
13 | __SYSCALL(__NR_complete_job, sys_complete_job) | ||
14 | #define __NR_od_open __LSC(3) | ||
15 | __SYSCALL(__NR_od_open, sys_od_open) | ||
16 | #define __NR_od_close __LSC(4) | ||
17 | __SYSCALL(__NR_od_close, sys_od_close) | ||
18 | #define __NR_litmus_lock __LSC(5) | ||
19 | __SYSCALL(__NR_litmus_lock, sys_litmus_lock) | ||
20 | #define __NR_litmus_unlock __LSC(6) | ||
21 | __SYSCALL(__NR_litmus_unlock, sys_litmus_unlock) | ||
22 | #define __NR_query_job_no __LSC(7) | ||
23 | __SYSCALL(__NR_query_job_no, sys_query_job_no) | ||
24 | #define __NR_wait_for_job_release __LSC(8) | ||
25 | __SYSCALL(__NR_wait_for_job_release, sys_wait_for_job_release) | ||
26 | #define __NR_wait_for_ts_release __LSC(9) | ||
27 | __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release) | ||
28 | #define __NR_release_ts __LSC(10) | ||
29 | __SYSCALL(__NR_release_ts, sys_release_ts) | ||
30 | #define __NR_null_call __LSC(11) | ||
31 | __SYSCALL(__NR_null_call, sys_null_call) | ||
32 | |||
33 | #define NR_litmus_syscalls 12 | ||
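Unlike the 32-bit header, each definition here also expands __SYSCALL(nr, sym), following the kernel convention in which the including site defines __SYSCALL to emit syscall-table entries. Conceptually (an illustration only, not how the table is actually generated for this patch):

/* a table-building site would define, e.g.: */
#define __SYSCALL(nr, sym) [nr] = sym,
/* so that including unistd_64.h contributes entries such as
 *     [__NR_complete_job] = sys_complete_job,
 *     [__NR_null_call]    = sys_null_call,
 * to the architecture's sys_call_table. */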
diff --git a/include/litmus/wait.h b/include/litmus/wait.h new file mode 100644 index 000000000000..ce1347c355f8 --- /dev/null +++ b/include/litmus/wait.h | |||
@@ -0,0 +1,57 @@ | |||
1 | #ifndef _LITMUS_WAIT_H_ | ||
2 | #define _LITMUS_WAIT_H_ | ||
3 | |||
4 | struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq); | ||
5 | |||
6 | /* wrap a regular wait_queue_t entry */ | ||
7 | struct __prio_wait_queue { | ||
8 | wait_queue_t wq; | ||
9 | |||
10 | /* some priority point */ | ||
11 | lt_t priority; | ||
12 | /* break ties in priority by lower tie_breaker */ | ||
13 | unsigned int tie_breaker; | ||
14 | }; | ||
15 | |||
16 | typedef struct __prio_wait_queue prio_wait_queue_t; | ||
17 | |||
18 | static inline void init_prio_waitqueue_entry(prio_wait_queue_t *pwq, | ||
19 | struct task_struct* t, | ||
20 | lt_t priority) | ||
21 | { | ||
22 | init_waitqueue_entry(&pwq->wq, t); | ||
23 | pwq->priority = priority; | ||
24 | pwq->tie_breaker = 0; | ||
25 | } | ||
26 | |||
27 | static inline void init_prio_waitqueue_entry_tie(prio_wait_queue_t *pwq, | ||
28 | struct task_struct* t, | ||
29 | lt_t priority, | ||
30 | unsigned int tie_breaker) | ||
31 | { | ||
32 | init_waitqueue_entry(&pwq->wq, t); | ||
33 | pwq->priority = priority; | ||
34 | pwq->tie_breaker = tie_breaker; | ||
35 | } | ||
36 | |||
37 | unsigned int __add_wait_queue_prio_exclusive( | ||
38 | wait_queue_head_t* head, | ||
39 | prio_wait_queue_t *new); | ||
40 | |||
41 | static inline unsigned int add_wait_queue_prio_exclusive( | ||
42 | wait_queue_head_t* head, | ||
43 | prio_wait_queue_t *new) | ||
44 | { | ||
45 | unsigned long flags; | ||
46 | unsigned int passed; | ||
47 | |||
48 | spin_lock_irqsave(&head->lock, flags); | ||
49 | passed = __add_wait_queue_prio_exclusive(head, new); | ||
50 | |||
51 | spin_unlock_irqrestore(&head->lock, flags); | ||
52 | |||
53 | return passed; | ||
54 | } | ||
55 | |||
56 | |||
57 | #endif | ||
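To illustrate the intended use, a hypothetical locking protocol could suspend the current task in priority order as sketched below; the matching wake-up side would dequeue the highest-priority waiter with __waitqueue_remove_first() and wake it. The demo_* name is invented, and the sketch ignores the races a real protocol must close under its own lock.

#include <linux/sched.h>
#include <linux/wait.h>
#include <litmus/rt_param.h>
#include <litmus/wait.h>

static void demo_suspend_in_prio_order(wait_queue_head_t *wq, lt_t prio)
{
	prio_wait_queue_t pwq;

	init_prio_waitqueue_entry(&pwq, current, prio);
	set_current_state(TASK_UNINTERRUPTIBLE);  /* sleep until explicitly woken */
	add_wait_queue_prio_exclusive(wq, &pwq);  /* acquires wq->lock internally */
	schedule();                               /* returns once the holder wakes us */
}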