author     Glenn Elliott <gelliott@cs.unc.edu>	2012-09-10 13:30:24 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>	2012-09-10 13:30:24 -0400
commit     893c8943ce5c5527f05ab7e9208d5a942d77d8b5
tree       aa9e84b3503ff97c1d87bc9c2ce6682e51cfc971
parent     901fdd9c22790039a76c1d3ee01828a2f124f6f3
parent     d3c32e91e3fce2a57083a734efae6d9de06ec02f

Merge branch 'prop/robust-tie-break' into wip-gpu-rtas12

Conflicts:
	include/litmus/binheap.h
	include/litmus/fdso.h
	include/litmus/litmus.h
	litmus/Makefile
	litmus/binheap.c
	litmus/edf_common.c
	litmus/fdso.c
	litmus/jobs.c
	litmus/locking.c
-rw-r--r--  arch/x86/include/asm/feather_trace_32.h  |   96
-rw-r--r--  arch/x86/include/asm/feather_trace_64.h  |  101
-rw-r--r--  include/litmus/binheap.h                 |   41
-rw-r--r--  include/litmus/budget.h                  |   27
-rw-r--r--  include/litmus/fdso.h                    |   10
-rw-r--r--  include/litmus/fp_common.h               |  105
-rw-r--r--  include/litmus/ikglp_lock.h              |   10
-rw-r--r--  include/litmus/litmus.h                  |   35
-rw-r--r--  include/litmus/locking.h                 |    2
-rw-r--r--  include/litmus/rt_param.h                |   31
-rw-r--r--  include/litmus/sched_trace.h             |  105
-rw-r--r--  include/litmus/wait.h                    |   57
-rw-r--r--  include/trace/events/litmus.h            |  231
-rw-r--r--  kernel/sched.c                           |    6
-rw-r--r--  litmus/Kconfig                           |   64
-rw-r--r--  litmus/Makefile                          |    6
-rw-r--r--  litmus/binheap.c                         |  105
-rw-r--r--  litmus/budget.c                          |    2
-rw-r--r--  litmus/ctrldev.c                         |   45
-rw-r--r--  litmus/edf_common.c                      |  132
-rw-r--r--  litmus/fdso.c                            |    4
-rw-r--r--  litmus/fp_common.c                       |  119
-rw-r--r--  litmus/jobs.c                            |   39
-rw-r--r--  litmus/litmus.c                          |   27
-rw-r--r--  litmus/locking.c                         |   33
-rw-r--r--  litmus/rt_domain.c                       |    8
-rw-r--r--  litmus/sched_cedf.c                      |   11
-rw-r--r--  litmus/sched_gsn_edf.c                   |   16
-rw-r--r--  litmus/sched_pfair.c                     |    7
-rw-r--r--  litmus/sched_pfp.c                       | 1685
-rw-r--r--  litmus/sched_psn_edf.c                   |    1

31 files changed, 2840 insertions(+), 321 deletions(-)
diff --git a/arch/x86/include/asm/feather_trace_32.h b/arch/x86/include/asm/feather_trace_32.h
index 70202f90f169..75e81a9f9382 100644
--- a/arch/x86/include/asm/feather_trace_32.h
+++ b/arch/x86/include/asm/feather_trace_32.h
@@ -1,12 +1,45 @@
1/* Copyright (c) 2007-2012 Björn Brandenburg, <bbb@mpi-sws.org>
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining
4 * a copy of this software and associated documentation files (the
5 * "Software"), to deal in the Software without restriction, including
6 * without limitation the rights to use, copy, modify, merge, publish,
7 * distribute, sublicense, and/or sell copies of the Software, and to
8 * permit persons to whom the Software is furnished to do so, subject to
9 * the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be
12 * included in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
18 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
19 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
1/* Do not directly include this file. Include feather_trace.h instead */ 24/* Do not directly include this file. Include feather_trace.h instead */
2 25
3#define feather_callback __attribute__((regparm(0))) 26#define feather_callback __attribute__((regparm(3))) __attribute__((used))
4 27
5/* 28/*
6 * make the compiler reload any register that is not saved in 29 * Make the compiler reload any register that is not saved in a cdecl function
7 * a cdecl function call 30 * call (minus the registers that we explicitly clobber as output registers).
8 */ 31 */
9#define CLOBBER_LIST "memory", "cc", "eax", "ecx", "edx" 32#define __FT_CLOBBER_LIST0 "memory", "cc", "eax", "edx", "ecx"
33#define __FT_CLOBBER_LIST1 "memory", "cc", "eax", "ecx"
34#define __FT_CLOBBER_LIST2 "memory", "cc", "eax"
35#define __FT_CLOBBER_LIST3 "memory", "cc", "eax"
36
37#define __FT_TMP1(x) "=d" (x)
38#define __FT_ARG1(x) "0" ((long) (x))
39#define __FT_TMP2(x) "=c" (x)
40#define __FT_ARG2(x) "1" ((long) (x))
41
42#define __FT_ARG3(x) "r" ((long) (x))
10 43
11#define ft_event(id, callback) \ 44#define ft_event(id, callback) \
12 __asm__ __volatile__( \ 45 __asm__ __volatile__( \
@@ -16,64 +49,67 @@
16 ".long " #id ", 0, 1b, 2f \n\t" \ 49 ".long " #id ", 0, 1b, 2f \n\t" \
17 ".previous \n\t" \ 50 ".previous \n\t" \
18 "2: \n\t" \ 51 "2: \n\t" \
19 : : : CLOBBER_LIST) 52 : : : __FT_CLOBBER_LIST0)
20 53
21#define ft_event0(id, callback) \ 54#define ft_event0(id, callback) \
22 __asm__ __volatile__( \ 55 __asm__ __volatile__( \
23 "1: jmp 2f \n\t" \ 56 "1: jmp 2f \n\t" \
24 " subl $4, %%esp \n\t" \ 57 " movl $" #id ", %%eax \n\t" \
25 " movl $" #id ", (%%esp) \n\t" \
26 " call " #callback " \n\t" \ 58 " call " #callback " \n\t" \
27 " addl $4, %%esp \n\t" \
28 ".section __event_table, \"aw\" \n\t" \ 59 ".section __event_table, \"aw\" \n\t" \
29 ".long " #id ", 0, 1b, 2f \n\t" \ 60 ".long " #id ", 0, 1b, 2f \n\t" \
30 ".previous \n\t" \ 61 ".previous \n\t" \
31 "2: \n\t" \ 62 "2: \n\t" \
32 : : : CLOBBER_LIST) 63 : : : __FT_CLOBBER_LIST0)
33 64
34#define ft_event1(id, callback, param) \ 65#define ft_event1(id, callback, param) \
66 do { \
67 long __ft_tmp1; \
35 __asm__ __volatile__( \ 68 __asm__ __volatile__( \
36 "1: jmp 2f \n\t" \ 69 "1: jmp 2f \n\t" \
37 " subl $8, %%esp \n\t" \ 70 " movl $" #id ", %%eax \n\t" \
38 " movl %0, 4(%%esp) \n\t" \
39 " movl $" #id ", (%%esp) \n\t" \
40 " call " #callback " \n\t" \ 71 " call " #callback " \n\t" \
41 " addl $8, %%esp \n\t" \
42 ".section __event_table, \"aw\" \n\t" \ 72 ".section __event_table, \"aw\" \n\t" \
43 ".long " #id ", 0, 1b, 2f \n\t" \ 73 ".long " #id ", 0, 1b, 2f \n\t" \
44 ".previous \n\t" \ 74 ".previous \n\t" \
45 "2: \n\t" \ 75 "2: \n\t" \
46 : : "r" (param) : CLOBBER_LIST) 76 : __FT_TMP1(__ft_tmp1) \
77 : __FT_ARG1(param) \
78 : __FT_CLOBBER_LIST1); \
79 } while (0);
47 80
48#define ft_event2(id, callback, param, param2) \ 81#define ft_event2(id, callback, param, param2) \
82 do { \
83 long __ft_tmp1, __ft_tmp2; \
49 __asm__ __volatile__( \ 84 __asm__ __volatile__( \
50 "1: jmp 2f \n\t" \ 85 "1: jmp 2f \n\t" \
51 " subl $12, %%esp \n\t" \ 86 " movl $" #id ", %%eax \n\t" \
52 " movl %1, 8(%%esp) \n\t" \
53 " movl %0, 4(%%esp) \n\t" \
54 " movl $" #id ", (%%esp) \n\t" \
55 " call " #callback " \n\t" \ 87 " call " #callback " \n\t" \
56 " addl $12, %%esp \n\t" \
57 ".section __event_table, \"aw\" \n\t" \ 88 ".section __event_table, \"aw\" \n\t" \
58 ".long " #id ", 0, 1b, 2f \n\t" \ 89 ".long " #id ", 0, 1b, 2f \n\t" \
59 ".previous \n\t" \ 90 ".previous \n\t" \
60 "2: \n\t" \ 91 "2: \n\t" \
61 : : "r" (param), "r" (param2) : CLOBBER_LIST) 92 : __FT_TMP1(__ft_tmp1), __FT_TMP2(__ft_tmp2) \
93 : __FT_ARG1(param), __FT_ARG2(param2) \
94 : __FT_CLOBBER_LIST2); \
95 } while (0);
62 96
63 97
64#define ft_event3(id, callback, p, p2, p3) \ 98#define ft_event3(id, callback, param, param2, param3) \
99 do { \
100 long __ft_tmp1, __ft_tmp2; \
65 __asm__ __volatile__( \ 101 __asm__ __volatile__( \
66 "1: jmp 2f \n\t" \ 102 "1: jmp 2f \n\t" \
67 " subl $16, %%esp \n\t" \ 103 " subl $4, %%esp \n\t" \
68 " movl %2, 12(%%esp) \n\t" \ 104 " movl $" #id ", %%eax \n\t" \
69 " movl %1, 8(%%esp) \n\t" \ 105 " movl %2, (%%esp) \n\t" \
70 " movl %0, 4(%%esp) \n\t" \
71 " movl $" #id ", (%%esp) \n\t" \
72 " call " #callback " \n\t" \ 106 " call " #callback " \n\t" \
73 " addl $16, %%esp \n\t" \ 107 " addl $4, %%esp \n\t" \
74 ".section __event_table, \"aw\" \n\t" \ 108 ".section __event_table, \"aw\" \n\t" \
75 ".long " #id ", 0, 1b, 2f \n\t" \ 109 ".long " #id ", 0, 1b, 2f \n\t" \
76 ".previous \n\t" \ 110 ".previous \n\t" \
77 "2: \n\t" \ 111 "2: \n\t" \
78 : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST) 112 : __FT_TMP1(__ft_tmp1), __FT_TMP2(__ft_tmp2) \
79 113 : __FT_ARG1(param), __FT_ARG2(param2), __FT_ARG3(param3) \
114 : __FT_CLOBBER_LIST3); \
115 } while (0);
diff --git a/arch/x86/include/asm/feather_trace_64.h b/arch/x86/include/asm/feather_trace_64.h
index 54ac2aeb3a28..5ce49e2eebba 100644
--- a/arch/x86/include/asm/feather_trace_64.h
+++ b/arch/x86/include/asm/feather_trace_64.h
@@ -1,67 +1,124 @@
1/* Copyright (c) 2010 Andrea Bastoni, <bastoni@cs.unc.edu>
2 * Copyright (c) 2012 Björn Brandenburg, <bbb@mpi-sws.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
1/* Do not directly include this file. Include feather_trace.h instead */ 25/* Do not directly include this file. Include feather_trace.h instead */
2 26
3/* regparm is the default on x86_64 */ 27/* regparm is the default on x86_64 */
4#define feather_callback 28#define feather_callback __attribute__((used))
5 29
6# define _EVENT_TABLE(id,from,to) \ 30#define __FT_EVENT_TABLE(id,from,to) \
7 ".section __event_table, \"aw\"\n\t" \ 31 ".section __event_table, \"aw\"\n\t" \
8 ".balign 8\n\t" \ 32 ".balign 8\n\t" \
9 ".quad " #id ", 0, " #from ", " #to " \n\t" \ 33 ".quad " #id ", 0, " #from ", " #to " \n\t" \
10 ".previous \n\t" 34 ".previous \n\t"
11 35
12/* 36/*
13 * x86_64 callee only owns rbp, rbx, r12 -> r15 37 * x86_64 caller only owns rbp, rbx, r12-r15;
14 * the called can freely modify the others 38 * the callee can freely modify the others.
15 */ 39 */
16#define CLOBBER_LIST "memory", "cc", "rdi", "rsi", "rdx", "rcx", \ 40#define __FT_CLOBBER_LIST0 "memory", "cc", "rdi", "rsi", "rdx", "rcx", \
41 "r8", "r9", "r10", "r11", "rax"
42
43#define __FT_CLOBBER_LIST1 "memory", "cc", "rdi", "rdx", "rcx", \
44 "r8", "r9", "r10", "r11", "rax"
45
46#define __FT_CLOBBER_LIST2 "memory", "cc", "rdi", "rcx", \
17 "r8", "r9", "r10", "r11", "rax" 47 "r8", "r9", "r10", "r11", "rax"
18 48
49#define __FT_CLOBBER_LIST3 "memory", "cc", "rdi", \
50 "r8", "r9", "r10", "r11", "rax"
51
52/* The registers RDI, RSI, RDX, RCX, R8 and R9 are used for integer and pointer
53 * arguments. */
54
55/* RSI */
56#define __FT_TMP1(x) "=S" (x)
57#define __FT_ARG1(x) "0" ((long) (x))
58
59/* RDX */
60#define __FT_TMP2(x) "=d" (x)
61#define __FT_ARG2(x) "1" ((long) (x))
62
63/* RCX */
64#define __FT_TMP3(x) "=c" (x)
65#define __FT_ARG3(x) "2" ((long) (x))
66
19#define ft_event(id, callback) \ 67#define ft_event(id, callback) \
20 __asm__ __volatile__( \ 68 __asm__ __volatile__( \
21 "1: jmp 2f \n\t" \ 69 "1: jmp 2f \n\t" \
22 " call " #callback " \n\t" \ 70 " call " #callback " \n\t" \
23 _EVENT_TABLE(id,1b,2f) \ 71 __FT_EVENT_TABLE(id,1b,2f) \
24 "2: \n\t" \ 72 "2: \n\t" \
25 : : : CLOBBER_LIST) 73 : : : __FT_CLOBBER_LIST0)
26 74
27#define ft_event0(id, callback) \ 75#define ft_event0(id, callback) \
28 __asm__ __volatile__( \ 76 __asm__ __volatile__( \
29 "1: jmp 2f \n\t" \ 77 "1: jmp 2f \n\t" \
30 " movq $" #id ", %%rdi \n\t" \ 78 " movq $" #id ", %%rdi \n\t" \
31 " call " #callback " \n\t" \ 79 " call " #callback " \n\t" \
32 _EVENT_TABLE(id,1b,2f) \ 80 __FT_EVENT_TABLE(id,1b,2f) \
33 "2: \n\t" \ 81 "2: \n\t" \
34 : : : CLOBBER_LIST) 82 : : : __FT_CLOBBER_LIST0)
35 83
36#define ft_event1(id, callback, param) \ 84#define ft_event1(id, callback, param) \
85 do { \
86 long __ft_tmp1; \
37 __asm__ __volatile__( \ 87 __asm__ __volatile__( \
38 "1: jmp 2f \n\t" \ 88 "1: jmp 2f \n\t" \
39 " movq %0, %%rsi \n\t" \
40 " movq $" #id ", %%rdi \n\t" \ 89 " movq $" #id ", %%rdi \n\t" \
41 " call " #callback " \n\t" \ 90 " call " #callback " \n\t" \
42 _EVENT_TABLE(id,1b,2f) \ 91 __FT_EVENT_TABLE(id,1b,2f) \
43 "2: \n\t" \ 92 "2: \n\t" \
44 : : "r" (param) : CLOBBER_LIST) 93 : __FT_TMP1(__ft_tmp1) \
94 : __FT_ARG1(param) \
95 : __FT_CLOBBER_LIST1); \
96 } while (0);
45 97
46#define ft_event2(id, callback, param, param2) \ 98#define ft_event2(id, callback, param, param2) \
99 do { \
100 long __ft_tmp1, __ft_tmp2; \
47 __asm__ __volatile__( \ 101 __asm__ __volatile__( \
48 "1: jmp 2f \n\t" \ 102 "1: jmp 2f \n\t" \
49 " movq %1, %%rdx \n\t" \
50 " movq %0, %%rsi \n\t" \
51 " movq $" #id ", %%rdi \n\t" \ 103 " movq $" #id ", %%rdi \n\t" \
52 " call " #callback " \n\t" \ 104 " call " #callback " \n\t" \
53 _EVENT_TABLE(id,1b,2f) \ 105 __FT_EVENT_TABLE(id,1b,2f) \
54 "2: \n\t" \ 106 "2: \n\t" \
55 : : "r" (param), "r" (param2) : CLOBBER_LIST) 107 : __FT_TMP1(__ft_tmp1), __FT_TMP2(__ft_tmp2) \
108 : __FT_ARG1(param), __FT_ARG2(param2) \
109 : __FT_CLOBBER_LIST2); \
110 } while (0);
56 111
57#define ft_event3(id, callback, p, p2, p3) \ 112#define ft_event3(id, callback, param, param2, param3) \
113 do { \
114 long __ft_tmp1, __ft_tmp2, __ft_tmp3; \
58 __asm__ __volatile__( \ 115 __asm__ __volatile__( \
59 "1: jmp 2f \n\t" \ 116 "1: jmp 2f \n\t" \
60 " movq %2, %%rcx \n\t" \
61 " movq %1, %%rdx \n\t" \
62 " movq %0, %%rsi \n\t" \
63 " movq $" #id ", %%rdi \n\t" \ 117 " movq $" #id ", %%rdi \n\t" \
64 " call " #callback " \n\t" \ 118 " call " #callback " \n\t" \
65 _EVENT_TABLE(id,1b,2f) \ 119 __FT_EVENT_TABLE(id,1b,2f) \
66 "2: \n\t" \ 120 "2: \n\t" \
67 : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST) 121 : __FT_TMP1(__ft_tmp1), __FT_TMP2(__ft_tmp2), __FT_TMP3(__ft_tmp3) \
122 : __FT_ARG1(param), __FT_ARG2(param2), __FT_ARG3(param3) \
123 : __FT_CLOBBER_LIST3); \
124 } while (0);
diff --git a/include/litmus/binheap.h b/include/litmus/binheap.h
index 9e966e3886cb..901a30a3e296 100644
--- a/include/litmus/binheap.h
+++ b/include/litmus/binheap.h
@@ -42,10 +42,10 @@ struct binheap_node {
42 * inlined (similar to Linux red/black tree) for greater efficiency. 42 * inlined (similar to Linux red/black tree) for greater efficiency.
43 */ 43 */
44typedef int (*binheap_order_t)(struct binheap_node *a, 44typedef int (*binheap_order_t)(struct binheap_node *a,
45 struct binheap_node *b); 45 struct binheap_node *b);
46 46
47 47
48struct binheap_handle { 48struct binheap {
49 struct binheap_node *root; 49 struct binheap_node *root;
50 50
51 /* pointer to node to take next inserted child */ 51 /* pointer to node to take next inserted child */
@@ -59,6 +59,9 @@ struct binheap_handle {
59}; 59};
60 60
61 61
62/* Initialized heap nodes not in a heap have parent
63 * set to BINHEAP_POISON.
64 */
62#define BINHEAP_POISON ((void*)(0xdeadbeef)) 65#define BINHEAP_POISON ((void*)(0xdeadbeef))
63 66
64 67
@@ -146,9 +149,8 @@ static inline void INIT_BINHEAP_NODE(struct binheap_node *n)
146 n->ref_ptr = NULL; 149 n->ref_ptr = NULL;
147} 150}
148 151
149static inline void INIT_BINHEAP_HANDLE( 152static inline void INIT_BINHEAP_HANDLE(struct binheap *handle,
150 struct binheap_handle *handle, 153 binheap_order_t compare)
151 binheap_order_t compare)
152{ 154{
153 handle->root = NULL; 155 handle->root = NULL;
154 handle->next = NULL; 156 handle->next = NULL;
@@ -156,27 +158,25 @@ static inline void INIT_BINHEAP_HANDLE(
156 handle->compare = compare; 158 handle->compare = compare;
157} 159}
158 160
159/* Returns true (1) if binheap is empty. */ 161/* Returns true if binheap is empty. */
160static inline int binheap_empty(struct binheap_handle *handle) 162static inline int binheap_empty(struct binheap *handle)
161{ 163{
162 return(handle->root == NULL); 164 return(handle->root == NULL);
163} 165}
164 166
165/* Returns true (1) if binheap node is in a heap. */ 167/* Returns true if binheap node is in a heap. */
166static inline int binheap_is_in_heap(struct binheap_node *node) 168static inline int binheap_is_in_heap(struct binheap_node *node)
167{ 169{
168 return (node->parent != BINHEAP_POISON); 170 return (node->parent != BINHEAP_POISON);
169} 171}
170 172
173/* Returns true if binheap node is in given heap. */
174int binheap_is_in_this_heap(struct binheap_node *node, struct binheap* heap);
171 175
172int binheap_is_in_this_heap(struct binheap_node *node, struct binheap_handle* heap); 176/* Add a node to a heap */
173
174
175
176void __binheap_add(struct binheap_node *new_node, 177void __binheap_add(struct binheap_node *new_node,
177 struct binheap_handle *handle, 178 struct binheap *handle,
178 void *data); 179 void *data);
179
180 180
181/** 181/**
182 * Removes the root node from the heap. The node is removed after coalescing 182 * Removes the root node from the heap. The node is removed after coalescing
@@ -185,22 +185,21 @@ void __binheap_add(struct binheap_node *new_node,
185 * The 'last' node in the tree is then swapped up to the root and bubbled 185 * The 'last' node in the tree is then swapped up to the root and bubbled
186 * down. 186 * down.
187 */ 187 */
188void __binheap_delete_root(struct binheap_handle *handle, 188void __binheap_delete_root(struct binheap *handle,
189 struct binheap_node *container); 189 struct binheap_node *container);
190 190
191/** 191/**
192 * Delete an arbitrary node. Bubble node to delete up to the root, 192 * Delete an arbitrary node. Bubble node to delete up to the root,
193 * and then delete to root. 193 * and then delete to root.
194 */ 194 */
195void __binheap_delete( 195void __binheap_delete(struct binheap_node *node_to_delete,
196 struct binheap_node *node_to_delete, 196 struct binheap *handle);
197 struct binheap_handle *handle);
198 197
199/** 198/**
200 * Bubble up a node whose pointer has decreased in value. 199 * Bubble up a node whose pointer has decreased in value.
201 */ 200 */
202void __binheap_decrease(struct binheap_node *orig_node, 201void __binheap_decrease(struct binheap_node *orig_node,
203 struct binheap_handle *handle); 202 struct binheap *handle);
204 203
205 204
206#endif 205#endif
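Besides the rename of struct binheap_handle to struct binheap, the header above declares the ordering callback type and the core heap operations. A brief sketch of how they fit together; struct demo_item and demo_min_order are hypothetical, everything else comes from this header:

#include <litmus/binheap.h>

struct demo_item {
	int key;
	struct binheap_node node;
};

/* Return nonzero if 'a' belongs closer to the root than 'b' (min-heap).
 * Elements are reached through the node's data pointer, which
 * __binheap_add() sets below. */
static int demo_min_order(struct binheap_node *a, struct binheap_node *b)
{
	struct demo_item *ia = (struct demo_item *) a->data;
	struct demo_item *ib = (struct demo_item *) b->data;
	return ia->key < ib->key;
}

static struct binheap demo_heap;
static struct demo_item item_a = { .key = 3 }, item_b = { .key = 1 };

static void demo_binheap_usage(void)
{
	INIT_BINHEAP_HANDLE(&demo_heap, demo_min_order);
	INIT_BINHEAP_NODE(&item_a.node);
	INIT_BINHEAP_NODE(&item_b.node);

	__binheap_add(&item_a.node, &demo_heap, &item_a);
	__binheap_add(&item_b.node, &demo_heap, &item_b);

	if (!binheap_empty(&demo_heap)) {
		/* item_b (key == 1) sits at the root under the min-order */
		struct demo_item *top = binheap_top_entry(&demo_heap,
				struct demo_item, node);
		__binheap_delete_root(&demo_heap, &top->node);
	}
}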
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index 732530e63491..33344ee8d5f9 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -5,4 +5,31 @@
5 * the next task. */ 5 * the next task. */
6void update_enforcement_timer(struct task_struct* t); 6void update_enforcement_timer(struct task_struct* t);
7 7
8inline static int budget_exhausted(struct task_struct* t)
9{
10 return get_exec_time(t) >= get_exec_cost(t);
11}
12
13inline static lt_t budget_remaining(struct task_struct* t)
14{
15 if (!budget_exhausted(t))
16 return get_exec_cost(t) - get_exec_time(t);
17 else
18 /* avoid overflow */
19 return 0;
20}
21
22#define budget_enforced(t) (tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT)
23
24#define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \
25 == PRECISE_ENFORCEMENT)
26
27static inline int requeue_preempted_job(struct task_struct* t)
28{
29 /* Add task to ready queue only if not subject to budget enforcement or
30 * if the job has budget remaining. t may be NULL.
31 */
32 return t && (!budget_exhausted(t) || !budget_enforced(t));
33}
34
8#endif 35#endif
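These helpers were moved here from litmus.h by this merge so that plugins can test budget state through one header. A short sketch of typical use on a plugin's preemption path; my_domain_requeue() is a hypothetical stand-in for the plugin's ready-queue insert:

#include <litmus/litmus.h>	/* get_exec_cost(), get_exec_time(), lt_t */
#include <litmus/budget.h>

static void my_domain_requeue(struct task_struct *t)
{
	/* plugin-specific ready-queue insertion would go here */
}

static void on_preemption(struct task_struct *prev)
{
	/* requeue_preempted_job() tolerates prev == NULL and filters out
	 * jobs that already exhausted an enforced budget. */
	if (requeue_preempted_job(prev))
		my_domain_requeue(prev);
}

static lt_t slack_left(struct task_struct *t)
{
	/* budget_remaining() never underflows: it returns 0 once the job
	 * has consumed its full exec_cost. */
	return budget_remaining(t);
}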
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index 1f5d3bd1a1db..35be59b970ee 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -12,7 +12,7 @@
12#include <linux/fs.h> 12#include <linux/fs.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14 14
15#define MAX_OBJECT_DESCRIPTORS 32 15#define MAX_OBJECT_DESCRIPTORS 85
16 16
17typedef enum { 17typedef enum {
18 MIN_OBJ_TYPE = 0, 18 MIN_OBJ_TYPE = 0,
@@ -29,7 +29,13 @@ typedef enum {
29 KFMLP_SIMPLE_GPU_AFF_OBS = 7, 29 KFMLP_SIMPLE_GPU_AFF_OBS = 7,
30 KFMLP_GPU_AFF_OBS = 8, 30 KFMLP_GPU_AFF_OBS = 8,
31 31
32 MAX_OBJ_TYPE = 8 32 MPCP_SEM = 9,
33 MPCP_VS_SEM = 10,
34 DPCP_SEM = 11,
35
36 PCP_SEM = 12,
37
38 MAX_OBJ_TYPE = 12
33} obj_type_t; 39} obj_type_t;
34 40
35struct inode_obj_id { 41struct inode_obj_id {
diff --git a/include/litmus/fp_common.h b/include/litmus/fp_common.h
new file mode 100644
index 000000000000..dd1f7bf1e347
--- /dev/null
+++ b/include/litmus/fp_common.h
@@ -0,0 +1,105 @@
1/* Fixed-priority scheduler support.
2 */
3
4#ifndef __FP_COMMON_H__
5#define __FP_COMMON_H__
6
7#include <litmus/rt_domain.h>
8
9#include <asm/bitops.h>
10
11
12void fp_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
13 release_jobs_t release);
14
15int fp_higher_prio(struct task_struct* first,
16 struct task_struct* second);
17
18int fp_ready_order(struct bheap_node* a, struct bheap_node* b);
19
20#define FP_PRIO_BIT_WORDS (LITMUS_MAX_PRIORITY / BITS_PER_LONG)
21
22#if (LITMUS_MAX_PRIORITY % BITS_PER_LONG)
23#error LITMUS_MAX_PRIORITY must be a multiple of BITS_PER_LONG
24#endif
25
26/* bitmask-inexed priority queue */
27struct fp_prio_queue {
28 unsigned long bitmask[FP_PRIO_BIT_WORDS];
29 struct bheap queue[LITMUS_MAX_PRIORITY];
30};
31
32void fp_prio_queue_init(struct fp_prio_queue* q);
33
34static inline void fpq_set(struct fp_prio_queue* q, unsigned int index)
35{
36 unsigned long *word = q->bitmask + (index / BITS_PER_LONG);
37 __set_bit(index % BITS_PER_LONG, word);
38}
39
40static inline void fpq_clear(struct fp_prio_queue* q, unsigned int index)
41{
42 unsigned long *word = q->bitmask + (index / BITS_PER_LONG);
43 __clear_bit(index % BITS_PER_LONG, word);
44}
45
46static inline unsigned int fpq_find(struct fp_prio_queue* q)
47{
48 int i;
49
50 /* loop optimizer should unroll this */
51 for (i = 0; i < FP_PRIO_BIT_WORDS; i++)
52 if (q->bitmask[i])
53 return __ffs(q->bitmask[i]) + i * BITS_PER_LONG;
54
55 return LITMUS_MAX_PRIORITY; /* nothing found */
56}
57
58static inline void fp_prio_add(struct fp_prio_queue* q, struct task_struct* t, unsigned int index)
59{
60
61 BUG_ON(bheap_node_in_heap(tsk_rt(t)->heap_node));
62
63 fpq_set(q, index);
64 bheap_insert(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node);
65}
66
67static inline void fp_prio_remove(struct fp_prio_queue* q, struct task_struct* t, unsigned int index)
68{
69 BUG_ON(!is_queued(t));
70
71 bheap_delete(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node);
72 if (likely(bheap_empty(&q->queue[index])))
73 fpq_clear(q, index);
74}
75
76static inline struct task_struct* fp_prio_peek(struct fp_prio_queue* q)
77{
78 unsigned int idx = fpq_find(q);
79 struct bheap_node* hn;
80
81 if (idx < LITMUS_MAX_PRIORITY) {
82 hn = bheap_peek(fp_ready_order, &q->queue[idx]);
83 return bheap2task(hn);
84 } else
85 return NULL;
86}
87
88static inline struct task_struct* fp_prio_take(struct fp_prio_queue* q)
89{
90 unsigned int idx = fpq_find(q);
91 struct bheap_node* hn;
92
93 if (idx < LITMUS_MAX_PRIORITY) {
94 hn = bheap_take(fp_ready_order, &q->queue[idx]);
95 if (likely(bheap_empty(&q->queue[idx])))
96 fpq_clear(q, idx);
97 return bheap2task(hn);
98 } else
99 return NULL;
100}
101
102int fp_preemption_needed(struct fp_prio_queue* q, struct task_struct *t);
103
104
105#endif
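fp_common.h adds a bitmask-indexed ready queue with one bheap per fixed priority level. A sketch of the intended call pattern, assuming the task's priority was set via task_params.priority (read through the get_priority() accessor this merge adds to litmus.h):

#include <litmus/litmus.h>	/* get_priority() */
#include <litmus/fp_common.h>

static struct fp_prio_queue ready_q;

static void fp_demo_setup(void)
{
	fp_prio_queue_init(&ready_q);
}

static void fp_demo_add(struct task_struct *t)
{
	/* Index 1 is the highest usable priority, LITMUS_MAX_PRIORITY - 1
	 * the lowest; index 0 is reserved for priority boosting. */
	fp_prio_add(&ready_q, t, get_priority(t));
}

static struct task_struct *fp_demo_pick_next(void)
{
	/* fpq_find() scans the bitmask words; fp_prio_take() returns NULL
	 * when every per-priority heap is empty. */
	return fp_prio_take(&ready_q);
}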
diff --git a/include/litmus/ikglp_lock.h b/include/litmus/ikglp_lock.h
index af6f15178cb1..0b89c8135360 100644
--- a/include/litmus/ikglp_lock.h
+++ b/include/litmus/ikglp_lock.h
@@ -76,18 +76,18 @@ struct ikglp_semaphore
76 int max_fifo_len; // max len of a fifo queue 76 int max_fifo_len; // max len of a fifo queue
77 int nr_in_fifos; 77 int nr_in_fifos;
78 78
79 struct binheap_handle top_m; // min heap, base prio 79 struct binheap top_m; // min heap, base prio
80 int top_m_size; // number of nodes in top_m 80 int top_m_size; // number of nodes in top_m
81 81
82 struct binheap_handle not_top_m; // max heap, base prio 82 struct binheap not_top_m; // max heap, base prio
83 83
84 struct binheap_handle donees; // min-heap, base prio 84 struct binheap donees; // min-heap, base prio
85 struct fifo_queue *shortest_fifo_queue; // pointer to shortest fifo queue 85 struct fifo_queue *shortest_fifo_queue; // pointer to shortest fifo queue
86 86
87 /* data structures for holding requests */ 87 /* data structures for holding requests */
88 struct fifo_queue *fifo_queues; // array nr_replicas in length 88 struct fifo_queue *fifo_queues; // array nr_replicas in length
89 struct binheap_handle priority_queue; // max-heap, base prio 89 struct binheap priority_queue; // max-heap, base prio
90 struct binheap_handle donors; // max-heap, base prio 90 struct binheap donors; // max-heap, base prio
91 91
92#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 92#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
93 struct ikglp_affinity *aff_obs; 93 struct ikglp_affinity *aff_obs;
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 71df378236f5..1d70ab713571 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -48,41 +48,28 @@ void litmus_exit_task(struct task_struct *tsk);
48/* Realtime utility macros */ 48/* Realtime utility macros */
49#define get_rt_flags(t) (tsk_rt(t)->flags) 49#define get_rt_flags(t) (tsk_rt(t)->flags)
50#define set_rt_flags(t,f) (tsk_rt(t)->flags=(f)) 50#define set_rt_flags(t,f) (tsk_rt(t)->flags=(f))
51#define is_priority_boosted(t) (tsk_rt(t)->priority_boosted)
52#define get_boost_start(t) (tsk_rt(t)->boost_start_time)
53
54/* task_params macros */
51#define get_exec_cost(t) (tsk_rt(t)->task_params.exec_cost) 55#define get_exec_cost(t) (tsk_rt(t)->task_params.exec_cost)
52#define get_exec_time(t) (tsk_rt(t)->job_params.exec_time)
53#define get_rt_period(t) (tsk_rt(t)->task_params.period) 56#define get_rt_period(t) (tsk_rt(t)->task_params.period)
57#define get_rt_relative_deadline(t) (tsk_rt(t)->task_params.relative_deadline)
54#define get_rt_phase(t) (tsk_rt(t)->task_params.phase) 58#define get_rt_phase(t) (tsk_rt(t)->task_params.phase)
55#define get_partition(t) (tsk_rt(t)->task_params.cpu) 59#define get_partition(t) (tsk_rt(t)->task_params.cpu)
60#define get_priority(t) (tsk_rt(t)->task_params.priority)
61#define get_class(t) (tsk_rt(t)->task_params.cls)
62
63/* job_param macros */
64#define get_exec_time(t) (tsk_rt(t)->job_params.exec_time)
56#define get_deadline(t) (tsk_rt(t)->job_params.deadline) 65#define get_deadline(t) (tsk_rt(t)->job_params.deadline)
57#define get_period(t) (tsk_rt(t)->task_params.period) 66#define get_period(t) (tsk_rt(t)->task_params.period)
58#define get_release(t) (tsk_rt(t)->job_params.release) 67#define get_release(t) (tsk_rt(t)->job_params.release)
59#define get_class(t) (tsk_rt(t)->task_params.cls) 68#define get_lateness(t) (tsk_rt(t)->job_params.lateness)
60
61#define is_priority_boosted(t) (tsk_rt(t)->priority_boosted)
62#define get_boost_start(t) (tsk_rt(t)->boost_start_time)
63 69
64#define effective_priority(t) ((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task) 70#define effective_priority(t) ((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task)
65#define base_priority(t) (t) 71#define base_priority(t) (t)
66 72
67inline static int budget_exhausted(struct task_struct* t)
68{
69 return get_exec_time(t) >= get_exec_cost(t);
70}
71
72inline static lt_t budget_remaining(struct task_struct* t)
73{
74 if (!budget_exhausted(t))
75 return get_exec_cost(t) - get_exec_time(t);
76 else
77 /* avoid overflow */
78 return 0;
79}
80
81#define budget_enforced(t) (tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT)
82
83#define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \
84 == PRECISE_ENFORCEMENT)
85
86#define is_hrt(t) \ 73#define is_hrt(t) \
87 (tsk_rt(t)->task_params.cls == RT_CLASS_HARD) 74 (tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
88#define is_srt(t) \ 75#define is_srt(t) \
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 36647fee03e4..296bbf6f7af0 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -14,7 +14,7 @@ struct nested_info
14 struct binheap_node hp_binheap_node; 14 struct binheap_node hp_binheap_node;
15}; 15};
16 16
17static inline struct task_struct* top_priority(struct binheap_handle* handle) { 17static inline struct task_struct* top_priority(struct binheap* handle) {
18 if(!binheap_empty(handle)) { 18 if(!binheap_empty(handle)) {
19 return (struct task_struct*)(binheap_top_entry(handle, struct nested_info, hp_binheap_node)->hp_waiter_eff_prio); 19 return (struct task_struct*)(binheap_top_entry(handle, struct nested_info, hp_binheap_node)->hp_waiter_eff_prio);
20 } 20 }
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 04239c747f06..419ff0c88a65 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -26,7 +26,6 @@ static inline int lt_after_eq(lt_t a, lt_t b)
26typedef enum { 26typedef enum {
27 RT_CLASS_HARD, 27 RT_CLASS_HARD,
28 RT_CLASS_SOFT, 28 RT_CLASS_SOFT,
29 RT_CLASS_SOFT_W_SLIP,
30 RT_CLASS_BEST_EFFORT 29 RT_CLASS_BEST_EFFORT
31} task_class_t; 30} task_class_t;
32 31
@@ -36,11 +35,33 @@ typedef enum {
36 PRECISE_ENFORCEMENT /* budgets are enforced with hrtimers */ 35 PRECISE_ENFORCEMENT /* budgets are enforced with hrtimers */
37} budget_policy_t; 36} budget_policy_t;
38 37
38/* We use the common priority interpretation "lower index == higher priority",
39 * which is commonly used in fixed-priority schedulability analysis papers.
40 * So, a numerically lower priority value implies higher scheduling priority,
41 * with priority 1 being the highest priority. Priority 0 is reserved for
42 * priority boosting. LITMUS_MAX_PRIORITY denotes the maximum priority value
43 * range.
44 */
45
46#define LITMUS_MAX_PRIORITY 512
47#define LITMUS_HIGHEST_PRIORITY 1
48#define LITMUS_LOWEST_PRIORITY (LITMUS_MAX_PRIORITY - 1)
49
50/* Provide generic comparison macros for userspace,
51 * in case that we change this later. */
52#define litmus_higher_fixed_prio(a, b) (a < b)
53#define litmus_lower_fixed_prio(a, b) (a > b)
54#define litmus_is_valid_fixed_prio(p) \
55 ((p) >= LITMUS_HIGHEST_PRIORITY && \
56 (p) <= LITMUS_LOWEST_PRIORITY)
57
39struct rt_task { 58struct rt_task {
40 lt_t exec_cost; 59 lt_t exec_cost;
41 lt_t period; 60 lt_t period;
61 lt_t relative_deadline;
42 lt_t phase; 62 lt_t phase;
43 unsigned int cpu; 63 unsigned int cpu;
64 unsigned int priority;
44 task_class_t cls; 65 task_class_t cls;
45 budget_policy_t budget_policy; /* ignored by pfair */ 66 budget_policy_t budget_policy; /* ignored by pfair */
46}; 67};
@@ -107,6 +128,12 @@ struct rt_job {
107 /* How much service has this job received so far? */ 128 /* How much service has this job received so far? */
108 lt_t exec_time; 129 lt_t exec_time;
109 130
131 /* By how much did the prior job miss its deadline by?
132 * Value differs from tardiness in that lateness may
133 * be negative (when job finishes before its deadline).
134 */
135 long long lateness;
136
110 /* Which job is this. This is used to let user space 137 /* Which job is this. This is used to let user space
111 * specify which job to wait for, which is important if jobs 138 * specify which job to wait for, which is important if jobs
112 * overrun. If we just call sys_sleep_next_period() then we 139 * overrun. If we just call sys_sleep_next_period() then we
@@ -237,7 +264,7 @@ struct rt_param {
237 264
238#ifdef CONFIG_LITMUS_NESTED_LOCKING 265#ifdef CONFIG_LITMUS_NESTED_LOCKING
239 raw_spinlock_t hp_blocked_tasks_lock; 266 raw_spinlock_t hp_blocked_tasks_lock;
240 struct binheap_handle hp_blocked_tasks; 267 struct binheap hp_blocked_tasks;
241 268
242 /* pointer to lock upon which is currently blocked */ 269 /* pointer to lock upon which is currently blocked */
243 struct litmus_lock* blocked_lock; 270 struct litmus_lock* blocked_lock;
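With LITMUS_MAX_PRIORITY set to 512, valid fixed priorities run from 1 (highest) to 511 (lowest), and 0 is reserved for priority boosting. A small sketch of the comparison helpers defined above:

#include <litmus/rt_param.h>

/* Returns the numerically lower (i.e., higher-priority) of two valid
 * fixed priorities, or 0 if either value is out of range. */
static unsigned int higher_of(unsigned int a, unsigned int b)
{
	if (!litmus_is_valid_fixed_prio(a) || !litmus_is_valid_fixed_prio(b))
		return 0;	/* 0 is not a valid user priority */
	return litmus_higher_fixed_prio(a, b) ? a : b;
}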
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index b1b71f6c5f0c..7af12f49c600 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -309,34 +309,93 @@ feather_callback void do_sched_trace_migration(unsigned long id,
309 309
310#endif 310#endif
311 311
312#ifdef CONFIG_SCHED_LITMUS_TRACEPOINT
313
314#include <trace/events/litmus.h>
315
316#else
317
318/* Override trace macros to actually do nothing */
319#define trace_litmus_task_param(t)
320#define trace_litmus_task_release(t)
321#define trace_litmus_switch_to(t)
322#define trace_litmus_switch_away(prev)
323#define trace_litmus_task_completion(t, forced)
324#define trace_litmus_task_block(t)
325#define trace_litmus_task_resume(t)
326#define trace_litmus_sys_release(start)
327
328#endif
329
312 330
313#define SCHED_TRACE_BASE_ID 500 331#define SCHED_TRACE_BASE_ID 500
314 332
315 333
316#define sched_trace_task_name(t) \ 334#define sched_trace_task_name(t) \
317 SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, do_sched_trace_task_name, t) 335 SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, \
318#define sched_trace_task_param(t) \ 336 do_sched_trace_task_name, t)
319 SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, do_sched_trace_task_param, t) 337
320#define sched_trace_task_release(t) \ 338#define sched_trace_task_param(t) \
321 SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, do_sched_trace_task_release, t) 339 do { \
322#define sched_trace_task_switch_to(t) \ 340 SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, \
323 SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, do_sched_trace_task_switch_to, t) 341 do_sched_trace_task_param, t); \
324#define sched_trace_task_switch_away(t) \ 342 trace_litmus_task_param(t); \
325 SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t) 343 } while (0)
326#define sched_trace_task_completion(t, forced) \ 344
327 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \ 345#define sched_trace_task_release(t) \
328 (unsigned long) forced) 346 do { \
329#define sched_trace_task_block(t) \ 347 SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, \
330 SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t) 348 do_sched_trace_task_release, t); \
331#define sched_trace_task_resume(t) \ 349 trace_litmus_task_release(t); \
332 SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t) 350 } while (0)
333#define sched_trace_action(t, action) \ 351
334 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9, do_sched_trace_action, t, \ 352#define sched_trace_task_switch_to(t) \
335 (unsigned long) action); 353 do { \
336/* when is a pointer, it does not need an explicit cast to unsigned long */ 354 SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, \
337#define sched_trace_sys_release(when) \ 355 do_sched_trace_task_switch_to, t); \
338 SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when) 356 trace_litmus_switch_to(t); \
357 } while (0)
358
359#define sched_trace_task_switch_away(t) \
360 do { \
361 SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, \
362 do_sched_trace_task_switch_away, t); \
363 trace_litmus_switch_away(t); \
364 } while (0)
365
366#define sched_trace_task_completion(t, forced) \
367 do { \
368 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, \
369 do_sched_trace_task_completion, t, \
370 (unsigned long) forced); \
371 trace_litmus_task_completion(t, forced); \
372 } while (0)
373
374#define sched_trace_task_block(t) \
375 do { \
376 SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, \
377 do_sched_trace_task_block, t); \
378 trace_litmus_task_block(t); \
379 } while (0)
380
381#define sched_trace_task_resume(t) \
382 do { \
383 SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, \
384 do_sched_trace_task_resume, t); \
385 trace_litmus_task_resume(t); \
386 } while (0)
387
388#define sched_trace_action(t, action) \
389 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9, \
390 do_sched_trace_action, t, (unsigned long) action);
339 391
392/* when is a pointer, it does not need an explicit cast to unsigned long */
393#define sched_trace_sys_release(when) \
394 do { \
395 SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, \
396 do_sched_trace_sys_release, when); \
397 trace_litmus_sys_release(when); \
398 } while (0)
340 399
341#define sched_trace_tasklet_release(t) \ 400#define sched_trace_tasklet_release(t) \
342 SCHED_TRACE(SCHED_TRACE_BASE_ID + 11, do_sched_trace_tasklet_release, t) 401 SCHED_TRACE(SCHED_TRACE_BASE_ID + 11, do_sched_trace_tasklet_release, t)
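Each sched_trace_*() wrapper above now emits the existing Feather-Trace record and, when CONFIG_SCHED_LITMUS_TRACEPOINT is enabled, the matching litmus:* tracepoint from <trace/events/litmus.h>; with the option disabled, the trace_litmus_*() stubs compile away. A trivial plugin-side sketch:

#include <litmus/sched_trace.h>

static void my_release_hook(struct task_struct *t)
{
	/* One call feeds both the Feather-Trace stream and, if configured,
	 * the litmus_task_release tracepoint. */
	sched_trace_task_release(t);
}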
diff --git a/include/litmus/wait.h b/include/litmus/wait.h
new file mode 100644
index 000000000000..ce1347c355f8
--- /dev/null
+++ b/include/litmus/wait.h
@@ -0,0 +1,57 @@
1#ifndef _LITMUS_WAIT_H_
2#define _LITMUS_WAIT_H_
3
4struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
5
6/* wrap regular wait_queue_t head */
7struct __prio_wait_queue {
8 wait_queue_t wq;
9
10 /* some priority point */
11 lt_t priority;
12 /* break ties in priority by lower tie_breaker */
13 unsigned int tie_breaker;
14};
15
16typedef struct __prio_wait_queue prio_wait_queue_t;
17
18static inline void init_prio_waitqueue_entry(prio_wait_queue_t *pwq,
19 struct task_struct* t,
20 lt_t priority)
21{
22 init_waitqueue_entry(&pwq->wq, t);
23 pwq->priority = priority;
24 pwq->tie_breaker = 0;
25}
26
27static inline void init_prio_waitqueue_entry_tie(prio_wait_queue_t *pwq,
28 struct task_struct* t,
29 lt_t priority,
30 unsigned int tie_breaker)
31{
32 init_waitqueue_entry(&pwq->wq, t);
33 pwq->priority = priority;
34 pwq->tie_breaker = tie_breaker;
35}
36
37unsigned int __add_wait_queue_prio_exclusive(
38 wait_queue_head_t* head,
39 prio_wait_queue_t *new);
40
41static inline unsigned int add_wait_queue_prio_exclusive(
42 wait_queue_head_t* head,
43 prio_wait_queue_t *new)
44{
45 unsigned long flags;
46 unsigned int passed;
47
48 spin_lock_irqsave(&head->lock, flags);
49 passed = __add_wait_queue_prio_exclusive(head, new);
50
51 spin_unlock_irqrestore(&head->lock, flags);
52
53 return passed;
54}
55
56
57#endif
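A sketch of enqueuing the current task on a priority-ordered wait queue with the helpers above; my_wq, the priority point, and the tie-break sequence number are hypothetical, and a real caller would block before returning so that 'pwq' stays alive on its stack:

#include <linux/sched.h>	/* current */
#include <linux/wait.h>
#include <litmus/litmus.h>	/* lt_t */
#include <litmus/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);

static void queue_by_priority(lt_t prio_point, unsigned int seq)
{
	prio_wait_queue_t pwq;

	/* Among equal priority points, the lower tie_breaker goes first. */
	init_prio_waitqueue_entry_tie(&pwq, current, prio_point, seq);

	/* Takes the wait-queue lock and inserts the entry in priority
	 * order with exclusive wakeup semantics. */
	add_wait_queue_prio_exclusive(&my_wq, &pwq);
}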
diff --git a/include/trace/events/litmus.h b/include/trace/events/litmus.h
new file mode 100644
index 000000000000..0fffcee02be0
--- /dev/null
+++ b/include/trace/events/litmus.h
@@ -0,0 +1,231 @@
1/*
2 * LITMUS^RT kernel style scheduling tracepoints
3 */
4#undef TRACE_SYSTEM
5#define TRACE_SYSTEM litmus
6
7#if !defined(_SCHED_TASK_TRACEPOINT_H) || defined(TRACE_HEADER_MULTI_READ)
8#define _SCHED_TASK_TRACEPOINT_H
9
10#include <linux/tracepoint.h>
11
12#include <litmus/litmus.h>
13#include <litmus/rt_param.h>
14
15/*
16 * Tracing task admission
17 */
18TRACE_EVENT(litmus_task_param,
19
20 TP_PROTO(struct task_struct *t),
21
22 TP_ARGS(t),
23
24 TP_STRUCT__entry(
25 __field( pid_t, pid )
26 __field( unsigned int, job )
27 __field( lt_t, wcet )
28 __field( lt_t, period )
29 __field( lt_t, phase )
30 __field( int, partition )
31 ),
32
33 TP_fast_assign(
34 __entry->pid = t ? t->pid : 0;
35 __entry->job = t ? t->rt_param.job_params.job_no : 0;
36 __entry->wcet = get_exec_cost(t);
37 __entry->period = get_rt_period(t);
38 __entry->phase = get_rt_phase(t);
39 __entry->partition = get_partition(t);
40 ),
41
42 TP_printk("period(%d, %Lu).\nwcet(%d, %Lu).\n",
43 __entry->pid, __entry->period,
44 __entry->pid, __entry->wcet)
45);
46
47/*
48 * Tracing jobs release
49 */
50TRACE_EVENT(litmus_task_release,
51
52 TP_PROTO(struct task_struct *t),
53
54 TP_ARGS(t),
55
56 TP_STRUCT__entry(
57 __field( pid_t, pid )
58 __field( unsigned int, job )
59 __field( lt_t, release )
60 __field( lt_t, deadline )
61 ),
62
63 TP_fast_assign(
64 __entry->pid = t ? t->pid : 0;
65 __entry->job = t ? t->rt_param.job_params.job_no : 0;
66 __entry->release = get_release(t);
67 __entry->deadline = get_deadline(t);
68 ),
69
70 TP_printk("release(job(%u, %u)): %Lu\ndeadline(job(%u, %u)): %Lu\n",
71 __entry->pid, __entry->job, __entry->release,
72 __entry->pid, __entry->job, __entry->deadline)
73);
74
75/*
76 * Tracepoint for switching to new task
77 */
78TRACE_EVENT(litmus_switch_to,
79
80 TP_PROTO(struct task_struct *t),
81
82 TP_ARGS(t),
83
84 TP_STRUCT__entry(
85 __field( pid_t, pid )
86 __field( unsigned int, job )
87 __field( lt_t, when )
88 __field( lt_t, exec_time )
89 ),
90
91 TP_fast_assign(
92 __entry->pid = is_realtime(t) ? t->pid : 0;
93 __entry->job = is_realtime(t) ? t->rt_param.job_params.job_no : 0;
94 __entry->when = litmus_clock();
95 __entry->exec_time = get_exec_time(t);
96 ),
97
98 TP_printk("switch_to(job(%u, %u)): %Lu (exec: %Lu)\n",
99 __entry->pid, __entry->job,
100 __entry->when, __entry->exec_time)
101);
102
103/*
104 * Tracepoint for switching away previous task
105 */
106TRACE_EVENT(litmus_switch_away,
107
108 TP_PROTO(struct task_struct *t),
109
110 TP_ARGS(t),
111
112 TP_STRUCT__entry(
113 __field( pid_t, pid )
114 __field( unsigned int, job )
115 __field( lt_t, when )
116 __field( lt_t, exec_time )
117 ),
118
119 TP_fast_assign(
120 __entry->pid = is_realtime(t) ? t->pid : 0;
121 __entry->job = is_realtime(t) ? t->rt_param.job_params.job_no : 0;
122 __entry->when = litmus_clock();
123 __entry->exec_time = get_exec_time(t);
124 ),
125
126 TP_printk("switch_away(job(%u, %u)): %Lu (exec: %Lu)\n",
127 __entry->pid, __entry->job,
128 __entry->when, __entry->exec_time)
129);
130
131/*
132 * Tracing jobs completion
133 */
134TRACE_EVENT(litmus_task_completion,
135
136 TP_PROTO(struct task_struct *t, unsigned long forced),
137
138 TP_ARGS(t, forced),
139
140 TP_STRUCT__entry(
141 __field( pid_t, pid )
142 __field( unsigned int, job )
143 __field( lt_t, when )
144 __field( unsigned long, forced )
145 ),
146
147 TP_fast_assign(
148 __entry->pid = t ? t->pid : 0;
149 __entry->job = t ? t->rt_param.job_params.job_no : 0;
150 __entry->when = litmus_clock();
151 __entry->forced = forced;
152 ),
153
154 TP_printk("completed(job(%u, %u)): %Lu (forced: %lu)\n",
155 __entry->pid, __entry->job,
156 __entry->when, __entry->forced)
157);
158
159/*
160 * Trace blocking tasks.
161 */
162TRACE_EVENT(litmus_task_block,
163
164 TP_PROTO(struct task_struct *t),
165
166 TP_ARGS(t),
167
168 TP_STRUCT__entry(
169 __field( pid_t, pid )
170 __field( lt_t, when )
171 ),
172
173 TP_fast_assign(
174 __entry->pid = t ? t->pid : 0;
175 __entry->when = litmus_clock();
176 ),
177
178 TP_printk("(%u) blocks: %Lu\n", __entry->pid, __entry->when)
179);
180
181/*
182 * Tracing jobs resume
183 */
184TRACE_EVENT(litmus_task_resume,
185
186 TP_PROTO(struct task_struct *t),
187
188 TP_ARGS(t),
189
190 TP_STRUCT__entry(
191 __field( pid_t, pid )
192 __field( unsigned int, job )
193 __field( lt_t, when )
194 ),
195
196 TP_fast_assign(
197 __entry->pid = t ? t->pid : 0;
198 __entry->job = t ? t->rt_param.job_params.job_no : 0;
199 __entry->when = litmus_clock();
200 ),
201
202 TP_printk("resume(job(%u, %u)): %Lu\n",
203 __entry->pid, __entry->job, __entry->when)
204);
205
206/*
207 * Trace synchronous release
208 */
209TRACE_EVENT(litmus_sys_release,
210
211 TP_PROTO(lt_t *start),
212
213 TP_ARGS(start),
214
215 TP_STRUCT__entry(
216 __field( lt_t, rel )
217 __field( lt_t, when )
218 ),
219
220 TP_fast_assign(
221 __entry->rel = *start;
222 __entry->when = litmus_clock();
223 ),
224
225 TP_printk("SynRelease(%Lu) at %Lu\n", __entry->rel, __entry->when)
226);
227
228#endif /* _SCHED_TASK_TRACEPOINT_H */
229
230/* Must stay outside the protection */
231#include <trace/define_trace.h>
diff --git a/kernel/sched.c b/kernel/sched.c
index f3d9a69a3777..9e8d8698323b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -80,6 +80,9 @@
80#include "workqueue_sched.h" 80#include "workqueue_sched.h"
81#include "sched_autogroup.h" 81#include "sched_autogroup.h"
82 82
83#define CREATE_TRACE_POINTS
84#include <trace/events/sched.h>
85
83#include <litmus/sched_trace.h> 86#include <litmus/sched_trace.h>
84#include <litmus/trace.h> 87#include <litmus/trace.h>
85 88
@@ -89,9 +92,6 @@
89 92
90static void litmus_tick(struct rq*, struct task_struct*); 93static void litmus_tick(struct rq*, struct task_struct*);
91 94
92#define CREATE_TRACE_POINTS
93#include <trace/events/sched.h>
94
95/* 95/*
96 * Convert user-nice values [ -20 ... 0 ... 19 ] 96 * Convert user-nice values [ -20 ... 0 ... 19 ]
97 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], 97 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 8c156e4da528..95e0671e2aec 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -115,6 +115,52 @@ config SCHED_CPU_AFFINITY
115 115
116 Say Yes if unsure. 116 Say Yes if unsure.
117 117
118choice
119 prompt "EDF Tie-Break Behavior"
120 default EDF_TIE_BREAK_LATENESS_NORM
121 help
122 Allows the configuration of tie-breaking behavior when the deadlines
123 of two EDF-scheduled tasks are equal.
124
125 config EDF_TIE_BREAK_LATENESS
126 bool "Lateness-based Tie Break"
127 help
128 Break ties between two jobs, A and B, based upon the lateness of their
129 prior jobs. The job with the greatest lateness has priority. Note that
130 lateness has a negative value if the prior job finished before its
131 deadline.
132
133 config EDF_TIE_BREAK_LATENESS_NORM
134 bool "Normalized Lateness-based Tie Break"
135 help
136 Break ties between two jobs, A and B, based upon the lateness, normalized
137 by relative deadline, of their prior jobs. The job with the greatest
138 normalized lateness has priority. Note that lateness has a negative value
139 if the prior job finished before its deadline.
140
141 Normalized lateness tie-breaks are likely desireable over non-normalized
142 tie-breaks if the execution times and/or relative deadlines of tasks in a
143 task set vary greatly.
144
145 config EDF_TIE_BREAK_HASH
146 bool "Hash-based Tie Breaks"
147 help
148 Break ties between two jobs, A and B, with equal deadlines by using a
149 uniform hash; i.e.: hash(A.pid, A.job_num) < hash(B.pid, B.job_num). Job
150 A has ~50% of winning a given tie-break.
151
152 config EDF_PID_TIE_BREAK
153 bool "PID-based Tie Breaks"
154 help
155 Break ties based upon OS-assigned thread IDs. Use this option if
156 required by algorithm's real-time analysis or per-task response-time
157 jitter must be minimized.
158
159 NOTES:
160 * This tie-breaking method was default in Litmus 2012.2 and before.
161
162endchoice
163
118endmenu 164endmenu
119 165
120menu "Tracing" 166menu "Tracing"
@@ -174,6 +220,24 @@ config SCHED_TASK_TRACE_SHIFT
174 10 => 1k events 220 10 => 1k events
175 8 => 512 events 221 8 => 512 events
176 222
223config SCHED_LITMUS_TRACEPOINT
224 bool "Enable Event/Tracepoint Tracing for real-time task tracing"
225 depends on TRACEPOINTS
226 default n
227 help
228 Enable kernel-style events (tracepoint) for Litmus. Litmus events
229 trace the same functions as the above sched_trace_XXX(), but can
230 be enabled independently.
231 Litmus tracepoints can be recorded and analyzed together (single
232 time reference) with all other kernel tracing events (e.g.,
233 sched:sched_switch, etc.).
234
235 This also enables a quick way to visualize schedule traces using
236 trace-cmd utility and kernelshark visualizer.
237
238 Say Yes for debugging and visualization purposes.
239 Say No for overhead tracing.
240
177config SCHED_OVERHEAD_TRACE 241config SCHED_OVERHEAD_TRACE
178 bool "Record timestamps for overhead measurements" 242 bool "Record timestamps for overhead measurements"
179 depends on FEATHER_TRACE 243 depends on FEATHER_TRACE
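The EDF tie-break choice added to litmus/Kconfig above selects how edf_common.c (modified by this merge, but not shown in this excerpt) resolves equal deadlines. A hedged sketch of the lateness rule described under EDF_TIE_BREAK_LATENESS, using the get_lateness() accessor this merge adds to litmus.h; it is illustrative only, not the actual edf_common.c code:

#include <litmus/litmus.h>	/* get_deadline(), get_lateness() */

/* Returns nonzero if 'a' should be favored over 'b'. */
static int edf_lateness_tie_break(struct task_struct *a, struct task_struct *b)
{
	if (get_deadline(a) != get_deadline(b))
		return get_deadline(a) < get_deadline(b);

	/* Equal deadlines: favor the task whose previous job was later.
	 * get_lateness() can be negative when the prior job finished
	 * before its deadline. */
	return get_lateness(a) > get_lateness(b);
}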
diff --git a/litmus/Makefile b/litmus/Makefile
index 080cbf694a41..59c018560ee9 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -11,15 +11,16 @@ obj-y = sched_plugin.o litmus.o \
11 sync.o \ 11 sync.o \
12 rt_domain.o \ 12 rt_domain.o \
13 edf_common.o \ 13 edf_common.o \
14 fp_common.o \
14 fdso.o \ 15 fdso.o \
15 locking.o \ 16 locking.o \
16 srp.o \ 17 srp.o \
17 bheap.o \ 18 bheap.o \
18 binheap.o \ 19 binheap.o \
19 ctrldev.o \ 20 ctrldev.o \
20 sched_gsn_edf.o \ 21 sched_gsn_edf.o \
21 sched_psn_edf.o \ 22 sched_psn_edf.o \
22 kfmlp_lock.o 23 sched_pfp.o
23 24
24obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o 25obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
25obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o 26obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
30obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o 31obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
31obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o 32obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o
32 33
34obj-$(CONFIG_LITMUS_LOCKING) += kfmlp_lock.o
33obj-$(CONFIG_LITMUS_NESTED_LOCKING) += rsm_lock.o ikglp_lock.o 35obj-$(CONFIG_LITMUS_NESTED_LOCKING) += rsm_lock.o ikglp_lock.o
34obj-$(CONFIG_LITMUS_SOFTIRQD) += litmus_softirq.o 36obj-$(CONFIG_LITMUS_SOFTIRQD) += litmus_softirq.o
35obj-$(CONFIG_LITMUS_PAI_SOFTIRQD) += litmus_pai_softirq.o 37obj-$(CONFIG_LITMUS_PAI_SOFTIRQD) += litmus_pai_softirq.o
diff --git a/litmus/binheap.c b/litmus/binheap.c
index 8d42403ad52c..40a913f4b5a7 100644
--- a/litmus/binheap.c
+++ b/litmus/binheap.c
@@ -1,10 +1,8 @@
1#include <litmus/binheap.h> 1#include <litmus/binheap.h>
2 2
3//extern void dump_node_data(struct binheap_node* parent, struct binheap_node* child); 3/* Returns true of the root ancestor of node is the root of the given heap. */
4//extern void dump_node_data2(struct binheap_handle *handle, struct binheap_node* bad_node);
5
6int binheap_is_in_this_heap(struct binheap_node *node, 4int binheap_is_in_this_heap(struct binheap_node *node,
7 struct binheap_handle* heap) 5 struct binheap* heap)
8{ 6{
9 if(!binheap_is_in_heap(node)) { 7 if(!binheap_is_in_heap(node)) {
10 return 0; 8 return 0;
@@ -17,9 +15,10 @@ int binheap_is_in_this_heap(struct binheap_node *node,
17 return (node == heap->root); 15 return (node == heap->root);
18} 16}
19 17
18
20/* Update the node reference pointers. Same logic as Litmus binomial heap. */ 19/* Update the node reference pointers. Same logic as Litmus binomial heap. */
21static void __update_ref(struct binheap_node *parent, 20static void __update_ref(struct binheap_node *parent,
22 struct binheap_node *child) 21 struct binheap_node *child)
23{ 22{
24 *(parent->ref_ptr) = child; 23 *(parent->ref_ptr) = child;
25 *(child->ref_ptr) = parent; 24 *(child->ref_ptr) = parent;
@@ -27,15 +26,11 @@ static void __update_ref(struct binheap_node *parent,
27 swap(parent->ref_ptr, child->ref_ptr); 26 swap(parent->ref_ptr, child->ref_ptr);
28} 27}
29 28
29
30/* Swaps data between two nodes. */ 30/* Swaps data between two nodes. */
31static void __binheap_swap(struct binheap_node *parent, 31static void __binheap_swap(struct binheap_node *parent,
32 struct binheap_node *child) 32 struct binheap_node *child)
33{ 33{
34// if(parent == BINHEAP_POISON || child == BINHEAP_POISON) {
35// dump_node_data(parent, child);
36// BUG();
37// }
38
39 swap(parent->data, child->data); 34 swap(parent->data, child->data);
40 __update_ref(parent, child); 35 __update_ref(parent, child);
41} 36}
@@ -44,9 +39,9 @@ static void __binheap_swap(struct binheap_node *parent,
44/* Swaps memory and data between two nodes. Actual nodes swap instead of 39/* Swaps memory and data between two nodes. Actual nodes swap instead of
45 * just data. Needed when we delete nodes from the heap. 40 * just data. Needed when we delete nodes from the heap.
46 */ 41 */
47static void __binheap_swap_safe(struct binheap_handle *handle, 42static void __binheap_swap_safe(struct binheap *handle,
48 struct binheap_node *a, 43 struct binheap_node *a,
49 struct binheap_node *b) 44 struct binheap_node *b)
50{ 45{
51 swap(a->data, b->data); 46 swap(a->data, b->data);
52 __update_ref(a, b); 47 __update_ref(a, b);
@@ -130,7 +125,7 @@ static void __binheap_swap_safe(struct binheap_handle *handle,
130 * Update the pointer to the last node in the complete binary tree. 125 * Update the pointer to the last node in the complete binary tree.
131 * Called internally after the root node has been deleted. 126 * Called internally after the root node has been deleted.
132 */ 127 */
133static void __binheap_update_last(struct binheap_handle *handle) 128static void __binheap_update_last(struct binheap *handle)
134{ 129{
135 struct binheap_node *temp = handle->last; 130 struct binheap_node *temp = handle->last;
136 131
@@ -154,16 +149,15 @@ static void __binheap_update_last(struct binheap_handle *handle)
154 temp = temp->left; 149 temp = temp->left;
155 } 150 }
156 151
157 //BUG_ON(!(temp->left == NULL && temp->right == NULL));
158
159 handle->last = temp; 152 handle->last = temp;
160} 153}
161 154
155
162/** 156/**
163 * Update the pointer to the node that will take the next inserted node. 157 * Update the pointer to the node that will take the next inserted node.
164 * Called internally after a node has been inserted. 158 * Called internally after a node has been inserted.
165 */ 159 */
166static void __binheap_update_next(struct binheap_handle *handle) 160static void __binheap_update_next(struct binheap *handle)
167{ 161{
168 struct binheap_node *temp = handle->next; 162 struct binheap_node *temp = handle->next;
169 163
@@ -188,34 +182,22 @@ static void __binheap_update_next(struct binheap_handle *handle)
188 182
189 183
190/* bubble node up towards root */ 184/* bubble node up towards root */
191static void __binheap_bubble_up( 185static void __binheap_bubble_up(struct binheap *handle,
192 struct binheap_handle *handle, 186 struct binheap_node *node)
193 struct binheap_node *node)
194{ 187{
195 //BUG_ON(!binheap_is_in_heap(node)); 188 /* let BINHEAP_POISON data bubble to the top */
196// if(!binheap_is_in_heap(node))
197// {
198// dump_node_data2(handle, node);
199// BUG();
200// }
201 189
202 while((node->parent != NULL) && 190 while((node->parent != NULL) &&
203 ((node->data == BINHEAP_POISON) /* let BINHEAP_POISON data bubble to the top */ || 191 ((node->data == BINHEAP_POISON) ||
204 handle->compare(node, node->parent))) { 192 handle->compare(node, node->parent))) {
205 __binheap_swap(node->parent, node); 193 __binheap_swap(node->parent, node);
206 node = node->parent; 194 node = node->parent;
207
208// if(!binheap_is_in_heap(node))
209// {
210// dump_node_data2(handle, node);
211// BUG();
212// }
213 } 195 }
214} 196}
215 197
216 198
217/* bubble node down, swapping with min-child */ 199/* bubble node down, swapping with min-child */
218static void __binheap_bubble_down(struct binheap_handle *handle) 200static void __binheap_bubble_down(struct binheap *handle)
219{ 201{
220 struct binheap_node *node = handle->root; 202 struct binheap_node *node = handle->root;
221 203
@@ -242,17 +224,10 @@ static void __binheap_bubble_down(struct binheap_handle *handle)
242} 224}
243 225
244 226
245
246void __binheap_add(struct binheap_node *new_node, 227void __binheap_add(struct binheap_node *new_node,
247 struct binheap_handle *handle, 228 struct binheap *handle,
248 void *data) 229 void *data)
249{ 230{
250// if(binheap_is_in_heap(new_node))
251// {
252// dump_node_data2(handle, new_node);
253// BUG();
254// }
255
256 new_node->data = data; 231 new_node->data = data;
257 new_node->ref = new_node; 232 new_node->ref = new_node;
258 new_node->ref_ptr = &(new_node->ref); 233 new_node->ref_ptr = &(new_node->ref);
@@ -296,7 +271,6 @@ void __binheap_add(struct binheap_node *new_node,
296} 271}
297 272
298 273
299
300/** 274/**
301 * Removes the root node from the heap. The node is removed after coalescing 275 * Removes the root node from the heap. The node is removed after coalescing
302 * the binheap_node with its original data pointer at the root of the tree. 276 * the binheap_node with its original data pointer at the root of the tree.
@@ -304,17 +278,11 @@ void __binheap_add(struct binheap_node *new_node,
304 * The 'last' node in the tree is then swapped up to the root and bubbled 278 * The 'last' node in the tree is then swapped up to the root and bubbled
305 * down. 279 * down.
306 */ 280 */
307void __binheap_delete_root(struct binheap_handle *handle, 281void __binheap_delete_root(struct binheap *handle,
308 struct binheap_node *container) 282 struct binheap_node *container)
309{ 283{
310 struct binheap_node *root = handle->root; 284 struct binheap_node *root = handle->root;
311 285
312// if(!binheap_is_in_heap(container))
313// {
314// dump_node_data2(handle, container);
315// BUG();
316// }
317
318 if(root != container) { 286 if(root != container) {
319 /* coalesce */ 287 /* coalesce */
320 __binheap_swap_safe(handle, root, container); 288 __binheap_swap_safe(handle, root, container);
@@ -392,23 +360,11 @@ void __binheap_delete_root(struct binheap_handle *handle,
392 * and then delete to root. 360 * and then delete to root.
393 */ 361 */
394void __binheap_delete(struct binheap_node *node_to_delete, 362void __binheap_delete(struct binheap_node *node_to_delete,
395 struct binheap_handle *handle) 363 struct binheap *handle)
396{ 364{
397 struct binheap_node *target = node_to_delete->ref; 365 struct binheap_node *target = node_to_delete->ref;
398 void *temp_data = target->data; 366 void *temp_data = target->data;
399 367
400// if(!binheap_is_in_heap(node_to_delete))
401// {
402// dump_node_data2(handle, node_to_delete);
403// BUG();
404// }
405//
406// if(!binheap_is_in_heap(target))
407// {
408// dump_node_data2(handle, target);
409// BUG();
410// }
411
412 /* temporarily set data to null to allow node to bubble up to the top. */ 368 /* temporarily set data to null to allow node to bubble up to the top. */
413 target->data = BINHEAP_POISON; 369 target->data = BINHEAP_POISON;
414 370
@@ -416,28 +372,17 @@ void __binheap_delete(struct binheap_node *node_to_delete,
416 __binheap_delete_root(handle, node_to_delete); 372 __binheap_delete_root(handle, node_to_delete);
417 373
418 node_to_delete->data = temp_data; /* restore node data pointer */ 374 node_to_delete->data = temp_data; /* restore node data pointer */
419 //node_to_delete->parent = BINHEAP_POISON; /* poison the node */
420} 375}
421 376
377
422/** 378/**
423 * Bubble up a node whose pointer has decreased in value. 379 * Bubble up a node whose pointer has decreased in value.
424 */ 380 */
425void __binheap_decrease(struct binheap_node *orig_node, 381void __binheap_decrease(struct binheap_node *orig_node,
426 struct binheap_handle *handle) 382 struct binheap *handle)
427{ 383{
428 struct binheap_node *target = orig_node->ref; 384 struct binheap_node *target = orig_node->ref;
429 385
430// if(!binheap_is_in_heap(orig_node))
431// {
432// dump_node_data2(handle, orig_node);
433// BUG();
434// }
435//
436// if(!binheap_is_in_heap(target))
437// {
438// dump_node_data2(handle, target);
439// BUG();
440// }
441//
442 __binheap_bubble_up(handle, target); 386 __binheap_bubble_up(handle, target);
443} 387}
388
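
For context, __binheap_delete() above works by poisoning the victim's data pointer so that it outranks everything, bubbling it to the root, and then reusing the ordinary delete-root path. A minimal userspace sketch of the same delete-by-poison idea on a plain array min-heap (illustrative names only, not the kernel's binheap API):

#include <stdio.h>

#define POISON (-2147483647 - 1)   /* acts like "minus infinity" */

static int heap[64];
static int size;

static void swap(int i, int j) { int t = heap[i]; heap[i] = heap[j]; heap[j] = t; }

static void bubble_up(int i)
{
        while (i > 0 && heap[i] < heap[(i - 1) / 2]) {   /* POISON always wins */
                swap(i, (i - 1) / 2);
                i = (i - 1) / 2;
        }
}

static void bubble_down(int i)
{
        for (;;) {
                int l = 2 * i + 1, r = 2 * i + 2, m = i;
                if (l < size && heap[l] < heap[m]) m = l;
                if (r < size && heap[r] < heap[m]) m = r;
                if (m == i) break;
                swap(i, m);
                i = m;
        }
}

static void insert(int v) { heap[size++] = v; bubble_up(size - 1); }

static void delete_at(int i)
{
        heap[i] = POISON;        /* "poison" the node ... */
        bubble_up(i);            /* ... so it floats to the root ... */
        heap[0] = heap[--size];  /* ... then do an ordinary delete-root */
        bubble_down(0);
}

int main(void)
{
        int i;
        insert(30); insert(10); insert(20); insert(40);
        delete_at(2);                      /* remove an arbitrary element */
        for (i = 0; i < size; i++)
                printf("%d\n", heap[i]);   /* remaining keys, heap-ordered */
        return 0;
}

The kernel version additionally swaps container nodes so that the caller's binheap_node keeps its original data pointer after the delete, which the array version has no need for.
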
diff --git a/litmus/budget.c b/litmus/budget.c
index 310e9a3d4172..f7712be29adb 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -5,6 +5,8 @@
5#include <litmus/litmus.h> 5#include <litmus/litmus.h>
6#include <litmus/preempt.h> 6#include <litmus/preempt.h>
7 7
8#include <litmus/budget.h>
9
8struct enforcement_timer { 10struct enforcement_timer {
9 /* The enforcement timer is used to accurately police 11 /* The enforcement timer is used to accurately police
10 * slice budgets. */ 12 * slice budgets. */
diff --git a/litmus/ctrldev.c b/litmus/ctrldev.c
index 6677a67cc945..9969ab17c190 100644
--- a/litmus/ctrldev.c
+++ b/litmus/ctrldev.c
@@ -30,27 +30,19 @@ static int alloc_ctrl_page(struct task_struct *t)
30static int map_ctrl_page(struct task_struct *t, struct vm_area_struct* vma) 30static int map_ctrl_page(struct task_struct *t, struct vm_area_struct* vma)
31{ 31{
32 int err; 32 int err;
33 unsigned long pfn;
34 33
35 struct page* ctrl = virt_to_page(tsk_rt(t)->ctrl_page); 34 struct page* ctrl = virt_to_page(tsk_rt(t)->ctrl_page);
36 35
37 /* Increase ref count. Is decreased when vma is destroyed. */
38 get_page(ctrl);
39
40 /* compute page frame number */
41 pfn = page_to_pfn(ctrl);
42
43 TRACE_CUR(CTRL_NAME 36 TRACE_CUR(CTRL_NAME
44 ": mapping %p (pfn:%lx, %lx) to 0x%lx (prot:%lx)\n", 37 ": mapping %p (pfn:%lx) to 0x%lx (prot:%lx)\n",
45 tsk_rt(t)->ctrl_page, pfn, page_to_pfn(ctrl), vma->vm_start, 38 tsk_rt(t)->ctrl_page,page_to_pfn(ctrl), vma->vm_start,
46 vma->vm_page_prot); 39 vma->vm_page_prot);
47 40
48 /* Map it into the vma. Make sure to use PAGE_SHARED, otherwise 41 /* Map it into the vma. */
49 * userspace actually gets a copy-on-write page. */ 42 err = vm_insert_page(vma, vma->vm_start, ctrl);
50 err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, PAGE_SHARED);
51 43
52 if (err) 44 if (err)
53 TRACE_CUR(CTRL_NAME ": remap_pfn_range() failed (%d)\n", err); 45 TRACE_CUR(CTRL_NAME ": vm_insert_page() failed (%d)\n", err);
54 46
55 return err; 47 return err;
56} 48}
@@ -63,19 +55,19 @@ static void litmus_ctrl_vm_close(struct vm_area_struct* vma)
63 TRACE_CUR(CTRL_NAME 55 TRACE_CUR(CTRL_NAME
64 ": %p:%p vma:%p vma->vm_private_data:%p closed.\n", 56 ": %p:%p vma:%p vma->vm_private_data:%p closed.\n",
65 (void*) vma->vm_start, (void*) vma->vm_end, vma, 57 (void*) vma->vm_start, (void*) vma->vm_end, vma,
66 vma->vm_private_data, current->comm, 58 vma->vm_private_data);
67 current->pid);
68} 59}
69 60
70static int litmus_ctrl_vm_fault(struct vm_area_struct* vma, 61static int litmus_ctrl_vm_fault(struct vm_area_struct* vma,
71 struct vm_fault* vmf) 62 struct vm_fault* vmf)
72{ 63{
73 /* This function should never be called, since 64 TRACE_CUR("%s flags=0x%x (off:%ld)\n", __FUNCTION__,
74 * all pages should have been mapped by mmap() 65 vma->vm_flags, vmf->pgoff);
75 * already. */ 66
76 TRACE_CUR("%s flags=0x%x\n", __FUNCTION__, vma->vm_flags); 67 /* This function should never be called, since all pages should have
68 * been mapped by mmap() already. */
69 WARN_ONCE(1, "Page faults should be impossible in the control page\n");
77 70
78 /* nope, you only get one page */
79 return VM_FAULT_SIGBUS; 71 return VM_FAULT_SIGBUS;
80} 72}
81 73
@@ -103,9 +95,16 @@ static int litmus_ctrl_mmap(struct file* filp, struct vm_area_struct* vma)
103 return -EINVAL; 95 return -EINVAL;
104 96
105 vma->vm_ops = &litmus_ctrl_vm_ops; 97 vma->vm_ops = &litmus_ctrl_vm_ops;
106 /* this mapping should not be kept across forks, 98 /* This mapping should not be kept across forks,
107 * and cannot be expanded */ 99 * cannot be expanded, and is not a "normal" page. */
108 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; 100 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_IO;
101
102 /* We don't want the first write access to trigger a "minor" page fault
103 * to mark the page as dirty. This is transient, private memory, we
104 * don't care if it was touched or not. __S011 means RW access, but not
105 * execute, and avoids copy-on-write behavior.
106 * See protection_map in mmap.c. */
107 vma->vm_page_prot = __S011;
109 108
110 err = alloc_ctrl_page(current); 109 err = alloc_ctrl_page(current);
111 if (!err) 110 if (!err)
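
The ctrldev hunk above replaces remap_pfn_range() with vm_insert_page() and marks the VMA VM_DONTCOPY | VM_DONTEXPAND | VM_IO. As a rough illustration of that pattern, here is a minimal driver mmap handler in the same style; example_mmap() and its page allocation are assumptions made for the sketch, the real control page is allocated per task elsewhere in ctrldev.c:

#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct page *page;
        int err;

        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;                 /* expose exactly one page */

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return -ENOMEM;

        /* keep the mapping out of forks and don't let it grow */
        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

        /* vm_insert_page() takes its own reference and installs a PTE for an
         * ordinary, refcounted page -- no pfn arithmetic, no PAGE_SHARED */
        err = vm_insert_page(vma, vma->vm_start, page);

        /* drop our reference; the mapping's reference keeps the page alive */
        put_page(page);
        return err;
}

Because the inserted page stays a normal refcounted page, the copy-on-write pitfall that the old remap_pfn_range() comment warned about no longer applies.
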
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index b346bdd65b3b..a1cdc10ea6f1 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -18,6 +18,30 @@
18 18
19#include <litmus/edf_common.h> 19#include <litmus/edf_common.h>
20 20
21#ifdef CONFIG_EDF_TIE_BREAK_LATENESS_NORM
22#include <litmus/fpmath.h>
23#endif
24
25#ifdef CONFIG_EDF_TIE_BREAK_HASH
26#include <linux/hash.h>
27static inline long edf_hash(struct task_struct *t)
28{
29 /* pid is 32 bits, so normally we would shove that into the
30 * upper 32 bits and put the job number in the bottom
31 * and hash the 64-bit number with hash_64(). Sadly,
32 * in testing, hash_64() doesn't distribute keys where the
33 * upper bits are close together (as would be the case with
34 * pids) and job numbers are equal (as would be the case with
35 * synchronous task sets with all relative deadlines equal).
36 *
37 * A 2006 Linux patch proposed the following solution
38 * (but for some reason it wasn't accepted...).
39 *
40 * At least this workaround works for 32-bit systems as well.
41 */
42 return hash_32(hash_32((u32)tsk_rt(t)->job_params.job_no, 32) ^ t->pid, 32);
43}
44#endif
21 45
22 46
23/* edf_higher_prio - returns true if first has a higher EDF priority 47/* edf_higher_prio - returns true if first has a higher EDF priority
@@ -88,58 +112,90 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
88 112
89#endif 113#endif
90 114
91// // rate-monotonic for testing
92// if (!is_realtime(second_task)) {
93// return true;
94// }
95//
96// if (shorter_period(first_task, second_task)) {
97// return true;
98// }
99//
100// if (get_period(first_task) == get_period(second_task)) {
101// if (first_task->pid < second_task->pid) {
102// return true;
103// }
104// else if (first_task->pid == second_task->pid) {
105// return !second->rt_param.inh_task;
106// }
107// }
108
109 if (!is_realtime(second_task)) { 115 if (!is_realtime(second_task)) {
110 return true; 116 return 1;
111 } 117 }
112 118 else if (earlier_deadline(first_task, second_task)) {
113 if (earlier_deadline(first_task, second_task)) { 119 return 1;
114 return true;
115 } 120 }
116 if (get_deadline(first_task) == get_deadline(second_task)) { 121 else if (get_deadline(first_task) == get_deadline(second_task)) {
117 122 /* Need to tie break. All methods must set pid_break to 0/1 if
118 if (shorter_period(first_task, second_task)) { 123 * first_task does not have priority over second_task.
119 return true; 124 */
125 int pid_break;
126
127#if defined(CONFIG_EDF_TIE_BREAK_LATENESS)
128 /* Tie break by lateness. Jobs with greater lateness get
129 * priority. This should spread tardiness across all tasks,
130 * especially in task sets where all tasks have the same
131 * period and relative deadlines.
132 */
133 if (get_lateness(first_task) > get_lateness(second_task)) {
134 return 1;
135 }
136 pid_break = (get_lateness(first_task) == get_lateness(second_task));
137
138
139#elif defined(CONFIG_EDF_TIE_BREAK_LATENESS_NORM)
140 /* Tie break by lateness, normalized by relative deadline. Jobs with
141 * greater normalized lateness get priority.
142 *
143 * Note: Considered using the algebraically equivalent
144 * lateness(first)*relative_deadline(second) >
145 lateness(second)*relative_deadline(first)
146 * to avoid fixed-point math, but values are prone to overflow if inputs
147 * are on the order of several seconds, even in 64-bit.
148 */
149 fp_t fnorm = _frac(get_lateness(first_task),
150 get_rt_relative_deadline(first_task));
151 fp_t snorm = _frac(get_lateness(second_task),
152 get_rt_relative_deadline(second_task));
153 if (_gt(fnorm, snorm)) {
154 return 1;
155 }
156 pid_break = _eq(fnorm, snorm);
157
158
159#elif defined(CONFIG_EDF_TIE_BREAK_HASH)
160 /* Tie break by comparing hashes of the (pid, job#) tuple. There should be
161 * a 50% chance that first_task has a higher priority than second_task.
162 */
163 long fhash = edf_hash(first_task);
164 long shash = edf_hash(second_task);
165 if (fhash < shash) {
166 return 1;
120 } 167 }
121 if (get_rt_period(first_task) == get_rt_period(second_task)) { 168 pid_break = (fhash == shash);
169#else
170
171
172 /* CONFIG_EDF_PID_TIE_BREAK */
173 pid_break = 1; // fall through to tie-break by pid;
174#endif
175
176 /* Tie break by pid */
177 if(pid_break) {
122 if (first_task->pid < second_task->pid) { 178 if (first_task->pid < second_task->pid) {
123 return true; 179 return 1;
124 } 180 }
125 if (first_task->pid == second_task->pid) { 181 else if (first_task->pid == second_task->pid) {
126#ifdef CONFIG_LITMUS_SOFTIRQD 182#ifdef CONFIG_LITMUS_SOFTIRQD
127 if (first_task->rt_param.is_proxy_thread < 183 if (first_task->rt_param.is_proxy_thread <
128 second_task->rt_param.is_proxy_thread) { 184 second_task->rt_param.is_proxy_thread) {
129 return true; 185 return 1;
130 }
131 if(first_task->rt_param.is_proxy_thread == second_task->rt_param.is_proxy_thread) {
132 return !second->rt_param.inh_task;
133 } 186 }
134#else
135 return !second->rt_param.inh_task;
136#endif 187#endif
188 /* If the PIDs are the same then the task with the
189 * inherited priority wins.
190 */
191 if (!second_task->rt_param.inh_task) {
192 return 1;
193 }
137 } 194 }
138
139 } 195 }
140 } 196 }
141 197
142 return false; 198 return 0; /* fall-through. prio(second_task) > prio(first_task) */
143} 199}
144 200
145 201
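
The hash tie-break above hashes the (job number, pid) pair twice with hash_32() so that neither of two otherwise-identical tasks is systematically favored. A standalone sketch of the decision rule; hash32() mirrors the multiplicative hash_32() of this kernel generation (the 0x9e370001 constant is an assumption of the sketch), and edf_hash_tie_break() is illustrative, not the kernel function:

#include <stdio.h>
#include <stdint.h>

static uint32_t hash32(uint32_t val, unsigned int bits)
{
        return (uint32_t)(val * 0x9e370001u) >> (32 - bits);
}

/* returns 1 if (pid1, job1) wins the tie, 0 if (pid2, job2) does */
static int edf_hash_tie_break(int pid1, uint32_t job1, int pid2, uint32_t job2)
{
        uint32_t h1 = hash32(hash32(job1, 32) ^ (uint32_t)pid1, 32);
        uint32_t h2 = hash32(hash32(job2, 32) ^ (uint32_t)pid2, 32);

        if (h1 != h2)
                return h1 < h2;
        return pid1 < pid2;     /* final fall-back: tie-break by pid */
}

int main(void)
{
        uint32_t job;
        int wins = 0;

        /* two synchronous tasks with equal deadlines: count how often task 100
         * beats task 101 over 1000 consecutive jobs -- expect roughly half */
        for (job = 0; job < 1000; job++)
                wins += edf_hash_tie_break(100, job, 101, job);

        printf("task 100 won %d of 1000 ties\n", wins);
        return 0;
}

Over many synchronous jobs the win rate should hover around 50%, which is the property the comment in the hunk is after.
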
diff --git a/litmus/fdso.c b/litmus/fdso.c
index 18fc61b6414a..bac6a35fa17d 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -36,6 +36,10 @@ static const struct fdso_ops* fdso_ops[] = {
36 &generic_affinity_ops, /* KFMLP_SIMPLE_GPU_AFF_OBS */ 36 &generic_affinity_ops, /* KFMLP_SIMPLE_GPU_AFF_OBS */
37 &generic_affinity_ops, /* KFMLP_GPU_AFF_OBS */ 37 &generic_affinity_ops, /* KFMLP_GPU_AFF_OBS */
38#endif 38#endif
39 &generic_lock_ops, /* MPCP_SEM */
40 &generic_lock_ops, /* MPCP_VS_SEM */
41 &generic_lock_ops, /* DPCP_SEM */
42 &generic_lock_ops, /* PCP_SEM */
39}; 43};
40 44
41static int fdso_create(void** obj_ref, obj_type_t type, void* __user config) 45static int fdso_create(void** obj_ref, obj_type_t type, void* __user config)
diff --git a/litmus/fp_common.c b/litmus/fp_common.c
new file mode 100644
index 000000000000..31fc2db20adf
--- /dev/null
+++ b/litmus/fp_common.c
@@ -0,0 +1,119 @@
1/*
2 * litmus/fp_common.c
3 *
4 * Common functions for fixed-priority scheduler.
5 */
6
7#include <linux/percpu.h>
8#include <linux/sched.h>
9#include <linux/list.h>
10
11#include <litmus/litmus.h>
12#include <litmus/sched_plugin.h>
13#include <litmus/sched_trace.h>
14
15#include <litmus/fp_common.h>
16
17/* fp_higher_prio - returns true if first has a higher static priority
18 * than second. Priority ties are broken by PID.
19 *
20 * both first and second may be NULL
21 */
22int fp_higher_prio(struct task_struct* first,
23 struct task_struct* second)
24{
25 struct task_struct *first_task = first;
26 struct task_struct *second_task = second;
27
28 /* There is no point in comparing a task to itself. */
29 if (unlikely(first && first == second)) {
30 TRACE_TASK(first,
31 "WARNING: pointless FP priority comparison.\n");
32 return 0;
33 }
34
35
36 /* check for NULL tasks */
37 if (!first || !second)
38 return first && !second;
39
40#ifdef CONFIG_LITMUS_LOCKING
41
42 /* Check for inherited priorities. Change task
43 * used for comparison in such a case.
44 */
45 if (unlikely(first->rt_param.inh_task))
46 first_task = first->rt_param.inh_task;
47 if (unlikely(second->rt_param.inh_task))
48 second_task = second->rt_param.inh_task;
49
50 /* Check for priority boosting. Tie-break by start of boosting.
51 */
52 if (unlikely(is_priority_boosted(first_task))) {
53 /* first_task is boosted, how about second_task? */
54 if (!is_priority_boosted(second_task) ||
55 lt_before(get_boost_start(first_task),
56 get_boost_start(second_task)))
57 return 1;
58 else
59 return 0;
60 } else if (unlikely(is_priority_boosted(second_task)))
61 /* second_task is boosted, first is not*/
62 return 0;
63
64#endif
65
66
67 return !is_realtime(second_task) ||
68
69 get_priority(first_task) < get_priority(second_task) ||
70
71 /* Break by PID.
72 */
73 (get_priority(first_task) == get_priority(second_task) &&
74 (first_task->pid < second_task->pid ||
75
76 /* If the PIDs are the same then the task with the inherited
77 * priority wins.
78 */
79 (first_task->pid == second_task->pid &&
80 !second->rt_param.inh_task)));
81}
82
83int fp_ready_order(struct bheap_node* a, struct bheap_node* b)
84{
85 return fp_higher_prio(bheap2task(a), bheap2task(b));
86}
87
88void fp_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
89 release_jobs_t release)
90{
91 rt_domain_init(rt, fp_ready_order, resched, release);
92}
93
94/* need_to_preempt - check whether the task t needs to be preempted
95 */
96int fp_preemption_needed(struct fp_prio_queue *q, struct task_struct *t)
97{
98 struct task_struct *pending;
99
100 pending = fp_prio_peek(q);
101
102 if (!pending)
103 return 0;
104 if (!t)
105 return 1;
106
107 /* make sure to get non-rt stuff out of the way */
108 return !is_realtime(t) || fp_higher_prio(pending, t);
109}
110
111void fp_prio_queue_init(struct fp_prio_queue* q)
112{
113 int i;
114
115 for (i = 0; i < FP_PRIO_BIT_WORDS; i++)
116 q->bitmask[i] = 0;
117 for (i = 0; i < LITMUS_MAX_PRIORITY; i++)
118 bheap_init(&q->queue[i]);
119}
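
fp_prio_queue_init() above zeroes a bitmask plus one heap per priority level; the bitmask is what lets the peek/take paths locate the highest-priority non-empty bucket with a single find-first-set. A compact sketch of that bitmap-indexed bucket idea, with plain FIFO buckets instead of bheaps and only 32 levels (illustrative, not the kernel structure):

#include <stdio.h>

#define LEVELS 32

struct bucket { int task[8]; int head, tail; };

static struct bucket q[LEVELS];
static unsigned int nonempty;           /* bit i set <=> q[i] holds a task */

static void enqueue(int prio, int task)
{
        q[prio].task[q[prio].tail++ % 8] = task;
        nonempty |= 1u << prio;
}

static int peek_highest(void)           /* smaller index = higher priority */
{
        int prio;

        if (!nonempty)
                return -1;
        prio = __builtin_ctz(nonempty); /* lowest set bit, found in O(1) */
        return q[prio].task[q[prio].head % 8];
}

static int dequeue_highest(void)
{
        int prio, task;

        if (!nonempty)
                return -1;
        prio = __builtin_ctz(nonempty);
        task = q[prio].task[q[prio].head++ % 8];
        if (q[prio].head == q[prio].tail)
                nonempty &= ~(1u << prio);
        return task;
}

int main(void)
{
        enqueue(5, 501);
        enqueue(2, 201);
        enqueue(5, 502);
        printf("peek: %d\n", peek_highest());   /* 201 */
        printf("%d\n", dequeue_highest());      /* 201 */
        printf("%d\n", dequeue_highest());      /* 501 */
        printf("%d\n", dequeue_highest());      /* 502 */
        return 0;
}
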
diff --git a/litmus/jobs.c b/litmus/jobs.c
index 1d97462cc128..fb093c03d53d 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -6,26 +6,13 @@
6#include <litmus/litmus.h> 6#include <litmus/litmus.h>
7#include <litmus/jobs.h> 7#include <litmus/jobs.h>
8 8
9void prepare_for_next_period(struct task_struct *t) 9static inline void setup_release(struct task_struct *t, lt_t release)
10{ 10{
11 BUG_ON(!t);
12 /* prepare next release */ 11 /* prepare next release */
13 12 t->rt_param.job_params.release = release;
14 if(tsk_rt(t)->task_params.cls == RT_CLASS_SOFT_W_SLIP) { 13 t->rt_param.job_params.deadline = release + get_rt_relative_deadline(t);
15 /* allow the release point to slip if we've passed our deadline. */
16 lt_t now = litmus_clock();
17 t->rt_param.job_params.release =
18 (t->rt_param.job_params.deadline < now) ?
19 now : t->rt_param.job_params.deadline;
20 t->rt_param.job_params.deadline =
21 t->rt_param.job_params.release + get_rt_period(t);
22 }
23 else {
24 t->rt_param.job_params.release = t->rt_param.job_params.deadline;
25 t->rt_param.job_params.deadline += get_rt_period(t);
26 }
27
28 t->rt_param.job_params.exec_time = 0; 14 t->rt_param.job_params.exec_time = 0;
15
29 /* update job sequence number */ 16 /* update job sequence number */
30 t->rt_param.job_params.job_no++; 17 t->rt_param.job_params.job_no++;
31 18
@@ -33,10 +20,24 @@ void prepare_for_next_period(struct task_struct *t)
33 t->rt.time_slice = 1; 20 t->rt.time_slice = 1;
34} 21}
35 22
23void prepare_for_next_period(struct task_struct *t)
24{
25 BUG_ON(!t);
26
27 /* Record lateness before we set up the next job's
28 * release and deadline. Lateness may be negative.
29 */
30 t->rt_param.job_params.lateness =
31 (long long)litmus_clock() -
32 (long long)t->rt_param.job_params.deadline;
33
34 setup_release(t, get_release(t) + get_rt_period(t));
35}
36
36void release_at(struct task_struct *t, lt_t start) 37void release_at(struct task_struct *t, lt_t start)
37{ 38{
38 t->rt_param.job_params.deadline = start; 39 BUG_ON(!t);
39 prepare_for_next_period(t); 40 setup_release(t, start);
40 set_rt_flags(t, RT_F_RUNNING); 41 set_rt_flags(t, RT_F_RUNNING);
41} 42}
42 43
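
prepare_for_next_period() above records lateness against the old deadline before setup_release() installs the next release and deadline. A tiny sketch of that roll-over with concrete numbers (times in arbitrary units, purely illustrative):

#include <stdio.h>

struct job { long long release, deadline, lateness; };

static void prepare_next(struct job *j, long long now,
                         long long period, long long rel_deadline)
{
        j->lateness = now - j->deadline;        /* may be negative (early completion) */
        j->release += period;                   /* next release: previous release + period */
        j->deadline = j->release + rel_deadline;
}

int main(void)
{
        struct job j = { .release = 0, .deadline = 10, .lateness = 0 };

        prepare_next(&j, 7, 10, 10);    /* job finished at t=7, i.e. 3 units early */
        printf("lateness=%lld next release=%lld deadline=%lld\n",
               j.lateness, j.release, j.deadline);      /* -3, 10, 20 */
        return 0;
}
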
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 91ec65894379..83a860c52e17 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -128,21 +128,25 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
128 goto out_unlock; 128 goto out_unlock;
129 } 129 }
130 130
131 /* set relative deadline to be implicit if left unspecified */
132 if (tp.relative_deadline == 0)
133 tp.relative_deadline = tp.period;
134
131 if (tp.exec_cost <= 0) 135 if (tp.exec_cost <= 0)
132 goto out_unlock; 136 goto out_unlock;
133 if (tp.period <= 0) 137 if (tp.period <= 0)
134 goto out_unlock; 138 goto out_unlock;
135 if (!cpu_online(tp.cpu)) 139 if (!cpu_online(tp.cpu))
136 goto out_unlock; 140 goto out_unlock;
137 if (tp.period < tp.exec_cost) 141 if (min(tp.relative_deadline, tp.period) < tp.exec_cost) /*density check*/
138 { 142 {
139 printk(KERN_INFO "litmus: real-time task %d rejected " 143 printk(KERN_INFO "litmus: real-time task %d rejected "
140 "because wcet > period\n", pid); 144 "because task density > 1.0\n", pid);
141 goto out_unlock; 145 goto out_unlock;
142 } 146 }
143 if ( tp.cls != RT_CLASS_HARD && 147 if (tp.cls != RT_CLASS_HARD &&
144 tp.cls != RT_CLASS_SOFT && 148 tp.cls != RT_CLASS_SOFT &&
145 tp.cls != RT_CLASS_BEST_EFFORT) 149 tp.cls != RT_CLASS_BEST_EFFORT)
146 { 150 {
147 printk(KERN_INFO "litmus: real-time task %d rejected " 151 printk(KERN_INFO "litmus: real-time task %d rejected "
148 "because its class is invalid\n", pid); 152 "because its class is invalid\n", pid);
@@ -415,11 +419,14 @@ long litmus_admit_task(struct task_struct* tsk)
415 419
416 BUG_ON(is_realtime(tsk)); 420 BUG_ON(is_realtime(tsk));
417 421
418 if (get_rt_period(tsk) == 0 || 422 if (get_rt_relative_deadline(tsk) == 0 ||
419 get_exec_cost(tsk) > get_rt_period(tsk)) { 423 get_exec_cost(tsk) >
420 TRACE_TASK(tsk, "litmus admit: invalid task parameters " 424 min(get_rt_relative_deadline(tsk), get_rt_period(tsk)) ) {
421 "(%lu, %lu)\n", 425 TRACE_TASK(tsk,
422 get_exec_cost(tsk), get_rt_period(tsk)); 426 "litmus admit: invalid task parameters "
427 "(e = %lu, p = %lu, d = %lu)\n",
428 get_exec_cost(tsk), get_rt_period(tsk),
429 get_rt_relative_deadline(tsk));
423 retval = -EINVAL; 430 retval = -EINVAL;
424 goto out; 431 goto out;
425 } 432 }
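
The admission changes above amount to a density test: a task is rejected when its execution cost exceeds min(relative deadline, period), with a zero relative deadline first rewritten to the implicit-deadline case. A minimal standalone version of that check (values are illustrative):

#include <stdio.h>

static int admissible(unsigned long e, unsigned long p, unsigned long d)
{
        if (d == 0)
                d = p;                          /* implicit deadline if unspecified */
        if (e == 0 || p == 0)
                return 0;
        return e <= (d < p ? d : p);            /* density e/min(d,p) must be <= 1 */
}

int main(void)
{
        printf("%d\n", admissible(3, 10, 5));   /* 1: density 3/5 */
        printf("%d\n", admissible(3, 10, 2));   /* 0: density 3/2 > 1 */
        printf("%d\n", admissible(3, 10, 0));   /* 1: implicit deadline, 3/10 */
        return 0;
}
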
diff --git a/litmus/locking.c b/litmus/locking.c
index 718a5a3281d7..12a23eb715cc 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -5,6 +5,7 @@
5#include <litmus/sched_plugin.h> 5#include <litmus/sched_plugin.h>
6#include <litmus/trace.h> 6#include <litmus/trace.h>
7#include <litmus/litmus.h> 7#include <litmus/litmus.h>
8#include <litmus/wait.h>
8 9
9#ifdef CONFIG_LITMUS_DGL_SUPPORT 10#ifdef CONFIG_LITMUS_DGL_SUPPORT
10#include <linux/uaccess.h> 11#include <linux/uaccess.h>
@@ -507,6 +508,38 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
507 508
508#endif 509#endif
509 510
511unsigned int __add_wait_queue_prio_exclusive(
512 wait_queue_head_t* head,
513 prio_wait_queue_t *new)
514{
515 struct list_head *pos;
516 unsigned int passed = 0;
517
518 new->wq.flags |= WQ_FLAG_EXCLUSIVE;
519
520 /* find a spot where the new entry is less than the next */
521 list_for_each(pos, &head->task_list) {
522 prio_wait_queue_t* queued = list_entry(pos, prio_wait_queue_t,
523 wq.task_list);
524
525 if (unlikely(lt_before(new->priority, queued->priority) ||
526 (new->priority == queued->priority &&
527 new->tie_breaker < queued->tie_breaker))) {
528 /* pos is not less than new, thus insert here */
529 __list_add(&new->wq.task_list, pos->prev, pos);
530 goto out;
531 }
532 passed++;
533 }
534
535 /* if we get to this point either the list is empty or every
536 * queued element is less than new.
537 * Let's add new to the end. */
538 list_add_tail(&new->wq.task_list, &head->task_list);
539out:
540 return passed;
541}
542
510#else // CONFIG_LITMUS_LOCKING 543#else // CONFIG_LITMUS_LOCKING
511 544
512struct fdso_ops generic_lock_ops = {}; 545struct fdso_ops generic_lock_ops = {};
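
__add_wait_queue_prio_exclusive() above inserts a waiter in (priority, tie_breaker) order and reports how many queued entries it passed, so a return value of zero identifies a new highest-priority waiter. A standalone sketch of that ordered insert on a plain linked list (not the kernel wait-queue types):

#include <stdio.h>

struct waiter {
        unsigned long long priority;    /* smaller = more urgent, like lt_before() */
        int tie_breaker;
        struct waiter *next;
};

static unsigned int add_prio_exclusive(struct waiter **head, struct waiter *new)
{
        unsigned int passed = 0;
        struct waiter **pos = head;

        while (*pos) {
                struct waiter *q = *pos;
                if (new->priority < q->priority ||
                    (new->priority == q->priority &&
                     new->tie_breaker < q->tie_breaker))
                        break;                  /* insert before q */
                pos = &q->next;
                passed++;
        }
        new->next = *pos;
        *pos = new;
        return passed;
}

int main(void)
{
        struct waiter a = { 20, 0, NULL }, b = { 10, 0, NULL }, c = { 10, 1, NULL };
        struct waiter *head = NULL;

        printf("%u\n", add_prio_exclusive(&head, &a));  /* 0: queue was empty */
        printf("%u\n", add_prio_exclusive(&head, &b));  /* 0: beats a */
        printf("%u\n", add_prio_exclusive(&head, &c));  /* 1: passes b, beats a */
        return 0;
}
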
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index d405854cd39c..d0b796611bea 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -300,9 +300,11 @@ void rt_domain_init(rt_domain_t *rt,
300 */ 300 */
301void __add_ready(rt_domain_t* rt, struct task_struct *new) 301void __add_ready(rt_domain_t* rt, struct task_struct *new)
302{ 302{
303 TRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n", 303 TRACE("rt: adding %s/%d (%llu, %llu, %llu) rel=%llu "
304 new->comm, new->pid, get_exec_cost(new), get_rt_period(new), 304 "to ready queue at %llu\n",
305 get_release(new), litmus_clock()); 305 new->comm, new->pid,
306 get_exec_cost(new), get_rt_period(new), get_rt_relative_deadline(new),
307 get_release(new), litmus_clock());
306 308
307 BUG_ON(bheap_node_in_heap(tsk_rt(new)->heap_node)); 309 BUG_ON(bheap_node_in_heap(tsk_rt(new)->heap_node));
308 310
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index be14dbec6ed2..0460e232d6e6 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -35,6 +35,7 @@
35#include <litmus/litmus.h> 35#include <litmus/litmus.h>
36#include <litmus/jobs.h> 36#include <litmus/jobs.h>
37#include <litmus/preempt.h> 37#include <litmus/preempt.h>
38#include <litmus/budget.h>
38#include <litmus/sched_plugin.h> 39#include <litmus/sched_plugin.h>
39#include <litmus/edf_common.h> 40#include <litmus/edf_common.h>
40#include <litmus/sched_trace.h> 41#include <litmus/sched_trace.h>
@@ -134,7 +135,7 @@ typedef struct clusterdomain {
134 /* map of this cluster cpus */ 135 /* map of this cluster cpus */
135 cpumask_var_t cpu_map; 136 cpumask_var_t cpu_map;
136 /* the cpus queue themselves according to priority in here */ 137 /* the cpus queue themselves according to priority in here */
137 struct binheap_handle cpu_heap; 138 struct binheap cpu_heap;
138 /* lock for this cluster */ 139 /* lock for this cluster */
139#define cluster_lock domain.ready_lock 140#define cluster_lock domain.ready_lock
140 141
@@ -362,11 +363,11 @@ static void check_for_preemptions(cedf_domain_t *cluster)
362 &per_cpu(cedf_cpu_entries, task_cpu(task))); 363 &per_cpu(cedf_cpu_entries, task_cpu(task)));
363 if(affinity) 364 if(affinity)
364 last = affinity; 365 last = affinity;
365 else if(last->linked) 366 else if(requeue_preempted_job(last->linked))
366 requeue(last->linked); 367 requeue(last->linked);
367 } 368 }
368#else 369#else
369 if (last->linked) 370 if (requeue_preempted_job(last->linked))
370 requeue(last->linked); 371 requeue(last->linked);
371#endif 372#endif
372 link_task_to_cpu(task, last); 373 link_task_to_cpu(task, last);
@@ -861,9 +862,9 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
861 /* Any task that is preemptable and either exhausts its execution 862 /* Any task that is preemptable and either exhausts its execution
862 * budget or wants to sleep completes. We may have to reschedule after 863 * budget or wants to sleep completes. We may have to reschedule after
863 * this. Don't do a job completion if we block (can't have timers running 864 * this. Don't do a job completion if we block (can't have timers running
864 * for blocked jobs). Preemption go first for the same reason. 865 * for blocked jobs).
865 */ 866 */
866 if (!np && (out_of_time || sleep) && !blocks && !preempt) 867 if (!np && (out_of_time || sleep) && !blocks)
867 job_completion(entry->scheduled, !sleep); 868 job_completion(entry->scheduled, !sleep);
868 869
869 /* Link pending task if we became unlinked. 870 /* Link pending task if we became unlinked.
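
The call site above (and the matching ones in sched_gsn_edf.c below) replaces the bare "if (last->linked)" with requeue_preempted_job(); the helper's body is not part of this diff. A plausible sketch of what such a guard checks, assumed only from the call sites and the new <litmus/budget.h> include, not confirmed by this patch:

/* only requeue a preempted job that still exists and either has budget left
 * or is not subject to budget enforcement; an exhausted, enforced job is left
 * to the budget-enforcement path instead of the ready queue */
static inline int requeue_preempted_job(struct task_struct *t)
{
        return t && (!budget_exhausted(t) || !budget_enforced(t));
}
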
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 8c48757fa86c..11304d634661 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -22,6 +22,7 @@
22#include <litmus/sched_trace.h> 22#include <litmus/sched_trace.h>
23 23
24#include <litmus/preempt.h> 24#include <litmus/preempt.h>
25#include <litmus/budget.h>
25 26
26#include <litmus/bheap.h> 27#include <litmus/bheap.h>
27#include <litmus/binheap.h> 28#include <litmus/binheap.h>
@@ -136,7 +137,7 @@ DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);
136cpu_entry_t* gsnedf_cpus[NR_CPUS]; 137cpu_entry_t* gsnedf_cpus[NR_CPUS];
137 138
138/* the cpus queue themselves according to priority in here */ 139/* the cpus queue themselves according to priority in here */
139static struct binheap_handle gsnedf_cpu_heap; 140static struct binheap gsnedf_cpu_heap;
140 141
141static rt_domain_t gsnedf; 142static rt_domain_t gsnedf;
142#define gsnedf_lock (gsnedf.ready_lock) 143#define gsnedf_lock (gsnedf.ready_lock)
@@ -340,11 +341,11 @@ static void check_for_preemptions(void)
340 &per_cpu(gsnedf_cpu_entries, task_cpu(task))); 341 &per_cpu(gsnedf_cpu_entries, task_cpu(task)));
341 if (affinity) 342 if (affinity)
342 last = affinity; 343 last = affinity;
343 else if (last->linked) 344 else if (requeue_preempted_job(last->linked))
344 requeue(last->linked); 345 requeue(last->linked);
345 } 346 }
346#else 347#else
347 if (last->linked) 348 if (requeue_preempted_job(last->linked))
348 requeue(last->linked); 349 requeue(last->linked);
349#endif 350#endif
350 351
@@ -786,9 +787,8 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
786 /* (0) Determine state */ 787 /* (0) Determine state */
787 exists = entry->scheduled != NULL; 788 exists = entry->scheduled != NULL;
788 blocks = exists && !is_running(entry->scheduled); 789 blocks = exists && !is_running(entry->scheduled);
789 out_of_time = exists && 790 out_of_time = exists && budget_enforced(entry->scheduled)
790 budget_enforced(entry->scheduled) && 791 && budget_exhausted(entry->scheduled);
791 budget_exhausted(entry->scheduled);
792 np = exists && is_np(entry->scheduled); 792 np = exists && is_np(entry->scheduled);
793 sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; 793 sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
794 preempt = entry->scheduled != entry->linked; 794 preempt = entry->scheduled != entry->linked;
@@ -837,9 +837,9 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
837 /* Any task that is preemptable and either exhausts its execution 837 /* Any task that is preemptable and either exhausts its execution
838 * budget or wants to sleep completes. We may have to reschedule after 838 * budget or wants to sleep completes. We may have to reschedule after
839 * this. Don't do a job completion if we block (can't have timers running 839 * this. Don't do a job completion if we block (can't have timers running
840 * for blocked jobs). Preemption go first for the same reason. 840 * for blocked jobs).
841 */ 841 */
842 if (!np && (out_of_time || sleep) && !blocks && !preempt) 842 if (!np && (out_of_time || sleep) && !blocks)
843 job_completion(entry->scheduled, !sleep); 843 job_completion(entry->scheduled, !sleep);
844 844
845 /* Link pending task if we became unlinked. 845 /* Link pending task if we became unlinked.
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index 16f1065bbdca..72c06a492ef9 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -850,6 +850,13 @@ static long pfair_admit_task(struct task_struct* t)
850 cpu_cluster(pstate[task_cpu(t)])) 850 cpu_cluster(pstate[task_cpu(t)]))
851 return -EINVAL; 851 return -EINVAL;
852 852
853 if (get_rt_period(t) != get_rt_relative_deadline(t)) {
854 printk(KERN_INFO "%s: Admission rejected. "
855 "Only implicit deadlines are currently supported.\n",
856 litmus->plugin_name);
857 return -EINVAL;
858 }
859
853 /* Pfair is a tick-based method, so the time 860 /* Pfair is a tick-based method, so the time
854 * of interest is jiffies. Calculate tick-based 861 * of interest is jiffies. Calculate tick-based
855 * times for everything. 862 * times for everything.
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
new file mode 100644
index 000000000000..62be699629b1
--- /dev/null
+++ b/litmus/sched_pfp.c
@@ -0,0 +1,1685 @@
1/*
2 * litmus/sched_pfp.c
3 *
4 * Implementation of partitioned fixed-priority scheduling.
5 * Based on PSN-EDF.
6 */
7
8#include <linux/percpu.h>
9#include <linux/sched.h>
10#include <linux/list.h>
11#include <linux/spinlock.h>
12#include <linux/module.h>
13
14#include <litmus/litmus.h>
15#include <litmus/wait.h>
16#include <litmus/jobs.h>
17#include <litmus/preempt.h>
18#include <litmus/fp_common.h>
19#include <litmus/sched_plugin.h>
20#include <litmus/sched_trace.h>
21#include <litmus/trace.h>
22#include <litmus/budget.h>
23
24#include <linux/uaccess.h>
25
26
27typedef struct {
28 rt_domain_t domain;
29 struct fp_prio_queue ready_queue;
30 int cpu;
31 struct task_struct* scheduled; /* only RT tasks */
32/*
33 * scheduling lock slock
34 * protects the domain and serializes scheduling decisions
35 */
36#define slock domain.ready_lock
37
38} pfp_domain_t;
39
40DEFINE_PER_CPU(pfp_domain_t, pfp_domains);
41
42pfp_domain_t* pfp_doms[NR_CPUS];
43
44#define local_pfp (&__get_cpu_var(pfp_domains))
45#define remote_dom(cpu) (&per_cpu(pfp_domains, cpu).domain)
46#define remote_pfp(cpu) (&per_cpu(pfp_domains, cpu))
47#define task_dom(task) remote_dom(get_partition(task))
48#define task_pfp(task) remote_pfp(get_partition(task))
49
50/* we assume the lock is being held */
51static void preempt(pfp_domain_t *pfp)
52{
53 preempt_if_preemptable(pfp->scheduled, pfp->cpu);
54}
55
56static unsigned int priority_index(struct task_struct* t)
57{
58#ifdef CONFIG_LITMUS_LOCKING
59 if (unlikely(t->rt_param.inh_task))
60 /* use effective priority */
61 t = t->rt_param.inh_task;
62
63 if (is_priority_boosted(t)) {
64 /* zero is reserved for priority-boosted tasks */
65 return 0;
66 } else
67#endif
68 return get_priority(t);
69}
70
71
72static void pfp_release_jobs(rt_domain_t* rt, struct bheap* tasks)
73{
74 pfp_domain_t *pfp = container_of(rt, pfp_domain_t, domain);
75 unsigned long flags;
76 struct task_struct* t;
77 struct bheap_node* hn;
78
79 raw_spin_lock_irqsave(&pfp->slock, flags);
80
81 while (!bheap_empty(tasks)) {
82 hn = bheap_take(fp_ready_order, tasks);
83 t = bheap2task(hn);
84 TRACE_TASK(t, "released (part:%d prio:%d)\n",
85 get_partition(t), get_priority(t));
86 fp_prio_add(&pfp->ready_queue, t, priority_index(t));
87 }
88
89 /* do we need to preempt? */
90 if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled)) {
91 TRACE_CUR("preempted by new release\n");
92 preempt(pfp);
93 }
94
95 raw_spin_unlock_irqrestore(&pfp->slock, flags);
96}
97
98static void pfp_domain_init(pfp_domain_t* pfp,
99 int cpu)
100{
101 fp_domain_init(&pfp->domain, NULL, pfp_release_jobs);
102 pfp->cpu = cpu;
103 pfp->scheduled = NULL;
104 fp_prio_queue_init(&pfp->ready_queue);
105}
106
107static void requeue(struct task_struct* t, pfp_domain_t *pfp)
108{
109 if (t->state != TASK_RUNNING)
110 TRACE_TASK(t, "requeue: !TASK_RUNNING\n");
111
112 set_rt_flags(t, RT_F_RUNNING);
113 if (is_released(t, litmus_clock()))
114 fp_prio_add(&pfp->ready_queue, t, priority_index(t));
115 else
116 add_release(&pfp->domain, t); /* it has got to wait */
117}
118
119static void job_completion(struct task_struct* t, int forced)
120{
121 sched_trace_task_completion(t,forced);
122 TRACE_TASK(t, "job_completion().\n");
123
124 set_rt_flags(t, RT_F_SLEEP);
125 prepare_for_next_period(t);
126}
127
128static void pfp_tick(struct task_struct *t)
129{
130 pfp_domain_t *pfp = local_pfp;
131
132 /* Check for inconsistency. We don't need the lock for this since
133 * ->scheduled is only changed in schedule, which obviously is not
134 * executing in parallel on this CPU
135 */
136 BUG_ON(is_realtime(t) && t != pfp->scheduled);
137
138 if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
139 if (!is_np(t)) {
140 litmus_reschedule_local();
141 TRACE("pfp_scheduler_tick: "
142 "%d is preemptable "
143 " => FORCE_RESCHED\n", t->pid);
144 } else if (is_user_np(t)) {
145 TRACE("pfp_scheduler_tick: "
146 "%d is non-preemptable, "
147 "preemption delayed.\n", t->pid);
148 request_exit_np(t);
149 }
150 }
151}
152
153static struct task_struct* pfp_schedule(struct task_struct * prev)
154{
155 pfp_domain_t* pfp = local_pfp;
156 struct task_struct* next;
157
158 int out_of_time, sleep, preempt, np, exists, blocks, resched, migrate;
159
160 raw_spin_lock(&pfp->slock);
161
162 /* sanity checking
163 * unlike G-EDF, when a task exits (is dead),
164 * pfp->scheduled may be NULL and prev _is_ realtime
165 */
166 BUG_ON(pfp->scheduled && pfp->scheduled != prev);
167 BUG_ON(pfp->scheduled && !is_realtime(prev));
168
169 /* (0) Determine state */
170 exists = pfp->scheduled != NULL;
171 blocks = exists && !is_running(pfp->scheduled);
172 out_of_time = exists &&
173 budget_enforced(pfp->scheduled) &&
174 budget_exhausted(pfp->scheduled);
175 np = exists && is_np(pfp->scheduled);
176 sleep = exists && get_rt_flags(pfp->scheduled) == RT_F_SLEEP;
177 migrate = exists && get_partition(pfp->scheduled) != pfp->cpu;
178 preempt = migrate || fp_preemption_needed(&pfp->ready_queue, prev);
179
180 /* If we need to preempt do so.
181 * The following checks set resched to 1 in case of special
182 * circumstances.
183 */
184 resched = preempt;
185
186 /* If a task blocks we have no choice but to reschedule.
187 */
188 if (blocks)
189 resched = 1;
190
191 /* Request a sys_exit_np() call if we would like to preempt but cannot.
192 * Multiple calls to request_exit_np() don't hurt.
193 */
194 if (np && (out_of_time || preempt || sleep))
195 request_exit_np(pfp->scheduled);
196
197 /* Any task that is preemptable and either exhausts its execution
198 * budget or wants to sleep completes. We may have to reschedule after
199 * this.
200 */
201 if (!np && (out_of_time || sleep) && !blocks && !migrate) {
202 job_completion(pfp->scheduled, !sleep);
203 resched = 1;
204 }
205
206 /* The final scheduling decision. Do we need to switch for some reason?
207 * Switch if we are in RT mode and have no task or if we need to
208 * resched.
209 */
210 next = NULL;
211 if ((!np || blocks) && (resched || !exists)) {
212 /* When preempting a task that does not block, then
213 * re-insert it into either the ready queue or the
214 * release queue (if it completed). requeue() picks
215 * the appropriate queue.
216 */
217 if (pfp->scheduled && !blocks && !migrate)
218 requeue(pfp->scheduled, pfp);
219 next = fp_prio_take(&pfp->ready_queue);
220 } else
221 /* Only override Linux scheduler if we have a real-time task
222 * scheduled that needs to continue.
223 */
224 if (exists)
225 next = prev;
226
227 if (next) {
228 TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
229 set_rt_flags(next, RT_F_RUNNING);
230 } else {
231 TRACE("becoming idle at %llu\n", litmus_clock());
232 }
233
234 pfp->scheduled = next;
235 sched_state_task_picked();
236 raw_spin_unlock(&pfp->slock);
237
238 return next;
239}
240
241#ifdef CONFIG_LITMUS_LOCKING
242
243/* prev is no longer scheduled --- see if it needs to migrate */
244static void pfp_finish_switch(struct task_struct *prev)
245{
246 pfp_domain_t *to;
247
248 if (is_realtime(prev) &&
249 is_running(prev) &&
250 get_partition(prev) != smp_processor_id()) {
251 TRACE_TASK(prev, "needs to migrate from P%d to P%d\n",
252 smp_processor_id(), get_partition(prev));
253
254 to = task_pfp(prev);
255
256 raw_spin_lock(&to->slock);
257
258 TRACE_TASK(prev, "adding to queue on P%d\n", to->cpu);
259 requeue(prev, to);
260 if (fp_preemption_needed(&to->ready_queue, to->scheduled))
261 preempt(to);
262
263 raw_spin_unlock(&to->slock);
264
265 }
266}
267
268#endif
269
270/* Prepare a task for running in RT mode
271 */
272static void pfp_task_new(struct task_struct * t, int on_rq, int running)
273{
274 pfp_domain_t* pfp = task_pfp(t);
275 unsigned long flags;
276
277 TRACE_TASK(t, "P-FP: task new, cpu = %d\n",
278 t->rt_param.task_params.cpu);
279
280 /* setup job parameters */
281 release_at(t, litmus_clock());
282
283 /* The task should be running in the queue, otherwise signal
284 * code will try to wake it up with fatal consequences.
285 */
286 raw_spin_lock_irqsave(&pfp->slock, flags);
287 if (running) {
288 /* there shouldn't be anything else running at the time */
289 BUG_ON(pfp->scheduled);
290 pfp->scheduled = t;
291 } else {
292 requeue(t, pfp);
293 /* maybe we have to reschedule */
294 preempt(pfp);
295 }
296 raw_spin_unlock_irqrestore(&pfp->slock, flags);
297}
298
299static void pfp_task_wake_up(struct task_struct *task)
300{
301 unsigned long flags;
302 pfp_domain_t* pfp = task_pfp(task);
303 lt_t now;
304
305 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
306 raw_spin_lock_irqsave(&pfp->slock, flags);
307
308#ifdef CONFIG_LITMUS_LOCKING
309 /* Should only be queued when processing a fake wake-up due to a
310 * migration-related state change. */
311 if (unlikely(is_queued(task))) {
312 TRACE_TASK(task, "WARNING: waking task still queued. Is this right?\n");
313 goto out_unlock;
314 }
315#else
316 BUG_ON(is_queued(task));
317#endif
318 now = litmus_clock();
319 if (is_tardy(task, now)
320#ifdef CONFIG_LITMUS_LOCKING
321 /* We need to take suspensions because of semaphores into
322 * account! If a job resumes after being suspended due to acquiring
323 * a semaphore, it should never be treated as a new job release.
324 */
325 && !is_priority_boosted(task)
326#endif
327 ) {
328 /* new sporadic release */
329 release_at(task, now);
330 sched_trace_task_release(task);
331 }
332
333 /* Only add to ready queue if it is not the currently-scheduled
334 * task. This could be the case if a task was woken up concurrently
335 * on a remote CPU before the executing CPU got around to actually
336 * de-scheduling the task, i.e., wake_up() raced with schedule()
337 * and won. Also, don't requeue if it is still queued, which can
338 * happen under the DPCP due to wake-ups racing with migrations.
339 */
340 if (pfp->scheduled != task)
341 requeue(task, pfp);
342
343out_unlock:
344 raw_spin_unlock_irqrestore(&pfp->slock, flags);
345 TRACE_TASK(task, "wake up done\n");
346}
347
348static void pfp_task_block(struct task_struct *t)
349{
350 /* only running tasks can block, thus t is in no queue */
351 TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state);
352
353 BUG_ON(!is_realtime(t));
354
355 /* If this task blocked normally, it shouldn't be queued. The exception is
356 * if this is a simulated block()/wakeup() pair from the pull-migration code path.
357 * This should only happen if the DPCP is being used.
358 */
359#ifdef CONFIG_LITMUS_LOCKING
360 if (unlikely(is_queued(t)))
361 TRACE_TASK(t, "WARNING: blocking task still queued. Is this right?\n");
362#else
363 BUG_ON(is_queued(t));
364#endif
365}
366
367static void pfp_task_exit(struct task_struct * t)
368{
369 unsigned long flags;
370 pfp_domain_t* pfp = task_pfp(t);
371 rt_domain_t* dom;
372
373 raw_spin_lock_irqsave(&pfp->slock, flags);
374 if (is_queued(t)) {
375 BUG(); /* This currently doesn't work. */
376 /* dequeue */
377 dom = task_dom(t);
378 remove(dom, t);
379 }
380 if (pfp->scheduled == t) {
381 pfp->scheduled = NULL;
382 preempt(pfp);
383 }
384 TRACE_TASK(t, "RIP, now reschedule\n");
385
386 raw_spin_unlock_irqrestore(&pfp->slock, flags);
387}
388
389#ifdef CONFIG_LITMUS_LOCKING
390
391#include <litmus/fdso.h>
392#include <litmus/srp.h>
393
394static void fp_dequeue(pfp_domain_t* pfp, struct task_struct* t)
395{
396 BUG_ON(pfp->scheduled == t && is_queued(t));
397 if (is_queued(t))
398 fp_prio_remove(&pfp->ready_queue, t, priority_index(t));
399}
400
401static void fp_set_prio_inh(pfp_domain_t* pfp, struct task_struct* t,
402 struct task_struct* prio_inh)
403{
404 int requeue;
405
406 if (!t || t->rt_param.inh_task == prio_inh) {
407 /* no update required */
408 if (t)
409 TRACE_TASK(t, "no prio-inh update required\n");
410 return;
411 }
412
413 requeue = is_queued(t);
414 TRACE_TASK(t, "prio-inh: is_queued:%d\n", requeue);
415
416 if (requeue)
417 /* first remove */
418 fp_dequeue(pfp, t);
419
420 t->rt_param.inh_task = prio_inh;
421
422 if (requeue)
423 /* add again to the right queue */
424 fp_prio_add(&pfp->ready_queue, t, priority_index(t));
425}
426
427static int effective_agent_priority(int prio)
428{
429 /* make sure agents have higher priority */
430 return prio - LITMUS_MAX_PRIORITY;
431}
432
433static lt_t prio_point(int eprio)
434{
435 /* make sure we have non-negative prio points */
436 return eprio + LITMUS_MAX_PRIORITY;
437}
438
439static int prio_from_point(lt_t prio_point)
440{
441 return ((int) prio_point) - LITMUS_MAX_PRIORITY;
442}
443
444static void boost_priority(struct task_struct* t, lt_t priority_point)
445{
446 unsigned long flags;
447 pfp_domain_t* pfp = task_pfp(t);
448
449 raw_spin_lock_irqsave(&pfp->slock, flags);
450
451
452 TRACE_TASK(t, "priority boosted at %llu\n", litmus_clock());
453
454 tsk_rt(t)->priority_boosted = 1;
455 /* tie-break by protocol-specific priority point */
456 tsk_rt(t)->boost_start_time = priority_point;
457
458 if (pfp->scheduled != t) {
459 /* holder may be queued: first stop queue changes */
460 raw_spin_lock(&pfp->domain.release_lock);
461 if (is_queued(t) &&
462 /* If it is queued, then we need to re-order. */
463 bheap_decrease(fp_ready_order, tsk_rt(t)->heap_node) &&
464 /* If we bubbled to the top, then we need to check for preemptions. */
465 fp_preemption_needed(&pfp->ready_queue, pfp->scheduled))
466 preempt(pfp);
467 raw_spin_unlock(&pfp->domain.release_lock);
468 } /* else: nothing to do since the job is not queued while scheduled */
469
470 raw_spin_unlock_irqrestore(&pfp->slock, flags);
471}
472
473static void unboost_priority(struct task_struct* t)
474{
475 unsigned long flags;
476 pfp_domain_t* pfp = task_pfp(t);
477 lt_t now;
478
479 raw_spin_lock_irqsave(&pfp->slock, flags);
480 now = litmus_clock();
481
482 /* assumption: this only happens when the job is scheduled */
483 BUG_ON(pfp->scheduled != t);
484
485 TRACE_TASK(t, "priority restored at %llu\n", now);
486
487 /* priority boosted jobs must be scheduled */
488 BUG_ON(pfp->scheduled != t);
489
490 tsk_rt(t)->priority_boosted = 0;
491 tsk_rt(t)->boost_start_time = 0;
492
493 /* check if this changes anything */
494 if (fp_preemption_needed(&pfp->ready_queue, pfp->scheduled))
495 preempt(pfp);
496
497 raw_spin_unlock_irqrestore(&pfp->slock, flags);
498}
499
500/* ******************** SRP support ************************ */
501
502static unsigned int pfp_get_srp_prio(struct task_struct* t)
503{
504 return get_priority(t);
505}
506
507/* ******************** FMLP support ********************** */
508
509struct fmlp_semaphore {
510 struct litmus_lock litmus_lock;
511
512 /* current resource holder */
513 struct task_struct *owner;
514
515 /* FIFO queue of waiting tasks */
516 wait_queue_head_t wait;
517};
518
519static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock)
520{
521 return container_of(lock, struct fmlp_semaphore, litmus_lock);
522}
523int pfp_fmlp_lock(struct litmus_lock* l)
524{
525 struct task_struct* t = current;
526 struct fmlp_semaphore *sem = fmlp_from_lock(l);
527 wait_queue_t wait;
528 unsigned long flags;
529 lt_t time_of_request;
530
531 if (!is_realtime(t))
532 return -EPERM;
533
534 spin_lock_irqsave(&sem->wait.lock, flags);
535
536 /* tie-break by this point in time */
537 time_of_request = litmus_clock();
538
539 /* Priority-boost ourself *before* we suspend so that
540 * our priority is boosted when we resume. */
541 boost_priority(t, time_of_request);
542
543 if (sem->owner) {
544 /* resource is not free => must suspend and wait */
545
546 init_waitqueue_entry(&wait, t);
547
548 /* FIXME: interruptible would be nice some day */
549 set_task_state(t, TASK_UNINTERRUPTIBLE);
550
551 __add_wait_queue_tail_exclusive(&sem->wait, &wait);
552
553 TS_LOCK_SUSPEND;
554
555 /* release lock before sleeping */
556 spin_unlock_irqrestore(&sem->wait.lock, flags);
557
558 /* We depend on the FIFO order. Thus, we don't need to recheck
559 * when we wake up; we are guaranteed to have the lock since
560 * there is only one wake up per release.
561 */
562
563 schedule();
564
565 TS_LOCK_RESUME;
566
567 /* Since we hold the lock, no other task will change
568 * ->owner. We can thus check it without acquiring the spin
569 * lock. */
570 BUG_ON(sem->owner != t);
571 } else {
572 /* it's ours now */
573 sem->owner = t;
574
575 spin_unlock_irqrestore(&sem->wait.lock, flags);
576 }
577
578 return 0;
579}
580
581int pfp_fmlp_unlock(struct litmus_lock* l)
582{
583 struct task_struct *t = current, *next;
584 struct fmlp_semaphore *sem = fmlp_from_lock(l);
585 unsigned long flags;
586 int err = 0;
587
588 spin_lock_irqsave(&sem->wait.lock, flags);
589
590 if (sem->owner != t) {
591 err = -EINVAL;
592 goto out;
593 }
594
595 /* we lose the benefit of priority boosting */
596
597 unboost_priority(t);
598
599 /* check if there are jobs waiting for this resource */
600 next = __waitqueue_remove_first(&sem->wait);
601 if (next) {
602 /* next becomes the resource holder */
603 sem->owner = next;
604
605 /* Wake up next. The waiting job is already priority-boosted. */
606 wake_up_process(next);
607 } else
608 /* resource becomes available */
609 sem->owner = NULL;
610
611out:
612 spin_unlock_irqrestore(&sem->wait.lock, flags);
613 return err;
614}
615
616int pfp_fmlp_close(struct litmus_lock* l)
617{
618 struct task_struct *t = current;
619 struct fmlp_semaphore *sem = fmlp_from_lock(l);
620 unsigned long flags;
621
622 int owner;
623
624 spin_lock_irqsave(&sem->wait.lock, flags);
625
626 owner = sem->owner == t;
627
628 spin_unlock_irqrestore(&sem->wait.lock, flags);
629
630 if (owner)
631 pfp_fmlp_unlock(l);
632
633 return 0;
634}
635
636void pfp_fmlp_free(struct litmus_lock* lock)
637{
638 kfree(fmlp_from_lock(lock));
639}
640
641static struct litmus_lock_ops pfp_fmlp_lock_ops = {
642 .close = pfp_fmlp_close,
643 .lock = pfp_fmlp_lock,
644 .unlock = pfp_fmlp_unlock,
645 .deallocate = pfp_fmlp_free,
646};
647
648static struct litmus_lock* pfp_new_fmlp(void)
649{
650 struct fmlp_semaphore* sem;
651
652 sem = kmalloc(sizeof(*sem), GFP_KERNEL);
653 if (!sem)
654 return NULL;
655
656 sem->owner = NULL;
657 init_waitqueue_head(&sem->wait);
658 sem->litmus_lock.ops = &pfp_fmlp_lock_ops;
659
660 return &sem->litmus_lock;
661}
662
663/* ******************** MPCP support ********************** */
664
665struct mpcp_semaphore {
666 struct litmus_lock litmus_lock;
667
668 /* current resource holder */
669 struct task_struct *owner;
670
671 /* priority queue of waiting tasks */
672 wait_queue_head_t wait;
673
674 /* priority ceiling per cpu */
675 unsigned int prio_ceiling[NR_CPUS];
676
677 /* should jobs spin "virtually" for this resource? */
678 int vspin;
679};
680
681#define OMEGA_CEILING UINT_MAX
682
683/* Since jobs spin "virtually" while waiting to acquire a lock,
684 * they first must acquire a local per-cpu resource.
685 */
686static DEFINE_PER_CPU(wait_queue_head_t, mpcpvs_vspin_wait);
687static DEFINE_PER_CPU(struct task_struct*, mpcpvs_vspin);
688
689/* called with preemptions off <=> no local modifications */
690static void mpcp_vspin_enter(void)
691{
692 struct task_struct* t = current;
693
694 while (1) {
695 if (__get_cpu_var(mpcpvs_vspin) == NULL) {
696 /* good, we get to issue our request */
697 __get_cpu_var(mpcpvs_vspin) = t;
698 break;
699 } else {
700 /* some job is spinning => enqueue in request queue */
701 prio_wait_queue_t wait;
702 wait_queue_head_t* vspin = &__get_cpu_var(mpcpvs_vspin_wait);
703 unsigned long flags;
704
705 /* ordered by regular priority */
706 init_prio_waitqueue_entry(&wait, t, prio_point(get_priority(t)));
707
708 spin_lock_irqsave(&vspin->lock, flags);
709
710 set_task_state(t, TASK_UNINTERRUPTIBLE);
711
712 __add_wait_queue_prio_exclusive(vspin, &wait);
713
714 spin_unlock_irqrestore(&vspin->lock, flags);
715
716 TS_LOCK_SUSPEND;
717
718 preempt_enable_no_resched();
719
720 schedule();
721
722 preempt_disable();
723
724 TS_LOCK_RESUME;
725 /* Recheck if we got it --- some higher-priority process might
726 * have swooped in. */
727 }
728 }
729 /* ok, now it is ours */
730}
731
732/* called with preemptions off */
733static void mpcp_vspin_exit(void)
734{
735 struct task_struct* t = current, *next;
736 unsigned long flags;
737 wait_queue_head_t* vspin = &__get_cpu_var(mpcpvs_vspin_wait);
738
739 BUG_ON(__get_cpu_var(mpcpvs_vspin) != t);
740
741 /* no spinning job */
742 __get_cpu_var(mpcpvs_vspin) = NULL;
743
744 /* see if anyone is waiting for us to stop "spinning" */
745 spin_lock_irqsave(&vspin->lock, flags);
746 next = __waitqueue_remove_first(vspin);
747
748 if (next)
749 wake_up_process(next);
750
751 spin_unlock_irqrestore(&vspin->lock, flags);
752}
753
754static inline struct mpcp_semaphore* mpcp_from_lock(struct litmus_lock* lock)
755{
756 return container_of(lock, struct mpcp_semaphore, litmus_lock);
757}
758
759int pfp_mpcp_lock(struct litmus_lock* l)
760{
761 struct task_struct* t = current;
762 struct mpcp_semaphore *sem = mpcp_from_lock(l);
763 prio_wait_queue_t wait;
764 unsigned long flags;
765
766 if (!is_realtime(t))
767 return -EPERM;
768
769 preempt_disable();
770
771 if (sem->vspin)
772 mpcp_vspin_enter();
773
774 /* Priority-boost ourself *before* we suspend so that
775 * our priority is boosted when we resume. Use the priority
776 * ceiling for the local partition. */
777 boost_priority(t, sem->prio_ceiling[get_partition(t)]);
778
779 spin_lock_irqsave(&sem->wait.lock, flags);
780
781 preempt_enable_no_resched();
782
783 if (sem->owner) {
784 /* resource is not free => must suspend and wait */
785
786 /* ordered by regular priority */
787 init_prio_waitqueue_entry(&wait, t, prio_point(get_priority(t)));
788
789 /* FIXME: interruptible would be nice some day */
790 set_task_state(t, TASK_UNINTERRUPTIBLE);
791
792 __add_wait_queue_prio_exclusive(&sem->wait, &wait);
793
794 TS_LOCK_SUSPEND;
795
796 /* release lock before sleeping */
797 spin_unlock_irqrestore(&sem->wait.lock, flags);
798
799 /* We depend on the FIFO order. Thus, we don't need to recheck
800 * when we wake up; we are guaranteed to have the lock since
801 * there is only one wake up per release.
802 */
803
804 schedule();
805
806 TS_LOCK_RESUME;
807
808 /* Since we hold the lock, no other task will change
809 * ->owner. We can thus check it without acquiring the spin
810 * lock. */
811 BUG_ON(sem->owner != t);
812 } else {
813 /* it's ours now */
814 sem->owner = t;
815
816 spin_unlock_irqrestore(&sem->wait.lock, flags);
817 }
818
819 return 0;
820}
821
822int pfp_mpcp_unlock(struct litmus_lock* l)
823{
824 struct task_struct *t = current, *next;
825 struct mpcp_semaphore *sem = mpcp_from_lock(l);
826 unsigned long flags;
827 int err = 0;
828
829 spin_lock_irqsave(&sem->wait.lock, flags);
830
831 if (sem->owner != t) {
832 err = -EINVAL;
833 goto out;
834 }
835
836 /* we lose the benefit of priority boosting */
837
838 unboost_priority(t);
839
840 /* check if there are jobs waiting for this resource */
841 next = __waitqueue_remove_first(&sem->wait);
842 if (next) {
843 /* next becomes the resource holder */
844 sem->owner = next;
845
846 /* Wake up next. The waiting job is already priority-boosted. */
847 wake_up_process(next);
848 } else
849 /* resource becomes available */
850 sem->owner = NULL;
851
852out:
853 spin_unlock_irqrestore(&sem->wait.lock, flags);
854
855 if (sem->vspin && err == 0) {
856 preempt_disable();
857 mpcp_vspin_exit();
858 preempt_enable();
859 }
860
861 return err;
862}
863
864int pfp_mpcp_open(struct litmus_lock* l, void* config)
865{
866 struct task_struct *t = current;
867 struct mpcp_semaphore *sem = mpcp_from_lock(l);
868 int cpu, local_cpu;
869 unsigned long flags;
870
871 if (!is_realtime(t))
872 /* we need to know the real-time priority */
873 return -EPERM;
874
875 local_cpu = get_partition(t);
876
877 spin_lock_irqsave(&sem->wait.lock, flags);
878
879 for (cpu = 0; cpu < NR_CPUS; cpu++)
880 if (cpu != local_cpu)
881 {
882 sem->prio_ceiling[cpu] = min(sem->prio_ceiling[cpu],
883 get_priority(t));
884 TRACE_CUR("priority ceiling for sem %p is now %d on cpu %d\n",
885 sem, sem->prio_ceiling[cpu], cpu);
886 }
887
888 spin_unlock_irqrestore(&sem->wait.lock, flags);
889
890 return 0;
891}
892
893int pfp_mpcp_close(struct litmus_lock* l)
894{
895 struct task_struct *t = current;
896 struct mpcp_semaphore *sem = mpcp_from_lock(l);
897 unsigned long flags;
898
899 int owner;
900
901 spin_lock_irqsave(&sem->wait.lock, flags);
902
903 owner = sem->owner == t;
904
905 spin_unlock_irqrestore(&sem->wait.lock, flags);
906
907 if (owner)
908 pfp_mpcp_unlock(l);
909
910 return 0;
911}
912
913void pfp_mpcp_free(struct litmus_lock* lock)
914{
915 kfree(mpcp_from_lock(lock));
916}
917
918static struct litmus_lock_ops pfp_mpcp_lock_ops = {
919 .close = pfp_mpcp_close,
920 .lock = pfp_mpcp_lock,
921 .open = pfp_mpcp_open,
922 .unlock = pfp_mpcp_unlock,
923 .deallocate = pfp_mpcp_free,
924};
925
926static struct litmus_lock* pfp_new_mpcp(int vspin)
927{
928 struct mpcp_semaphore* sem;
929 int cpu;
930
931 sem = kmalloc(sizeof(*sem), GFP_KERNEL);
932 if (!sem)
933 return NULL;
934
935 sem->owner = NULL;
936 init_waitqueue_head(&sem->wait);
937 sem->litmus_lock.ops = &pfp_mpcp_lock_ops;
938
939 for (cpu = 0; cpu < NR_CPUS; cpu++)
940 sem->prio_ceiling[cpu] = OMEGA_CEILING;
941
942 /* mark as virtual spinning */
943 sem->vspin = vspin;
944
945 return &sem->litmus_lock;
946}
947
948
949/* ******************** PCP support ********************** */
950
951
952struct pcp_semaphore {
953 struct litmus_lock litmus_lock;
954
955 struct list_head ceiling;
956
957 /* current resource holder */
958 struct task_struct *owner;
959
960 /* priority ceiling --- can be negative due to DPCP support */
961 int prio_ceiling;
962
963 /* on which processor is this PCP semaphore allocated? */
964 int on_cpu;
965};
966
967static inline struct pcp_semaphore* pcp_from_lock(struct litmus_lock* lock)
968{
969 return container_of(lock, struct pcp_semaphore, litmus_lock);
970}
971
972
973struct pcp_state {
974 struct list_head system_ceiling;
975
976 /* highest-priority waiting task */
977 struct task_struct* hp_waiter;
978
979 /* list of jobs waiting to get past the system ceiling */
980 wait_queue_head_t ceiling_blocked;
981};
982
983static void pcp_init_state(struct pcp_state* s)
984{
985 INIT_LIST_HEAD(&s->system_ceiling);
986 s->hp_waiter = NULL;
987 init_waitqueue_head(&s->ceiling_blocked);
988}
989
990static DEFINE_PER_CPU(struct pcp_state, pcp_state);
991
992/* assumes preemptions are off */
993static struct pcp_semaphore* pcp_get_ceiling(void)
994{
995 struct list_head* top = __get_cpu_var(pcp_state).system_ceiling.next;
996
997 if (top != &__get_cpu_var(pcp_state).system_ceiling)
998 return list_entry(top, struct pcp_semaphore, ceiling);
999 else
1000 return NULL;
1001}
1002
1003/* assumes preempt off */
1004static void pcp_add_ceiling(struct pcp_semaphore* sem)
1005{
1006 struct list_head *pos;
1007 struct list_head *in_use = &__get_cpu_var(pcp_state).system_ceiling;
1008 struct pcp_semaphore* held;
1009
1010 BUG_ON(sem->on_cpu != smp_processor_id());
1011 BUG_ON(in_list(&sem->ceiling));
1012
1013 list_for_each(pos, in_use) {
1014 held = list_entry(pos, struct pcp_semaphore, ceiling);
1015 if (held->prio_ceiling >= sem->prio_ceiling) {
1016 __list_add(&sem->ceiling, pos->prev, pos);
1017 return;
1018 }
1019 }
1020
1021 /* we hit the end of the list */
1022
1023 list_add_tail(&sem->ceiling, in_use);
1024}
1025
1026/* assumes preempt off */
1027static int pcp_exceeds_ceiling(struct pcp_semaphore* ceiling,
1028 struct task_struct* task,
1029 int effective_prio)
1030{
1031 return ceiling == NULL ||
1032 ceiling->prio_ceiling > effective_prio ||
1033 ceiling->owner == task;
1034}
1035
1036/* assumes preempt off */
1037static void pcp_priority_inheritance(void)
1038{
1039 unsigned long flags;
1040 pfp_domain_t* pfp = local_pfp;
1041
1042 struct pcp_semaphore* ceiling = pcp_get_ceiling();
1043 struct task_struct *blocker, *blocked;
1044
1045 blocker = ceiling ? ceiling->owner : NULL;
1046 blocked = __get_cpu_var(pcp_state).hp_waiter;
1047
1048 raw_spin_lock_irqsave(&pfp->slock, flags);
1049
1050 /* Current is no longer inheriting anything by default. This should be
1051 * the currently scheduled job, and hence not currently queued. */
1052 BUG_ON(current != pfp->scheduled);
1053
1054 fp_set_prio_inh(pfp, current, NULL);
1055 fp_set_prio_inh(pfp, blocked, NULL);
1056 fp_set_prio_inh(pfp, blocker, NULL);
1057
1058
1059 /* Let blocking job inherit priority of blocked job, if required. */
1060 if (blocker && blocked &&
1061 fp_higher_prio(blocked, blocker)) {
1062		TRACE_TASK(blocker, "PCP inherits from %s/%d (prio %u -> %u)\n",
1063 blocked->comm, blocked->pid,
1064 get_priority(blocker), get_priority(blocked));
1065 fp_set_prio_inh(pfp, blocker, blocked);
1066 }
1067
1068 /* check if anything changed */
1069 if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled))
1070 preempt(pfp);
1071
1072 raw_spin_unlock_irqrestore(&pfp->slock, flags);
1073}
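/* Example: if a low-priority job owns the semaphore at the top of the
 * system ceiling and a higher-priority job becomes the recorded
 * hp_waiter, the code above makes the owner inherit the waiter's
 * priority via fp_set_prio_inh(); the inheritance relation is cleared
 * and recomputed on every invocation.
 */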
1074
1075/* called with preemptions off */
1076static void pcp_raise_ceiling(struct pcp_semaphore* sem,
1077 int effective_prio)
1078{
1079 struct task_struct* t = current;
1080 struct pcp_semaphore* ceiling;
1081 prio_wait_queue_t wait;
1082 unsigned int waiting_higher_prio;
1083
1084 do {
1085 ceiling = pcp_get_ceiling();
1086 if (pcp_exceeds_ceiling(ceiling, t, effective_prio))
1087 break;
1088
1089		TRACE_CUR("PCP ceiling-blocked, wanted sem %p, but %s/%d has the ceiling\n",
1090 sem, ceiling->owner->comm, ceiling->owner->pid);
1091
1092 /* we need to wait until the ceiling is lowered */
1093
1094 /* enqueue in priority order */
1095 init_prio_waitqueue_entry(&wait, t, prio_point(effective_prio));
1096 set_task_state(t, TASK_UNINTERRUPTIBLE);
1097 waiting_higher_prio = add_wait_queue_prio_exclusive(
1098 &__get_cpu_var(pcp_state).ceiling_blocked, &wait);
1099
1100 if (waiting_higher_prio == 0) {
1101 TRACE_CUR("PCP new highest-prio waiter => prio inheritance\n");
1102
1103 /* we are the new highest-priority waiting job
1104 * => update inheritance */
1105 __get_cpu_var(pcp_state).hp_waiter = t;
1106 pcp_priority_inheritance();
1107 }
1108
1109 TS_LOCK_SUSPEND;
1110
1111 preempt_enable_no_resched();
1112 schedule();
1113 preempt_disable();
1114
1115 /* pcp_resume_unblocked() removed us from wait queue */
1116
1117 TS_LOCK_RESUME;
1118	} while (1);
1119
1120 TRACE_CUR("PCP got the ceiling and sem %p\n", sem);
1121
1122 /* We are good to go. The semaphore should be available. */
1123 BUG_ON(sem->owner != NULL);
1124
1125 sem->owner = t;
1126
1127 pcp_add_ceiling(sem);
1128}
1129
1130static void pcp_resume_unblocked(void)
1131{
1132 wait_queue_head_t *blocked = &__get_cpu_var(pcp_state).ceiling_blocked;
1133 unsigned long flags;
1134 prio_wait_queue_t* q;
1135 struct task_struct* t = NULL;
1136
1137 struct pcp_semaphore* ceiling = pcp_get_ceiling();
1138
1139 spin_lock_irqsave(&blocked->lock, flags);
1140
1141 while (waitqueue_active(blocked)) {
1142 /* check first == highest-priority waiting job */
1143 q = list_entry(blocked->task_list.next,
1144 prio_wait_queue_t, wq.task_list);
1145 t = (struct task_struct*) q->wq.private;
1146
1147 /* can it proceed now? => let it go */
1148 if (pcp_exceeds_ceiling(ceiling, t,
1149 prio_from_point(q->priority))) {
1150 __remove_wait_queue(blocked, &q->wq);
1151 wake_up_process(t);
1152 } else {
1153 /* We are done. Update highest-priority waiter. */
1154 __get_cpu_var(pcp_state).hp_waiter = t;
1155 goto out;
1156 }
1157 }
1158 /* If we get here, then there are no more waiting
1159 * jobs. */
1160 __get_cpu_var(pcp_state).hp_waiter = NULL;
1161out:
1162 spin_unlock_irqrestore(&blocked->lock, flags);
1163}
1164
1165/* assumes preempt off */
1166static void pcp_lower_ceiling(struct pcp_semaphore* sem)
1167{
1168 BUG_ON(!in_list(&sem->ceiling));
1169 BUG_ON(sem->owner != current);
1170 BUG_ON(sem->on_cpu != smp_processor_id());
1171
1172 /* remove from ceiling list */
1173 list_del(&sem->ceiling);
1174
1175 /* release */
1176 sem->owner = NULL;
1177
1178 TRACE_CUR("PCP released sem %p\n", sem);
1179
1180 /* Wake up all ceiling-blocked jobs that now pass the ceiling. */
1181 pcp_resume_unblocked();
1182
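	/* pcp_resume_unblocked() also updated hp_waiter, so recompute
	 * inheritance against the new system ceiling and waiter. */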
1183 pcp_priority_inheritance();
1184}
1185
1186static void pcp_update_prio_ceiling(struct pcp_semaphore* sem,
1187 int effective_prio)
1188{
1189	/* Ceiling updates must be serialized; we simply reuse the lock of the
1190	 * processor's ceiling_blocked waitqueue for this purpose.  We assume
1191	 * updates happen only before the task set starts execution (i.e.,
1192	 * during initialization), but they may occur on several processors
1193	 * at the same time.
1194	 */
1195 unsigned long flags;
1196
1197 struct pcp_state* s = &per_cpu(pcp_state, sem->on_cpu);
1198
1199 spin_lock_irqsave(&s->ceiling_blocked.lock, flags);
1200
1201 sem->prio_ceiling = min(sem->prio_ceiling, effective_prio);
1202
1203 spin_unlock_irqrestore(&s->ceiling_blocked.lock, flags);
1204}
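/* For example, if tasks with priorities 7, 3, and 5 (smaller value =
 * higher priority) each open the semaphore, prio_ceiling evolves as
 * min(INT_MAX, 7) = 7, then min(7, 3) = 3, then min(3, 5) = 3, i.e.,
 * the ceiling settles at the highest priority of any task that may
 * lock the semaphore.
 */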
1205
1206static void pcp_init_semaphore(struct pcp_semaphore* sem, int cpu)
1207{
1208 sem->owner = NULL;
1209 INIT_LIST_HEAD(&sem->ceiling);
1210 sem->prio_ceiling = INT_MAX;
1211 sem->on_cpu = cpu;
1212}
1213
1214int pfp_pcp_lock(struct litmus_lock* l)
1215{
1216 struct task_struct* t = current;
1217 struct pcp_semaphore *sem = pcp_from_lock(l);
1218
1219 int eprio = effective_agent_priority(get_priority(t));
1220 int from = get_partition(t);
1221 int to = sem->on_cpu;
1222
1223 if (!is_realtime(t) || from != to)
1224 return -EPERM;
1225
1226 preempt_disable();
1227
1228 pcp_raise_ceiling(sem, eprio);
1229
1230 preempt_enable();
1231
1232 return 0;
1233}
1234
1235int pfp_pcp_unlock(struct litmus_lock* l)
1236{
1237 struct task_struct *t = current;
1238 struct pcp_semaphore *sem = pcp_from_lock(l);
1239
1240 int err = 0;
1241
1242 preempt_disable();
1243
1244 if (sem->on_cpu != smp_processor_id() || sem->owner != t) {
1245 err = -EINVAL;
1246 goto out;
1247 }
1248
1249 /* give it back */
1250 pcp_lower_ceiling(sem);
1251
1252out:
1253 preempt_enable();
1254
1255 return err;
1256}
1257
1258int pfp_pcp_open(struct litmus_lock* l, void* __user config)
1259{
1260 struct task_struct *t = current;
1261 struct pcp_semaphore *sem = pcp_from_lock(l);
1262
1263 int cpu, eprio;
1264
1265 if (!is_realtime(t))
1266 /* we need to know the real-time priority */
1267 return -EPERM;
1268
1269 if (get_user(cpu, (int*) config))
1270 return -EFAULT;
1271
1272 /* make sure the resource location matches */
1273 if (cpu != sem->on_cpu)
1274 return -EINVAL;
1275
1276 eprio = effective_agent_priority(get_priority(t));
1277
1278 pcp_update_prio_ceiling(sem, eprio);
1279
1280 return 0;
1281}
1282
1283int pfp_pcp_close(struct litmus_lock* l)
1284{
1285 struct task_struct *t = current;
1286 struct pcp_semaphore *sem = pcp_from_lock(l);
1287
1288 int owner = 0;
1289
1290 preempt_disable();
1291
1292 if (sem->on_cpu == smp_processor_id())
1293 owner = sem->owner == t;
1294
1295 preempt_enable();
1296
1297 if (owner)
1298 pfp_pcp_unlock(l);
1299
1300 return 0;
1301}
1302
1303void pfp_pcp_free(struct litmus_lock* lock)
1304{
1305 kfree(pcp_from_lock(lock));
1306}
1307
1308
1309static struct litmus_lock_ops pfp_pcp_lock_ops = {
1310 .close = pfp_pcp_close,
1311 .lock = pfp_pcp_lock,
1312 .open = pfp_pcp_open,
1313 .unlock = pfp_pcp_unlock,
1314 .deallocate = pfp_pcp_free,
1315};
1316
1317
1318static struct litmus_lock* pfp_new_pcp(int on_cpu)
1319{
1320 struct pcp_semaphore* sem;
1321
1322 sem = kmalloc(sizeof(*sem), GFP_KERNEL);
1323 if (!sem)
1324 return NULL;
1325
1326 sem->litmus_lock.ops = &pfp_pcp_lock_ops;
1327 pcp_init_semaphore(sem, on_cpu);
1328
1329 return &sem->litmus_lock;
1330}
1331
1332/* ******************** DPCP support ********************** */
1333
1334struct dpcp_semaphore {
1335 struct litmus_lock litmus_lock;
1336 struct pcp_semaphore pcp;
1337 int owner_cpu;
1338};
1339
1340static inline struct dpcp_semaphore* dpcp_from_lock(struct litmus_lock* lock)
1341{
1342 return container_of(lock, struct dpcp_semaphore, litmus_lock);
1343}
1344
1345/* called with preemptions disabled */
1346static void pfp_migrate_to(int target_cpu)
1347{
1348 struct task_struct* t = current;
1349 pfp_domain_t *from;
1350
1351 if (get_partition(t) == target_cpu)
1352 return;
1353
1354 /* make sure target_cpu makes sense */
1355 BUG_ON(!cpu_online(target_cpu));
1356
1357 local_irq_disable();
1358
1359 /* scheduled task should not be in any ready or release queue */
1360 BUG_ON(is_queued(t));
1361
1362	/* only the source domain needs to be locked to update the task's partition */
1363 from = task_pfp(t);
1364
1365 raw_spin_lock(&from->slock);
1366
1367 /* switch partitions */
1368 tsk_rt(t)->task_params.cpu = target_cpu;
1369
1370 raw_spin_unlock(&from->slock);
1371
1372 /* Don't trace scheduler costs as part of
1373 * locking overhead. Scheduling costs are accounted for
1374 * explicitly. */
1375 TS_LOCK_SUSPEND;
1376
1377 local_irq_enable();
1378 preempt_enable_no_resched();
1379
1380 /* deschedule to be migrated */
1381 schedule();
1382
1383 /* we are now on the target processor */
1384 preempt_disable();
1385
1386 /* start recording costs again */
1387 TS_LOCK_RESUME;
1388
1389 BUG_ON(smp_processor_id() != target_cpu);
1390}
1391
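/* DPCP requests execute on the processor that hosts the resource:
 * pfp_dpcp_lock() priority-boosts the caller, migrates it to
 * sem->pcp.on_cpu via pfp_migrate_to(), and acquires the underlying PCP
 * semaphore there; pfp_dpcp_unlock() lowers the ceiling, drops the
 * boost, and migrates back to the home partition saved in owner_cpu.
 */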
1392int pfp_dpcp_lock(struct litmus_lock* l)
1393{
1394 struct task_struct* t = current;
1395 struct dpcp_semaphore *sem = dpcp_from_lock(l);
1396 int eprio = effective_agent_priority(get_priority(t));
1397 int from = get_partition(t);
1398 int to = sem->pcp.on_cpu;
1399
1400 if (!is_realtime(t))
1401 return -EPERM;
1402
1403 preempt_disable();
1404
1405	/* Boost our priority *before* we suspend so that
1406	 * we resume with boosted priority. */
1407
1408 boost_priority(t, get_priority(t));
1409
1410 pfp_migrate_to(to);
1411
1412 pcp_raise_ceiling(&sem->pcp, eprio);
1413
1414	/* we hold the resource; remember the home partition for the unlock path */
1415 sem->owner_cpu = from;
1416
1417 preempt_enable();
1418
1419 return 0;
1420}
1421
1422int pfp_dpcp_unlock(struct litmus_lock* l)
1423{
1424 struct task_struct *t = current;
1425 struct dpcp_semaphore *sem = dpcp_from_lock(l);
1426 int err = 0;
1427 int home;
1428
1429 preempt_disable();
1430
1431 if (sem->pcp.on_cpu != smp_processor_id() || sem->pcp.owner != t) {
1432 err = -EINVAL;
1433 goto out;
1434 }
1435
1436 home = sem->owner_cpu;
1437
1438 /* give it back */
1439 pcp_lower_ceiling(&sem->pcp);
1440
1441 /* we lose the benefit of priority boosting */
1442 unboost_priority(t);
1443
1444 pfp_migrate_to(home);
1445
1446out:
1447 preempt_enable();
1448
1449 return err;
1450}
1451
1452int pfp_dpcp_open(struct litmus_lock* l, void* __user config)
1453{
1454 struct task_struct *t = current;
1455 struct dpcp_semaphore *sem = dpcp_from_lock(l);
1456 int cpu, eprio;
1457
1458 if (!is_realtime(t))
1459 /* we need to know the real-time priority */
1460 return -EPERM;
1461
1462 if (get_user(cpu, (int*) config))
1463 return -EFAULT;
1464
1465 /* make sure the resource location matches */
1466 if (cpu != sem->pcp.on_cpu)
1467 return -EINVAL;
1468
1469 eprio = effective_agent_priority(get_priority(t));
1470
1471 pcp_update_prio_ceiling(&sem->pcp, eprio);
1472
1473 return 0;
1474}
1475
1476int pfp_dpcp_close(struct litmus_lock* l)
1477{
1478 struct task_struct *t = current;
1479 struct dpcp_semaphore *sem = dpcp_from_lock(l);
1480 int owner = 0;
1481
1482 preempt_disable();
1483
1484 if (sem->pcp.on_cpu == smp_processor_id())
1485 owner = sem->pcp.owner == t;
1486
1487 preempt_enable();
1488
1489 if (owner)
1490 pfp_dpcp_unlock(l);
1491
1492 return 0;
1493}
1494
1495void pfp_dpcp_free(struct litmus_lock* lock)
1496{
1497 kfree(dpcp_from_lock(lock));
1498}
1499
1500static struct litmus_lock_ops pfp_dpcp_lock_ops = {
1501 .close = pfp_dpcp_close,
1502 .lock = pfp_dpcp_lock,
1503 .open = pfp_dpcp_open,
1504 .unlock = pfp_dpcp_unlock,
1505 .deallocate = pfp_dpcp_free,
1506};
1507
1508static struct litmus_lock* pfp_new_dpcp(int on_cpu)
1509{
1510 struct dpcp_semaphore* sem;
1511
1512 sem = kmalloc(sizeof(*sem), GFP_KERNEL);
1513 if (!sem)
1514 return NULL;
1515
1516 sem->litmus_lock.ops = &pfp_dpcp_lock_ops;
1517 sem->owner_cpu = NO_CPU;
1518 pcp_init_semaphore(&sem->pcp, on_cpu);
1519
1520 return &sem->litmus_lock;
1521}
1522
1523
1524/* **** lock constructor **** */
1525
1526
1527static long pfp_allocate_lock(struct litmus_lock **lock, int type,
1528 void* __user config)
1529{
1530 int err = -ENXIO, cpu;
1531 struct srp_semaphore* srp;
1532
1533	/* P-FP supports the SRP and the PCP for local resources, and the FMLP,
1534	 * the MPCP (with and without virtual spinning), and the DPCP for global resources. */
1535 switch (type) {
1536 case FMLP_SEM:
1537 /* FIFO Mutex Locking Protocol */
1538 *lock = pfp_new_fmlp();
1539 if (*lock)
1540 err = 0;
1541 else
1542 err = -ENOMEM;
1543 break;
1544
1545 case MPCP_SEM:
1546		/* Multiprocessor Priority Ceiling Protocol */
1547 *lock = pfp_new_mpcp(0);
1548 if (*lock)
1549 err = 0;
1550 else
1551 err = -ENOMEM;
1552 break;
1553
1554 case MPCP_VS_SEM:
1555		/* Multiprocessor Priority Ceiling Protocol with virtual spinning */
1556 *lock = pfp_new_mpcp(1);
1557 if (*lock)
1558 err = 0;
1559 else
1560 err = -ENOMEM;
1561 break;
1562
1563 case DPCP_SEM:
1564 /* Distributed Priority Ceiling Protocol */
1565 if (get_user(cpu, (int*) config))
1566 return -EFAULT;
1567
1568 if (!cpu_online(cpu))
1569 return -EINVAL;
1570
1571 *lock = pfp_new_dpcp(cpu);
1572 if (*lock)
1573 err = 0;
1574 else
1575 err = -ENOMEM;
1576 break;
1577
1578 case SRP_SEM:
1579 /* Baker's Stack Resource Policy */
1580 srp = allocate_srp_semaphore();
1581 if (srp) {
1582 *lock = &srp->litmus_lock;
1583 err = 0;
1584 } else
1585 err = -ENOMEM;
1586 break;
1587
1588 case PCP_SEM:
1589 /* Priority Ceiling Protocol */
1590 if (get_user(cpu, (int*) config))
1591 return -EFAULT;
1592
1593 if (!cpu_online(cpu))
1594 return -EINVAL;
1595
1596 *lock = pfp_new_pcp(cpu);
1597 if (*lock)
1598 err = 0;
1599 else
1600 err = -ENOMEM;
1601 break;
1602	}
1603
1604 return err;
1605}
1606
1607#endif
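/* Illustrative user-space sketch: how a task might allocate and use one of
 * these semaphores.  This assumes the liblitmus helpers litmus_open_lock(),
 * litmus_lock(), litmus_unlock(), and od_close(); the namespace string and
 * lock id below are arbitrary placeholders.
 */
#if 0	/* user-space example only */
#include <litmus.h>

static int use_dpcp_resource(int resource_cpu)
{
	/* For DPCP_SEM (and PCP_SEM), the configuration parameter is the
	 * CPU hosting the resource, matching get_user(cpu, config) above. */
	int od = litmus_open_lock(DPCP_SEM, 1, "locks", &resource_cpu);

	if (od < 0)
		return od;

	litmus_lock(od);	/* the request migrates to resource_cpu */
	/* ... critical section runs on resource_cpu ... */
	litmus_unlock(od);	/* migrate back to the home partition */

	return od_close(od);
}
#endif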
1608
1609static long pfp_admit_task(struct task_struct* tsk)
1610{
1611 if (task_cpu(tsk) == tsk->rt_param.task_params.cpu &&
1612#ifdef CONFIG_RELEASE_MASTER
1613 /* don't allow tasks on release master CPU */
1614 task_cpu(tsk) != remote_dom(task_cpu(tsk))->release_master &&
1615#endif
1616 litmus_is_valid_fixed_prio(get_priority(tsk)))
1617 return 0;
1618 else
1619 return -EINVAL;
1620}
1621
1622static long pfp_activate_plugin(void)
1623{
1624#if defined(CONFIG_RELEASE_MASTER) || defined(CONFIG_LITMUS_LOCKING)
1625 int cpu;
1626#endif
1627
1628#ifdef CONFIG_RELEASE_MASTER
1629 for_each_online_cpu(cpu) {
1630 remote_dom(cpu)->release_master = atomic_read(&release_master_cpu);
1631 }
1632#endif
1633
1634#ifdef CONFIG_LITMUS_LOCKING
1635 get_srp_prio = pfp_get_srp_prio;
1636
1637 for_each_online_cpu(cpu) {
1638 init_waitqueue_head(&per_cpu(mpcpvs_vspin_wait, cpu));
1639 per_cpu(mpcpvs_vspin, cpu) = NULL;
1640
1641 pcp_init_state(&per_cpu(pcp_state, cpu));
1642 pfp_doms[cpu] = remote_pfp(cpu);
1643 }
1644
1645#endif
1646
1647 return 0;
1648}
1649
1650
1651/* Plugin object */
1652static struct sched_plugin pfp_plugin __cacheline_aligned_in_smp = {
1653 .plugin_name = "P-FP",
1654 .tick = pfp_tick,
1655 .task_new = pfp_task_new,
1656 .complete_job = complete_job,
1657 .task_exit = pfp_task_exit,
1658 .schedule = pfp_schedule,
1659 .task_wake_up = pfp_task_wake_up,
1660 .task_block = pfp_task_block,
1661 .admit_task = pfp_admit_task,
1662 .activate_plugin = pfp_activate_plugin,
1663#ifdef CONFIG_LITMUS_LOCKING
1664 .allocate_lock = pfp_allocate_lock,
1665 .finish_switch = pfp_finish_switch,
1666#endif
1667};
1668
1669
1670static int __init init_pfp(void)
1671{
1672 int i;
1673
1674	/* CPU hotplug is not supported.  If it ever were, iterating up to
1675	 * num_online_cpus() here would not suffice, since domains of CPUs
1676	 * that come online later would never be initialized.
1677	 */
1678 for (i = 0; i < num_online_cpus(); i++) {
1679 pfp_domain_init(remote_pfp(i), i);
1680 }
1681 return register_sched_plugin(&pfp_plugin);
1682}
1683
1684module_init(init_pfp);
1685
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 8e4a22dd8d6a..b0c8126bd44a 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -17,6 +17,7 @@
17#include <litmus/litmus.h> 17#include <litmus/litmus.h>
18#include <litmus/jobs.h> 18#include <litmus/jobs.h>
19#include <litmus/preempt.h> 19#include <litmus/preempt.h>
20#include <litmus/budget.h>
20#include <litmus/sched_plugin.h> 21#include <litmus/sched_plugin.h>
21#include <litmus/edf_common.h> 22#include <litmus/edf_common.h>
22#include <litmus/sched_trace.h> 23#include <litmus/sched_trace.h>