 35 files changed, 3336 insertions(+), 263 deletions(-)
diff --git a/arch/x86/include/asm/feather_trace_32.h b/arch/x86/include/asm/feather_trace_32.h
index 70202f90f169..75e81a9f9382 100644
--- a/arch/x86/include/asm/feather_trace_32.h
+++ b/arch/x86/include/asm/feather_trace_32.h
@@ -1,12 +1,45 @@
+/* Copyright (c) 2007-2012 Björn Brandenburg, <bbb@mpi-sws.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
 /* Do not directly include this file. Include feather_trace.h instead */
 
-#define feather_callback __attribute__((regparm(0)))
+#define feather_callback __attribute__((regparm(3))) __attribute__((used))
 
 /*
- * make the compiler reload any register that is not saved in
- * a cdecl function call
+ * Make the compiler reload any register that is not saved in a cdecl function
+ * call (minus the registers that we explicitly clobber as output registers).
  */
-#define CLOBBER_LIST "memory", "cc", "eax", "ecx", "edx"
+#define __FT_CLOBBER_LIST0 "memory", "cc", "eax", "edx", "ecx"
+#define __FT_CLOBBER_LIST1 "memory", "cc", "eax", "ecx"
+#define __FT_CLOBBER_LIST2 "memory", "cc", "eax"
+#define __FT_CLOBBER_LIST3 "memory", "cc", "eax"
+
+#define __FT_TMP1(x) "=d" (x)
+#define __FT_ARG1(x) "0" ((long) (x))
+#define __FT_TMP2(x) "=c" (x)
+#define __FT_ARG2(x) "1" ((long) (x))
+
+#define __FT_ARG3(x) "r" ((long) (x))
 
 #define ft_event(id, callback) \
 	__asm__ __volatile__( \
@@ -16,64 +49,67 @@
 	    ".long " #id ", 0, 1b, 2f \n\t" \
 	    ".previous \n\t" \
 	    "2: \n\t" \
-	    : : : CLOBBER_LIST)
+	    : : : __FT_CLOBBER_LIST0)
 
 #define ft_event0(id, callback) \
 	__asm__ __volatile__( \
 	    "1: jmp 2f \n\t" \
-	    " subl $4, %%esp \n\t" \
-	    " movl $" #id ", (%%esp) \n\t" \
+	    " movl $" #id ", %%eax \n\t" \
 	    " call " #callback " \n\t" \
-	    " addl $4, %%esp \n\t" \
 	    ".section __event_table, \"aw\" \n\t" \
 	    ".long " #id ", 0, 1b, 2f \n\t" \
 	    ".previous \n\t" \
 	    "2: \n\t" \
-	    : : : CLOBBER_LIST)
+	    : : : __FT_CLOBBER_LIST0)
 
 #define ft_event1(id, callback, param) \
+	do { \
+		long __ft_tmp1; \
 	__asm__ __volatile__( \
 	    "1: jmp 2f \n\t" \
-	    " subl $8, %%esp \n\t" \
-	    " movl %0, 4(%%esp) \n\t" \
-	    " movl $" #id ", (%%esp) \n\t" \
+	    " movl $" #id ", %%eax \n\t" \
 	    " call " #callback " \n\t" \
-	    " addl $8, %%esp \n\t" \
 	    ".section __event_table, \"aw\" \n\t" \
 	    ".long " #id ", 0, 1b, 2f \n\t" \
 	    ".previous \n\t" \
 	    "2: \n\t" \
-	    : : "r" (param) : CLOBBER_LIST)
+	    : __FT_TMP1(__ft_tmp1) \
+	    : __FT_ARG1(param) \
+	    : __FT_CLOBBER_LIST1); \
+	} while (0);
 
 #define ft_event2(id, callback, param, param2) \
+	do { \
+		long __ft_tmp1, __ft_tmp2; \
 	__asm__ __volatile__( \
 	    "1: jmp 2f \n\t" \
-	    " subl $12, %%esp \n\t" \
-	    " movl %1, 8(%%esp) \n\t" \
-	    " movl %0, 4(%%esp) \n\t" \
-	    " movl $" #id ", (%%esp) \n\t" \
+	    " movl $" #id ", %%eax \n\t" \
 	    " call " #callback " \n\t" \
-	    " addl $12, %%esp \n\t" \
 	    ".section __event_table, \"aw\" \n\t" \
 	    ".long " #id ", 0, 1b, 2f \n\t" \
 	    ".previous \n\t" \
 	    "2: \n\t" \
-	    : : "r" (param), "r" (param2) : CLOBBER_LIST)
+	    : __FT_TMP1(__ft_tmp1), __FT_TMP2(__ft_tmp2) \
+	    : __FT_ARG1(param), __FT_ARG2(param2) \
+	    : __FT_CLOBBER_LIST2); \
+	} while (0);
 
 
-#define ft_event3(id, callback, p, p2, p3) \
+#define ft_event3(id, callback, param, param2, param3) \
+	do { \
+		long __ft_tmp1, __ft_tmp2; \
 	__asm__ __volatile__( \
 	    "1: jmp 2f \n\t" \
-	    " subl $16, %%esp \n\t" \
-	    " movl %2, 12(%%esp) \n\t" \
-	    " movl %1, 8(%%esp) \n\t" \
-	    " movl %0, 4(%%esp) \n\t" \
-	    " movl $" #id ", (%%esp) \n\t" \
+	    " subl $4, %%esp \n\t" \
+	    " movl $" #id ", %%eax \n\t" \
+	    " movl %2, (%%esp) \n\t" \
 	    " call " #callback " \n\t" \
-	    " addl $16, %%esp \n\t" \
+	    " addl $4, %%esp \n\t" \
 	    ".section __event_table, \"aw\" \n\t" \
 	    ".long " #id ", 0, 1b, 2f \n\t" \
 	    ".previous \n\t" \
 	    "2: \n\t" \
-	    : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST)
-
+	    : __FT_TMP1(__ft_tmp1), __FT_TMP2(__ft_tmp2) \
+	    : __FT_ARG1(param), __FT_ARG2(param2), __FT_ARG3(param3) \
+	    : __FT_CLOBBER_LIST3); \
+	} while (0);
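
For illustration, a sketch of how the reworked 32-bit macros are meant to be used; the callback name and event id below are hypothetical, not part of the patch. With regparm(3), the event id travels in %eax and the first payload word in %edx, which is why ft_event1() now binds its parameter via __FT_TMP1 ("=d") instead of pushing it onto the stack:

    #include <litmus/feather_trace.h>

    /* hypothetical callback: 'id' arrives in %eax, 'arg' in %edx */
    feather_callback void my_probe(unsigned long id, unsigned long arg)
    {
    	/* record 'arg' for event 'id' (illustrative body) */
    }

    static inline void emit_my_probe(unsigned long word)
    {
    	/* 1234 is a made-up event id; the macro stringizes it */
    	ft_event1(1234, my_probe, word);
    }
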
diff --git a/arch/x86/include/asm/feather_trace_64.h b/arch/x86/include/asm/feather_trace_64.h
index 54ac2aeb3a28..5ce49e2eebba 100644
--- a/arch/x86/include/asm/feather_trace_64.h
+++ b/arch/x86/include/asm/feather_trace_64.h
@@ -1,67 +1,124 @@
+/* Copyright (c) 2010 Andrea Bastoni, <bastoni@cs.unc.edu>
+ * Copyright (c) 2012 Björn Brandenburg, <bbb@mpi-sws.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
 /* Do not directly include this file. Include feather_trace.h instead */
 
 /* regparm is the default on x86_64 */
-#define feather_callback
+#define feather_callback __attribute__((used))
 
-# define _EVENT_TABLE(id,from,to) \
+#define __FT_EVENT_TABLE(id,from,to) \
 	    ".section __event_table, \"aw\"\n\t" \
 	    ".balign 8\n\t" \
 	    ".quad " #id ", 0, " #from ", " #to " \n\t" \
 	    ".previous \n\t"
 
 /*
- * x86_64 callee only owns rbp, rbx, r12 -> r15
- * the called can freely modify the others
+ * x86_64 caller only owns rbp, rbx, r12-r15;
+ * the callee can freely modify the others.
  */
-#define CLOBBER_LIST "memory", "cc", "rdi", "rsi", "rdx", "rcx", \
+#define __FT_CLOBBER_LIST0 "memory", "cc", "rdi", "rsi", "rdx", "rcx", \
+			"r8", "r9", "r10", "r11", "rax"
+
+#define __FT_CLOBBER_LIST1 "memory", "cc", "rdi", "rdx", "rcx", \
+			"r8", "r9", "r10", "r11", "rax"
+
+#define __FT_CLOBBER_LIST2 "memory", "cc", "rdi", "rcx", \
 			"r8", "r9", "r10", "r11", "rax"
 
+#define __FT_CLOBBER_LIST3 "memory", "cc", "rdi", \
+			"r8", "r9", "r10", "r11", "rax"
+
+/* The registers RDI, RSI, RDX, RCX, R8 and R9 are used for integer and pointer
+ * arguments. */
+
+/* RSI */
+#define __FT_TMP1(x) "=S" (x)
+#define __FT_ARG1(x) "0" ((long) (x))
+
+/* RDX */
+#define __FT_TMP2(x) "=d" (x)
+#define __FT_ARG2(x) "1" ((long) (x))
+
+/* RCX */
+#define __FT_TMP3(x) "=c" (x)
+#define __FT_ARG3(x) "2" ((long) (x))
+
 #define ft_event(id, callback) \
 	__asm__ __volatile__( \
 	    "1: jmp 2f \n\t" \
 	    " call " #callback " \n\t" \
-	    _EVENT_TABLE(id,1b,2f) \
+	    __FT_EVENT_TABLE(id,1b,2f) \
 	    "2: \n\t" \
-	    : : : CLOBBER_LIST)
+	    : : : __FT_CLOBBER_LIST0)
 
 #define ft_event0(id, callback) \
 	__asm__ __volatile__( \
 	    "1: jmp 2f \n\t" \
 	    " movq $" #id ", %%rdi \n\t" \
 	    " call " #callback " \n\t" \
-	    _EVENT_TABLE(id,1b,2f) \
+	    __FT_EVENT_TABLE(id,1b,2f) \
 	    "2: \n\t" \
-	    : : : CLOBBER_LIST)
+	    : : : __FT_CLOBBER_LIST0)
 
 #define ft_event1(id, callback, param) \
+	do { \
+		long __ft_tmp1; \
 	__asm__ __volatile__( \
 	    "1: jmp 2f \n\t" \
-	    " movq %0, %%rsi \n\t" \
 	    " movq $" #id ", %%rdi \n\t" \
 	    " call " #callback " \n\t" \
-	    _EVENT_TABLE(id,1b,2f) \
+	    __FT_EVENT_TABLE(id,1b,2f) \
 	    "2: \n\t" \
-	    : : "r" (param) : CLOBBER_LIST)
+	    : __FT_TMP1(__ft_tmp1) \
+	    : __FT_ARG1(param) \
+	    : __FT_CLOBBER_LIST1); \
+	} while (0);
 
 #define ft_event2(id, callback, param, param2) \
+	do { \
+		long __ft_tmp1, __ft_tmp2; \
 	__asm__ __volatile__( \
 	    "1: jmp 2f \n\t" \
-	    " movq %1, %%rdx \n\t" \
-	    " movq %0, %%rsi \n\t" \
 	    " movq $" #id ", %%rdi \n\t" \
 	    " call " #callback " \n\t" \
-	    _EVENT_TABLE(id,1b,2f) \
+	    __FT_EVENT_TABLE(id,1b,2f) \
 	    "2: \n\t" \
-	    : : "r" (param), "r" (param2) : CLOBBER_LIST)
+	    : __FT_TMP1(__ft_tmp1), __FT_TMP2(__ft_tmp2) \
+	    : __FT_ARG1(param), __FT_ARG2(param2) \
+	    : __FT_CLOBBER_LIST2); \
+	} while (0);
 
-#define ft_event3(id, callback, p, p2, p3) \
+#define ft_event3(id, callback, param, param2, param3) \
+	do { \
+		long __ft_tmp1, __ft_tmp2, __ft_tmp3; \
 	__asm__ __volatile__( \
 	    "1: jmp 2f \n\t" \
-	    " movq %2, %%rcx \n\t" \
-	    " movq %1, %%rdx \n\t" \
-	    " movq %0, %%rsi \n\t" \
 	    " movq $" #id ", %%rdi \n\t" \
 	    " call " #callback " \n\t" \
-	    _EVENT_TABLE(id,1b,2f) \
+	    __FT_EVENT_TABLE(id,1b,2f) \
 	    "2: \n\t" \
-	    : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST)
+	    : __FT_TMP1(__ft_tmp1), __FT_TMP2(__ft_tmp2), __FT_TMP3(__ft_tmp3) \
+	    : __FT_ARG1(param), __FT_ARG2(param2), __FT_ARG3(param3) \
+	    : __FT_CLOBBER_LIST3); \
+	} while (0);
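
The key trick in both headers is the pairing of a __FT_TMPn() output constraint with a __FT_ARGn() matching input constraint. A rough picture of what ft_event1() expands to on x86-64, under standard GCC extended-asm semantics (callback name and event id are placeholders):

    static inline void example_expansion(long param)
    {
    	long __ft_tmp1;
    	__asm__ __volatile__(
    		"1: jmp 2f                   \n\t"
    		"   movq $1234, %%rdi        \n\t" /* event id -> 1st argument register */
    		"   call my_callback         \n\t"
    		/* __event_table section entry omitted for brevity */
    		"2:                          \n\t"
    		: "=S" (__ft_tmp1)      /* __FT_TMP1: claims RSI as an output */
    		: "0" ((long) param)    /* __FT_ARG1: input tied to operand 0, i.e. RSI */
    		: "memory", "cc", "rdi", "rdx", "rcx",
    		  "r8", "r9", "r10", "r11", "rax"); /* __FT_CLOBBER_LIST1 */
    }

Because the compiler itself places the parameter in RSI, the old explicit movq into RSI disappears, and RSI moves out of the clobber list (an output operand may not also be clobbered).
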
diff --git a/include/litmus/binheap.h b/include/litmus/binheap.h
new file mode 100644
index 000000000000..901a30a3e296
--- /dev/null
+++ b/include/litmus/binheap.h
@@ -0,0 +1,206 @@
+#ifndef LITMUS_BINARY_HEAP_H
+#define LITMUS_BINARY_HEAP_H
+
+#include <linux/kernel.h>
+
+/**
+ * Simple binary heap with add, arbitrary delete, delete_root, and top
+ * operations.
+ *
+ * Style meant to conform with list.h.
+ *
+ * Motivation: Linux's prio_heap.h is of fixed size. Litmus's binomial
+ * heap may be overkill (and perhaps not general enough) for some applications.
+ *
+ * Note: In order to make node swaps fast, a node inserted with a data pointer
+ * may not always hold said data pointer. This is similar to the binomial heap
+ * implementation. This does make node deletion tricky since we have to
+ * (1) locate the node that holds the data pointer to delete, and (2) the
+ * node that was originally inserted with said data pointer. These have to be
+ * coalesced into a single node before removal (see usage of
+ * __binheap_safe_swap()). We have to track node references to accomplish this.
+ */
+
+struct binheap_node {
+	void	*data;
+	struct binheap_node *parent;
+	struct binheap_node *left;
+	struct binheap_node *right;
+
+	/* pointer to binheap_node that holds *data for which this binheap_node
+	 * was originally inserted. (*data "owns" this node)
+	 */
+	struct binheap_node *ref;
+	struct binheap_node **ref_ptr;
+};
+
+/**
+ * Signature of comparator function. Assumed 'less-than' (min-heap).
+ * Pass in 'greater-than' for max-heap.
+ *
+ * TODO: Consider macro-based implementation that allows comparator to be
+ * inlined (similar to Linux red/black tree) for greater efficiency.
+ */
+typedef int (*binheap_order_t)(struct binheap_node *a,
+			       struct binheap_node *b);
+
+
+struct binheap {
+	struct binheap_node *root;
+
+	/* pointer to node to take next inserted child */
+	struct binheap_node *next;
+
+	/* pointer to last node in complete binary tree */
+	struct binheap_node *last;
+
+	/* comparator function pointer */
+	binheap_order_t compare;
+};
+
+
+/* Initialized heap nodes not in a heap have parent
+ * set to BINHEAP_POISON.
+ */
+#define BINHEAP_POISON	((void*)(0xdeadbeef))
+
+
+/**
+ * binheap_entry - get the struct for this heap node.
+ * Only valid when called upon heap nodes other than the root handle.
+ * @ptr:	the heap node.
+ * @type:	the type of struct pointed to by binheap_node::data.
+ * @member:	unused.
+ */
+#define binheap_entry(ptr, type, member) \
+	((type *)((ptr)->data))
+
+/**
+ * binheap_node_container - get the struct that contains this node.
+ * Only valid when called upon heap nodes other than the root handle.
+ * @ptr:	the heap node.
+ * @type:	the type of struct the node is embedded in.
+ * @member:	the name of the binheap_node within the (type) struct.
+ */
+#define binheap_node_container(ptr, type, member) \
+	container_of((ptr), type, member)
+
+/**
+ * binheap_top_entry - get the struct for the node at the top of the heap.
+ * Only valid when called upon the heap handle node.
+ * @ptr:	the special heap-handle node.
+ * @type:	the type of the struct the head is embedded in.
+ * @member:	the name of the binheap_node within the (type) struct.
+ */
+#define binheap_top_entry(ptr, type, member) \
+	binheap_entry((ptr)->root, type, member)
+
+/**
+ * binheap_delete_root - remove the root element from the heap.
+ * @handle:	handle to the heap.
+ * @type:	the type of the struct the head is embedded in.
+ * @member:	the name of the binheap_node within the (type) struct.
+ */
+#define binheap_delete_root(handle, type, member) \
+	__binheap_delete_root((handle), &((type *)((handle)->root->data))->member)
+
+/**
+ * binheap_delete - remove an arbitrary element from the heap.
+ * @to_delete:	pointer to node to be removed.
+ * @handle:	handle to the heap.
+ */
+#define binheap_delete(to_delete, handle) \
+	__binheap_delete((to_delete), (handle))
+
+/**
+ * binheap_add - insert an element to the heap
+ * @new_node:	node to add.
+ * @handle:	handle to the heap.
+ * @type:	the type of the struct the head is embedded in.
+ * @member:	the name of the binheap_node within the (type) struct.
+ */
+#define binheap_add(new_node, handle, type, member) \
+	__binheap_add((new_node), (handle), container_of((new_node), type, member))
+
+/**
+ * binheap_decrease - re-eval the position of a node (based upon its
+ * original data pointer).
+ * @handle:	handle to the heap.
+ * @orig_node:	node that was associated with the data pointer
+ *		(whose value has changed) when said pointer was
+ *		added to the heap.
+ */
+#define binheap_decrease(orig_node, handle) \
+	__binheap_decrease((orig_node), (handle))
+
+#define BINHEAP_NODE_INIT() { NULL, BINHEAP_POISON, NULL, NULL, NULL, NULL }
+
+#define BINHEAP_NODE(name) \
+	struct binheap_node name = BINHEAP_NODE_INIT()
+
+
+static inline void INIT_BINHEAP_NODE(struct binheap_node *n)
+{
+	n->data = NULL;
+	n->parent = BINHEAP_POISON;
+	n->left = NULL;
+	n->right = NULL;
+	n->ref = NULL;
+	n->ref_ptr = NULL;
+}
+
+static inline void INIT_BINHEAP_HANDLE(struct binheap *handle,
+				       binheap_order_t compare)
+{
+	handle->root = NULL;
+	handle->next = NULL;
+	handle->last = NULL;
+	handle->compare = compare;
+}
+
+/* Returns true if binheap is empty. */
+static inline int binheap_empty(struct binheap *handle)
+{
+	return (handle->root == NULL);
+}
+
+/* Returns true if binheap node is in a heap. */
+static inline int binheap_is_in_heap(struct binheap_node *node)
+{
+	return (node->parent != BINHEAP_POISON);
+}
+
+/* Returns true if binheap node is in given heap. */
+int binheap_is_in_this_heap(struct binheap_node *node, struct binheap* heap);
+
+/* Add a node to a heap */
+void __binheap_add(struct binheap_node *new_node,
+		   struct binheap *handle,
+		   void *data);
+
+/**
+ * Removes the root node from the heap. The node is removed after coalescing
+ * the binheap_node with its original data pointer at the root of the tree.
+ *
+ * The 'last' node in the tree is then swapped up to the root and bubbled
+ * down.
+ */
+void __binheap_delete_root(struct binheap *handle,
+			   struct binheap_node *container);
+
+/**
+ * Delete an arbitrary node. The node to delete is bubbled up to the root
+ * and then deleted there.
+ */
+void __binheap_delete(struct binheap_node *node_to_delete,
+		      struct binheap *handle);
+
+/**
+ * Bubble up a node whose pointer has decreased in value.
+ */
+void __binheap_decrease(struct binheap_node *orig_node,
+			struct binheap *handle);
+
+
+#endif
+
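
A hypothetical usage sketch of this API (the struct and function names are invented for illustration): embed a binheap_node in the element type, supply a 'less-than' comparator for a min-heap, and access elements through binheap_entry():

    struct job {
    	unsigned long long deadline;
    	struct binheap_node heap_node;
    };

    /* 'less-than' comparator => min-heap ordered by deadline */
    static int job_order(struct binheap_node *a, struct binheap_node *b)
    {
    	struct job *ja = binheap_entry(a, struct job, heap_node);
    	struct job *jb = binheap_entry(b, struct job, heap_node);
    	return ja->deadline < jb->deadline;
    }

    static struct binheap pending;

    static void binheap_demo(struct job *j)
    {
    	INIT_BINHEAP_HANDLE(&pending, job_order);
    	INIT_BINHEAP_NODE(&j->heap_node);
    	binheap_add(&j->heap_node, &pending, struct job, heap_node);

    	if (!binheap_empty(&pending)) {
    		struct job *first =
    			binheap_top_entry(&pending, struct job, heap_node);
    		binheap_delete_root(&pending, struct job, heap_node);
    		/* 'first' is the earliest-deadline element */
    		(void) first;
    	}
    }
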
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index 265f2b1e62b8..d1c73f5cf73e 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -44,4 +44,31 @@ void server_release(struct task_struct *t);
  */
 void task_release(struct task_struct *t);
 
+inline static int budget_exhausted(struct task_struct* t)
+{
+	return get_exec_time(t) >= get_exec_cost(t);
+}
+
+inline static lt_t budget_remaining(struct task_struct* t)
+{
+	if (!budget_exhausted(t))
+		return get_exec_cost(t) - get_exec_time(t);
+	else
+		/* avoid overflow */
+		return 0;
+}
+
+#define budget_enforced(t) (tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT)
+
+#define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \
+				      == PRECISE_ENFORCEMENT)
+
+static inline int requeue_preempted_job(struct task_struct* t)
+{
+	/* Add task to ready queue only if not subject to budget enforcement or
+	 * if the job has budget remaining. t may be NULL.
+	 */
+	return t && (!budget_exhausted(t) || !budget_enforced(t));
+}
+
 #endif
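
A brief sketch of the intended use, with a hypothetical plugin hook (not part of the patch):

    static void on_preemption(struct task_struct *prev)
    {
    	/* requeue_preempted_job() is NULL-safe, so the possibly-NULL
    	 * previously-scheduled task can be passed directly */
    	if (requeue_preempted_job(prev)) {
    		/* prev still has budget left or is not budget-enforced;
    		 * a plugin would reinsert it into its ready queue here.
    		 * budget_remaining() cannot underflow: it returns 0 once
    		 * the budget is exhausted. */
    		;
    	}
    }
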
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index caf2a1e6918c..f2115b83f1e4 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -12,7 +12,7 @@
 #include <linux/fs.h>
 #include <linux/slab.h>
 
-#define MAX_OBJECT_DESCRIPTORS 32
+#define MAX_OBJECT_DESCRIPTORS 85
 
 typedef enum {
 	MIN_OBJ_TYPE	= 0,
@@ -20,7 +20,13 @@ typedef enum {
 	FMLP_SEM	= 0,
 	SRP_SEM		= 1,
 
-	MAX_OBJ_TYPE	= 1
+	MPCP_SEM	= 2,
+	MPCP_VS_SEM	= 3,
+	DPCP_SEM	= 4,
+
+	PCP_SEM		= 5,
+
+	MAX_OBJ_TYPE	= 5
 } obj_type_t;
 
 struct inode_obj_id {
diff --git a/include/litmus/fp_common.h b/include/litmus/fp_common.h
new file mode 100644
index 000000000000..dd1f7bf1e347
--- /dev/null
+++ b/include/litmus/fp_common.h
@@ -0,0 +1,105 @@
+/* Fixed-priority scheduler support.
+ */
+
+#ifndef __FP_COMMON_H__
+#define __FP_COMMON_H__
+
+#include <litmus/rt_domain.h>
+
+#include <asm/bitops.h>
+
+
+void fp_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
+		    release_jobs_t release);
+
+int fp_higher_prio(struct task_struct* first,
+		   struct task_struct* second);
+
+int fp_ready_order(struct bheap_node* a, struct bheap_node* b);
+
+#define FP_PRIO_BIT_WORDS (LITMUS_MAX_PRIORITY / BITS_PER_LONG)
+
+#if (LITMUS_MAX_PRIORITY % BITS_PER_LONG)
+#error LITMUS_MAX_PRIORITY must be a multiple of BITS_PER_LONG
+#endif
+
+/* bitmask-indexed priority queue */
+struct fp_prio_queue {
+	unsigned long	bitmask[FP_PRIO_BIT_WORDS];
+	struct bheap	queue[LITMUS_MAX_PRIORITY];
+};
+
+void fp_prio_queue_init(struct fp_prio_queue* q);
+
+static inline void fpq_set(struct fp_prio_queue* q, unsigned int index)
+{
+	unsigned long *word = q->bitmask + (index / BITS_PER_LONG);
+	__set_bit(index % BITS_PER_LONG, word);
+}
+
+static inline void fpq_clear(struct fp_prio_queue* q, unsigned int index)
+{
+	unsigned long *word = q->bitmask + (index / BITS_PER_LONG);
+	__clear_bit(index % BITS_PER_LONG, word);
+}
+
+static inline unsigned int fpq_find(struct fp_prio_queue* q)
+{
+	int i;
+
+	/* loop optimizer should unroll this */
+	for (i = 0; i < FP_PRIO_BIT_WORDS; i++)
+		if (q->bitmask[i])
+			return __ffs(q->bitmask[i]) + i * BITS_PER_LONG;
+
+	return LITMUS_MAX_PRIORITY; /* nothing found */
+}
+
+static inline void fp_prio_add(struct fp_prio_queue* q, struct task_struct* t, unsigned int index)
+{
+
+	BUG_ON(bheap_node_in_heap(tsk_rt(t)->heap_node));
+
+	fpq_set(q, index);
+	bheap_insert(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node);
+}
+
+static inline void fp_prio_remove(struct fp_prio_queue* q, struct task_struct* t, unsigned int index)
+{
+	BUG_ON(!is_queued(t));
+
+	bheap_delete(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node);
+	if (likely(bheap_empty(&q->queue[index])))
+		fpq_clear(q, index);
+}
+
+static inline struct task_struct* fp_prio_peek(struct fp_prio_queue* q)
+{
+	unsigned int idx = fpq_find(q);
+	struct bheap_node* hn;
+
+	if (idx < LITMUS_MAX_PRIORITY) {
+		hn = bheap_peek(fp_ready_order, &q->queue[idx]);
+		return bheap2task(hn);
+	} else
+		return NULL;
+}
+
+static inline struct task_struct* fp_prio_take(struct fp_prio_queue* q)
+{
+	unsigned int idx = fpq_find(q);
+	struct bheap_node* hn;
+
+	if (idx < LITMUS_MAX_PRIORITY) {
+		hn = bheap_take(fp_ready_order, &q->queue[idx]);
+		if (likely(bheap_empty(&q->queue[idx])))
+			fpq_clear(q, idx);
+		return bheap2task(hn);
+	} else
+		return NULL;
+}
+
+int fp_preemption_needed(struct fp_prio_queue* q, struct task_struct *t);
+
+
+#endif
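
To make the bitmap indexing concrete, a small worked example (assuming 64-bit longs, so LITMUS_MAX_PRIORITY = 512 gives FP_PRIO_BIT_WORDS = 8; the function name is illustrative):

    static void fpq_bitmap_demo(struct fp_prio_queue *q)
    {
    	fp_prio_queue_init(q);
    	fpq_set(q, 130);	/* word 130 / 64 = 2, bit 130 % 64 = 2 */
    	fpq_set(q, 7);		/* word 0, bit 7 */
    	/* fpq_find() scans the words in ascending order and applies
    	 * __ffs() to the first non-empty one, so the numerically
    	 * smallest index, i.e. the highest priority, wins: */
    	BUG_ON(fpq_find(q) != 7);
    }
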
diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h
new file mode 100644
index 000000000000..04d4bcaeae96
--- /dev/null
+++ b/include/litmus/fpmath.h
@@ -0,0 +1,145 @@
+#ifndef __FP_MATH_H__
+#define __FP_MATH_H__
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#define abs(x) (((x) < 0) ? -(x) : (x))
+#endif
+
+// Use 64-bit because we want to track things at the nanosecond scale.
+// This can lead to very large numbers.
+typedef int64_t fpbuf_t;
+typedef struct
+{
+	fpbuf_t val;
+} fp_t;
+
+#define FP_SHIFT 10
+#define ROUND_BIT (FP_SHIFT - 1)
+
+#define _fp(x) ((fp_t) {x})
+
+#ifdef __KERNEL__
+static const fp_t LITMUS_FP_ZERO = {.val = 0};
+static const fp_t LITMUS_FP_ONE = {.val = (1 << FP_SHIFT)};
+#endif
+
+static inline fp_t FP(fpbuf_t x)
+{
+	return _fp(((fpbuf_t) x) << FP_SHIFT);
+}
+
+/* divide two integers to obtain a fixed point value */
+static inline fp_t _frac(fpbuf_t a, fpbuf_t b)
+{
+	return _fp(FP(a).val / (b));
+}
+
+static inline fpbuf_t _point(fp_t x)
+{
+	return (x.val % (1 << FP_SHIFT));
+}
+
+#define fp2str(x) x.val
+/*(x.val >> FP_SHIFT), (x.val % (1 << FP_SHIFT)) */
+#define _FP_ "%ld/1024"
+
+static inline fpbuf_t _floor(fp_t x)
+{
+	return x.val >> FP_SHIFT;
+}
+
+/* FIXME: negative rounding */
+static inline fpbuf_t _round(fp_t x)
+{
+	return _floor(x) + ((x.val >> ROUND_BIT) & 1);
+}
+
+/* multiply two fixed point values */
+static inline fp_t _mul(fp_t a, fp_t b)
+{
+	return _fp((a.val * b.val) >> FP_SHIFT);
+}
+
+static inline fp_t _div(fp_t a, fp_t b)
+{
+#if !defined(__KERNEL__) && !defined(unlikely)
+#define unlikely(x) (x)
+#define DO_UNDEF_UNLIKELY
+#endif
+	/* try not to overflow */
+	if (unlikely( a.val > (2l << ((sizeof(fpbuf_t)*8) - FP_SHIFT)) ))
+		return _fp((a.val / b.val) << FP_SHIFT);
+	else
+		return _fp((a.val << FP_SHIFT) / b.val);
+#ifdef DO_UNDEF_UNLIKELY
+#undef unlikely
+#undef DO_UNDEF_UNLIKELY
+#endif
+}
+
+static inline fp_t _add(fp_t a, fp_t b)
+{
+	return _fp(a.val + b.val);
+}
+
+static inline fp_t _sub(fp_t a, fp_t b)
+{
+	return _fp(a.val - b.val);
+}
+
+static inline fp_t _neg(fp_t x)
+{
+	return _fp(-x.val);
+}
+
+static inline fp_t _abs(fp_t x)
+{
+	return _fp(abs(x.val));
+}
+
+/* works the same as casting float/double to integer */
+static inline fpbuf_t _fp_to_integer(fp_t x)
+{
+	return _floor(_abs(x)) * ((x.val > 0) ? 1 : -1);
+}
+
+static inline fp_t _integer_to_fp(fpbuf_t x)
+{
+	return _frac(x, 1);
+}
+
+static inline int _leq(fp_t a, fp_t b)
+{
+	return a.val <= b.val;
+}
+
+static inline int _geq(fp_t a, fp_t b)
+{
+	return a.val >= b.val;
+}
+
+static inline int _lt(fp_t a, fp_t b)
+{
+	return a.val < b.val;
+}
+
+static inline int _gt(fp_t a, fp_t b)
+{
+	return a.val > b.val;
+}
+
+static inline int _eq(fp_t a, fp_t b)
+{
+	return a.val == b.val;
+}
+
+static inline fp_t _max(fp_t a, fp_t b)
+{
+	if (a.val < b.val)
+		return b;
+	else
+		return a;
+}
+#endif
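
A worked example of the arithmetic (FP_SHIFT = 10, so one integer unit is 1024 in the raw representation; the function is a sketch for illustration):

    static void fpmath_demo(void)
    {
    	fp_t half  = _frac(1, 2);	/* raw value 1024/2 = 512    */
    	fp_t six   = FP(6);		/* raw value 6 << 10 = 6144  */
    	fp_t three = _mul(half, six);	/* (512 * 6144) >> 10 = 3072 */

    	BUG_ON(_fp_to_integer(three) != 3);
    	/* _round(3/2): floor of 1536 is 1, round bit (1536 >> 9) & 1
    	 * is 1, so the result rounds up to 2 */
    	BUG_ON(_round(_frac(3, 2)) != 2);
    }
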
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 1455f249c1fb..c3b91fe8115c 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -49,12 +49,22 @@ void litmus_exit_task(struct task_struct *tsk);
 /* Realtime utility macros */
 #define get_rt_flags(t)		(tsk_rt(t)->flags)
 #define set_rt_flags(t,f)	(tsk_rt(t)->flags=(f))
+#define is_priority_boosted(t)	(tsk_rt(t)->priority_boosted)
+#define get_boost_start(t)	(tsk_rt(t)->boost_start_time)
+
+/* task_params macros */
 #define get_exec_cost(t)	(tsk_rt(t)->task_params.exec_cost)
-#define get_exec_time(t)	(tsk_rt(t)->job_params.exec_time)
 #define get_rt_period(t)	(tsk_rt(t)->task_params.period)
+#define get_rt_relative_deadline(t)	(tsk_rt(t)->task_params.relative_deadline)
 #define get_rt_phase(t)		(tsk_rt(t)->task_params.phase)
 #define get_rt_job(t)		(tsk_rt(t)->job_params.job_no)
 #define get_partition(t)	(tsk_rt(t)->task_params.cpu)
+#define get_priority(t)		(tsk_rt(t)->task_params.priority)
+#define get_class(t)		(tsk_rt(t)->task_params.cls)
+
+/* job_param macros */
+#define get_job_no(t)		(tsk_rt(t)->job_params.job_no)
+#define get_exec_time(t)	(tsk_rt(t)->job_params.exec_time)
 #define get_deadline(t)		(tsk_rt(t)->job_params.deadline)
 #define get_release(t)		(tsk_rt(t)->job_params.release)
 #define get_class(t)		(tsk_rt(t)->task_params.cls)
@@ -65,25 +75,7 @@ void litmus_exit_task(struct task_struct *tsk);
 
 #define is_priority_boosted(t)	(tsk_rt(t)->priority_boosted)
 #define get_boost_start(t)	(tsk_rt(t)->boost_start_time)
-
-inline static int budget_exhausted(struct task_struct* t)
-{
-	return get_exec_time(t) >= get_exec_cost(t);
-}
-
-inline static lt_t budget_remaining(struct task_struct* t)
-{
-	if (!budget_exhausted(t))
-		return get_exec_cost(t) - get_exec_time(t);
-	else
-		/* avoid overflow */
-		return 0;
-}
-
-#define budget_enforced(t) (tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT)
-
-#define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \
-				      == PRECISE_ENFORCEMENT)
+#define get_lateness(t)		(tsk_rt(t)->job_params.lateness)
 
 #define is_hrt(t)		\
 	(tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 2ced0dba067d..a8c82eed5562 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -33,11 +33,33 @@ typedef enum {
 	PRECISE_ENFORCEMENT /* budgets are enforced with hrtimers */
 } budget_policy_t;
 
+/* We use the common priority interpretation "lower index == higher priority",
+ * which is commonly used in fixed-priority schedulability analysis papers.
+ * So, a numerically lower priority value implies higher scheduling priority,
+ * with priority 1 being the highest priority. Priority 0 is reserved for
+ * priority boosting. LITMUS_MAX_PRIORITY denotes the size of the priority
+ * value range.
+ */
+
+#define LITMUS_MAX_PRIORITY	512
+#define LITMUS_HIGHEST_PRIORITY	1
+#define LITMUS_LOWEST_PRIORITY	(LITMUS_MAX_PRIORITY - 1)
+
+/* Provide generic comparison macros for userspace,
+ * in case that we change this later. */
+#define litmus_higher_fixed_prio(a, b)	(a < b)
+#define litmus_lower_fixed_prio(a, b)	(a > b)
+#define litmus_is_valid_fixed_prio(p)		\
+	((p) >= LITMUS_HIGHEST_PRIORITY &&	\
+	 (p) <= LITMUS_LOWEST_PRIORITY)
+
 struct rt_task {
 	lt_t		exec_cost;
 	lt_t		period;
+	lt_t		relative_deadline;
 	lt_t		phase;
 	unsigned int	cpu;
+	unsigned int	priority;
 	task_class_t	cls;
 	budget_policy_t budget_policy; /* ignored by pfair */
 };
@@ -119,6 +141,12 @@ struct rt_job {
 	/* How much service has this job received so far? */
 	lt_t	exec_time;
 
+	/* By how much did the prior job miss its deadline?
+	 * This value differs from tardiness in that lateness may
+	 * be negative (when a job finishes before its deadline).
+	 */
+	long long	lateness;
+
 	/* Which job is this. This is used to let user space
 	 * specify which job to wait for, which is important if jobs
 	 * overrun. If we just call sys_sleep_next_period() then we
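
Given the "lower index == higher priority" convention, the comparison macros read as follows (a sketch with hypothetical values):

    static void prio_demo(void)
    {
    	unsigned int a = LITMUS_HIGHEST_PRIORITY;	/* 1, strongest */
    	unsigned int b = LITMUS_LOWEST_PRIORITY;	/* 511, weakest */

    	if (litmus_is_valid_fixed_prio(a) && litmus_is_valid_fixed_prio(b) &&
    	    litmus_higher_fixed_prio(a, b)) {
    		/* taken: 1 < 511, so 'a' outranks 'b'; priority 0 would
    		 * fail the validity check because it is reserved for
    		 * priority boosting */
    	}
    }
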
diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
index 740cc11be5d7..1d491ce6a31a 100644
--- a/include/litmus/sched_mc.h
+++ b/include/litmus/sched_mc.h
@@ -32,7 +32,7 @@ struct mc_data {
 };
 
 #define tsk_mc_data(t)	(tsk_rt(t)->mc_data)
-#define tsk_mc_crit(t)	(tsk_mc_data(t)->mc_task.crit)
+#define tsk_mc_crit(t)	(tsk_mc_data(t) ? tsk_mc_data(t)->mc_task.crit : CRIT_LEVEL_C)
 #define is_ghost(t)	(tsk_mc_data(t)->mc_job.is_ghost)
 
 #define TS "(%s/%d:%d:%s)"
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 15909e530771..0e050ac3748c 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -196,21 +196,17 @@ feather_callback void do_sched_trace_task_tardy(unsigned long id,
 #define trace_litmus_switch_to(t)
 #define trace_litmus_switch_away(prev)
 #define trace_litmus_task_completion(t, forced)
+
 #define trace_litmus_task_block(t, i)
 #define trace_litmus_task_resume(t, i)
 #define trace_litmus_sys_release(start)
-
 #define trace_litmus_task_exit(t)
 #define trace_litmus_task_tardy(t)
 
-#define trace_litmus_resource_acquire(t, i);
-#define trace_litmus_resource_release(t, i);
-#define trace_litmus_priority_donate(t, d, i)
-
 #define trace_litmus_container_param(cid, name)
 #define trace_litmus_server_param(sid, cid, wcet, time)
-#define trace_litmus_server_switch_to(sid, job, tid, tjob)
-#define trace_litmus_server_switch_away(sid, job, tid, tjob)
+#define trace_litmus_server_switch_to(sid, job, tid)
+#define trace_litmus_server_switch_away(sid, job, tid)
 #define trace_litmus_server_release(sid, job, release, deadline)
 #define trace_litmus_server_completion(sid, job)
 
@@ -260,20 +256,36 @@ feather_callback void do_sched_trace_task_tardy(unsigned long id,
 		trace_litmus_task_completion(t, forced);	\
 	} while (0)
 
-#define sched_trace_task_block(t, i)				\
+#define sched_trace_task_block_on(t, i)				\
 	do {							\
 		SCHED_TRACE(SCHED_TRACE_BASE_ID + 7,		\
 			do_sched_trace_task_block, t);		\
 		trace_litmus_task_block(t, i);			\
 	} while (0)
 
-#define sched_trace_task_resume(t, i)				\
+#define sched_trace_task_block(t)				\
+	sched_trace_task_block_on(t, 0)
+
+#define sched_trace_task_resume_on(t, i)			\
 	do {							\
 		SCHED_TRACE(SCHED_TRACE_BASE_ID + 8,		\
 			do_sched_trace_task_resume, t);		\
 		trace_litmus_task_resume(t, i);			\
 	} while (0)
 
+#define sched_trace_task_resume(t)				\
+	sched_trace_task_resume_on(t, 0)
+
+#define sched_trace_resource_acquire(t, i)			\
+	do {							\
+		trace_litmus_resource_acquire(t, i);		\
+	} while (0)
+
+#define sched_trace_resource_released(t, i)			\
+	do {							\
+		trace_litmus_resource_released(t, i);		\
+	} while (0)
+
 #define sched_trace_action(t, action)				\
 	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9,			\
 		do_sched_trace_action, t, (unsigned long) action);
@@ -305,21 +317,6 @@ feather_callback void do_sched_trace_task_tardy(unsigned long id,
 	sched_trace_log_message("%d P%d [%s@%s:%d]: Took %llu\n\n",	\
 		TRACE_ARGS, litmus_clock() - _qt_start)
 
-#define sched_trace_resource_acquire(t, i)			\
-	do {							\
-		trace_litmus_resource_acquire(t, i);		\
-	} while (0)
-
-#define sched_trace_resource_release(t, i)			\
-	do {							\
-		trace_litmus_resource_release(t, i);		\
-	} while (0)
-
-#define sched_trace_priority_donate(t, d, i)			\
-	do {							\
-		trace_litmus_priority_donate(t, d, i);		\
-	} while (0)
-
 #define sched_trace_container_param(cid, name)			\
 	do {							\
 		trace_litmus_container_param(cid, name);	\
diff --git a/include/litmus/trace_irq.h b/include/litmus/trace_irq.h
index f18b127a089d..b717b1d55396 100644
--- a/include/litmus/trace_irq.h
+++ b/include/litmus/trace_irq.h
@@ -3,6 +3,8 @@
 
 #ifdef CONFIG_SCHED_OVERHEAD_TRACE
 
+#include <linux/percpu.h>
+
 extern DEFINE_PER_CPU(atomic_t, irq_fired_count);
 
 static inline void ft_irq_fired(void)
diff --git a/include/litmus/wait.h b/include/litmus/wait.h
new file mode 100644
index 000000000000..ce1347c355f8
--- /dev/null
+++ b/include/litmus/wait.h
@@ -0,0 +1,57 @@
+#ifndef _LITMUS_WAIT_H_
+#define _LITMUS_WAIT_H_
+
+struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
+
+/* wrap regular wait_queue_t head */
+struct __prio_wait_queue {
+	wait_queue_t wq;
+
+	/* some priority point */
+	lt_t priority;
+	/* break ties in priority by lower tie_breaker */
+	unsigned int tie_breaker;
+};
+
+typedef struct __prio_wait_queue prio_wait_queue_t;
+
+static inline void init_prio_waitqueue_entry(prio_wait_queue_t *pwq,
+					     struct task_struct* t,
+					     lt_t priority)
+{
+	init_waitqueue_entry(&pwq->wq, t);
+	pwq->priority    = priority;
+	pwq->tie_breaker = 0;
+}
+
+static inline void init_prio_waitqueue_entry_tie(prio_wait_queue_t *pwq,
+						 struct task_struct* t,
+						 lt_t priority,
+						 unsigned int tie_breaker)
+{
+	init_waitqueue_entry(&pwq->wq, t);
+	pwq->priority    = priority;
+	pwq->tie_breaker = tie_breaker;
+}
+
+unsigned int __add_wait_queue_prio_exclusive(
+	wait_queue_head_t* head,
+	prio_wait_queue_t *new);
+
+static inline unsigned int add_wait_queue_prio_exclusive(
+	wait_queue_head_t* head,
+	prio_wait_queue_t *new)
+{
+	unsigned long flags;
+	unsigned int passed;
+
+	spin_lock_irqsave(&head->lock, flags);
+	passed = __add_wait_queue_prio_exclusive(head, new);
+
+	spin_unlock_irqrestore(&head->lock, flags);
+
+	return passed;
+}
+
+
+#endif
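
A hypothetical usage sketch (the function name is invented): enqueue a task in deadline order, with its job number as the tie breaker, using the get_deadline()/get_job_no() accessors from litmus.h:

    static void wait_in_deadline_order(wait_queue_head_t *wq, struct task_struct *t)
    {
    	prio_wait_queue_t entry;
    	unsigned int passed;

    	/* deadline as the priority point, job number breaks ties */
    	init_prio_waitqueue_entry_tie(&entry, t, get_deadline(t), get_job_no(t));
    	passed = add_wait_queue_prio_exclusive(wq, &entry);
    	/* 'passed' counts the lower-priority waiters the entry skipped;
    	 * a real caller would now block, keeping 'entry' alive while
    	 * it is queued */
    }
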
diff --git a/include/trace/events/litmus.h b/include/trace/events/litmus.h
index 7ba7332a8ebc..474aa129c233 100644
--- a/include/trace/events/litmus.h
+++ b/include/trace/events/litmus.h
@@ -85,14 +85,13 @@ TRACE_EVENT(litmus_switch_to,
 	),
 
 	TP_fast_assign(
-		__entry->pid	= t->pid;//is_realtime(t) ? t->pid : 0;
-		__entry->job	= t->rt_param.job_params.job_no;//is_realtime(t) ? t->rt_param.job_params.job_no : 0;
+		__entry->pid	= is_realtime(t) ? t->pid : 0;
+		__entry->job	= is_realtime(t) ? t->rt_param.job_params.job_no : 0;
 		__entry->exec_time	= get_exec_time(t);
 	),
 
 	TP_printk("switch_to(job(%u, %u)): (exec: %Lu)\n",
-		__entry->pid, __entry->job,
-		__entry->exec_time)
+		__entry->pid, __entry->job, __entry->exec_time)
 );
 
 /*
@@ -111,14 +110,13 @@ TRACE_EVENT(litmus_switch_away,
 	),
 
 	TP_fast_assign(
-		__entry->pid	= t->pid;//is_realtime(t) ? t->pid : 0;
-		__entry->job	= t->rt_param.job_params.job_no;//is_realtime(t) ? t->rt_param.job_params.job_no : 0;
+		__entry->pid	= is_realtime(t) ? t->pid : 0;
+		__entry->job	= is_realtime(t) ? t->rt_param.job_params.job_no : 0;
 		__entry->exec_time	= get_exec_time(t);
 	),
 
 	TP_printk("switch_away(job(%u, %u)): (exec: %Lu)\n",
-		__entry->pid, __entry->job,
-		__entry->exec_time)
+		__entry->pid, __entry->job, __entry->exec_time)
 );
 
 /*
@@ -143,8 +141,7 @@ TRACE_EVENT(litmus_task_completion,
 	),
 
 	TP_printk("completed(job(%u, %u)): (forced: %lu)\n",
-		__entry->pid, __entry->job,
-		__entry->forced)
+		__entry->pid, __entry->job, __entry->forced)
 );
 
 /*
@@ -166,8 +163,7 @@ TRACE_EVENT(litmus_task_block,
 		__entry->lid = lid;
 	),
 
-	TP_printk("(%u) blocks on %d\n", __entry->pid,
-		__entry->lid)
+	TP_printk("(%u) blocks on %d\n", __entry->pid, __entry->lid)
 );
 
 /*
@@ -189,8 +185,7 @@ TRACE_EVENT(litmus_resource_acquire,
 		__entry->lid = lid;
 	),
 
-	TP_printk("(%u) acquires %d\n", __entry->pid,
-		__entry->lid)
+	TP_printk("(%u) acquires %d\n", __entry->pid, __entry->lid)
 );
 
 TRACE_EVENT(litmus_resource_release,
@@ -215,26 +210,26 @@ TRACE_EVENT(litmus_resource_release,
 
 TRACE_EVENT(litmus_priority_donate,
 
 	TP_PROTO(struct task_struct *t, struct task_struct *donor, int lid),
 
 	TP_ARGS(t, donor, lid),
 
 	TP_STRUCT__entry(
 		__field( pid_t, t_pid )
 		__field( pid_t, d_pid )
 		__field( unsigned long long, prio)
 		__field( int, lid )
 	),
 
 	TP_fast_assign(
 		__entry->t_pid	= t ? t->pid : 0;
 		__entry->d_pid	= donor ? donor->pid : 0;
 		__entry->prio	= get_deadline(donor);
 		__entry->lid	= lid;
 	),
 
 	TP_printk("(%u) inherits %llu from (%u) on %d\n", __entry->t_pid,
 		__entry->d_pid, __entry->prio, __entry->lid)
 );
 
 /*
@@ -259,8 +254,7 @@ TRACE_EVENT(litmus_task_resume,
 	),
 
 	TP_printk("resume(job(%u, %u)) on %d\n",
-		__entry->pid, __entry->job,
-		__entry->lid)
+		__entry->pid, __entry->job, __entry->lid)
 );
 
 /*
@@ -284,7 +278,6 @@ TRACE_EVENT(litmus_sys_release,
 );
 
 /*
-=======
  * Trace task exit
  */
 TRACE_EVENT(litmus_task_exit,
@@ -300,9 +293,7 @@ TRACE_EVENT(litmus_task_exit,
 
 	TP_fast_assign(
 		__entry->pid = t ? t->pid : 0;
-#ifdef CONFIG_PLUGIN_COLOR
 		__entry->max_exec_time = t ? t->rt_param.max_exec_time : 0;
-#endif
 	),
 
 	TP_printk("(%u) exit\n", __entry->pid)
@@ -356,10 +347,9 @@ TRACE_EVENT(litmus_server_param,
 
 TRACE_EVENT(litmus_server_switch_to,
 
 	TP_PROTO(int sid, unsigned int job, int tid, unsigned int tjob),
-
-	TP_ARGS(sid, job, tid, tjob),
 
+	TP_ARGS(sid, job, tid, tjob),
 
 	TP_STRUCT__entry(
 		__field( int, sid)
@@ -375,14 +365,15 @@ TRACE_EVENT(litmus_server_switch_to,
 		__entry->tjob = tjob;
 	),
 
-	TP_printk("switch_to(server(%d, %u)): (%d, %d)\n", __entry->sid, __entry->job, __entry->tid, __entry->tjob)
+	TP_printk("switch_to(server(%d, %u)): (%d, %d)\n",
+		__entry->sid, __entry->job, __entry->tid, __entry->tjob)
 );
 
 TRACE_EVENT(litmus_server_switch_away,
 
 	TP_PROTO(int sid, unsigned int job, int tid, unsigned int tjob),
 
 	TP_ARGS(sid, job, tid, tjob),
 
 	TP_STRUCT__entry(
 		__field( int, sid)
@@ -398,7 +389,8 @@ TRACE_EVENT(litmus_server_switch_away,
 		__entry->tjob = tjob;
 	),
 
-	TP_printk("switch_away(server(%d, %u)): (%d, %d)\n", __entry->sid, __entry->job, __entry->tid, __entry->tjob)
+	TP_printk("switch_away(server(%d, %u)): (%d, %d)\n",
+		__entry->sid, __entry->job, __entry->tid, __entry->tjob)
 );
 
 TRACE_EVENT(litmus_server_release,
diff --git a/kernel/sched.c b/kernel/sched.c
index 4f76f883f831..2739b3339ffb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3163,16 +3163,26 @@ static inline void post_schedule(struct rq *rq)
 asmlinkage void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
-	struct rq *rq = this_rq();
+	struct rq *rq;
 
+	preempt_disable();
+
+	rq = this_rq();
 	finish_task_switch(rq, prev);
 
+	sched_trace_task_switch_to(current);
+
 	/*
 	 * FIXME: do we need to worry about rq being invalidated by the
 	 * task_switch?
 	 */
 	post_schedule(rq);
 
+	if (sched_state_validate_switch())
+		litmus_reschedule_local();
+
+	preempt_enable();
+
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	/* In this case, finish_task_switch does not reenable preemption */
 	preempt_enable();
diff --git a/litmus/Kconfig b/litmus/Kconfig index 48d6f28c6e4a..91bf81ea9fae 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig | |||
@@ -142,6 +142,52 @@ config SCHED_CPU_AFFINITY | |||
142 | 142 | ||
143 | Say Yes if unsure. | 143 | Say Yes if unsure. |
144 | 144 | ||
145 | choice | ||
146 | prompt "EDF Tie-Break Behavior" | ||
147 | default EDF_TIE_BREAK_LATENESS_NORM | ||
148 | help | ||
149 | Allows the configuration of tie-breaking behavior when the deadlines | ||
150 | of two EDF-scheduled tasks are equal. | ||
151 | |||
152 | config EDF_TIE_BREAK_LATENESS | ||
153 | bool "Lateness-based Tie Break" | ||
154 | help | ||
155 | Break ties between two jobs, A and B, based upon the lateness of their | ||
156 | prior jobs. The job with the greatest lateness has priority. Note that | ||
157 | lateness has a negative value if the prior job finished before its | ||
158 | deadline. | ||
159 | |||
160 | config EDF_TIE_BREAK_LATENESS_NORM | ||
161 | bool "Normalized Lateness-based Tie Break" | ||
162 | help | ||
163 | Break ties between two jobs, A and B, based upon the lateness, normalized | ||
164 | by relative deadline, of their prior jobs. The job with the greatest | ||
165 | normalized lateness has priority. Note that lateness has a negative value | ||
166 | if the prior job finished before its deadline. | ||
167 | |||
168 | Normalized lateness tie-breaks are likely desirable over non-normalized | ||
169 | tie-breaks if the execution times and/or relative deadlines of tasks in a | ||
170 | task set vary greatly. | ||
171 | |||
172 | config EDF_TIE_BREAK_HASH | ||
173 | bool "Hash-based Tie Breaks" | ||
174 | help | ||
175 | Break ties between two jobs, A and B, with equal deadlines by using a | ||
176 | uniform hash; i.e.: hash(A.pid, A.job_num) < hash(B.pid, B.job_num). Job | ||
177 | A has a ~50% chance of winning a given tie-break. | ||
178 | |||
179 | config EDF_PID_TIE_BREAK | ||
180 | bool "PID-based Tie Breaks" | ||
181 | help | ||
182 | Break ties based upon OS-assigned thread IDs. Use this option if it is | ||
183 | required by the algorithm's real-time analysis or if per-task | ||
184 | response-time jitter must be minimized. | ||
185 | |||
186 | NOTES: | ||
187 | * This tie-breaking method was the default in Litmus 2012.2 and before. | ||
188 | |||
189 | endchoice | ||
190 | |||
145 | endmenu | 191 | endmenu |
146 | 192 | ||
147 | menu "Tracing" | 193 | menu "Tracing" |
diff --git a/litmus/Makefile b/litmus/Makefile index cea8a2572bd3..76a07e8531c6 100644 --- a/litmus/Makefile +++ b/litmus/Makefile | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | obj-y = sched_plugin.o litmus.o \ | 5 | obj-y = sched_plugin.o litmus.o \ |
6 | bheap.o \ | 6 | bheap.o \ |
7 | binheap.o \ | ||
7 | budget.o \ | 8 | budget.o \ |
8 | clustered.o \ | 9 | clustered.o \ |
9 | color.o \ | 10 | color.o \ |
@@ -15,6 +16,7 @@ obj-y = sched_plugin.o litmus.o \ | |||
15 | edf_common.o \ | 16 | edf_common.o \ |
16 | fdso.o \ | 17 | fdso.o \ |
17 | fifo_common.o \ | 18 | fifo_common.o \ |
19 | fp_common.o \ | ||
18 | jobs.o \ | 20 | jobs.o \ |
19 | litmus_proc.o \ | 21 | litmus_proc.o \ |
20 | locking.o \ | 22 | locking.o \ |
@@ -23,6 +25,7 @@ obj-y = sched_plugin.o litmus.o \ | |||
23 | rt_domain.o \ | 25 | rt_domain.o \ |
24 | rt_server.o \ | 26 | rt_server.o \ |
25 | sched_gsn_edf.o \ | 27 | sched_gsn_edf.o \ |
28 | sched_pfp.o \ | ||
26 | sched_psn_edf.o \ | 29 | sched_psn_edf.o \ |
27 | srp.o \ | 30 | srp.o \ |
28 | sync.o | 31 | sync.o |
diff --git a/litmus/binheap.c b/litmus/binheap.c new file mode 100644 index 000000000000..40a913f4b5a7 --- /dev/null +++ b/litmus/binheap.c | |||
@@ -0,0 +1,388 @@ | |||
1 | #include <litmus/binheap.h> | ||
2 | |||
3 | /* Returns true if the root ancestor of node is the root of the given heap. */ | ||
4 | int binheap_is_in_this_heap(struct binheap_node *node, | ||
5 | struct binheap* heap) | ||
6 | { | ||
7 | if(!binheap_is_in_heap(node)) { | ||
8 | return 0; | ||
9 | } | ||
10 | |||
11 | while(node->parent != NULL) { | ||
12 | node = node->parent; | ||
13 | } | ||
14 | |||
15 | return (node == heap->root); | ||
16 | } | ||
17 | |||
18 | |||
19 | /* Update the node reference pointers. Same logic as Litmus binomial heap. */ | ||
20 | static void __update_ref(struct binheap_node *parent, | ||
21 | struct binheap_node *child) | ||
22 | { | ||
23 | *(parent->ref_ptr) = child; | ||
24 | *(child->ref_ptr) = parent; | ||
25 | |||
26 | swap(parent->ref_ptr, child->ref_ptr); | ||
27 | } | ||
28 | |||
29 | |||
30 | /* Swaps data between two nodes. */ | ||
31 | static void __binheap_swap(struct binheap_node *parent, | ||
32 | struct binheap_node *child) | ||
33 | { | ||
34 | swap(parent->data, child->data); | ||
35 | __update_ref(parent, child); | ||
36 | } | ||
37 | |||
38 | |||
39 | /* Swaps memory and data between two nodes. Actual nodes swap instead of | ||
40 | * just data. Needed when we delete nodes from the heap. | ||
41 | */ | ||
42 | static void __binheap_swap_safe(struct binheap *handle, | ||
43 | struct binheap_node *a, | ||
44 | struct binheap_node *b) | ||
45 | { | ||
46 | swap(a->data, b->data); | ||
47 | __update_ref(a, b); | ||
48 | |||
49 | if((a->parent != NULL) && (a->parent == b->parent)) { | ||
50 | /* special case: shared parent */ | ||
51 | swap(a->parent->left, a->parent->right); | ||
52 | } | ||
53 | else { | ||
54 | /* Update pointers to swap parents. */ | ||
55 | |||
56 | if(a->parent) { | ||
57 | if(a == a->parent->left) { | ||
58 | a->parent->left = b; | ||
59 | } | ||
60 | else { | ||
61 | a->parent->right = b; | ||
62 | } | ||
63 | } | ||
64 | |||
65 | if(b->parent) { | ||
66 | if(b == b->parent->left) { | ||
67 | b->parent->left = a; | ||
68 | } | ||
69 | else { | ||
70 | b->parent->right = a; | ||
71 | } | ||
72 | } | ||
73 | |||
74 | swap(a->parent, b->parent); | ||
75 | } | ||
76 | |||
77 | /* swap children */ | ||
78 | |||
79 | if(a->left) { | ||
80 | a->left->parent = b; | ||
81 | |||
82 | if(a->right) { | ||
83 | a->right->parent = b; | ||
84 | } | ||
85 | } | ||
86 | |||
87 | if(b->left) { | ||
88 | b->left->parent = a; | ||
89 | |||
90 | if(b->right) { | ||
91 | b->right->parent = a; | ||
92 | } | ||
93 | } | ||
94 | |||
95 | swap(a->left, b->left); | ||
96 | swap(a->right, b->right); | ||
97 | |||
98 | |||
99 | /* update next/last/root pointers */ | ||
100 | |||
101 | if(a == handle->next) { | ||
102 | handle->next = b; | ||
103 | } | ||
104 | else if(b == handle->next) { | ||
105 | handle->next = a; | ||
106 | } | ||
107 | |||
108 | if(a == handle->last) { | ||
109 | handle->last = b; | ||
110 | } | ||
111 | else if(b == handle->last) { | ||
112 | handle->last = a; | ||
113 | } | ||
114 | |||
115 | if(a == handle->root) { | ||
116 | handle->root = b; | ||
117 | } | ||
118 | else if(b == handle->root) { | ||
119 | handle->root = a; | ||
120 | } | ||
121 | } | ||
122 | |||
123 | |||
124 | /** | ||
125 | * Update the pointer to the last node in the complete binary tree. | ||
126 | * Called internally after the root node has been deleted. | ||
127 | */ | ||
128 | static void __binheap_update_last(struct binheap *handle) | ||
129 | { | ||
130 | struct binheap_node *temp = handle->last; | ||
131 | |||
132 | /* find a "bend" in the tree. */ | ||
133 | while(temp->parent && (temp == temp->parent->left)) { | ||
134 | temp = temp->parent; | ||
135 | } | ||
136 | |||
137 | /* step over to sibling if we're not at root */ | ||
138 | if(temp->parent != NULL) { | ||
139 | temp = temp->parent->left; | ||
140 | } | ||
141 | |||
142 | /* now travel right as far as possible. */ | ||
143 | while(temp->right != NULL) { | ||
144 | temp = temp->right; | ||
145 | } | ||
146 | |||
147 | /* take one step to the left if we're not at the bottom-most level. */ | ||
148 | if(temp->left != NULL) { | ||
149 | temp = temp->left; | ||
150 | } | ||
151 | |||
152 | handle->last = temp; | ||
153 | } | ||
154 | |||
155 | |||
156 | /** | ||
157 | * Update the pointer to the node that will take the next inserted node. | ||
158 | * Called internally after a node has been inserted. | ||
159 | */ | ||
160 | static void __binheap_update_next(struct binheap *handle) | ||
161 | { | ||
162 | struct binheap_node *temp = handle->next; | ||
163 | |||
164 | /* find a "bend" in the tree. */ | ||
165 | while(temp->parent && (temp == temp->parent->right)) { | ||
166 | temp = temp->parent; | ||
167 | } | ||
168 | |||
169 | /* step over to sibling if we're not at root */ | ||
170 | if(temp->parent != NULL) { | ||
171 | temp = temp->parent->right; | ||
172 | } | ||
173 | |||
174 | /* now travel left as far as possible. */ | ||
175 | while(temp->left != NULL) { | ||
176 | temp = temp->left; | ||
177 | } | ||
178 | |||
179 | handle->next = temp; | ||
180 | } | ||
181 | |||
182 | |||
183 | |||
184 | /* bubble node up towards root */ | ||
185 | static void __binheap_bubble_up(struct binheap *handle, | ||
186 | struct binheap_node *node) | ||
187 | { | ||
188 | /* let BINHEAP_POISON data bubble to the top */ | ||
189 | |||
190 | while((node->parent != NULL) && | ||
191 | ((node->data == BINHEAP_POISON) || | ||
192 | handle->compare(node, node->parent))) { | ||
193 | __binheap_swap(node->parent, node); | ||
194 | node = node->parent; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | |||
199 | /* bubble node down, swapping with min-child */ | ||
200 | static void __binheap_bubble_down(struct binheap *handle) | ||
201 | { | ||
202 | struct binheap_node *node = handle->root; | ||
203 | |||
204 | while(node->left != NULL) { | ||
205 | if(node->right && handle->compare(node->right, node->left)) { | ||
206 | if(handle->compare(node->right, node)) { | ||
207 | __binheap_swap(node, node->right); | ||
208 | node = node->right; | ||
209 | } | ||
210 | else { | ||
211 | break; | ||
212 | } | ||
213 | } | ||
214 | else { | ||
215 | if(handle->compare(node->left, node)) { | ||
216 | __binheap_swap(node, node->left); | ||
217 | node = node->left; | ||
218 | } | ||
219 | else { | ||
220 | break; | ||
221 | } | ||
222 | } | ||
223 | } | ||
224 | } | ||
225 | |||
226 | |||
227 | void __binheap_add(struct binheap_node *new_node, | ||
228 | struct binheap *handle, | ||
229 | void *data) | ||
230 | { | ||
231 | new_node->data = data; | ||
232 | new_node->ref = new_node; | ||
233 | new_node->ref_ptr = &(new_node->ref); | ||
234 | |||
235 | if(!binheap_empty(handle)) { | ||
236 | /* insert left side first */ | ||
237 | if(handle->next->left == NULL) { | ||
238 | handle->next->left = new_node; | ||
239 | new_node->parent = handle->next; | ||
240 | new_node->left = NULL; | ||
241 | new_node->right = NULL; | ||
242 | |||
243 | handle->last = new_node; | ||
244 | |||
245 | __binheap_bubble_up(handle, new_node); | ||
246 | } | ||
247 | else { | ||
248 | /* left occupied. insert right. */ | ||
249 | handle->next->right = new_node; | ||
250 | new_node->parent = handle->next; | ||
251 | new_node->left = NULL; | ||
252 | new_node->right = NULL; | ||
253 | |||
254 | handle->last = new_node; | ||
255 | |||
256 | __binheap_update_next(handle); | ||
257 | __binheap_bubble_up(handle, new_node); | ||
258 | } | ||
259 | } | ||
260 | else { | ||
261 | /* first node in heap */ | ||
262 | |||
263 | new_node->parent = NULL; | ||
264 | new_node->left = NULL; | ||
265 | new_node->right = NULL; | ||
266 | |||
267 | handle->root = new_node; | ||
268 | handle->next = new_node; | ||
269 | handle->last = new_node; | ||
270 | } | ||
271 | } | ||
272 | |||
273 | |||
274 | /** | ||
275 | * Removes the root node from the heap. The node is removed after coalescing | ||
276 | * the binheap_node with its original data pointer at the root of the tree. | ||
277 | * | ||
278 | * The 'last' node in the tree is then swapped up to the root and bubbled | ||
279 | * down. | ||
280 | */ | ||
281 | void __binheap_delete_root(struct binheap *handle, | ||
282 | struct binheap_node *container) | ||
283 | { | ||
284 | struct binheap_node *root = handle->root; | ||
285 | |||
286 | if(root != container) { | ||
287 | /* coalesce */ | ||
288 | __binheap_swap_safe(handle, root, container); | ||
289 | root = container; | ||
290 | } | ||
291 | |||
292 | if(handle->last != root) { | ||
293 | /* swap 'last' node up to root and bubble it down. */ | ||
294 | |||
295 | struct binheap_node *to_move = handle->last; | ||
296 | |||
297 | if(to_move->parent != root) { | ||
298 | handle->next = to_move->parent; | ||
299 | |||
300 | if(handle->next->right == to_move) { | ||
301 | /* disconnect from parent */ | ||
302 | to_move->parent->right = NULL; | ||
303 | handle->last = handle->next->left; | ||
304 | } | ||
305 | else { | ||
306 | /* find new 'last' before we disconnect */ | ||
307 | __binheap_update_last(handle); | ||
308 | |||
309 | /* disconnect from parent */ | ||
310 | to_move->parent->left = NULL; | ||
311 | } | ||
312 | } | ||
313 | else { | ||
314 | /* 'last' is direct child of root */ | ||
315 | |||
316 | handle->next = to_move; | ||
317 | |||
318 | if(to_move == to_move->parent->right) { | ||
319 | to_move->parent->right = NULL; | ||
320 | handle->last = to_move->parent->left; | ||
321 | } | ||
322 | else { | ||
323 | to_move->parent->left = NULL; | ||
324 | handle->last = to_move; | ||
325 | } | ||
326 | } | ||
327 | to_move->parent = NULL; | ||
328 | |||
329 | /* reconnect as root. We can't just swap data ptrs since root node | ||
330 | * may be freed after this function returns. | ||
331 | */ | ||
332 | to_move->left = root->left; | ||
333 | to_move->right = root->right; | ||
334 | if(to_move->left != NULL) { | ||
335 | to_move->left->parent = to_move; | ||
336 | } | ||
337 | if(to_move->right != NULL) { | ||
338 | to_move->right->parent = to_move; | ||
339 | } | ||
340 | |||
341 | handle->root = to_move; | ||
342 | |||
343 | /* bubble down */ | ||
344 | __binheap_bubble_down(handle); | ||
345 | } | ||
346 | else { | ||
347 | /* removing last node in tree */ | ||
348 | handle->root = NULL; | ||
349 | handle->next = NULL; | ||
350 | handle->last = NULL; | ||
351 | } | ||
352 | |||
353 | /* mark as removed */ | ||
354 | container->parent = BINHEAP_POISON; | ||
355 | } | ||
356 | |||
357 | |||
358 | /** | ||
359 | * Delete an arbitrary node. Bubble node to delete up to the root, | ||
360 | * and then delete the root. | ||
361 | */ | ||
362 | void __binheap_delete(struct binheap_node *node_to_delete, | ||
363 | struct binheap *handle) | ||
364 | { | ||
365 | struct binheap_node *target = node_to_delete->ref; | ||
366 | void *temp_data = target->data; | ||
367 | |||
368 | /* temporarily set data to BINHEAP_POISON to let node bubble to the root. */ | ||
369 | target->data = BINHEAP_POISON; | ||
370 | |||
371 | __binheap_bubble_up(handle, target); | ||
372 | __binheap_delete_root(handle, node_to_delete); | ||
373 | |||
374 | node_to_delete->data = temp_data; /* restore node data pointer */ | ||
375 | } | ||
376 | |||
377 | |||
378 | /** | ||
379 | * Bubble up a node whose key has decreased in value (priority increased). | ||
380 | */ | ||
381 | void __binheap_decrease(struct binheap_node *orig_node, | ||
382 | struct binheap *handle) | ||
383 | { | ||
384 | struct binheap_node *target = orig_node->ref; | ||
385 | |||
386 | __binheap_bubble_up(handle, target); | ||
387 | } | ||
388 | |||
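A hypothetical usage sketch for the heap implemented above, assuming the binheap.h interface implied by this file (a compare callback stored in struct binheap, a binheap_empty() predicate, and the __binheap_* functions); struct job and its layout are illustrative, not LITMUS code:

struct job {
	unsigned long long deadline;
	struct binheap_node heap_node;
};

/* min-heap order on deadlines: the earlier deadline is nearer the root */
static int job_order(struct binheap_node *a, struct binheap_node *b)
{
	return ((struct job *)a->data)->deadline <
	       ((struct job *)b->data)->deadline;
}

/* assumes handle->compare was set to job_order at initialization */
static void insert_job(struct binheap *handle, struct job *j)
{
	__binheap_add(&j->heap_node, handle, j);
}

static struct job *take_earliest(struct binheap *handle)
{
	struct job *j;

	if (binheap_empty(handle))
		return NULL;
	j = handle->root->data;			/* peek the minimum */
	__binheap_delete_root(handle, &j->heap_node);
	return j;
}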
diff --git a/litmus/budget.c b/litmus/budget.c index d63e484ba160..f7505b0f86e5 100644 --- a/litmus/budget.c +++ b/litmus/budget.c | |||
@@ -120,12 +120,11 @@ void task_release(struct task_struct *t) | |||
120 | 120 | ||
121 | void server_release(struct task_struct *t) | 121 | void server_release(struct task_struct *t) |
122 | { | 122 | { |
123 | /* TODO: so wrong with respect to time accounting */ | ||
124 | lt_t now = litmus_clock(); | ||
125 | t->rt_param.job_params.exec_time = 0; | 123 | t->rt_param.job_params.exec_time = 0; |
126 | t->rt_param.job_params.release = t->rt_param.job_params.deadline; | 124 | t->rt_param.job_params.release = t->rt_param.job_params.deadline; |
127 | t->rt_param.job_params.deadline += get_rt_period(t); | 125 | t->rt_param.job_params.deadline += get_rt_period(t); |
128 | t->rt_param.job_params.fake_job_no++; | 126 | t->rt_param.job_params.fake_job_no++; |
127 | |||
129 | /* don't confuse linux */ | 128 | /* don't confuse linux */ |
130 | t->rt.time_slice = 1; | 129 | t->rt.time_slice = 1; |
131 | 130 | ||
diff --git a/litmus/ctrldev.c b/litmus/ctrldev.c index 6677a67cc945..9969ab17c190 100644 --- a/litmus/ctrldev.c +++ b/litmus/ctrldev.c | |||
@@ -30,27 +30,19 @@ static int alloc_ctrl_page(struct task_struct *t) | |||
30 | static int map_ctrl_page(struct task_struct *t, struct vm_area_struct* vma) | 30 | static int map_ctrl_page(struct task_struct *t, struct vm_area_struct* vma) |
31 | { | 31 | { |
32 | int err; | 32 | int err; |
33 | unsigned long pfn; | ||
34 | 33 | ||
35 | struct page* ctrl = virt_to_page(tsk_rt(t)->ctrl_page); | 34 | struct page* ctrl = virt_to_page(tsk_rt(t)->ctrl_page); |
36 | 35 | ||
37 | /* Increase ref count. Is decreased when vma is destroyed. */ | ||
38 | get_page(ctrl); | ||
39 | |||
40 | /* compute page frame number */ | ||
41 | pfn = page_to_pfn(ctrl); | ||
42 | |||
43 | TRACE_CUR(CTRL_NAME | 36 | TRACE_CUR(CTRL_NAME |
44 | ": mapping %p (pfn:%lx, %lx) to 0x%lx (prot:%lx)\n", | 37 | ": mapping %p (pfn:%lx) to 0x%lx (prot:%lx)\n", |
45 | tsk_rt(t)->ctrl_page, pfn, page_to_pfn(ctrl), vma->vm_start, | 38 | tsk_rt(t)->ctrl_page,page_to_pfn(ctrl), vma->vm_start, |
46 | vma->vm_page_prot); | 39 | vma->vm_page_prot); |
47 | 40 | ||
48 | /* Map it into the vma. Make sure to use PAGE_SHARED, otherwise | 41 | /* Map it into the vma. */ |
49 | * userspace actually gets a copy-on-write page. */ | 42 | err = vm_insert_page(vma, vma->vm_start, ctrl); |
50 | err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, PAGE_SHARED); | ||
51 | 43 | ||
52 | if (err) | 44 | if (err) |
53 | TRACE_CUR(CTRL_NAME ": remap_pfn_range() failed (%d)\n", err); | 45 | TRACE_CUR(CTRL_NAME ": vm_insert_page() failed (%d)\n", err); |
54 | 46 | ||
55 | return err; | 47 | return err; |
56 | } | 48 | } |
@@ -63,19 +55,19 @@ static void litmus_ctrl_vm_close(struct vm_area_struct* vma) | |||
63 | TRACE_CUR(CTRL_NAME | 55 | TRACE_CUR(CTRL_NAME |
64 | ": %p:%p vma:%p vma->vm_private_data:%p closed.\n", | 56 | ": %p:%p vma:%p vma->vm_private_data:%p closed.\n", |
65 | (void*) vma->vm_start, (void*) vma->vm_end, vma, | 57 | (void*) vma->vm_start, (void*) vma->vm_end, vma, |
66 | vma->vm_private_data, current->comm, | 58 | vma->vm_private_data); |
67 | current->pid); | ||
68 | } | 59 | } |
69 | 60 | ||
70 | static int litmus_ctrl_vm_fault(struct vm_area_struct* vma, | 61 | static int litmus_ctrl_vm_fault(struct vm_area_struct* vma, |
71 | struct vm_fault* vmf) | 62 | struct vm_fault* vmf) |
72 | { | 63 | { |
73 | /* This function should never be called, since | 64 | TRACE_CUR("%s flags=0x%x (off:%ld)\n", __FUNCTION__, |
74 | * all pages should have been mapped by mmap() | 65 | vma->vm_flags, vmf->pgoff); |
75 | * already. */ | 66 | |
76 | TRACE_CUR("%s flags=0x%x\n", __FUNCTION__, vma->vm_flags); | 67 | /* This function should never be called, since all pages should have |
68 | * been mapped by mmap() already. */ | ||
69 | WARN_ONCE(1, "Page faults should be impossible in the control page\n"); | ||
77 | 70 | ||
78 | /* nope, you only get one page */ | ||
79 | return VM_FAULT_SIGBUS; | 71 | return VM_FAULT_SIGBUS; |
80 | } | 72 | } |
81 | 73 | ||
@@ -103,9 +95,16 @@ static int litmus_ctrl_mmap(struct file* filp, struct vm_area_struct* vma) | |||
103 | return -EINVAL; | 95 | return -EINVAL; |
104 | 96 | ||
105 | vma->vm_ops = &litmus_ctrl_vm_ops; | 97 | vma->vm_ops = &litmus_ctrl_vm_ops; |
106 | /* this mapping should not be kept across forks, | 98 | /* This mapping should not be kept across forks, |
107 | * and cannot be expanded */ | 99 | * cannot be expanded, and is not a "normal" page. */ |
108 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; | 100 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_IO; |
101 | |||
102 | /* We don't want the first write access to trigger a "minor" page fault | ||
103 | * to mark the page as dirty. This is transient, private memory, we | ||
104 | * don't care if it was touched or not. __S011 means RW access, but not | ||
105 | * execute, and avoids copy-on-write behavior. | ||
106 | * See protection_map in mmap.c. */ | ||
107 | vma->vm_page_prot = __S011; | ||
109 | 108 | ||
110 | err = alloc_ctrl_page(current); | 109 | err = alloc_ctrl_page(current); |
111 | if (!err) | 110 | if (!err) |
diff --git a/litmus/edf_common.c b/litmus/edf_common.c index 9b44dc2d8d1e..5aca2934a7b5 100644 --- a/litmus/edf_common.c +++ b/litmus/edf_common.c | |||
@@ -14,6 +14,32 @@ | |||
14 | 14 | ||
15 | #include <litmus/edf_common.h> | 15 | #include <litmus/edf_common.h> |
16 | 16 | ||
17 | #ifdef CONFIG_EDF_TIE_BREAK_LATENESS_NORM | ||
18 | #include <litmus/fpmath.h> | ||
19 | #endif | ||
20 | |||
21 | #ifdef CONFIG_EDF_TIE_BREAK_HASH | ||
22 | #include <linux/hash.h> | ||
23 | static inline long edf_hash(struct task_struct *t) | ||
24 | { | ||
25 | /* pid is 32 bits, so normally we would shove that into the | ||
26 | * upper 32 bits, put the job number in the bottom 32 bits, | ||
27 | * and hash the 64-bit number with hash_64(). Sadly, | ||
28 | * in testing, hash_64() doesn't distribute keys well when the | ||
29 | * upper bits are close together (as would be the case with | ||
30 | * pids) and job numbers are equal (as would be the case with | ||
31 | * synchronous task sets with all relative deadlines equal). | ||
32 | * | ||
33 | * A 2006 Linux patch proposed the following solution | ||
34 | * (but for some reason it wasn't accepted...). | ||
35 | * | ||
36 | * At least this workaround works for 32-bit systems as well. | ||
37 | */ | ||
38 | return hash_32(hash_32((u32)tsk_rt(t)->job_params.job_no, 32) ^ t->pid, 32); | ||
39 | } | ||
40 | #endif | ||
41 | |||
42 | |||
17 | /* edf_higher_prio - returns true if first has a higher EDF priority | 43 | /* edf_higher_prio - returns true if first has a higher EDF priority |
18 | * than second. Deadline ties are broken by PID. | 44 | * than second. Deadline ties are broken by PID. |
19 | * | 45 | * |
@@ -63,25 +89,81 @@ int edf_higher_prio(struct task_struct* first, | |||
63 | 89 | ||
64 | #endif | 90 | #endif |
65 | 91 | ||
92 | if (earlier_deadline(first_task, second_task)) { | ||
93 | return 1; | ||
94 | } | ||
95 | else if (get_deadline(first_task) == get_deadline(second_task)) { | ||
96 | /* Need to tie break. All methods must set pid_break to 0/1 if | ||
97 | * first_task does not have priority over second_task. | ||
98 | */ | ||
99 | int pid_break; | ||
66 | 100 | ||
67 | return !is_realtime(second_task) || | ||
68 | 101 | ||
69 | /* is the deadline of the first task earlier? | 102 | #if defined(CONFIG_EDF_TIE_BREAK_LATENESS) |
70 | * Then it has higher priority. | 103 | /* Tie break by lateness. Jobs with greater lateness get |
104 | * priority. This should spread tardiness across all tasks, | ||
105 | * especially in task sets where all tasks have the same | ||
106 | * period and relative deadlines. | ||
71 | */ | 107 | */ |
72 | earlier_deadline(first_task, second_task) || | 108 | if (get_lateness(first_task) > get_lateness(second_task)) { |
73 | 109 | return 1; | |
74 | /* Do we have a deadline tie? | 110 | } |
75 | * Then break by PID. | 111 | pid_break = (get_lateness(first_task) == get_lateness(second_task)); |
112 | |||
113 | |||
114 | #elif defined(CONFIG_EDF_TIE_BREAK_LATENESS_NORM) | ||
115 | /* Tie break by lateness, normalized by relative deadline. Jobs with | ||
116 | * greater normalized lateness get priority. | ||
117 | * | ||
118 | * Note: Considered using the algebraically equivalent | ||
119 | * lateness(first)*relative_deadline(second) > | ||
120 | lateness(second)*relative_deadline(first) | ||
121 | * to avoid fixed-point math, but values are prone to overflow if inputs | ||
122 | * are on the order of several seconds, even in 64-bit. | ||
76 | */ | 123 | */ |
77 | (get_deadline(first_task) == get_deadline(second_task) && | 124 | fp_t fnorm = _frac(get_lateness(first_task), |
78 | (first_task->pid < second_task->pid || | 125 | get_rt_relative_deadline(first_task)); |
126 | fp_t snorm = _frac(get_lateness(second_task), | ||
127 | get_rt_relative_deadline(second_task)); | ||
128 | if (_gt(fnorm, snorm)) { | ||
129 | return 1; | ||
130 | } | ||
131 | pid_break = _eq(fnorm, snorm); | ||
79 | 132 | ||
80 | /* If the PIDs are the same then the task with the inherited | 133 | |
81 | * priority wins. | 134 | #elif defined(CONFIG_EDF_TIE_BREAK_HASH) |
135 | /* Tie break by comparing hashes of the (pid, job#) tuple. There should be | ||
136 | * a 50% chance that first_task has a higher priority than second_task. | ||
82 | */ | 137 | */ |
83 | (first_task->pid == second_task->pid && | 138 | long fhash = edf_hash(first_task); |
84 | !second->rt_param.inh_task))); | 139 | long shash = edf_hash(second_task); |
140 | if (fhash < shash) { | ||
141 | return 1; | ||
142 | } | ||
143 | pid_break = (fhash == shash); | ||
144 | #else | ||
145 | |||
146 | |||
147 | /* CONFIG_EDF_PID_TIE_BREAK */ | ||
148 | pid_break = 1; /* fall through to tie-break by PID */ | ||
149 | #endif | ||
150 | |||
151 | /* Tie break by pid */ | ||
152 | if(pid_break) { | ||
153 | if (first_task->pid < second_task->pid) { | ||
154 | return 1; | ||
155 | } | ||
156 | else if (first_task->pid == second_task->pid) { | ||
157 | /* If the PIDs are the same then the task with the | ||
158 | * inherited priority wins. | ||
159 | */ | ||
160 | if (!second->rt_param.inh_task) { | ||
161 | return 1; | ||
162 | } | ||
163 | } | ||
164 | } | ||
165 | } | ||
166 | return 0; /* fall-through: prio(second_task) >= prio(first_task) */ | ||
85 | } | 167 | } |
86 | 168 | ||
87 | int edf_ready_order(struct bheap_node* a, struct bheap_node* b) | 169 | int edf_ready_order(struct bheap_node* a, struct bheap_node* b) |
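The hash tie-break above can be exercised outside the kernel. The sketch below reimplements hash_32() with the era-appropriate golden-ratio multiplier (treat the exact constant as an assumption) and shows that adjacent PIDs with equal job numbers still hash far apart:

#include <stdint.h>
#include <stdio.h>

static uint32_t hash_32(uint32_t val, unsigned int bits)
{
	/* multiplicative hash, as in linux/hash.h of this era */
	return (val * 0x9e370001u) >> (32 - bits);
}

static uint32_t edf_hash(uint32_t pid, uint32_t job_no)
{
	/* same mixing as edf_hash() above: hash the job number,
	 * xor in the pid, hash again */
	return hash_32(hash_32(job_no, 32) ^ pid, 32);
}

int main(void)
{
	/* synchronous task set: equal job numbers, adjacent pids */
	printf("%u\n%u\n", edf_hash(1000, 5), edf_hash(1001, 5));
	return 0;
}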
diff --git a/litmus/fdso.c b/litmus/fdso.c index aa7b384264e3..cd85b9cd9a0a 100644 --- a/litmus/fdso.c +++ b/litmus/fdso.c | |||
@@ -23,6 +23,10 @@ extern struct fdso_ops generic_lock_ops; | |||
23 | static const struct fdso_ops* fdso_ops[] = { | 23 | static const struct fdso_ops* fdso_ops[] = { |
24 | &generic_lock_ops, /* FMLP_SEM */ | 24 | &generic_lock_ops, /* FMLP_SEM */ |
25 | &generic_lock_ops, /* SRP_SEM */ | 25 | &generic_lock_ops, /* SRP_SEM */ |
26 | &generic_lock_ops, /* MPCP_SEM */ | ||
27 | &generic_lock_ops, /* MPCP_VS_SEM */ | ||
28 | &generic_lock_ops, /* DPCP_SEM */ | ||
29 | &generic_lock_ops, /* PCP_SEM */ | ||
26 | }; | 30 | }; |
27 | 31 | ||
28 | static int fdso_create(void** obj_ref, obj_type_t type, void* __user config) | 32 | static int fdso_create(void** obj_ref, obj_type_t type, void* __user config) |
diff --git a/litmus/fp_common.c b/litmus/fp_common.c new file mode 100644 index 000000000000..31fc2db20adf --- /dev/null +++ b/litmus/fp_common.c | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | * litmus/fp_common.c | ||
3 | * | ||
4 | * Common functions for fixed-priority scheduler. | ||
5 | */ | ||
6 | |||
7 | #include <linux/percpu.h> | ||
8 | #include <linux/sched.h> | ||
9 | #include <linux/list.h> | ||
10 | |||
11 | #include <litmus/litmus.h> | ||
12 | #include <litmus/sched_plugin.h> | ||
13 | #include <litmus/sched_trace.h> | ||
14 | |||
15 | #include <litmus/fp_common.h> | ||
16 | |||
17 | /* fp_higher_prio - returns true if first has a higher static priority | ||
18 | * than second. Priority ties are broken by PID. | ||
19 | * | ||
20 | * both first and second may be NULL | ||
21 | */ | ||
22 | int fp_higher_prio(struct task_struct* first, | ||
23 | struct task_struct* second) | ||
24 | { | ||
25 | struct task_struct *first_task = first; | ||
26 | struct task_struct *second_task = second; | ||
27 | |||
28 | /* There is no point in comparing a task to itself. */ | ||
29 | if (unlikely(first && first == second)) { | ||
30 | TRACE_TASK(first, | ||
31 | "WARNING: pointless FP priority comparison.\n"); | ||
32 | return 0; | ||
33 | } | ||
34 | |||
35 | |||
36 | /* check for NULL tasks */ | ||
37 | if (!first || !second) | ||
38 | return first && !second; | ||
39 | |||
40 | #ifdef CONFIG_LITMUS_LOCKING | ||
41 | |||
42 | /* Check for inherited priorities. Change task | ||
43 | * used for comparison in such a case. | ||
44 | */ | ||
45 | if (unlikely(first->rt_param.inh_task)) | ||
46 | first_task = first->rt_param.inh_task; | ||
47 | if (unlikely(second->rt_param.inh_task)) | ||
48 | second_task = second->rt_param.inh_task; | ||
49 | |||
50 | /* Check for priority boosting. Tie-break by start of boosting. | ||
51 | */ | ||
52 | if (unlikely(is_priority_boosted(first_task))) { | ||
53 | /* first_task is boosted, how about second_task? */ | ||
54 | if (!is_priority_boosted(second_task) || | ||
55 | lt_before(get_boost_start(first_task), | ||
56 | get_boost_start(second_task))) | ||
57 | return 1; | ||
58 | else | ||
59 | return 0; | ||
60 | } else if (unlikely(is_priority_boosted(second_task))) | ||
61 | /* second_task is boosted, first is not */ | ||
62 | return 0; | ||
63 | |||
64 | #endif | ||
65 | |||
66 | |||
67 | return !is_realtime(second_task) || | ||
68 | |||
69 | get_priority(first_task) < get_priority(second_task) || | ||
70 | |||
71 | /* Break by PID. | ||
72 | */ | ||
73 | (get_priority(first_task) == get_priority(second_task) && | ||
74 | (first_task->pid < second_task->pid || | ||
75 | |||
76 | /* If the PIDs are the same then the task with the inherited | ||
77 | * priority wins. | ||
78 | */ | ||
79 | (first_task->pid == second_task->pid && | ||
80 | !second->rt_param.inh_task))); | ||
81 | } | ||
82 | |||
83 | int fp_ready_order(struct bheap_node* a, struct bheap_node* b) | ||
84 | { | ||
85 | return fp_higher_prio(bheap2task(a), bheap2task(b)); | ||
86 | } | ||
87 | |||
88 | void fp_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
89 | release_jobs_t release) | ||
90 | { | ||
91 | rt_domain_init(rt, fp_ready_order, resched, release); | ||
92 | } | ||
93 | |||
94 | /* need_to_preempt - check whether the task t needs to be preempted | ||
95 | */ | ||
96 | int fp_preemption_needed(struct fp_prio_queue *q, struct task_struct *t) | ||
97 | { | ||
98 | struct task_struct *pending; | ||
99 | |||
100 | pending = fp_prio_peek(q); | ||
101 | |||
102 | if (!pending) | ||
103 | return 0; | ||
104 | if (!t) | ||
105 | return 1; | ||
106 | |||
107 | /* make sure to get non-rt stuff out of the way */ | ||
108 | return !is_realtime(t) || fp_higher_prio(pending, t); | ||
109 | } | ||
110 | |||
111 | void fp_prio_queue_init(struct fp_prio_queue* q) | ||
112 | { | ||
113 | int i; | ||
114 | |||
115 | for (i = 0; i < FP_PRIO_BIT_WORDS; i++) | ||
116 | q->bitmask[i] = 0; | ||
117 | for (i = 0; i < LITMUS_MAX_PRIORITY; i++) | ||
118 | bheap_init(&q->queue[i]); | ||
119 | } | ||
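The chained boolean at the end of fp_higher_prio() above is compact but hard to scan; as a sketch, its final return statement is equivalent to the following if/else ladder (same identifiers as in the function):

	if (!is_realtime(second_task))
		return 1;	/* a real-time task beats a best-effort one */
	if (get_priority(first_task) < get_priority(second_task))
		return 1;	/* numerically smaller value = higher priority */
	if (get_priority(first_task) == get_priority(second_task)) {
		if (first_task->pid < second_task->pid)
			return 1;	/* equal priority: lower PID wins */
		if (first_task->pid == second_task->pid &&
		    !second->rt_param.inh_task)
			return 1;	/* same PID: the side holding the
					 * inherited priority wins */
	}
	return 0;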
diff --git a/litmus/jobs.c b/litmus/jobs.c index 7263cabf8c6c..bd3175baefae 100644 --- a/litmus/jobs.c +++ b/litmus/jobs.c | |||
@@ -5,21 +5,13 @@ | |||
5 | 5 | ||
6 | #include <litmus/litmus.h> | 6 | #include <litmus/litmus.h> |
7 | #include <litmus/jobs.h> | 7 | #include <litmus/jobs.h> |
8 | #include <litmus/trace.h> | ||
8 | 9 | ||
9 | void prepare_for_next_period(struct task_struct *t) | 10 | static inline void setup_release(struct task_struct *t, lt_t release) |
10 | { | 11 | { |
11 | BUG_ON(!t); | ||
12 | #ifdef CONFIG_PLUGIN_COLOR | ||
13 | tsk_rt(t)->tot_exec_time += tsk_rt(t)->job_params.exec_time; | ||
14 | #endif | ||
15 | /* prepare next release */ | 12 | /* prepare next release */ |
16 | t->rt_param.job_params.release = t->rt_param.job_params.deadline; | 13 | tsk_rt(t)->job_params.release = release; |
17 | t->rt_param.job_params.real_release = t->rt_param.job_params.release; | 14 | tsk_rt(t)->job_params.deadline = release + get_rt_relative_deadline(t); |
18 | t->rt_param.job_params.deadline += get_rt_period(t); | ||
19 | t->rt_param.job_params.real_deadline = t->rt_param.job_params.deadline; | ||
20 | t->rt_param.job_params.exec_time = 0; | ||
21 | tsk_rt(t)->job_params.release = tsk_rt(t)->job_params.deadline; | ||
22 | tsk_rt(t)->job_params.deadline += get_rt_period(t); | ||
23 | tsk_rt(t)->job_params.exec_time = 0; | 15 | tsk_rt(t)->job_params.exec_time = 0; |
24 | 16 | ||
25 | /* update job sequence number */ | 17 | /* update job sequence number */ |
@@ -27,23 +19,59 @@ void prepare_for_next_period(struct task_struct *t) | |||
27 | 19 | ||
28 | /* don't confuse Linux */ | 20 | /* don't confuse Linux */ |
29 | t->rt.time_slice = 1; | 21 | t->rt.time_slice = 1; |
22 | |||
23 | TRACE_TASK(t, "Releasing at %llu, deadline: %llu, period: %llu, now: %llu\n", | ||
24 | release, get_deadline(t), get_rt_period(t), litmus_clock()); | ||
25 | } | ||
26 | |||
27 | void prepare_for_next_period(struct task_struct *t) | ||
28 | { | ||
29 | BUG_ON(!t); | ||
30 | |||
31 | /* Record lateness before we set up the next job's | ||
32 | * release and deadline. Lateness may be negative. | ||
33 | */ | ||
34 | t->rt_param.job_params.lateness = | ||
35 | (long long)litmus_clock() - | ||
36 | (long long)t->rt_param.job_params.deadline; | ||
37 | |||
38 | setup_release(t, get_release(t) + get_rt_period(t)); | ||
30 | } | 39 | } |
31 | 40 | ||
32 | void release_at(struct task_struct *t, lt_t start) | 41 | void release_at(struct task_struct *t, lt_t start) |
33 | { | 42 | { |
34 | tsk_rt(t)->job_params.deadline = start; | 43 | BUG_ON(!t); |
35 | prepare_for_next_period(t); | 44 | setup_release(t, start); |
36 | set_rt_flags(t, RT_F_RUNNING); | 45 | set_rt_flags(t, RT_F_RUNNING); |
37 | } | 46 | } |
38 | 47 | ||
39 | |||
40 | /* | 48 | /* |
41 | * Deactivate current task until the beginning of the next period. | 49 | * Deactivate current task until the beginning of the next period. |
42 | */ | 50 | */ |
43 | long complete_job(void) | 51 | long complete_job(void) |
44 | { | 52 | { |
45 | /* Mark that we do not excute anymore */ | 53 | lt_t amount; |
54 | lt_t now = litmus_clock(); | ||
55 | lt_t exec_time = tsk_rt(current)->job_params.exec_time; | ||
56 | |||
57 | /* Task statistic summaries */ | ||
58 | tsk_rt(current)->tot_exec_time += exec_time; | ||
59 | if (lt_before(tsk_rt(current)->max_exec_time, exec_time)) | ||
60 | tsk_rt(current)->max_exec_time = exec_time; | ||
61 | |||
62 | if (is_tardy(current, now)) { | ||
63 | TRACE_TASK(current, "is tardy, now: %llu, deadline: %llu\n", | ||
64 | now, get_deadline(current)); | ||
65 | amount = now - get_deadline(current); | ||
66 | if (lt_after(amount, tsk_rt(current)->max_tardy)) | ||
67 | tsk_rt(current)->max_tardy = amount; | ||
68 | tsk_rt(current)->total_tardy += amount; | ||
69 | ++tsk_rt(current)->missed; | ||
70 | } | ||
71 | |||
72 | /* Mark that we do not execute anymore */ | ||
46 | set_rt_flags(current, RT_F_SLEEP); | 73 | set_rt_flags(current, RT_F_SLEEP); |
74 | |||
47 | /* call schedule, this will return when a new job arrives | 75 | /* call schedule, this will return when a new job arrives |
48 | * it also takes care of preparing for the next release | 76 | * it also takes care of preparing for the next release |
49 | */ | 77 | */ |
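A worked example of the bookkeeping in prepare_for_next_period() and complete_job() above, as user-space arithmetic (long long stands in for the kernel's time types; values are illustrative):

#include <stdio.h>

int main(void)
{
	long long deadline = 100;

	/* early completion: lateness is negative, no tardiness */
	long long now = 90;
	printf("lateness = %lld\n", now - deadline);	/* -10 */

	/* deadline miss: lateness is positive, and the same amount
	 * is accumulated into total_tardy/max_tardy */
	now = 130;
	printf("lateness = %lld\n", now - deadline);	/* 30 */
	return 0;
}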
diff --git a/litmus/litmus.c b/litmus/litmus.c index 7d20dd1be493..cb41548d3e2d 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -119,21 +119,25 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param) | |||
119 | goto out_unlock; | 119 | goto out_unlock; |
120 | } | 120 | } |
121 | 121 | ||
122 | /* set relative deadline to be implicit if left unspecified */ | ||
123 | if (tp.relative_deadline == 0) | ||
124 | tp.relative_deadline = tp.period; | ||
125 | |||
122 | if (tp.exec_cost <= 0) | 126 | if (tp.exec_cost <= 0) |
123 | goto out_unlock; | 127 | goto out_unlock; |
124 | if (tp.period <= 0) | 128 | if (tp.period <= 0) |
125 | goto out_unlock; | 129 | goto out_unlock; |
126 | if (!cpu_online(tp.cpu)) | 130 | if (!cpu_online(tp.cpu)) |
127 | goto out_unlock; | 131 | goto out_unlock; |
128 | if (tp.period < tp.exec_cost) | 132 | if (min(tp.relative_deadline, tp.period) < tp.exec_cost) /*density check*/ |
129 | { | 133 | { |
130 | printk(KERN_INFO "litmus: real-time task %d rejected " | 134 | printk(KERN_INFO "litmus: real-time task %d rejected " |
131 | "because wcet > period\n", pid); | 135 | "because task density > 1.0\n", pid); |
132 | goto out_unlock; | 136 | goto out_unlock; |
133 | } | 137 | } |
134 | if ( tp.cls != RT_CLASS_HARD && | 138 | if (tp.cls != RT_CLASS_HARD && |
135 | tp.cls != RT_CLASS_SOFT && | 139 | tp.cls != RT_CLASS_SOFT && |
136 | tp.cls != RT_CLASS_BEST_EFFORT) | 140 | tp.cls != RT_CLASS_BEST_EFFORT) |
137 | { | 141 | { |
138 | printk(KERN_INFO "litmus: real-time task %d rejected " | 142 | printk(KERN_INFO "litmus: real-time task %d rejected " |
139 | "because its class is invalid\n", pid); | 143 | "because its class is invalid\n", pid); |
@@ -414,11 +418,14 @@ long litmus_admit_task(struct task_struct* tsk) | |||
414 | 418 | ||
415 | BUG_ON(is_realtime(tsk)); | 419 | BUG_ON(is_realtime(tsk)); |
416 | 420 | ||
417 | if (get_rt_period(tsk) == 0 || | 421 | if (get_rt_relative_deadline(tsk) == 0 || |
418 | get_exec_cost(tsk) > get_rt_period(tsk)) { | 422 | get_exec_cost(tsk) > |
419 | TRACE_TASK(tsk, "litmus admit: invalid task parameters " | 423 | min(get_rt_relative_deadline(tsk), get_rt_period(tsk)) ) { |
420 | "(%lu, %lu)\n", | 424 | TRACE_TASK(tsk, |
421 | get_exec_cost(tsk), get_rt_period(tsk)); | 425 | "litmus admit: invalid task parameters " |
426 | "(e = %lu, p = %lu, d = %lu)\n", | ||
427 | get_exec_cost(tsk), get_rt_period(tsk), | ||
428 | get_rt_relative_deadline(tsk)); | ||
422 | retval = -EINVAL; | 429 | retval = -EINVAL; |
423 | goto out; | 430 | goto out; |
424 | } | 431 | } |
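Both hunks above enforce the same rule: default an unspecified relative deadline to the period, then reject any task whose density e / min(d, p) exceeds one. A small illustrative sketch of that check:

#include <stdio.h>

static int params_ok(long long e, long long p, long long d)
{
	if (d == 0)
		d = p;				/* implicit deadline */
	if (e <= 0 || p <= 0)
		return 0;
	return e <= (d < p ? d : p);		/* density <= 1.0 */
}

int main(void)
{
	printf("%d\n", params_ok(3, 10, 0));	/* 1: d := 10, 3/10 <= 1 */
	printf("%d\n", params_ok(7, 10, 5));	/* 0: 7 > min(5, 10) */
	return 0;
}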
@@ -469,6 +476,8 @@ void litmus_exit_task(struct task_struct* tsk) | |||
469 | { | 476 | { |
470 | if (is_realtime(tsk)) { | 477 | if (is_realtime(tsk)) { |
471 | sched_trace_task_completion(tsk, 1); | 478 | sched_trace_task_completion(tsk, 1); |
479 | sched_trace_task_exit(tsk); | ||
480 | sched_trace_task_tardy(tsk); | ||
472 | 481 | ||
473 | litmus->task_exit(tsk); | 482 | litmus->task_exit(tsk); |
474 | 483 | ||
diff --git a/litmus/locking.c b/litmus/locking.c index 4881ca119acf..1d32dcd8e726 100644 --- a/litmus/locking.c +++ b/litmus/locking.c | |||
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #include <litmus/sched_plugin.h> | 7 | #include <litmus/sched_plugin.h> |
8 | #include <litmus/trace.h> | 8 | #include <litmus/trace.h> |
9 | #include <litmus/wait.h> | ||
9 | 10 | ||
10 | static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg); | 11 | static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg); |
11 | static int open_generic_lock(struct od_table_entry* entry, void* __user arg); | 12 | static int open_generic_lock(struct od_table_entry* entry, void* __user arg); |
@@ -127,6 +128,37 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq) | |||
127 | return(t); | 128 | return(t); |
128 | } | 129 | } |
129 | 130 | ||
131 | unsigned int __add_wait_queue_prio_exclusive( | ||
132 | wait_queue_head_t* head, | ||
133 | prio_wait_queue_t *new) | ||
134 | { | ||
135 | struct list_head *pos; | ||
136 | unsigned int passed = 0; | ||
137 | |||
138 | new->wq.flags |= WQ_FLAG_EXCLUSIVE; | ||
139 | |||
140 | /* find a spot where the new entry is less than the next */ | ||
141 | list_for_each(pos, &head->task_list) { | ||
142 | prio_wait_queue_t* queued = list_entry(pos, prio_wait_queue_t, | ||
143 | wq.task_list); | ||
144 | |||
145 | if (unlikely(lt_before(new->priority, queued->priority) || | ||
146 | (new->priority == queued->priority && | ||
147 | new->tie_breaker < queued->tie_breaker))) { | ||
148 | /* pos is not less than new, thus insert here */ | ||
149 | __list_add(&new->wq.task_list, pos->prev, pos); | ||
150 | goto out; | ||
151 | } | ||
152 | passed++; | ||
153 | } | ||
154 | |||
155 | /* if we get to this point, either the list is empty or every | ||
156 | * queued element is less than new. | ||
157 | * Let's add new to the end. */ | ||
158 | list_add_tail(&new->wq.task_list, &head->task_list); | ||
159 | out: | ||
160 | return passed; | ||
161 | } | ||
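A user-space sketch of the ordering that __add_wait_queue_prio_exclusive() maintains above: the queue stays sorted by (priority, tie_breaker), and the return value counts how many already-queued waiters the new entry was placed behind. All types below are illustrative stand-ins:

#include <stddef.h>

struct pq {
	unsigned long long prio;	/* smaller = earlier in queue */
	int tie;			/* breaks equal priorities */
	struct pq *next;
};

static unsigned int add_sorted(struct pq **head, struct pq *new_entry)
{
	unsigned int passed = 0;
	struct pq **pos = head;

	/* skip every queued entry whose key is not larger than ours */
	while (*pos && ((*pos)->prio < new_entry->prio ||
	       ((*pos)->prio == new_entry->prio &&
		(*pos)->tie <= new_entry->tie))) {
		pos = &(*pos)->next;
		passed++;
	}
	new_entry->next = *pos;		/* insert before first larger key */
	*pos = new_entry;
	return passed;
}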
130 | 162 | ||
131 | #else | 163 | #else |
132 | 164 | ||
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c index faf876623d62..c63bd0303916 100644 --- a/litmus/rt_domain.c +++ b/litmus/rt_domain.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <litmus/bheap.h> | 20 | #include <litmus/bheap.h> |
21 | 21 | ||
22 | /* Uncomment when debugging timer races... */ | 22 | /* Uncomment when debugging timer races... */ |
23 | #if 0 | 23 | #if 1 |
24 | #define VTRACE_TASK TRACE_TASK | 24 | #define VTRACE_TASK TRACE_TASK |
25 | #define VTRACE TRACE | 25 | #define VTRACE TRACE |
26 | #else | 26 | #else |
@@ -53,14 +53,15 @@ static void do_release(struct release_heap *rh) | |||
53 | { | 53 | { |
54 | unsigned long flags; | 54 | unsigned long flags; |
55 | 55 | ||
56 | if (CRIT_LEVEL_B == rh->dom->level) | 56 | if (CRIT_LEVEL_B == rh->dom->level) { |
57 | TS_LVLB_RELEASE_START; | 57 | TS_LVLB_RELEASE_START; |
58 | else | 58 | } else { |
59 | TS_LVLC_RELEASE_START; | 59 | TS_LVLC_RELEASE_START; |
60 | } | ||
60 | 61 | ||
61 | TS_RELEASE_LATENCY(rh->release_time); | 62 | TS_RELEASE_LATENCY(rh->release_time); |
62 | 63 | ||
63 | VTRACE("on_release_timer(0x%p) starts.\n", timer); | 64 | VTRACE("on_release_timer starts.\n"); |
64 | 65 | ||
65 | TS_RELEASE_START; | 66 | TS_RELEASE_START; |
66 | 67 | ||
@@ -74,10 +75,11 @@ static void do_release(struct release_heap *rh) | |||
74 | /* call release callback */ | 75 | /* call release callback */ |
75 | rh->dom->release_jobs(rh->dom, &rh->heap); | 76 | rh->dom->release_jobs(rh->dom, &rh->heap); |
76 | 77 | ||
77 | if (CRIT_LEVEL_B == rh->dom->level) | 78 | if (CRIT_LEVEL_B == rh->dom->level) { |
78 | TS_LVLB_RELEASE_END; | 79 | TS_LVLB_RELEASE_END; |
79 | else | 80 | } else { |
80 | TS_LVLC_RELEASE_END; | 81 | TS_LVLC_RELEASE_END; |
82 | } | ||
81 | } | 83 | } |
82 | 84 | ||
83 | #ifdef CONFIG_MERGE_TIMERS | 85 | #ifdef CONFIG_MERGE_TIMERS |
@@ -267,6 +269,7 @@ static void setup_release(rt_domain_t *_rt) | |||
267 | list_for_each_safe(pos, safe, &list) { | 269 | list_for_each_safe(pos, safe, &list) { |
268 | /* pick task of work list */ | 270 | /* pick task of work list */ |
269 | t = list_entry(pos, struct task_struct, rt_param.list); | 271 | t = list_entry(pos, struct task_struct, rt_param.list); |
272 | sched_trace_task_release(t); | ||
270 | list_del_init(pos); | 273 | list_del_init(pos); |
271 | 274 | ||
272 | /* put into release heap while holding release_lock */ | 275 | /* put into release heap while holding release_lock */ |
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 480c62bc895b..b0c16e34d2c5 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <litmus/litmus.h> | 35 | #include <litmus/litmus.h> |
36 | #include <litmus/jobs.h> | 36 | #include <litmus/jobs.h> |
37 | #include <litmus/preempt.h> | 37 | #include <litmus/preempt.h> |
38 | #include <litmus/budget.h> | ||
38 | #include <litmus/sched_plugin.h> | 39 | #include <litmus/sched_plugin.h> |
39 | #include <litmus/edf_common.h> | 40 | #include <litmus/edf_common.h> |
40 | #include <litmus/sched_trace.h> | 41 | #include <litmus/sched_trace.h> |
@@ -304,11 +305,11 @@ static void check_for_preemptions(cedf_domain_t *cluster) | |||
304 | &per_cpu(cedf_cpu_entries, task_cpu(task))); | 305 | &per_cpu(cedf_cpu_entries, task_cpu(task))); |
305 | if(affinity) | 306 | if(affinity) |
306 | last = affinity; | 307 | last = affinity; |
307 | else if(last->linked) | 308 | else if(requeue_preempted_job(last->linked)) |
308 | requeue(last->linked); | 309 | requeue(last->linked); |
309 | } | 310 | } |
310 | #else | 311 | #else |
311 | if (last->linked) | 312 | if (requeue_preempted_job(last->linked)) |
312 | requeue(last->linked); | 313 | requeue(last->linked); |
313 | #endif | 314 | #endif |
314 | link_task_to_cpu(task, last); | 315 | link_task_to_cpu(task, last); |
@@ -478,9 +479,9 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) | |||
478 | /* Any task that is preemptable and either exhausts its execution | 479 | /* Any task that is preemptable and either exhausts its execution |
479 | * budget or wants to sleep completes. We may have to reschedule after | 480 | * budget or wants to sleep completes. We may have to reschedule after |
480 | * this. Don't do a job completion if we block (can't have timers running | 481 | * this. Don't do a job completion if we block (can't have timers running |
481 | * for blocked jobs). Preemption go first for the same reason. | 482 | * for blocked jobs). |
482 | */ | 483 | */ |
483 | if (!np && (out_of_time || sleep) && !blocks && !preempt) | 484 | if (!np && (out_of_time || sleep) && !blocks) |
484 | job_completion(entry->scheduled, !sleep); | 485 | job_completion(entry->scheduled, !sleep); |
485 | 486 | ||
486 | /* Link pending task if we became unlinked. | 487 | /* Link pending task if we became unlinked. |
diff --git a/litmus/sched_color.c b/litmus/sched_color.c index 44327d60aaa5..66ce40fd1b57 100644 --- a/litmus/sched_color.c +++ b/litmus/sched_color.c | |||
@@ -278,21 +278,7 @@ static void check_for_fifo_preempt(void) | |||
278 | */ | 278 | */ |
279 | static void job_arrival(struct task_struct *t) | 279 | static void job_arrival(struct task_struct *t) |
280 | { | 280 | { |
281 | int i; | ||
282 | rt_domain_t *dom = task_dom(task_entry(t), t); | 281 | rt_domain_t *dom = task_dom(task_entry(t), t); |
283 | struct dgl_group_req *gr = tsk_rt(t)->req; | ||
284 | struct control_page *cp = tsk_rt(t)->ctrl_page; | ||
285 | struct color_ctrl_page *ccp = tsk_rt(t)->color_ctrl_page; | ||
286 | |||
287 | /* Fill request */ | ||
288 | if (cp && ccp && cp->colors_updated) { | ||
289 | cp->colors_updated = 0; | ||
290 | dgl_group_req_init(&group_lock, gr); | ||
291 | for (i = 0; ccp->pages[i]; ++i) | ||
292 | set_req(&group_lock, gr, ccp->colors[i], ccp->pages[i]); | ||
293 | } else { | ||
294 | TRACE("Oh noz: %p %p %d\n", cp, ccp, ((cp) ? cp->colors_updated : -1)); | ||
295 | } | ||
296 | 282 | ||
297 | lock_if(&fifo_lock, is_be(t)); | 283 | lock_if(&fifo_lock, is_be(t)); |
298 | requeue(dom, t); | 284 | requeue(dom, t); |
@@ -519,12 +505,12 @@ static struct task_struct* color_schedule(struct task_struct *prev) | |||
519 | acquire_resources(next); | 505 | acquire_resources(next); |
520 | if (has_resources(next, entry->server.cpu)) { | 506 | if (has_resources(next, entry->server.cpu)) { |
521 | TRACE_TASK(next, "Has group lock\n"); | 507 | TRACE_TASK(next, "Has group lock\n"); |
522 | sched_trace_task_resume(next, 1); | 508 | sched_trace_task_resume_on(next, 1); |
523 | } else { | 509 | } else { |
524 | TRACE_TASK(next, "Does not have lock, 0x%p does\n", | 510 | TRACE_TASK(next, "Does not have lock, 0x%p does\n", |
525 | group_lock.acquired[entry->server.cpu]); | 511 | group_lock.acquired[entry->server.cpu]); |
526 | if (next != prev) | 512 | if (next != prev) |
527 | sched_trace_task_block(next, 1); | 513 | sched_trace_task_block_on(next, 1); |
528 | next = NULL; | 514 | next = NULL; |
529 | server_running = 0; | 515 | server_running = 0; |
530 | } | 516 | } |
@@ -562,9 +548,13 @@ static struct task_struct* color_schedule(struct task_struct *prev) | |||
562 | 548 | ||
563 | static void color_task_new(struct task_struct *t, int on_rq, int running) | 549 | static void color_task_new(struct task_struct *t, int on_rq, int running) |
564 | { | 550 | { |
551 | int i; | ||
565 | unsigned long flags; | 552 | unsigned long flags; |
566 | struct cpu_entry *entry; | 553 | struct cpu_entry *entry; |
567 | struct dgl_group_req *req; | 554 | struct dgl_group_req *req; |
555 | struct control_page *cp = tsk_rt(t)->ctrl_page; | ||
556 | struct color_ctrl_page *ccp = tsk_rt(t)->color_ctrl_page; | ||
557 | |||
568 | 558 | ||
569 | TRACE_TASK(t, "New colored task\n"); | 559 | TRACE_TASK(t, "New colored task\n"); |
570 | entry = (is_be(t)) ? local_entry : task_entry(t); | 560 | entry = (is_be(t)) ? local_entry : task_entry(t); |
@@ -583,6 +573,17 @@ static void color_task_new(struct task_struct *t, int on_rq, int running) | |||
583 | 573 | ||
584 | release_at(t, litmus_clock()); | 574 | release_at(t, litmus_clock()); |
585 | 575 | ||
576 | /* Fill request */ | ||
577 | if (cp && ccp && cp->colors_updated) { | ||
578 | TRACE_TASK(t, "Initializing group request\n"); | ||
579 | cp->colors_updated = 0; | ||
580 | dgl_group_req_init(&group_lock, req); | ||
581 | for (i = 0; ccp->pages[i]; ++i) | ||
582 | set_req(&group_lock, req, ccp->colors[i], ccp->pages[i]); | ||
583 | } else { | ||
584 | TRACE("Oh noz: %p %p %d\n", cp, ccp, ((cp) ? cp->colors_updated : -1)); | ||
585 | } | ||
586 | |||
586 | if (running) { | 587 | if (running) { |
587 | /* No need to lock with irqs disabled */ | 588 | /* No need to lock with irqs disabled */ |
588 | TRACE_TASK(t, "Already scheduled on %d\n", entry->server.cpu); | 589 | TRACE_TASK(t, "Already scheduled on %d\n", entry->server.cpu); |
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 0aa44dbddbd6..4f93d16b4d52 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <litmus/trace.h> | 21 | #include <litmus/trace.h> |
22 | 22 | ||
23 | #include <litmus/preempt.h> | 23 | #include <litmus/preempt.h> |
24 | #include <litmus/budget.h> | ||
24 | 25 | ||
25 | #include <litmus/bheap.h> | 26 | #include <litmus/bheap.h> |
26 | 27 | ||
@@ -296,11 +297,11 @@ static void check_for_preemptions(void) | |||
296 | &per_cpu(gsnedf_cpu_entries, task_cpu(task))); | 297 | &per_cpu(gsnedf_cpu_entries, task_cpu(task))); |
297 | if (affinity) | 298 | if (affinity) |
298 | last = affinity; | 299 | last = affinity; |
299 | else if (last->linked) | 300 | else if (requeue_preempted_job(last->linked)) |
300 | requeue(last->linked); | 301 | requeue(last->linked); |
301 | } | 302 | } |
302 | #else | 303 | #else |
303 | if (last->linked) | 304 | if (requeue_preempted_job(last->linked)) |
304 | requeue(last->linked); | 305 | requeue(last->linked); |
305 | #endif | 306 | #endif |
306 | 307 | ||
@@ -426,9 +427,8 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
426 | /* (0) Determine state */ | 427 | /* (0) Determine state */ |
427 | exists = entry->scheduled != NULL; | 428 | exists = entry->scheduled != NULL; |
428 | blocks = exists && !is_running(entry->scheduled); | 429 | blocks = exists && !is_running(entry->scheduled); |
429 | out_of_time = exists && | 430 | out_of_time = exists && budget_enforced(entry->scheduled) |
430 | budget_enforced(entry->scheduled) && | 431 | && budget_exhausted(entry->scheduled); |
431 | budget_exhausted(entry->scheduled); | ||
432 | np = exists && is_np(entry->scheduled); | 432 | np = exists && is_np(entry->scheduled); |
433 | sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | 433 | sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; |
434 | preempt = entry->scheduled != entry->linked; | 434 | preempt = entry->scheduled != entry->linked; |
@@ -466,9 +466,9 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
466 | /* Any task that is preemptable and either exhausts its execution | 466 | /* Any task that is preemptable and either exhausts its execution |
467 | * budget or wants to sleep completes. We may have to reschedule after | 467 | * budget or wants to sleep completes. We may have to reschedule after |
468 | * this. Don't do a job completion if we block (can't have timers running | 468 | * this. Don't do a job completion if we block (can't have timers running |
469 | * for blocked jobs). Preemption go first for the same reason. | 469 | * for blocked jobs). |
470 | */ | 470 | */ |
471 | if (!np && (out_of_time || sleep) && !blocks && !preempt) | 471 | if (!np && (out_of_time || sleep) && !blocks) |
472 | job_completion(entry->scheduled, !sleep); | 472 | job_completion(entry->scheduled, !sleep); |
473 | 473 | ||
474 | /* Link pending task if we became unlinked. | 474 | /* Link pending task if we became unlinked. |
@@ -797,7 +797,6 @@ int gsnedf_fmlp_lock(struct litmus_lock* l) | |||
797 | if (edf_higher_prio(t, sem->hp_waiter)) { | 797 | if (edf_higher_prio(t, sem->hp_waiter)) { |
798 | sem->hp_waiter = t; | 798 | sem->hp_waiter = t; |
799 | if (edf_higher_prio(t, sem->owner)) { | 799 | if (edf_higher_prio(t, sem->owner)) { |
800 | sched_trace_priority_donate(sem->owner, t, l->id); | ||
801 | set_priority_inheritance(sem->owner, sem->hp_waiter); | 800 | set_priority_inheritance(sem->owner, sem->hp_waiter); |
802 | 801 | ||
803 | } | 802 | } |
@@ -808,7 +807,7 @@ int gsnedf_fmlp_lock(struct litmus_lock* l) | |||
808 | /* release lock before sleeping */ | 807 | /* release lock before sleeping */ |
809 | spin_unlock_irqrestore(&sem->wait.lock, flags); | 808 | spin_unlock_irqrestore(&sem->wait.lock, flags); |
810 | 809 | ||
811 | sched_trace_task_block(t, l->id); | 810 | sched_trace_task_block(t); |
812 | 811 | ||
813 | /* We depend on the FIFO order. Thus, we don't need to recheck | 812 | /* We depend on the FIFO order. Thus, we don't need to recheck |
814 | * when we wake up; we are guaranteed to have the lock since | 813 | * when we wake up; we are guaranteed to have the lock since |
@@ -830,8 +829,6 @@ int gsnedf_fmlp_lock(struct litmus_lock* l) | |||
830 | spin_unlock_irqrestore(&sem->wait.lock, flags); | 829 | spin_unlock_irqrestore(&sem->wait.lock, flags); |
831 | } | 830 | } |
832 | 831 | ||
833 | sched_trace_resource_acquire(t, l->id); | ||
834 | |||
835 | return 0; | 832 | return 0; |
836 | } | 833 | } |
837 | 834 | ||
@@ -885,8 +882,6 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l) | |||
885 | if (tsk_rt(t)->inh_task) | 882 | if (tsk_rt(t)->inh_task) |
886 | clear_priority_inheritance(t); | 883 | clear_priority_inheritance(t); |
887 | 884 | ||
888 | sched_trace_resource_release(t, l->id); | ||
889 | |||
890 | out: | 885 | out: |
891 | spin_unlock_irqrestore(&sem->wait.lock, flags); | 886 | spin_unlock_irqrestore(&sem->wait.lock, flags); |
892 | 887 | ||
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index 3de3c8605aae..6553948407de 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c | |||
@@ -160,7 +160,7 @@ static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, | |||
160 | int flags) | 160 | int flags) |
161 | { | 161 | { |
162 | if (flags & ENQUEUE_WAKEUP) { | 162 | if (flags & ENQUEUE_WAKEUP) { |
163 | sched_trace_task_resume(p, 0); | 163 | sched_trace_task_resume(p); |
164 | tsk_rt(p)->present = 1; | 164 | tsk_rt(p)->present = 1; |
165 | /* LITMUS^RT plugins need to update the state | 165 | /* LITMUS^RT plugins need to update the state |
166 | * _before_ making it available in global structures. | 166 | * _before_ making it available in global structures. |
@@ -185,7 +185,7 @@ static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, | |||
185 | if (flags & DEQUEUE_SLEEP) { | 185 | if (flags & DEQUEUE_SLEEP) { |
186 | litmus->task_block(p); | 186 | litmus->task_block(p); |
187 | tsk_rt(p)->present = 0; | 187 | tsk_rt(p)->present = 0; |
188 | sched_trace_task_block(p, 0); | 188 | sched_trace_task_block(p); |
189 | 189 | ||
190 | rq->litmus.nr_running--; | 190 | rq->litmus.nr_running--; |
191 | } else | 191 | } else |
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c index 16f1065bbdca..72c06a492ef9 100644 --- a/litmus/sched_pfair.c +++ b/litmus/sched_pfair.c | |||
@@ -850,6 +850,13 @@ static long pfair_admit_task(struct task_struct* t) | |||
850 | cpu_cluster(pstate[task_cpu(t)])) | 850 | cpu_cluster(pstate[task_cpu(t)])) |
851 | return -EINVAL; | 851 | return -EINVAL; |
852 | 852 | ||
853 | if (get_rt_period(t) != get_rt_relative_deadline(t)) { | ||
854 | printk(KERN_INFO "%s: Admission rejected. " | ||
855 | "Only implicit deadlines are currently supported.\n", | ||
856 | litmus->plugin_name); | ||
857 | return -EINVAL; | ||
858 | } | ||
859 | |||
853 | /* Pfair is a tick-based method, so the time | 860 | /* Pfair is a tick-based method, so the time |
854 | * of interest is jiffies. Calculate tick-based | 861 | * of interest is jiffies. Calculate tick-based |
855 | * times for everything. | 862 | * times for everything. |
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c new file mode 100644 index 000000000000..b1d5b4326a0e --- /dev/null +++ b/litmus/sched_pfp.c | |||
@@ -0,0 +1,1693 @@ | |||
1 | /* | ||
2 | * litmus/sched_pfp.c | ||
3 | * | ||
4 | * Implementation of partitioned fixed-priority scheduling. | ||
5 | * Based on PSN-EDF. | ||
6 | */ | ||
7 | |||
8 | #include <linux/percpu.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/list.h> | ||
11 | #include <linux/spinlock.h> | ||
12 | #include <linux/module.h> | ||
13 | |||
14 | #include <litmus/litmus.h> | ||
15 | #include <litmus/wait.h> | ||
16 | #include <litmus/jobs.h> | ||
17 | #include <litmus/preempt.h> | ||
18 | #include <litmus/fp_common.h> | ||
19 | #include <litmus/sched_plugin.h> | ||
20 | #include <litmus/sched_trace.h> | ||
21 | #include <litmus/trace.h> | ||
22 | #include <litmus/budget.h> | ||
23 | |||
24 | #include <linux/uaccess.h> | ||
25 | |||
26 | |||
27 | typedef struct { | ||
28 | rt_domain_t domain; | ||
29 | struct fp_prio_queue ready_queue; | ||
30 | int cpu; | ||
31 | struct task_struct* scheduled; /* only RT tasks */ | ||
32 | /* | ||
33 | * scheduling lock slock | ||
34 | * protects the domain and serializes scheduling decisions | ||
35 | */ | ||
36 | #define slock domain.ready_lock | ||
37 | |||
38 | } pfp_domain_t; | ||
39 | |||
40 | DEFINE_PER_CPU(pfp_domain_t, pfp_domains); | ||
41 | |||
42 | pfp_domain_t* pfp_doms[NR_CPUS]; | ||
43 | |||
44 | #define local_pfp (&__get_cpu_var(pfp_domains)) | ||
45 | #define remote_dom(cpu) (&per_cpu(pfp_domains, cpu).domain) | ||
46 | #define remote_pfp(cpu) (&per_cpu(pfp_domains, cpu)) | ||
47 | #define task_dom(task) remote_dom(get_partition(task)) | ||
48 | #define task_pfp(task) remote_pfp(get_partition(task)) | ||
49 | |||
50 | /* we assume the lock is being held */ | ||
51 | static void preempt(pfp_domain_t *pfp) | ||
52 | { | ||
53 | preempt_if_preemptable(pfp->scheduled, pfp->cpu); | ||
54 | } | ||
55 | |||
56 | static unsigned int priority_index(struct task_struct* t) | ||
57 | { | ||
58 | #ifdef CONFIG_LITMUS_LOCKING | ||
59 | if (unlikely(t->rt_param.inh_task)) | ||
60 | /* use effective priority */ | ||
61 | t = t->rt_param.inh_task; | ||
62 | |||
63 | if (is_priority_boosted(t)) { | ||
64 | /* zero is reserved for priority-boosted tasks */ | ||
65 | return 0; | ||
66 | } else | ||
67 | #endif | ||
68 | return get_priority(t); | ||
69 | } | ||
70 | |||
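
For reference, priority_index() above maps each task to a bucket of the fixed-priority ready queue: index 0 is reserved for priority-boosted lock holders, and all other tasks use their (possibly inherited) fixed priority. A simplified userspace model of that mapping (the struct fields are hypothetical stand-ins for the tsk_rt() state, for illustration only):

    #include <assert.h>

    /* Simplified stand-in for the relevant rt_param fields. */
    struct task {
        unsigned int prio;     /* base fixed priority (lower = higher) */
        int boosted;           /* is_priority_boosted() */
        struct task *inh_task; /* priority-inheritance source, or NULL */
    };

    static unsigned int priority_index(struct task *t)
    {
        if (t->inh_task)
            t = t->inh_task;   /* use effective priority */
        if (t->boosted)
            return 0;          /* index 0 is reserved for boosted tasks */
        return t->prio;
    }

    int main(void)
    {
        struct task holder = { .prio = 2, .boosted = 1, .inh_task = 0 };
        struct task waiter = { .prio = 7, .boosted = 0, .inh_task = &holder };

        assert(priority_index(&holder) == 0); /* boosted => top bucket */
        assert(priority_index(&waiter) == 0); /* inherits the boosted holder */
        return 0;
    }
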
71 | |||
72 | static void pfp_release_jobs(rt_domain_t* rt, struct bheap* tasks) | ||
73 | { | ||
74 | pfp_domain_t *pfp = container_of(rt, pfp_domain_t, domain); | ||
75 | unsigned long flags; | ||
76 | struct task_struct* t; | ||
77 | struct bheap_node* hn; | ||
78 | |||
79 | raw_spin_lock_irqsave(&pfp->slock, flags); | ||
80 | |||
81 | while (!bheap_empty(tasks)) { | ||
82 | hn = bheap_take(fp_ready_order, tasks); | ||
83 | t = bheap2task(hn); | ||
84 | TRACE_TASK(t, "released (part:%d prio:%d)\n", | ||
85 | get_partition(t), get_priority(t)); | ||
86 | fp_prio_add(&pfp->ready_queue, t, priority_index(t)); | ||
87 | } | ||
88 | |||
89 | /* do we need to preempt? */ | ||
90 | if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled)) { | ||
91 | TRACE_CUR("preempted by new release\n"); | ||
92 | preempt(pfp); | ||
93 | } | ||
94 | |||
95 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | ||
96 | } | ||
97 | |||
98 | static void pfp_preempt_check(pfp_domain_t *pfp) | ||
99 | { | ||
100 | if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled)) | ||
101 | preempt(pfp); | ||
102 | } | ||
103 | |||
104 | static void pfp_domain_init(pfp_domain_t* pfp, | ||
105 | int cpu) | ||
106 | { | ||
107 | fp_domain_init(&pfp->domain, NULL, pfp_release_jobs); | ||
108 | pfp->cpu = cpu; | ||
109 | pfp->scheduled = NULL; | ||
110 | fp_prio_queue_init(&pfp->ready_queue); | ||
111 | } | ||
112 | |||
113 | static void requeue(struct task_struct* t, pfp_domain_t *pfp) | ||
114 | { | ||
115 | if (t->state != TASK_RUNNING) | ||
116 | TRACE_TASK(t, "requeue: !TASK_RUNNING\n"); | ||
117 | |||
118 | set_rt_flags(t, RT_F_RUNNING); | ||
119 | if (is_released(t, litmus_clock())) | ||
120 | fp_prio_add(&pfp->ready_queue, t, priority_index(t)); | ||
121 | else | ||
122 | add_release(&pfp->domain, t); /* it has got to wait */ | ||
123 | } | ||
124 | |||
125 | static void job_completion(struct task_struct* t, int forced) | ||
126 | { | ||
127 | sched_trace_task_completion(t, forced); | ||
128 | TRACE_TASK(t, "job_completion().\n"); | ||
129 | |||
130 | set_rt_flags(t, RT_F_SLEEP); | ||
131 | prepare_for_next_period(t); | ||
132 | } | ||
133 | |||
134 | static void pfp_tick(struct task_struct *t) | ||
135 | { | ||
136 | pfp_domain_t *pfp = local_pfp; | ||
137 | |||
138 | /* Check for inconsistency. We don't need the lock for this since | ||
139 | * ->scheduled is only changed in schedule, which obviously is not | ||
140 | * executing in parallel on this CPU | ||
141 | */ | ||
142 | BUG_ON(is_realtime(t) && t != pfp->scheduled); | ||
143 | |||
144 | if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { | ||
145 | if (!is_np(t)) { | ||
146 | litmus_reschedule_local(); | ||
147 | TRACE("pfp_scheduler_tick: " | ||
148 | "%d is preemptable " | ||
149 | " => FORCE_RESCHED\n", t->pid); | ||
150 | } else if (is_user_np(t)) { | ||
151 | TRACE("pfp_scheduler_tick: " | ||
152 | "%d is non-preemptable, " | ||
153 | "preemption delayed.\n", t->pid); | ||
154 | request_exit_np(t); | ||
155 | } | ||
156 | } | ||
157 | } | ||
158 | |||
159 | static struct task_struct* pfp_schedule(struct task_struct * prev) | ||
160 | { | ||
161 | pfp_domain_t* pfp = local_pfp; | ||
162 | struct task_struct* next; | ||
163 | |||
164 | int out_of_time, sleep, preempt, np, exists, blocks, resched, migrate; | ||
165 | |||
166 | raw_spin_lock(&pfp->slock); | ||
167 | |||
168 | /* sanity checking | ||
169 | * Unlike in G-EDF, when a task exits (is dead), | ||
170 | * pfp->scheduled may be NULL while prev _is_ real-time. | ||
171 | */ | ||
172 | BUG_ON(pfp->scheduled && pfp->scheduled != prev); | ||
173 | BUG_ON(pfp->scheduled && !is_realtime(prev)); | ||
174 | |||
175 | /* (0) Determine state */ | ||
176 | exists = pfp->scheduled != NULL; | ||
177 | blocks = exists && !is_running(pfp->scheduled); | ||
178 | out_of_time = exists && | ||
179 | budget_enforced(pfp->scheduled) && | ||
180 | budget_exhausted(pfp->scheduled); | ||
181 | np = exists && is_np(pfp->scheduled); | ||
182 | sleep = exists && get_rt_flags(pfp->scheduled) == RT_F_SLEEP; | ||
183 | migrate = exists && get_partition(pfp->scheduled) != pfp->cpu; | ||
184 | preempt = migrate || fp_preemption_needed(&pfp->ready_queue, prev); | ||
185 | |||
186 | /* If we need to preempt do so. | ||
187 | * The following checks set resched to 1 in case of special | ||
188 | * circumstances. | ||
189 | */ | ||
190 | resched = preempt; | ||
191 | |||
192 | /* If a task blocks we have no choice but to reschedule. | ||
193 | */ | ||
194 | if (blocks) | ||
195 | resched = 1; | ||
196 | |||
197 | /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
198 | * Multiple calls to request_exit_np() don't hurt. | ||
199 | */ | ||
200 | if (np && (out_of_time || preempt || sleep)) | ||
201 | request_exit_np(pfp->scheduled); | ||
202 | |||
203 | /* Any task that is preemptable and either exhausts its execution | ||
204 | * budget or wants to sleep completes. We may have to reschedule after | ||
205 | * this. | ||
206 | */ | ||
207 | if (!np && (out_of_time || sleep) && !blocks && !migrate) { | ||
208 | job_completion(pfp->scheduled, !sleep); | ||
209 | resched = 1; | ||
210 | } | ||
211 | |||
212 | /* The final scheduling decision. Do we need to switch for some reason? | ||
213 | * Switch if we are in RT mode and have no task or if we need to | ||
214 | * resched. | ||
215 | */ | ||
216 | next = NULL; | ||
217 | if ((!np || blocks) && (resched || !exists)) { | ||
218 | /* When preempting a task that does not block, then | ||
219 | * re-insert it into either the ready queue or the | ||
220 | * release queue (if it completed). requeue() picks | ||
221 | * the appropriate queue. | ||
222 | */ | ||
223 | if (pfp->scheduled && !blocks && !migrate) | ||
224 | requeue(pfp->scheduled, pfp); | ||
225 | next = fp_prio_take(&pfp->ready_queue); | ||
226 | } else | ||
227 | /* Only override Linux scheduler if we have a real-time task | ||
228 | * scheduled that needs to continue. | ||
229 | */ | ||
230 | if (exists) | ||
231 | next = prev; | ||
232 | |||
233 | if (next) { | ||
234 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | ||
235 | set_rt_flags(next, RT_F_RUNNING); | ||
236 | } else { | ||
237 | TRACE("becoming idle at %llu\n", litmus_clock()); | ||
238 | } | ||
239 | |||
240 | pfp->scheduled = next; | ||
241 | sched_state_task_picked(); | ||
242 | raw_spin_unlock(&pfp->slock); | ||
243 | |||
244 | return next; | ||
245 | } | ||
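
The decision logic above reduces to two predicates: a job that blocks, is preempted (including migrations), or preemptably exhausts its budget or sleeps forces a reschedule; and a new task may actually be picked only when rescheduling is needed (or no task exists) and the job is not inside a non-preemptive section, unless it is suspending anyway. A compact restatement with a couple of checks, as a sketch that assumes the flags were computed exactly as in pfp_schedule():

    #include <assert.h>

    struct sched_flags {
        int exists, blocks, out_of_time, np, sleep, preempt;
    };

    /* Do we need to reschedule at all? (preempt subsumes migrate) */
    static int needs_resched(const struct sched_flags *f)
    {
        return f->preempt || f->blocks ||
               (!f->np && (f->out_of_time || f->sleep));
    }

    /* May we pick a new task now? A non-preemptive section defers the
     * switch unless the job is suspending anyway. */
    static int may_pick_next(const struct sched_flags *f)
    {
        return (!f->np || f->blocks) && (needs_resched(f) || !f->exists);
    }

    int main(void)
    {
        /* np job out of budget: request_exit_np(), but no switch yet */
        struct sched_flags np_oot = { .exists = 1, .np = 1, .out_of_time = 1 };
        assert(!may_pick_next(&np_oot));

        /* blocked job: we have no choice but to reschedule */
        struct sched_flags blocked = { .exists = 1, .blocks = 1 };
        assert(needs_resched(&blocked) && may_pick_next(&blocked));
        return 0;
    }
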
246 | |||
247 | #ifdef CONFIG_LITMUS_LOCKING | ||
248 | |||
249 | /* prev is no longer scheduled --- see if it needs to migrate */ | ||
250 | static void pfp_finish_switch(struct task_struct *prev) | ||
251 | { | ||
252 | pfp_domain_t *to; | ||
253 | |||
254 | if (is_realtime(prev) && | ||
255 | is_running(prev) && | ||
256 | get_partition(prev) != smp_processor_id()) { | ||
257 | TRACE_TASK(prev, "needs to migrate from P%d to P%d\n", | ||
258 | smp_processor_id(), get_partition(prev)); | ||
259 | |||
260 | to = task_pfp(prev); | ||
261 | |||
262 | raw_spin_lock(&to->slock); | ||
263 | |||
264 | TRACE_TASK(prev, "adding to queue on P%d\n", to->cpu); | ||
265 | requeue(prev, to); | ||
266 | if (fp_preemption_needed(&to->ready_queue, to->scheduled)) | ||
267 | preempt(to); | ||
268 | |||
269 | raw_spin_unlock(&to->slock); | ||
270 | |||
271 | } | ||
272 | } | ||
273 | |||
274 | #endif | ||
275 | |||
276 | /* Prepare a task for running in RT mode | ||
277 | */ | ||
278 | static void pfp_task_new(struct task_struct * t, int on_rq, int running) | ||
279 | { | ||
280 | pfp_domain_t* pfp = task_pfp(t); | ||
281 | unsigned long flags; | ||
282 | |||
283 | TRACE_TASK(t, "P-FP: task new, cpu = %d\n", | ||
284 | t->rt_param.task_params.cpu); | ||
285 | |||
286 | /* setup job parameters */ | ||
287 | release_at(t, litmus_clock()); | ||
288 | |||
289 | /* The task must appear to be running while it is in the queue; | ||
290 | * otherwise, the signal-delivery code will try to wake it up, | ||
291 | * with fatal consequences. */ | ||
292 | raw_spin_lock_irqsave(&pfp->slock, flags); | ||
293 | if (running) { | ||
294 | /* there shouldn't be anything else running at the time */ | ||
295 | BUG_ON(pfp->scheduled); | ||
296 | pfp->scheduled = t; | ||
297 | } else { | ||
298 | requeue(t, pfp); | ||
299 | /* maybe we have to reschedule */ | ||
300 | pfp_preempt_check(pfp); | ||
301 | } | ||
302 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | ||
303 | } | ||
304 | |||
305 | static void pfp_task_wake_up(struct task_struct *task) | ||
306 | { | ||
307 | unsigned long flags; | ||
308 | pfp_domain_t* pfp = task_pfp(task); | ||
309 | lt_t now; | ||
310 | |||
311 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | ||
312 | raw_spin_lock_irqsave(&pfp->slock, flags); | ||
313 | |||
314 | #ifdef CONFIG_LITMUS_LOCKING | ||
315 | /* Should only be queued when processing a fake-wake up due to a | ||
316 | * migration-related state change. */ | ||
317 | if (unlikely(is_queued(task))) { | ||
318 | TRACE_TASK(task, "WARNING: waking task still queued. Is this right?\n"); | ||
319 | goto out_unlock; | ||
320 | } | ||
321 | #else | ||
322 | BUG_ON(is_queued(task)); | ||
323 | #endif | ||
324 | now = litmus_clock(); | ||
325 | if (is_tardy(task, now) | ||
326 | #ifdef CONFIG_LITMUS_LOCKING | ||
327 | /* We need to take suspensions because of semaphores into | ||
328 | * account! If a job resumes after being suspended due to acquiring | ||
329 | * a semaphore, it should never be treated as a new job release. | ||
330 | */ | ||
331 | && !is_priority_boosted(task) | ||
332 | #endif | ||
333 | ) { | ||
334 | /* new sporadic release */ | ||
335 | release_at(task, now); | ||
336 | sched_trace_task_release(task); | ||
337 | } | ||
338 | |||
339 | /* Only add to ready queue if it is not the currently-scheduled | ||
340 | * task. This could be the case if a task was woken up concurrently | ||
341 | * on a remote CPU before the executing CPU got around to actually | ||
342 | * de-scheduling the task, i.e., wake_up() raced with schedule() | ||
343 | * and won. Also, don't requeue if it is still queued, which can | ||
344 | * happen under the DPCP due to wake-ups racing with migrations. | ||
345 | */ | ||
346 | if (pfp->scheduled != task) { | ||
347 | requeue(task, pfp); | ||
348 | pfp_preempt_check(pfp); | ||
349 | } | ||
350 | |||
351 | out_unlock: | ||
352 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | ||
353 | TRACE_TASK(task, "wake up done\n"); | ||
354 | } | ||
355 | |||
356 | static void pfp_task_block(struct task_struct *t) | ||
357 | { | ||
358 | /* only running tasks can block, thus t is in no queue */ | ||
359 | TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state); | ||
360 | |||
361 | BUG_ON(!is_realtime(t)); | ||
362 | |||
363 | /* If this task blocked normally, it shouldn't be queued. The exception is | ||
364 | * if this is a simulated block()/wakeup() pair from the pull-migration code path. | ||
365 | * This should only happen if the DPCP is being used. | ||
366 | */ | ||
367 | #ifdef CONFIG_LITMUS_LOCKING | ||
368 | if (unlikely(is_queued(t))) | ||
369 | TRACE_TASK(t, "WARNING: blocking task still queued. Is this right?\n"); | ||
370 | #else | ||
371 | BUG_ON(is_queued(t)); | ||
372 | #endif | ||
373 | } | ||
374 | |||
375 | static void pfp_task_exit(struct task_struct * t) | ||
376 | { | ||
377 | unsigned long flags; | ||
378 | pfp_domain_t* pfp = task_pfp(t); | ||
379 | rt_domain_t* dom; | ||
380 | |||
381 | raw_spin_lock_irqsave(&pfp->slock, flags); | ||
382 | if (is_queued(t)) { | ||
383 | BUG(); /* This currently doesn't work. */ | ||
384 | /* dequeue */ | ||
385 | dom = task_dom(t); | ||
386 | remove(dom, t); | ||
387 | } | ||
388 | if (pfp->scheduled == t) { | ||
389 | pfp->scheduled = NULL; | ||
390 | preempt(pfp); | ||
391 | } | ||
392 | TRACE_TASK(t, "RIP, now reschedule\n"); | ||
393 | |||
394 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | ||
395 | } | ||
396 | |||
397 | #ifdef CONFIG_LITMUS_LOCKING | ||
398 | |||
399 | #include <litmus/fdso.h> | ||
400 | #include <litmus/srp.h> | ||
401 | |||
402 | static void fp_dequeue(pfp_domain_t* pfp, struct task_struct* t) | ||
403 | { | ||
404 | BUG_ON(pfp->scheduled == t && is_queued(t)); | ||
405 | if (is_queued(t)) | ||
406 | fp_prio_remove(&pfp->ready_queue, t, priority_index(t)); | ||
407 | } | ||
408 | |||
409 | static void fp_set_prio_inh(pfp_domain_t* pfp, struct task_struct* t, | ||
410 | struct task_struct* prio_inh) | ||
411 | { | ||
412 | int requeue; | ||
413 | |||
414 | if (!t || t->rt_param.inh_task == prio_inh) { | ||
415 | /* no update required */ | ||
416 | if (t) | ||
417 | TRACE_TASK(t, "no prio-inh update required\n"); | ||
418 | return; | ||
419 | } | ||
420 | |||
421 | requeue = is_queued(t); | ||
422 | TRACE_TASK(t, "prio-inh: is_queued:%d\n", requeue); | ||
423 | |||
424 | if (requeue) | ||
425 | /* first remove */ | ||
426 | fp_dequeue(pfp, t); | ||
427 | |||
428 | t->rt_param.inh_task = prio_inh; | ||
429 | |||
430 | if (requeue) | ||
431 | /* add again to the right queue */ | ||
432 | fp_prio_add(&pfp->ready_queue, t, priority_index(t)); | ||
433 | } | ||
434 | |||
435 | static int effective_agent_priority(int prio) | ||
436 | { | ||
437 | /* make sure agents have higher priority */ | ||
438 | return prio - LITMUS_MAX_PRIORITY; | ||
439 | } | ||
440 | |||
441 | static lt_t prio_point(int eprio) | ||
442 | { | ||
443 | /* make sure we have non-negative prio points */ | ||
444 | return eprio + LITMUS_MAX_PRIORITY; | ||
445 | } | ||
446 | |||
447 | static int prio_from_point(lt_t prio_point) | ||
448 | { | ||
449 | return ((int) prio_point) - LITMUS_MAX_PRIORITY; | ||
450 | } | ||
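
The three helpers above implement a simple offset scheme: a synchronization agent's effective priority is its base priority minus LITMUS_MAX_PRIORITY, so agents always outrank regular tasks, and prio_point()/prio_from_point() shift that range back to non-negative values usable as a wait-queue key. A quick arithmetic check (LITMUS_MAX_PRIORITY = 512 is an assumed value for illustration; the real constant lives in the LITMUS^RT headers):

    #include <assert.h>
    #include <stdint.h>

    #define LITMUS_MAX_PRIORITY 512 /* assumed value, illustration only */

    typedef uint64_t lt_t;

    static int effective_agent_priority(int prio) { return prio - LITMUS_MAX_PRIORITY; }
    static lt_t prio_point(int eprio)             { return eprio + LITMUS_MAX_PRIORITY; }
    static int prio_from_point(lt_t point)        { return ((int) point) - LITMUS_MAX_PRIORITY; }

    int main(void)
    {
        int eprio = effective_agent_priority(5);  /* 5 - 512 = -507 */
        assert(eprio < 0);                        /* agents outrank all tasks */
        assert(prio_point(eprio) == 5);           /* non-negative queue key */
        assert(prio_from_point(prio_point(eprio)) == eprio); /* round-trip */
        return 0;
    }
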
451 | |||
452 | static void boost_priority(struct task_struct* t, lt_t priority_point) | ||
453 | { | ||
454 | unsigned long flags; | ||
455 | pfp_domain_t* pfp = task_pfp(t); | ||
456 | |||
457 | raw_spin_lock_irqsave(&pfp->slock, flags); | ||
458 | |||
459 | |||
460 | TRACE_TASK(t, "priority boosted at %llu\n", litmus_clock()); | ||
461 | |||
462 | tsk_rt(t)->priority_boosted = 1; | ||
463 | /* tie-break by protocol-specific priority point */ | ||
464 | tsk_rt(t)->boost_start_time = priority_point; | ||
465 | |||
466 | if (pfp->scheduled != t) { | ||
467 | /* holder may be queued: first stop queue changes */ | ||
468 | raw_spin_lock(&pfp->domain.release_lock); | ||
469 | if (is_queued(t) && | ||
470 | /* If it is queued, then we need to re-order. */ | ||
471 | bheap_decrease(fp_ready_order, tsk_rt(t)->heap_node) && | ||
472 | /* If we bubbled to the top, then we need to check for preemptions. */ | ||
473 | fp_preemption_needed(&pfp->ready_queue, pfp->scheduled)) | ||
474 | preempt(pfp); | ||
475 | raw_spin_unlock(&pfp->domain.release_lock); | ||
476 | } /* else: nothing to do since the job is not queued while scheduled */ | ||
477 | |||
478 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | ||
479 | } | ||
480 | |||
481 | static void unboost_priority(struct task_struct* t) | ||
482 | { | ||
483 | unsigned long flags; | ||
484 | pfp_domain_t* pfp = task_pfp(t); | ||
485 | lt_t now; | ||
486 | |||
487 | raw_spin_lock_irqsave(&pfp->slock, flags); | ||
488 | now = litmus_clock(); | ||
489 | |||
490 | /* assumption: this only happens when the job is scheduled */ | ||
491 | BUG_ON(pfp->scheduled != t); | ||
492 | |||
493 | TRACE_TASK(t, "priority restored at %llu\n", now); | ||
494 | |||
495 | /* priority boosted jobs must be scheduled */ | ||
496 | BUG_ON(pfp->scheduled != t); | ||
497 | |||
498 | tsk_rt(t)->priority_boosted = 0; | ||
499 | tsk_rt(t)->boost_start_time = 0; | ||
500 | |||
501 | /* check if this changes anything */ | ||
502 | if (fp_preemption_needed(&pfp->ready_queue, pfp->scheduled)) | ||
503 | preempt(pfp); | ||
504 | |||
505 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | ||
506 | } | ||
507 | |||
508 | /* ******************** SRP support ************************ */ | ||
509 | |||
510 | static unsigned int pfp_get_srp_prio(struct task_struct* t) | ||
511 | { | ||
512 | return get_priority(t); | ||
513 | } | ||
514 | |||
515 | /* ******************** FMLP support ********************** */ | ||
516 | |||
517 | struct fmlp_semaphore { | ||
518 | struct litmus_lock litmus_lock; | ||
519 | |||
520 | /* current resource holder */ | ||
521 | struct task_struct *owner; | ||
522 | |||
523 | /* FIFO queue of waiting tasks */ | ||
524 | wait_queue_head_t wait; | ||
525 | }; | ||
526 | |||
527 | static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock) | ||
528 | { | ||
529 | return container_of(lock, struct fmlp_semaphore, litmus_lock); | ||
530 | } | ||
531 | int pfp_fmlp_lock(struct litmus_lock* l) | ||
532 | { | ||
533 | struct task_struct* t = current; | ||
534 | struct fmlp_semaphore *sem = fmlp_from_lock(l); | ||
535 | wait_queue_t wait; | ||
536 | unsigned long flags; | ||
537 | lt_t time_of_request; | ||
538 | |||
539 | if (!is_realtime(t)) | ||
540 | return -EPERM; | ||
541 | |||
542 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
543 | |||
544 | /* tie-break by this point in time */ | ||
545 | time_of_request = litmus_clock(); | ||
546 | |||
547 | /* Priority-boost ourself *before* we suspend so that | ||
548 | * our priority is boosted when we resume. */ | ||
549 | boost_priority(t, time_of_request); | ||
550 | |||
551 | if (sem->owner) { | ||
552 | /* resource is not free => must suspend and wait */ | ||
553 | |||
554 | init_waitqueue_entry(&wait, t); | ||
555 | |||
556 | /* FIXME: interruptible would be nice some day */ | ||
557 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
558 | |||
559 | __add_wait_queue_tail_exclusive(&sem->wait, &wait); | ||
560 | |||
561 | TS_LOCK_SUSPEND; | ||
562 | |||
563 | /* release lock before sleeping */ | ||
564 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
565 | |||
566 | /* We depend on the FIFO order. Thus, we don't need to recheck | ||
567 | * when we wake up; we are guaranteed to have the lock since | ||
568 | * there is only one wake up per release. | ||
569 | */ | ||
570 | |||
571 | schedule(); | ||
572 | |||
573 | TS_LOCK_RESUME; | ||
574 | |||
575 | /* Since we hold the lock, no other task will change | ||
576 | * ->owner. We can thus check it without acquiring the spin | ||
577 | * lock. */ | ||
578 | BUG_ON(sem->owner != t); | ||
579 | } else { | ||
580 | /* it's ours now */ | ||
581 | sem->owner = t; | ||
582 | |||
583 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
584 | } | ||
585 | |||
586 | return 0; | ||
587 | } | ||
588 | |||
589 | int pfp_fmlp_unlock(struct litmus_lock* l) | ||
590 | { | ||
591 | struct task_struct *t = current, *next; | ||
592 | struct fmlp_semaphore *sem = fmlp_from_lock(l); | ||
593 | unsigned long flags; | ||
594 | int err = 0; | ||
595 | |||
596 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
597 | |||
598 | if (sem->owner != t) { | ||
599 | err = -EINVAL; | ||
600 | goto out; | ||
601 | } | ||
602 | |||
603 | /* we lose the benefit of priority boosting */ | ||
604 | |||
605 | unboost_priority(t); | ||
606 | |||
607 | /* check if there are jobs waiting for this resource */ | ||
608 | next = __waitqueue_remove_first(&sem->wait); | ||
609 | if (next) { | ||
610 | /* next becomes the resource holder */ | ||
611 | sem->owner = next; | ||
612 | |||
613 | /* Wake up next. The waiting job is already priority-boosted. */ | ||
614 | wake_up_process(next); | ||
615 | } else | ||
616 | /* resource becomes available */ | ||
617 | sem->owner = NULL; | ||
618 | |||
619 | out: | ||
620 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
621 | return err; | ||
622 | } | ||
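
Note the handoff discipline in pfp_fmlp_unlock(): ownership is transferred directly to the first waiter before it is woken, which is why pfp_fmlp_lock() can assert sem->owner == t right after schedule() without re-contending. A single-threaded model of this direct-handoff FIFO, purely for illustration:

    #include <assert.h>
    #include <stddef.h>

    #define MAX_WAIT 8

    struct fifo_sem {
        int owner;          /* task id, or -1 if free */
        int wait[MAX_WAIT]; /* FIFO of waiting task ids */
        size_t head, tail;
    };

    static int sem_lock(struct fifo_sem *s, int t)
    {
        if (s->owner < 0) {
            s->owner = t;   /* uncontended: it's ours now */
            return 1;       /* acquired immediately */
        }
        s->wait[s->tail++ % MAX_WAIT] = t; /* suspend in FIFO order */
        return 0;
    }

    static void sem_unlock(struct fifo_sem *s)
    {
        if (s->head != s->tail) /* hand off directly to first waiter */
            s->owner = s->wait[s->head++ % MAX_WAIT];
        else
            s->owner = -1;      /* resource becomes available */
    }

    int main(void)
    {
        struct fifo_sem s = { .owner = -1 };
        assert(sem_lock(&s, 1) == 1); /* T1 acquires */
        assert(sem_lock(&s, 2) == 0); /* T2 suspends */
        sem_unlock(&s);               /* T1 releases: direct handoff */
        assert(s.owner == 2);         /* T2 owns it when it resumes */
        return 0;
    }
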
623 | |||
624 | int pfp_fmlp_close(struct litmus_lock* l) | ||
625 | { | ||
626 | struct task_struct *t = current; | ||
627 | struct fmlp_semaphore *sem = fmlp_from_lock(l); | ||
628 | unsigned long flags; | ||
629 | |||
630 | int owner; | ||
631 | |||
632 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
633 | |||
634 | owner = sem->owner == t; | ||
635 | |||
636 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
637 | |||
638 | if (owner) | ||
639 | pfp_fmlp_unlock(l); | ||
640 | |||
641 | return 0; | ||
642 | } | ||
643 | |||
644 | void pfp_fmlp_free(struct litmus_lock* lock) | ||
645 | { | ||
646 | kfree(fmlp_from_lock(lock)); | ||
647 | } | ||
648 | |||
649 | static struct litmus_lock_ops pfp_fmlp_lock_ops = { | ||
650 | .close = pfp_fmlp_close, | ||
651 | .lock = pfp_fmlp_lock, | ||
652 | .unlock = pfp_fmlp_unlock, | ||
653 | .deallocate = pfp_fmlp_free, | ||
654 | }; | ||
655 | |||
656 | static struct litmus_lock* pfp_new_fmlp(void) | ||
657 | { | ||
658 | struct fmlp_semaphore* sem; | ||
659 | |||
660 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
661 | if (!sem) | ||
662 | return NULL; | ||
663 | |||
664 | sem->owner = NULL; | ||
665 | init_waitqueue_head(&sem->wait); | ||
666 | sem->litmus_lock.ops = &pfp_fmlp_lock_ops; | ||
667 | |||
668 | return &sem->litmus_lock; | ||
669 | } | ||
670 | |||
671 | /* ******************** MPCP support ********************** */ | ||
672 | |||
673 | struct mpcp_semaphore { | ||
674 | struct litmus_lock litmus_lock; | ||
675 | |||
676 | /* current resource holder */ | ||
677 | struct task_struct *owner; | ||
678 | |||
679 | /* priority queue of waiting tasks */ | ||
680 | wait_queue_head_t wait; | ||
681 | |||
682 | /* priority ceiling per cpu */ | ||
683 | unsigned int prio_ceiling[NR_CPUS]; | ||
684 | |||
685 | /* should jobs spin "virtually" for this resource? */ | ||
686 | int vspin; | ||
687 | }; | ||
688 | |||
689 | #define OMEGA_CEILING UINT_MAX | ||
690 | |||
691 | /* Since jobs spin "virtually" while waiting to acquire a lock, | ||
692 | * they first must acquire a local per-cpu resource. | ||
693 | */ | ||
694 | static DEFINE_PER_CPU(wait_queue_head_t, mpcpvs_vspin_wait); | ||
695 | static DEFINE_PER_CPU(struct task_struct*, mpcpvs_vspin); | ||
696 | |||
697 | /* called with preemptions off <=> no local modifications */ | ||
698 | static void mpcp_vspin_enter(void) | ||
699 | { | ||
700 | struct task_struct* t = current; | ||
701 | |||
702 | while (1) { | ||
703 | if (__get_cpu_var(mpcpvs_vspin) == NULL) { | ||
704 | /* good, we get to issue our request */ | ||
705 | __get_cpu_var(mpcpvs_vspin) = t; | ||
706 | break; | ||
707 | } else { | ||
708 | /* some job is spinning => enqueue in request queue */ | ||
709 | prio_wait_queue_t wait; | ||
710 | wait_queue_head_t* vspin = &__get_cpu_var(mpcpvs_vspin_wait); | ||
711 | unsigned long flags; | ||
712 | |||
713 | /* ordered by regular priority */ | ||
714 | init_prio_waitqueue_entry(&wait, t, prio_point(get_priority(t))); | ||
715 | |||
716 | spin_lock_irqsave(&vspin->lock, flags); | ||
717 | |||
718 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
719 | |||
720 | __add_wait_queue_prio_exclusive(vspin, &wait); | ||
721 | |||
722 | spin_unlock_irqrestore(&vspin->lock, flags); | ||
723 | |||
724 | TS_LOCK_SUSPEND; | ||
725 | |||
726 | preempt_enable_no_resched(); | ||
727 | |||
728 | schedule(); | ||
729 | |||
730 | preempt_disable(); | ||
731 | |||
732 | TS_LOCK_RESUME; | ||
733 | /* Recheck if we got it --- some higher-priority process might | ||
734 | * have swooped in. */ | ||
735 | } | ||
736 | } | ||
737 | /* ok, now it is ours */ | ||
738 | } | ||
739 | |||
740 | /* called with preemptions off */ | ||
741 | static void mpcp_vspin_exit(void) | ||
742 | { | ||
743 | struct task_struct* t = current, *next; | ||
744 | unsigned long flags; | ||
745 | wait_queue_head_t* vspin = &__get_cpu_var(mpcpvs_vspin_wait); | ||
746 | |||
747 | BUG_ON(__get_cpu_var(mpcpvs_vspin) != t); | ||
748 | |||
749 | /* no spinning job */ | ||
750 | __get_cpu_var(mpcpvs_vspin) = NULL; | ||
751 | |||
752 | /* see if anyone is waiting for us to stop "spinning" */ | ||
753 | spin_lock_irqsave(&vspin->lock, flags); | ||
754 | next = __waitqueue_remove_first(vspin); | ||
755 | |||
756 | if (next) | ||
757 | wake_up_process(next); | ||
758 | |||
759 | spin_unlock_irqrestore(&vspin->lock, flags); | ||
760 | } | ||
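
Virtual spinning serializes global requests per processor: at most one job per CPU may have a pending global request at a time, and a job that wants to issue one while the per-CPU slot is taken suspends on a priority-ordered queue. A greatly simplified, single-threaded sketch of the slot invariant (the real code additionally rechecks after waking, since a higher-priority job may swoop in):

    #include <assert.h>

    #define NTASKS 4

    static int vspin_slot = -1; /* task id holding the slot, or -1 */
    static int queued[NTASKS];  /* queued[t] = priority, or -1 */

    /* Lower value = higher priority, as in fixed-priority scheduling. */
    static int vspin_enter(int t, int prio)
    {
        if (vspin_slot < 0) {
            vspin_slot = t;     /* good, we get to issue our request */
            return 1;
        }
        queued[t] = prio;       /* suspend, ordered by priority */
        return 0;
    }

    static void vspin_exit(void)
    {
        int best = -1, t;
        vspin_slot = -1;
        for (t = 0; t < NTASKS; t++) /* wake highest-priority waiter */
            if (queued[t] >= 0 && (best < 0 || queued[t] < queued[best]))
                best = t;
        if (best >= 0) {
            queued[best] = -1;
            vspin_slot = best;  /* it retries and wins in this model */
        }
    }

    int main(void)
    {
        for (int t = 0; t < NTASKS; t++) queued[t] = -1;
        assert(vspin_enter(0, 5));  /* T0 takes the slot */
        assert(!vspin_enter(1, 3)); /* T1 suspends (prio 3) */
        assert(!vspin_enter(2, 1)); /* T2 suspends (prio 1, higher) */
        vspin_exit();               /* T0 done: highest waiter is next */
        assert(vspin_slot == 2);
        return 0;
    }
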
761 | |||
762 | static inline struct mpcp_semaphore* mpcp_from_lock(struct litmus_lock* lock) | ||
763 | { | ||
764 | return container_of(lock, struct mpcp_semaphore, litmus_lock); | ||
765 | } | ||
766 | |||
767 | int pfp_mpcp_lock(struct litmus_lock* l) | ||
768 | { | ||
769 | struct task_struct* t = current; | ||
770 | struct mpcp_semaphore *sem = mpcp_from_lock(l); | ||
771 | prio_wait_queue_t wait; | ||
772 | unsigned long flags; | ||
773 | |||
774 | if (!is_realtime(t)) | ||
775 | return -EPERM; | ||
776 | |||
777 | preempt_disable(); | ||
778 | |||
779 | if (sem->vspin) | ||
780 | mpcp_vspin_enter(); | ||
781 | |||
782 | /* Priority-boost ourself *before* we suspend so that | ||
783 | * our priority is boosted when we resume. Use the priority | ||
784 | * ceiling for the local partition. */ | ||
785 | boost_priority(t, sem->prio_ceiling[get_partition(t)]); | ||
786 | |||
787 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
788 | |||
789 | preempt_enable_no_resched(); | ||
790 | |||
791 | if (sem->owner) { | ||
792 | /* resource is not free => must suspend and wait */ | ||
793 | |||
794 | /* ordered by regular priority */ | ||
795 | init_prio_waitqueue_entry(&wait, t, prio_point(get_priority(t))); | ||
796 | |||
797 | /* FIXME: interruptible would be nice some day */ | ||
798 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
799 | |||
800 | __add_wait_queue_prio_exclusive(&sem->wait, &wait); | ||
801 | |||
802 | TS_LOCK_SUSPEND; | ||
803 | |||
804 | /* release lock before sleeping */ | ||
805 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
806 | |||
807 | /* The wait queue is priority-ordered, and there is exactly one | ||
808 | * wake-up per release, so we don't need to recheck when we wake | ||
809 | * up: we are guaranteed to have been handed the lock. | ||
810 | */ | ||
811 | |||
812 | schedule(); | ||
813 | |||
814 | TS_LOCK_RESUME; | ||
815 | |||
816 | /* Since we hold the lock, no other task will change | ||
817 | * ->owner. We can thus check it without acquiring the spin | ||
818 | * lock. */ | ||
819 | BUG_ON(sem->owner != t); | ||
820 | } else { | ||
821 | /* it's ours now */ | ||
822 | sem->owner = t; | ||
823 | |||
824 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
825 | } | ||
826 | |||
827 | return 0; | ||
828 | } | ||
829 | |||
830 | int pfp_mpcp_unlock(struct litmus_lock* l) | ||
831 | { | ||
832 | struct task_struct *t = current, *next; | ||
833 | struct mpcp_semaphore *sem = mpcp_from_lock(l); | ||
834 | unsigned long flags; | ||
835 | int err = 0; | ||
836 | |||
837 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
838 | |||
839 | if (sem->owner != t) { | ||
840 | err = -EINVAL; | ||
841 | goto out; | ||
842 | } | ||
843 | |||
844 | /* we lose the benefit of priority boosting */ | ||
845 | |||
846 | unboost_priority(t); | ||
847 | |||
848 | /* check if there are jobs waiting for this resource */ | ||
849 | next = __waitqueue_remove_first(&sem->wait); | ||
850 | if (next) { | ||
851 | /* next becomes the resource holder */ | ||
852 | sem->owner = next; | ||
853 | |||
854 | /* Wake up next. The waiting job is already priority-boosted. */ | ||
855 | wake_up_process(next); | ||
856 | } else | ||
857 | /* resource becomes available */ | ||
858 | sem->owner = NULL; | ||
859 | |||
860 | out: | ||
861 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
862 | |||
863 | if (sem->vspin && err == 0) { | ||
864 | preempt_disable(); | ||
865 | mpcp_vspin_exit(); | ||
866 | preempt_enable(); | ||
867 | } | ||
868 | |||
869 | return err; | ||
870 | } | ||
871 | |||
872 | int pfp_mpcp_open(struct litmus_lock* l, void* config) | ||
873 | { | ||
874 | struct task_struct *t = current; | ||
875 | struct mpcp_semaphore *sem = mpcp_from_lock(l); | ||
876 | int cpu, local_cpu; | ||
877 | unsigned long flags; | ||
878 | |||
879 | if (!is_realtime(t)) | ||
880 | /* we need to know the real-time priority */ | ||
881 | return -EPERM; | ||
882 | |||
883 | local_cpu = get_partition(t); | ||
884 | |||
885 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
886 | |||
887 | for (cpu = 0; cpu < NR_CPUS; cpu++) | ||
888 | if (cpu != local_cpu) | ||
889 | { | ||
890 | sem->prio_ceiling[cpu] = min(sem->prio_ceiling[cpu], | ||
891 | get_priority(t)); | ||
892 | TRACE_CUR("priority ceiling for sem %p is now %d on cpu %d\n", | ||
893 | sem, sem->prio_ceiling[cpu], cpu); | ||
894 | } | ||
895 | |||
896 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
897 | |||
898 | return 0; | ||
899 | } | ||
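
pfp_mpcp_open() maintains the MPCP priority ceilings: on every processor except the caller's own partition, the ceiling drops to the highest (numerically smallest) priority among remote users of the semaphore. A worked example, assuming two CPUs and the OMEGA_CEILING sentinel from above:

    #include <assert.h>
    #include <limits.h>

    #define NR_CPUS 2
    #define OMEGA_CEILING UINT_MAX

    static unsigned int ceiling[NR_CPUS] = { OMEGA_CEILING, OMEGA_CEILING };

    /* Mirrors the loop in pfp_mpcp_open(): lower the ceiling on every
     * CPU other than the opener's local partition. */
    static void mpcp_open(int local_cpu, unsigned int prio)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            if (cpu != local_cpu && prio < ceiling[cpu])
                ceiling[cpu] = prio;
    }

    int main(void)
    {
        mpcp_open(0, 4); /* task with prio 4 opens the sem on CPU 0 */
        mpcp_open(1, 2); /* task with prio 2 opens the sem on CPU 1 */

        assert(ceiling[0] == 2); /* CPU 0 sees the remote prio-2 user */
        assert(ceiling[1] == 4); /* CPU 1 sees the remote prio-4 user */
        return 0;
    }
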
900 | |||
901 | int pfp_mpcp_close(struct litmus_lock* l) | ||
902 | { | ||
903 | struct task_struct *t = current; | ||
904 | struct mpcp_semaphore *sem = mpcp_from_lock(l); | ||
905 | unsigned long flags; | ||
906 | |||
907 | int owner; | ||
908 | |||
909 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
910 | |||
911 | owner = sem->owner == t; | ||
912 | |||
913 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
914 | |||
915 | if (owner) | ||
916 | pfp_mpcp_unlock(l); | ||
917 | |||
918 | return 0; | ||
919 | } | ||
920 | |||
921 | void pfp_mpcp_free(struct litmus_lock* lock) | ||
922 | { | ||
923 | kfree(mpcp_from_lock(lock)); | ||
924 | } | ||
925 | |||
926 | static struct litmus_lock_ops pfp_mpcp_lock_ops = { | ||
927 | .close = pfp_mpcp_close, | ||
928 | .lock = pfp_mpcp_lock, | ||
929 | .open = pfp_mpcp_open, | ||
930 | .unlock = pfp_mpcp_unlock, | ||
931 | .deallocate = pfp_mpcp_free, | ||
932 | }; | ||
933 | |||
934 | static struct litmus_lock* pfp_new_mpcp(int vspin) | ||
935 | { | ||
936 | struct mpcp_semaphore* sem; | ||
937 | int cpu; | ||
938 | |||
939 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
940 | if (!sem) | ||
941 | return NULL; | ||
942 | |||
943 | sem->owner = NULL; | ||
944 | init_waitqueue_head(&sem->wait); | ||
945 | sem->litmus_lock.ops = &pfp_mpcp_lock_ops; | ||
946 | |||
947 | for (cpu = 0; cpu < NR_CPUS; cpu++) | ||
948 | sem->prio_ceiling[cpu] = OMEGA_CEILING; | ||
949 | |||
950 | /* mark as virtual spinning */ | ||
951 | sem->vspin = vspin; | ||
952 | |||
953 | return &sem->litmus_lock; | ||
954 | } | ||
955 | |||
956 | |||
957 | /* ******************** PCP support ********************** */ | ||
958 | |||
959 | |||
960 | struct pcp_semaphore { | ||
961 | struct litmus_lock litmus_lock; | ||
962 | |||
963 | struct list_head ceiling; | ||
964 | |||
965 | /* current resource holder */ | ||
966 | struct task_struct *owner; | ||
967 | |||
968 | /* priority ceiling --- can be negative due to DPCP support */ | ||
969 | int prio_ceiling; | ||
970 | |||
971 | /* on which processor is this PCP semaphore allocated? */ | ||
972 | int on_cpu; | ||
973 | }; | ||
974 | |||
975 | static inline struct pcp_semaphore* pcp_from_lock(struct litmus_lock* lock) | ||
976 | { | ||
977 | return container_of(lock, struct pcp_semaphore, litmus_lock); | ||
978 | } | ||
979 | |||
980 | |||
981 | struct pcp_state { | ||
982 | struct list_head system_ceiling; | ||
983 | |||
984 | /* highest-priority waiting task */ | ||
985 | struct task_struct* hp_waiter; | ||
986 | |||
987 | /* list of jobs waiting to get past the system ceiling */ | ||
988 | wait_queue_head_t ceiling_blocked; | ||
989 | }; | ||
990 | |||
991 | static void pcp_init_state(struct pcp_state* s) | ||
992 | { | ||
993 | INIT_LIST_HEAD(&s->system_ceiling); | ||
994 | s->hp_waiter = NULL; | ||
995 | init_waitqueue_head(&s->ceiling_blocked); | ||
996 | } | ||
997 | |||
998 | static DEFINE_PER_CPU(struct pcp_state, pcp_state); | ||
999 | |||
1000 | /* assumes preemptions are off */ | ||
1001 | static struct pcp_semaphore* pcp_get_ceiling(void) | ||
1002 | { | ||
1003 | struct list_head* top = __get_cpu_var(pcp_state).system_ceiling.next; | ||
1004 | |||
1005 | if (top) | ||
1006 | return list_entry(top, struct pcp_semaphore, ceiling); | ||
1007 | else | ||
1008 | return NULL; | ||
1009 | } | ||
1010 | |||
1011 | /* assumes preempt off */ | ||
1012 | static void pcp_add_ceiling(struct pcp_semaphore* sem) | ||
1013 | { | ||
1014 | struct list_head *pos; | ||
1015 | struct list_head *in_use = &__get_cpu_var(pcp_state).system_ceiling; | ||
1016 | struct pcp_semaphore* held; | ||
1017 | |||
1018 | BUG_ON(sem->on_cpu != smp_processor_id()); | ||
1019 | BUG_ON(in_list(&sem->ceiling)); | ||
1020 | |||
1021 | list_for_each(pos, in_use) { | ||
1022 | held = list_entry(pos, struct pcp_semaphore, ceiling); | ||
1023 | if (held->prio_ceiling >= sem->prio_ceiling) { | ||
1024 | __list_add(&sem->ceiling, pos->prev, pos); | ||
1025 | return; | ||
1026 | } | ||
1027 | } | ||
1028 | |||
1029 | /* we hit the end of the list */ | ||
1030 | |||
1031 | list_add_tail(&sem->ceiling, in_use); | ||
1032 | } | ||
1033 | |||
1034 | /* assumes preempt off */ | ||
1035 | static int pcp_exceeds_ceiling(struct pcp_semaphore* ceiling, | ||
1036 | struct task_struct* task, | ||
1037 | int effective_prio) | ||
1038 | { | ||
1039 | return ceiling == NULL || | ||
1040 | ceiling->prio_ceiling > effective_prio || | ||
1041 | ceiling->owner == task; | ||
1042 | } | ||
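
This is the classic PCP locking rule: a job may proceed only if no local resource is held, its priority strictly exceeds the current system ceiling, or it itself owns the semaphore that sets the ceiling. A small standalone check of the three cases (lower numbers again mean higher priority):

    #include <assert.h>
    #include <stddef.h>

    struct pcp_sem { int prio_ceiling; int owner; /* task id, -1 = none */ };

    static int exceeds_ceiling(const struct pcp_sem *ceiling, int task, int prio)
    {
        return ceiling == NULL ||              /* no resource held locally */
               ceiling->prio_ceiling > prio || /* strictly beats the ceiling */
               ceiling->owner == task;         /* it owns the ceiling itself */
    }

    int main(void)
    {
        struct pcp_sem held = { .prio_ceiling = 3, .owner = 7 };

        assert(exceeds_ceiling(NULL, 1, 5));   /* nothing held: always ok */
        assert(exceeds_ceiling(&held, 1, 2));  /* prio 2 beats ceiling 3 */
        assert(!exceeds_ceiling(&held, 1, 3)); /* equal: blocked (deadlock avoidance) */
        assert(exceeds_ceiling(&held, 7, 9));  /* the owner passes regardless */
        return 0;
    }
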
1043 | |||
1044 | /* assumes preempt off */ | ||
1045 | static void pcp_priority_inheritance(void) | ||
1046 | { | ||
1047 | unsigned long flags; | ||
1048 | pfp_domain_t* pfp = local_pfp; | ||
1049 | |||
1050 | struct pcp_semaphore* ceiling = pcp_get_ceiling(); | ||
1051 | struct task_struct *blocker, *blocked; | ||
1052 | |||
1053 | blocker = ceiling ? ceiling->owner : NULL; | ||
1054 | blocked = __get_cpu_var(pcp_state).hp_waiter; | ||
1055 | |||
1056 | raw_spin_lock_irqsave(&pfp->slock, flags); | ||
1057 | |||
1058 | /* Current is no longer inheriting anything by default. This should be | ||
1059 | * the currently scheduled job, and hence not currently queued. */ | ||
1060 | BUG_ON(current != pfp->scheduled); | ||
1061 | |||
1062 | fp_set_prio_inh(pfp, current, NULL); | ||
1063 | fp_set_prio_inh(pfp, blocked, NULL); | ||
1064 | fp_set_prio_inh(pfp, blocker, NULL); | ||
1065 | |||
1066 | |||
1067 | /* Let blocking job inherit priority of blocked job, if required. */ | ||
1068 | if (blocker && blocked && | ||
1069 | fp_higher_prio(blocked, blocker)) { | ||
1070 | TRACE_TASK(blocker, "PCP inherits from %s/%d (prio %u -> %u) \n", | ||
1071 | blocked->comm, blocked->pid, | ||
1072 | get_priority(blocker), get_priority(blocked)); | ||
1073 | fp_set_prio_inh(pfp, blocker, blocked); | ||
1074 | } | ||
1075 | |||
1076 | /* check if anything changed */ | ||
1077 | if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled)) | ||
1078 | preempt(pfp); | ||
1079 | |||
1080 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | ||
1081 | } | ||
1082 | |||
1083 | /* called with preemptions off */ | ||
1084 | static void pcp_raise_ceiling(struct pcp_semaphore* sem, | ||
1085 | int effective_prio) | ||
1086 | { | ||
1087 | struct task_struct* t = current; | ||
1088 | struct pcp_semaphore* ceiling; | ||
1089 | prio_wait_queue_t wait; | ||
1090 | unsigned int waiting_higher_prio; | ||
1091 | |||
1092 | do { | ||
1093 | ceiling = pcp_get_ceiling(); | ||
1094 | if (pcp_exceeds_ceiling(ceiling, t, effective_prio)) | ||
1095 | break; | ||
1096 | |||
1097 | TRACE_CUR("PCP ceiling-blocked, wanted sem %p, but %s/%d has the ceiling \n", | ||
1098 | sem, ceiling->owner->comm, ceiling->owner->pid); | ||
1099 | |||
1100 | /* we need to wait until the ceiling is lowered */ | ||
1101 | |||
1102 | /* enqueue in priority order */ | ||
1103 | init_prio_waitqueue_entry(&wait, t, prio_point(effective_prio)); | ||
1104 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
1105 | waiting_higher_prio = add_wait_queue_prio_exclusive( | ||
1106 | &__get_cpu_var(pcp_state).ceiling_blocked, &wait); | ||
1107 | |||
1108 | if (waiting_higher_prio == 0) { | ||
1109 | TRACE_CUR("PCP new highest-prio waiter => prio inheritance\n"); | ||
1110 | |||
1111 | /* we are the new highest-priority waiting job | ||
1112 | * => update inheritance */ | ||
1113 | __get_cpu_var(pcp_state).hp_waiter = t; | ||
1114 | pcp_priority_inheritance(); | ||
1115 | } | ||
1116 | |||
1117 | TS_LOCK_SUSPEND; | ||
1118 | |||
1119 | preempt_enable_no_resched(); | ||
1120 | schedule(); | ||
1121 | preempt_disable(); | ||
1122 | |||
1123 | /* pcp_resume_unblocked() removed us from wait queue */ | ||
1124 | |||
1125 | TS_LOCK_RESUME; | ||
1126 | } while(1); | ||
1127 | |||
1128 | TRACE_CUR("PCP got the ceiling and sem %p\n", sem); | ||
1129 | |||
1130 | /* We are good to go. The semaphore should be available. */ | ||
1131 | BUG_ON(sem->owner != NULL); | ||
1132 | |||
1133 | sem->owner = t; | ||
1134 | |||
1135 | pcp_add_ceiling(sem); | ||
1136 | } | ||
1137 | |||
1138 | static void pcp_resume_unblocked(void) | ||
1139 | { | ||
1140 | wait_queue_head_t *blocked = &__get_cpu_var(pcp_state).ceiling_blocked; | ||
1141 | unsigned long flags; | ||
1142 | prio_wait_queue_t* q; | ||
1143 | struct task_struct* t = NULL; | ||
1144 | |||
1145 | struct pcp_semaphore* ceiling = pcp_get_ceiling(); | ||
1146 | |||
1147 | spin_lock_irqsave(&blocked->lock, flags); | ||
1148 | |||
1149 | while (waitqueue_active(blocked)) { | ||
1150 | /* check first == highest-priority waiting job */ | ||
1151 | q = list_entry(blocked->task_list.next, | ||
1152 | prio_wait_queue_t, wq.task_list); | ||
1153 | t = (struct task_struct*) q->wq.private; | ||
1154 | |||
1155 | /* can it proceed now? => let it go */ | ||
1156 | if (pcp_exceeds_ceiling(ceiling, t, | ||
1157 | prio_from_point(q->priority))) { | ||
1158 | __remove_wait_queue(blocked, &q->wq); | ||
1159 | wake_up_process(t); | ||
1160 | } else { | ||
1161 | /* We are done. Update highest-priority waiter. */ | ||
1162 | __get_cpu_var(pcp_state).hp_waiter = t; | ||
1163 | goto out; | ||
1164 | } | ||
1165 | } | ||
1166 | /* If we get here, then there are no more waiting | ||
1167 | * jobs. */ | ||
1168 | __get_cpu_var(pcp_state).hp_waiter = NULL; | ||
1169 | out: | ||
1170 | spin_unlock_irqrestore(&blocked->lock, flags); | ||
1171 | } | ||
1172 | |||
1173 | /* assumes preempt off */ | ||
1174 | static void pcp_lower_ceiling(struct pcp_semaphore* sem) | ||
1175 | { | ||
1176 | BUG_ON(!in_list(&sem->ceiling)); | ||
1177 | BUG_ON(sem->owner != current); | ||
1178 | BUG_ON(sem->on_cpu != smp_processor_id()); | ||
1179 | |||
1180 | /* remove from ceiling list */ | ||
1181 | list_del(&sem->ceiling); | ||
1182 | |||
1183 | /* release */ | ||
1184 | sem->owner = NULL; | ||
1185 | |||
1186 | TRACE_CUR("PCP released sem %p\n", sem); | ||
1187 | |||
1188 | /* Wake up all ceiling-blocked jobs that now pass the ceiling. */ | ||
1189 | pcp_resume_unblocked(); | ||
1190 | |||
1191 | pcp_priority_inheritance(); | ||
1192 | } | ||
1193 | |||
1194 | static void pcp_update_prio_ceiling(struct pcp_semaphore* sem, | ||
1195 | int effective_prio) | ||
1196 | { | ||
1197 | /* This needs to be synchronized on something. | ||
1198 | * Might as well use waitqueue lock for the processor. | ||
1199 | * We assume this happens only before the task set starts execution, | ||
1200 | * (i.e., during initialization), but it may happen on multiple processors | ||
1201 | * at the same time. | ||
1202 | */ | ||
1203 | unsigned long flags; | ||
1204 | |||
1205 | struct pcp_state* s = &per_cpu(pcp_state, sem->on_cpu); | ||
1206 | |||
1207 | spin_lock_irqsave(&s->ceiling_blocked.lock, flags); | ||
1208 | |||
1209 | sem->prio_ceiling = min(sem->prio_ceiling, effective_prio); | ||
1210 | |||
1211 | spin_unlock_irqrestore(&s->ceiling_blocked.lock, flags); | ||
1212 | } | ||
1213 | |||
1214 | static void pcp_init_semaphore(struct pcp_semaphore* sem, int cpu) | ||
1215 | { | ||
1216 | sem->owner = NULL; | ||
1217 | INIT_LIST_HEAD(&sem->ceiling); | ||
1218 | sem->prio_ceiling = INT_MAX; | ||
1219 | sem->on_cpu = cpu; | ||
1220 | } | ||
1221 | |||
1222 | int pfp_pcp_lock(struct litmus_lock* l) | ||
1223 | { | ||
1224 | struct task_struct* t = current; | ||
1225 | struct pcp_semaphore *sem = pcp_from_lock(l); | ||
1226 | |||
1227 | int eprio = effective_agent_priority(get_priority(t)); | ||
1228 | int from = get_partition(t); | ||
1229 | int to = sem->on_cpu; | ||
1230 | |||
1231 | if (!is_realtime(t) || from != to) | ||
1232 | return -EPERM; | ||
1233 | |||
1234 | preempt_disable(); | ||
1235 | |||
1236 | pcp_raise_ceiling(sem, eprio); | ||
1237 | |||
1238 | preempt_enable(); | ||
1239 | |||
1240 | return 0; | ||
1241 | } | ||
1242 | |||
1243 | int pfp_pcp_unlock(struct litmus_lock* l) | ||
1244 | { | ||
1245 | struct task_struct *t = current; | ||
1246 | struct pcp_semaphore *sem = pcp_from_lock(l); | ||
1247 | |||
1248 | int err = 0; | ||
1249 | |||
1250 | preempt_disable(); | ||
1251 | |||
1252 | if (sem->on_cpu != smp_processor_id() || sem->owner != t) { | ||
1253 | err = -EINVAL; | ||
1254 | goto out; | ||
1255 | } | ||
1256 | |||
1257 | /* give it back */ | ||
1258 | pcp_lower_ceiling(sem); | ||
1259 | |||
1260 | out: | ||
1261 | preempt_enable(); | ||
1262 | |||
1263 | return err; | ||
1264 | } | ||
1265 | |||
1266 | int pfp_pcp_open(struct litmus_lock* l, void* __user config) | ||
1267 | { | ||
1268 | struct task_struct *t = current; | ||
1269 | struct pcp_semaphore *sem = pcp_from_lock(l); | ||
1270 | |||
1271 | int cpu, eprio; | ||
1272 | |||
1273 | if (!is_realtime(t)) | ||
1274 | /* we need to know the real-time priority */ | ||
1275 | return -EPERM; | ||
1276 | |||
1277 | if (get_user(cpu, (int*) config)) | ||
1278 | return -EFAULT; | ||
1279 | |||
1280 | /* make sure the resource location matches */ | ||
1281 | if (cpu != sem->on_cpu) | ||
1282 | return -EINVAL; | ||
1283 | |||
1284 | eprio = effective_agent_priority(get_priority(t)); | ||
1285 | |||
1286 | pcp_update_prio_ceiling(sem, eprio); | ||
1287 | |||
1288 | return 0; | ||
1289 | } | ||
1290 | |||
1291 | int pfp_pcp_close(struct litmus_lock* l) | ||
1292 | { | ||
1293 | struct task_struct *t = current; | ||
1294 | struct pcp_semaphore *sem = pcp_from_lock(l); | ||
1295 | |||
1296 | int owner = 0; | ||
1297 | |||
1298 | preempt_disable(); | ||
1299 | |||
1300 | if (sem->on_cpu == smp_processor_id()) | ||
1301 | owner = sem->owner == t; | ||
1302 | |||
1303 | preempt_enable(); | ||
1304 | |||
1305 | if (owner) | ||
1306 | pfp_pcp_unlock(l); | ||
1307 | |||
1308 | return 0; | ||
1309 | } | ||
1310 | |||
1311 | void pfp_pcp_free(struct litmus_lock* lock) | ||
1312 | { | ||
1313 | kfree(pcp_from_lock(lock)); | ||
1314 | } | ||
1315 | |||
1316 | |||
1317 | static struct litmus_lock_ops pfp_pcp_lock_ops = { | ||
1318 | .close = pfp_pcp_close, | ||
1319 | .lock = pfp_pcp_lock, | ||
1320 | .open = pfp_pcp_open, | ||
1321 | .unlock = pfp_pcp_unlock, | ||
1322 | .deallocate = pfp_pcp_free, | ||
1323 | }; | ||
1324 | |||
1325 | |||
1326 | static struct litmus_lock* pfp_new_pcp(int on_cpu) | ||
1327 | { | ||
1328 | struct pcp_semaphore* sem; | ||
1329 | |||
1330 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
1331 | if (!sem) | ||
1332 | return NULL; | ||
1333 | |||
1334 | sem->litmus_lock.ops = &pfp_pcp_lock_ops; | ||
1335 | pcp_init_semaphore(sem, on_cpu); | ||
1336 | |||
1337 | return &sem->litmus_lock; | ||
1338 | } | ||
1339 | |||
1340 | /* ******************** DPCP support ********************** */ | ||
1341 | |||
1342 | struct dpcp_semaphore { | ||
1343 | struct litmus_lock litmus_lock; | ||
1344 | struct pcp_semaphore pcp; | ||
1345 | int owner_cpu; | ||
1346 | }; | ||
1347 | |||
1348 | static inline struct dpcp_semaphore* dpcp_from_lock(struct litmus_lock* lock) | ||
1349 | { | ||
1350 | return container_of(lock, struct dpcp_semaphore, litmus_lock); | ||
1351 | } | ||
1352 | |||
1353 | /* called with preemptions disabled */ | ||
1354 | static void pfp_migrate_to(int target_cpu) | ||
1355 | { | ||
1356 | struct task_struct* t = current; | ||
1357 | pfp_domain_t *from; | ||
1358 | |||
1359 | if (get_partition(t) == target_cpu) | ||
1360 | return; | ||
1361 | |||
1362 | /* make sure target_cpu makes sense */ | ||
1363 | BUG_ON(!cpu_online(target_cpu)); | ||
1364 | |||
1365 | local_irq_disable(); | ||
1366 | |||
1367 | /* scheduled task should not be in any ready or release queue */ | ||
1368 | BUG_ON(is_queued(t)); | ||
1369 | |||
1370 | /* lock the source pfp domain (the target is locked in pfp_finish_switch()) */ | ||
1371 | from = task_pfp(t); | ||
1372 | |||
1373 | raw_spin_lock(&from->slock); | ||
1374 | |||
1375 | /* switch partitions */ | ||
1376 | tsk_rt(t)->task_params.cpu = target_cpu; | ||
1377 | |||
1378 | raw_spin_unlock(&from->slock); | ||
1379 | |||
1380 | /* Don't trace scheduler costs as part of | ||
1381 | * locking overhead. Scheduling costs are accounted for | ||
1382 | * explicitly. */ | ||
1383 | TS_LOCK_SUSPEND; | ||
1384 | |||
1385 | local_irq_enable(); | ||
1386 | preempt_enable_no_resched(); | ||
1387 | |||
1388 | /* deschedule to be migrated */ | ||
1389 | schedule(); | ||
1390 | |||
1391 | /* we are now on the target processor */ | ||
1392 | preempt_disable(); | ||
1393 | |||
1394 | /* start recording costs again */ | ||
1395 | TS_LOCK_RESUME; | ||
1396 | |||
1397 | BUG_ON(smp_processor_id() != target_cpu); | ||
1398 | } | ||
1399 | |||
1400 | int pfp_dpcp_lock(struct litmus_lock* l) | ||
1401 | { | ||
1402 | struct task_struct* t = current; | ||
1403 | struct dpcp_semaphore *sem = dpcp_from_lock(l); | ||
1404 | int eprio = effective_agent_priority(get_priority(t)); | ||
1405 | int from = get_partition(t); | ||
1406 | int to = sem->pcp.on_cpu; | ||
1407 | |||
1408 | if (!is_realtime(t)) | ||
1409 | return -EPERM; | ||
1410 | |||
1411 | preempt_disable(); | ||
1412 | |||
1413 | /* Priority-boost ourself *before* we suspend so that | ||
1414 | * our priority is boosted when we resume. */ | ||
1415 | |||
1416 | boost_priority(t, get_priority(t)); | ||
1417 | |||
1418 | pfp_migrate_to(to); | ||
1419 | |||
1420 | pcp_raise_ceiling(&sem->pcp, eprio); | ||
1421 | |||
1422 | /* yep, we got it => execute request */ | ||
1423 | sem->owner_cpu = from; | ||
1424 | |||
1425 | preempt_enable(); | ||
1426 | |||
1427 | return 0; | ||
1428 | } | ||
1429 | |||
1430 | int pfp_dpcp_unlock(struct litmus_lock* l) | ||
1431 | { | ||
1432 | struct task_struct *t = current; | ||
1433 | struct dpcp_semaphore *sem = dpcp_from_lock(l); | ||
1434 | int err = 0; | ||
1435 | int home; | ||
1436 | |||
1437 | preempt_disable(); | ||
1438 | |||
1439 | if (sem->pcp.on_cpu != smp_processor_id() || sem->pcp.owner != t) { | ||
1440 | err = -EINVAL; | ||
1441 | goto out; | ||
1442 | } | ||
1443 | |||
1444 | home = sem->owner_cpu; | ||
1445 | |||
1446 | /* give it back */ | ||
1447 | pcp_lower_ceiling(&sem->pcp); | ||
1448 | |||
1449 | /* we lose the benefit of priority boosting */ | ||
1450 | unboost_priority(t); | ||
1451 | |||
1452 | pfp_migrate_to(home); | ||
1453 | |||
1454 | out: | ||
1455 | preempt_enable(); | ||
1456 | |||
1457 | return err; | ||
1458 | } | ||
1459 | |||
1460 | int pfp_dpcp_open(struct litmus_lock* l, void* __user config) | ||
1461 | { | ||
1462 | struct task_struct *t = current; | ||
1463 | struct dpcp_semaphore *sem = dpcp_from_lock(l); | ||
1464 | int cpu, eprio; | ||
1465 | |||
1466 | if (!is_realtime(t)) | ||
1467 | /* we need to know the real-time priority */ | ||
1468 | return -EPERM; | ||
1469 | |||
1470 | if (get_user(cpu, (int*) config)) | ||
1471 | return -EFAULT; | ||
1472 | |||
1473 | /* make sure the resource location matches */ | ||
1474 | if (cpu != sem->pcp.on_cpu) | ||
1475 | return -EINVAL; | ||
1476 | |||
1477 | eprio = effective_agent_priority(get_priority(t)); | ||
1478 | |||
1479 | pcp_update_prio_ceiling(&sem->pcp, eprio); | ||
1480 | |||
1481 | return 0; | ||
1482 | } | ||
1483 | |||
1484 | int pfp_dpcp_close(struct litmus_lock* l) | ||
1485 | { | ||
1486 | struct task_struct *t = current; | ||
1487 | struct dpcp_semaphore *sem = dpcp_from_lock(l); | ||
1488 | int owner = 0; | ||
1489 | |||
1490 | preempt_disable(); | ||
1491 | |||
1492 | if (sem->pcp.on_cpu == smp_processor_id()) | ||
1493 | owner = sem->pcp.owner == t; | ||
1494 | |||
1495 | preempt_enable(); | ||
1496 | |||
1497 | if (owner) | ||
1498 | pfp_dpcp_unlock(l); | ||
1499 | |||
1500 | return 0; | ||
1501 | } | ||
1502 | |||
1503 | void pfp_dpcp_free(struct litmus_lock* lock) | ||
1504 | { | ||
1505 | kfree(dpcp_from_lock(lock)); | ||
1506 | } | ||
1507 | |||
1508 | static struct litmus_lock_ops pfp_dpcp_lock_ops = { | ||
1509 | .close = pfp_dpcp_close, | ||
1510 | .lock = pfp_dpcp_lock, | ||
1511 | .open = pfp_dpcp_open, | ||
1512 | .unlock = pfp_dpcp_unlock, | ||
1513 | .deallocate = pfp_dpcp_free, | ||
1514 | }; | ||
1515 | |||
1516 | static struct litmus_lock* pfp_new_dpcp(int on_cpu) | ||
1517 | { | ||
1518 | struct dpcp_semaphore* sem; | ||
1519 | |||
1520 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
1521 | if (!sem) | ||
1522 | return NULL; | ||
1523 | |||
1524 | sem->litmus_lock.ops = &pfp_dpcp_lock_ops; | ||
1525 | sem->owner_cpu = NO_CPU; | ||
1526 | pcp_init_semaphore(&sem->pcp, on_cpu); | ||
1527 | |||
1528 | return &sem->litmus_lock; | ||
1529 | } | ||
1530 | |||
1531 | |||
1532 | /* **** lock constructor **** */ | ||
1533 | |||
1534 | |||
1535 | static long pfp_allocate_lock(struct litmus_lock **lock, int type, | ||
1536 | void* __user config) | ||
1537 | { | ||
1538 | int err = -ENXIO, cpu; | ||
1539 | struct srp_semaphore* srp; | ||
1540 | |||
1541 | /* P-FP supports the SRP and the PCP for local resources, and the | ||
1542 | * FMLP, the MPCP, and the DPCP for global resources. */ | ||
1543 | switch (type) { | ||
1544 | case FMLP_SEM: | ||
1545 | /* FIFO Mutex Locking Protocol */ | ||
1546 | *lock = pfp_new_fmlp(); | ||
1547 | if (*lock) | ||
1548 | err = 0; | ||
1549 | else | ||
1550 | err = -ENOMEM; | ||
1551 | break; | ||
1552 | |||
1553 | case MPCP_SEM: | ||
1554 | /* Multiprocessor Priority Ceiling Protocol */ | ||
1555 | *lock = pfp_new_mpcp(0); | ||
1556 | if (*lock) | ||
1557 | err = 0; | ||
1558 | else | ||
1559 | err = -ENOMEM; | ||
1560 | break; | ||
1561 | |||
1562 | case MPCP_VS_SEM: | ||
1563 | /* Multiprocessor Priority Ceiling Protocol with virtual spinning */ | ||
1564 | *lock = pfp_new_mpcp(1); | ||
1565 | if (*lock) | ||
1566 | err = 0; | ||
1567 | else | ||
1568 | err = -ENOMEM; | ||
1569 | break; | ||
1570 | |||
1571 | case DPCP_SEM: | ||
1572 | /* Distributed Priority Ceiling Protocol */ | ||
1573 | if (get_user(cpu, (int*) config)) | ||
1574 | return -EFAULT; | ||
1575 | |||
1576 | if (!cpu_online(cpu)) | ||
1577 | return -EINVAL; | ||
1578 | |||
1579 | *lock = pfp_new_dpcp(cpu); | ||
1580 | if (*lock) | ||
1581 | err = 0; | ||
1582 | else | ||
1583 | err = -ENOMEM; | ||
1584 | break; | ||
1585 | |||
1586 | case SRP_SEM: | ||
1587 | /* Baker's Stack Resource Policy */ | ||
1588 | srp = allocate_srp_semaphore(); | ||
1589 | if (srp) { | ||
1590 | *lock = &srp->litmus_lock; | ||
1591 | err = 0; | ||
1592 | } else | ||
1593 | err = -ENOMEM; | ||
1594 | break; | ||
1595 | |||
1596 | case PCP_SEM: | ||
1597 | /* Priority Ceiling Protocol */ | ||
1598 | if (get_user(cpu, (int*) config)) | ||
1599 | return -EFAULT; | ||
1600 | |||
1601 | if (!cpu_online(cpu)) | ||
1602 | return -EINVAL; | ||
1603 | |||
1604 | *lock = pfp_new_pcp(cpu); | ||
1605 | if (*lock) | ||
1606 | err = 0; | ||
1607 | else | ||
1608 | err = -ENOMEM; | ||
1609 | break; | ||
1610 | }; | ||
1611 | |||
1612 | return err; | ||
1613 | } | ||
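
From userspace, these protocols are reached through the FDSO interface. The sketch below uses liblitmus helper names (od_openx(), litmus_lock(), litmus_unlock(), od_close()); treat the exact signatures as assumptions of this illustration. The DPCP and PCP variants take the resource's home CPU as their configuration word, matching the get_user(cpu, ...) reads above:

    /* Illustrative userspace sketch; liblitmus API names assumed. */
    #include <litmus.h>

    void use_dpcp(int fd)
    {
        int cpu = 2; /* home CPU of the resource */
        int od;

        /* Allocate/attach a DPCP semaphore; the config word is the CPU
         * validated by pfp_allocate_lock() above. */
        od = od_openx(fd, DPCP_SEM, 0 /* object id */, &cpu);

        litmus_lock(od);   /* the request migrates to CPU 2 */
        /* ... critical section, runs on the resource's home CPU ... */
        litmus_unlock(od); /* and the task migrates back */

        od_close(od);
    }
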
1614 | |||
1615 | #endif | ||
1616 | |||
1617 | static long pfp_admit_task(struct task_struct* tsk) | ||
1618 | { | ||
1619 | if (task_cpu(tsk) == tsk->rt_param.task_params.cpu && | ||
1620 | #ifdef CONFIG_RELEASE_MASTER | ||
1621 | /* don't allow tasks on release master CPU */ | ||
1622 | task_cpu(tsk) != remote_dom(task_cpu(tsk))->release_master && | ||
1623 | #endif | ||
1624 | litmus_is_valid_fixed_prio(get_priority(tsk))) | ||
1625 | return 0; | ||
1626 | else | ||
1627 | return -EINVAL; | ||
1628 | } | ||
1629 | |||
1630 | static long pfp_activate_plugin(void) | ||
1631 | { | ||
1632 | #if defined(CONFIG_RELEASE_MASTER) || defined(CONFIG_LITMUS_LOCKING) | ||
1633 | int cpu; | ||
1634 | #endif | ||
1635 | |||
1636 | #ifdef CONFIG_RELEASE_MASTER | ||
1637 | for_each_online_cpu(cpu) { | ||
1638 | remote_dom(cpu)->release_master = atomic_read(&release_master_cpu); | ||
1639 | } | ||
1640 | #endif | ||
1641 | |||
1642 | #ifdef CONFIG_LITMUS_LOCKING | ||
1643 | get_srp_prio = pfp_get_srp_prio; | ||
1644 | |||
1645 | for_each_online_cpu(cpu) { | ||
1646 | init_waitqueue_head(&per_cpu(mpcpvs_vspin_wait, cpu)); | ||
1647 | per_cpu(mpcpvs_vspin, cpu) = NULL; | ||
1648 | |||
1649 | pcp_init_state(&per_cpu(pcp_state, cpu)); | ||
1650 | pfp_doms[cpu] = remote_pfp(cpu); | ||
1651 | } | ||
1652 | |||
1653 | #endif | ||
1654 | |||
1655 | return 0; | ||
1656 | } | ||
1657 | |||
1658 | |||
1659 | /* Plugin object */ | ||
1660 | static struct sched_plugin pfp_plugin __cacheline_aligned_in_smp = { | ||
1661 | .plugin_name = "P-FP", | ||
1662 | .tick = pfp_tick, | ||
1663 | .task_new = pfp_task_new, | ||
1664 | .complete_job = complete_job, | ||
1665 | .task_exit = pfp_task_exit, | ||
1666 | .schedule = pfp_schedule, | ||
1667 | .task_wake_up = pfp_task_wake_up, | ||
1668 | .task_block = pfp_task_block, | ||
1669 | .admit_task = pfp_admit_task, | ||
1670 | .activate_plugin = pfp_activate_plugin, | ||
1671 | #ifdef CONFIG_LITMUS_LOCKING | ||
1672 | .allocate_lock = pfp_allocate_lock, | ||
1673 | .finish_switch = pfp_finish_switch, | ||
1674 | #endif | ||
1675 | }; | ||
1676 | |||
1677 | |||
1678 | static int __init init_pfp(void) | ||
1679 | { | ||
1680 | int i; | ||
1681 | |||
1682 | /* We do not really want to support cpu hotplug, do we? ;) | ||
1683 | * However, if we are so crazy to do so, | ||
1684 | * we cannot use num_online_cpus() | ||
1685 | */ | ||
1686 | for (i = 0; i < num_online_cpus(); i++) { | ||
1687 | pfp_domain_init(remote_pfp(i), i); | ||
1688 | } | ||
1689 | return register_sched_plugin(&pfp_plugin); | ||
1690 | } | ||
1691 | |||
1692 | module_init(init_pfp); | ||
1693 | |||
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c index eaaec38f43da..4e117be9546b 100644 --- a/litmus/sched_psn_edf.c +++ b/litmus/sched_psn_edf.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <litmus/litmus.h> | 17 | #include <litmus/litmus.h> |
18 | #include <litmus/jobs.h> | 18 | #include <litmus/jobs.h> |
19 | #include <litmus/preempt.h> | 19 | #include <litmus/preempt.h> |
20 | #include <litmus/budget.h> | ||
20 | #include <litmus/sched_plugin.h> | 21 | #include <litmus/sched_plugin.h> |
21 | #include <litmus/edf_common.h> | 22 | #include <litmus/edf_common.h> |
22 | #include <litmus/sched_trace.h> | 23 | #include <litmus/sched_trace.h> |
@@ -132,6 +133,15 @@ static void unboost_priority(struct task_struct* t) | |||
132 | 133 | ||
133 | #endif | 134 | #endif |
134 | 135 | ||
136 | static int psnedf_preempt_check(psnedf_domain_t *pedf) | ||
137 | { | ||
138 | if (edf_preemption_needed(&pedf->domain, pedf->scheduled)) { | ||
139 | preempt(pedf); | ||
140 | return 1; | ||
141 | } else | ||
142 | return 0; | ||
143 | } | ||
144 | |||
135 | /* This check is trivial in partioned systems as we only have to consider | 145 | /* This check is trivial in partioned systems as we only have to consider |
136 | * the CPU of the partition. | 146 | * the CPU of the partition. |
137 | */ | 147 | */ |
@@ -142,11 +152,7 @@ static int psnedf_check_resched(rt_domain_t *edf) | |||
142 | /* because this is a callback from rt_domain_t we already hold | 152 | /* because this is a callback from rt_domain_t we already hold |
143 | * the necessary lock for the ready queue | 153 | * the necessary lock for the ready queue |
144 | */ | 154 | */ |
145 | if (edf_preemption_needed(edf, pedf->scheduled)) { | 155 | return psnedf_preempt_check(pedf); |
146 | preempt(pedf); | ||
147 | return 1; | ||
148 | } else | ||
149 | return 0; | ||
150 | } | 156 | } |
151 | 157 | ||
152 | static void job_completion(struct task_struct* t, int forced) | 158 | static void job_completion(struct task_struct* t, int forced) |
@@ -301,7 +307,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running) | |||
301 | } else { | 307 | } else { |
302 | requeue(t, edf); | 308 | requeue(t, edf); |
303 | /* maybe we have to reschedule */ | 309 | /* maybe we have to reschedule */ |
304 | preempt(pedf); | 310 | psnedf_preempt_check(pedf); |
305 | } | 311 | } |
306 | raw_spin_unlock_irqrestore(&pedf->slock, flags); | 312 | raw_spin_unlock_irqrestore(&pedf->slock, flags); |
307 | } | 313 | } |
@@ -337,8 +343,10 @@ static void psnedf_task_wake_up(struct task_struct *task) | |||
337 | * de-scheduling the task, i.e., wake_up() raced with schedule() | 343 | * de-scheduling the task, i.e., wake_up() raced with schedule() |
338 | * and won. | 344 | * and won. |
339 | */ | 345 | */ |
340 | if (pedf->scheduled != task) | 346 | if (pedf->scheduled != task) { |
341 | requeue(task, edf); | 347 | requeue(task, edf); |
348 | psnedf_preempt_check(pedf); | ||
349 | } | ||
342 | 350 | ||
343 | raw_spin_unlock_irqrestore(&pedf->slock, flags); | 351 | raw_spin_unlock_irqrestore(&pedf->slock, flags); |
344 | TRACE_TASK(task, "wake up done\n"); | 352 | TRACE_TASK(task, "wake up done\n"); |
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index 8c1ca188bce1..67b01c1dd51b 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c | |||
@@ -7,10 +7,10 @@ | |||
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | #include <linux/percpu.h> | 9 | #include <linux/percpu.h> |
10 | #include <linux/math64.h> | ||
10 | 11 | ||
11 | #include <litmus/ftdev.h> | 12 | #include <litmus/ftdev.h> |
12 | #include <litmus/litmus.h> | 13 | #include <litmus/litmus.h> |
13 | |||
14 | #include <litmus/sched_trace.h> | 14 | #include <litmus/sched_trace.h> |
15 | #include <litmus/feather_trace.h> | 15 | #include <litmus/feather_trace.h> |
16 | #include <litmus/ftdev.h> | 16 | #include <litmus/ftdev.h> |
@@ -235,13 +235,9 @@ feather_callback void do_sched_trace_task_exit(unsigned long id, | |||
235 | unsigned long _task) | 235 | unsigned long _task) |
236 | { | 236 | { |
237 | struct task_struct *t = (struct task_struct*) _task; | 237 | struct task_struct *t = (struct task_struct*) _task; |
238 | #ifdef CONFIG_PLUGIN_COLOR | ||
239 | const lt_t max_exec_time = tsk_rt(t)->max_exec_time; | 238 | const lt_t max_exec_time = tsk_rt(t)->max_exec_time; |
240 | const lt_t avg_exec_time = tsk_rt(t)->tot_exec_time / (get_rt_job(t) - 1); | 239 | const lt_t avg_exec_time = div64_u64(tsk_rt(t)->tot_exec_time, (get_job_no(t) - 1)); |
241 | #else | 240 | |
242 | const lt_t max_exec_time = 0; | ||
243 | const lt_t avg_exec_time = 0; | ||
244 | #endif | ||
245 | struct st_event_record *rec = get_record(ST_TASK_EXIT, t); | 241 | struct st_event_record *rec = get_record(ST_TASK_EXIT, t); |
246 | if (rec) { | 242 | if (rec) { |
247 | rec->data.task_exit.avg_exec_time = avg_exec_time; | 243 | rec->data.task_exit.avg_exec_time = avg_exec_time; |
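
The switch from a plain division to div64_u64() matters on 32-bit targets: the kernel is built without libgcc's 64-bit division helpers, so a u64 / u64 expression fails to link (undefined reference to __udivdi3). div64_u64() from <linux/math64.h> is the portable equivalent. Schematically (kernel context assumed):

    #include <linux/math64.h>
    #include <linux/types.h>

    /* 32-bit kernels cannot use the C division operator on 64-bit
     * operands (no __udivdi3 in the kernel), hence: */
    static u64 avg_exec_time_of(u64 tot_exec_time, u32 completed_jobs)
    {
        /* was: tot_exec_time / completed_jobs -- breaks on 32-bit */
        return div64_u64(tot_exec_time, completed_jobs);
    }
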