author     Glenn Elliott <gelliott@cs.unc.edu>  2012-09-11 22:42:51 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-09-11 22:42:51 -0400
commit     c1d1979c99ca397241da4e3d7e0cb77f7ec28240 (patch)
tree       2a988aae1ae7c08891543e844171cbcb4281a5bb /include
parent     fd3aa01f176cf12b1625f4f46ba01f3340bb57ed (diff)
parent     55e04c94b925b0790c2ae0a79f16e939e9bb2846 (diff)
Merge branch 'wip-gpu-rtas12' into wip-slave-threads
Conflicts:
include/litmus/unistd_32.h
include/litmus/unistd_64.h
litmus/litmus.c
Diffstat (limited to 'include')
27 files changed, 1596 insertions, 41 deletions
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 9d727271c9fe..cff405c4dd3a 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -76,6 +76,7 @@ static inline void init_completion(struct completion *x)
76 | init_waitqueue_head(&x->wait); | 76 | init_waitqueue_head(&x->wait); |
77 | } | 77 | } |
78 | 78 | ||
79 | extern void __wait_for_completion_locked(struct completion *); | ||
79 | extern void wait_for_completion(struct completion *); | 80 | extern void wait_for_completion(struct completion *); |
80 | extern int wait_for_completion_interruptible(struct completion *x); | 81 | extern int wait_for_completion_interruptible(struct completion *x); |
81 | extern int wait_for_completion_killable(struct completion *x); | 82 | extern int wait_for_completion_killable(struct completion *x); |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index f6efed0039ed..57a7bc8807be 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -445,6 +445,7 @@ static inline void __raise_softirq_irqoff(unsigned int nr)
445 | 445 | ||
446 | extern void raise_softirq_irqoff(unsigned int nr); | 446 | extern void raise_softirq_irqoff(unsigned int nr); |
447 | extern void raise_softirq(unsigned int nr); | 447 | extern void raise_softirq(unsigned int nr); |
448 | extern void wakeup_softirqd(void); | ||
448 | 449 | ||
449 | /* This is the worklist that queues up per-cpu softirq work. | 450 | /* This is the worklist that queues up per-cpu softirq work. |
450 | * | 451 | * |
@@ -500,6 +501,10 @@ struct tasklet_struct
500 | atomic_t count; | 501 | atomic_t count; |
501 | void (*func)(unsigned long); | 502 | void (*func)(unsigned long); |
502 | unsigned long data; | 503 | unsigned long data; |
504 | |||
505 | #if defined(CONFIG_LITMUS_SOFTIRQD) || defined(CONFIG_LITMUS_PAI_SOFTIRQD) | ||
506 | struct task_struct *owner; | ||
507 | #endif | ||
503 | }; | 508 | }; |
504 | 509 | ||
505 | #define DECLARE_TASKLET(name, func, data) \ | 510 | #define DECLARE_TASKLET(name, func, data) \ |
@@ -537,6 +542,7 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
537 | #define tasklet_unlock(t) do { } while (0) | 542 | #define tasklet_unlock(t) do { } while (0) |
538 | #endif | 543 | #endif |
539 | 544 | ||
545 | extern void ___tasklet_schedule(struct tasklet_struct *t); | ||
540 | extern void __tasklet_schedule(struct tasklet_struct *t); | 546 | extern void __tasklet_schedule(struct tasklet_struct *t); |
541 | 547 | ||
542 | static inline void tasklet_schedule(struct tasklet_struct *t) | 548 | static inline void tasklet_schedule(struct tasklet_struct *t) |
@@ -545,6 +551,7 @@ static inline void tasklet_schedule(struct tasklet_struct *t)
545 | __tasklet_schedule(t); | 551 | __tasklet_schedule(t); |
546 | } | 552 | } |
547 | 553 | ||
554 | extern void ___tasklet_hi_schedule(struct tasklet_struct *t); | ||
548 | extern void __tasklet_hi_schedule(struct tasklet_struct *t); | 555 | extern void __tasklet_hi_schedule(struct tasklet_struct *t); |
549 | 556 | ||
550 | static inline void tasklet_hi_schedule(struct tasklet_struct *t) | 557 | static inline void tasklet_hi_schedule(struct tasklet_struct *t) |
@@ -553,6 +560,7 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t)
553 | __tasklet_hi_schedule(t); | 560 | __tasklet_hi_schedule(t); |
554 | } | 561 | } |
555 | 562 | ||
563 | extern void ___tasklet_hi_schedule_first(struct tasklet_struct *t); | ||
556 | extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); | 564 | extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); |
557 | 565 | ||
558 | /* | 566 | /* |
@@ -582,7 +590,7 @@ static inline void tasklet_disable(struct tasklet_struct *t)
582 | } | 590 | } |
583 | 591 | ||
584 | static inline void tasklet_enable(struct tasklet_struct *t) | 592 | static inline void tasklet_enable(struct tasklet_struct *t) |
585 | { | 593 | { |
586 | smp_mb__before_atomic_dec(); | 594 | smp_mb__before_atomic_dec(); |
587 | atomic_dec(&t->count); | 595 | atomic_dec(&t->count); |
588 | } | 596 | } |
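The new owner field attributes a tasklet to the real-time task on whose behalf it runs, so the threaded handler can execute it at that task's priority. A minimal sketch of how a driver might set it (hypothetical names; not part of this patch):

#include <linux/interrupt.h>
#include <linux/sched.h>

static void my_bottom_half(unsigned long data);
static DECLARE_TASKLET(my_tasklet, my_bottom_half, 0);

static void my_bottom_half(unsigned long data)
{
	/* deferred work; under klitirqd this runs at the owner's priority */
}

static void queue_owned_tasklet(struct task_struct *rt_task)
{
#if defined(CONFIG_LITMUS_SOFTIRQD) || defined(CONFIG_LITMUS_PAI_SOFTIRQD)
	my_tasklet.owner = rt_task;	/* attribute the work to this RT task */
#endif
	tasklet_schedule(&my_tasklet);
}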
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index a940fe435aca..cb47debbf24d 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -126,6 +126,15 @@ static inline int mutex_is_locked(struct mutex *lock)
126 | return atomic_read(&lock->count) != 1; | 126 | return atomic_read(&lock->count) != 1; |
127 | } | 127 | } |
128 | 128 | ||
129 | /* return non-zero to abort. only pre-side-effects may abort */ | ||
130 | typedef int (*side_effect_t)(unsigned long); | ||
131 | extern void mutex_lock_sfx(struct mutex *lock, | ||
132 | side_effect_t pre, unsigned long pre_arg, | ||
133 | side_effect_t post, unsigned long post_arg); | ||
134 | extern void mutex_unlock_sfx(struct mutex *lock, | ||
135 | side_effect_t pre, unsigned long pre_arg, | ||
136 | side_effect_t post, unsigned long post_arg); | ||
137 | |||
129 | /* | 138 | /* |
130 | * See kernel/mutex.c for detailed documentation of these APIs. | 139 | * See kernel/mutex.c for detailed documentation of these APIs. |
131 | * Also see Documentation/mutex-design.txt. | 140 | * Also see Documentation/mutex-design.txt. |
@@ -153,6 +162,7 @@ extern void mutex_lock(struct mutex *lock);
153 | extern int __must_check mutex_lock_interruptible(struct mutex *lock); | 162 | extern int __must_check mutex_lock_interruptible(struct mutex *lock); |
154 | extern int __must_check mutex_lock_killable(struct mutex *lock); | 163 | extern int __must_check mutex_lock_killable(struct mutex *lock); |
155 | 164 | ||
165 | |||
156 | # define mutex_lock_nested(lock, subclass) mutex_lock(lock) | 166 | # define mutex_lock_nested(lock, subclass) mutex_lock(lock) |
157 | # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) | 167 | # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) |
158 | # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) | 168 | # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) |
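mutex_lock_sfx()/mutex_unlock_sfx() let a caller run side effects atomically with the lock-state transition; per the comment, only the pre-side-effect may abort. A hedged sketch of a caller (the callbacks are hypothetical, and the precise abort semantics are defined by the implementation in kernel/mutex.c):

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);

/* pre-side-effect: runs with the lock's internal state held;
 * a non-zero return requests that the operation be aborted */
static int pre_cb(unsigned long arg)
{
	return 0;	/* proceed */
}

/* post-side-effect: bookkeeping after the state change; cannot abort */
static int post_cb(unsigned long arg)
{
	return 0;
}

static void demo(void)
{
	mutex_lock_sfx(&demo_mutex, pre_cb, 0, post_cb, 0);
	/* ... critical section ... */
	mutex_unlock_sfx(&demo_mutex, pre_cb, 0, post_cb, 0);
}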
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 39fa04966aa8..c83fc2b65f01 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -43,4 +43,13 @@ extern int __must_check down_trylock(struct semaphore *sem);
43 | extern int __must_check down_timeout(struct semaphore *sem, long jiffies); | 43 | extern int __must_check down_timeout(struct semaphore *sem, long jiffies); |
44 | extern void up(struct semaphore *sem); | 44 | extern void up(struct semaphore *sem); |
45 | 45 | ||
46 | extern void __down(struct semaphore *sem); | ||
47 | extern void __up(struct semaphore *sem); | ||
48 | |||
49 | struct semaphore_waiter { | ||
50 | struct list_head list; | ||
51 | struct task_struct *task; | ||
52 | int up; | ||
53 | }; | ||
54 | |||
46 | #endif /* __LINUX_SEMAPHORE_H */ | 55 | #endif /* __LINUX_SEMAPHORE_H */ |
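Exporting __down()/__up() and struct semaphore_waiter exposes slow-path helpers that kernel/semaphore.c normally keeps private; in mainline they are invoked with sem->lock held and interrupts disabled. A sketch under that assumption, mirroring the structure of down() (hypothetical caller; not part of this patch):

#include <linux/semaphore.h>

static void locked_down(struct semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;		/* fast path: resource available */
	else
		__down(sem);		/* slow path: enqueue a waiter and sleep */
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}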
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f584aba78ca9..1ec2ec7d4e3b 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -83,6 +83,9 @@ struct work_struct {
83 | #ifdef CONFIG_LOCKDEP | 83 | #ifdef CONFIG_LOCKDEP |
84 | struct lockdep_map lockdep_map; | 84 | struct lockdep_map lockdep_map; |
85 | #endif | 85 | #endif |
86 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
87 | struct task_struct *owner; | ||
88 | #endif | ||
86 | }; | 89 | }; |
87 | 90 | ||
88 | #define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU) | 91 | #define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU) |
@@ -115,11 +118,25 @@ struct execute_work {
115 | #define __WORK_INIT_LOCKDEP_MAP(n, k) | 118 | #define __WORK_INIT_LOCKDEP_MAP(n, k) |
116 | #endif | 119 | #endif |
117 | 120 | ||
121 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
122 | #define __WORK_INIT_OWNER() \ | ||
123 | .owner = NULL, | ||
124 | |||
125 | #define PREPARE_OWNER(_work, _owner) \ | ||
126 | do { \ | ||
127 | (_work)->owner = (_owner); \ | ||
128 | } while(0) | ||
129 | #else | ||
130 | #define __WORK_INIT_OWNER() | ||
131 | #define PREPARE_OWNER(_work, _owner) | ||
132 | #endif | ||
133 | |||
118 | #define __WORK_INITIALIZER(n, f) { \ | 134 | #define __WORK_INITIALIZER(n, f) { \ |
119 | .data = WORK_DATA_STATIC_INIT(), \ | 135 | .data = WORK_DATA_STATIC_INIT(), \ |
120 | .entry = { &(n).entry, &(n).entry }, \ | 136 | .entry = { &(n).entry, &(n).entry }, \ |
121 | .func = (f), \ | 137 | .func = (f), \ |
122 | __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ | 138 | __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ |
139 | __WORK_INIT_OWNER() \ | ||
123 | } | 140 | } |
124 | 141 | ||
125 | #define __DELAYED_WORK_INITIALIZER(n, f) { \ | 142 | #define __DELAYED_WORK_INITIALIZER(n, f) { \ |
@@ -357,6 +374,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
357 | extern void flush_workqueue(struct workqueue_struct *wq); | 374 | extern void flush_workqueue(struct workqueue_struct *wq); |
358 | extern void flush_scheduled_work(void); | 375 | extern void flush_scheduled_work(void); |
359 | 376 | ||
377 | extern int __schedule_work(struct work_struct *work); | ||
360 | extern int schedule_work(struct work_struct *work); | 378 | extern int schedule_work(struct work_struct *work); |
361 | extern int schedule_work_on(int cpu, struct work_struct *work); | 379 | extern int schedule_work_on(int cpu, struct work_struct *work); |
362 | extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay); | 380 | extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay); |
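PREPARE_OWNER() tags a work item with the real-time task on whose behalf it will execute (a no-op without CONFIG_LITMUS_SOFTIRQD). A minimal sketch (hypothetical names; not part of this patch):

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *w);
static DECLARE_WORK(my_work, my_work_fn);

static void my_work_fn(struct work_struct *w)
{
	/* under klitirqd, runs at the priority of the recorded owner */
}

static void queue_owned_work(struct task_struct *rt_task)
{
	PREPARE_OWNER(&my_work, rt_task);	/* record the owning RT task */
	schedule_work(&my_work);
}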
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index 33344ee8d5f9..763b31c0e9f6 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -5,6 +5,9 @@
5 | * the next task. */ | 5 | * the next task. */ |
6 | void update_enforcement_timer(struct task_struct* t); | 6 | void update_enforcement_timer(struct task_struct* t); |
7 | 7 | ||
8 | /* Send SIG_BUDGET to a real-time task. */ | ||
9 | void send_sigbudget(struct task_struct* t); | ||
10 | |||
8 | inline static int budget_exhausted(struct task_struct* t) | 11 | inline static int budget_exhausted(struct task_struct* t) |
9 | { | 12 | { |
10 | return get_exec_time(t) >= get_exec_cost(t); | 13 | return get_exec_time(t) >= get_exec_cost(t); |
@@ -19,10 +22,21 @@ inline static lt_t budget_remaining(struct task_struct* t)
19 | return 0; | 22 | return 0; |
20 | } | 23 | } |
21 | 24 | ||
22 | #define budget_enforced(t) (tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT) | 25 | #define budget_enforced(t) (\ |
26 | tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT) | ||
27 | |||
28 | #define budget_precisely_tracked(t) (\ | ||
29 | tsk_rt(t)->task_params.budget_policy == PRECISE_ENFORCEMENT || \ | ||
30 | tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS) | ||
31 | |||
32 | #define budget_signalled(t) (\ | ||
33 | tsk_rt(t)->task_params.budget_signal_policy != NO_SIGNALS) | ||
34 | |||
35 | #define budget_precisely_signalled(t) (\ | ||
36 | tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS) | ||
23 | 37 | ||
24 | #define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \ | 38 | #define sigbudget_sent(t) (\ |
25 | == PRECISE_ENFORCEMENT) | 39 | test_bit(RT_JOB_SIG_BUDGET_SENT, &tsk_rt(t)->job_params.flags)) |
26 | 40 | ||
27 | static inline int requeue_preempted_job(struct task_struct* t) | 41 | static inline int requeue_preempted_job(struct task_struct* t) |
28 | { | 42 | { |
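Together, the new macros let a plugin decide whether a budget-exhaustion signal is still owed to the current job. A plausible check, as a sketch (the hook name is hypothetical):

#include <litmus/budget.h>

static void on_budget_event(struct task_struct *t)
{
	/* deliver SIG_BUDGET at most once per job */
	if (budget_exhausted(t) && budget_signalled(t) && !sigbudget_sent(t))
		send_sigbudget(t);
}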
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
index bbaf22ea7f12..63dff7efe8fb 100644
--- a/include/litmus/edf_common.h
+++ b/include/litmus/edf_common.h
@@ -20,6 +20,18 @@ int edf_higher_prio(struct task_struct* first,
20 | 20 | ||
21 | int edf_ready_order(struct bheap_node* a, struct bheap_node* b); | 21 | int edf_ready_order(struct bheap_node* a, struct bheap_node* b); |
22 | 22 | ||
23 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
24 | /* binheap_nodes must be embedded within 'struct litmus_lock' */ | ||
25 | int edf_max_heap_order(struct binheap_node *a, struct binheap_node *b); | ||
26 | int edf_min_heap_order(struct binheap_node *a, struct binheap_node *b); | ||
27 | int edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b); | ||
28 | int edf_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b); | ||
29 | |||
30 | int __edf_higher_prio(struct task_struct* first, comparison_mode_t first_mode, | ||
31 | struct task_struct* second, comparison_mode_t second_mode); | ||
32 | |||
33 | #endif | ||
34 | |||
23 | int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t); | 35 | int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t); |
24 | 36 | ||
25 | #endif | 37 | #endif |
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index f2115b83f1e4..1469c0fd0460 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -24,9 +24,18 @@ typedef enum {
24 | MPCP_VS_SEM = 3, | 24 | MPCP_VS_SEM = 3, |
25 | DPCP_SEM = 4, | 25 | DPCP_SEM = 4, |
26 | 26 | ||
27 | PCP_SEM = 5, | 27 | PCP_SEM = 5, |
28 | 28 | ||
29 | MAX_OBJ_TYPE = 5 | 29 | RSM_MUTEX = 6, |
30 | IKGLP_SEM = 7, | ||
31 | KFMLP_SEM = 8, | ||
32 | |||
33 | IKGLP_SIMPLE_GPU_AFF_OBS = 9, | ||
34 | IKGLP_GPU_AFF_OBS = 10, | ||
35 | KFMLP_SIMPLE_GPU_AFF_OBS = 11, | ||
36 | KFMLP_GPU_AFF_OBS = 12, | ||
37 | |||
38 | MAX_OBJ_TYPE = 12 | ||
30 | } obj_type_t; | 39 | } obj_type_t; |
31 | 40 | ||
32 | struct inode_obj_id { | 41 | struct inode_obj_id { |
@@ -70,8 +79,11 @@ static inline void* od_lookup(int od, obj_type_t type)
70 | } | 79 | } |
71 | 80 | ||
72 | #define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM)) | 81 | #define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM)) |
82 | #define lookup_kfmlp_sem(od)((struct pi_semaphore*) od_lookup(od, KFMLP_SEM)) | ||
73 | #define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) | 83 | #define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) |
74 | #define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) | 84 | #define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) |
75 | 85 | ||
86 | #define lookup_rsm_mutex(od)((struct litmus_lock*) od_lookup(od, RSM_MUTEX)) | ||
87 | |||
76 | 88 | ||
77 | #endif | 89 | #endif |
diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h
new file mode 100644
index 000000000000..04d4bcaeae96
--- /dev/null
+++ b/include/litmus/fpmath.h
@@ -0,0 +1,145 @@
1 | #ifndef __FP_MATH_H__ | ||
2 | #define __FP_MATH_H__ | ||
3 | |||
4 | #ifndef __KERNEL__ | ||
5 | #include <stdint.h> | ||
6 | #define abs(x) (((x) < 0) ? -(x) : x) | ||
7 | #endif | ||
8 | |||
9 | // Use 64-bit because we want to track things at the nanosecond scale. | ||
10 | // This can lead to very large numbers. | ||
11 | typedef int64_t fpbuf_t; | ||
12 | typedef struct | ||
13 | { | ||
14 | fpbuf_t val; | ||
15 | } fp_t; | ||
16 | |||
17 | #define FP_SHIFT 10 | ||
18 | #define ROUND_BIT (FP_SHIFT - 1) | ||
19 | |||
20 | #define _fp(x) ((fp_t) {x}) | ||
21 | |||
22 | #ifdef __KERNEL__ | ||
23 | static const fp_t LITMUS_FP_ZERO = {.val = 0}; | ||
24 | static const fp_t LITMUS_FP_ONE = {.val = (1 << FP_SHIFT)}; | ||
25 | #endif | ||
26 | |||
27 | static inline fp_t FP(fpbuf_t x) | ||
28 | { | ||
29 | return _fp(((fpbuf_t) x) << FP_SHIFT); | ||
30 | } | ||
31 | |||
32 | /* divide two integers to obtain a fixed point value */ | ||
33 | static inline fp_t _frac(fpbuf_t a, fpbuf_t b) | ||
34 | { | ||
35 | return _fp(FP(a).val / (b)); | ||
36 | } | ||
37 | |||
38 | static inline fpbuf_t _point(fp_t x) | ||
39 | { | ||
40 | return (x.val % (1 << FP_SHIFT)); | ||
41 | |||
42 | } | ||
43 | |||
44 | #define fp2str(x) x.val | ||
45 | /*(x.val >> FP_SHIFT), (x.val % (1 << FP_SHIFT)) */ | ||
46 | #define _FP_ "%ld/1024" | ||
47 | |||
48 | static inline fpbuf_t _floor(fp_t x) | ||
49 | { | ||
50 | return x.val >> FP_SHIFT; | ||
51 | } | ||
52 | |||
53 | /* FIXME: negative rounding */ | ||
54 | static inline fpbuf_t _round(fp_t x) | ||
55 | { | ||
56 | return _floor(x) + ((x.val >> ROUND_BIT) & 1); | ||
57 | } | ||
58 | |||
59 | /* multiply two fixed point values */ | ||
60 | static inline fp_t _mul(fp_t a, fp_t b) | ||
61 | { | ||
62 | return _fp((a.val * b.val) >> FP_SHIFT); | ||
63 | } | ||
64 | |||
65 | static inline fp_t _div(fp_t a, fp_t b) | ||
66 | { | ||
67 | #if !defined(__KERNEL__) && !defined(unlikely) | ||
68 | #define unlikely(x) (x) | ||
69 | #define DO_UNDEF_UNLIKELY | ||
70 | #endif | ||
71 | /* try not to overflow */ | ||
72 | if (unlikely( a.val > (2l << ((sizeof(fpbuf_t)*8) - FP_SHIFT)) )) | ||
73 | return _fp((a.val / b.val) << FP_SHIFT); | ||
74 | else | ||
75 | return _fp((a.val << FP_SHIFT) / b.val); | ||
76 | #ifdef DO_UNDEF_UNLIKELY | ||
77 | #undef unlikely | ||
78 | #undef DO_UNDEF_UNLIKELY | ||
79 | #endif | ||
80 | } | ||
81 | |||
82 | static inline fp_t _add(fp_t a, fp_t b) | ||
83 | { | ||
84 | return _fp(a.val + b.val); | ||
85 | } | ||
86 | |||
87 | static inline fp_t _sub(fp_t a, fp_t b) | ||
88 | { | ||
89 | return _fp(a.val - b.val); | ||
90 | } | ||
91 | |||
92 | static inline fp_t _neg(fp_t x) | ||
93 | { | ||
94 | return _fp(-x.val); | ||
95 | } | ||
96 | |||
97 | static inline fp_t _abs(fp_t x) | ||
98 | { | ||
99 | return _fp(abs(x.val)); | ||
100 | } | ||
101 | |||
102 | /* works the same as casting float/double to integer */ | ||
103 | static inline fpbuf_t _fp_to_integer(fp_t x) | ||
104 | { | ||
105 | return _floor(_abs(x)) * ((x.val > 0) ? 1 : -1); | ||
106 | } | ||
107 | |||
108 | static inline fp_t _integer_to_fp(fpbuf_t x) | ||
109 | { | ||
110 | return _frac(x,1); | ||
111 | } | ||
112 | |||
113 | static inline int _leq(fp_t a, fp_t b) | ||
114 | { | ||
115 | return a.val <= b.val; | ||
116 | } | ||
117 | |||
118 | static inline int _geq(fp_t a, fp_t b) | ||
119 | { | ||
120 | return a.val >= b.val; | ||
121 | } | ||
122 | |||
123 | static inline int _lt(fp_t a, fp_t b) | ||
124 | { | ||
125 | return a.val < b.val; | ||
126 | } | ||
127 | |||
128 | static inline int _gt(fp_t a, fp_t b) | ||
129 | { | ||
130 | return a.val > b.val; | ||
131 | } | ||
132 | |||
133 | static inline int _eq(fp_t a, fp_t b) | ||
134 | { | ||
135 | return a.val == b.val; | ||
136 | } | ||
137 | |||
138 | static inline fp_t _max(fp_t a, fp_t b) | ||
139 | { | ||
140 | if (a.val < b.val) | ||
141 | return b; | ||
142 | else | ||
143 | return a; | ||
144 | } | ||
145 | #endif | ||
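For illustration, the Q10 fixed-point helpers compose as follows: an exponentially-weighted moving average of the kind the GPU-affinity code tracks, shown as a hedged sketch (the function and weight are hypothetical; _frac(1, 4) encodes 0.25 as 256/1024):

#include <litmus/fpmath.h>

/* EWMA in Q10 fixed point: avg += alpha * (observation - avg) */
static fpbuf_t ewma_update(fpbuf_t prev_avg, fpbuf_t observed)
{
	fp_t alpha = _frac(1, 4);		/* weight 0.25 == 256/1024 */
	fp_t avg = _integer_to_fp(prev_avg);
	fp_t obs = _integer_to_fp(observed);

	avg = _add(avg, _mul(alpha, _sub(obs, avg)));
	return _fp_to_integer(avg);
}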
diff --git a/include/litmus/gpu_affinity.h b/include/litmus/gpu_affinity.h
new file mode 100644
index 000000000000..d64a15cbf2a5
--- /dev/null
+++ b/include/litmus/gpu_affinity.h
@@ -0,0 +1,50 @@
1 | #ifndef LITMUS_GPU_AFFINITY_H | ||
2 | #define LITMUS_GPU_AFFINITY_H | ||
3 | |||
4 | #include <litmus/rt_param.h> | ||
5 | #include <litmus/sched_plugin.h> | ||
6 | #include <litmus/litmus.h> | ||
7 | |||
8 | void update_gpu_estimate(struct task_struct* t, lt_t observed); | ||
9 | gpu_migration_dist_t gpu_migration_distance(int a, int b); | ||
10 | |||
11 | static inline void reset_gpu_tracker(struct task_struct* t) | ||
12 | { | ||
13 | t->rt_param.accum_gpu_time = 0; | ||
14 | } | ||
15 | |||
16 | static inline void start_gpu_tracker(struct task_struct* t) | ||
17 | { | ||
18 | t->rt_param.gpu_time_stamp = litmus_clock(); | ||
19 | } | ||
20 | |||
21 | static inline void stop_gpu_tracker(struct task_struct* t) | ||
22 | { | ||
23 | lt_t now = litmus_clock(); | ||
24 | t->rt_param.accum_gpu_time += (now - t->rt_param.gpu_time_stamp); | ||
25 | } | ||
26 | |||
27 | static inline lt_t get_gpu_time(struct task_struct* t) | ||
28 | { | ||
29 | return t->rt_param.accum_gpu_time; | ||
30 | } | ||
31 | |||
32 | static inline lt_t get_gpu_estimate(struct task_struct* t, gpu_migration_dist_t dist) | ||
33 | { | ||
34 | // int i; | ||
35 | // fpbuf_t temp = _fp_to_integer(t->rt_param.gpu_migration_est[dist].est); | ||
36 | // lt_t val = (temp >= 0) ? temp : 0; // never allow negative estimates... | ||
37 | lt_t val = t->rt_param.gpu_migration_est[dist].avg; | ||
38 | |||
39 | // WARN_ON(temp < 0); | ||
40 | |||
41 | // lower-bound a distant migration to be at least equal to the level | ||
42 | // below it. | ||
43 | // for(i = dist-1; (val == 0) && (i >= MIG_LOCAL); --i) { | ||
44 | // val = _fp_to_integer(t->rt_param.gpu_migration_est[i].est); | ||
45 | // } | ||
46 | |||
47 | return ((val > 0) ? val : dist+1); | ||
48 | } | ||
49 | |||
50 | #endif | ||
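The tracker helpers are meant to bracket a task's GPU critical section so the observed time can be fed back into the per-distance migration estimates. A sketch of the intended call pattern (the surrounding function is hypothetical):

#include <litmus/gpu_affinity.h>

static void run_gpu_section(struct task_struct *t)
{
	reset_gpu_tracker(t);
	start_gpu_tracker(t);
	/* ... issue GPU work and wait for completion ... */
	stop_gpu_tracker(t);

	/* feed the observation back into the migration-cost estimates */
	update_gpu_estimate(t, get_gpu_time(t));
}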
diff --git a/include/litmus/ikglp_lock.h b/include/litmus/ikglp_lock.h
new file mode 100644
index 000000000000..0b89c8135360
--- /dev/null
+++ b/include/litmus/ikglp_lock.h
@@ -0,0 +1,160 @@
1 | #ifndef LITMUS_IKGLP_H | ||
2 | #define LITMUS_IKGLP_H | ||
3 | |||
4 | #include <litmus/litmus.h> | ||
5 | #include <litmus/binheap.h> | ||
6 | #include <litmus/locking.h> | ||
7 | |||
8 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
9 | #include <litmus/kexclu_affinity.h> | ||
10 | |||
11 | struct ikglp_affinity; | ||
12 | #endif | ||
13 | |||
14 | typedef struct ikglp_heap_node | ||
15 | { | ||
16 | struct task_struct *task; | ||
17 | struct binheap_node node; | ||
18 | } ikglp_heap_node_t; | ||
19 | |||
20 | struct fifo_queue; | ||
21 | struct ikglp_wait_state; | ||
22 | |||
23 | typedef struct ikglp_donee_heap_node | ||
24 | { | ||
25 | struct task_struct *task; | ||
26 | struct fifo_queue *fq; | ||
27 | struct ikglp_wait_state *donor_info; // cross-linked with ikglp_wait_state_t of donor | ||
28 | |||
29 | struct binheap_node node; | ||
30 | } ikglp_donee_heap_node_t; | ||
31 | |||
32 | // Maintains the state of a request as it goes through the IKGLP | ||
33 | typedef struct ikglp_wait_state { | ||
34 | struct task_struct *task; // pointer back to the requesting task | ||
35 | |||
36 | // Data for while waiting in FIFO Queue | ||
37 | wait_queue_t fq_node; | ||
38 | ikglp_heap_node_t global_heap_node; | ||
39 | ikglp_donee_heap_node_t donee_heap_node; | ||
40 | |||
41 | // Data for while waiting in PQ | ||
42 | ikglp_heap_node_t pq_node; | ||
43 | |||
44 | // Data for while waiting as a donor | ||
45 | ikglp_donee_heap_node_t *donee_info; // cross-linked with donee's ikglp_donee_heap_node_t | ||
46 | struct nested_info prio_donation; | ||
47 | struct binheap_node node; | ||
48 | } ikglp_wait_state_t; | ||
49 | |||
50 | /* struct for semaphore with priority inheritance */ | ||
51 | struct fifo_queue | ||
52 | { | ||
53 | wait_queue_head_t wait; | ||
54 | struct task_struct* owner; | ||
55 | |||
56 | // used for bookkeeping | ||
57 | ikglp_heap_node_t global_heap_node; | ||
58 | ikglp_donee_heap_node_t donee_heap_node; | ||
59 | |||
60 | struct task_struct* hp_waiter; | ||
61 | int count; /* number of waiters + holder */ | ||
62 | |||
63 | struct nested_info nest; | ||
64 | }; | ||
65 | |||
66 | struct ikglp_semaphore | ||
67 | { | ||
68 | struct litmus_lock litmus_lock; | ||
69 | |||
70 | raw_spinlock_t lock; | ||
71 | raw_spinlock_t real_lock; | ||
72 | |||
73 | int nr_replicas; // AKA k | ||
74 | int m; | ||
75 | |||
76 | int max_fifo_len; // max len of a fifo queue | ||
77 | int nr_in_fifos; | ||
78 | |||
79 | struct binheap top_m; // min heap, base prio | ||
80 | int top_m_size; // number of nodes in top_m | ||
81 | |||
82 | struct binheap not_top_m; // max heap, base prio | ||
83 | |||
84 | struct binheap donees; // min-heap, base prio | ||
85 | struct fifo_queue *shortest_fifo_queue; // pointer to shortest fifo queue | ||
86 | |||
87 | /* data structures for holding requests */ | ||
88 | struct fifo_queue *fifo_queues; // array nr_replicas in length | ||
89 | struct binheap priority_queue; // max-heap, base prio | ||
90 | struct binheap donors; // max-heap, base prio | ||
91 | |||
92 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
93 | struct ikglp_affinity *aff_obs; | ||
94 | #endif | ||
95 | }; | ||
96 | |||
97 | static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock) | ||
98 | { | ||
99 | return container_of(lock, struct ikglp_semaphore, litmus_lock); | ||
100 | } | ||
101 | |||
102 | int ikglp_lock(struct litmus_lock* l); | ||
103 | int ikglp_unlock(struct litmus_lock* l); | ||
104 | int ikglp_close(struct litmus_lock* l); | ||
105 | void ikglp_free(struct litmus_lock* l); | ||
106 | struct litmus_lock* ikglp_new(int m, struct litmus_lock_ops*, void* __user arg); | ||
107 | |||
108 | |||
109 | |||
110 | #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) | ||
111 | |||
112 | struct ikglp_queue_info | ||
113 | { | ||
114 | struct fifo_queue* q; | ||
115 | lt_t estimated_len; | ||
116 | int *nr_cur_users; | ||
117 | }; | ||
118 | |||
119 | struct ikglp_affinity_ops | ||
120 | { | ||
121 | struct fifo_queue* (*advise_enqueue)(struct ikglp_affinity* aff, struct task_struct* t); // select FIFO | ||
122 | ikglp_wait_state_t* (*advise_steal)(struct ikglp_affinity* aff, struct fifo_queue* dst); // select steal from FIFO | ||
123 | ikglp_donee_heap_node_t* (*advise_donee_selection)(struct ikglp_affinity* aff, struct task_struct* t); // select a donee | ||
124 | ikglp_wait_state_t* (*advise_donor_to_fq)(struct ikglp_affinity* aff, struct fifo_queue* dst); // select a donor to move to PQ | ||
125 | |||
126 | void (*notify_enqueue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t); // fifo enqueue | ||
127 | void (*notify_dequeue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t); // fifo dequeue | ||
128 | void (*notify_acquired)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t); // replica acquired | ||
129 | void (*notify_freed)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t); // replica freed | ||
130 | int (*replica_to_resource)(struct ikglp_affinity* aff, struct fifo_queue* fq); // convert a replica # to a GPU (includes offsets and simult user folding) | ||
131 | }; | ||
132 | |||
133 | struct ikglp_affinity | ||
134 | { | ||
135 | struct affinity_observer obs; | ||
136 | struct ikglp_affinity_ops *ops; | ||
137 | struct ikglp_queue_info *q_info; | ||
138 | int *nr_cur_users_on_rsrc; | ||
139 | int offset; | ||
140 | int nr_simult; | ||
141 | int nr_rsrc; | ||
142 | int relax_max_fifo_len; | ||
143 | }; | ||
144 | |||
145 | static inline struct ikglp_affinity* ikglp_aff_obs_from_aff_obs(struct affinity_observer* aff_obs) | ||
146 | { | ||
147 | return container_of(aff_obs, struct ikglp_affinity, obs); | ||
148 | } | ||
149 | |||
150 | int ikglp_aff_obs_close(struct affinity_observer*); | ||
151 | void ikglp_aff_obs_free(struct affinity_observer*); | ||
152 | struct affinity_observer* ikglp_gpu_aff_obs_new(struct affinity_observer_ops*, | ||
153 | void* __user arg); | ||
154 | struct affinity_observer* ikglp_simple_gpu_aff_obs_new(struct affinity_observer_ops*, | ||
155 | void* __user arg); | ||
156 | #endif | ||
157 | |||
158 | |||
159 | |||
160 | #endif | ||
diff --git a/include/litmus/kexclu_affinity.h b/include/litmus/kexclu_affinity.h
new file mode 100644
index 000000000000..f6355de49074
--- /dev/null
+++ b/include/litmus/kexclu_affinity.h
@@ -0,0 +1,35 @@
1 | #ifndef LITMUS_AFF_OBS_H | ||
2 | #define LITMUS_AFF_OBS_H | ||
3 | |||
4 | #include <litmus/locking.h> | ||
5 | |||
6 | struct affinity_observer_ops; | ||
7 | |||
8 | struct affinity_observer | ||
9 | { | ||
10 | struct affinity_observer_ops* ops; | ||
11 | int type; | ||
12 | int ident; | ||
13 | |||
14 | struct litmus_lock* lock; // the lock under observation | ||
15 | }; | ||
16 | |||
17 | typedef int (*aff_obs_open_t)(struct affinity_observer* aff_obs, | ||
18 | void* __user arg); | ||
19 | typedef int (*aff_obs_close_t)(struct affinity_observer* aff_obs); | ||
20 | typedef void (*aff_obs_free_t)(struct affinity_observer* aff_obs); | ||
21 | |||
22 | struct affinity_observer_ops | ||
23 | { | ||
24 | aff_obs_open_t open; | ||
25 | aff_obs_close_t close; | ||
26 | aff_obs_free_t deallocate; | ||
27 | }; | ||
28 | |||
29 | struct litmus_lock* get_lock_from_od(int od); | ||
30 | |||
31 | void affinity_observer_new(struct affinity_observer* aff, | ||
32 | struct affinity_observer_ops* ops, | ||
33 | struct affinity_observer_args* args); | ||
34 | |||
35 | #endif | ||
diff --git a/include/litmus/kfmlp_lock.h b/include/litmus/kfmlp_lock.h
new file mode 100644
index 000000000000..5f0aae6e6f42
--- /dev/null
+++ b/include/litmus/kfmlp_lock.h
@@ -0,0 +1,97 @@
1 | #ifndef LITMUS_KFMLP_H | ||
2 | #define LITMUS_KFMLP_H | ||
3 | |||
4 | #include <litmus/litmus.h> | ||
5 | #include <litmus/locking.h> | ||
6 | |||
7 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
8 | #include <litmus/kexclu_affinity.h> | ||
9 | |||
10 | struct kfmlp_affinity; | ||
11 | #endif | ||
12 | |||
13 | /* struct for semaphore with priority inheritance */ | ||
14 | struct kfmlp_queue | ||
15 | { | ||
16 | wait_queue_head_t wait; | ||
17 | struct task_struct* owner; | ||
18 | struct task_struct* hp_waiter; | ||
19 | int count; /* number of waiters + holder */ | ||
20 | }; | ||
21 | |||
22 | struct kfmlp_semaphore | ||
23 | { | ||
24 | struct litmus_lock litmus_lock; | ||
25 | |||
26 | spinlock_t lock; | ||
27 | |||
28 | int num_resources; /* aka k */ | ||
29 | |||
30 | struct kfmlp_queue *queues; /* array */ | ||
31 | struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */ | ||
32 | |||
33 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
34 | struct kfmlp_affinity *aff_obs; | ||
35 | #endif | ||
36 | }; | ||
37 | |||
38 | static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock) | ||
39 | { | ||
40 | return container_of(lock, struct kfmlp_semaphore, litmus_lock); | ||
41 | } | ||
42 | |||
43 | int kfmlp_lock(struct litmus_lock* l); | ||
44 | int kfmlp_unlock(struct litmus_lock* l); | ||
45 | int kfmlp_close(struct litmus_lock* l); | ||
46 | void kfmlp_free(struct litmus_lock* l); | ||
47 | struct litmus_lock* kfmlp_new(struct litmus_lock_ops*, void* __user arg); | ||
48 | |||
49 | #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) | ||
50 | |||
51 | struct kfmlp_queue_info | ||
52 | { | ||
53 | struct kfmlp_queue* q; | ||
54 | lt_t estimated_len; | ||
55 | int *nr_cur_users; | ||
56 | }; | ||
57 | |||
58 | struct kfmlp_affinity_ops | ||
59 | { | ||
60 | struct kfmlp_queue* (*advise_enqueue)(struct kfmlp_affinity* aff, struct task_struct* t); | ||
61 | struct task_struct* (*advise_steal)(struct kfmlp_affinity* aff, wait_queue_t** to_steal, struct kfmlp_queue** to_steal_from); | ||
62 | void (*notify_enqueue)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t); | ||
63 | void (*notify_dequeue)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t); | ||
64 | void (*notify_acquired)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t); | ||
65 | void (*notify_freed)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t); | ||
66 | int (*replica_to_resource)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq); | ||
67 | }; | ||
68 | |||
69 | struct kfmlp_affinity | ||
70 | { | ||
71 | struct affinity_observer obs; | ||
72 | struct kfmlp_affinity_ops *ops; | ||
73 | struct kfmlp_queue_info *q_info; | ||
74 | int *nr_cur_users_on_rsrc; | ||
75 | int offset; | ||
76 | int nr_simult; | ||
77 | int nr_rsrc; | ||
78 | }; | ||
79 | |||
80 | static inline struct kfmlp_affinity* kfmlp_aff_obs_from_aff_obs(struct affinity_observer* aff_obs) | ||
81 | { | ||
82 | return container_of(aff_obs, struct kfmlp_affinity, obs); | ||
83 | } | ||
84 | |||
85 | int kfmlp_aff_obs_close(struct affinity_observer*); | ||
86 | void kfmlp_aff_obs_free(struct affinity_observer*); | ||
87 | struct affinity_observer* kfmlp_gpu_aff_obs_new(struct affinity_observer_ops*, | ||
88 | void* __user arg); | ||
89 | struct affinity_observer* kfmlp_simple_gpu_aff_obs_new(struct affinity_observer_ops*, | ||
90 | void* __user arg); | ||
91 | |||
92 | |||
93 | #endif | ||
94 | |||
95 | #endif | ||
96 | |||
97 | |||
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 338245abd6ed..1d70ab713571 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -26,6 +26,7 @@ static inline int in_list(struct list_head* list)
26 | ); | 26 | ); |
27 | } | 27 | } |
28 | 28 | ||
29 | |||
29 | struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq); | 30 | struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq); |
30 | 31 | ||
31 | #define NO_CPU 0xffffffff | 32 | #define NO_CPU 0xffffffff |
@@ -62,8 +63,12 @@ void litmus_exit_task(struct task_struct *tsk);
62 | /* job_param macros */ | 63 | /* job_param macros */ |
63 | #define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) | 64 | #define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) |
64 | #define get_deadline(t) (tsk_rt(t)->job_params.deadline) | 65 | #define get_deadline(t) (tsk_rt(t)->job_params.deadline) |
66 | #define get_period(t) (tsk_rt(t)->task_params.period) | ||
65 | #define get_release(t) (tsk_rt(t)->job_params.release) | 67 | #define get_release(t) (tsk_rt(t)->job_params.release) |
68 | #define get_lateness(t) (tsk_rt(t)->job_params.lateness) | ||
66 | 69 | ||
70 | #define effective_priority(t) ((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task) | ||
71 | #define base_priority(t) (t) | ||
67 | 72 | ||
68 | #define is_hrt(t) \ | 73 | #define is_hrt(t) \ |
69 | (tsk_rt(t)->task_params.cls == RT_CLASS_HARD) | 74 | (tsk_rt(t)->task_params.cls == RT_CLASS_HARD) |
@@ -101,10 +106,12 @@ static inline lt_t litmus_clock(void)
101 | #define earlier_deadline(a, b) (lt_before(\ | 106 | #define earlier_deadline(a, b) (lt_before(\ |
102 | (a)->rt_param.job_params.deadline,\ | 107 | (a)->rt_param.job_params.deadline,\ |
103 | (b)->rt_param.job_params.deadline)) | 108 | (b)->rt_param.job_params.deadline)) |
109 | #define shorter_period(a, b) (lt_before(\ | ||
110 | (a)->rt_param.task_params.period,\ | ||
111 | (b)->rt_param.task_params.period)) | ||
104 | #define earlier_release(a, b) (lt_before(\ | 112 | #define earlier_release(a, b) (lt_before(\ |
105 | (a)->rt_param.job_params.release,\ | 113 | (a)->rt_param.job_params.release,\ |
106 | (b)->rt_param.job_params.release)) | 114 | (b)->rt_param.job_params.release)) |
107 | |||
108 | void preempt_if_preemptable(struct task_struct* t, int on_cpu); | 115 | void preempt_if_preemptable(struct task_struct* t, int on_cpu); |
109 | 116 | ||
110 | #ifdef CONFIG_LITMUS_LOCKING | 117 | #ifdef CONFIG_LITMUS_LOCKING |
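effective_priority() resolves a task to its priority-inheritance proxy (inh_task) when one is set, and composes with the existing comparison macros. A hedged sketch (the helper is hypothetical):

#include <litmus/litmus.h>

/* compare two tasks by the deadlines of their effective priorities,
 * i.e., honoring any inherited priority */
static int earlier_effective_deadline(struct task_struct *a,
				      struct task_struct *b)
{
	return earlier_deadline(effective_priority(a), effective_priority(b));
}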
diff --git a/include/litmus/litmus_softirq.h b/include/litmus/litmus_softirq.h
new file mode 100644
index 000000000000..1eb5ea1a6c4b
--- /dev/null
+++ b/include/litmus/litmus_softirq.h
@@ -0,0 +1,199 @@
1 | #ifndef __LITMUS_SOFTIRQ_H | ||
2 | #define __LITMUS_SOFTIRQ_H | ||
3 | |||
4 | #include <linux/interrupt.h> | ||
5 | #include <linux/workqueue.h> | ||
6 | |||
7 | /* | ||
8 | Threaded tasklet handling for Litmus. Tasklets | ||
9 | are scheduled with the priority of the tasklet's | ||
10 | owner---that is, the RT task on whose behalf the tasklet | ||
11 | runs. | ||
12 | |||
13 | Tasklets are currently scheduled in FIFO order with | ||
14 | NO priority inheritance for "blocked" tasklets. | ||
15 | |||
16 | klitirqd assumes the priority of the owner of the | ||
17 | tasklet when the tasklet is next to execute. | ||
18 | |||
19 | Currently, hi-tasklets are scheduled before | ||
20 | low-tasklets, regardless of the priority of the low-tasklets. | ||
21 | Likewise, low-tasklets are scheduled before work | ||
22 | queue objects. This priority inversion probably needs | ||
23 | to be fixed, though it is not an issue in our work with | ||
24 | GPUs, since GPUs (and their associated klitirqds) are owned | ||
25 | for exclusive time periods, so no inversions can | ||
26 | occur. | ||
27 | */ | ||
28 | |||
29 | |||
30 | |||
31 | #define NR_LITMUS_SOFTIRQD CONFIG_NR_LITMUS_SOFTIRQD | ||
32 | |||
33 | /* Spawns NR_LITMUS_SOFTIRQD klitirqd daemons. | ||
34 | Actual launch of threads is deferred to kworker's | ||
35 | workqueue, so daemons will likely not be immediately | ||
36 | running when this function returns, though the required | ||
37 | data will be initialized. | ||
38 | |||
39 | @affinity_set: an array expressing the processor affinity | ||
40 | for each of the NR_LITMUS_SOFTIRQD daemons. May be set | ||
41 | to NULL for global scheduling. | ||
42 | |||
43 | - Examples - | ||
44 | 8-CPU system with two CPU clusters: | ||
45 | affinity[] = {0, 0, 0, 0, 3, 3, 3, 3} | ||
46 | NOTE: Daemons are not actually bound to the specified CPU, but | ||
47 | rather to the cluster in which the CPU resides. | ||
48 | |||
49 | 8-CPU system, partitioned: | ||
50 | affinity[] = {0, 1, 2, 3, 4, 5, 6, 7} | ||
51 | |||
52 | FIXME: change array to a CPU topology or array of cpumasks | ||
53 | |||
54 | */ | ||
55 | void spawn_klitirqd(int* affinity); | ||
56 | |||
57 | |||
58 | /* Raises a flag to tell klitirqds to terminate. | ||
59 | Termination is async, so some threads may be running | ||
60 | after function return. */ | ||
61 | void kill_klitirqd(void); | ||
62 | |||
63 | |||
64 | /* Returns 1 if all NR_LITMUS_SOFTIRQD klitirqd threads are ready | ||
65 | to handle tasklets. 0, otherwise.*/ | ||
66 | int klitirqd_is_ready(void); | ||
67 | |||
68 | /* Returns 1 if no NR_LITMUS_SOFTIRQD klitirqd threads are ready | ||
69 | to handle tasklets. 0, otherwise.*/ | ||
70 | int klitirqd_is_dead(void); | ||
71 | |||
72 | /* Flushes all pending work out to the OS for regular | ||
73 | * tasklet/work processing of the specified 'owner' | ||
74 | * | ||
75 | * PRECOND: klitirqd_thread must have a clear entry | ||
76 | * in the GPU registry, otherwise this call will become | ||
77 | * a no-op as work will loop back to the klitirqd_thread. | ||
78 | * | ||
79 | * Pass NULL for owner to flush ALL pending items. | ||
80 | */ | ||
81 | void flush_pending(struct task_struct* klitirqd_thread, | ||
82 | struct task_struct* owner); | ||
83 | |||
84 | struct task_struct* get_klitirqd(unsigned int k_id); | ||
85 | |||
86 | |||
87 | extern int __litmus_tasklet_schedule( | ||
88 | struct tasklet_struct *t, | ||
89 | unsigned int k_id); | ||
90 | |||
91 | /* schedule a tasklet on klitirqd #k_id */ | ||
92 | static inline int litmus_tasklet_schedule( | ||
93 | struct tasklet_struct *t, | ||
94 | unsigned int k_id) | ||
95 | { | ||
96 | int ret = 0; | ||
97 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | ||
98 | ret = __litmus_tasklet_schedule(t, k_id); | ||
99 | return(ret); | ||
100 | } | ||
101 | |||
102 | /* for use by __tasklet_schedule() */ | ||
103 | static inline int _litmus_tasklet_schedule( | ||
104 | struct tasklet_struct *t, | ||
105 | unsigned int k_id) | ||
106 | { | ||
107 | return(__litmus_tasklet_schedule(t, k_id)); | ||
108 | } | ||
109 | |||
110 | |||
111 | |||
112 | |||
113 | extern int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, | ||
114 | unsigned int k_id); | ||
115 | |||
116 | /* schedule a hi tasklet on klitirqd #k_id */ | ||
117 | static inline int litmus_tasklet_hi_schedule(struct tasklet_struct *t, | ||
118 | unsigned int k_id) | ||
119 | { | ||
120 | int ret = 0; | ||
121 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | ||
122 | ret = __litmus_tasklet_hi_schedule(t, k_id); | ||
123 | return(ret); | ||
124 | } | ||
125 | |||
126 | /* for use by __tasklet_hi_schedule() */ | ||
127 | static inline int _litmus_tasklet_hi_schedule(struct tasklet_struct *t, | ||
128 | unsigned int k_id) | ||
129 | { | ||
130 | return(__litmus_tasklet_hi_schedule(t, k_id)); | ||
131 | } | ||
132 | |||
133 | |||
134 | |||
135 | |||
136 | |||
137 | extern int __litmus_tasklet_hi_schedule_first( | ||
138 | struct tasklet_struct *t, | ||
139 | unsigned int k_id); | ||
140 | |||
141 | /* schedule a hi tasklet on klitirqd #k_id on next go-around */ | ||
142 | /* PRECONDITION: Interrupts must be disabled. */ | ||
143 | static inline int litmus_tasklet_hi_schedule_first( | ||
144 | struct tasklet_struct *t, | ||
145 | unsigned int k_id) | ||
146 | { | ||
147 | int ret = 0; | ||
148 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | ||
149 | ret = __litmus_tasklet_hi_schedule_first(t, k_id); | ||
150 | return(ret); | ||
151 | } | ||
152 | |||
153 | /* for use by __tasklet_hi_schedule_first() */ | ||
154 | static inline int _litmus_tasklet_hi_schedule_first( | ||
155 | struct tasklet_struct *t, | ||
156 | unsigned int k_id) | ||
157 | { | ||
158 | return(__litmus_tasklet_hi_schedule_first(t, k_id)); | ||
159 | } | ||
160 | |||
161 | |||
162 | |||
163 | ////////////// | ||
164 | |||
165 | extern int __litmus_schedule_work( | ||
166 | struct work_struct* w, | ||
167 | unsigned int k_id); | ||
168 | |||
169 | static inline int litmus_schedule_work( | ||
170 | struct work_struct* w, | ||
171 | unsigned int k_id) | ||
172 | { | ||
173 | return(__litmus_schedule_work(w, k_id)); | ||
174 | } | ||
175 | |||
176 | |||
177 | |||
178 | ///////////// mutex operations for client threads. | ||
179 | |||
180 | void down_and_set_stat(struct task_struct* t, | ||
181 | enum klitirqd_sem_status to_set, | ||
182 | struct mutex* sem); | ||
183 | |||
184 | void __down_and_reset_and_set_stat(struct task_struct* t, | ||
185 | enum klitirqd_sem_status to_reset, | ||
186 | enum klitirqd_sem_status to_set, | ||
187 | struct mutex* sem); | ||
188 | |||
189 | void up_and_set_stat(struct task_struct* t, | ||
190 | enum klitirqd_sem_status to_set, | ||
191 | struct mutex* sem); | ||
192 | |||
193 | |||
194 | |||
195 | void release_klitirqd_lock(struct task_struct* t); | ||
196 | |||
197 | int reacquire_klitirqd_lock(struct task_struct* t); | ||
198 | |||
199 | #endif | ||
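A plausible dispatch path, sketched: hand a tasklet to a klitirqd daemon once the daemons are up, otherwise fall back to normal softirq handling (the wrapper is hypothetical; not part of this patch):

#include <litmus/litmus_softirq.h>

static void dispatch_tasklet(struct tasklet_struct *t, unsigned int k_id)
{
	if (klitirqd_is_ready())
		litmus_tasklet_schedule(t, k_id);  /* run via klitirqd #k_id */
	else
		tasklet_schedule(t);               /* regular softirq path */
}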
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 4d7b870cb443..296bbf6f7af0 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -1,28 +1,160 @@
1 | #ifndef LITMUS_LOCKING_H | 1 | #ifndef LITMUS_LOCKING_H |
2 | #define LITMUS_LOCKING_H | 2 | #define LITMUS_LOCKING_H |
3 | 3 | ||
4 | #include <linux/list.h> | ||
5 | |||
4 | struct litmus_lock_ops; | 6 | struct litmus_lock_ops; |
5 | 7 | ||
8 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
9 | struct nested_info | ||
10 | { | ||
11 | struct litmus_lock *lock; | ||
12 | struct task_struct *hp_waiter_eff_prio; | ||
13 | struct task_struct **hp_waiter_ptr; | ||
14 | struct binheap_node hp_binheap_node; | ||
15 | }; | ||
16 | |||
17 | static inline struct task_struct* top_priority(struct binheap* handle) { | ||
18 | if(!binheap_empty(handle)) { | ||
19 | return (struct task_struct*)(binheap_top_entry(handle, struct nested_info, hp_binheap_node)->hp_waiter_eff_prio); | ||
20 | } | ||
21 | return NULL; | ||
22 | } | ||
23 | |||
24 | void print_hp_waiters(struct binheap_node* n, int depth); | ||
25 | #endif | ||
26 | |||
27 | |||
6 | /* Generic base struct for LITMUS^RT userspace semaphores. | 28 | /* Generic base struct for LITMUS^RT userspace semaphores. |
7 | * This structure should be embedded in protocol-specific semaphores. | 29 | * This structure should be embedded in protocol-specific semaphores. |
8 | */ | 30 | */ |
9 | struct litmus_lock { | 31 | struct litmus_lock { |
10 | struct litmus_lock_ops *ops; | 32 | struct litmus_lock_ops *ops; |
11 | int type; | 33 | int type; |
34 | |||
35 | int ident; | ||
36 | |||
37 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
38 | struct nested_info nest; | ||
39 | //#ifdef CONFIG_DEBUG_SPINLOCK | ||
40 | char cheat_lockdep[2]; | ||
41 | struct lock_class_key key; | ||
42 | //#endif | ||
43 | #endif | ||
12 | }; | 44 | }; |
13 | 45 | ||
46 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
47 | |||
48 | #define MAX_DGL_SIZE CONFIG_LITMUS_MAX_DGL_SIZE | ||
49 | |||
50 | typedef struct dgl_wait_state { | ||
51 | struct task_struct *task; /* task waiting on DGL */ | ||
52 | struct litmus_lock *locks[MAX_DGL_SIZE]; /* requested locks in DGL */ | ||
53 | int size; /* size of the DGL */ | ||
54 | int nr_remaining; /* nr locks remaining before DGL is complete */ | ||
55 | int last_primary; /* index lock in locks[] that has active priority */ | ||
56 | wait_queue_t wq_nodes[MAX_DGL_SIZE]; | ||
57 | } dgl_wait_state_t; | ||
58 | |||
59 | void wake_or_wait_on_next_lock(dgl_wait_state_t *dgl_wait); | ||
60 | void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/); | ||
61 | |||
62 | void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait); | ||
63 | int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key); | ||
64 | void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task); | ||
65 | #endif | ||
66 | |||
67 | typedef int (*lock_op_t)(struct litmus_lock *l); | ||
68 | typedef lock_op_t lock_close_t; | ||
69 | typedef lock_op_t lock_lock_t; | ||
70 | typedef lock_op_t lock_unlock_t; | ||
71 | |||
72 | typedef int (*lock_open_t)(struct litmus_lock *l, void* __user arg); | ||
73 | typedef void (*lock_free_t)(struct litmus_lock *l); | ||
74 | |||
14 | struct litmus_lock_ops { | 75 | struct litmus_lock_ops { |
15 | /* Current task tries to obtain / drop a reference to a lock. | 76 | /* Current task tries to obtain / drop a reference to a lock. |
16 | * Optional methods, allowed by default. */ | 77 | * Optional methods, allowed by default. */ |
17 | int (*open)(struct litmus_lock*, void* __user); | 78 | lock_open_t open; |
18 | int (*close)(struct litmus_lock*); | 79 | lock_close_t close; |
19 | 80 | ||
20 | /* Current tries to lock/unlock this lock (mandatory methods). */ | 81 | /* Current tries to lock/unlock this lock (mandatory methods). */ |
21 | int (*lock)(struct litmus_lock*); | 82 | lock_lock_t lock; |
22 | int (*unlock)(struct litmus_lock*); | 83 | lock_unlock_t unlock; |
23 | 84 | ||
24 | /* The lock is no longer being referenced (mandatory method). */ | 85 | /* The lock is no longer being referenced (mandatory method). */ |
25 | void (*deallocate)(struct litmus_lock*); | 86 | lock_free_t deallocate; |
87 | |||
88 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
89 | void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); | ||
90 | void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); | ||
91 | #endif | ||
92 | |||
93 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
94 | raw_spinlock_t* (*get_dgl_spin_lock)(struct litmus_lock *l); | ||
95 | int (*dgl_lock)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node); | ||
96 | int (*is_owner)(struct litmus_lock *l, struct task_struct *t); | ||
97 | void (*enable_priority)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait); | ||
98 | #endif | ||
26 | }; | 99 | }; |
27 | 100 | ||
101 | |||
102 | /* | ||
103 | Nested inheritance can be achieved with fine-grain locking when there is | ||
104 | no need for DGL support, presuming locks are acquired in a partial order | ||
105 | (no cycles!). However, DGLs allow locks to be acquired in any order. This | ||
106 | makes nested inheritance very difficult to realize (we do not yet | ||
107 | know a solution) with fine-grain locks, so we use a big lock instead. | ||
108 | |||
109 | Code contains both fine-grain and coarse-grain methods together, side-by-side. | ||
110 | Each lock operation *IS NOT* surrounded by ifdef/endif to help make code more | ||
111 | readable. However, this leads to the odd situation where both code paths | ||
112 | appear together in code as if they were both active together. | ||
113 | |||
114 | THIS IS NOT REALLY THE CASE! ONLY ONE CODE PATH IS ACTUALLY ACTIVE! | ||
115 | |||
116 | Example: | ||
117 | lock_global_irqsave(coarseLock, flags); | ||
118 | lock_fine_irqsave(fineLock, flags); | ||
119 | |||
120 | Reality (coarse): | ||
121 | lock_global_irqsave(coarseLock, flags); | ||
122 | //lock_fine_irqsave(fineLock, flags); | ||
123 | |||
124 | Reality (fine): | ||
125 | //lock_global_irqsave(coarseLock, flags); | ||
126 | lock_fine_irqsave(fineLock, flags); | ||
127 | |||
128 | Be careful when you read code involving nested inheritance. | ||
129 | */ | ||
130 | #if defined(CONFIG_LITMUS_DGL_SUPPORT) | ||
131 | /* DGL requires a big lock to implement nested inheritance */ | ||
132 | #define lock_global_irqsave(lock, flags) raw_spin_lock_irqsave((lock), (flags)) | ||
133 | #define lock_global(lock) raw_spin_lock((lock)) | ||
134 | #define unlock_global_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags)) | ||
135 | #define unlock_global(lock) raw_spin_unlock((lock)) | ||
136 | |||
137 | /* fine-grain locking are no-ops with DGL support */ | ||
138 | #define lock_fine_irqsave(lock, flags) | ||
139 | #define lock_fine(lock) | ||
140 | #define unlock_fine_irqrestore(lock, flags) | ||
141 | #define unlock_fine(lock) | ||
142 | |||
143 | #elif defined(CONFIG_LITMUS_NESTED_LOCKING) | ||
144 | /* Use fine-grain locking when DGLs are disabled. */ | ||
145 | /* global locking are no-ops without DGL support */ | ||
146 | #define lock_global_irqsave(lock, flags) | ||
147 | #define lock_global(lock) | ||
148 | #define unlock_global_irqrestore(lock, flags) | ||
149 | #define unlock_global(lock) | ||
150 | |||
151 | #define lock_fine_irqsave(lock, flags) raw_spin_lock_irqsave((lock), (flags)) | ||
152 | #define lock_fine(lock) raw_spin_lock((lock)) | ||
153 | #define unlock_fine_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags)) | ||
154 | #define unlock_fine(lock) raw_spin_unlock((lock)) | ||
155 | |||
28 | #endif | 156 | #endif |
157 | |||
158 | |||
159 | #endif | ||
160 | |||
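To make the side-by-side convention above concrete, here is how a lock operation reads when written with both macro families; depending on the configuration, exactly one macro of each pair expands to a real spinlock operation (a sketch only; the lock names are hypothetical):

#include <linux/spinlock.h>
#include <litmus/locking.h>

static int example_lock_op(raw_spinlock_t *dgl_lock, raw_spinlock_t *fine_lock)
{
	unsigned long flags;

	lock_global_irqsave(dgl_lock, flags);	/* active only with DGL support */
	lock_fine_irqsave(fine_lock, flags);	/* active only without DGL support */

	/* ... inspect/update wait queues and inheritance state ... */

	unlock_fine_irqrestore(fine_lock, flags);
	unlock_global_irqrestore(dgl_lock, flags);
	return 0;
}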
diff --git a/include/litmus/nvidia_info.h b/include/litmus/nvidia_info.h
new file mode 100644
index 000000000000..97c9577141db
--- /dev/null
+++ b/include/litmus/nvidia_info.h
@@ -0,0 +1,46 @@
1 | #ifndef __LITMUS_NVIDIA_H | ||
2 | #define __LITMUS_NVIDIA_H | ||
3 | |||
4 | #include <linux/interrupt.h> | ||
5 | |||
6 | |||
7 | #include <litmus/litmus_softirq.h> | ||
8 | |||
9 | |||
10 | //#define NV_DEVICE_NUM NR_LITMUS_SOFTIRQD | ||
11 | #define NV_DEVICE_NUM CONFIG_NV_DEVICE_NUM | ||
12 | #define NV_MAX_SIMULT_USERS CONFIG_NV_MAX_SIMULT_USERS | ||
13 | |||
14 | int init_nvidia_info(void); | ||
15 | void shutdown_nvidia_info(void); | ||
16 | |||
17 | int is_nvidia_func(void* func_addr); | ||
18 | |||
19 | void dump_nvidia_info(const struct tasklet_struct *t); | ||
20 | |||
21 | |||
22 | // Returns the Nvidia device # associated with provided tasklet and work_struct. | ||
23 | u32 get_tasklet_nv_device_num(const struct tasklet_struct *t); | ||
24 | u32 get_work_nv_device_num(const struct work_struct *t); | ||
25 | |||
26 | |||
27 | int init_nv_device_reg(void); | ||
28 | //int get_nv_device_id(struct task_struct* owner); | ||
29 | |||
30 | |||
31 | int reg_nv_device(int reg_device_id, int register_device, struct task_struct *t); | ||
32 | |||
33 | struct task_struct* get_nv_max_device_owner(u32 target_device_id); | ||
34 | //int is_nv_device_owner(u32 target_device_id); | ||
35 | |||
36 | void lock_nv_registry(u32 reg_device_id, unsigned long* flags); | ||
37 | void unlock_nv_registry(u32 reg_device_id, unsigned long* flags); | ||
38 | |||
39 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
40 | void pai_check_priority_increase(struct task_struct *t, int reg_device_id); | ||
41 | void pai_check_priority_decrease(struct task_struct *t, int reg_device_id); | ||
42 | #endif | ||
43 | |||
44 | //void increment_nv_int_count(u32 device); | ||
45 | |||
46 | #endif | ||
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
index 380b886d78ff..8f3a9ca2d4e3 100644
--- a/include/litmus/preempt.h
+++ b/include/litmus/preempt.h
@@ -26,12 +26,12 @@ const char* sched_state_name(int s);
26 | (x), #x, __FUNCTION__); \ | 26 | (x), #x, __FUNCTION__); \ |
27 | } while (0); | 27 | } while (0); |
28 | 28 | ||
29 | //#define TRACE_SCHED_STATE_CHANGE(x, y, cpu) /* ignore */ | ||
29 | #define TRACE_SCHED_STATE_CHANGE(x, y, cpu) \ | 30 | #define TRACE_SCHED_STATE_CHANGE(x, y, cpu) \ |
30 | TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n", \ | 31 | TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n", \ |
31 | cpu, (x), sched_state_name(x), \ | 32 | cpu, (x), sched_state_name(x), \ |
32 | (y), sched_state_name(y)) | 33 | (y), sched_state_name(y)) |
33 | 34 | ||
34 | |||
35 | typedef enum scheduling_state { | 35 | typedef enum scheduling_state { |
36 | TASK_SCHEDULED = (1 << 0), /* The currently scheduled task is the one that | 36 | TASK_SCHEDULED = (1 << 0), /* The currently scheduled task is the one that |
37 | * should be scheduled, and the processor does not | 37 | * should be scheduled, and the processor does not |
diff --git a/include/litmus/rsm_lock.h b/include/litmus/rsm_lock.h
new file mode 100644
index 000000000000..a15189683de4
--- /dev/null
+++ b/include/litmus/rsm_lock.h
@@ -0,0 +1,54 @@
1 | #ifndef LITMUS_RSM_H | ||
2 | #define LITMUS_RSM_H | ||
3 | |||
4 | #include <litmus/litmus.h> | ||
5 | #include <litmus/binheap.h> | ||
6 | #include <litmus/locking.h> | ||
7 | |||
8 | /* struct for semaphore with priority inheritance */ | ||
9 | struct rsm_mutex { | ||
10 | struct litmus_lock litmus_lock; | ||
11 | |||
12 | /* current resource holder */ | ||
13 | struct task_struct *owner; | ||
14 | |||
15 | /* highest-priority waiter */ | ||
16 | struct task_struct *hp_waiter; | ||
17 | |||
18 | /* FIFO queue of waiting tasks -- for now. time stamp in the future. */ | ||
19 | wait_queue_head_t wait; | ||
20 | |||
21 | /* we do some nesting within spinlocks, so we can't use the normal | ||
22 | sleeplocks found in wait_queue_head_t. */ | ||
23 | raw_spinlock_t lock; | ||
24 | }; | ||
25 | |||
26 | static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock) | ||
27 | { | ||
28 | return container_of(lock, struct rsm_mutex, litmus_lock); | ||
29 | } | ||
30 | |||
31 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
32 | int rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t); | ||
33 | int rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node); | ||
34 | void rsm_mutex_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait); | ||
35 | #endif | ||
36 | |||
37 | void rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, | ||
38 | struct task_struct* t, | ||
39 | raw_spinlock_t* to_unlock, | ||
40 | unsigned long irqflags); | ||
41 | |||
42 | void rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | ||
43 | struct task_struct* t, | ||
44 | raw_spinlock_t* to_unlock, | ||
45 | unsigned long irqflags); | ||
46 | |||
47 | int rsm_mutex_lock(struct litmus_lock* l); | ||
48 | int rsm_mutex_unlock(struct litmus_lock* l); | ||
49 | int rsm_mutex_close(struct litmus_lock* l); | ||
50 | void rsm_mutex_free(struct litmus_lock* l); | ||
51 | struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops*); | ||
52 | |||
53 | |||
54 | #endif | ||
\ No newline at end of file
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 21430623a940..02b750a9570b 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -5,6 +5,8 @@
5 | #ifndef _LINUX_RT_PARAM_H_ | 5 | #ifndef _LINUX_RT_PARAM_H_ |
6 | #define _LINUX_RT_PARAM_H_ | 6 | #define _LINUX_RT_PARAM_H_ |
7 | 7 | ||
8 | #include <litmus/fpmath.h> | ||
9 | |||
8 | /* Litmus time type. */ | 10 | /* Litmus time type. */ |
9 | typedef unsigned long long lt_t; | 11 | typedef unsigned long long lt_t; |
10 | 12 | ||
@@ -30,9 +32,15 @@ typedef enum {
30 | typedef enum { | 32 | typedef enum { |
31 | NO_ENFORCEMENT, /* job may overrun unhindered */ | 33 | NO_ENFORCEMENT, /* job may overrun unhindered */ |
32 | QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */ | 34 | QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */ |
33 | PRECISE_ENFORCEMENT /* budgets are enforced with hrtimers */ | 35 | PRECISE_ENFORCEMENT, /* budgets are enforced with hrtimers */ |
34 | } budget_policy_t; | 36 | } budget_policy_t; |
35 | 37 | ||
38 | typedef enum { | ||
39 | NO_SIGNALS, /* job receives no signals when it exhausts its budget */ | ||
40 | QUANTUM_SIGNALS, /* budget signals are only sent on quantum boundaries */ | ||
41 | PRECISE_SIGNALS, /* budget signals are triggered with hrtimers */ | ||
42 | } budget_signal_policy_t; | ||
43 | |||
36 | /* We use the common priority interpretation "lower index == higher priority", | 44 | /* We use the common priority interpretation "lower index == higher priority", |
37 | * which is commonly used in fixed-priority schedulability analysis papers. | 45 | * which is commonly used in fixed-priority schedulability analysis papers. |
38 | * So, a numerically lower priority value implies higher scheduling priority, | 46 | * So, a numerically lower priority value implies higher scheduling priority, |
@@ -62,6 +70,7 @@ struct rt_task { | |||
62 | unsigned int priority; | 70 | unsigned int priority; |
63 | task_class_t cls; | 71 | task_class_t cls; |
64 | budget_policy_t budget_policy; /* ignored by pfair */ | 72 | budget_policy_t budget_policy; /* ignored by pfair */ |
73 | budget_signal_policy_t budget_signal_policy; /* currently ignored by pfair */ | ||
65 | }; | 74 | }; |
66 | 75 | ||
67 | union np_flag { | 76 | union np_flag { |
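The new budget_signal_policy composes with the existing budget_policy, so a task can have its budget precisely enforced and also receive a signal on exhaustion. A hedged user-space sketch (exec_cost and period are assumed to be the other lt_t fields of struct rt_task, expressed in nanoseconds):

struct rt_task param = {
    .exec_cost            = 10000000ULL,        /* 10 ms budget */
    .period               = 100000000ULL,       /* 100 ms period */
    .budget_policy        = PRECISE_ENFORCEMENT,
    .budget_signal_policy = PRECISE_SIGNALS,    /* hrtimer-driven SIG_BUDGET */
};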
@@ -74,6 +83,19 @@ union np_flag { | |||
74 | } np; | 83 | } np; |
75 | }; | 84 | }; |
76 | 85 | ||
86 | struct affinity_observer_args | ||
87 | { | ||
88 | int lock_od; | ||
89 | }; | ||
90 | |||
91 | struct gpu_affinity_observer_args | ||
92 | { | ||
93 | struct affinity_observer_args obs; | ||
94 | int replica_to_gpu_offset; | ||
95 | int nr_simult_users; | ||
96 | int relaxed_rules; | ||
97 | }; | ||
98 | |||
77 | /* The definition of the data that is shared between the kernel and real-time | 99 | /* The definition of the data that is shared between the kernel and real-time |
78 | * tasks via a shared page (see litmus/ctrldev.c). | 100 | * tasks via a shared page (see litmus/ctrldev.c). |
79 | * | 101 | * |
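To illustrate how the observer arguments nest, a hypothetical initialization (all values and the lock descriptor name are made up):

struct gpu_affinity_observer_args args = {
    .obs                   = { .lock_od = gpu_lock_od }, /* od of the observed lock */
    .replica_to_gpu_offset = 0,  /* replica 0 maps to GPU 0 */
    .nr_simult_users       = 2,  /* two tasks may share a GPU at once */
    .relaxed_rules         = 1,
};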
@@ -97,6 +119,9 @@ struct control_page { | |||
97 | /* don't export internal data structures to user space (liblitmus) */ | 119 | /* don't export internal data structures to user space (liblitmus) */ |
98 | #ifdef __KERNEL__ | 120 | #ifdef __KERNEL__ |
99 | 121 | ||
122 | #include <litmus/binheap.h> | ||
123 | #include <linux/semaphore.h> | ||
124 | |||
100 | struct _rt_domain; | 125 | struct _rt_domain; |
101 | struct bheap_node; | 126 | struct bheap_node; |
102 | struct release_heap; | 127 | struct release_heap; |
@@ -110,6 +135,12 @@ struct rt_job { | |||
110 | /* How much service has this job received so far? */ | 135 | /* How much service has this job received so far? */ |
111 | lt_t exec_time; | 136 | lt_t exec_time; |
112 | 137 | ||
138 | /* By how much did the prior job miss its deadline? | ||
139 | * Lateness differs from tardiness in that it may | ||
140 | * be negative (when the job finishes before its deadline). | ||
141 | */ | ||
142 | long long lateness; | ||
143 | |||
113 | /* Which job is this. This is used to let user space | 144 | /* Which job is this. This is used to let user space |
114 | * specify which job to wait for, which is important if jobs | 145 | * specify which job to wait for, which is important if jobs |
115 | * overrun. If we just call sys_sleep_next_period() then we | 146 | * overrun. If we just call sys_sleep_next_period() then we |
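Concretely, lateness is signed where tardiness is clamped at zero; at job completion the bookkeeping would amount to something like this (variable names are stand-ins):

long long lateness  = (long long)completion_time - (long long)deadline;
lt_t      tardiness = (lateness > 0) ? (lt_t)lateness : 0; /* clamp at 0 */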
@@ -118,10 +149,54 @@ struct rt_job { | |||
118 | * Increase this sequence number when a job is released. | 149 | * Increase this sequence number when a job is released. |
119 | */ | 150 | */ |
120 | unsigned int job_no; | 151 | unsigned int job_no; |
152 | |||
153 | /* bits: | ||
154 | * 0th: Set if a budget exhaustion signal has already been sent for | ||
155 | * the current job. */ | ||
156 | unsigned long flags; | ||
121 | }; | 157 | }; |
122 | 158 | ||
159 | #define RT_JOB_SIG_BUDGET_SENT 0 | ||
160 | |||
123 | struct pfair_param; | 161 | struct pfair_param; |
124 | 162 | ||
163 | enum klitirqd_sem_status | ||
164 | { | ||
165 | NEED_TO_REACQUIRE, | ||
166 | REACQUIRING, | ||
167 | NOT_HELD, | ||
168 | HELD | ||
169 | }; | ||
170 | |||
171 | typedef enum gpu_migration_dist | ||
172 | { | ||
173 | // TODO: Make these distances scale with NR_NVIDIA_GPUS | ||
174 | MIG_LOCAL = 0, | ||
175 | MIG_NEAR = 1, | ||
176 | MIG_MED = 2, | ||
177 | MIG_FAR = 3, // 8 GPUs in a binary tree hierarchy | ||
178 | MIG_NONE = 4, | ||
179 | |||
180 | MIG_LAST = MIG_NONE | ||
181 | } gpu_migration_dist_t; | ||
182 | |||
183 | typedef struct feedback_est{ | ||
184 | fp_t est; | ||
185 | fp_t accum_err; | ||
186 | } feedback_est_t; | ||
187 | |||
188 | |||
189 | #define AVG_EST_WINDOW_SIZE 20 | ||
190 | |||
191 | typedef struct avg_est{ | ||
192 | lt_t history[AVG_EST_WINDOW_SIZE]; | ||
193 | uint16_t count; | ||
194 | uint16_t idx; | ||
195 | lt_t sum; | ||
196 | lt_t std; | ||
197 | lt_t avg; | ||
198 | } avg_est_t; | ||
199 | |||
125 | /* RT task parameters for scheduling extensions | 200 | /* RT task parameters for scheduling extensions |
126 | * These parameters are inherited during clone and therefore must | 201 | * These parameters are inherited during clone and therefore must |
127 | * be explicitly set up before the task set is launched. | 202 | * be explicitly set up before the task set is launched. |
@@ -136,6 +211,50 @@ struct rt_param { | |||
136 | /* is the task present? (true if it can be scheduled) */ | 211 | /* is the task present? (true if it can be scheduled) */ |
137 | unsigned int present:1; | 212 | unsigned int present:1; |
138 | 213 | ||
214 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
215 | /* proxy threads have minimum priority by default */ | ||
216 | unsigned int is_proxy_thread:1; | ||
217 | |||
218 | /* Pointer to the klitirqd thread currently working on this | ||
219 | task_struct's behalf. Only set by the task pointed | ||
220 | to by klitirqd. | ||
221 | |||
222 | Pointer is only valid if is_proxy_thread == 0. | ||
223 | */ | ||
224 | struct task_struct* cur_klitirqd; | ||
225 | |||
226 | /* Used to implement mutual exclusion between job | ||
227 | * and klitirqd execution. A job must always hold | ||
228 | * its klitirqd_sem to execute. A klitirqd instance | ||
229 | * must hold the semaphore before executing on behalf | ||
230 | * of a job. | ||
231 | */ | ||
232 | struct mutex klitirqd_sem; | ||
233 | |||
234 | /* Status of the held klitirqd_sem, even if the held klitirqd_sem | ||
235 | belongs to another task (only proxy threads do this). | ||
236 | */ | ||
237 | atomic_t klitirqd_sem_stat; | ||
238 | #endif | ||
239 | |||
240 | #ifdef CONFIG_LITMUS_NVIDIA | ||
241 | /* number of top-half interrupts handled on behalf of current job */ | ||
242 | atomic_t nv_int_count; | ||
243 | long unsigned int held_gpus; // bitmap of held GPUs. | ||
244 | |||
245 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
246 | avg_est_t gpu_migration_est[MIG_LAST+1]; | ||
247 | |||
248 | gpu_migration_dist_t gpu_migration; | ||
249 | int last_gpu; | ||
250 | |||
251 | lt_t accum_gpu_time; | ||
252 | lt_t gpu_time_stamp; | ||
253 | |||
254 | unsigned int suspend_gpu_tracker_on_block:1; | ||
255 | #endif | ||
256 | #endif | ||
257 | |||
139 | #ifdef CONFIG_LITMUS_LOCKING | 258 | #ifdef CONFIG_LITMUS_LOCKING |
140 | /* Is the task being priority-boosted by a locking protocol? */ | 259 | /* Is the task being priority-boosted by a locking protocol? */ |
141 | unsigned int priority_boosted:1; | 260 | unsigned int priority_boosted:1; |
@@ -155,11 +274,20 @@ struct rt_param { | |||
155 | * could point to self if PI does not result in | 274 | * could point to self if PI does not result in |
156 | * an increased task priority. | 275 | * an increased task priority. |
157 | */ | 276 | */ |
158 | struct task_struct* inh_task; | 277 | struct task_struct* inh_task; |
278 | |||
279 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
280 | raw_spinlock_t hp_blocked_tasks_lock; | ||
281 | struct binheap hp_blocked_tasks; | ||
282 | |||
283 | /* pointer to the lock upon which the task is currently blocked */ | ||
284 | struct litmus_lock* blocked_lock; | ||
285 | #endif | ||
159 | 286 | ||
160 | 287 | ||
161 | struct task_struct* hp_group; | 288 | struct task_struct* hp_group; |
162 | unsigned int is_slave:1; | 289 | unsigned int is_slave:1; |
290 | unsigned int has_slaves:1; | ||
163 | 291 | ||
164 | 292 | ||
165 | #ifdef CONFIG_NP_SECTION | 293 | #ifdef CONFIG_NP_SECTION |
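The avg_est_t fields suggest a fixed-size sliding window over recent observations. A plausible update routine follows; the logic is an assumption, not taken from this patch:

static void avg_est_add(avg_est_t* est, lt_t observed)
{
    if (est->count < AVG_EST_WINDOW_SIZE)
        est->count++;
    else
        est->sum -= est->history[est->idx]; /* evict the oldest sample */

    est->history[est->idx] = observed;
    est->idx = (est->idx + 1) % AVG_EST_WINDOW_SIZE;
    est->sum += observed;
    est->avg = est->sum / est->count;
    /* est->std would be maintained analogously, e.g. from a running
     * sum of squared deviations. */
}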
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h index 6e7cabdddae8..24a6858b4b0b 100644 --- a/include/litmus/sched_plugin.h +++ b/include/litmus/sched_plugin.h | |||
@@ -11,6 +11,12 @@ | |||
11 | #include <litmus/locking.h> | 11 | #include <litmus/locking.h> |
12 | #endif | 12 | #endif |
13 | 13 | ||
14 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
15 | #include <litmus/kexclu_affinity.h> | ||
16 | #endif | ||
17 | |||
18 | #include <linux/interrupt.h> | ||
19 | |||
14 | /************************ setup/tear down ********************/ | 20 | /************************ setup/tear down ********************/ |
15 | 21 | ||
16 | typedef long (*activate_plugin_t) (void); | 22 | typedef long (*activate_plugin_t) (void); |
@@ -29,7 +35,6 @@ typedef struct task_struct* (*schedule_t)(struct task_struct * prev); | |||
29 | */ | 35 | */ |
30 | typedef void (*finish_switch_t)(struct task_struct *prev); | 36 | typedef void (*finish_switch_t)(struct task_struct *prev); |
31 | 37 | ||
32 | |||
33 | /********************* task state changes ********************/ | 38 | /********************* task state changes ********************/ |
34 | 39 | ||
35 | /* Called to setup a new real-time task. | 40 | /* Called to setup a new real-time task. |
@@ -58,6 +63,47 @@ typedef void (*task_exit_t) (struct task_struct *); | |||
58 | typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type, | 63 | typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type, |
59 | void* __user config); | 64 | void* __user config); |
60 | 65 | ||
66 | struct affinity_observer; | ||
67 | typedef long (*allocate_affinity_observer_t) ( | ||
68 | struct affinity_observer **aff_obs, int type, | ||
69 | void* __user config); | ||
70 | |||
71 | typedef void (*increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh); | ||
72 | typedef void (*decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh); | ||
73 | typedef void (*nested_increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh, | ||
74 | raw_spinlock_t *to_unlock, unsigned long irqflags); | ||
75 | typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh, | ||
76 | raw_spinlock_t *to_unlock, unsigned long irqflags); | ||
77 | |||
78 | typedef void (*increase_prio_klitirq_t)(struct task_struct* klitirqd, | ||
79 | struct task_struct* old_owner, | ||
80 | struct task_struct* new_owner); | ||
81 | typedef void (*decrease_prio_klitirqd_t)(struct task_struct* klitirqd, | ||
82 | struct task_struct* old_owner); | ||
83 | |||
84 | |||
85 | typedef int (*enqueue_pai_tasklet_t)(struct tasklet_struct* tasklet); | ||
86 | typedef void (*change_prio_pai_tasklet_t)(struct task_struct *old_prio, | ||
87 | struct task_struct *new_prio); | ||
88 | typedef void (*run_tasklets_t)(struct task_struct* next); | ||
89 | |||
90 | typedef raw_spinlock_t* (*get_dgl_spinlock_t) (struct task_struct *t); | ||
91 | |||
92 | |||
93 | typedef int (*higher_prio_t)(struct task_struct* a, struct task_struct* b); | ||
94 | |||
95 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
96 | |||
97 | typedef enum | ||
98 | { | ||
99 | BASE, | ||
100 | EFFECTIVE | ||
101 | } comparison_mode_t; | ||
102 | |||
103 | typedef int (*__higher_prio_t)(struct task_struct* a, comparison_mode_t a_mod, | ||
104 | struct task_struct* b, comparison_mode_t b_mod); | ||
105 | #endif | ||
106 | |||
61 | 107 | ||
62 | /********************* sys call backends ********************/ | 108 | /********************* sys call backends ********************/ |
63 | /* This function causes the caller to sleep until the next release */ | 109 | /* This function causes the caller to sleep until the next release */ |
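The new compare hook lets each plugin expose its priority order to generic code. An EDF-flavored sketch; get_deadline() is assumed to be an available helper:

/* Illustrative only: earlier absolute deadline means higher priority. */
static int demo_compare(struct task_struct* a, struct task_struct* b)
{
    return get_deadline(a) < get_deadline(b);
}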
@@ -88,14 +134,40 @@ struct sched_plugin { | |||
88 | /* task state changes */ | 134 | /* task state changes */ |
89 | admit_task_t admit_task; | 135 | admit_task_t admit_task; |
90 | 136 | ||
91 | task_new_t task_new; | 137 | task_new_t task_new; |
92 | task_wake_up_t task_wake_up; | 138 | task_wake_up_t task_wake_up; |
93 | task_block_t task_block; | 139 | task_block_t task_block; |
94 | task_exit_t task_exit; | 140 | task_exit_t task_exit; |
95 | 141 | ||
142 | higher_prio_t compare; | ||
143 | |||
96 | #ifdef CONFIG_LITMUS_LOCKING | 144 | #ifdef CONFIG_LITMUS_LOCKING |
97 | /* locking protocols */ | 145 | /* locking protocols */ |
98 | allocate_lock_t allocate_lock; | 146 | allocate_lock_t allocate_lock; |
147 | increase_prio_t increase_prio; | ||
148 | decrease_prio_t decrease_prio; | ||
149 | #endif | ||
150 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
151 | nested_increase_prio_t nested_increase_prio; | ||
152 | nested_decrease_prio_t nested_decrease_prio; | ||
153 | __higher_prio_t __compare; | ||
154 | #endif | ||
155 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
156 | get_dgl_spinlock_t get_dgl_spinlock; | ||
157 | #endif | ||
158 | |||
159 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
160 | allocate_affinity_observer_t allocate_aff_obs; | ||
161 | #endif | ||
162 | |||
163 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
164 | increase_prio_klitirq_t increase_prio_klitirqd; | ||
165 | decrease_prio_klitirqd_t decrease_prio_klitirqd; | ||
166 | #endif | ||
167 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
168 | enqueue_pai_tasklet_t enqueue_pai_tasklet; | ||
169 | change_prio_pai_tasklet_t change_prio_pai_tasklet; | ||
170 | run_tasklets_t run_tasklets; | ||
99 | #endif | 171 | #endif |
100 | } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); | 172 | } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); |
101 | 173 | ||
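A plugin then advertises whichever hooks it implements. A hedged sketch, with the plugin name, the callbacks, and the plugin_name field all assumed for illustration:

static struct sched_plugin demo_plugin __attribute__ ((__aligned__(SMP_CACHE_BYTES))) = {
    .plugin_name   = "DEMO",
    .compare       = demo_compare,
#ifdef CONFIG_LITMUS_LOCKING
    .allocate_lock = demo_allocate_lock,
    .increase_prio = demo_increase_prio,
    .decrease_prio = demo_decrease_prio,
#endif
#ifdef CONFIG_LITMUS_DGL_SUPPORT
    .get_dgl_spinlock = demo_get_dgl_spinlock,
#endif
};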
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h index 82bde8241298..7af12f49c600 100644 --- a/include/litmus/sched_trace.h +++ b/include/litmus/sched_trace.h | |||
@@ -10,13 +10,14 @@ struct st_trace_header { | |||
10 | u8 type; /* Of what type is this record? */ | 10 | u8 type; /* Of what type is this record? */ |
11 | u8 cpu; /* On which CPU was it recorded? */ | 11 | u8 cpu; /* On which CPU was it recorded? */ |
12 | u16 pid; /* PID of the task. */ | 12 | u16 pid; /* PID of the task. */ |
13 | u32 job; /* The job sequence number. */ | 13 | u32 job:24; /* The job sequence number. */ |
14 | }; | 14 | u8 extra; |
15 | } __attribute__((packed)); | ||
15 | 16 | ||
16 | #define ST_NAME_LEN 16 | 17 | #define ST_NAME_LEN 16 |
17 | struct st_name_data { | 18 | struct st_name_data { |
18 | char cmd[ST_NAME_LEN];/* The name of the executable of this process. */ | 19 | char cmd[ST_NAME_LEN];/* The name of the executable of this process. */ |
19 | }; | 20 | } __attribute__((packed)); |
20 | 21 | ||
21 | struct st_param_data { /* regular params */ | 22 | struct st_param_data { /* regular params */ |
22 | u32 wcet; | 23 | u32 wcet; |
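Packing matters because these records are written verbatim to a byte stream for offline analysis; the 24-bit job field plus the extra byte keeps the header at exactly 8 bytes (1 + 1 + 2 + 3 + 1). An illustrative build-time check:

BUILD_BUG_ON(sizeof(struct st_trace_header) != 8);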
@@ -25,30 +26,29 @@ struct st_param_data { /* regular params */ | |||
25 | u8 partition; | 26 | u8 partition; |
26 | u8 class; | 27 | u8 class; |
27 | u8 __unused[2]; | 28 | u8 __unused[2]; |
28 | }; | 29 | } __attribute__((packed)); |
29 | 30 | ||
30 | struct st_release_data { /* A job was/is going to be released. */ | 31 | struct st_release_data { /* A job was/is going to be released. */ |
31 | u64 release; /* What's the release time? */ | 32 | u64 release; /* What's the release time? */ |
32 | u64 deadline; /* By when must it finish? */ | 33 | u64 deadline; /* By when must it finish? */ |
33 | }; | 34 | } __attribute__((packed)); |
34 | 35 | ||
35 | struct st_assigned_data { /* A job was assigned to a CPU. */ | 36 | struct st_assigned_data { /* A job was assigned to a CPU. */ |
36 | u64 when; | 37 | u64 when; |
37 | u8 target; /* Where should it execute? */ | 38 | u8 target; /* Where should it execute? */ |
38 | u8 __unused[7]; | 39 | u8 __unused[7]; |
39 | }; | 40 | } __attribute__((packed)); |
40 | 41 | ||
41 | struct st_switch_to_data { /* A process was switched to on a given CPU. */ | 42 | struct st_switch_to_data { /* A process was switched to on a given CPU. */ |
42 | u64 when; /* When did this occur? */ | 43 | u64 when; /* When did this occur? */ |
43 | u32 exec_time; /* Time the current job has executed. */ | 44 | u32 exec_time; /* Time the current job has executed. */ |
44 | u8 __unused[4]; | 45 | u8 __unused[4]; |
45 | 46 | } __attribute__((packed)); | |
46 | }; | ||
47 | 47 | ||
48 | struct st_switch_away_data { /* A process was switched away from on a given CPU. */ | 48 | struct st_switch_away_data { /* A process was switched away from on a given CPU. */ |
49 | u64 when; | 49 | u64 when; |
50 | u64 exec_time; | 50 | u64 exec_time; |
51 | }; | 51 | } __attribute__((packed)); |
52 | 52 | ||
53 | struct st_completion_data { /* A job completed. */ | 53 | struct st_completion_data { /* A job completed. */ |
54 | u64 when; | 54 | u64 when; |
@@ -56,35 +56,108 @@ struct st_completion_data { /* A job completed. */ | |||
56 | * next task automatically; set to 0 otherwise. | 56 | * next task automatically; set to 0 otherwise. |
57 | */ | 57 | */ |
58 | u8 __uflags:7; | 58 | u8 __uflags:7; |
59 | u8 __unused[7]; | 59 | u16 nv_int_count; |
60 | }; | 60 | u8 __unused[5]; |
61 | } __attribute__((packed)); | ||
61 | 62 | ||
62 | struct st_block_data { /* A task blocks. */ | 63 | struct st_block_data { /* A task blocks. */ |
63 | u64 when; | 64 | u64 when; |
64 | u64 __unused; | 65 | u64 __unused; |
65 | }; | 66 | } __attribute__((packed)); |
66 | 67 | ||
67 | struct st_resume_data { /* A task resumes. */ | 68 | struct st_resume_data { /* A task resumes. */ |
68 | u64 when; | 69 | u64 when; |
69 | u64 __unused; | 70 | u64 __unused; |
70 | }; | 71 | } __attribute__((packed)); |
71 | 72 | ||
72 | struct st_action_data { | 73 | struct st_action_data { |
73 | u64 when; | 74 | u64 when; |
74 | u8 action; | 75 | u8 action; |
75 | u8 __unused[7]; | 76 | u8 __unused[7]; |
76 | }; | 77 | } __attribute__((packed)); |
77 | 78 | ||
78 | struct st_sys_release_data { | 79 | struct st_sys_release_data { |
79 | u64 when; | 80 | u64 when; |
80 | u64 release; | 81 | u64 release; |
81 | }; | 82 | } __attribute__((packed)); |
83 | |||
84 | |||
85 | struct st_tasklet_release_data { | ||
86 | u64 when; | ||
87 | u64 __unused; | ||
88 | } __attribute__((packed)); | ||
89 | |||
90 | struct st_tasklet_begin_data { | ||
91 | u64 when; | ||
92 | u16 exe_pid; | ||
93 | u8 __unused[6]; | ||
94 | } __attribute__((packed)); | ||
95 | |||
96 | struct st_tasklet_end_data { | ||
97 | u64 when; | ||
98 | u16 exe_pid; | ||
99 | u8 flushed; | ||
100 | u8 __unused[5]; | ||
101 | } __attribute__((packed)); | ||
102 | |||
103 | |||
104 | struct st_work_release_data { | ||
105 | u64 when; | ||
106 | u64 __unused; | ||
107 | } __attribute__((packed)); | ||
108 | |||
109 | struct st_work_begin_data { | ||
110 | u64 when; | ||
111 | u16 exe_pid; | ||
112 | u8 __unused[6]; | ||
113 | } __attribute__((packed)); | ||
114 | |||
115 | struct st_work_end_data { | ||
116 | u64 when; | ||
117 | u16 exe_pid; | ||
118 | u8 flushed; | ||
119 | u8 __unused[5]; | ||
120 | } __attribute__((packed)); | ||
121 | |||
122 | struct st_effective_priority_change_data { | ||
123 | u64 when; | ||
124 | u16 inh_pid; | ||
125 | u8 __unused[6]; | ||
126 | } __attribute__((packed)); | ||
127 | |||
128 | struct st_nv_interrupt_begin_data { | ||
129 | u64 when; | ||
130 | u32 device; | ||
131 | u32 serialNumber; | ||
132 | } __attribute__((packed)); | ||
133 | |||
134 | struct st_nv_interrupt_end_data { | ||
135 | u64 when; | ||
136 | u32 device; | ||
137 | u32 serialNumber; | ||
138 | } __attribute__((packed)); | ||
139 | |||
140 | struct st_prediction_err_data { | ||
141 | u64 distance; | ||
142 | u64 rel_err; | ||
143 | } __attribute__((packed)); | ||
144 | |||
145 | struct st_migration_data { | ||
146 | u64 observed; | ||
147 | u64 estimated; | ||
148 | } __attribute__((packed)); | ||
149 | |||
150 | struct migration_info { | ||
151 | u64 observed; | ||
152 | u64 estimated; | ||
153 | u8 distance; | ||
154 | } __attribute__((packed)); | ||
82 | 155 | ||
83 | #define DATA(x) struct st_ ## x ## _data x; | 156 | #define DATA(x) struct st_ ## x ## _data x; |
84 | 157 | ||
85 | typedef enum { | 158 | typedef enum { |
86 | ST_NAME = 1, /* Start at one, so that we can spot | 159 | ST_NAME = 1, /* Start at one, so that we can spot |
87 | * uninitialized records. */ | 160 | * uninitialized records. */ |
88 | ST_PARAM, | 161 | ST_PARAM, |
89 | ST_RELEASE, | 162 | ST_RELEASE, |
90 | ST_ASSIGNED, | 163 | ST_ASSIGNED, |
@@ -94,7 +167,19 @@ typedef enum { | |||
94 | ST_BLOCK, | 167 | ST_BLOCK, |
95 | ST_RESUME, | 168 | ST_RESUME, |
96 | ST_ACTION, | 169 | ST_ACTION, |
97 | ST_SYS_RELEASE | 170 | ST_SYS_RELEASE, |
171 | ST_TASKLET_RELEASE, | ||
172 | ST_TASKLET_BEGIN, | ||
173 | ST_TASKLET_END, | ||
174 | ST_WORK_RELEASE, | ||
175 | ST_WORK_BEGIN, | ||
176 | ST_WORK_END, | ||
177 | ST_EFF_PRIO_CHANGE, | ||
178 | ST_NV_INTERRUPT_BEGIN, | ||
179 | ST_NV_INTERRUPT_END, | ||
180 | |||
181 | ST_PREDICTION_ERR, | ||
182 | ST_MIGRATION, | ||
98 | } st_event_record_type_t; | 183 | } st_event_record_type_t; |
99 | 184 | ||
100 | struct st_event_record { | 185 | struct st_event_record { |
@@ -113,8 +198,20 @@ struct st_event_record { | |||
113 | DATA(resume); | 198 | DATA(resume); |
114 | DATA(action); | 199 | DATA(action); |
115 | DATA(sys_release); | 200 | DATA(sys_release); |
201 | DATA(tasklet_release); | ||
202 | DATA(tasklet_begin); | ||
203 | DATA(tasklet_end); | ||
204 | DATA(work_release); | ||
205 | DATA(work_begin); | ||
206 | DATA(work_end); | ||
207 | DATA(effective_priority_change); | ||
208 | DATA(nv_interrupt_begin); | ||
209 | DATA(nv_interrupt_end); | ||
210 | |||
211 | DATA(prediction_err); | ||
212 | DATA(migration); | ||
116 | } data; | 213 | } data; |
117 | }; | 214 | } __attribute__((packed)); |
118 | 215 | ||
119 | #undef DATA | 216 | #undef DATA |
120 | 217 | ||
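An offline consumer dispatches on header.type to select the right union member. A hypothetical user-space sketch (stream parsing omitted):

#include <stdio.h>

static void print_record(const struct st_event_record* rec)
{
    switch (rec->header.type) {
    case ST_TASKLET_BEGIN:
        printf("tasklet began at %llu (pid %u)\n",
               (unsigned long long) rec->data.tasklet_begin.when,
               rec->data.tasklet_begin.exe_pid);
        break;
    case ST_MIGRATION:
        printf("migration: observed %llu, estimated %llu\n",
               (unsigned long long) rec->data.migration.observed,
               (unsigned long long) rec->data.migration.estimated);
        break;
    default:
        break;
    }
}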
@@ -129,6 +226,8 @@ struct st_event_record { | |||
129 | ft_event1(id, callback, task) | 226 | ft_event1(id, callback, task) |
130 | #define SCHED_TRACE2(id, callback, task, xtra) \ | 227 | #define SCHED_TRACE2(id, callback, task, xtra) \ |
131 | ft_event2(id, callback, task, xtra) | 228 | ft_event2(id, callback, task, xtra) |
229 | #define SCHED_TRACE3(id, callback, task, xtra1, xtra2) \ | ||
230 | ft_event3(id, callback, task, xtra1, xtra2) | ||
132 | 231 | ||
133 | /* provide prototypes; needed on sparc64 */ | 232 | /* provide prototypes; needed on sparc64 */ |
134 | #ifndef NO_TASK_TRACE_DECLS | 233 | #ifndef NO_TASK_TRACE_DECLS |
@@ -155,12 +254,58 @@ feather_callback void do_sched_trace_action(unsigned long id, | |||
155 | feather_callback void do_sched_trace_sys_release(unsigned long id, | 254 | feather_callback void do_sched_trace_sys_release(unsigned long id, |
156 | lt_t* start); | 255 | lt_t* start); |
157 | 256 | ||
257 | |||
258 | feather_callback void do_sched_trace_tasklet_release(unsigned long id, | ||
259 | struct task_struct* owner); | ||
260 | feather_callback void do_sched_trace_tasklet_begin(unsigned long id, | ||
261 | struct task_struct* owner); | ||
262 | feather_callback void do_sched_trace_tasklet_end(unsigned long id, | ||
263 | struct task_struct* owner, | ||
264 | unsigned long flushed); | ||
265 | |||
266 | feather_callback void do_sched_trace_work_release(unsigned long id, | ||
267 | struct task_struct* owner); | ||
268 | feather_callback void do_sched_trace_work_begin(unsigned long id, | ||
269 | struct task_struct* owner, | ||
270 | struct task_struct* exe); | ||
271 | feather_callback void do_sched_trace_work_end(unsigned long id, | ||
272 | struct task_struct* owner, | ||
273 | struct task_struct* exe, | ||
274 | unsigned long flushed); | ||
275 | |||
276 | feather_callback void do_sched_trace_eff_prio_change(unsigned long id, | ||
277 | struct task_struct* task, | ||
278 | struct task_struct* inh); | ||
279 | |||
280 | feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id, | ||
281 | u32 device); | ||
282 | feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id, | ||
283 | unsigned long unused); | ||
284 | |||
285 | feather_callback void do_sched_trace_prediction_err(unsigned long id, | ||
286 | struct task_struct* task, | ||
287 | gpu_migration_dist_t* distance, | ||
288 | fp_t* rel_err); | ||
289 | |||
290 | |||
291 | |||
292 | |||
293 | |||
294 | feather_callback void do_sched_trace_migration(unsigned long id, | ||
295 | struct task_struct* task, | ||
296 | struct migration_info* mig_info); | ||
297 | |||
298 | |||
299 | /* returns true if we're tracing an interrupt on current CPU */ | ||
300 | /* int is_interrupt_tracing_active(void); */ | ||
301 | |||
158 | #endif | 302 | #endif |
159 | 303 | ||
160 | #else | 304 | #else |
161 | 305 | ||
162 | #define SCHED_TRACE(id, callback, task) /* no tracing */ | 306 | #define SCHED_TRACE(id, callback, task) /* no tracing */ |
163 | #define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */ | 307 | #define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */ |
308 | #define SCHED_TRACE3(id, callback, task, xtra1, xtra2) | ||
164 | 309 | ||
165 | #endif | 310 | #endif |
166 | 311 | ||
@@ -252,6 +397,41 @@ feather_callback void do_sched_trace_sys_release(unsigned long id, | |||
252 | trace_litmus_sys_release(when); \ | 397 | trace_litmus_sys_release(when); \ |
253 | } while (0) | 398 | } while (0) |
254 | 399 | ||
400 | #define sched_trace_tasklet_release(t) \ | ||
401 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 11, do_sched_trace_tasklet_release, t) | ||
402 | |||
403 | #define sched_trace_tasklet_begin(t) \ | ||
404 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 12, do_sched_trace_tasklet_begin, t) | ||
405 | |||
406 | #define sched_trace_tasklet_end(t, flushed) \ | ||
407 | SCHED_TRACE2(SCHED_TRACE_BASE_ID + 13, do_sched_trace_tasklet_end, t, flushed) | ||
408 | |||
409 | |||
410 | #define sched_trace_work_release(t) \ | ||
411 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 14, do_sched_trace_work_release, t) | ||
412 | |||
413 | #define sched_trace_work_begin(t, e) \ | ||
414 | SCHED_TRACE2(SCHED_TRACE_BASE_ID + 15, do_sched_trace_work_begin, t, e) | ||
415 | |||
416 | #define sched_trace_work_end(t, e, flushed) \ | ||
417 | SCHED_TRACE3(SCHED_TRACE_BASE_ID + 16, do_sched_trace_work_end, t, e, flushed) | ||
418 | |||
419 | |||
420 | #define sched_trace_eff_prio_change(t, inh) \ | ||
421 | SCHED_TRACE2(SCHED_TRACE_BASE_ID + 17, do_sched_trace_eff_prio_change, t, inh) | ||
422 | |||
423 | |||
424 | #define sched_trace_nv_interrupt_begin(d) \ | ||
425 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 18, do_sched_trace_nv_interrupt_begin, d) | ||
426 | #define sched_trace_nv_interrupt_end(d) \ | ||
427 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 19, do_sched_trace_nv_interrupt_end, d) | ||
428 | |||
429 | #define sched_trace_prediction_err(t, dist, rel_err) \ | ||
430 | SCHED_TRACE3(SCHED_TRACE_BASE_ID + 20, do_sched_trace_prediction_err, t, dist, rel_err) | ||
431 | |||
432 | #define sched_trace_migration(t, mig_info) \ | ||
433 | SCHED_TRACE2(SCHED_TRACE_BASE_ID + 21, do_sched_trace_migration, t, mig_info) | ||
434 | |||
255 | #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ | 435 | #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ |
256 | 436 | ||
257 | #endif /* __KERNEL__ */ | 437 | #endif /* __KERNEL__ */ |
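In the kernel, the klitirqd/PAI paths would bracket deferred work with these macros roughly as follows (the surrounding dequeue logic is hypothetical):

struct tasklet_struct* t = dequeue_next_tasklet(); /* stand-in helper */
sched_trace_tasklet_begin(t->owner);
t->func(t->data);                     /* run the deferred work */
sched_trace_tasklet_end(t->owner, 0); /* 0: executed, not flushed */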
diff --git a/include/litmus/sched_trace_external.h b/include/litmus/sched_trace_external.h new file mode 100644 index 000000000000..e70e45e4cf51 --- /dev/null +++ b/include/litmus/sched_trace_external.h | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * sched_trace_external.h -- record scheduler events to a byte stream for offline analysis. | ||
3 | */ | ||
4 | #ifndef _LINUX_SCHED_TRACE_EXTERNAL_H_ | ||
5 | #define _LINUX_SCHED_TRACE_EXTERNAL_H_ | ||
6 | |||
7 | |||
8 | #ifdef CONFIG_SCHED_TASK_TRACE | ||
9 | extern void __sched_trace_tasklet_begin_external(struct task_struct* t); | ||
10 | static inline void sched_trace_tasklet_begin_external(struct task_struct* t) | ||
11 | { | ||
12 | __sched_trace_tasklet_begin_external(t); | ||
13 | } | ||
14 | |||
15 | extern void __sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed); | ||
16 | static inline void sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed) | ||
17 | { | ||
18 | __sched_trace_tasklet_end_external(t, flushed); | ||
19 | } | ||
20 | |||
21 | extern void __sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e); | ||
22 | static inline void sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e) | ||
23 | { | ||
24 | __sched_trace_work_begin_external(t, e); | ||
25 | } | ||
26 | |||
27 | extern void __sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f); | ||
28 | static inline void sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f) | ||
29 | { | ||
30 | __sched_trace_work_end_external(t, e, f); | ||
31 | } | ||
32 | |||
33 | #ifdef CONFIG_LITMUS_NVIDIA | ||
34 | extern void __sched_trace_nv_interrupt_begin_external(u32 device); | ||
35 | static inline void sched_trace_nv_interrupt_begin_external(u32 device) | ||
36 | { | ||
37 | __sched_trace_nv_interrupt_begin_external(device); | ||
38 | } | ||
39 | |||
40 | extern void __sched_trace_nv_interrupt_end_external(u32 device); | ||
41 | static inline void sched_trace_nv_interrupt_end_external(u32 device) | ||
42 | { | ||
43 | __sched_trace_nv_interrupt_end_external(device); | ||
44 | } | ||
45 | #endif | ||
46 | |||
47 | #else | ||
48 | |||
49 | // no tracing. | ||
50 | static inline void sched_trace_tasklet_begin_external(struct task_struct* t){} | ||
51 | static inline void sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed){} | ||
52 | static inline void sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e){} | ||
53 | static inline void sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f){} | ||
54 | |||
55 | #ifdef CONFIG_LITMUS_NVIDIA | ||
56 | static inline void sched_trace_nv_interrupt_begin_external(u32 device){} | ||
57 | static inline void sched_trace_nv_interrupt_end_external(u32 device){} | ||
58 | #endif | ||
59 | |||
60 | #endif | ||
61 | |||
62 | |||
63 | #ifdef CONFIG_LITMUS_NVIDIA | ||
64 | |||
65 | #define EX_TS(evt) \ | ||
66 | extern void __##evt(void); \ | ||
67 | static inline void EX_##evt(void) { __##evt(); } | ||
68 | |||
69 | EX_TS(TS_NV_TOPISR_START) | ||
70 | EX_TS(TS_NV_TOPISR_END) | ||
71 | EX_TS(TS_NV_BOTISR_START) | ||
72 | EX_TS(TS_NV_BOTISR_END) | ||
73 | EX_TS(TS_NV_RELEASE_BOTISR_START) | ||
74 | EX_TS(TS_NV_RELEASE_BOTISR_END) | ||
75 | |||
76 | #endif | ||
77 | |||
78 | #endif | ||
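For clarity, EX_TS(TS_NV_TOPISR_START) expands to exactly this pair, giving other compilation units a callable wrapper around the timestamp macro:

extern void __TS_NV_TOPISR_START(void);
static inline void EX_TS_NV_TOPISR_START(void) { __TS_NV_TOPISR_START(); }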
diff --git a/include/litmus/signal.h b/include/litmus/signal.h new file mode 100644 index 000000000000..b3d82b294984 --- /dev/null +++ b/include/litmus/signal.h | |||
@@ -0,0 +1,47 @@ | |||
1 | #ifndef LITMUS_SIGNAL_H | ||
2 | #define LITMUS_SIGNAL_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | #include <linux/signal.h> | ||
6 | #else | ||
7 | #include <signal.h> | ||
8 | #endif | ||
9 | |||
10 | /* Signals used by Litmus to asynchronously communicate events | ||
11 | * to real-time tasks. | ||
12 | * | ||
13 | * Signal values overlap with [SIGRTMIN, SIGRTMAX], so beware of | ||
14 | * application-level conflicts when dealing with COTS user-level | ||
15 | * code. | ||
16 | */ | ||
17 | |||
18 | /* Sent to a Litmus task when all of the following conditions are true: | ||
19 | * (1) The task has exhausted its budget. | ||
20 | * (2) budget_signal_policy is QUANTUM_SIGNALS or PRECISE_SIGNALS. | ||
21 | * | ||
22 | * Note: If a task does not have a registered handler for SIG_BUDGET, | ||
23 | * the signal will cause the task to terminate (default action). | ||
24 | */ | ||
25 | |||
26 | /* Assigned values start at SIGRTMAX and decrease, which should | ||
27 | * reduce the likelihood of user-level conflicts. | ||
28 | */ | ||
29 | #define SIG_BUDGET (SIGRTMAX - 0) | ||
30 | |||
31 | /* | ||
32 | Future signals could include: | ||
33 | |||
34 | #define SIG_DEADLINE_MISS (SIGRTMAX - 1) | ||
35 | #define SIG_CRIT_LEVEL_CHANGE (SIGRTMAX - 2) | ||
36 | */ | ||
37 | |||
38 | #define SIGLITMUSMIN SIG_BUDGET | ||
39 | |||
40 | #ifdef __KERNEL__ | ||
41 | #if (SIGLITMUSMIN < SIGRTMIN) | ||
42 | /* no compile-time check in user-space since SIGRTMIN may be a variable. */ | ||
43 | #error "Too many LITMUS^RT signals!" | ||
44 | #endif | ||
45 | #endif | ||
46 | |||
47 | #endif | ||
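Since the default action terminates the task, code using QUANTUM_SIGNALS or PRECISE_SIGNALS should install a handler before launch. A minimal user-space sketch (the include path for SIG_BUDGET is assumed):

#include <signal.h>
#include <litmus/signal.h> /* for SIG_BUDGET; actual install path may differ */

static void on_budget_exhausted(int sig)
{
    /* async-signal-safe work only, e.g. set a volatile flag */
}

static void install_budget_handler(void)
{
    struct sigaction sa = { .sa_handler = on_budget_exhausted };
    sigemptyset(&sa.sa_mask);
    sigaction(SIG_BUDGET, &sa, NULL);
}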
diff --git a/include/litmus/trace.h b/include/litmus/trace.h index e809376d6487..e078aee4234d 100644 --- a/include/litmus/trace.h +++ b/include/litmus/trace.h | |||
@@ -103,14 +103,46 @@ feather_callback void save_task_latency(unsigned long event, unsigned long when_ | |||
103 | #define TS_LOCK_START TIMESTAMP(170) | 103 | #define TS_LOCK_START TIMESTAMP(170) |
104 | #define TS_LOCK_SUSPEND TIMESTAMP(171) | 104 | #define TS_LOCK_SUSPEND TIMESTAMP(171) |
105 | #define TS_LOCK_RESUME TIMESTAMP(172) | 105 | #define TS_LOCK_RESUME TIMESTAMP(172) |
106 | #define TS_LOCK_END TIMESTAMP(173) | 106 | #define TS_LOCK_END TIMESTAMP(173) |
107 | |||
108 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
109 | #define TS_DGL_LOCK_START TIMESTAMP(175) | ||
110 | #define TS_DGL_LOCK_SUSPEND TIMESTAMP(176) | ||
111 | #define TS_DGL_LOCK_RESUME TIMESTAMP(177) | ||
112 | #define TS_DGL_LOCK_END TIMESTAMP(178) | ||
113 | #endif | ||
107 | 114 | ||
108 | #define TS_UNLOCK_START TIMESTAMP(180) | 115 | #define TS_UNLOCK_START TIMESTAMP(180) |
109 | #define TS_UNLOCK_END TIMESTAMP(181) | 116 | #define TS_UNLOCK_END TIMESTAMP(181) |
110 | 117 | ||
118 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
119 | #define TS_DGL_UNLOCK_START TIMESTAMP(185) | ||
120 | #define TS_DGL_UNLOCK_END TIMESTAMP(186) | ||
121 | #endif | ||
122 | |||
111 | #define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c) | 123 | #define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c) |
112 | #define TS_SEND_RESCHED_END DTIMESTAMP(191, TSK_UNKNOWN) | 124 | #define TS_SEND_RESCHED_END DTIMESTAMP(191, TSK_UNKNOWN) |
113 | 125 | ||
114 | #define TS_RELEASE_LATENCY(when) LTIMESTAMP(208, &(when)) | 126 | #define TS_RELEASE_LATENCY(when) LTIMESTAMP(208, &(when)) |
115 | 127 | ||
128 | |||
129 | #ifdef CONFIG_LITMUS_NVIDIA | ||
130 | |||
131 | #define TS_NV_TOPISR_START TIMESTAMP(200) | ||
132 | #define TS_NV_TOPISR_END TIMESTAMP(201) | ||
133 | |||
134 | #define TS_NV_BOTISR_START TIMESTAMP(202) | ||
135 | #define TS_NV_BOTISR_END TIMESTAMP(203) | ||
136 | |||
137 | #define TS_NV_RELEASE_BOTISR_START TIMESTAMP(204) | ||
138 | #define TS_NV_RELEASE_BOTISR_END TIMESTAMP(205) | ||
139 | |||
140 | #endif | ||
141 | |||
142 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
143 | #define TS_NV_SCHED_BOTISR_START TIMESTAMP(206) | ||
144 | #define TS_NV_SCHED_BOTISR_END TIMESTAMP(207) | ||
145 | #endif | ||
146 | |||
147 | |||
116 | #endif /* !_SYS_TRACE_H_ */ | 148 | #endif /* !_SYS_TRACE_H_ */ |
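These pairs are intended to bracket the corresponding code paths. A sketch of how the DGL acquire path might be instrumented (the body is a stand-in):

static long demo_dgl_lock(void)
{
    TS_DGL_LOCK_START;
    /* acquire each lock in the group; if the caller must block,
     * TS_DGL_LOCK_SUSPEND / TS_DGL_LOCK_RESUME bracket the suspension */
    TS_DGL_LOCK_END;
    return 0;
}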
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h index bcb8f1183b4f..4fd92956d13f 100644 --- a/include/litmus/unistd_32.h +++ b/include/litmus/unistd_32.h | |||
@@ -17,7 +17,10 @@ | |||
17 | #define __NR_wait_for_ts_release __LSC(9) | 17 | #define __NR_wait_for_ts_release __LSC(9) |
18 | #define __NR_release_ts __LSC(10) | 18 | #define __NR_release_ts __LSC(10) |
19 | #define __NR_null_call __LSC(11) | 19 | #define __NR_null_call __LSC(11) |
20 | #define __NR_litmus_dgl_lock __LSC(12) | ||
21 | #define __NR_litmus_dgl_unlock __LSC(13) | ||
22 | #define __NR_register_nv_device __LSC(14) | ||
20 | 23 | ||
21 | #define __NR_slave_non_rt_threads _LSC(12) | 24 | #define __NR_slave_non_rt_threads _LSC(15) |
22 | 25 | ||
23 | #define NR_litmus_syscalls 13 | 26 | #define NR_litmus_syscalls 16 |
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h index 5f56d5947343..abb45c181e8e 100644 --- a/include/litmus/unistd_64.h +++ b/include/litmus/unistd_64.h | |||
@@ -29,8 +29,14 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release) | |||
29 | __SYSCALL(__NR_release_ts, sys_release_ts) | 29 | __SYSCALL(__NR_release_ts, sys_release_ts) |
30 | #define __NR_null_call __LSC(11) | 30 | #define __NR_null_call __LSC(11) |
31 | __SYSCALL(__NR_null_call, sys_null_call) | 31 | __SYSCALL(__NR_null_call, sys_null_call) |
32 | #define __NR_litmus_dgl_lock __LSC(12) | ||
33 | __SYSCALL(__NR_litmus_dgl_lock, sys_litmus_dgl_lock) | ||
34 | #define __NR_litmus_dgl_unlock __LSC(13) | ||
35 | __SYSCALL(__NR_litmus_dgl_unlock, sys_litmus_dgl_unlock) | ||
36 | #define __NR_register_nv_device __LSC(14) | ||
37 | __SYSCALL(__NR_register_nv_device, sys_register_nv_device) | ||
32 | 38 | ||
33 | #define __NR_slave_non_rt_threads __LSC(12) | 39 | #define __NR_slave_non_rt_threads __LSC(15) |
34 | __SYSCALL(__NR_slave_non_rt_threads, sys_slave_non_rt_threads) | 40 | __SYSCALL(__NR_slave_non_rt_threads, sys_slave_non_rt_threads) |
35 | 41 | ||
36 | #define NR_litmus_syscalls 13 | 42 | #define NR_litmus_syscalls 16 |
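Until liblitmus grows wrappers, user space can reach the new entry points via syscall(2). The argument lists below are assumptions; only the syscall numbers are defined by this patch:

#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical wrappers; real signatures are fixed by the kernel side. */
static inline long litmus_dgl_lock(int* lock_ods, int count)
{
    return syscall(__NR_litmus_dgl_lock, lock_ods, count);
}

static inline long litmus_dgl_unlock(int* lock_ods, int count)
{
    return syscall(__NR_litmus_dgl_unlock, lock_ods, count);
}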