author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 20:52:29 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 21:01:20 -0500 |
commit | 12d312072e3f4caa6e4e500d5a23c85402494cd1 (patch) | |
tree | 9cde0ea468ea97c51d3c6370c9924827376efcc2 | |
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) | |
parent | 3d1c6d44d3f133909d1c594351c2b7c779b1d7d4 (diff) |
Merge branch 'wip-pai' into wip-gpu-interrupts
Conflicts:
include/litmus/affinity.h
kernel/sched.c
kernel/softirq.c
litmus/Kconfig
litmus/affinity.c
litmus/litmus.c
litmus/preempt.c
litmus/sched_cedf.c
litmus/sched_gsn_edf.c
45 files changed, 6325 insertions, 116 deletions
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 6c0802eb2f7f..680a5cb4b585 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -10,6 +10,10 @@ | |||
10 | #include <linux/ftrace.h> | 10 | #include <linux/ftrace.h> |
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | 12 | ||
13 | #ifdef CONFIG_LITMUS_NVIDIA | ||
14 | #include <litmus/sched_trace.h> | ||
15 | #endif | ||
16 | |||
13 | #include <asm/apic.h> | 17 | #include <asm/apic.h> |
14 | #include <asm/io_apic.h> | 18 | #include <asm/io_apic.h> |
15 | #include <asm/irq.h> | 19 | #include <asm/irq.h> |
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index d0126222b394..0cb4373698e7 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S | |||
@@ -358,3 +358,4 @@ ENTRY(sys_call_table) | |||
358 | .long sys_wait_for_ts_release | 358 | .long sys_wait_for_ts_release |
359 | .long sys_release_ts /* +10 */ | 359 | .long sys_release_ts /* +10 */ |
360 | .long sys_null_call | 360 | .long sys_null_call |
361 | .long sys_register_nv_device | ||
diff --git a/include/linux/completion.h b/include/linux/completion.h index 9d727271c9fe..cff405c4dd3a 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h | |||
@@ -76,6 +76,7 @@ static inline void init_completion(struct completion *x) | |||
76 | init_waitqueue_head(&x->wait); | 76 | init_waitqueue_head(&x->wait); |
77 | } | 77 | } |
78 | 78 | ||
79 | extern void __wait_for_completion_locked(struct completion *); | ||
79 | extern void wait_for_completion(struct completion *); | 80 | extern void wait_for_completion(struct completion *); |
80 | extern int wait_for_completion_interruptible(struct completion *x); | 81 | extern int wait_for_completion_interruptible(struct completion *x); |
81 | extern int wait_for_completion_killable(struct completion *x); | 82 | extern int wait_for_completion_killable(struct completion *x); |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index f6efed0039ed..57a7bc8807be 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -445,6 +445,7 @@ static inline void __raise_softirq_irqoff(unsigned int nr) | |||
445 | 445 | ||
446 | extern void raise_softirq_irqoff(unsigned int nr); | 446 | extern void raise_softirq_irqoff(unsigned int nr); |
447 | extern void raise_softirq(unsigned int nr); | 447 | extern void raise_softirq(unsigned int nr); |
448 | extern void wakeup_softirqd(void); | ||
448 | 449 | ||
449 | /* This is the worklist that queues up per-cpu softirq work. | 450 | /* This is the worklist that queues up per-cpu softirq work. |
450 | * | 451 | * |
@@ -500,6 +501,10 @@ struct tasklet_struct | |||
500 | atomic_t count; | 501 | atomic_t count; |
501 | void (*func)(unsigned long); | 502 | void (*func)(unsigned long); |
502 | unsigned long data; | 503 | unsigned long data; |
504 | |||
505 | #if defined(CONFIG_LITMUS_SOFTIRQD) || defined(CONFIG_LITMUS_PAI_SOFTIRQD) | ||
506 | struct task_struct *owner; | ||
507 | #endif | ||
503 | }; | 508 | }; |
504 | 509 | ||
505 | #define DECLARE_TASKLET(name, func, data) \ | 510 | #define DECLARE_TASKLET(name, func, data) \ |
@@ -537,6 +542,7 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t) | |||
537 | #define tasklet_unlock(t) do { } while (0) | 542 | #define tasklet_unlock(t) do { } while (0) |
538 | #endif | 543 | #endif |
539 | 544 | ||
545 | extern void ___tasklet_schedule(struct tasklet_struct *t); | ||
540 | extern void __tasklet_schedule(struct tasklet_struct *t); | 546 | extern void __tasklet_schedule(struct tasklet_struct *t); |
541 | 547 | ||
542 | static inline void tasklet_schedule(struct tasklet_struct *t) | 548 | static inline void tasklet_schedule(struct tasklet_struct *t) |
@@ -545,6 +551,7 @@ static inline void tasklet_schedule(struct tasklet_struct *t) | |||
545 | __tasklet_schedule(t); | 551 | __tasklet_schedule(t); |
546 | } | 552 | } |
547 | 553 | ||
554 | extern void ___tasklet_hi_schedule(struct tasklet_struct *t); | ||
548 | extern void __tasklet_hi_schedule(struct tasklet_struct *t); | 555 | extern void __tasklet_hi_schedule(struct tasklet_struct *t); |
549 | 556 | ||
550 | static inline void tasklet_hi_schedule(struct tasklet_struct *t) | 557 | static inline void tasklet_hi_schedule(struct tasklet_struct *t) |
@@ -553,6 +560,7 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t) | |||
553 | __tasklet_hi_schedule(t); | 560 | __tasklet_hi_schedule(t); |
554 | } | 561 | } |
555 | 562 | ||
563 | extern void ___tasklet_hi_schedule_first(struct tasklet_struct *t); | ||
556 | extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); | 564 | extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); |
557 | 565 | ||
558 | /* | 566 | /* |
@@ -582,7 +590,7 @@ static inline void tasklet_disable(struct tasklet_struct *t) | |||
582 | } | 590 | } |
583 | 591 | ||
584 | static inline void tasklet_enable(struct tasklet_struct *t) | 592 | static inline void tasklet_enable(struct tasklet_struct *t) |
585 | { | 593 | { |
586 | smp_mb__before_atomic_dec(); | 594 | smp_mb__before_atomic_dec(); |
587 | atomic_dec(&t->count); | 595 | atomic_dec(&t->count); |
588 | } | 596 | } |
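The `owner` field added to `struct tasklet_struct` above ties deferred work to the real-time task on whose behalf it runs, so the klitirqd/PAI code can execute the tasklet at that task's priority. A minimal sketch of how a driver might set it before scheduling; the driver code and names here are hypothetical and not part of the patch:

```c
#include <linux/interrupt.h>
#include <linux/sched.h>

static void my_bottom_half(unsigned long data)
{
	/* deferred processing, now attributable to my_tasklet.owner */
}

static DECLARE_TASKLET(my_tasklet, my_bottom_half, 0);

/* hypothetical top-half helper: defer work on behalf of 'rt_task' */
static void queue_bottom_half_for(struct task_struct *rt_task)
{
#if defined(CONFIG_LITMUS_SOFTIRQD) || defined(CONFIG_LITMUS_PAI_SOFTIRQD)
	my_tasklet.owner = rt_task;	/* priority source for the deferred work */
#endif
	tasklet_schedule(&my_tasklet);
}
```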
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index a940fe435aca..cb47debbf24d 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
@@ -126,6 +126,15 @@ static inline int mutex_is_locked(struct mutex *lock) | |||
126 | return atomic_read(&lock->count) != 1; | 126 | return atomic_read(&lock->count) != 1; |
127 | } | 127 | } |
128 | 128 | ||
129 | /* Return non-zero to abort; only the pre side-effect may abort. */ | ||
130 | typedef int (*side_effect_t)(unsigned long); | ||
131 | extern void mutex_lock_sfx(struct mutex *lock, | ||
132 | side_effect_t pre, unsigned long pre_arg, | ||
133 | side_effect_t post, unsigned long post_arg); | ||
134 | extern void mutex_unlock_sfx(struct mutex *lock, | ||
135 | side_effect_t pre, unsigned long pre_arg, | ||
136 | side_effect_t post, unsigned long post_arg); | ||
137 | |||
129 | /* | 138 | /* |
130 | * See kernel/mutex.c for detailed documentation of these APIs. | 139 | * See kernel/mutex.c for detailed documentation of these APIs. |
131 | * Also see Documentation/mutex-design.txt. | 140 | * Also see Documentation/mutex-design.txt. |
@@ -153,6 +162,7 @@ extern void mutex_lock(struct mutex *lock); | |||
153 | extern int __must_check mutex_lock_interruptible(struct mutex *lock); | 162 | extern int __must_check mutex_lock_interruptible(struct mutex *lock); |
154 | extern int __must_check mutex_lock_killable(struct mutex *lock); | 163 | extern int __must_check mutex_lock_killable(struct mutex *lock); |
155 | 164 | ||
165 | |||
156 | # define mutex_lock_nested(lock, subclass) mutex_lock(lock) | 166 | # define mutex_lock_nested(lock, subclass) mutex_lock(lock) |
157 | # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) | 167 | # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) |
158 | # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) | 168 | # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) |
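The new `mutex_lock_sfx()`/`mutex_unlock_sfx()` variants run caller-supplied side effects while the mutex's internal wait_lock is held; per the header comment, only the pre callback may abort by returning non-zero. A sketch of the intended call pattern with hypothetical callbacks (not from the patch). Because the callbacks run under the wait_lock, they should be short and must not sleep:

```c
#include <linux/mutex.h>

static DEFINE_MUTEX(gpu_mutex);
static int device_present = 1;
static unsigned long acquisitions;

/* hypothetical pre-side-effect: abort the lock attempt if the device is gone */
static int check_device_present(unsigned long arg)
{
	return *(int *)arg ? 0 : 1;	/* non-zero aborts, per the header comment */
}

/* hypothetical post-side-effect: bump a counter while wait_lock is still held */
static int count_acquisition(unsigned long arg)
{
	(*(unsigned long *)arg)++;
	return 0;
}

static void example_locked_update(void)
{
	mutex_lock_sfx(&gpu_mutex,
		       check_device_present, (unsigned long)&device_present,
		       count_acquisition, (unsigned long)&acquisitions);
	if (!device_present)
		return;		/* the pre callback aborted; the lock was not taken */

	/* ... critical section ... */

	mutex_unlock_sfx(&gpu_mutex, NULL, 0, NULL, 0);
}
```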
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h index 39fa04966aa8..c83fc2b65f01 100644 --- a/include/linux/semaphore.h +++ b/include/linux/semaphore.h | |||
@@ -43,4 +43,13 @@ extern int __must_check down_trylock(struct semaphore *sem); | |||
43 | extern int __must_check down_timeout(struct semaphore *sem, long jiffies); | 43 | extern int __must_check down_timeout(struct semaphore *sem, long jiffies); |
44 | extern void up(struct semaphore *sem); | 44 | extern void up(struct semaphore *sem); |
45 | 45 | ||
46 | extern void __down(struct semaphore *sem); | ||
47 | extern void __up(struct semaphore *sem); | ||
48 | |||
49 | struct semaphore_waiter { | ||
50 | struct list_head list; | ||
51 | struct task_struct *task; | ||
52 | int up; | ||
53 | }; | ||
54 | |||
46 | #endif /* __LINUX_SEMAPHORE_H */ | 55 | #endif /* __LINUX_SEMAPHORE_H */ |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index f584aba78ca9..1ec2ec7d4e3b 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -83,6 +83,9 @@ struct work_struct { | |||
83 | #ifdef CONFIG_LOCKDEP | 83 | #ifdef CONFIG_LOCKDEP |
84 | struct lockdep_map lockdep_map; | 84 | struct lockdep_map lockdep_map; |
85 | #endif | 85 | #endif |
86 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
87 | struct task_struct *owner; | ||
88 | #endif | ||
86 | }; | 89 | }; |
87 | 90 | ||
88 | #define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU) | 91 | #define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU) |
@@ -115,11 +118,25 @@ struct execute_work { | |||
115 | #define __WORK_INIT_LOCKDEP_MAP(n, k) | 118 | #define __WORK_INIT_LOCKDEP_MAP(n, k) |
116 | #endif | 119 | #endif |
117 | 120 | ||
121 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
122 | #define __WORK_INIT_OWNER() \ | ||
123 | .owner = NULL, | ||
124 | |||
125 | #define PREPARE_OWNER(_work, _owner) \ | ||
126 | do { \ | ||
127 | (_work)->owner = (_owner); \ | ||
128 | } while(0) | ||
129 | #else | ||
130 | #define __WORK_INIT_OWNER() | ||
131 | #define PREPARE_OWNER(_work, _owner) | ||
132 | #endif | ||
133 | |||
118 | #define __WORK_INITIALIZER(n, f) { \ | 134 | #define __WORK_INITIALIZER(n, f) { \ |
119 | .data = WORK_DATA_STATIC_INIT(), \ | 135 | .data = WORK_DATA_STATIC_INIT(), \ |
120 | .entry = { &(n).entry, &(n).entry }, \ | 136 | .entry = { &(n).entry, &(n).entry }, \ |
121 | .func = (f), \ | 137 | .func = (f), \ |
122 | __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ | 138 | __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ |
139 | __WORK_INIT_OWNER() \ | ||
123 | } | 140 | } |
124 | 141 | ||
125 | #define __DELAYED_WORK_INITIALIZER(n, f) { \ | 142 | #define __DELAYED_WORK_INITIALIZER(n, f) { \ |
@@ -357,6 +374,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
357 | extern void flush_workqueue(struct workqueue_struct *wq); | 374 | extern void flush_workqueue(struct workqueue_struct *wq); |
358 | extern void flush_scheduled_work(void); | 375 | extern void flush_scheduled_work(void); |
359 | 376 | ||
377 | extern int __schedule_work(struct work_struct *work); | ||
360 | extern int schedule_work(struct work_struct *work); | 378 | extern int schedule_work(struct work_struct *work); |
361 | extern int schedule_work_on(int cpu, struct work_struct *work); | 379 | extern int schedule_work_on(int cpu, struct work_struct *work); |
362 | extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay); | 380 | extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay); |
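`PREPARE_OWNER()` tags a work item with the task it is issued on behalf of (a no-op unless CONFIG_LITMUS_SOFTIRQD), while `__schedule_work()` appears to expose the stock kworker path next to `schedule_work()`, which the LITMUS softirqd code may intercept. A hypothetical usage sketch, not taken from the patch:

```c
#include <linux/workqueue.h>
#include <linux/sched.h>

static void my_work_fn(struct work_struct *w)
{
	/* deferred processing */
}

static DECLARE_WORK(my_work, my_work_fn);

/* hypothetical: queue deferred work attributed to 'rt_task' */
static void queue_work_for(struct task_struct *rt_task)
{
	PREPARE_OWNER(&my_work, rt_task);  /* no-op unless CONFIG_LITMUS_SOFTIRQD */
	schedule_work(&my_work);           /* may be rerouted by the LITMUS code */
}

/* hypothetical: queue the same work directly on the regular kworker path */
static void queue_work_plain(void)
{
	__schedule_work(&my_work);
}
```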
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h index caf2a1e6918c..c740e8fc3e88 100644 --- a/include/litmus/fdso.h +++ b/include/litmus/fdso.h | |||
@@ -18,9 +18,10 @@ typedef enum { | |||
18 | MIN_OBJ_TYPE = 0, | 18 | MIN_OBJ_TYPE = 0, |
19 | 19 | ||
20 | FMLP_SEM = 0, | 20 | FMLP_SEM = 0, |
21 | SRP_SEM = 1, | 21 | KFMLP_SEM = 1, |
22 | SRP_SEM = 2, | ||
22 | 23 | ||
23 | MAX_OBJ_TYPE = 1 | 24 | MAX_OBJ_TYPE = SRP_SEM |
24 | } obj_type_t; | 25 | } obj_type_t; |
25 | 26 | ||
26 | struct inode_obj_id { | 27 | struct inode_obj_id { |
@@ -64,6 +65,7 @@ static inline void* od_lookup(int od, obj_type_t type) | |||
64 | } | 65 | } |
65 | 66 | ||
66 | #define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM)) | 67 | #define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM)) |
68 | #define lookup_kfmlp_sem(od)((struct pi_semaphore*) od_lookup(od, KFMLP_SEM)) | ||
67 | #define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) | 69 | #define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) |
68 | #define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) | 70 | #define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) |
69 | 71 | ||
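With `KFMLP_SEM` inserted into `obj_type_t`, `SRP_SEM` shifts to 2 and `MAX_OBJ_TYPE` now tracks it, so user-space tools that hard-code these constants need matching updates. Kernel code resolves a KFMLP object descriptor through the new macro; a trivial, hypothetical sketch:

```c
#include <litmus/fdso.h>

struct pi_semaphore;	/* opaque here; defined by the locking code */

/* hypothetical: translate a user-supplied object descriptor into the
 * in-kernel KFMLP semaphore it refers to */
static struct pi_semaphore* resolve_kfmlp(int od)
{
	return lookup_kfmlp_sem(od);	/* od_lookup(od, KFMLP_SEM) */
}
```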
diff --git a/include/litmus/fifo_common.h b/include/litmus/fifo_common.h new file mode 100644 index 000000000000..12cfbfea41ee --- /dev/null +++ b/include/litmus/fifo_common.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * FIFO common data structures and utility functions shared by all FIFO | ||
3 | * based scheduler plugins | ||
4 | */ | ||
5 | |||
6 | /* CLEANUP: Add comments and make it less messy. | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #ifndef __UNC_FIFO_COMMON_H__ | ||
11 | #define __UNC_FIFO_COMMON_H__ | ||
12 | |||
13 | #include <litmus/rt_domain.h> | ||
14 | |||
15 | void fifo_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
16 | release_jobs_t release); | ||
17 | |||
18 | int fifo_higher_prio(struct task_struct* first, | ||
19 | struct task_struct* second); | ||
20 | |||
21 | int fifo_ready_order(struct bheap_node* a, struct bheap_node* b); | ||
22 | |||
23 | int fifo_preemption_needed(rt_domain_t* rt, struct task_struct *t); | ||
24 | |||
25 | #endif | ||
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index 0b071fd359f9..a2e564b885a7 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h | |||
@@ -26,6 +26,7 @@ static inline int in_list(struct list_head* list) | |||
26 | ); | 26 | ); |
27 | } | 27 | } |
28 | 28 | ||
29 | |||
29 | struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq); | 30 | struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq); |
30 | 31 | ||
31 | #define NO_CPU 0xffffffff | 32 | #define NO_CPU 0xffffffff |
@@ -117,7 +118,9 @@ static inline lt_t litmus_clock(void) | |||
117 | #define earlier_release(a, b) (lt_before(\ | 118 | #define earlier_release(a, b) (lt_before(\ |
118 | (a)->rt_param.job_params.release,\ | 119 | (a)->rt_param.job_params.release,\ |
119 | (b)->rt_param.job_params.release)) | 120 | (b)->rt_param.job_params.release)) |
120 | 121 | #define shorter_period(a, b) (lt_before(\ | |
122 | (a)->rt_param.task_params.period,\ | ||
123 | (b)->rt_param.task_params.period)) | ||
121 | void preempt_if_preemptable(struct task_struct* t, int on_cpu); | 124 | void preempt_if_preemptable(struct task_struct* t, int on_cpu); |
122 | 125 | ||
123 | #ifdef CONFIG_LITMUS_LOCKING | 126 | #ifdef CONFIG_LITMUS_LOCKING |
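`shorter_period()` mirrors `earlier_release()` and gives the basic comparison used by the new rate-monotonic plugins. A minimal sketch of how it reads; the full comparison in the RM common code presumably also breaks ties and accounts for priority inheritance:

```c
#include <litmus/litmus.h>

/* hypothetical RM-style priority check built on the new macro: the task
 * with the shorter period has the higher rate-monotonic priority. */
static inline int has_higher_rm_prio(struct task_struct* a, struct task_struct* b)
{
	return shorter_period(a, b);
}
```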
diff --git a/include/litmus/litmus_softirq.h b/include/litmus/litmus_softirq.h new file mode 100644 index 000000000000..34287f3cbb8d --- /dev/null +++ b/include/litmus/litmus_softirq.h | |||
@@ -0,0 +1,199 @@ | |||
1 | #ifndef __LITMUS_SOFTIRQ_H | ||
2 | #define __LITMUS_SOFTIRQ_H | ||
3 | |||
4 | #include <linux/interrupt.h> | ||
5 | #include <linux/workqueue.h> | ||
6 | |||
7 | /* | ||
8 | Threaded tasklet handling for Litmus. Tasklets | ||
9 | are scheduled with the priority of the tasklet's | ||
10 | owner---that is, the RT task on whose behalf the | ||
11 | tasklet runs. | ||
12 | |||
13 | Tasklets are currently scheduled in FIFO order with | ||
14 | NO priority inheritance for "blocked" tasklets. | ||
15 | |||
16 | klitirqd assumes the priority of the owner of the | ||
17 | tasklet when the tasklet is next to execute. | ||
18 | |||
19 | Currently, hi-tasklets are scheduled before | ||
20 | low-tasklets, regardless of priority of low-tasklets. | ||
21 | And likewise, low-tasklets are scheduled before work | ||
22 | queue objects. This priority inversion probably needs | ||
23 | to be fixed, though it is not an issue in our work with | ||
24 | GPUs, since GPUs (and their associated klitirqds) are | ||
25 | owned for exclusive time periods, so no inversions can | ||
26 | occur. | ||
27 | */ | ||
28 | |||
29 | |||
30 | |||
31 | #define NR_LITMUS_SOFTIRQD CONFIG_NR_LITMUS_SOFTIRQD | ||
32 | |||
33 | /* Spawns NR_LITMUS_SOFTIRQD klitirqd daemons. | ||
34 | Actual launch of threads is deferred to kworker's | ||
35 | workqueue, so daemons will likely not be immediately | ||
36 | running when this function returns, though the required | ||
37 | data will be initialized. | ||
38 | |||
39 | @affinity_set: an array expressing the processor affinity | ||
40 | for each of the NR_LITMUS_SOFTIRQD daemons. May be set | ||
41 | to NULL for global scheduling. | ||
42 | |||
43 | - Examples - | ||
44 | 8-CPU system with two CPU clusters: | ||
45 | affinity[] = {0, 0, 0, 0, 3, 3, 3, 3} | ||
46 | NOTE: Daemons are not actually bound to the specified CPU, but rather | ||
47 | to the cluster in which the CPU resides. | ||
48 | |||
49 | 8-CPU system, partitioned: | ||
50 | affinity[] = {0, 1, 2, 3, 4, 5, 6, 7} | ||
51 | |||
52 | FIXME: change array to a CPU topology or array of cpumasks | ||
53 | |||
54 | */ | ||
55 | void spawn_klitirqd(int* affinity); | ||
56 | |||
57 | |||
58 | /* Raises a flag to tell klitirqds to terminate. | ||
59 | Termination is async, so some threads may be running | ||
60 | after function return. */ | ||
61 | void kill_klitirqd(void); | ||
62 | |||
63 | |||
64 | /* Returns 1 if all NR_LITMUS_SOFTIRQD klitirqd daemons are ready | ||
65 | to handle tasklets. 0, otherwise.*/ | ||
66 | int klitirqd_is_ready(void); | ||
67 | |||
68 | /* Returns 1 if no NR_LITMUS_SOFTIRQD klitirqd daemons are ready | ||
69 | to handle tasklets. 0, otherwise.*/ | ||
70 | int klitirqd_is_dead(void); | ||
71 | |||
72 | /* Flushes all pending work out to the OS for regular | ||
73 | * tasklet/work processing of the specified 'owner' | ||
74 | * | ||
75 | * PRECOND: klitirqd_thread must have a clear entry | ||
76 | * in the GPU registry, otherwise this call will become | ||
77 | * a no-op as work will loop back to the klitirqd_thread. | ||
78 | * | ||
79 | * Pass NULL for owner to flush ALL pending items. | ||
80 | */ | ||
81 | void flush_pending(struct task_struct* klitirqd_thread, | ||
82 | struct task_struct* owner); | ||
83 | |||
84 | struct task_struct* get_klitirqd(unsigned int k_id); | ||
85 | |||
86 | |||
87 | extern int __litmus_tasklet_schedule( | ||
88 | struct tasklet_struct *t, | ||
89 | unsigned int k_id); | ||
90 | |||
91 | /* schedule a tasklet on klitirqd #k_id */ | ||
92 | static inline int litmus_tasklet_schedule( | ||
93 | struct tasklet_struct *t, | ||
94 | unsigned int k_id) | ||
95 | { | ||
96 | int ret = 0; | ||
97 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | ||
98 | ret = __litmus_tasklet_schedule(t, k_id); | ||
99 | return(ret); | ||
100 | } | ||
101 | |||
102 | /* for use by __tasklet_schedule() */ | ||
103 | static inline int _litmus_tasklet_schedule( | ||
104 | struct tasklet_struct *t, | ||
105 | unsigned int k_id) | ||
106 | { | ||
107 | return(__litmus_tasklet_schedule(t, k_id)); | ||
108 | } | ||
109 | |||
110 | |||
111 | |||
112 | |||
113 | extern int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, | ||
114 | unsigned int k_id); | ||
115 | |||
116 | /* schedule a hi tasklet on klitirqd #k_id */ | ||
117 | static inline int litmus_tasklet_hi_schedule(struct tasklet_struct *t, | ||
118 | unsigned int k_id) | ||
119 | { | ||
120 | int ret = 0; | ||
121 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | ||
122 | ret = __litmus_tasklet_hi_schedule(t, k_id); | ||
123 | return(ret); | ||
124 | } | ||
125 | |||
126 | /* for use by __tasklet_hi_schedule() */ | ||
127 | static inline int _litmus_tasklet_hi_schedule(struct tasklet_struct *t, | ||
128 | unsigned int k_id) | ||
129 | { | ||
130 | return(__litmus_tasklet_hi_schedule(t, k_id)); | ||
131 | } | ||
132 | |||
133 | |||
134 | |||
135 | |||
136 | |||
137 | extern int __litmus_tasklet_hi_schedule_first( | ||
138 | struct tasklet_struct *t, | ||
139 | unsigned int k_id); | ||
140 | |||
141 | /* schedule a hi tasklet on klitirqd #k_id on next go-around */ | ||
142 | /* PRECONDITION: Interrupts must be disabled. */ | ||
143 | static inline int litmus_tasklet_hi_schedule_first( | ||
144 | struct tasklet_struct *t, | ||
145 | unsigned int k_id) | ||
146 | { | ||
147 | int ret = 0; | ||
148 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | ||
149 | ret = __litmus_tasklet_hi_schedule_first(t, k_id); | ||
150 | return(ret); | ||
151 | } | ||
152 | |||
153 | /* for use by __tasklet_hi_schedule_first() */ | ||
154 | static inline int _litmus_tasklet_hi_schedule_first( | ||
155 | struct tasklet_struct *t, | ||
156 | unsigned int k_id) | ||
157 | { | ||
158 | return(__litmus_tasklet_hi_schedule_first(t, k_id)); | ||
159 | } | ||
160 | |||
161 | |||
162 | |||
163 | ////////////// | ||
164 | |||
165 | extern int __litmus_schedule_work( | ||
166 | struct work_struct* w, | ||
167 | unsigned int k_id); | ||
168 | |||
169 | static inline int litmus_schedule_work( | ||
170 | struct work_struct* w, | ||
171 | unsigned int k_id) | ||
172 | { | ||
173 | return(__litmus_schedule_work(w, k_id)); | ||
174 | } | ||
175 | |||
176 | |||
177 | |||
178 | ///////////// mutex operations for client threads. | ||
179 | |||
180 | void down_and_set_stat(struct task_struct* t, | ||
181 | enum klitirqd_sem_status to_set, | ||
182 | struct mutex* sem); | ||
183 | |||
184 | void __down_and_reset_and_set_stat(struct task_struct* t, | ||
185 | enum klitirqd_sem_status to_reset, | ||
186 | enum klitirqd_sem_status to_set, | ||
187 | struct mutex* sem); | ||
188 | |||
189 | void up_and_set_stat(struct task_struct* t, | ||
190 | enum klitirqd_sem_status to_set, | ||
191 | struct mutex* sem); | ||
192 | |||
193 | |||
194 | |||
195 | void release_klitirqd_lock(struct task_struct* t); | ||
196 | |||
197 | int reacquire_klitirqd_lock(struct task_struct* t); | ||
198 | |||
199 | #endif | ||
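Putting the pieces of this header together, a plugin or driver would spawn the daemons once, wait for them to come up, and then route tasklets to a particular klitirqd with the owner recorded for priority inheritance. A hypothetical sketch for the partitioned case (assumes CONFIG_LITMUS_SOFTIRQD; error handling trimmed):

```c
#include <linux/errno.h>
#include <litmus/litmus_softirq.h>

/* hypothetical: spawn one klitirqd per CPU (partitioned case) and hand a
 * tasklet to daemon 0 on behalf of its owning real-time task. */
static int klitirqd_example(struct tasklet_struct *t, struct task_struct *owner)
{
	int affinity[NR_LITMUS_SOFTIRQD];
	int i;

	for (i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
		affinity[i] = i;	/* daemon i serves CPU/cluster i */

	spawn_klitirqd(affinity);	/* daemons come up asynchronously */
	if (!klitirqd_is_ready())
		return -EAGAIN;		/* not all daemons are running yet */

	t->owner = owner;		/* tasklet runs at the owner's priority */
	return litmus_tasklet_schedule(t, 0);
}
```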
diff --git a/include/litmus/nvidia_info.h b/include/litmus/nvidia_info.h new file mode 100644 index 000000000000..9e07a27fdee3 --- /dev/null +++ b/include/litmus/nvidia_info.h | |||
@@ -0,0 +1,38 @@ | |||
1 | #ifndef __LITMUS_NVIDIA_H | ||
2 | #define __LITMUS_NVIDIA_H | ||
3 | |||
4 | #include <linux/interrupt.h> | ||
5 | |||
6 | |||
7 | #include <litmus/litmus_softirq.h> | ||
8 | |||
9 | |||
10 | //#define NV_DEVICE_NUM NR_LITMUS_SOFTIRQD | ||
11 | #define NV_DEVICE_NUM CONFIG_NV_DEVICE_NUM | ||
12 | |||
13 | int init_nvidia_info(void); | ||
14 | |||
15 | int is_nvidia_func(void* func_addr); | ||
16 | |||
17 | void dump_nvidia_info(const struct tasklet_struct *t); | ||
18 | |||
19 | |||
20 | // Returns the NVIDIA device # associated with the provided tasklet or work_struct. | ||
21 | u32 get_tasklet_nv_device_num(const struct tasklet_struct *t); | ||
22 | u32 get_work_nv_device_num(const struct work_struct *t); | ||
23 | |||
24 | |||
25 | int init_nv_device_reg(void); | ||
26 | //int get_nv_device_id(struct task_struct* owner); | ||
27 | |||
28 | |||
29 | int reg_nv_device(int reg_device_id, int register_device); | ||
30 | |||
31 | struct task_struct* get_nv_device_owner(u32 target_device_id); | ||
32 | |||
33 | void lock_nv_registry(u32 reg_device_id, unsigned long* flags); | ||
34 | void unlock_nv_registry(u32 reg_device_id, unsigned long* flags); | ||
35 | |||
36 | void increment_nv_int_count(u32 device); | ||
37 | |||
38 | #endif | ||
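The registry API above pairs a GPU with the real-time task currently using it, which is what lets interrupt bottom halves inherit a meaningful priority. A hypothetical usage sketch; the semantics of `reg_nv_device()`'s second argument (1 = register, 0 = unregister) are inferred from the parameter name:

```c
#include <litmus/nvidia_info.h>

/* hypothetical: a real-time task claims GPU 0 before launching work on it,
 * and releases the claim afterwards. */
static int claim_gpu0(void)
{
	return reg_nv_device(0, 1);	/* register the current task for device 0 */
}

static int release_gpu0(void)
{
	return reg_nv_device(0, 0);	/* unregister */
}

/* hypothetical: top-half code looks up who owns the interrupting device */
static struct task_struct* owner_of(u32 device)
{
	unsigned long flags;
	struct task_struct *owner;

	lock_nv_registry(device, &flags);
	owner = get_nv_device_owner(device);
	unlock_nv_registry(device, &flags);
	return owner;
}
```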
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h index 380b886d78ff..9f2a153ed236 100644 --- a/include/litmus/preempt.h +++ b/include/litmus/preempt.h | |||
@@ -26,6 +26,7 @@ const char* sched_state_name(int s); | |||
26 | (x), #x, __FUNCTION__); \ | 26 | (x), #x, __FUNCTION__); \ |
27 | } while (0); | 27 | } while (0); |
28 | 28 | ||
29 | //#define TRACE_SCHED_STATE_CHANGE(x, y, cpu) /* ignore */ | ||
29 | #define TRACE_SCHED_STATE_CHANGE(x, y, cpu) \ | 30 | #define TRACE_SCHED_STATE_CHANGE(x, y, cpu) \ |
30 | TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n", \ | 31 | TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n", \ |
31 | cpu, (x), sched_state_name(x), \ | 32 | cpu, (x), sched_state_name(x), \ |
diff --git a/include/litmus/rm_common.h b/include/litmus/rm_common.h new file mode 100644 index 000000000000..5991b0b4e758 --- /dev/null +++ b/include/litmus/rm_common.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * RM common data structures and utility functions shared by all RM | ||
3 | * based scheduler plugins | ||
4 | */ | ||
5 | |||
6 | /* CLEANUP: Add comments and make it less messy. | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #ifndef __UNC_RM_COMMON_H__ | ||
11 | #define __UNC_RM_COMMON_H__ | ||
12 | |||
13 | #include <litmus/rt_domain.h> | ||
14 | |||
15 | void rm_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
16 | release_jobs_t release); | ||
17 | |||
18 | int rm_higher_prio(struct task_struct* first, | ||
19 | struct task_struct* second); | ||
20 | |||
21 | int rm_ready_order(struct bheap_node* a, struct bheap_node* b); | ||
22 | |||
23 | int rm_preemption_needed(rt_domain_t* rt, struct task_struct *t); | ||
24 | |||
25 | #endif | ||
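These declarations mirror `edf_common.h`: a rate-monotonic plugin initializes its ready queue with `rm_domain_init()`, which puts `rm_ready_order()` behind the domain's heap. A skeletal sketch with placeholder callbacks (not the actual plugin code):

```c
#include <litmus/rm_common.h>

static rt_domain_t demo_rm_domain;

/* placeholder callbacks; a real plugin checks for and triggers preemptions here */
static int demo_check_resched(rt_domain_t *rt)
{
	return 0;
}

static void demo_release_jobs(rt_domain_t *rt, struct bheap *tasks)
{
	/* e.g., merge 'tasks' into the ready queue and call demo_check_resched() */
}

static void demo_rm_setup(void)
{
	rm_domain_init(&demo_rm_domain, demo_check_resched, demo_release_jobs);
}
```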
diff --git a/include/litmus/rm_srt_common.h b/include/litmus/rm_srt_common.h new file mode 100644 index 000000000000..78aa287327a2 --- /dev/null +++ b/include/litmus/rm_srt_common.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * RM-SRT common data structures and utility functions shared by all RM-SRT | ||
3 | * based scheduler plugins | ||
4 | */ | ||
5 | |||
6 | /* CLEANUP: Add comments and make it less messy. | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #ifndef __UNC_RM_SRT_COMMON_H__ | ||
11 | #define __UNC_RM_SRT_COMMON_H__ | ||
12 | |||
13 | #include <litmus/rt_domain.h> | ||
14 | |||
15 | void rm_srt_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
16 | release_jobs_t release); | ||
17 | |||
18 | int rm_srt_higher_prio(struct task_struct* first, | ||
19 | struct task_struct* second); | ||
20 | |||
21 | int rm_srt_ready_order(struct bheap_node* a, struct bheap_node* b); | ||
22 | |||
23 | int rm_srt_preemption_needed(rt_domain_t* rt, struct task_struct *t); | ||
24 | |||
25 | #endif | ||
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index d6d799174160..f50af3322c4b 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
@@ -75,6 +75,8 @@ struct control_page { | |||
75 | /* don't export internal data structures to user space (liblitmus) */ | 75 | /* don't export internal data structures to user space (liblitmus) */ |
76 | #ifdef __KERNEL__ | 76 | #ifdef __KERNEL__ |
77 | 77 | ||
78 | #include <linux/semaphore.h> | ||
79 | |||
78 | struct _rt_domain; | 80 | struct _rt_domain; |
79 | struct bheap_node; | 81 | struct bheap_node; |
80 | struct release_heap; | 82 | struct release_heap; |
@@ -100,6 +102,14 @@ struct rt_job { | |||
100 | 102 | ||
101 | struct pfair_param; | 103 | struct pfair_param; |
102 | 104 | ||
105 | enum klitirqd_sem_status | ||
106 | { | ||
107 | NEED_TO_REACQUIRE, | ||
108 | REACQUIRING, | ||
109 | NOT_HELD, | ||
110 | HELD | ||
111 | }; | ||
112 | |||
103 | /* RT task parameters for scheduling extensions | 113 | /* RT task parameters for scheduling extensions |
104 | * These parameters are inherited during clone and therefore must | 114 | * These parameters are inherited during clone and therefore must |
105 | * be explicitly set up before the task set is launched. | 115 | * be explicitly set up before the task set is launched. |
@@ -114,6 +124,38 @@ struct rt_param { | |||
114 | /* is the task present? (true if it can be scheduled) */ | 124 | /* is the task present? (true if it can be scheduled) */ |
115 | unsigned int present:1; | 125 | unsigned int present:1; |
116 | 126 | ||
127 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
128 | /* proxy threads have minimum priority by default */ | ||
129 | unsigned int is_proxy_thread:1; | ||
130 | |||
131 | /* pointer to klitirqd currently working on this | ||
132 | task_struct's behalf. only set by the task pointed | ||
133 | to by klitirqd. | ||
134 | |||
135 | ptr only valid if is_proxy_thread == 0 | ||
136 | */ | ||
137 | struct task_struct* cur_klitirqd; | ||
138 | |||
139 | /* Used to implement mutual exclusion between | ||
140 | * job and klitirqd execution. A job must always hold | ||
141 | * its klitirqd_sem to execute. A klitirqd instance | ||
142 | * must hold the semaphore before executing on behalf | ||
143 | * of a job. | ||
144 | */ | ||
145 | //struct semaphore klitirqd_sem; | ||
146 | struct mutex klitirqd_sem; | ||
147 | |||
148 | /* status of held klitirqd_sem, even if the held klitirqd_sem is from | ||
149 | another task (only proxy threads do this though). | ||
150 | */ | ||
151 | atomic_t klitirqd_sem_stat; | ||
152 | #endif | ||
153 | |||
154 | #ifdef CONFIG_LITMUS_NVIDIA | ||
155 | /* number of top-half interrupts handled on behalf of current job */ | ||
156 | atomic_t nv_int_count; | ||
157 | #endif | ||
158 | |||
117 | #ifdef CONFIG_LITMUS_LOCKING | 159 | #ifdef CONFIG_LITMUS_LOCKING |
118 | /* Is the task being priority-boosted by a locking protocol? */ | 160 | /* Is the task being priority-boosted by a locking protocol? */ |
119 | unsigned int priority_boosted:1; | 161 | unsigned int priority_boosted:1; |
@@ -134,7 +176,7 @@ struct rt_param { | |||
134 | * an increased task priority. | 176 | * an increased task priority. |
135 | */ | 177 | */ |
136 | struct task_struct* inh_task; | 178 | struct task_struct* inh_task; |
137 | 179 | ||
138 | #ifdef CONFIG_NP_SECTION | 180 | #ifdef CONFIG_NP_SECTION |
139 | /* For the FMLP under PSN-EDF, it is required to make the task | 181 | /* For the FMLP under PSN-EDF, it is required to make the task |
140 | * non-preemptive from kernel space. In order not to interfere with | 182 | * non-preemptive from kernel space. In order not to interfere with |
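The klitirqd fields above encode the invariant spelled out in the comment: a job and the klitirqd serving it must serialize on `klitirqd_sem`, with `klitirqd_sem_stat` recording the holder's status. A hypothetical illustration of how the helpers from litmus_softirq.h would express that on the job side (the real call sites are in the scheduler and klitirqd code; assumes CONFIG_LITMUS_SOFTIRQD):

```c
#include <litmus/litmus.h>          /* tsk_rt() */
#include <litmus/litmus_softirq.h>  /* down_and_set_stat(), up_and_set_stat() */

/* hypothetical: a job serializes with its klitirqd around its own execution */
static void job_enter(struct task_struct *self)
{
	down_and_set_stat(self, HELD, &tsk_rt(self)->klitirqd_sem);
}

static void job_exit(struct task_struct *self)
{
	up_and_set_stat(self, NOT_HELD, &tsk_rt(self)->klitirqd_sem);
}
```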
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h index 6e7cabdddae8..8fdf05dd7cd3 100644 --- a/include/litmus/sched_plugin.h +++ b/include/litmus/sched_plugin.h | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <litmus/locking.h> | 11 | #include <litmus/locking.h> |
12 | #endif | 12 | #endif |
13 | 13 | ||
14 | #include <linux/interrupt.h> | ||
15 | |||
14 | /************************ setup/tear down ********************/ | 16 | /************************ setup/tear down ********************/ |
15 | 17 | ||
16 | typedef long (*activate_plugin_t) (void); | 18 | typedef long (*activate_plugin_t) (void); |
@@ -29,7 +31,6 @@ typedef struct task_struct* (*schedule_t)(struct task_struct * prev); | |||
29 | */ | 31 | */ |
30 | typedef void (*finish_switch_t)(struct task_struct *prev); | 32 | typedef void (*finish_switch_t)(struct task_struct *prev); |
31 | 33 | ||
32 | |||
33 | /********************* task state changes ********************/ | 34 | /********************* task state changes ********************/ |
34 | 35 | ||
35 | /* Called to setup a new real-time task. | 36 | /* Called to setup a new real-time task. |
@@ -58,6 +59,21 @@ typedef void (*task_exit_t) (struct task_struct *); | |||
58 | typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type, | 59 | typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type, |
59 | void* __user config); | 60 | void* __user config); |
60 | 61 | ||
62 | /* Called to change inheritance levels of given task */ | ||
63 | typedef void (*set_prio_inh_t)(struct task_struct* t, | ||
64 | struct task_struct* prio_inh); | ||
65 | typedef void (*clear_prio_inh_t)(struct task_struct* t); | ||
66 | |||
67 | |||
68 | typedef void (*set_prio_inh_klitirq_t)(struct task_struct* klitirqd, | ||
69 | struct task_struct* old_owner, | ||
70 | struct task_struct* new_owner); | ||
71 | typedef void (*clear_prio_inh_klitirqd_t)(struct task_struct* klitirqd, | ||
72 | struct task_struct* old_owner); | ||
73 | |||
74 | |||
75 | typedef int (*enqueue_pai_tasklet_t)(struct tasklet_struct* tasklet); | ||
76 | typedef void (*run_tasklets_t)(struct task_struct* next); | ||
61 | 77 | ||
62 | /********************* sys call backends ********************/ | 78 | /********************* sys call backends ********************/ |
63 | /* This function causes the caller to sleep until the next release */ | 79 | /* This function causes the caller to sleep until the next release */ |
@@ -88,7 +104,7 @@ struct sched_plugin { | |||
88 | /* task state changes */ | 104 | /* task state changes */ |
89 | admit_task_t admit_task; | 105 | admit_task_t admit_task; |
90 | 106 | ||
91 | task_new_t task_new; | 107 | task_new_t task_new; |
92 | task_wake_up_t task_wake_up; | 108 | task_wake_up_t task_wake_up; |
93 | task_block_t task_block; | 109 | task_block_t task_block; |
94 | task_exit_t task_exit; | 110 | task_exit_t task_exit; |
@@ -96,6 +112,19 @@ struct sched_plugin { | |||
96 | #ifdef CONFIG_LITMUS_LOCKING | 112 | #ifdef CONFIG_LITMUS_LOCKING |
97 | /* locking protocols */ | 113 | /* locking protocols */ |
98 | allocate_lock_t allocate_lock; | 114 | allocate_lock_t allocate_lock; |
115 | |||
116 | set_prio_inh_t set_prio_inh; | ||
117 | clear_prio_inh_t clear_prio_inh; | ||
118 | #endif | ||
119 | |||
120 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
121 | set_prio_inh_klitirq_t set_prio_inh_klitirqd; | ||
122 | clear_prio_inh_klitirqd_t clear_prio_inh_klitirqd; | ||
123 | #endif | ||
124 | |||
125 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
126 | enqueue_pai_tasklet_t enqueue_pai_tasklet; | ||
127 | run_tasklets_t run_tasklets; | ||
99 | #endif | 128 | #endif |
100 | } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); | 129 | } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); |
101 | 130 | ||
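A plugin opts into the new hooks by filling in the extra `struct sched_plugin` fields. A skeletal, hypothetical plugin declaration showing where the new callbacks are wired up (stub bodies, not the GSN-EDF/C-EDF implementations):

```c
#include <litmus/sched_plugin.h>

#ifdef CONFIG_LITMUS_LOCKING
static void demo_set_prio_inh(struct task_struct* t, struct task_struct* prio_inh) { }
static void demo_clear_prio_inh(struct task_struct* t) { }
#endif

#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
static int demo_enqueue_pai_tasklet(struct tasklet_struct* t) { return 0; }
static void demo_run_tasklets(struct task_struct* next) { }
#endif

static struct sched_plugin demo_plugin = {
	.plugin_name		= "DEMO",
#ifdef CONFIG_LITMUS_LOCKING
	.set_prio_inh		= demo_set_prio_inh,
	.clear_prio_inh		= demo_clear_prio_inh,
#endif
#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
	.enqueue_pai_tasklet	= demo_enqueue_pai_tasklet,
	.run_tasklets		= demo_run_tasklets,
#endif
};
```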
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h index 7ca34cb13881..232c7588d103 100644 --- a/include/litmus/sched_trace.h +++ b/include/litmus/sched_trace.h | |||
@@ -11,12 +11,12 @@ struct st_trace_header { | |||
11 | u8 cpu; /* On which CPU was it recorded? */ | 11 | u8 cpu; /* On which CPU was it recorded? */ |
12 | u16 pid; /* PID of the task. */ | 12 | u16 pid; /* PID of the task. */ |
13 | u32 job; /* The job sequence number. */ | 13 | u32 job; /* The job sequence number. */ |
14 | }; | 14 | } __attribute__((packed)); |
15 | 15 | ||
16 | #define ST_NAME_LEN 16 | 16 | #define ST_NAME_LEN 16 |
17 | struct st_name_data { | 17 | struct st_name_data { |
18 | char cmd[ST_NAME_LEN];/* The name of the executable of this process. */ | 18 | char cmd[ST_NAME_LEN];/* The name of the executable of this process. */ |
19 | }; | 19 | } __attribute__((packed)); |
20 | 20 | ||
21 | struct st_param_data { /* regular params */ | 21 | struct st_param_data { /* regular params */ |
22 | u32 wcet; | 22 | u32 wcet; |
@@ -25,30 +25,29 @@ struct st_param_data { /* regular params */ | |||
25 | u8 partition; | 25 | u8 partition; |
26 | u8 class; | 26 | u8 class; |
27 | u8 __unused[2]; | 27 | u8 __unused[2]; |
28 | }; | 28 | } __attribute__((packed)); |
29 | 29 | ||
30 | struct st_release_data { /* A job was/is going to be released. */ | 30 | struct st_release_data { /* A job was/is going to be released. */ |
31 | u64 release; /* What's the release time? */ | 31 | u64 release; /* What's the release time? */ |
32 | u64 deadline; /* By when must it finish? */ | 32 | u64 deadline; /* By when must it finish? */ |
33 | }; | 33 | } __attribute__((packed)); |
34 | 34 | ||
35 | struct st_assigned_data { /* A job was assigned to a CPU. */ | 35 | struct st_assigned_data { /* A job was assigned to a CPU. */ |
36 | u64 when; | 36 | u64 when; |
37 | u8 target; /* Where should it execute? */ | 37 | u8 target; /* Where should it execute? */ |
38 | u8 __unused[7]; | 38 | u8 __unused[7]; |
39 | }; | 39 | } __attribute__((packed)); |
40 | 40 | ||
41 | struct st_switch_to_data { /* A process was switched to on a given CPU. */ | 41 | struct st_switch_to_data { /* A process was switched to on a given CPU. */ |
42 | u64 when; /* When did this occur? */ | 42 | u64 when; /* When did this occur? */ |
43 | u32 exec_time; /* Time the current job has executed. */ | 43 | u32 exec_time; /* Time the current job has executed. */ |
44 | u8 __unused[4]; | 44 | u8 __unused[4]; |
45 | 45 | } __attribute__((packed)); | |
46 | }; | ||
47 | 46 | ||
48 | struct st_switch_away_data { /* A process was switched away from on a given CPU. */ | 47 | struct st_switch_away_data { /* A process was switched away from on a given CPU. */ |
49 | u64 when; | 48 | u64 when; |
50 | u64 exec_time; | 49 | u64 exec_time; |
51 | }; | 50 | } __attribute__((packed)); |
52 | 51 | ||
53 | struct st_completion_data { /* A job completed. */ | 52 | struct st_completion_data { /* A job completed. */ |
54 | u64 when; | 53 | u64 when; |
@@ -56,35 +55,92 @@ struct st_completion_data { /* A job completed. */ | |||
56 | * next task automatically; set to 0 otherwise. | 55 | * next task automatically; set to 0 otherwise. |
57 | */ | 56 | */ |
58 | u8 __uflags:7; | 57 | u8 __uflags:7; |
59 | u8 __unused[7]; | 58 | u16 nv_int_count; |
60 | }; | 59 | u8 __unused[5]; |
60 | } __attribute__((packed)); | ||
61 | 61 | ||
62 | struct st_block_data { /* A task blocks. */ | 62 | struct st_block_data { /* A task blocks. */ |
63 | u64 when; | 63 | u64 when; |
64 | u64 __unused; | 64 | u64 __unused; |
65 | }; | 65 | } __attribute__((packed)); |
66 | 66 | ||
67 | struct st_resume_data { /* A task resumes. */ | 67 | struct st_resume_data { /* A task resumes. */ |
68 | u64 when; | 68 | u64 when; |
69 | u64 __unused; | 69 | u64 __unused; |
70 | }; | 70 | } __attribute__((packed)); |
71 | 71 | ||
72 | struct st_action_data { | 72 | struct st_action_data { |
73 | u64 when; | 73 | u64 when; |
74 | u8 action; | 74 | u8 action; |
75 | u8 __unused[7]; | 75 | u8 __unused[7]; |
76 | }; | 76 | } __attribute__((packed)); |
77 | 77 | ||
78 | struct st_sys_release_data { | 78 | struct st_sys_release_data { |
79 | u64 when; | 79 | u64 when; |
80 | u64 release; | 80 | u64 release; |
81 | }; | 81 | } __attribute__((packed)); |
82 | |||
83 | |||
84 | struct st_tasklet_release_data { | ||
85 | u64 when; | ||
86 | u64 __unused; | ||
87 | } __attribute__((packed)); | ||
88 | |||
89 | struct st_tasklet_begin_data { | ||
90 | u64 when; | ||
91 | u16 exe_pid; | ||
92 | u8 __unused[6]; | ||
93 | } __attribute__((packed)); | ||
94 | |||
95 | struct st_tasklet_end_data { | ||
96 | u64 when; | ||
97 | u16 exe_pid; | ||
98 | u8 flushed; | ||
99 | u8 __unused[5]; | ||
100 | } __attribute__((packed)); | ||
101 | |||
102 | |||
103 | struct st_work_release_data { | ||
104 | u64 when; | ||
105 | u64 __unused; | ||
106 | } __attribute__((packed)); | ||
107 | |||
108 | struct st_work_begin_data { | ||
109 | u64 when; | ||
110 | u16 exe_pid; | ||
111 | u8 __unused[6]; | ||
112 | } __attribute__((packed)); | ||
113 | |||
114 | struct st_work_end_data { | ||
115 | u64 when; | ||
116 | u16 exe_pid; | ||
117 | u8 flushed; | ||
118 | u8 __unused[5]; | ||
119 | } __attribute__((packed)); | ||
120 | |||
121 | struct st_effective_priority_change_data { | ||
122 | u64 when; | ||
123 | u16 inh_pid; | ||
124 | u8 __unused[6]; | ||
125 | } __attribute__((packed)); | ||
126 | |||
127 | struct st_nv_interrupt_begin_data { | ||
128 | u64 when; | ||
129 | u32 device; | ||
130 | u32 serialNumber; | ||
131 | } __attribute__((packed)); | ||
132 | |||
133 | struct st_nv_interrupt_end_data { | ||
134 | u64 when; | ||
135 | u32 device; | ||
136 | u32 serialNumber; | ||
137 | } __attribute__((packed)); | ||
82 | 138 | ||
83 | #define DATA(x) struct st_ ## x ## _data x; | 139 | #define DATA(x) struct st_ ## x ## _data x; |
84 | 140 | ||
85 | typedef enum { | 141 | typedef enum { |
86 | ST_NAME = 1, /* Start at one, so that we can spot | 142 | ST_NAME = 1, /* Start at one, so that we can spot |
87 | * uninitialized records. */ | 143 | * uninitialized records. */ |
88 | ST_PARAM, | 144 | ST_PARAM, |
89 | ST_RELEASE, | 145 | ST_RELEASE, |
90 | ST_ASSIGNED, | 146 | ST_ASSIGNED, |
@@ -94,7 +150,16 @@ typedef enum { | |||
94 | ST_BLOCK, | 150 | ST_BLOCK, |
95 | ST_RESUME, | 151 | ST_RESUME, |
96 | ST_ACTION, | 152 | ST_ACTION, |
97 | ST_SYS_RELEASE | 153 | ST_SYS_RELEASE, |
154 | ST_TASKLET_RELEASE, | ||
155 | ST_TASKLET_BEGIN, | ||
156 | ST_TASKLET_END, | ||
157 | ST_WORK_RELEASE, | ||
158 | ST_WORK_BEGIN, | ||
159 | ST_WORK_END, | ||
160 | ST_EFF_PRIO_CHANGE, | ||
161 | ST_NV_INTERRUPT_BEGIN, | ||
162 | ST_NV_INTERRUPT_END, | ||
98 | } st_event_record_type_t; | 163 | } st_event_record_type_t; |
99 | 164 | ||
100 | struct st_event_record { | 165 | struct st_event_record { |
@@ -113,8 +178,17 @@ struct st_event_record { | |||
113 | DATA(resume); | 178 | DATA(resume); |
114 | DATA(action); | 179 | DATA(action); |
115 | DATA(sys_release); | 180 | DATA(sys_release); |
181 | DATA(tasklet_release); | ||
182 | DATA(tasklet_begin); | ||
183 | DATA(tasklet_end); | ||
184 | DATA(work_release); | ||
185 | DATA(work_begin); | ||
186 | DATA(work_end); | ||
187 | DATA(effective_priority_change); | ||
188 | DATA(nv_interrupt_begin); | ||
189 | DATA(nv_interrupt_end); | ||
116 | } data; | 190 | } data; |
117 | }; | 191 | } __attribute__((packed)); |
118 | 192 | ||
119 | #undef DATA | 193 | #undef DATA |
120 | 194 | ||
@@ -129,6 +203,8 @@ struct st_event_record { | |||
129 | ft_event1(id, callback, task) | 203 | ft_event1(id, callback, task) |
130 | #define SCHED_TRACE2(id, callback, task, xtra) \ | 204 | #define SCHED_TRACE2(id, callback, task, xtra) \ |
131 | ft_event2(id, callback, task, xtra) | 205 | ft_event2(id, callback, task, xtra) |
206 | #define SCHED_TRACE3(id, callback, task, xtra1, xtra2) \ | ||
207 | ft_event3(id, callback, task, xtra1, xtra2) | ||
132 | 208 | ||
133 | /* provide prototypes; needed on sparc64 */ | 209 | /* provide prototypes; needed on sparc64 */ |
134 | #ifndef NO_TASK_TRACE_DECLS | 210 | #ifndef NO_TASK_TRACE_DECLS |
@@ -155,12 +231,45 @@ feather_callback void do_sched_trace_action(unsigned long id, | |||
155 | feather_callback void do_sched_trace_sys_release(unsigned long id, | 231 | feather_callback void do_sched_trace_sys_release(unsigned long id, |
156 | lt_t* start); | 232 | lt_t* start); |
157 | 233 | ||
234 | |||
235 | feather_callback void do_sched_trace_tasklet_release(unsigned long id, | ||
236 | struct task_struct* owner); | ||
237 | feather_callback void do_sched_trace_tasklet_begin(unsigned long id, | ||
238 | struct task_struct* owner); | ||
239 | feather_callback void do_sched_trace_tasklet_end(unsigned long id, | ||
240 | struct task_struct* owner, | ||
241 | unsigned long flushed); | ||
242 | |||
243 | feather_callback void do_sched_trace_work_release(unsigned long id, | ||
244 | struct task_struct* owner); | ||
245 | feather_callback void do_sched_trace_work_begin(unsigned long id, | ||
246 | struct task_struct* owner, | ||
247 | struct task_struct* exe); | ||
248 | feather_callback void do_sched_trace_work_end(unsigned long id, | ||
249 | struct task_struct* owner, | ||
250 | struct task_struct* exe, | ||
251 | unsigned long flushed); | ||
252 | |||
253 | feather_callback void do_sched_trace_eff_prio_change(unsigned long id, | ||
254 | struct task_struct* task, | ||
255 | struct task_struct* inh); | ||
256 | |||
257 | feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id, | ||
258 | u32 device); | ||
259 | feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id, | ||
260 | unsigned long unused); | ||
261 | |||
262 | |||
263 | /* returns true if we're tracing an interrupt on current CPU */ | ||
264 | /* int is_interrupt_tracing_active(void); */ | ||
265 | |||
158 | #endif | 266 | #endif |
159 | 267 | ||
160 | #else | 268 | #else |
161 | 269 | ||
162 | #define SCHED_TRACE(id, callback, task) /* no tracing */ | 270 | #define SCHED_TRACE(id, callback, task) /* no tracing */ |
163 | #define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */ | 271 | #define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */ |
272 | #define SCHED_TRACE3(id, callback, task, xtra1, xtra2) /* no tracing */ | ||
164 | 273 | ||
165 | #endif | 274 | #endif |
166 | 275 | ||
@@ -193,6 +302,35 @@ feather_callback void do_sched_trace_sys_release(unsigned long id, | |||
193 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when) | 302 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when) |
194 | 303 | ||
195 | 304 | ||
305 | #define sched_trace_tasklet_release(t) \ | ||
306 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 11, do_sched_trace_tasklet_release, t) | ||
307 | |||
308 | #define sched_trace_tasklet_begin(t) \ | ||
309 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 12, do_sched_trace_tasklet_begin, t) | ||
310 | |||
311 | #define sched_trace_tasklet_end(t, flushed) \ | ||
312 | SCHED_TRACE2(SCHED_TRACE_BASE_ID + 13, do_sched_trace_tasklet_end, t, flushed) | ||
313 | |||
314 | |||
315 | #define sched_trace_work_release(t) \ | ||
316 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 14, do_sched_trace_work_release, t) | ||
317 | |||
318 | #define sched_trace_work_begin(t, e) \ | ||
319 | SCHED_TRACE2(SCHED_TRACE_BASE_ID + 15, do_sched_trace_work_begin, t, e) | ||
320 | |||
321 | #define sched_trace_work_end(t, e, flushed) \ | ||
322 | SCHED_TRACE3(SCHED_TRACE_BASE_ID + 16, do_sched_trace_work_end, t, e, flushed) | ||
323 | |||
324 | |||
325 | #define sched_trace_eff_prio_change(t, inh) \ | ||
326 | SCHED_TRACE2(SCHED_TRACE_BASE_ID + 17, do_sched_trace_eff_prio_change, t, inh) | ||
327 | |||
328 | |||
329 | #define sched_trace_nv_interrupt_begin(d) \ | ||
330 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 18, do_sched_trace_nv_interrupt_begin, d) | ||
331 | #define sched_trace_nv_interrupt_end(d) \ | ||
332 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 19, do_sched_trace_nv_interrupt_end, d) | ||
333 | |||
196 | #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ | 334 | #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ |
197 | 335 | ||
198 | #endif /* __KERNEL__ */ | 336 | #endif /* __KERNEL__ */ |
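The new record types follow the existing pattern: each `st_*_data` struct gets a `DATA()` slot, an `ST_*` enum value, a `do_sched_trace_*` callback, and a `sched_trace_*` wrapper macro. A hypothetical sketch of how an executor would emit the tasklet events around running a deferred handler (the real call sites live in the klitirqd/PAI code):

```c
#include <linux/interrupt.h>
#include <litmus/sched_trace.h>

/* hypothetical: bracket a tasklet execution with the new trace events;
 * 'owner' is the real-time task the tasklet runs on behalf of. */
static void traced_tasklet_run(struct tasklet_struct *tasklet,
			       struct task_struct *owner)
{
	sched_trace_tasklet_begin(owner);
	tasklet->func(tasklet->data);
	sched_trace_tasklet_end(owner, 0 /* not flushed */);
}
```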
diff --git a/include/litmus/sched_trace_external.h b/include/litmus/sched_trace_external.h new file mode 100644 index 000000000000..e70e45e4cf51 --- /dev/null +++ b/include/litmus/sched_trace_external.h | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * sched_trace_external.h -- record scheduler events to a byte stream for offline analysis. | ||
3 | */ | ||
4 | #ifndef _LINUX_SCHED_TRACE_EXTERNAL_H_ | ||
5 | #define _LINUX_SCHED_TRACE_EXTERNAL_H_ | ||
6 | |||
7 | |||
8 | #ifdef CONFIG_SCHED_TASK_TRACE | ||
9 | extern void __sched_trace_tasklet_begin_external(struct task_struct* t); | ||
10 | static inline void sched_trace_tasklet_begin_external(struct task_struct* t) | ||
11 | { | ||
12 | __sched_trace_tasklet_begin_external(t); | ||
13 | } | ||
14 | |||
15 | extern void __sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed); | ||
16 | static inline void sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed) | ||
17 | { | ||
18 | __sched_trace_tasklet_end_external(t, flushed); | ||
19 | } | ||
20 | |||
21 | extern void __sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e); | ||
22 | static inline void sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e) | ||
23 | { | ||
24 | __sched_trace_work_begin_external(t, e); | ||
25 | } | ||
26 | |||
27 | extern void __sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f); | ||
28 | static inline void sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f) | ||
29 | { | ||
30 | __sched_trace_work_end_external(t, e, f); | ||
31 | } | ||
32 | |||
33 | #ifdef CONFIG_LITMUS_NVIDIA | ||
34 | extern void __sched_trace_nv_interrupt_begin_external(u32 device); | ||
35 | static inline void sched_trace_nv_interrupt_begin_external(u32 device) | ||
36 | { | ||
37 | __sched_trace_nv_interrupt_begin_external(device); | ||
38 | } | ||
39 | |||
40 | extern void __sched_trace_nv_interrupt_end_external(u32 device); | ||
41 | static inline void sched_trace_nv_interrupt_end_external(u32 device) | ||
42 | { | ||
43 | __sched_trace_nv_interrupt_end_external(device); | ||
44 | } | ||
45 | #endif | ||
46 | |||
47 | #else | ||
48 | |||
49 | // no tracing. | ||
50 | static inline void sched_trace_tasklet_begin_external(struct task_struct* t){} | ||
51 | static inline void sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed){} | ||
52 | static inline void sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e){} | ||
53 | static inline void sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f){} | ||
54 | |||
55 | #ifdef CONFIG_LITMUS_NVIDIA | ||
56 | static inline void sched_trace_nv_interrupt_begin_external(u32 device){} | ||
57 | static inline void sched_trace_nv_interrupt_end_external(u32 device){} | ||
58 | #endif | ||
59 | |||
60 | #endif | ||
61 | |||
62 | |||
63 | #ifdef CONFIG_LITMUS_NVIDIA | ||
64 | |||
65 | #define EX_TS(evt) \ | ||
66 | extern void __##evt(void); \ | ||
67 | static inline void EX_##evt(void) { __##evt(); } | ||
68 | |||
69 | EX_TS(TS_NV_TOPISR_START) | ||
70 | EX_TS(TS_NV_TOPISR_END) | ||
71 | EX_TS(TS_NV_BOTISR_START) | ||
72 | EX_TS(TS_NV_BOTISR_END) | ||
73 | EX_TS(TS_NV_RELEASE_BOTISR_START) | ||
74 | EX_TS(TS_NV_RELEASE_BOTISR_END) | ||
75 | |||
76 | #endif | ||
77 | |||
78 | #endif | ||
diff --git a/include/litmus/trace.h b/include/litmus/trace.h index e809376d6487..baa542d0135a 100644 --- a/include/litmus/trace.h +++ b/include/litmus/trace.h | |||
@@ -113,4 +113,24 @@ feather_callback void save_task_latency(unsigned long event, unsigned long when_ | |||
113 | 113 | ||
114 | #define TS_RELEASE_LATENCY(when) LTIMESTAMP(208, &(when)) | 114 | #define TS_RELEASE_LATENCY(when) LTIMESTAMP(208, &(when)) |
115 | 115 | ||
116 | |||
117 | #ifdef CONFIG_LITMUS_NVIDIA | ||
118 | |||
119 | #define TS_NV_TOPISR_START TIMESTAMP(200) | ||
120 | #define TS_NV_TOPISR_END TIMESTAMP(201) | ||
121 | |||
122 | #define TS_NV_BOTISR_START TIMESTAMP(202) | ||
123 | #define TS_NV_BOTISR_END TIMESTAMP(203) | ||
124 | |||
125 | #define TS_NV_RELEASE_BOTISR_START TIMESTAMP(204) | ||
126 | #define TS_NV_RELEASE_BOTISR_END TIMESTAMP(205) | ||
127 | |||
128 | #endif | ||
129 | |||
130 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
131 | #define TS_NV_SCHED_BOTISR_START TIMESTAMP(206) | ||
132 | #define TS_NV_SCHED_BOTISR_END TIMESTAMP(207) | ||
133 | #endif | ||
134 | |||
135 | |||
116 | #endif /* !_SYS_TRACE_H_ */ | 136 | #endif /* !_SYS_TRACE_H_ */ |
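The new Feather-Trace timestamps bracket the NVIDIA interrupt path. A hypothetical sketch of wrapping a top-half handler with the IDs 200/201 pair; `real_nv_isr()` stands in for the actual driver handler and is not part of the patch:

```c
#include <linux/interrupt.h>	/* irqreturn_t */
#include <litmus/trace.h>

/* hypothetical underlying handler; stands in for the real NVIDIA ISR */
extern irqreturn_t real_nv_isr(int irq, void *dev_id);

static irqreturn_t nv_isr_wrapper(int irq, void *dev_id)
{
	irqreturn_t ret;

#ifdef CONFIG_LITMUS_NVIDIA
	TS_NV_TOPISR_START;	/* Feather-Trace timestamp 200 */
#endif
	ret = real_nv_isr(irq, dev_id);
#ifdef CONFIG_LITMUS_NVIDIA
	TS_NV_TOPISR_END;	/* Feather-Trace timestamp 201 */
#endif
	return ret;
}
```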
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h index 94264c27d9ac..c6efc4c40af2 100644 --- a/include/litmus/unistd_32.h +++ b/include/litmus/unistd_32.h | |||
@@ -17,5 +17,6 @@ | |||
17 | #define __NR_wait_for_ts_release __LSC(9) | 17 | #define __NR_wait_for_ts_release __LSC(9) |
18 | #define __NR_release_ts __LSC(10) | 18 | #define __NR_release_ts __LSC(10) |
19 | #define __NR_null_call __LSC(11) | 19 | #define __NR_null_call __LSC(11) |
20 | #define __NR_register_nv_device __LSC(12) | ||
20 | 21 | ||
21 | #define NR_litmus_syscalls 12 | 22 | #define NR_litmus_syscalls 13 |
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h index d5ced0d2642c..b44a7c33bdf8 100644 --- a/include/litmus/unistd_64.h +++ b/include/litmus/unistd_64.h | |||
@@ -29,5 +29,8 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release) | |||
29 | __SYSCALL(__NR_release_ts, sys_release_ts) | 29 | __SYSCALL(__NR_release_ts, sys_release_ts) |
30 | #define __NR_null_call __LSC(11) | 30 | #define __NR_null_call __LSC(11) |
31 | __SYSCALL(__NR_null_call, sys_null_call) | 31 | __SYSCALL(__NR_null_call, sys_null_call) |
32 | #define __NR_register_nv_device __LSC(12) | ||
33 | __SYSCALL(__NR_register_nv_device, sys_register_nv_device) | ||
32 | 34 | ||
33 | #define NR_litmus_syscalls 12 | 35 | |
36 | #define NR_litmus_syscalls 13 | ||
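From user space, a task claims a GPU through the new syscall before using it. A hypothetical wrapper, assuming the syscall number is obtained from the exported LITMUS headers (liblitmus normally provides these) and that the argument convention mirrors `reg_nv_device()` in nvidia_info.h:

```c
/* user-space sketch, not part of this patch */
#include <unistd.h>
#include <sys/syscall.h>
#include <litmus/unistd_32.h>	/* assumed source of __NR_register_nv_device */

static long register_nv_device(int device_id, int do_register)
{
	return syscall(__NR_register_nv_device, device_id, do_register);
}

int main(void)
{
	if (register_nv_device(0, 1) == 0) {	/* claim GPU 0 */
		/* ... launch GPU work ... */
		register_nv_device(0, 0);	/* release it */
	}
	return 0;
}
```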
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 298c9276dfdb..3f2f54a49001 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -542,7 +542,7 @@ static void print_lock(struct held_lock *hlock) | |||
542 | print_ip_sym(hlock->acquire_ip); | 542 | print_ip_sym(hlock->acquire_ip); |
543 | } | 543 | } |
544 | 544 | ||
545 | static void lockdep_print_held_locks(struct task_struct *curr) | 545 | void lockdep_print_held_locks(struct task_struct *curr) |
546 | { | 546 | { |
547 | int i, depth = curr->lockdep_depth; | 547 | int i, depth = curr->lockdep_depth; |
548 | 548 | ||
@@ -558,6 +558,7 @@ static void lockdep_print_held_locks(struct task_struct *curr) | |||
558 | print_lock(curr->held_locks + i); | 558 | print_lock(curr->held_locks + i); |
559 | } | 559 | } |
560 | } | 560 | } |
561 | EXPORT_SYMBOL(lockdep_print_held_locks); | ||
561 | 562 | ||
562 | static void print_kernel_version(void) | 563 | static void print_kernel_version(void) |
563 | { | 564 | { |
diff --git a/kernel/mutex.c b/kernel/mutex.c index d607ed5dd441..2f363b9bfc1f 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c | |||
@@ -498,3 +498,128 @@ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) | |||
498 | return 1; | 498 | return 1; |
499 | } | 499 | } |
500 | EXPORT_SYMBOL(atomic_dec_and_mutex_lock); | 500 | EXPORT_SYMBOL(atomic_dec_and_mutex_lock); |
501 | |||
502 | |||
503 | |||
504 | |||
505 | void mutex_lock_sfx(struct mutex *lock, | ||
506 | side_effect_t pre, unsigned long pre_arg, | ||
507 | side_effect_t post, unsigned long post_arg) | ||
508 | { | ||
509 | long state = TASK_UNINTERRUPTIBLE; | ||
510 | |||
511 | struct task_struct *task = current; | ||
512 | struct mutex_waiter waiter; | ||
513 | unsigned long flags; | ||
514 | |||
515 | preempt_disable(); | ||
516 | mutex_acquire(&lock->dep_map, subclass, 0, ip); | ||
517 | |||
518 | spin_lock_mutex(&lock->wait_lock, flags); | ||
519 | |||
520 | if(pre) | ||
521 | { | ||
522 | if(unlikely(pre(pre_arg))) | ||
523 | { | ||
524 | // NOTE: aborting here will confuse lockdep's CONFIG_PROVE_LOCKING... | ||
525 | spin_unlock_mutex(&lock->wait_lock, flags); | ||
526 | preempt_enable(); | ||
527 | return; | ||
528 | } | ||
529 | } | ||
530 | |||
531 | debug_mutex_lock_common(lock, &waiter); | ||
532 | debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); | ||
533 | |||
534 | /* add waiting tasks to the end of the waitqueue (FIFO): */ | ||
535 | list_add_tail(&waiter.list, &lock->wait_list); | ||
536 | waiter.task = task; | ||
537 | |||
538 | if (atomic_xchg(&lock->count, -1) == 1) | ||
539 | goto done; | ||
540 | |||
541 | lock_contended(&lock->dep_map, ip); | ||
542 | |||
543 | for (;;) { | ||
544 | /* | ||
545 | * Let's try to take the lock again - this is needed even if | ||
546 | * we get here for the first time (shortly after failing to | ||
547 | * acquire the lock), to make sure that we get a wakeup once | ||
548 | * it's unlocked. Later on, if we sleep, this is the | ||
549 | * operation that gives us the lock. We xchg it to -1, so | ||
550 | * that when we release the lock, we properly wake up the | ||
551 | * other waiters: | ||
552 | */ | ||
553 | if (atomic_xchg(&lock->count, -1) == 1) | ||
554 | break; | ||
555 | |||
556 | __set_task_state(task, state); | ||
557 | |||
558 | /* didn't get the lock, go to sleep: */ | ||
559 | spin_unlock_mutex(&lock->wait_lock, flags); | ||
560 | preempt_enable_no_resched(); | ||
561 | schedule(); | ||
562 | preempt_disable(); | ||
563 | spin_lock_mutex(&lock->wait_lock, flags); | ||
564 | } | ||
565 | |||
566 | done: | ||
567 | lock_acquired(&lock->dep_map, ip); | ||
568 | /* got the lock - rejoice! */ | ||
569 | mutex_remove_waiter(lock, &waiter, current_thread_info()); | ||
570 | mutex_set_owner(lock); | ||
571 | |||
572 | /* set it to 0 if there are no waiters left: */ | ||
573 | if (likely(list_empty(&lock->wait_list))) | ||
574 | atomic_set(&lock->count, 0); | ||
575 | |||
576 | if(post) | ||
577 | post(post_arg); | ||
578 | |||
579 | spin_unlock_mutex(&lock->wait_lock, flags); | ||
580 | |||
581 | debug_mutex_free_waiter(&waiter); | ||
582 | preempt_enable(); | ||
583 | } | ||
584 | EXPORT_SYMBOL(mutex_lock_sfx); | ||
585 | |||
586 | void mutex_unlock_sfx(struct mutex *lock, | ||
587 | side_effect_t pre, unsigned long pre_arg, | ||
588 | side_effect_t post, unsigned long post_arg) | ||
589 | { | ||
590 | unsigned long flags; | ||
591 | |||
592 | spin_lock_mutex(&lock->wait_lock, flags); | ||
593 | |||
594 | if(pre) | ||
595 | pre(pre_arg); | ||
596 | |||
597 | //mutex_release(&lock->dep_map, nested, _RET_IP_); | ||
598 | mutex_release(&lock->dep_map, 1, _RET_IP_); | ||
599 | debug_mutex_unlock(lock); | ||
600 | |||
601 | /* | ||
602 | * some architectures leave the lock unlocked in the fastpath failure | ||
603 | * case, others need to leave it locked. In the latter case we have to | ||
604 | * unlock it here | ||
605 | */ | ||
606 | if (__mutex_slowpath_needs_to_unlock()) | ||
607 | atomic_set(&lock->count, 1); | ||
608 | |||
609 | if (!list_empty(&lock->wait_list)) { | ||
610 | /* get the first entry from the wait-list: */ | ||
611 | struct mutex_waiter *waiter = | ||
612 | list_entry(lock->wait_list.next, | ||
613 | struct mutex_waiter, list); | ||
614 | |||
615 | debug_mutex_wake_waiter(lock, waiter); | ||
616 | |||
617 | wake_up_process(waiter->task); | ||
618 | } | ||
619 | |||
620 | if(post) | ||
621 | post(post_arg); | ||
622 | |||
623 | spin_unlock_mutex(&lock->wait_lock, flags); | ||
624 | } | ||
625 | EXPORT_SYMBOL(mutex_unlock_sfx); | ||
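A minimal usage sketch of the side-effect mutex API defined above. It assumes side_effect_t is a callback of the form int (*)(unsigned long) whose non-zero return from the pre-hook of mutex_lock_sfx() aborts the acquisition (matching the early-return path above); struct my_dev, the hook names, and the counter field are hypothetical.

    #include <linux/mutex.h>

    /* Hypothetical per-device state; 'users' is only ever touched while
     * lock->wait_lock is held, which is exactly what the hooks guarantee. */
    struct my_dev {
    	struct mutex lock;
    	unsigned long users;	/* tasks between lock_sfx entry and unlock_sfx */
    };

    static int my_pre_hook(unsigned long arg)
    {
    	/* runs under lock->wait_lock, before the caller enqueues itself */
    	((struct my_dev *)arg)->users++;
    	return 0;	/* 0: go ahead and take the mutex */
    }

    static int my_post_hook(unsigned long arg)
    {
    	/* runs under lock->wait_lock, after the next waiter (if any) is woken */
    	((struct my_dev *)arg)->users--;
    	return 0;
    }

    static void my_dev_update(struct my_dev *dev)
    {
    	mutex_lock_sfx(&dev->lock, my_pre_hook, (unsigned long)dev, NULL, 0);
    	/* ... critical section ... */
    	mutex_unlock_sfx(&dev->lock, NULL, 0, my_post_hook, (unsigned long)dev);
    }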
diff --git a/kernel/sched.c b/kernel/sched.c index baaca61bc3a3..f3d9a69a3777 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -83,6 +83,10 @@ | |||
83 | #include <litmus/sched_trace.h> | 83 | #include <litmus/sched_trace.h> |
84 | #include <litmus/trace.h> | 84 | #include <litmus/trace.h> |
85 | 85 | ||
86 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
87 | #include <litmus/litmus_softirq.h> | ||
88 | #endif | ||
89 | |||
86 | static void litmus_tick(struct rq*, struct task_struct*); | 90 | static void litmus_tick(struct rq*, struct task_struct*); |
87 | 91 | ||
88 | #define CREATE_TRACE_POINTS | 92 | #define CREATE_TRACE_POINTS |
@@ -4305,6 +4309,7 @@ pick_next_task(struct rq *rq) | |||
4305 | BUG(); /* the idle class will always have a runnable task */ | 4309 | BUG(); /* the idle class will always have a runnable task */ |
4306 | } | 4310 | } |
4307 | 4311 | ||
4312 | |||
4308 | /* | 4313 | /* |
4309 | * schedule() is the main scheduler function. | 4314 | * schedule() is the main scheduler function. |
4310 | */ | 4315 | */ |
@@ -4323,6 +4328,10 @@ need_resched: | |||
4323 | rcu_note_context_switch(cpu); | 4328 | rcu_note_context_switch(cpu); |
4324 | prev = rq->curr; | 4329 | prev = rq->curr; |
4325 | 4330 | ||
4331 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
4332 | release_klitirqd_lock(prev); | ||
4333 | #endif | ||
4334 | |||
4326 | /* LITMUS^RT: quickly re-evaluate the scheduling decision | 4335 | /* LITMUS^RT: quickly re-evaluate the scheduling decision |
4327 | * if the previous one is no longer valid after CTX. | 4336 | * if the previous one is no longer valid after CTX. |
4328 | */ | 4337 | */ |
@@ -4411,13 +4420,24 @@ litmus_need_resched_nonpreemptible: | |||
4411 | goto litmus_need_resched_nonpreemptible; | 4420 | goto litmus_need_resched_nonpreemptible; |
4412 | 4421 | ||
4413 | preempt_enable_no_resched(); | 4422 | preempt_enable_no_resched(); |
4423 | |||
4414 | if (need_resched()) | 4424 | if (need_resched()) |
4415 | goto need_resched; | 4425 | goto need_resched; |
4416 | 4426 | ||
4427 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
4428 | reacquire_klitirqd_lock(prev); | ||
4429 | #endif | ||
4430 | |||
4431 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
4432 | litmus->run_tasklets(prev); | ||
4433 | #endif | ||
4434 | |||
4417 | srp_ceiling_block(); | 4435 | srp_ceiling_block(); |
4418 | } | 4436 | } |
4419 | EXPORT_SYMBOL(schedule); | 4437 | EXPORT_SYMBOL(schedule); |
4420 | 4438 | ||
4439 | |||
4440 | |||
4421 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER | 4441 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
4422 | 4442 | ||
4423 | static inline bool owner_running(struct mutex *lock, struct task_struct *owner) | 4443 | static inline bool owner_running(struct mutex *lock, struct task_struct *owner) |
@@ -4561,6 +4581,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, | |||
4561 | } | 4581 | } |
4562 | } | 4582 | } |
4563 | 4583 | ||
4584 | |||
4564 | /** | 4585 | /** |
4565 | * __wake_up - wake up threads blocked on a waitqueue. | 4586 | * __wake_up - wake up threads blocked on a waitqueue. |
4566 | * @q: the waitqueue | 4587 | * @q: the waitqueue |
@@ -4747,6 +4768,12 @@ void __sched wait_for_completion(struct completion *x) | |||
4747 | } | 4768 | } |
4748 | EXPORT_SYMBOL(wait_for_completion); | 4769 | EXPORT_SYMBOL(wait_for_completion); |
4749 | 4770 | ||
4771 | void __sched __wait_for_completion_locked(struct completion *x) | ||
4772 | { | ||
4773 | do_wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); | ||
4774 | } | ||
4775 | EXPORT_SYMBOL(__wait_for_completion_locked); | ||
4776 | |||
4750 | /** | 4777 | /** |
4751 | * wait_for_completion_timeout: - waits for completion of a task (w/timeout) | 4778 | * wait_for_completion_timeout: - waits for completion of a task (w/timeout) |
4752 | * @x: holds the state of this particular completion | 4779 | * @x: holds the state of this particular completion |
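A hedged sketch of the calling convention implied by the new __wait_for_completion_locked() export: unlike wait_for_completion(), it does not take x->wait.lock itself, so the caller is expected to hold that lock across the call (do_wait_for_common() drops and re-takes it internally while blocking). The wrapper name and surrounding logic here are hypothetical.

    #include <linux/completion.h>
    #include <linux/spinlock.h>

    static void my_consume_event(struct completion *x)
    {
    	spin_lock_irq(&x->wait.lock);
    	/* ... inspect/update state that x->wait.lock protects ... */
    	__wait_for_completion_locked(x);	/* may sleep; wait.lock held on return */
    	spin_unlock_irq(&x->wait.lock);
    }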
diff --git a/kernel/semaphore.c b/kernel/semaphore.c index 94a62c0d4ade..c947a046a6d7 100644 --- a/kernel/semaphore.c +++ b/kernel/semaphore.c | |||
@@ -33,11 +33,11 @@ | |||
33 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
34 | #include <linux/ftrace.h> | 34 | #include <linux/ftrace.h> |
35 | 35 | ||
36 | static noinline void __down(struct semaphore *sem); | 36 | noinline void __down(struct semaphore *sem); |
37 | static noinline int __down_interruptible(struct semaphore *sem); | 37 | static noinline int __down_interruptible(struct semaphore *sem); |
38 | static noinline int __down_killable(struct semaphore *sem); | 38 | static noinline int __down_killable(struct semaphore *sem); |
39 | static noinline int __down_timeout(struct semaphore *sem, long jiffies); | 39 | static noinline int __down_timeout(struct semaphore *sem, long jiffies); |
40 | static noinline void __up(struct semaphore *sem); | 40 | noinline void __up(struct semaphore *sem); |
41 | 41 | ||
42 | /** | 42 | /** |
43 | * down - acquire the semaphore | 43 | * down - acquire the semaphore |
@@ -190,11 +190,13 @@ EXPORT_SYMBOL(up); | |||
190 | 190 | ||
191 | /* Functions for the contended case */ | 191 | /* Functions for the contended case */ |
192 | 192 | ||
193 | /* | ||
193 | struct semaphore_waiter { | 194 | struct semaphore_waiter { |
194 | struct list_head list; | 195 | struct list_head list; |
195 | struct task_struct *task; | 196 | struct task_struct *task; |
196 | int up; | 197 | int up; |
197 | }; | 198 | }; |
199 | */ | ||
198 | 200 | ||
199 | /* | 201 | /* |
200 | * Because this function is inlined, the 'state' parameter will be | 202 | * Because this function is inlined, the 'state' parameter will be |
@@ -233,10 +235,12 @@ static inline int __sched __down_common(struct semaphore *sem, long state, | |||
233 | return -EINTR; | 235 | return -EINTR; |
234 | } | 236 | } |
235 | 237 | ||
236 | static noinline void __sched __down(struct semaphore *sem) | 238 | noinline void __sched __down(struct semaphore *sem) |
237 | { | 239 | { |
238 | __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); | 240 | __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); |
239 | } | 241 | } |
242 | EXPORT_SYMBOL(__down); | ||
243 | |||
240 | 244 | ||
241 | static noinline int __sched __down_interruptible(struct semaphore *sem) | 245 | static noinline int __sched __down_interruptible(struct semaphore *sem) |
242 | { | 246 | { |
@@ -253,7 +257,7 @@ static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies) | |||
253 | return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies); | 257 | return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies); |
254 | } | 258 | } |
255 | 259 | ||
256 | static noinline void __sched __up(struct semaphore *sem) | 260 | noinline void __sched __up(struct semaphore *sem) |
257 | { | 261 | { |
258 | struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, | 262 | struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, |
259 | struct semaphore_waiter, list); | 263 | struct semaphore_waiter, list); |
@@ -261,3 +265,4 @@ static noinline void __sched __up(struct semaphore *sem) | |||
261 | waiter->up = 1; | 265 | waiter->up = 1; |
262 | wake_up_process(waiter->task); | 266 | wake_up_process(waiter->task); |
263 | } | 267 | } |
268 | EXPORT_SYMBOL(__up); \ No newline at end of file | ||
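A hedged sketch of what un-static'ing and exporting __down()/__up() enables: an open-coded acquire/release that mirrors down()/up() in this file, calling the slow paths with sem->lock held. The extern declarations are repeated here because this hunk does not add them to a header; the wrapper names are hypothetical.

    #include <linux/semaphore.h>

    extern void __down(struct semaphore *sem);	/* exported above */
    extern void __up(struct semaphore *sem);

    static void my_down(struct semaphore *sem)
    {
    	unsigned long flags;

    	raw_spin_lock_irqsave(&sem->lock, flags);
    	if (likely(sem->count > 0))
    		sem->count--;		/* uncontended fast path */
    	else
    		__down(sem);		/* blocks; called with sem->lock held */
    	raw_spin_unlock_irqrestore(&sem->lock, flags);
    }

    static void my_up(struct semaphore *sem)
    {
    	unsigned long flags;

    	raw_spin_lock_irqsave(&sem->lock, flags);
    	if (likely(list_empty(&sem->wait_list)))
    		sem->count++;		/* nobody waiting */
    	else
    		__up(sem);		/* hand off to the first waiter */
    	raw_spin_unlock_irqrestore(&sem->lock, flags);
    }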
diff --git a/kernel/softirq.c b/kernel/softirq.c index fca82c32042b..48d6bde692a1 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -29,6 +29,15 @@ | |||
29 | #include <trace/events/irq.h> | 29 | #include <trace/events/irq.h> |
30 | 30 | ||
31 | #include <asm/irq.h> | 31 | #include <asm/irq.h> |
32 | |||
33 | #include <litmus/litmus.h> | ||
34 | #include <litmus/sched_trace.h> | ||
35 | |||
36 | #ifdef CONFIG_LITMUS_NVIDIA | ||
37 | #include <litmus/nvidia_info.h> | ||
38 | #include <litmus/trace.h> | ||
39 | #endif | ||
40 | |||
32 | /* | 41 | /* |
33 | - No shared variables, all the data are CPU local. | 42 | - No shared variables, all the data are CPU local. |
34 | - If a softirq needs serialization, let it serialize itself | 43 | - If a softirq needs serialization, let it serialize itself |
@@ -67,7 +76,7 @@ char *softirq_to_name[NR_SOFTIRQS] = { | |||
67 | * to the pending events, so lets the scheduler to balance | 76 | * to the pending events, so lets the scheduler to balance |
68 | * the softirq load for us. | 77 | * the softirq load for us. |
69 | */ | 78 | */ |
70 | static void wakeup_softirqd(void) | 79 | void wakeup_softirqd(void) |
71 | { | 80 | { |
72 | /* Interrupts are disabled: no need to stop preemption */ | 81 | /* Interrupts are disabled: no need to stop preemption */ |
73 | struct task_struct *tsk = __this_cpu_read(ksoftirqd); | 82 | struct task_struct *tsk = __this_cpu_read(ksoftirqd); |
@@ -193,6 +202,7 @@ void local_bh_enable_ip(unsigned long ip) | |||
193 | } | 202 | } |
194 | EXPORT_SYMBOL(local_bh_enable_ip); | 203 | EXPORT_SYMBOL(local_bh_enable_ip); |
195 | 204 | ||
205 | |||
196 | /* | 206 | /* |
197 | * We restart softirq processing MAX_SOFTIRQ_RESTART times, | 207 | * We restart softirq processing MAX_SOFTIRQ_RESTART times, |
198 | * and we fall back to softirqd after that. | 208 | * and we fall back to softirqd after that. |
@@ -204,14 +214,15 @@ EXPORT_SYMBOL(local_bh_enable_ip); | |||
204 | */ | 214 | */ |
205 | #define MAX_SOFTIRQ_RESTART 10 | 215 | #define MAX_SOFTIRQ_RESTART 10 |
206 | 216 | ||
207 | asmlinkage void __do_softirq(void) | 217 | static void ____do_softirq(void) |
208 | { | 218 | { |
209 | struct softirq_action *h; | ||
210 | __u32 pending; | 219 | __u32 pending; |
211 | int max_restart = MAX_SOFTIRQ_RESTART; | 220 | |
221 | struct softirq_action *h; | ||
212 | int cpu; | 222 | int cpu; |
213 | 223 | ||
214 | pending = local_softirq_pending(); | 224 | pending = local_softirq_pending(); |
225 | |||
215 | account_system_vtime(current); | 226 | account_system_vtime(current); |
216 | 227 | ||
217 | __local_bh_disable((unsigned long)__builtin_return_address(0), | 228 | __local_bh_disable((unsigned long)__builtin_return_address(0), |
@@ -219,14 +230,13 @@ asmlinkage void __do_softirq(void) | |||
219 | lockdep_softirq_enter(); | 230 | lockdep_softirq_enter(); |
220 | 231 | ||
221 | cpu = smp_processor_id(); | 232 | cpu = smp_processor_id(); |
222 | restart: | ||
223 | /* Reset the pending bitmask before enabling irqs */ | ||
224 | set_softirq_pending(0); | ||
225 | 233 | ||
234 | set_softirq_pending(0); | ||
235 | |||
226 | local_irq_enable(); | 236 | local_irq_enable(); |
227 | 237 | ||
228 | h = softirq_vec; | 238 | h = softirq_vec; |
229 | 239 | ||
230 | do { | 240 | do { |
231 | if (pending & 1) { | 241 | if (pending & 1) { |
232 | unsigned int vec_nr = h - softirq_vec; | 242 | unsigned int vec_nr = h - softirq_vec; |
@@ -245,14 +255,23 @@ restart: | |||
245 | prev_count, preempt_count()); | 255 | prev_count, preempt_count()); |
246 | preempt_count() = prev_count; | 256 | preempt_count() = prev_count; |
247 | } | 257 | } |
248 | 258 | ||
249 | rcu_bh_qs(cpu); | 259 | rcu_bh_qs(cpu); |
250 | } | 260 | } |
251 | h++; | 261 | h++; |
252 | pending >>= 1; | 262 | pending >>= 1; |
253 | } while (pending); | 263 | } while (pending); |
254 | 264 | ||
255 | local_irq_disable(); | 265 | local_irq_disable(); |
266 | } | ||
267 | |||
268 | static void ___do_softirq(void) | ||
269 | { | ||
270 | int max_restart = MAX_SOFTIRQ_RESTART; | ||
271 | __u32 pending; | ||
272 | |||
273 | restart: | ||
274 | ____do_softirq(); | ||
256 | 275 | ||
257 | pending = local_softirq_pending(); | 276 | pending = local_softirq_pending(); |
258 | if (pending && --max_restart) | 277 | if (pending && --max_restart) |
@@ -260,9 +279,38 @@ restart: | |||
260 | 279 | ||
261 | if (pending) | 280 | if (pending) |
262 | wakeup_softirqd(); | 281 | wakeup_softirqd(); |
282 | } | ||
263 | 283 | ||
284 | asmlinkage void __do_softirq(void) | ||
285 | { | ||
286 | #ifdef CONFIG_LITMUS_THREAD_ALL_SOFTIRQ | ||
287 | /* Skip straight to wakeup_softirqd() when LITMUS_THREAD_ALL_SOFTIRQ is | ||
288 | enabled, unless a truly high-priority softirq is pending. */ | ||
289 | struct task_struct *tsk = __get_cpu_var(ksoftirqd); | ||
290 | |||
291 | if(tsk) | ||
292 | { | ||
293 | __u32 pending = local_softirq_pending(); | ||
294 | const __u32 high_prio_softirq = (1<<HI_SOFTIRQ) | (1<<TIMER_SOFTIRQ) | (1<<HRTIMER_SOFTIRQ); | ||
295 | if(pending && !(pending & high_prio_softirq)) | ||
296 | { | ||
297 | wakeup_softirqd(); | ||
298 | return; | ||
299 | } | ||
300 | } | ||
301 | #endif | ||
302 | |||
303 | /* | ||
304 | * 'immediate' softirq execution: | ||
305 | */ | ||
306 | __local_bh_disable((unsigned long)__builtin_return_address(0), | ||
307 | SOFTIRQ_OFFSET); | ||
308 | lockdep_softirq_enter(); | ||
309 | |||
310 | ___do_softirq(); | ||
311 | |||
264 | lockdep_softirq_exit(); | 312 | lockdep_softirq_exit(); |
265 | 313 | ||
266 | account_system_vtime(current); | 314 | account_system_vtime(current); |
267 | __local_bh_enable(SOFTIRQ_OFFSET); | 315 | __local_bh_enable(SOFTIRQ_OFFSET); |
268 | } | 316 | } |
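The LITMUS_THREAD_ALL_SOFTIRQ early-out above can be restated as a small predicate; the helper name is hypothetical, and the example values in the comment follow this kernel's softirq numbering.

    #include <linux/interrupt.h>

    /* Sketch: defer everything to ksoftirqd unless a timer-class softirq is
     * among the pending bits. E.g. pending == (1 << NET_RX_SOFTIRQ) gets
     * deferred, while anything containing TIMER_SOFTIRQ runs immediately. */
    static inline int defer_to_ksoftirqd(__u32 pending)
    {
    	const __u32 high_prio = (1 << HI_SOFTIRQ) |
    				(1 << TIMER_SOFTIRQ) |
    				(1 << HRTIMER_SOFTIRQ);

    	return pending && !(pending & high_prio);
    }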
@@ -402,8 +450,65 @@ struct tasklet_head | |||
402 | static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); | 450 | static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); |
403 | static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); | 451 | static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); |
404 | 452 | ||
453 | |||
405 | void __tasklet_schedule(struct tasklet_struct *t) | 454 | void __tasklet_schedule(struct tasklet_struct *t) |
406 | { | 455 | { |
456 | #ifdef CONFIG_LITMUS_NVIDIA | ||
457 | if(is_nvidia_func(t->func)) | ||
458 | { | ||
459 | u32 nvidia_device = get_tasklet_nv_device_num(t); | ||
460 | // TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n", | ||
461 | // __FUNCTION__, nvidia_device,litmus_clock()); | ||
462 | |||
463 | unsigned long flags; | ||
464 | struct task_struct* device_owner; | ||
465 | |||
466 | lock_nv_registry(nvidia_device, &flags); | ||
467 | |||
468 | device_owner = get_nv_device_owner(nvidia_device); | ||
469 | |||
470 | if(device_owner==NULL) | ||
471 | { | ||
472 | t->owner = NULL; | ||
473 | } | ||
474 | else | ||
475 | { | ||
476 | if(is_realtime(device_owner)) | ||
477 | { | ||
478 | TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n", | ||
479 | __FUNCTION__, nvidia_device,litmus_clock()); | ||
480 | TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n", | ||
481 | __FUNCTION__,device_owner->pid,nvidia_device); | ||
482 | |||
483 | t->owner = device_owner; | ||
484 | sched_trace_tasklet_release(t->owner); | ||
485 | |||
486 | if(likely(_litmus_tasklet_schedule(t,nvidia_device))) | ||
487 | { | ||
488 | unlock_nv_registry(nvidia_device, &flags); | ||
489 | return; | ||
490 | } | ||
491 | else | ||
492 | { | ||
493 | t->owner = NULL; /* fall through to normal scheduling */ | ||
494 | } | ||
495 | } | ||
496 | else | ||
497 | { | ||
498 | t->owner = NULL; | ||
499 | } | ||
500 | } | ||
501 | unlock_nv_registry(nvidia_device, &flags); | ||
502 | } | ||
503 | #endif | ||
504 | |||
505 | ___tasklet_schedule(t); | ||
506 | } | ||
507 | EXPORT_SYMBOL(__tasklet_schedule); | ||
508 | |||
509 | |||
510 | void ___tasklet_schedule(struct tasklet_struct *t) | ||
511 | { | ||
407 | unsigned long flags; | 512 | unsigned long flags; |
408 | 513 | ||
409 | local_irq_save(flags); | 514 | local_irq_save(flags); |
@@ -413,11 +518,65 @@ void __tasklet_schedule(struct tasklet_struct *t) | |||
413 | raise_softirq_irqoff(TASKLET_SOFTIRQ); | 518 | raise_softirq_irqoff(TASKLET_SOFTIRQ); |
414 | local_irq_restore(flags); | 519 | local_irq_restore(flags); |
415 | } | 520 | } |
521 | EXPORT_SYMBOL(___tasklet_schedule); | ||
416 | 522 | ||
417 | EXPORT_SYMBOL(__tasklet_schedule); | ||
418 | 523 | ||
419 | void __tasklet_hi_schedule(struct tasklet_struct *t) | 524 | void __tasklet_hi_schedule(struct tasklet_struct *t) |
420 | { | 525 | { |
526 | #ifdef CONFIG_LITMUS_NVIDIA | ||
527 | if(is_nvidia_func(t->func)) | ||
528 | { | ||
529 | u32 nvidia_device = get_tasklet_nv_device_num(t); | ||
530 | // TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n", | ||
531 | // __FUNCTION__, nvidia_device,litmus_clock()); | ||
532 | |||
533 | unsigned long flags; | ||
534 | struct task_struct* device_owner; | ||
535 | |||
536 | lock_nv_registry(nvidia_device, &flags); | ||
537 | |||
538 | device_owner = get_nv_device_owner(nvidia_device); | ||
539 | |||
540 | if(device_owner==NULL) | ||
541 | { | ||
542 | t->owner = NULL; | ||
543 | } | ||
544 | else | ||
545 | { | ||
546 | if( is_realtime(device_owner)) | ||
547 | { | ||
548 | TRACE("%s: Handling NVIDIA tasklet for device %u\tat %llu\n", | ||
549 | __FUNCTION__, nvidia_device,litmus_clock()); | ||
550 | TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n", | ||
551 | __FUNCTION__,device_owner->pid,nvidia_device); | ||
552 | |||
553 | t->owner = device_owner; | ||
554 | sched_trace_tasklet_release(t->owner); | ||
555 | if(likely(_litmus_tasklet_hi_schedule(t,nvidia_device))) | ||
556 | { | ||
557 | unlock_nv_registry(nvidia_device, &flags); | ||
558 | return; | ||
559 | } | ||
560 | else | ||
561 | { | ||
562 | t->owner = NULL; /* fall through to normal scheduling */ | ||
563 | } | ||
564 | } | ||
565 | else | ||
566 | { | ||
567 | t->owner = NULL; | ||
568 | } | ||
569 | } | ||
570 | unlock_nv_registry(nvidia_device, &flags); | ||
571 | } | ||
572 | #endif | ||
573 | |||
574 | ___tasklet_hi_schedule(t); | ||
575 | } | ||
576 | EXPORT_SYMBOL(__tasklet_hi_schedule); | ||
577 | |||
578 | void ___tasklet_hi_schedule(struct tasklet_struct* t) | ||
579 | { | ||
421 | unsigned long flags; | 580 | unsigned long flags; |
422 | 581 | ||
423 | local_irq_save(flags); | 582 | local_irq_save(flags); |
@@ -427,19 +586,72 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) | |||
427 | raise_softirq_irqoff(HI_SOFTIRQ); | 586 | raise_softirq_irqoff(HI_SOFTIRQ); |
428 | local_irq_restore(flags); | 587 | local_irq_restore(flags); |
429 | } | 588 | } |
430 | 589 | EXPORT_SYMBOL(___tasklet_hi_schedule); | |
431 | EXPORT_SYMBOL(__tasklet_hi_schedule); | ||
432 | 590 | ||
433 | void __tasklet_hi_schedule_first(struct tasklet_struct *t) | 591 | void __tasklet_hi_schedule_first(struct tasklet_struct *t) |
434 | { | 592 | { |
435 | BUG_ON(!irqs_disabled()); | 593 | BUG_ON(!irqs_disabled()); |
594 | #ifdef CONFIG_LITMUS_NVIDIA | ||
595 | if(is_nvidia_func(t->func)) | ||
596 | { | ||
597 | u32 nvidia_device = get_tasklet_nv_device_num(t); | ||
598 | // TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n", | ||
599 | // __FUNCTION__, nvidia_device,litmus_clock()); | ||
600 | unsigned long flags; | ||
601 | struct task_struct* device_owner; | ||
602 | |||
603 | lock_nv_registry(nvidia_device, &flags); | ||
604 | |||
605 | device_owner = get_nv_device_owner(nvidia_device); | ||
606 | |||
607 | if(device_owner==NULL) | ||
608 | { | ||
609 | t->owner = NULL; | ||
610 | } | ||
611 | else | ||
612 | { | ||
613 | if(is_realtime(device_owner)) | ||
614 | { | ||
615 | TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n", | ||
616 | __FUNCTION__, nvidia_device,litmus_clock()); | ||
617 | |||
618 | TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n", | ||
619 | __FUNCTION__,device_owner->pid,nvidia_device); | ||
620 | |||
621 | t->owner = device_owner; | ||
622 | sched_trace_tasklet_release(t->owner); | ||
623 | if(likely(_litmus_tasklet_hi_schedule_first(t,nvidia_device))) | ||
624 | { | ||
625 | unlock_nv_registry(nvidia_device, &flags); | ||
626 | return; | ||
627 | } | ||
628 | else | ||
629 | { | ||
630 | t->owner = NULL; /* fall through to normal scheduling */ | ||
631 | } | ||
632 | } | ||
633 | else | ||
634 | { | ||
635 | t->owner = NULL; | ||
636 | } | ||
637 | } | ||
638 | unlock_nv_registry(nvidia_device, &flags); | ||
639 | } | ||
640 | #endif | ||
641 | |||
642 | ___tasklet_hi_schedule_first(t); | ||
643 | } | ||
644 | EXPORT_SYMBOL(__tasklet_hi_schedule_first); | ||
645 | |||
646 | void ___tasklet_hi_schedule_first(struct tasklet_struct* t) | ||
647 | { | ||
648 | BUG_ON(!irqs_disabled()); | ||
436 | 649 | ||
437 | t->next = __this_cpu_read(tasklet_hi_vec.head); | 650 | t->next = __this_cpu_read(tasklet_hi_vec.head); |
438 | __this_cpu_write(tasklet_hi_vec.head, t); | 651 | __this_cpu_write(tasklet_hi_vec.head, t); |
439 | __raise_softirq_irqoff(HI_SOFTIRQ); | 652 | __raise_softirq_irqoff(HI_SOFTIRQ); |
440 | } | 653 | } |
441 | 654 | EXPORT_SYMBOL(___tasklet_hi_schedule_first); | |
442 | EXPORT_SYMBOL(__tasklet_hi_schedule_first); | ||
443 | 655 | ||
444 | static void tasklet_action(struct softirq_action *a) | 656 | static void tasklet_action(struct softirq_action *a) |
445 | { | 657 | { |
@@ -495,6 +707,7 @@ static void tasklet_hi_action(struct softirq_action *a) | |||
495 | if (!atomic_read(&t->count)) { | 707 | if (!atomic_read(&t->count)) { |
496 | if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) | 708 | if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) |
497 | BUG(); | 709 | BUG(); |
710 | |||
498 | t->func(t->data); | 711 | t->func(t->data); |
499 | tasklet_unlock(t); | 712 | tasklet_unlock(t); |
500 | continue; | 713 | continue; |
@@ -518,8 +731,13 @@ void tasklet_init(struct tasklet_struct *t, | |||
518 | t->next = NULL; | 731 | t->next = NULL; |
519 | t->state = 0; | 732 | t->state = 0; |
520 | atomic_set(&t->count, 0); | 733 | atomic_set(&t->count, 0); |
734 | |||
521 | t->func = func; | 735 | t->func = func; |
522 | t->data = data; | 736 | t->data = data; |
737 | |||
738 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
739 | t->owner = NULL; | ||
740 | #endif | ||
523 | } | 741 | } |
524 | 742 | ||
525 | EXPORT_SYMBOL(tasklet_init); | 743 | EXPORT_SYMBOL(tasklet_init); |
@@ -534,6 +752,7 @@ void tasklet_kill(struct tasklet_struct *t) | |||
534 | yield(); | 752 | yield(); |
535 | } while (test_bit(TASKLET_STATE_SCHED, &t->state)); | 753 | } while (test_bit(TASKLET_STATE_SCHED, &t->state)); |
536 | } | 754 | } |
755 | |||
537 | tasklet_unlock_wait(t); | 756 | tasklet_unlock_wait(t); |
538 | clear_bit(TASKLET_STATE_SCHED, &t->state); | 757 | clear_bit(TASKLET_STATE_SCHED, &t->state); |
539 | } | 758 | } |
@@ -808,6 +1027,7 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu) | |||
808 | for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) { | 1027 | for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) { |
809 | if (*i == t) { | 1028 | if (*i == t) { |
810 | *i = t->next; | 1029 | *i = t->next; |
1030 | |||
811 | /* If this was the tail element, move the tail ptr */ | 1031 | /* If this was the tail element, move the tail ptr */ |
812 | if (*i == NULL) | 1032 | if (*i == NULL) |
813 | per_cpu(tasklet_vec, cpu).tail = i; | 1033 | per_cpu(tasklet_vec, cpu).tail = i; |
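The NVIDIA owner-lookup-and-redirect block above is repeated nearly verbatim in __tasklet_schedule(), __tasklet_hi_schedule(), and __tasklet_hi_schedule_first(). Below is a sketch of a shared helper, assuming only the LITMUS interfaces already used above; the helper name nv_try_litmus_tasklet() and the function-pointer plumbing are hypothetical, not part of this patch.

    #ifdef CONFIG_LITMUS_NVIDIA
    /* 'dispatch' stands for the matching _litmus_tasklet_schedule(),
     * _litmus_tasklet_hi_schedule(), or _litmus_tasklet_hi_schedule_first().
     * Returns non-zero iff the tasklet was handed to LITMUS. */
    static int nv_try_litmus_tasklet(struct tasklet_struct *t,
    				 int (*dispatch)(struct tasklet_struct *, u32))
    {
    	u32 device;
    	unsigned long flags;
    	struct task_struct *owner;
    	int handled = 0;

    	if (!is_nvidia_func(t->func))
    		return 0;

    	device = get_tasklet_nv_device_num(t);
    	lock_nv_registry(device, &flags);

    	owner = get_nv_device_owner(device);
    	if (owner && is_realtime(owner)) {
    		t->owner = owner;
    		sched_trace_tasklet_release(t->owner);
    		handled = dispatch(t, device);
    		if (!handled)
    			t->owner = NULL;	/* fall back to Linux scheduling */
    	} else {
    		t->owner = NULL;
    	}

    	unlock_nv_registry(device, &flags);
    	return handled;
    }
    #endif

With such a helper, each of the three schedule paths above would reduce to one call to the helper followed, on failure, by the existing ___tasklet_*schedule*() fallback.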
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0400553f0d04..2ceb7b43a045 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -44,6 +44,13 @@ | |||
44 | 44 | ||
45 | #include "workqueue_sched.h" | 45 | #include "workqueue_sched.h" |
46 | 46 | ||
47 | #ifdef CONFIG_LITMUS_NVIDIA | ||
48 | #include <litmus/litmus.h> | ||
49 | #include <litmus/sched_trace.h> | ||
50 | #include <litmus/nvidia_info.h> | ||
51 | #endif | ||
52 | |||
53 | |||
47 | enum { | 54 | enum { |
48 | /* global_cwq flags */ | 55 | /* global_cwq flags */ |
49 | GCWQ_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ | 56 | GCWQ_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ |
@@ -1047,9 +1054,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
1047 | work_flags |= WORK_STRUCT_DELAYED; | 1054 | work_flags |= WORK_STRUCT_DELAYED; |
1048 | worklist = &cwq->delayed_works; | 1055 | worklist = &cwq->delayed_works; |
1049 | } | 1056 | } |
1050 | |||
1051 | insert_work(cwq, work, worklist, work_flags); | 1057 | insert_work(cwq, work, worklist, work_flags); |
1052 | |||
1053 | spin_unlock_irqrestore(&gcwq->lock, flags); | 1058 | spin_unlock_irqrestore(&gcwq->lock, flags); |
1054 | } | 1059 | } |
1055 | 1060 | ||
@@ -2687,10 +2692,70 @@ EXPORT_SYMBOL(cancel_delayed_work_sync); | |||
2687 | */ | 2692 | */ |
2688 | int schedule_work(struct work_struct *work) | 2693 | int schedule_work(struct work_struct *work) |
2689 | { | 2694 | { |
2690 | return queue_work(system_wq, work); | 2695 | #if 0 |
2696 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD) | ||
2697 | if(is_nvidia_func(work->func)) | ||
2698 | { | ||
2699 | u32 nvidiaDevice = get_work_nv_device_num(work); | ||
2700 | |||
2701 | //1) Ask Litmus which task owns GPU <nvidiaDevice>. (API to be defined.) | ||
2702 | unsigned long flags; | ||
2703 | struct task_struct* device_owner; | ||
2704 | |||
2705 | lock_nv_registry(nvidiaDevice, &flags); | ||
2706 | |||
2707 | device_owner = get_nv_device_owner(nvidiaDevice); | ||
2708 | |||
2709 | //2) If there is an owner, set work->owner to the owner's task struct. | ||
2710 | if(device_owner==NULL) | ||
2711 | { | ||
2712 | work->owner = NULL; | ||
2713 | //TRACE("%s: the owner task of NVIDIA Device %u is NULL\n",__FUNCTION__,nvidiaDevice); | ||
2714 | } | ||
2715 | else | ||
2716 | { | ||
2717 | if( is_realtime(device_owner)) | ||
2718 | { | ||
2719 | TRACE("%s: Handling NVIDIA work for device\t%u\tat\t%llu\n", | ||
2720 | __FUNCTION__, nvidiaDevice,litmus_clock()); | ||
2721 | TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n", | ||
2722 | __FUNCTION__, | ||
2723 | device_owner->pid, | ||
2724 | nvidiaDevice); | ||
2725 | |||
2726 | //3) Call litmus_schedule_work() and return (don't execute the rest | ||
2727 | // of schedule_work()). | ||
2728 | work->owner = device_owner; | ||
2729 | sched_trace_work_release(work->owner); | ||
2730 | if(likely(litmus_schedule_work(work, nvidiaDevice))) | ||
2731 | { | ||
2732 | unlock_nv_registry(nvidiaDevice, &flags); | ||
2733 | return 1; | ||
2734 | } | ||
2735 | else | ||
2736 | { | ||
2737 | work->owner = NULL; /* fall through to normal work scheduling */ | ||
2738 | } | ||
2739 | } | ||
2740 | else | ||
2741 | { | ||
2742 | work->owner = NULL; | ||
2743 | } | ||
2744 | } | ||
2745 | unlock_nv_registry(nvidiaDevice, &flags); | ||
2746 | } | ||
2747 | #endif | ||
2748 | #endif | ||
2749 | return(__schedule_work(work)); | ||
2691 | } | 2750 | } |
2692 | EXPORT_SYMBOL(schedule_work); | 2751 | EXPORT_SYMBOL(schedule_work); |
2693 | 2752 | ||
2753 | int __schedule_work(struct work_struct* work) | ||
2754 | { | ||
2755 | return queue_work(system_wq, work); | ||
2756 | } | ||
2757 | EXPORT_SYMBOL(__schedule_work); | ||
2758 | |||
2694 | /* | 2759 | /* |
2695 | * schedule_work_on - put work task on a specific cpu | 2760 | * schedule_work_on - put work task on a specific cpu |
2696 | * @cpu: cpu to put the work task on | 2761 | * @cpu: cpu to put the work task on |
diff --git a/litmus/Kconfig b/litmus/Kconfig index 94b48e199577..158261e0ed08 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig | |||
@@ -215,4 +215,106 @@ config PREEMPT_STATE_TRACE | |||
215 | 215 | ||
216 | endmenu | 216 | endmenu |
217 | 217 | ||
218 | menu "Interrupt Handling" | ||
219 | |||
220 | config LITMUS_THREAD_ALL_SOFTIRQ | ||
221 | bool "Process all softirqs in ksoftirqd threads." | ||
222 | default n | ||
223 | help | ||
224 | (Experimental) Thread all softirqs to ksoftirqd | ||
225 | daemon threads, similar to PREEMPT_RT. I/O | ||
226 | throughput will drop with this enabled, but | ||
227 | latencies due to interrupts will be reduced. | ||
228 | |||
229 | WARNING: Timer responsiveness will likely be | ||
230 | decreased as timer callbacks are also threaded. | ||
231 | This is unlike PREEMPT_RT's hardirqs. | ||
232 | |||
233 | If unsure, say No. | ||
234 | |||
235 | |||
236 | choice | ||
237 | prompt "Scheduling of interrupt bottom-halves in Litmus." | ||
238 | default LITMUS_SOFTIRQD_NONE | ||
239 | depends on LITMUS_LOCKING && !LITMUS_THREAD_ALL_SOFTIRQ | ||
240 | help | ||
241 | Schedule tasklets with known priorities in Litmus. | ||
242 | |||
243 | config LITMUS_SOFTIRQD_NONE | ||
244 | bool "No tasklet scheduling in Litmus." | ||
245 | help | ||
246 | Don't schedule tasklets in Litmus. Default. | ||
247 | |||
248 | config LITMUS_SOFTIRQD | ||
249 | bool "Spawn klitirqd interrupt handling threads." | ||
250 | help | ||
251 | Create klitirqd interrupt handling threads. Work must be | ||
252 | specifically dispatched to these workers. (Softirqs for | ||
253 | Litmus tasks are not magically redirected to klitirqd.) | ||
254 | |||
255 | G-EDF/RM, C-EDF/RM ONLY for now! | ||
256 | |||
257 | |||
258 | config LITMUS_PAI_SOFTIRQD | ||
259 | bool "Defer tasklets to context switch points." | ||
260 | help | ||
261 | Only execute scheduled tasklet bottom halves at | ||
262 | scheduling points. This reduces context-switch | ||
263 | overhead at the cost of non-preemptive durations | ||
264 | of bottom-half processing. | ||
265 | |||
266 | G-EDF/RM, C-EDF/RM ONLY for now! | ||
267 | |||
268 | endchoice | ||
269 | |||
270 | |||
271 | config NR_LITMUS_SOFTIRQD | ||
272 | int "Number of klitirqd." | ||
273 | depends on LITMUS_SOFTIRQD | ||
274 | range 1 4096 | ||
275 | default "1" | ||
276 | help | ||
277 | Should be at most the number of CPUs in your system. | ||
278 | |||
279 | config LITMUS_NVIDIA | ||
280 | bool "Litmus handling of NVIDIA interrupts." | ||
281 | depends on LITMUS_SOFTIRQD || LITMUS_PAI_SOFTIRQD | ||
282 | default n | ||
283 | help | ||
284 | Direct tasklets from NVIDIA devices to Litmus's klitirqd. | ||
285 | |||
286 | If unsure, say No. | ||
287 | |||
288 | config NV_DEVICE_NUM | ||
289 | int "Number of NVIDIA GPUs." | ||
290 | depends on LITMUS_SOFTIRQD || LITMUS_PAI_SOFTIRQD | ||
291 | range 1 4096 | ||
292 | default "1" | ||
293 | help | ||
294 | Should be at most the number of CPUs and at | ||
295 | most the number of GPUs in your system. | ||
296 | |||
297 | choice | ||
298 | prompt "CUDA/Driver Version Support" | ||
299 | default CUDA_4_0 | ||
300 | depends on LITMUS_NVIDIA | ||
301 | help | ||
302 | Select the version of CUDA/driver to support. | ||
303 | |||
304 | config CUDA_4_0 | ||
305 | bool "CUDA 4.0" | ||
306 | depends on LITMUS_NVIDIA | ||
307 | help | ||
308 | Support CUDA 4.0 RC2 (dev. driver version: x86_64-270.40) | ||
309 | |||
310 | config CUDA_3_2 | ||
311 | bool "CUDA 3.2" | ||
312 | depends on LITMUS_NVIDIA | ||
313 | help | ||
314 | Support CUDA 3.2 (dev. driver version: x86_64-260.24) | ||
315 | |||
316 | endchoice | ||
317 | |||
318 | endmenu | ||
319 | |||
218 | endmenu | 320 | endmenu |
diff --git a/litmus/Makefile b/litmus/Makefile index 7338180f196f..3acb335f3197 100644 --- a/litmus/Makefile +++ b/litmus/Makefile | |||
@@ -27,3 +27,7 @@ obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o | |||
27 | obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o | 27 | obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o |
28 | obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o | 28 | obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o |
29 | obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o | 29 | obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o |
30 | |||
31 | obj-$(CONFIG_LITMUS_SOFTIRQD) += litmus_softirq.o | ||
32 | obj-$(CONFIG_LITMUS_PAI_SOFTIRQD) += litmus_pai_softirq.o | ||
33 | obj-$(CONFIG_LITMUS_NVIDIA) += nvidia_info.o sched_trace_external.o | ||
diff --git a/litmus/edf_common.c b/litmus/edf_common.c index 9b44dc2d8d1e..0a06d7a26c00 100644 --- a/litmus/edf_common.c +++ b/litmus/edf_common.c | |||
@@ -63,8 +63,52 @@ int edf_higher_prio(struct task_struct* first, | |||
63 | 63 | ||
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | if (!is_realtime(second_task)) | ||
67 | return true; | ||
68 | |||
69 | if (earlier_deadline(first_task, second_task)) | ||
70 | return true; | ||
71 | |||
72 | if (get_deadline(first_task) == get_deadline(second_task)) | ||
73 | { | ||
74 | if (shorter_period(first_task, second_task)) | ||
75 | { | ||
76 | return true; | ||
77 | } | ||
78 | if (get_rt_period(first_task) == get_rt_period(second_task)) | ||
79 | { | ||
80 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
81 | if (first_task->rt_param.is_proxy_thread < second_task->rt_param.is_proxy_thread) | ||
82 | { | ||
83 | return true; | ||
84 | } | ||
85 | if (first_task->rt_param.is_proxy_thread == second_task->rt_param.is_proxy_thread) | ||
86 | { | ||
87 | #endif | ||
88 | if (first_task->pid < second_task->pid) | ||
89 | { | ||
90 | return true; | ||
91 | } | ||
92 | if (first_task->pid == second_task->pid) | ||
93 | { | ||
94 | return !second->rt_param.inh_task; | ||
95 | } | ||
96 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
97 | } | ||
98 | #endif | ||
99 | } | ||
100 | } | ||
101 | |||
102 | return false; | ||
66 | 103 | ||
104 | #if 0 | ||
67 | return !is_realtime(second_task) || | 105 | return !is_realtime(second_task) || |
106 | |||
107 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
108 | /* proxy threads always lose w/o inheritance. */ | ||
109 | (first_task->rt_param.is_proxy_thread < | ||
110 | second_task->rt_param.is_proxy_thread) || | ||
111 | #endif | ||
68 | 112 | ||
69 | /* is the deadline of the first task earlier? | 113 | /* is the deadline of the first task earlier? |
70 | * Then it has higher priority. | 114 | * Then it has higher priority. |
@@ -82,6 +126,7 @@ int edf_higher_prio(struct task_struct* first, | |||
82 | */ | 126 | */ |
83 | (first_task->pid == second_task->pid && | 127 | (first_task->pid == second_task->pid && |
84 | !second->rt_param.inh_task))); | 128 | !second->rt_param.inh_task))); |
129 | #endif | ||
85 | } | 130 | } |
86 | 131 | ||
87 | int edf_ready_order(struct bheap_node* a, struct bheap_node* b) | 132 | int edf_ready_order(struct bheap_node* a, struct bheap_node* b) |
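The comparison added to edf_higher_prio() above is a lexicographic tie-break. A condensed restatement, meant to be read in the context of edf_common.c: the function name and the explicit b_orig parameter (standing for the pre-inheritance pointer 'second' used in the final check above) are introduced here for illustration only.

    static inline int edf_higher_prio_sketch(struct task_struct *a,
    					 struct task_struct *b,
    					 struct task_struct *b_orig)
    {
    	if (!is_realtime(b))
    		return 1;				/* RT always beats non-RT */
    	if (get_deadline(a) != get_deadline(b))
    		return earlier_deadline(a, b);		/* 1st key: deadline */
    	if (get_rt_period(a) != get_rt_period(b))
    		return shorter_period(a, b);		/* 2nd key: period */
    #ifdef CONFIG_LITMUS_SOFTIRQD
    	if (a->rt_param.is_proxy_thread != b->rt_param.is_proxy_thread)
    		return a->rt_param.is_proxy_thread <
    		       b->rt_param.is_proxy_thread;	/* non-proxy beats proxy */
    #endif
    	if (a->pid != b->pid)
    		return a->pid < b->pid;			/* then lower PID wins */
    	return !b_orig->rt_param.inh_task;		/* last: inheritance */
    }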
diff --git a/litmus/fdso.c b/litmus/fdso.c index aa7b384264e3..2b7f9ba85857 100644 --- a/litmus/fdso.c +++ b/litmus/fdso.c | |||
@@ -22,6 +22,7 @@ extern struct fdso_ops generic_lock_ops; | |||
22 | 22 | ||
23 | static const struct fdso_ops* fdso_ops[] = { | 23 | static const struct fdso_ops* fdso_ops[] = { |
24 | &generic_lock_ops, /* FMLP_SEM */ | 24 | &generic_lock_ops, /* FMLP_SEM */ |
25 | &generic_lock_ops, /* KFMLP_SEM */ | ||
25 | &generic_lock_ops, /* SRP_SEM */ | 26 | &generic_lock_ops, /* SRP_SEM */ |
26 | }; | 27 | }; |
27 | 28 | ||
diff --git a/litmus/litmus.c b/litmus/litmus.c index 301390148d02..ea3ffade6490 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -21,6 +21,10 @@ | |||
21 | #include <litmus/affinity.h> | 21 | #include <litmus/affinity.h> |
22 | #endif | 22 | #endif |
23 | 23 | ||
24 | #ifdef CONFIG_LITMUS_NVIDIA | ||
25 | #include <litmus/nvidia_info.h> | ||
26 | #endif | ||
27 | |||
24 | /* Number of RT tasks that exist in the system */ | 28 | /* Number of RT tasks that exist in the system */ |
25 | atomic_t rt_task_count = ATOMIC_INIT(0); | 29 | atomic_t rt_task_count = ATOMIC_INIT(0); |
26 | static DEFINE_RAW_SPINLOCK(task_transition_lock); | 30 | static DEFINE_RAW_SPINLOCK(task_transition_lock); |
@@ -51,6 +55,28 @@ void bheap_node_free(struct bheap_node* hn) | |||
51 | struct release_heap* release_heap_alloc(int gfp_flags); | 55 | struct release_heap* release_heap_alloc(int gfp_flags); |
52 | void release_heap_free(struct release_heap* rh); | 56 | void release_heap_free(struct release_heap* rh); |
53 | 57 | ||
58 | #ifdef CONFIG_LITMUS_NVIDIA | ||
59 | /* | ||
60 | * sys_register_nv_device | ||
61 | * @nv_device_id: The Nvidia device id that the task want to register | ||
62 | * @reg_action: set to '1' to register the specified device. zero otherwise. | ||
63 | * Syscall for register task's designated nvidia device into NV_DEVICE_REG array | ||
64 | * Returns EFAULT if nv_device_id is out of range. | ||
65 | * 0 if success | ||
66 | */ | ||
67 | asmlinkage long sys_register_nv_device(int nv_device_id, int reg_action) | ||
68 | { | ||
69 | /* register the device to caller (aka 'current') */ | ||
70 | return(reg_nv_device(nv_device_id, reg_action)); | ||
71 | } | ||
72 | #else | ||
73 | asmlinkage long sys_register_nv_device(int nv_device_id, int reg_action) | ||
74 | { | ||
75 | return(-EINVAL); | ||
76 | } | ||
77 | #endif | ||
78 | |||
79 | |||
54 | /* | 80 | /* |
55 | * sys_set_task_rt_param | 81 | * sys_set_task_rt_param |
56 | * @pid: Pid of the task which scheduling parameters must be changed | 82 | * @pid: Pid of the task which scheduling parameters must be changed |
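A hedged userspace sketch for the sys_register_nv_device() syscall added above. The patch only wires up the kernel entry point, so the raw syscall(2) form is used here; __NR_register_nv_device is a placeholder that must be replaced with the arch-specific number and is not defined by any standard header.

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_register_nv_device
    #define __NR_register_nv_device -1	/* placeholder: substitute the arch-specific number */
    #endif

    int main(void)
    {
    	/* claim GPU 0 for the calling task (reg_action = 1) ... */
    	if (syscall(__NR_register_nv_device, 0, 1) != 0)
    		perror("register_nv_device");

    	/* ... do GPU work, then drop the registration (reg_action = 0) */
    	syscall(__NR_register_nv_device, 0, 0);
    	return 0;
    }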
@@ -135,6 +161,22 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param) | |||
135 | 161 | ||
136 | target->rt_param.task_params = tp; | 162 | target->rt_param.task_params = tp; |
137 | 163 | ||
164 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
165 | /* proxy thread off by default */ | ||
166 | target->rt_param.is_proxy_thread = 0; | ||
167 | target->rt_param.cur_klitirqd = NULL; | ||
168 | //init_MUTEX(&target->rt_param.klitirqd_sem); | ||
169 | mutex_init(&target->rt_param.klitirqd_sem); | ||
170 | //init_completion(&target->rt_param.klitirqd_sem); | ||
171 | //target->rt_param.klitirqd_sem_stat = NOT_HELD; | ||
172 | atomic_set(&target->rt_param.klitirqd_sem_stat, NOT_HELD); | ||
173 | #endif | ||
174 | |||
175 | #ifdef CONFIG_LITMUS_NVIDIA | ||
176 | atomic_set(&target->rt_param.nv_int_count, 0); | ||
177 | #endif | ||
178 | |||
179 | |||
138 | retval = 0; | 180 | retval = 0; |
139 | out_unlock: | 181 | out_unlock: |
140 | read_unlock_irq(&tasklist_lock); | 182 | read_unlock_irq(&tasklist_lock); |
@@ -269,6 +311,7 @@ asmlinkage long sys_query_job_no(unsigned int __user *job) | |||
269 | return retval; | 311 | return retval; |
270 | } | 312 | } |
271 | 313 | ||
314 | |||
272 | /* sys_null_call() is only used for determining raw system call | 315 | /* sys_null_call() is only used for determining raw system call |
273 | * overheads (kernel entry, kernel exit). It has no useful side effects. | 316 | * overheads (kernel entry, kernel exit). It has no useful side effects. |
274 | * If ts is non-NULL, then the current Feather-Trace time is recorded. | 317 | * If ts is non-NULL, then the current Feather-Trace time is recorded. |
@@ -282,7 +325,7 @@ asmlinkage long sys_null_call(cycles_t __user *ts) | |||
282 | now = get_cycles(); | 325 | now = get_cycles(); |
283 | ret = put_user(now, ts); | 326 | ret = put_user(now, ts); |
284 | } | 327 | } |
285 | 328 | ||
286 | return ret; | 329 | return ret; |
287 | } | 330 | } |
288 | 331 | ||
@@ -303,6 +346,20 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
303 | * at this point in time. | 346 | * at this point in time. |
304 | */ | 347 | */ |
305 | WARN_ON(p->rt_param.inh_task); | 348 | WARN_ON(p->rt_param.inh_task); |
349 | |||
350 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
351 | /* We probably should not have any tasklets executing for | ||
352 | * us at this time. | ||
353 | */ | ||
354 | WARN_ON(p->rt_param.cur_klitirqd); | ||
355 | WARN_ON(atomic_read(&p->rt_param.klitirqd_sem_stat) == HELD); | ||
356 | |||
357 | if(p->rt_param.cur_klitirqd) | ||
358 | flush_pending(p->rt_param.cur_klitirqd, p); | ||
359 | |||
360 | if(atomic_read(&p->rt_param.klitirqd_sem_stat) == HELD) | ||
361 | up_and_set_stat(p, NOT_HELD, &p->rt_param.klitirqd_sem); | ||
362 | #endif | ||
306 | 363 | ||
307 | /* Cleanup everything else. */ | 364 | /* Cleanup everything else. */ |
308 | memset(&p->rt_param, 0, sizeof(p->rt_param)); | 365 | memset(&p->rt_param, 0, sizeof(p->rt_param)); |
@@ -403,7 +460,7 @@ static void synch_on_plugin_switch(void* info) | |||
403 | */ | 460 | */ |
404 | int switch_sched_plugin(struct sched_plugin* plugin) | 461 | int switch_sched_plugin(struct sched_plugin* plugin) |
405 | { | 462 | { |
406 | unsigned long flags; | 463 | //unsigned long flags; |
407 | int ret = 0; | 464 | int ret = 0; |
408 | 465 | ||
409 | BUG_ON(!plugin); | 466 | BUG_ON(!plugin); |
@@ -417,8 +474,15 @@ int switch_sched_plugin(struct sched_plugin* plugin) | |||
417 | while (atomic_read(&cannot_use_plugin) < num_online_cpus()) | 474 | while (atomic_read(&cannot_use_plugin) < num_online_cpus()) |
418 | cpu_relax(); | 475 | cpu_relax(); |
419 | 476 | ||
477 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
478 | if(!klitirqd_is_dead()) | ||
479 | { | ||
480 | kill_klitirqd(); | ||
481 | } | ||
482 | #endif | ||
483 | |||
420 | /* stop task transitions */ | 484 | /* stop task transitions */ |
421 | raw_spin_lock_irqsave(&task_transition_lock, flags); | 485 | //raw_spin_lock_irqsave(&task_transition_lock, flags); |
422 | 486 | ||
423 | /* don't switch if there are active real-time tasks */ | 487 | /* don't switch if there are active real-time tasks */ |
424 | if (atomic_read(&rt_task_count) == 0) { | 488 | if (atomic_read(&rt_task_count) == 0) { |
@@ -436,7 +500,7 @@ int switch_sched_plugin(struct sched_plugin* plugin) | |||
436 | } else | 500 | } else |
437 | ret = -EBUSY; | 501 | ret = -EBUSY; |
438 | out: | 502 | out: |
439 | raw_spin_unlock_irqrestore(&task_transition_lock, flags); | 503 | //raw_spin_unlock_irqrestore(&task_transition_lock, flags); |
440 | atomic_set(&cannot_use_plugin, 0); | 504 | atomic_set(&cannot_use_plugin, 0); |
441 | return ret; | 505 | return ret; |
442 | } | 506 | } |
diff --git a/litmus/litmus_pai_softirq.c b/litmus/litmus_pai_softirq.c new file mode 100644 index 000000000000..b31eeb8a2538 --- /dev/null +++ b/litmus/litmus_pai_softirq.c | |||
@@ -0,0 +1,64 @@ | |||
1 | #include <linux/interrupt.h> | ||
2 | #include <linux/percpu.h> | ||
3 | #include <linux/cpu.h> | ||
4 | #include <linux/kthread.h> | ||
5 | #include <linux/ftrace.h> | ||
6 | #include <linux/smp.h> | ||
7 | #include <linux/slab.h> | ||
8 | #include <linux/mutex.h> | ||
9 | |||
10 | #include <linux/sched.h> | ||
11 | #include <linux/cpuset.h> | ||
12 | |||
13 | #include <litmus/litmus.h> | ||
14 | #include <litmus/sched_trace.h> | ||
15 | #include <litmus/jobs.h> | ||
16 | #include <litmus/sched_plugin.h> | ||
17 | #include <litmus/litmus_softirq.h> | ||
18 | |||
19 | |||
20 | |||
21 | int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id) | ||
22 | { | ||
23 | int ret = 0; /* assume failure */ | ||
24 | if(unlikely((t->owner == NULL) || !is_realtime(t->owner))) | ||
25 | { | ||
26 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
27 | BUG(); | ||
28 | } | ||
29 | |||
30 | ret = litmus->enqueue_pai_tasklet(t); | ||
31 | |||
32 | return(ret); | ||
33 | } | ||
34 | |||
35 | EXPORT_SYMBOL(__litmus_tasklet_schedule); | ||
36 | |||
37 | |||
38 | |||
39 | // failure causes default Linux handling. | ||
40 | int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id) | ||
41 | { | ||
42 | int ret = 0; /* assume failure */ | ||
43 | return(ret); | ||
44 | } | ||
45 | EXPORT_SYMBOL(__litmus_tasklet_hi_schedule); | ||
46 | |||
47 | |||
48 | // failure causes default Linux handling. | ||
49 | int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k_id) | ||
50 | { | ||
51 | int ret = 0; /* assume failure */ | ||
52 | return(ret); | ||
53 | } | ||
54 | EXPORT_SYMBOL(__litmus_tasklet_hi_schedule_first); | ||
55 | |||
56 | |||
57 | // failure causes default Linux handling. | ||
58 | int __litmus_schedule_work(struct work_struct *w, unsigned int k_id) | ||
59 | { | ||
60 | int ret = 0; /* assume failure */ | ||
61 | return(ret); | ||
62 | } | ||
63 | EXPORT_SYMBOL(__litmus_schedule_work); | ||
64 | |||
diff --git a/litmus/litmus_proc.c b/litmus/litmus_proc.c index 4bf725a36c9c..381513366c7a 100644 --- a/litmus/litmus_proc.c +++ b/litmus/litmus_proc.c | |||
@@ -20,11 +20,18 @@ static struct proc_dir_entry *litmus_dir = NULL, | |||
20 | #ifdef CONFIG_RELEASE_MASTER | 20 | #ifdef CONFIG_RELEASE_MASTER |
21 | *release_master_file = NULL, | 21 | *release_master_file = NULL, |
22 | #endif | 22 | #endif |
23 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
24 | *klitirqd_file = NULL, | ||
25 | #endif | ||
23 | *plugs_file = NULL; | 26 | *plugs_file = NULL; |
24 | 27 | ||
25 | /* in litmus/sync.c */ | 28 | /* in litmus/sync.c */ |
26 | int count_tasks_waiting_for_release(void); | 29 | int count_tasks_waiting_for_release(void); |
27 | 30 | ||
31 | extern int proc_read_klitirqd_stats(char *page, char **start, | ||
32 | off_t off, int count, | ||
33 | int *eof, void *data); | ||
34 | |||
28 | static int proc_read_stats(char *page, char **start, | 35 | static int proc_read_stats(char *page, char **start, |
29 | off_t off, int count, | 36 | off_t off, int count, |
30 | int *eof, void *data) | 37 | int *eof, void *data) |
@@ -161,6 +168,12 @@ int __init init_litmus_proc(void) | |||
161 | release_master_file->write_proc = proc_write_release_master; | 168 | release_master_file->write_proc = proc_write_release_master; |
162 | #endif | 169 | #endif |
163 | 170 | ||
171 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
172 | klitirqd_file = | ||
173 | create_proc_read_entry("klitirqd_stats", 0444, litmus_dir, | ||
174 | proc_read_klitirqd_stats, NULL); | ||
175 | #endif | ||
176 | |||
164 | stat_file = create_proc_read_entry("stats", 0444, litmus_dir, | 177 | stat_file = create_proc_read_entry("stats", 0444, litmus_dir, |
165 | proc_read_stats, NULL); | 178 | proc_read_stats, NULL); |
166 | 179 | ||
@@ -187,6 +200,10 @@ void exit_litmus_proc(void) | |||
187 | remove_proc_entry("stats", litmus_dir); | 200 | remove_proc_entry("stats", litmus_dir); |
188 | if (curr_file) | 201 | if (curr_file) |
189 | remove_proc_entry("active_plugin", litmus_dir); | 202 | remove_proc_entry("active_plugin", litmus_dir); |
203 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
204 | if (klitirqd_file) | ||
205 | remove_proc_entry("klitirqd_stats", litmus_dir); | ||
206 | #endif | ||
190 | #ifdef CONFIG_RELEASE_MASTER | 207 | #ifdef CONFIG_RELEASE_MASTER |
191 | if (release_master_file) | 208 | if (release_master_file) |
192 | remove_proc_entry("release_master", litmus_dir); | 209 | remove_proc_entry("release_master", litmus_dir); |
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c new file mode 100644 index 000000000000..c49676c6d3a7 --- /dev/null +++ b/litmus/litmus_softirq.c | |||
@@ -0,0 +1,1584 @@ | |||
1 | #include <linux/interrupt.h> | ||
2 | #include <linux/percpu.h> | ||
3 | #include <linux/cpu.h> | ||
4 | #include <linux/kthread.h> | ||
5 | #include <linux/ftrace.h> | ||
6 | #include <linux/smp.h> | ||
7 | #include <linux/slab.h> | ||
8 | #include <linux/mutex.h> | ||
9 | |||
10 | #include <linux/sched.h> | ||
11 | #include <linux/cpuset.h> | ||
12 | |||
13 | #include <litmus/litmus.h> | ||
14 | #include <litmus/sched_trace.h> | ||
15 | #include <litmus/jobs.h> | ||
16 | #include <litmus/sched_plugin.h> | ||
17 | #include <litmus/litmus_softirq.h> | ||
18 | |||
19 | /* TODO: Remove unneeded mb() and other barriers. */ | ||
20 | |||
21 | |||
22 | /* counts number of daemons ready to handle litmus irqs. */ | ||
23 | static atomic_t num_ready_klitirqds = ATOMIC_INIT(0); | ||
24 | |||
25 | enum pending_flags | ||
26 | { | ||
27 | LIT_TASKLET_LOW = 0x1, | ||
28 | LIT_TASKLET_HI = LIT_TASKLET_LOW<<1, | ||
29 | LIT_WORK = LIT_TASKLET_HI<<1 | ||
30 | }; | ||
31 | |||
32 | /* only support tasklet processing for now. */ | ||
33 | struct tasklet_head | ||
34 | { | ||
35 | struct tasklet_struct *head; | ||
36 | struct tasklet_struct **tail; | ||
37 | }; | ||
38 | |||
39 | struct klitirqd_info | ||
40 | { | ||
41 | struct task_struct* klitirqd; | ||
42 | struct task_struct* current_owner; | ||
43 | int terminating; | ||
44 | |||
45 | |||
46 | raw_spinlock_t lock; | ||
47 | |||
48 | u32 pending; | ||
49 | atomic_t num_hi_pending; | ||
50 | atomic_t num_low_pending; | ||
51 | atomic_t num_work_pending; | ||
52 | |||
53 | /* in order of priority */ | ||
54 | struct tasklet_head pending_tasklets_hi; | ||
55 | struct tasklet_head pending_tasklets; | ||
56 | struct list_head worklist; | ||
57 | }; | ||
58 | |||
59 | /* one list for each klitirqd */ | ||
60 | static struct klitirqd_info klitirqds[NR_LITMUS_SOFTIRQD]; | ||
61 | |||
62 | |||
63 | |||
64 | |||
65 | |||
66 | int proc_read_klitirqd_stats(char *page, char **start, | ||
67 | off_t off, int count, | ||
68 | int *eof, void *data) | ||
69 | { | ||
70 | int len = snprintf(page, PAGE_SIZE, | ||
71 | "num ready klitirqds: %d\n\n", | ||
72 | atomic_read(&num_ready_klitirqds)); | ||
73 | |||
74 | if(klitirqd_is_ready()) | ||
75 | { | ||
76 | int i; | ||
77 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
78 | { | ||
79 | len += | ||
80 | snprintf(page + len - 1, PAGE_SIZE, /* -1 to strip off \0 */ | ||
81 | "klitirqd_th%d: %s/%d\n" | ||
82 | "\tcurrent_owner: %s/%d\n" | ||
83 | "\tpending: %x\n" | ||
84 | "\tnum hi: %d\n" | ||
85 | "\tnum low: %d\n" | ||
86 | "\tnum work: %d\n\n", | ||
87 | i, | ||
88 | klitirqds[i].klitirqd->comm, klitirqds[i].klitirqd->pid, | ||
89 | (klitirqds[i].current_owner != NULL) ? | ||
90 | klitirqds[i].current_owner->comm : "(null)", | ||
91 | (klitirqds[i].current_owner != NULL) ? | ||
92 | klitirqds[i].current_owner->pid : 0, | ||
93 | klitirqds[i].pending, | ||
94 | atomic_read(&klitirqds[i].num_hi_pending), | ||
95 | atomic_read(&klitirqds[i].num_low_pending), | ||
96 | atomic_read(&klitirqds[i].num_work_pending)); | ||
97 | } | ||
98 | } | ||
99 | |||
100 | return(len); | ||
101 | } | ||
102 | |||
103 | |||
104 | |||
105 | |||
106 | |||
107 | #if 0 | ||
108 | static atomic_t dump_id = ATOMIC_INIT(0); | ||
109 | |||
110 | static void __dump_state(struct klitirqd_info* which, const char* caller) | ||
111 | { | ||
112 | struct tasklet_struct* list; | ||
113 | |||
114 | int id = atomic_inc_return(&dump_id); | ||
115 | |||
116 | //if(in_interrupt()) | ||
117 | { | ||
118 | if(which->current_owner) | ||
119 | { | ||
120 | TRACE("(id: %d caller: %s)\n" | ||
121 | "klitirqd: %s/%d\n" | ||
122 | "current owner: %s/%d\n" | ||
123 | "pending: %x\n", | ||
124 | id, caller, | ||
125 | which->klitirqd->comm, which->klitirqd->pid, | ||
126 | which->current_owner->comm, which->current_owner->pid, | ||
127 | which->pending); | ||
128 | } | ||
129 | else | ||
130 | { | ||
131 | TRACE("(id: %d caller: %s)\n" | ||
132 | "klitirqd: %s/%d\n" | ||
133 | "current owner: %p\n" | ||
134 | "pending: %x\n", | ||
135 | id, caller, | ||
136 | which->klitirqd->comm, which->klitirqd->pid, | ||
137 | NULL, | ||
138 | which->pending); | ||
139 | } | ||
140 | |||
141 | list = which->pending_tasklets.head; | ||
142 | while(list) | ||
143 | { | ||
144 | struct tasklet_struct *t = list; | ||
145 | list = list->next; /* advance */ | ||
146 | if(t->owner) | ||
147 | TRACE("(id: %d caller: %s) Tasklet: %x, Owner = %s/%d\n", id, caller, t, t->owner->comm, t->owner->pid); | ||
148 | else | ||
149 | TRACE("(id: %d caller: %s) Tasklet: %x, Owner = %p\n", id, caller, t, NULL); | ||
150 | } | ||
151 | } | ||
152 | } | ||
153 | |||
154 | static void dump_state(struct klitirqd_info* which, const char* caller) | ||
155 | { | ||
156 | unsigned long flags; | ||
157 | |||
158 | raw_spin_lock_irqsave(&which->lock, flags); | ||
159 | __dump_state(which, caller); | ||
160 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
161 | } | ||
162 | #endif | ||
163 | |||
164 | |||
165 | /* forward declarations */ | ||
166 | static void ___litmus_tasklet_schedule(struct tasklet_struct *t, | ||
167 | struct klitirqd_info *which, | ||
168 | int wakeup); | ||
169 | static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t, | ||
170 | struct klitirqd_info *which, | ||
171 | int wakeup); | ||
172 | static void ___litmus_schedule_work(struct work_struct *w, | ||
173 | struct klitirqd_info *which, | ||
174 | int wakeup); | ||
175 | |||
176 | |||
177 | |||
178 | inline unsigned int klitirqd_id(struct task_struct* tsk) | ||
179 | { | ||
180 | int i; | ||
181 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
182 | { | ||
183 | if(klitirqds[i].klitirqd == tsk) | ||
184 | { | ||
185 | return i; | ||
186 | } | ||
187 | } | ||
188 | |||
189 | BUG(); | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | |||
195 | inline static u32 litirq_pending_hi_irqoff(struct klitirqd_info* which) | ||
196 | { | ||
197 | return (which->pending & LIT_TASKLET_HI); | ||
198 | } | ||
199 | |||
200 | inline static u32 litirq_pending_low_irqoff(struct klitirqd_info* which) | ||
201 | { | ||
202 | return (which->pending & LIT_TASKLET_LOW); | ||
203 | } | ||
204 | |||
205 | inline static u32 litirq_pending_work_irqoff(struct klitirqd_info* which) | ||
206 | { | ||
207 | return (which->pending & LIT_WORK); | ||
208 | } | ||
209 | |||
210 | inline static u32 litirq_pending_irqoff(struct klitirqd_info* which) | ||
211 | { | ||
212 | return(which->pending); | ||
213 | } | ||
214 | |||
215 | |||
216 | inline static u32 litirq_pending(struct klitirqd_info* which) | ||
217 | { | ||
218 | unsigned long flags; | ||
219 | u32 pending; | ||
220 | |||
221 | raw_spin_lock_irqsave(&which->lock, flags); | ||
222 | pending = litirq_pending_irqoff(which); | ||
223 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
224 | |||
225 | return pending; | ||
226 | }; | ||
227 | |||
228 | inline static u32 litirq_pending_with_owner(struct klitirqd_info* which, struct task_struct* owner) | ||
229 | { | ||
230 | unsigned long flags; | ||
231 | u32 pending; | ||
232 | |||
233 | raw_spin_lock_irqsave(&which->lock, flags); | ||
234 | pending = litirq_pending_irqoff(which); | ||
235 | if(pending) | ||
236 | { | ||
237 | if(which->current_owner != owner) | ||
238 | { | ||
239 | pending = 0; // owner switch! | ||
240 | } | ||
241 | } | ||
242 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
243 | |||
244 | return pending; | ||
245 | } | ||
246 | |||
247 | |||
248 | inline static u32 litirq_pending_and_sem_and_owner(struct klitirqd_info* which, | ||
249 | struct mutex** sem, | ||
250 | struct task_struct** t) | ||
251 | { | ||
252 | unsigned long flags; | ||
253 | u32 pending; | ||
254 | |||
255 | /* init values */ | ||
256 | *sem = NULL; | ||
257 | *t = NULL; | ||
258 | |||
259 | raw_spin_lock_irqsave(&which->lock, flags); | ||
260 | |||
261 | pending = litirq_pending_irqoff(which); | ||
262 | if(pending) | ||
263 | { | ||
264 | if(which->current_owner != NULL) | ||
265 | { | ||
266 | *t = which->current_owner; | ||
267 | *sem = &tsk_rt(which->current_owner)->klitirqd_sem; | ||
268 | } | ||
269 | else | ||
270 | { | ||
271 | BUG(); | ||
272 | } | ||
273 | } | ||
274 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
275 | |||
276 | if(likely(*sem)) | ||
277 | { | ||
278 | return pending; | ||
279 | } | ||
280 | else | ||
281 | { | ||
282 | return 0; | ||
283 | } | ||
284 | } | ||
285 | |||
286 | /* returns true if the next piece of work to do is from a different owner. | ||
287 | */ | ||
288 | static int tasklet_ownership_change( | ||
289 | struct klitirqd_info* which, | ||
290 | enum pending_flags taskletQ) | ||
291 | { | ||
292 | /* this function doesn't have to look at work objects since they have | ||
293 | priority below tasklets. */ | ||
294 | |||
295 | unsigned long flags; | ||
296 | int ret = 0; | ||
297 | |||
298 | raw_spin_lock_irqsave(&which->lock, flags); | ||
299 | |||
300 | switch(taskletQ) | ||
301 | { | ||
302 | case LIT_TASKLET_HI: | ||
303 | if(litirq_pending_hi_irqoff(which)) | ||
304 | { | ||
305 | ret = (which->pending_tasklets_hi.head->owner != | ||
306 | which->current_owner); | ||
307 | } | ||
308 | break; | ||
309 | case LIT_TASKLET_LOW: | ||
310 | if(litirq_pending_low_irqoff(which)) | ||
311 | { | ||
312 | ret = (which->pending_tasklets.head->owner != | ||
313 | which->current_owner); | ||
314 | } | ||
315 | break; | ||
316 | default: | ||
317 | break; | ||
318 | } | ||
319 | |||
320 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
321 | |||
322 | TRACE_TASK(which->klitirqd, "ownership change needed: %d\n", ret); | ||
323 | |||
324 | return ret; | ||
325 | } | ||
326 | |||
327 | |||
328 | static void __reeval_prio(struct klitirqd_info* which) | ||
329 | { | ||
330 | struct task_struct* next_owner = NULL; | ||
331 | struct task_struct* klitirqd = which->klitirqd; | ||
332 | |||
333 | /* Check in prio-order */ | ||
334 | u32 pending = litirq_pending_irqoff(which); | ||
335 | |||
336 | //__dump_state(which, "__reeval_prio: before"); | ||
337 | |||
338 | if(pending) | ||
339 | { | ||
340 | if(pending & LIT_TASKLET_HI) | ||
341 | { | ||
342 | next_owner = which->pending_tasklets_hi.head->owner; | ||
343 | } | ||
344 | else if(pending & LIT_TASKLET_LOW) | ||
345 | { | ||
346 | next_owner = which->pending_tasklets.head->owner; | ||
347 | } | ||
348 | else if(pending & LIT_WORK) | ||
349 | { | ||
350 | struct work_struct* work = | ||
351 | list_first_entry(&which->worklist, struct work_struct, entry); | ||
352 | next_owner = work->owner; | ||
353 | } | ||
354 | } | ||
355 | |||
356 | if(next_owner != which->current_owner) | ||
357 | { | ||
358 | struct task_struct* old_owner = which->current_owner; | ||
359 | |||
360 | /* bind the next owner. */ | ||
361 | which->current_owner = next_owner; | ||
362 | mb(); | ||
363 | |||
364 | if(next_owner != NULL) | ||
365 | { | ||
366 | if(!in_interrupt()) | ||
367 | { | ||
368 | TRACE_CUR("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__, | ||
369 | ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->comm, | ||
370 | ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->pid, | ||
371 | next_owner->comm, next_owner->pid); | ||
372 | } | ||
373 | else | ||
374 | { | ||
375 | TRACE("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__, | ||
376 | ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->comm, | ||
377 | ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->pid, | ||
378 | next_owner->comm, next_owner->pid); | ||
379 | } | ||
380 | |||
381 | litmus->set_prio_inh_klitirqd(klitirqd, old_owner, next_owner); | ||
382 | } | ||
383 | else | ||
384 | { | ||
385 | if(likely(!in_interrupt())) | ||
386 | { | ||
387 | TRACE_CUR("%s: Ownership change: %s/%d to NULL (reverting)\n", | ||
388 | __FUNCTION__, klitirqd->comm, klitirqd->pid); | ||
389 | } | ||
390 | else | ||
391 | { | ||
392 | // is this a bug? | ||
393 | TRACE("%s: Ownership change: %s/%d to NULL (reverting)\n", | ||
394 | __FUNCTION__, klitirqd->comm, klitirqd->pid); | ||
395 | } | ||
396 | |||
397 | BUG_ON(pending != 0); | ||
398 | litmus->clear_prio_inh_klitirqd(klitirqd, old_owner); | ||
399 | } | ||
400 | } | ||
401 | |||
402 | //__dump_state(which, "__reeval_prio: after"); | ||
403 | } | ||
404 | |||
405 | static void reeval_prio(struct klitirqd_info* which) | ||
406 | { | ||
407 | unsigned long flags; | ||
408 | |||
409 | raw_spin_lock_irqsave(&which->lock, flags); | ||
410 | __reeval_prio(which); | ||
411 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
412 | } | ||
413 | |||
414 | |||
415 | static void wakeup_litirqd_locked(struct klitirqd_info* which) | ||
416 | { | ||
417 | /* Interrupts are disabled: no need to stop preemption */ | ||
418 | if (which && which->klitirqd) | ||
419 | { | ||
420 | __reeval_prio(which); /* configure the proper priority */ | ||
421 | |||
422 | if(which->klitirqd->state != TASK_RUNNING) | ||
423 | { | ||
424 | TRACE("%s: Waking up klitirqd: %s/%d\n", __FUNCTION__, | ||
425 | which->klitirqd->comm, which->klitirqd->pid); | ||
426 | |||
427 | wake_up_process(which->klitirqd); | ||
428 | } | ||
429 | } | ||
430 | } | ||
431 | |||
432 | |||
433 | static void do_lit_tasklet(struct klitirqd_info* which, | ||
434 | struct tasklet_head* pending_tasklets) | ||
435 | { | ||
436 | unsigned long flags; | ||
437 | struct tasklet_struct *list; | ||
438 | atomic_t* count; | ||
439 | |||
440 | raw_spin_lock_irqsave(&which->lock, flags); | ||
441 | |||
442 | //__dump_state(which, "do_lit_tasklet: before steal"); | ||
443 | |||
444 | /* copy out the tasklets for our private use. */ | ||
445 | list = pending_tasklets->head; | ||
446 | pending_tasklets->head = NULL; | ||
447 | pending_tasklets->tail = &pending_tasklets->head; | ||
448 | |||
449 | /* remove pending flag */ | ||
450 | which->pending &= (pending_tasklets == &which->pending_tasklets) ? | ||
451 | ~LIT_TASKLET_LOW : | ||
452 | ~LIT_TASKLET_HI; | ||
453 | |||
454 | count = (pending_tasklets == &which->pending_tasklets) ? | ||
455 | &which->num_low_pending: | ||
456 | &which->num_hi_pending; | ||
457 | |||
458 | //__dump_state(which, "do_lit_tasklet: after steal"); | ||
459 | |||
460 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
461 | |||
462 | |||
463 | while(list) | ||
464 | { | ||
465 | struct tasklet_struct *t = list; | ||
466 | |||
467 | /* advance, lest we forget */ | ||
468 | list = list->next; | ||
469 | |||
470 | /* execute tasklet if it has my priority and is free */ | ||
471 | if ((t->owner == which->current_owner) && tasklet_trylock(t)) { | ||
472 | if (!atomic_read(&t->count)) { | ||
473 | |||
474 | sched_trace_tasklet_begin(t->owner); | ||
475 | |||
476 | if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) | ||
477 | { | ||
478 | BUG(); | ||
479 | } | ||
480 | TRACE_CUR("%s: Invoking tasklet.\n", __FUNCTION__); | ||
481 | t->func(t->data); | ||
482 | tasklet_unlock(t); | ||
483 | |||
484 | atomic_dec(count); | ||
485 | |||
486 | sched_trace_tasklet_end(t->owner, 0ul); | ||
487 | |||
488 | continue; /* process more tasklets */ | ||
489 | } | ||
490 | tasklet_unlock(t); | ||
491 | } | ||
492 | |||
493 | TRACE_CUR("%s: Could not invoke tasklet. Requeuing.\n", __FUNCTION__); | ||
494 | |||
495 | /* couldn't process tasklet. put it back at the end of the queue. */ | ||
496 | if(pending_tasklets == &which->pending_tasklets) | ||
497 | ___litmus_tasklet_schedule(t, which, 0); | ||
498 | else | ||
499 | ___litmus_tasklet_hi_schedule(t, which, 0); | ||
500 | } | ||
501 | } | ||
502 | |||
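do_lit_tasklet() and the ___litmus_tasklet_schedule() helpers further down share a singly linked queue whose tail field points at the last element's next pointer (or at head when the queue is empty), so appending one tasklet and stealing the whole list are both O(1) and never traverse the list. A minimal user-space sketch of that queue discipline; the item/queue names are illustrative and not part of this patch:

#include <stddef.h>

struct item {
    struct item *next;
};

struct queue {
    struct item *head;
    struct item **tail;   /* points at head, or at the last item's next field */
};

static void queue_init(struct queue *q)
{
    q->head = NULL;
    q->tail = &q->head;
}

/* append in O(1): write through the tail pointer, then advance it */
static void queue_append(struct queue *q, struct item *it)
{
    it->next = NULL;
    *(q->tail) = it;
    q->tail = &it->next;
}

/* steal the whole list in O(1): grab head, reset to the empty state */
static struct item *queue_steal(struct queue *q)
{
    struct item *list = q->head;
    q->head = NULL;
    q->tail = &q->head;
    return list;
}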
503 | |||
504 | // returns 1 if priorities need to be changed to continue processing | ||
505 | // pending tasklets. | ||
506 | static int do_litirq(struct klitirqd_info* which) | ||
507 | { | ||
508 | u32 pending; | ||
509 | int resched = 0; | ||
510 | |||
511 | if(in_interrupt()) | ||
512 | { | ||
513 | TRACE("%s: exiting early: in interrupt context!\n", __FUNCTION__); | ||
514 | return(0); | ||
515 | } | ||
516 | |||
517 | if(which->klitirqd != current) | ||
518 | { | ||
519 | TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n", | ||
520 | __FUNCTION__, current->comm, current->pid, | ||
521 | which->klitirqd->comm, which->klitirqd->pid); | ||
522 | return(0); | ||
523 | } | ||
524 | |||
525 | if(!is_realtime(current)) | ||
526 | { | ||
527 | TRACE_CUR("%s: exiting early: klitirqd is not real-time. Sched Policy = %d\n", | ||
528 | __FUNCTION__, current->policy); | ||
529 | return(0); | ||
530 | } | ||
531 | |||
532 | |||
533 | /* We only handle tasklets & work objects, no need for RCU triggers? */ | ||
534 | |||
535 | pending = litirq_pending(which); | ||
536 | if(pending) | ||
537 | { | ||
538 | /* extract the work to do and do it! */ | ||
539 | if(pending & LIT_TASKLET_HI) | ||
540 | { | ||
541 | TRACE_CUR("%s: Invoking HI tasklets.\n", __FUNCTION__); | ||
542 | do_lit_tasklet(which, &which->pending_tasklets_hi); | ||
543 | resched = tasklet_ownership_change(which, LIT_TASKLET_HI); | ||
544 | |||
545 | if(resched) | ||
546 | { | ||
547 | TRACE_CUR("%s: HI tasklets of another owner remain. " | ||
548 | "Skipping any LOW tasklets.\n", __FUNCTION__); | ||
549 | } | ||
550 | } | ||
551 | |||
552 | if(!resched && (pending & LIT_TASKLET_LOW)) | ||
553 | { | ||
554 | TRACE_CUR("%s: Invoking LOW tasklets.\n", __FUNCTION__); | ||
555 | do_lit_tasklet(which, &which->pending_tasklets); | ||
556 | resched = tasklet_ownership_change(which, LIT_TASKLET_LOW); | ||
557 | |||
558 | if(resched) | ||
559 | { | ||
560 | TRACE_CUR("%s: LOW tasklets of another owner remain. " | ||
561 | "Skipping any work objects.\n", __FUNCTION__); | ||
562 | } | ||
563 | } | ||
564 | } | ||
565 | |||
566 | return(resched); | ||
567 | } | ||
568 | |||
569 | |||
570 | static void do_work(struct klitirqd_info* which) | ||
571 | { | ||
572 | unsigned long flags; | ||
573 | work_func_t f; | ||
574 | struct work_struct* work; | ||
575 | |||
576 | // only execute one work-queue item to yield to tasklets. | ||
577 | // ...is this a good idea, or should we just batch them? | ||
578 | raw_spin_lock_irqsave(&which->lock, flags); | ||
579 | |||
580 | if(!litirq_pending_work_irqoff(which)) | ||
581 | { | ||
582 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
583 | goto no_work; | ||
584 | } | ||
585 | |||
586 | work = list_first_entry(&which->worklist, struct work_struct, entry); | ||
587 | list_del_init(&work->entry); | ||
588 | |||
589 | if(list_empty(&which->worklist)) | ||
590 | { | ||
591 | which->pending &= ~LIT_WORK; | ||
592 | } | ||
593 | |||
594 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
595 | |||
596 | |||
597 | |||
598 | /* safe to read current_owner outside of lock since only this thread | ||
599 | may write to the pointer. */ | ||
600 | if(work->owner == which->current_owner) | ||
601 | { | ||
602 | TRACE_CUR("%s: Invoking work object.\n", __FUNCTION__); | ||
603 | // do the work! | ||
604 | work_clear_pending(work); | ||
605 | f = work->func; | ||
606 | f(work); /* can't touch 'work' after this point, | ||
607 | the user may have freed it. */ | ||
608 | |||
609 | atomic_dec(&which->num_work_pending); | ||
610 | } | ||
611 | else | ||
612 | { | ||
613 | TRACE_CUR("%s: Could not invoke work object. Requeuing.\n", | ||
614 | __FUNCTION__); | ||
615 | ___litmus_schedule_work(work, which, 0); | ||
616 | } | ||
617 | |||
618 | no_work: | ||
619 | return; | ||
620 | } | ||
621 | |||
622 | |||
623 | static int set_litmus_daemon_sched(void) | ||
624 | { | ||
625 | /* set up a daemon job that will never complete. | ||
626 | it should only ever run on behalf of another | ||
627 | real-time task. | ||
628 | |||
629 | TODO: Transition to a new job whenever a | ||
630 | new tasklet is handled */ | ||
631 | |||
632 | int ret = 0; | ||
633 | |||
634 | struct rt_task tp = { | ||
635 | .exec_cost = 0, | ||
636 | .period = 1000000000, /* dummy 1 second period */ | ||
637 | .phase = 0, | ||
638 | .cpu = task_cpu(current), | ||
639 | .budget_policy = NO_ENFORCEMENT, | ||
640 | .cls = RT_CLASS_BEST_EFFORT | ||
641 | }; | ||
642 | |||
643 | struct sched_param param = { .sched_priority = 0}; | ||
644 | |||
645 | |||
646 | /* set task params, mark as proxy thread, and init other data */ | ||
647 | tsk_rt(current)->task_params = tp; | ||
648 | tsk_rt(current)->is_proxy_thread = 1; | ||
649 | tsk_rt(current)->cur_klitirqd = NULL; | ||
650 | //init_MUTEX(&tsk_rt(current)->klitirqd_sem); | ||
651 | mutex_init(&tsk_rt(current)->klitirqd_sem); | ||
652 | //init_completion(&tsk_rt(current)->klitirqd_sem); | ||
653 | atomic_set(&tsk_rt(current)->klitirqd_sem_stat, NOT_HELD); | ||
654 | |||
655 | /* inform the OS we're SCHED_LITMUS -- | ||
656 | sched_setscheduler_nocheck() calls litmus_admit_task(). */ | ||
657 | sched_setscheduler_nocheck(current, SCHED_LITMUS, &param); | ||
658 | |||
659 | return ret; | ||
660 | } | ||
661 | |||
662 | static void enter_execution_phase(struct klitirqd_info* which, | ||
663 | struct mutex* sem, | ||
664 | struct task_struct* t) | ||
665 | { | ||
666 | TRACE_CUR("%s: Trying to enter execution phase. " | ||
667 | "Acquiring semaphore of %s/%d\n", __FUNCTION__, | ||
668 | t->comm, t->pid); | ||
669 | down_and_set_stat(current, HELD, sem); | ||
670 | TRACE_CUR("%s: Execution phase entered! " | ||
671 | "Acquired semaphore of %s/%d\n", __FUNCTION__, | ||
672 | t->comm, t->pid); | ||
673 | } | ||
674 | |||
675 | static void exit_execution_phase(struct klitirqd_info* which, | ||
676 | struct mutex* sem, | ||
677 | struct task_struct* t) | ||
678 | { | ||
679 | TRACE_CUR("%s: Exiting execution phase. " | ||
680 | "Releasing semaphore of %s/%d\n", __FUNCTION__, | ||
681 | t->comm, t->pid); | ||
682 | if(atomic_read(&tsk_rt(current)->klitirqd_sem_stat) == HELD) | ||
683 | { | ||
684 | up_and_set_stat(current, NOT_HELD, sem); | ||
685 | TRACE_CUR("%s: Execution phase exited! " | ||
686 | "Released semaphore of %s/%d\n", __FUNCTION__, | ||
687 | t->comm, t->pid); | ||
688 | } | ||
689 | else | ||
690 | { | ||
691 | TRACE_CUR("%s: COULDN'T RELEASE SEMAPHORE BECAUSE ONE IS NOT HELD!\n", __FUNCTION__); | ||
692 | } | ||
693 | } | ||
694 | |||
695 | /* main loop for the klitirqd daemons */ | ||
696 | static int run_klitirqd(void* unused) | ||
697 | { | ||
698 | struct klitirqd_info* which = &klitirqds[klitirqd_id(current)]; | ||
699 | struct mutex* sem; | ||
700 | struct task_struct* owner; | ||
701 | |||
702 | int rt_status = set_litmus_daemon_sched(); | ||
703 | |||
704 | if(rt_status != 0) | ||
705 | { | ||
706 | TRACE_CUR("%s: Failed to transition to rt-task.\n", __FUNCTION__); | ||
707 | goto rt_failed; | ||
708 | } | ||
709 | |||
710 | atomic_inc(&num_ready_klitirqds); | ||
711 | |||
712 | set_current_state(TASK_INTERRUPTIBLE); | ||
713 | |||
714 | while (!kthread_should_stop()) | ||
715 | { | ||
716 | preempt_disable(); | ||
717 | if (!litirq_pending(which)) | ||
718 | { | ||
719 | /* sleep for work */ | ||
720 | TRACE_CUR("%s: No more tasklets or work objects. Going to sleep.\n", | ||
721 | __FUNCTION__); | ||
722 | preempt_enable_no_resched(); | ||
723 | schedule(); | ||
724 | |||
725 | if(kthread_should_stop()) /* bail out */ | ||
726 | { | ||
727 | TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__); | ||
728 | continue; | ||
729 | } | ||
730 | |||
731 | preempt_disable(); | ||
732 | } | ||
733 | |||
734 | __set_current_state(TASK_RUNNING); | ||
735 | |||
736 | while (litirq_pending_and_sem_and_owner(which, &sem, &owner)) | ||
737 | { | ||
738 | int needs_resched = 0; | ||
739 | |||
740 | preempt_enable_no_resched(); | ||
741 | |||
742 | BUG_ON(sem == NULL); | ||
743 | |||
744 | // wait to enter execution phase; wait for 'current_owner' to block. | ||
745 | enter_execution_phase(which, sem, owner); | ||
746 | |||
747 | if(kthread_should_stop()) | ||
748 | { | ||
749 | TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__); | ||
750 | break; | ||
751 | } | ||
752 | |||
753 | preempt_disable(); | ||
754 | |||
755 | /* Double check that there's still pending work and the owner hasn't | ||
756 | * changed. Pending items may have been flushed while we were sleeping. | ||
757 | */ | ||
758 | if(litirq_pending_with_owner(which, owner)) | ||
759 | { | ||
760 | TRACE_CUR("%s: Executing tasklets and/or work objects.\n", | ||
761 | __FUNCTION__); | ||
762 | |||
763 | needs_resched = do_litirq(which); | ||
764 | |||
765 | preempt_enable_no_resched(); | ||
766 | |||
767 | // work objects are preemptible. | ||
768 | if(!needs_resched) | ||
769 | { | ||
770 | do_work(which); | ||
771 | } | ||
772 | |||
773 | // exit execution phase. | ||
774 | exit_execution_phase(which, sem, owner); | ||
775 | |||
776 | TRACE_CUR("%s: Setting up next priority.\n", __FUNCTION__); | ||
777 | reeval_prio(which); /* check if we need to change priority here */ | ||
778 | } | ||
779 | else | ||
780 | { | ||
781 | TRACE_CUR("%s: Pending work was flushed! Prev owner was %s/%d\n", | ||
782 | __FUNCTION__, | ||
783 | owner->comm, owner->pid); | ||
784 | preempt_enable_no_resched(); | ||
785 | |||
786 | // exit execution phase. | ||
787 | exit_execution_phase(which, sem, owner); | ||
788 | } | ||
789 | |||
790 | cond_resched(); | ||
791 | preempt_disable(); | ||
792 | } | ||
793 | preempt_enable(); | ||
794 | set_current_state(TASK_INTERRUPTIBLE); | ||
795 | } | ||
796 | __set_current_state(TASK_RUNNING); | ||
797 | |||
798 | atomic_dec(&num_ready_klitirqds); | ||
799 | |||
800 | rt_failed: | ||
801 | litmus_exit_task(current); | ||
802 | |||
803 | return rt_status; | ||
804 | } | ||
805 | |||
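For reference, the sleep/wake skeleton that run_klitirqd() is built on is the standard kthread idiom: mark the thread TASK_INTERRUPTIBLE before testing for work, so that a wake_up_process() arriving between the test and schedule() is not lost. A stripped-down sketch with the preemption and priority-inheritance bookkeeping omitted; the pending-work predicate and worker function are hypothetical stand-ins, not functions from this patch:

#include <linux/kthread.h>
#include <linux/sched.h>

/* hypothetical helpers, stand-ins for litirq_pending()/do_litirq() etc. */
static bool work_is_pending(void);
static void handle_pending_work(void);

static int simple_daemon(void *unused)
{
    set_current_state(TASK_INTERRUPTIBLE);
    while (!kthread_should_stop()) {
        if (!work_is_pending()) {
            schedule();                 /* sleep until wake_up_process() */
            if (kthread_should_stop())
                break;
        }
        __set_current_state(TASK_RUNNING);
        handle_pending_work();
        set_current_state(TASK_INTERRUPTIBLE);
    }
    __set_current_state(TASK_RUNNING);
    return 0;
}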
806 | |||
807 | struct klitirqd_launch_data | ||
808 | { | ||
809 | int* cpu_affinity; | ||
810 | struct work_struct work; | ||
811 | }; | ||
812 | |||
813 | /* executed by a kworker from workqueues */ | ||
814 | static void launch_klitirqd(struct work_struct *work) | ||
815 | { | ||
816 | int i; | ||
817 | |||
818 | struct klitirqd_launch_data* launch_data = | ||
819 | container_of(work, struct klitirqd_launch_data, work); | ||
820 | |||
821 | TRACE("%s: Creating %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); | ||
822 | |||
823 | /* create the daemon threads */ | ||
824 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
825 | { | ||
826 | if(launch_data->cpu_affinity) | ||
827 | { | ||
828 | klitirqds[i].klitirqd = | ||
829 | kthread_create( | ||
830 | run_klitirqd, | ||
831 | /* treat the affinity as a pointer, we'll cast it back later */ | ||
832 | (void*)(long long)launch_data->cpu_affinity[i], | ||
833 | "klitirqd_th%d/%d", | ||
834 | i, | ||
835 | launch_data->cpu_affinity[i]); | ||
836 | |||
837 | /* litmus will put us in the right cluster. */ | ||
838 | kthread_bind(klitirqds[i].klitirqd, launch_data->cpu_affinity[i]); | ||
839 | } | ||
840 | else | ||
841 | { | ||
842 | klitirqds[i].klitirqd = | ||
843 | kthread_create( | ||
844 | run_klitirqd, | ||
845 | /* treat the affinity as a pointer, we'll cast it back later */ | ||
846 | (void*)(long long)(-1), | ||
847 | "klitirqd_th%d", | ||
848 | i); | ||
849 | } | ||
850 | } | ||
851 | |||
852 | TRACE("%s: Launching %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); | ||
853 | |||
854 | /* unleash the daemons */ | ||
855 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
856 | { | ||
857 | wake_up_process(klitirqds[i].klitirqd); | ||
858 | } | ||
859 | |||
860 | if(launch_data->cpu_affinity) | ||
861 | kfree(launch_data->cpu_affinity); | ||
862 | kfree(launch_data); | ||
863 | } | ||
864 | |||
865 | |||
866 | void spawn_klitirqd(int* affinity) | ||
867 | { | ||
868 | int i; | ||
869 | struct klitirqd_launch_data* delayed_launch; | ||
870 | |||
871 | if(atomic_read(&num_ready_klitirqds) != 0) | ||
872 | { | ||
873 | TRACE("%s: At least one klitirqd is already running! Need to call kill_klitirqd()?\n"); | ||
874 | return; | ||
875 | } | ||
876 | |||
877 | /* init the tasklet & work queues */ | ||
878 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
879 | { | ||
880 | klitirqds[i].terminating = 0; | ||
881 | klitirqds[i].pending = 0; | ||
882 | |||
883 | klitirqds[i].num_hi_pending.counter = 0; | ||
884 | klitirqds[i].num_low_pending.counter = 0; | ||
885 | klitirqds[i].num_work_pending.counter = 0; | ||
886 | |||
887 | klitirqds[i].pending_tasklets_hi.head = NULL; | ||
888 | klitirqds[i].pending_tasklets_hi.tail = &klitirqds[i].pending_tasklets_hi.head; | ||
889 | |||
890 | klitirqds[i].pending_tasklets.head = NULL; | ||
891 | klitirqds[i].pending_tasklets.tail = &klitirqds[i].pending_tasklets.head; | ||
892 | |||
893 | INIT_LIST_HEAD(&klitirqds[i].worklist); | ||
894 | |||
895 | raw_spin_lock_init(&klitirqds[i].lock); | ||
896 | } | ||
897 | |||
898 | /* make sure the initializations are flushed to memory before other | ||
899 | threads access them. */ | ||
900 | mb(); | ||
901 | |||
902 | /* tell a work queue to launch the threads. we can't make scheduling | ||
903 | calls since we're in an atomic state. */ | ||
904 | TRACE("%s: Setting callback up to launch klitirqds\n", __FUNCTION__); | ||
905 | delayed_launch = kmalloc(sizeof(struct klitirqd_launch_data), GFP_ATOMIC); | ||
906 | if(affinity) | ||
907 | { | ||
908 | delayed_launch->cpu_affinity = | ||
909 | kmalloc(sizeof(int)*NR_LITMUS_SOFTIRQD, GFP_ATOMIC); | ||
910 | |||
911 | memcpy(delayed_launch->cpu_affinity, affinity, | ||
912 | sizeof(int)*NR_LITMUS_SOFTIRQD); | ||
913 | } | ||
914 | else | ||
915 | { | ||
916 | delayed_launch->cpu_affinity = NULL; | ||
917 | } | ||
918 | INIT_WORK(&delayed_launch->work, launch_klitirqd); | ||
919 | schedule_work(&delayed_launch->work); | ||
920 | } | ||
921 | |||
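spawn_klitirqd() accepts either NULL (no CPU binding) or an array of NR_LITMUS_SOFTIRQD CPU numbers, which it copies before the deferred work callback creates and binds the threads, so a caller-local array is fine. A hedged sketch of a caller; the round-robin placement below is purely an illustration, not a policy this patch prescribes:

/* illustrative only: spread the daemons across the online CPUs */
int cpus[NR_LITMUS_SOFTIRQD];
int i;

for (i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
    cpus[i] = i % num_online_cpus();

spawn_klitirqd(cpus);        /* bind klitirqd_th<i> to cpus[i] */
/* or: spawn_klitirqd(NULL);    let the scheduler place the daemons */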
922 | |||
923 | void kill_klitirqd(void) | ||
924 | { | ||
925 | if(!klitirqd_is_dead()) | ||
926 | { | ||
927 | int i; | ||
928 | |||
929 | TRACE("%s: Killing %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); | ||
930 | |||
931 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
932 | { | ||
933 | if(klitirqds[i].terminating != 1) | ||
934 | { | ||
935 | klitirqds[i].terminating = 1; | ||
936 | mb(); /* just to be sure? */ | ||
937 | flush_pending(klitirqds[i].klitirqd, NULL); | ||
938 | |||
939 | /* signal termination */ | ||
940 | kthread_stop(klitirqds[i].klitirqd); | ||
941 | } | ||
942 | } | ||
943 | } | ||
944 | } | ||
945 | |||
946 | |||
947 | int klitirqd_is_ready(void) | ||
948 | { | ||
949 | return(atomic_read(&num_ready_klitirqds) == NR_LITMUS_SOFTIRQD); | ||
950 | } | ||
951 | |||
952 | int klitirqd_is_dead(void) | ||
953 | { | ||
954 | return(atomic_read(&num_ready_klitirqds) == 0); | ||
955 | } | ||
956 | |||
957 | |||
958 | struct task_struct* get_klitirqd(unsigned int k_id) | ||
959 | { | ||
960 | return(klitirqds[k_id].klitirqd); | ||
961 | } | ||
962 | |||
963 | |||
964 | void flush_pending(struct task_struct* klitirqd_thread, | ||
965 | struct task_struct* owner) | ||
966 | { | ||
967 | unsigned int k_id = klitirqd_id(klitirqd_thread); | ||
968 | struct klitirqd_info *which = &klitirqds[k_id]; | ||
969 | |||
970 | unsigned long flags; | ||
971 | struct tasklet_struct *list; | ||
972 | |||
973 | u32 work_flushed = 0; | ||
974 | |||
975 | raw_spin_lock_irqsave(&which->lock, flags); | ||
976 | |||
977 | //__dump_state(which, "flush_pending: before"); | ||
978 | |||
979 | // flush hi tasklets. | ||
980 | if(litirq_pending_hi_irqoff(which)) | ||
981 | { | ||
982 | which->pending &= ~LIT_TASKLET_HI; | ||
983 | |||
984 | list = which->pending_tasklets_hi.head; | ||
985 | which->pending_tasklets_hi.head = NULL; | ||
986 | which->pending_tasklets_hi.tail = &which->pending_tasklets_hi.head; | ||
987 | |||
988 | TRACE("%s: Handing HI tasklets back to Linux.\n", __FUNCTION__); | ||
989 | |||
990 | while(list) | ||
991 | { | ||
992 | struct tasklet_struct *t = list; | ||
993 | list = list->next; | ||
994 | |||
995 | if(likely((t->owner == owner) || (owner == NULL))) | ||
996 | { | ||
997 | if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) | ||
998 | { | ||
999 | BUG(); | ||
1000 | } | ||
1001 | |||
1002 | work_flushed |= LIT_TASKLET_HI; | ||
1003 | |||
1004 | t->owner = NULL; | ||
1005 | |||
1006 | // re-mark as scheduled and hand the tasklet back to Linux. | ||
1007 | if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | ||
1008 | { | ||
1009 | atomic_dec(&which->num_hi_pending); | ||
1010 | ___tasklet_hi_schedule(t); | ||
1011 | } | ||
1012 | else | ||
1013 | { | ||
1014 | TRACE("%s: dropped hi tasklet??\n", __FUNCTION__); | ||
1015 | BUG(); | ||
1016 | } | ||
1017 | } | ||
1018 | else | ||
1019 | { | ||
1020 | TRACE("%s: Could not flush a HI tasklet.\n", __FUNCTION__); | ||
1021 | // put back on queue. | ||
1022 | ___litmus_tasklet_hi_schedule(t, which, 0); | ||
1023 | } | ||
1024 | } | ||
1025 | } | ||
1026 | |||
1027 | // flush low tasklets. | ||
1028 | if(litirq_pending_low_irqoff(which)) | ||
1029 | { | ||
1030 | which->pending &= ~LIT_TASKLET_LOW; | ||
1031 | |||
1032 | list = which->pending_tasklets.head; | ||
1033 | which->pending_tasklets.head = NULL; | ||
1034 | which->pending_tasklets.tail = &which->pending_tasklets.head; | ||
1035 | |||
1036 | TRACE("%s: Handing LOW tasklets back to Linux.\n", __FUNCTION__); | ||
1037 | |||
1038 | while(list) | ||
1039 | { | ||
1040 | struct tasklet_struct *t = list; | ||
1041 | list = list->next; | ||
1042 | |||
1043 | if(likely((t->owner == owner) || (owner == NULL))) | ||
1044 | { | ||
1045 | if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) | ||
1046 | { | ||
1047 | BUG(); | ||
1048 | } | ||
1049 | |||
1050 | work_flushed |= LIT_TASKLET_LOW; | ||
1051 | |||
1052 | t->owner = NULL; | ||
1053 | sched_trace_tasklet_end(owner, 1ul); | ||
1054 | |||
1055 | if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | ||
1056 | { | ||
1057 | atomic_dec(&which->num_low_pending); | ||
1058 | ___tasklet_schedule(t); | ||
1059 | } | ||
1060 | else | ||
1061 | { | ||
1062 | TRACE("%s: dropped tasklet??\n", __FUNCTION__); | ||
1063 | BUG(); | ||
1064 | } | ||
1065 | } | ||
1066 | else | ||
1067 | { | ||
1068 | TRACE("%s: Could not flush a LOW tasklet.\n", __FUNCTION__); | ||
1069 | // put back on queue | ||
1070 | ___litmus_tasklet_schedule(t, which, 0); | ||
1071 | } | ||
1072 | } | ||
1073 | } | ||
1074 | |||
1075 | // flush work objects | ||
1076 | if(litirq_pending_work_irqoff(which)) | ||
1077 | { | ||
1078 | which->pending &= ~LIT_WORK; | ||
1079 | |||
1080 | TRACE("%s: Handing work objects back to Linux.\n", __FUNCTION__); | ||
1081 | |||
1082 | while(!list_empty(&which->worklist)) | ||
1083 | { | ||
1084 | struct work_struct* work = | ||
1085 | list_first_entry(&which->worklist, struct work_struct, entry); | ||
1086 | list_del_init(&work->entry); | ||
1087 | |||
1088 | if(likely((work->owner == owner) || (owner == NULL))) | ||
1089 | { | ||
1090 | work_flushed |= LIT_WORK; | ||
1091 | atomic_dec(&which->num_work_pending); | ||
1092 | |||
1093 | work->owner = NULL; | ||
1094 | sched_trace_work_end(owner, current, 1ul); | ||
1095 | __schedule_work(work); | ||
1096 | } | ||
1097 | else | ||
1098 | { | ||
1099 | TRACE("%s: Could not flush a work object.\n", __FUNCTION__); | ||
1100 | // put back on queue | ||
1101 | ___litmus_schedule_work(work, which, 0); | ||
1102 | } | ||
1103 | } | ||
1104 | } | ||
1105 | |||
1106 | //__dump_state(which, "flush_pending: after (before reeval prio)"); | ||
1107 | |||
1108 | |||
1109 | mb(); /* commit changes to pending flags */ | ||
1110 | |||
1111 | /* reset the scheduling priority */ | ||
1112 | if(work_flushed) | ||
1113 | { | ||
1114 | __reeval_prio(which); | ||
1115 | |||
1116 | /* Try to offload flushed tasklets to Linux's ksoftirqd. */ | ||
1117 | if(work_flushed & (LIT_TASKLET_LOW | LIT_TASKLET_HI)) | ||
1118 | { | ||
1119 | wakeup_softirqd(); | ||
1120 | } | ||
1121 | } | ||
1122 | else | ||
1123 | { | ||
1124 | TRACE_CUR("%s: no work flushed, so __reeval_prio() skipped\n", __FUNCTION__); | ||
1125 | } | ||
1126 | |||
1127 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
1128 | } | ||
1129 | |||
1130 | |||
1131 | |||
1132 | |||
1133 | static void ___litmus_tasklet_schedule(struct tasklet_struct *t, | ||
1134 | struct klitirqd_info *which, | ||
1135 | int wakeup) | ||
1136 | { | ||
1137 | unsigned long flags; | ||
1138 | u32 old_pending; | ||
1139 | |||
1140 | t->next = NULL; | ||
1141 | |||
1142 | raw_spin_lock_irqsave(&which->lock, flags); | ||
1143 | |||
1144 | //__dump_state(which, "___litmus_tasklet_schedule: before queuing"); | ||
1145 | |||
1146 | *(which->pending_tasklets.tail) = t; | ||
1147 | which->pending_tasklets.tail = &t->next; | ||
1148 | |||
1149 | old_pending = which->pending; | ||
1150 | which->pending |= LIT_TASKLET_LOW; | ||
1151 | |||
1152 | atomic_inc(&which->num_low_pending); | ||
1153 | |||
1154 | mb(); | ||
1155 | |||
1156 | if(!old_pending && wakeup) | ||
1157 | { | ||
1158 | wakeup_litirqd_locked(which); /* wake up the klitirqd */ | ||
1159 | } | ||
1160 | |||
1161 | //__dump_state(which, "___litmus_tasklet_schedule: after queuing"); | ||
1162 | |||
1163 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
1164 | } | ||
1165 | |||
1166 | int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id) | ||
1167 | { | ||
1168 | int ret = 0; /* assume failure */ | ||
1169 | if(unlikely((t->owner == NULL) || !is_realtime(t->owner))) | ||
1170 | { | ||
1171 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
1172 | BUG(); | ||
1173 | } | ||
1174 | |||
1175 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | ||
1176 | { | ||
1177 | TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id); | ||
1178 | BUG(); | ||
1179 | } | ||
1180 | |||
1181 | if(likely(!klitirqds[k_id].terminating)) | ||
1182 | { | ||
1183 | /* Can't accept tasklets while we're processing a workqueue | ||
1184 | because they're handled by the same thread. This case is | ||
1185 | very RARE. | ||
1186 | |||
1187 | TODO: Use a separate thread for work objects!!!!!! | ||
1188 | */ | ||
1189 | if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0)) | ||
1190 | { | ||
1191 | ret = 1; | ||
1192 | ___litmus_tasklet_schedule(t, &klitirqds[k_id], 1); | ||
1193 | } | ||
1194 | else | ||
1195 | { | ||
1196 | TRACE("%s: rejected tasklet because of pending work.\n", | ||
1197 | __FUNCTION__); | ||
1198 | } | ||
1199 | } | ||
1200 | return(ret); | ||
1201 | } | ||
1202 | |||
1203 | EXPORT_SYMBOL(__litmus_tasklet_schedule); | ||
1204 | |||
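__litmus_tasklet_schedule() returns 1 when klitirqd accepted the tasklet and 0 when it was rejected (terminating daemon or pending work objects); the consumer side, do_lit_tasklet(), expects TASKLET_STATE_SCHED to already be set on the tasklet. A hypothetical call site that mirrors tasklet_schedule()'s test_and_set_bit() and falls back to the stock Linux softirq path on rejection; redirect_tasklet() and the owner/k_id lookup are illustrative, not part of this patch:

#include <linux/interrupt.h>
#include <linux/sched.h>

/* hypothetical redirect: 'owner' is the real-time task the bottom half
 * runs on behalf of, 'k_id' selects the klitirqd thread to use. */
static void redirect_tasklet(struct tasklet_struct *t,
                             struct task_struct *owner,
                             unsigned int k_id)
{
    t->owner = owner;   /* must be non-NULL and real-time, or it BUGs */
    if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
        if (!__litmus_tasklet_schedule(t, k_id))
            __tasklet_schedule(t);   /* rejected: use the normal softirq path */
    }
}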
1205 | |||
1206 | static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t, | ||
1207 | struct klitirqd_info *which, | ||
1208 | int wakeup) | ||
1209 | { | ||
1210 | unsigned long flags; | ||
1211 | u32 old_pending; | ||
1212 | |||
1213 | t->next = NULL; | ||
1214 | |||
1215 | raw_spin_lock_irqsave(&which->lock, flags); | ||
1216 | |||
1217 | *(which->pending_tasklets_hi.tail) = t; | ||
1218 | which->pending_tasklets_hi.tail = &t->next; | ||
1219 | |||
1220 | old_pending = which->pending; | ||
1221 | which->pending |= LIT_TASKLET_HI; | ||
1222 | |||
1223 | atomic_inc(&which->num_hi_pending); | ||
1224 | |||
1225 | mb(); | ||
1226 | |||
1227 | if(!old_pending && wakeup) | ||
1228 | { | ||
1229 | wakeup_litirqd_locked(which); /* wake up the klitirqd */ | ||
1230 | } | ||
1231 | |||
1232 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
1233 | } | ||
1234 | |||
1235 | int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id) | ||
1236 | { | ||
1237 | int ret = 0; /* assume failure */ | ||
1238 | if(unlikely((t->owner == NULL) || !is_realtime(t->owner))) | ||
1239 | { | ||
1240 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
1241 | BUG(); | ||
1242 | } | ||
1243 | |||
1244 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | ||
1245 | { | ||
1246 | TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id); | ||
1247 | BUG(); | ||
1248 | } | ||
1249 | |||
1250 | if(unlikely(!klitirqd_is_ready())) | ||
1251 | { | ||
1252 | TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id); | ||
1253 | BUG(); | ||
1254 | } | ||
1255 | |||
1256 | if(likely(!klitirqds[k_id].terminating)) | ||
1257 | { | ||
1258 | if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0)) | ||
1259 | { | ||
1260 | ret = 1; | ||
1261 | ___litmus_tasklet_hi_schedule(t, &klitirqds[k_id], 1); | ||
1262 | } | ||
1263 | else | ||
1264 | { | ||
1265 | TRACE("%s: rejected tasklet because of pending work.\n", | ||
1266 | __FUNCTION__); | ||
1267 | } | ||
1268 | } | ||
1269 | return(ret); | ||
1270 | } | ||
1271 | |||
1272 | EXPORT_SYMBOL(__litmus_tasklet_hi_schedule); | ||
1273 | |||
1274 | |||
1275 | int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k_id) | ||
1276 | { | ||
1277 | int ret = 0; /* assume failure */ | ||
1278 | u32 old_pending; | ||
1279 | |||
1280 | BUG_ON(!irqs_disabled()); | ||
1281 | |||
1282 | if(unlikely((t->owner == NULL) || !is_realtime(t->owner))) | ||
1283 | { | ||
1284 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
1285 | BUG(); | ||
1286 | } | ||
1287 | |||
1288 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | ||
1289 | { | ||
1290 | TRACE("%s: No klitirqd_th%u!\n", __FUNCTION__, k_id); | ||
1291 | BUG(); | ||
1292 | } | ||
1293 | |||
1294 | if(unlikely(!klitirqd_is_ready())) | ||
1295 | { | ||
1296 | TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id); | ||
1297 | BUG(); | ||
1298 | } | ||
1299 | |||
1300 | if(likely(!klitirqds[k_id].terminating)) | ||
1301 | { | ||
1302 | raw_spin_lock(&klitirqds[k_id].lock); | ||
1303 | |||
1304 | if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0)) | ||
1305 | { | ||
1306 | ret = 1; // success! | ||
1307 | |||
1308 | t->next = klitirqds[k_id].pending_tasklets_hi.head; | ||
1309 | klitirqds[k_id].pending_tasklets_hi.head = t; | ||
1310 | |||
1311 | old_pending = klitirqds[k_id].pending; | ||
1312 | klitirqds[k_id].pending |= LIT_TASKLET_HI; | ||
1313 | |||
1314 | atomic_inc(&klitirqds[k_id].num_hi_pending); | ||
1315 | |||
1316 | mb(); | ||
1317 | |||
1318 | if(!old_pending) | ||
1319 | wakeup_litirqd_locked(&klitirqds[k_id]); /* wake up the klitirqd */ | ||
1320 | } | ||
1321 | else | ||
1322 | { | ||
1323 | TRACE("%s: rejected tasklet because of pending work.\n", | ||
1324 | __FUNCTION__); | ||
1325 | } | ||
1326 | |||
1327 | raw_spin_unlock(&klitirqds[k_id].lock); | ||
1328 | } | ||
1329 | return(ret); | ||
1330 | } | ||
1331 | |||
1332 | EXPORT_SYMBOL(__litmus_tasklet_hi_schedule_first); | ||
1333 | |||
1334 | |||
1335 | |||
1336 | static void ___litmus_schedule_work(struct work_struct *w, | ||
1337 | struct klitirqd_info *which, | ||
1338 | int wakeup) | ||
1339 | { | ||
1340 | unsigned long flags; | ||
1341 | u32 old_pending; | ||
1342 | |||
1343 | raw_spin_lock_irqsave(&which->lock, flags); | ||
1344 | |||
1345 | work_pending(w); | ||
1346 | list_add_tail(&w->entry, &which->worklist); | ||
1347 | |||
1348 | old_pending = which->pending; | ||
1349 | which->pending |= LIT_WORK; | ||
1350 | |||
1351 | atomic_inc(&which->num_work_pending); | ||
1352 | |||
1353 | mb(); | ||
1354 | |||
1355 | if(!old_pending && wakeup) | ||
1356 | { | ||
1357 | wakeup_litirqd_locked(which); /* wakeup the klitirqd */ | ||
1358 | } | ||
1359 | |||
1360 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
1361 | } | ||
1362 | |||
1363 | int __litmus_schedule_work(struct work_struct *w, unsigned int k_id) | ||
1364 | { | ||
1365 | int ret = 1; /* assume success */ | ||
1366 | if(unlikely((w->owner == NULL) || !is_realtime(w->owner))) | ||
1367 | { | ||
1368 | TRACE("%s: No owner associated with this work object!\n", __FUNCTION__); | ||
1369 | BUG(); | ||
1370 | } | ||
1371 | |||
1372 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | ||
1373 | { | ||
1374 | TRACE("%s: No klitirqd_th%u!\n", k_id); | ||
1375 | BUG(); | ||
1376 | } | ||
1377 | |||
1378 | if(unlikely(!klitirqd_is_ready())) | ||
1379 | { | ||
1380 | TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id); | ||
1381 | BUG(); | ||
1382 | } | ||
1383 | |||
1384 | if(likely(!klitirqds[k_id].terminating)) | ||
1385 | ___litmus_schedule_work(w, &klitirqds[k_id], 1); | ||
1386 | else | ||
1387 | ret = 0; | ||
1388 | return(ret); | ||
1389 | } | ||
1390 | EXPORT_SYMBOL(__litmus_schedule_work); | ||
1391 | |||
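__litmus_schedule_work() has a similar contract: it returns 1 when the work item was handed to klitirqd and 0 only when that daemon is terminating, in which case a caller presumably falls back to an ordinary workqueue. A short hypothetical call site (the surrounding variables are illustrative):

/* hypothetical: queue bottom-half work on behalf of a real-time owner */
w->owner = owner;                 /* must be non-NULL and real-time */
if (!__litmus_schedule_work(w, k_id))
    schedule_work(w);             /* daemon terminating: use the system workqueue */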
1392 | |||
1393 | static int set_klitirqd_sem_status(unsigned long stat) | ||
1394 | { | ||
1395 | TRACE_CUR("SETTING STATUS FROM %d TO %d\n", | ||
1396 | atomic_read(&tsk_rt(current)->klitirqd_sem_stat), | ||
1397 | stat); | ||
1398 | atomic_set(&tsk_rt(current)->klitirqd_sem_stat, stat); | ||
1399 | //mb(); | ||
1400 | |||
1401 | return(0); | ||
1402 | } | ||
1403 | |||
1404 | static int set_klitirqd_sem_status_if_not_held(unsigned long stat) | ||
1405 | { | ||
1406 | if(atomic_read(&tsk_rt(current)->klitirqd_sem_stat) != HELD) | ||
1407 | { | ||
1408 | return(set_klitirqd_sem_status(stat)); | ||
1409 | } | ||
1410 | return(-1); | ||
1411 | } | ||
1412 | |||
1413 | |||
1414 | void __down_and_reset_and_set_stat(struct task_struct* t, | ||
1415 | enum klitirqd_sem_status to_reset, | ||
1416 | enum klitirqd_sem_status to_set, | ||
1417 | struct mutex* sem) | ||
1418 | { | ||
1419 | #if 0 | ||
1420 | struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem); | ||
1421 | struct task_struct* task = container_of(param, struct task_struct, rt_param); | ||
1422 | |||
1423 | TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n", | ||
1424 | __FUNCTION__, task->comm, task->pid); | ||
1425 | #endif | ||
1426 | |||
1427 | mutex_lock_sfx(sem, | ||
1428 | set_klitirqd_sem_status_if_not_held, to_reset, | ||
1429 | set_klitirqd_sem_status, to_set); | ||
1430 | #if 0 | ||
1431 | TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n", | ||
1432 | __FUNCTION__, task->comm, task->pid); | ||
1433 | #endif | ||
1434 | } | ||
1435 | |||
1436 | void down_and_set_stat(struct task_struct* t, | ||
1437 | enum klitirqd_sem_status to_set, | ||
1438 | struct mutex* sem) | ||
1439 | { | ||
1440 | #if 0 | ||
1441 | struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem); | ||
1442 | struct task_struct* task = container_of(param, struct task_struct, rt_param); | ||
1443 | |||
1444 | TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n", | ||
1445 | __FUNCTION__, task->comm, task->pid); | ||
1446 | #endif | ||
1447 | |||
1448 | mutex_lock_sfx(sem, | ||
1449 | NULL, 0, | ||
1450 | set_klitirqd_sem_status, to_set); | ||
1451 | |||
1452 | #if 0 | ||
1453 | TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n", | ||
1454 | __FUNCTION__, task->comm, task->pid); | ||
1455 | #endif | ||
1456 | } | ||
1457 | |||
1458 | |||
1459 | void up_and_set_stat(struct task_struct* t, | ||
1460 | enum klitirqd_sem_status to_set, | ||
1461 | struct mutex* sem) | ||
1462 | { | ||
1463 | #if 0 | ||
1464 | struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem); | ||
1465 | struct task_struct* task = container_of(param, struct task_struct, rt_param); | ||
1466 | |||
1467 | TRACE_CUR("%s: entered. Unlocking semaphore of %s/%d\n", | ||
1468 | __FUNCTION__, | ||
1469 | task->comm, task->pid); | ||
1470 | #endif | ||
1471 | |||
1472 | mutex_unlock_sfx(sem, NULL, 0, | ||
1473 | set_klitirqd_sem_status, to_set); | ||
1474 | |||
1475 | #if 0 | ||
1476 | TRACE_CUR("%s: exiting. Unlocked semaphore of %s/%d\n", | ||
1477 | __FUNCTION__, | ||
1478 | task->comm, task->pid); | ||
1479 | #endif | ||
1480 | } | ||
1481 | |||
1482 | |||
1483 | |||
1484 | void release_klitirqd_lock(struct task_struct* t) | ||
1485 | { | ||
1486 | if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klitirqd_sem_stat) == HELD)) | ||
1487 | { | ||
1488 | struct mutex* sem; | ||
1489 | struct task_struct* owner = t; | ||
1490 | |||
1491 | if(t->state == TASK_RUNNING) | ||
1492 | { | ||
1493 | TRACE_TASK(t, "NOT giving up klitirqd_sem because we're not blocked!\n"); | ||
1494 | return; | ||
1495 | } | ||
1496 | |||
1497 | if(likely(!tsk_rt(t)->is_proxy_thread)) | ||
1498 | { | ||
1499 | sem = &tsk_rt(t)->klitirqd_sem; | ||
1500 | } | ||
1501 | else | ||
1502 | { | ||
1503 | unsigned int k_id = klitirqd_id(t); | ||
1504 | owner = klitirqds[k_id].current_owner; | ||
1505 | |||
1506 | BUG_ON(t != klitirqds[k_id].klitirqd); | ||
1507 | |||
1508 | if(likely(owner)) | ||
1509 | { | ||
1510 | sem = &tsk_rt(owner)->klitirqd_sem; | ||
1511 | } | ||
1512 | else | ||
1513 | { | ||
1514 | BUG(); | ||
1515 | |||
1516 | // We had the rug pulled out from under us. Abort attempt | ||
1517 | // to reacquire the lock since our client no longer needs us. | ||
1518 | TRACE_CUR("HUH?! How did this happen?\n"); | ||
1519 | atomic_set(&tsk_rt(t)->klitirqd_sem_stat, NOT_HELD); | ||
1520 | return; | ||
1521 | } | ||
1522 | } | ||
1523 | |||
1524 | //TRACE_CUR("Releasing semaphore of %s/%d...\n", owner->comm, owner->pid); | ||
1525 | up_and_set_stat(t, NEED_TO_REACQUIRE, sem); | ||
1526 | //TRACE_CUR("Semaphore of %s/%d released!\n", owner->comm, owner->pid); | ||
1527 | } | ||
1528 | /* | ||
1529 | else if(is_realtime(t)) | ||
1530 | { | ||
1531 | TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klitirqd_sem_stat); | ||
1532 | } | ||
1533 | */ | ||
1534 | } | ||
1535 | |||
1536 | int reacquire_klitirqd_lock(struct task_struct* t) | ||
1537 | { | ||
1538 | int ret = 0; | ||
1539 | |||
1540 | if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klitirqd_sem_stat) == NEED_TO_REACQUIRE)) | ||
1541 | { | ||
1542 | struct mutex* sem; | ||
1543 | struct task_struct* owner = t; | ||
1544 | |||
1545 | if(likely(!tsk_rt(t)->is_proxy_thread)) | ||
1546 | { | ||
1547 | sem = &tsk_rt(t)->klitirqd_sem; | ||
1548 | } | ||
1549 | else | ||
1550 | { | ||
1551 | unsigned int k_id = klitirqd_id(t); | ||
1552 | //struct task_struct* owner = klitirqds[k_id].current_owner; | ||
1553 | owner = klitirqds[k_id].current_owner; | ||
1554 | |||
1555 | BUG_ON(t != klitirqds[k_id].klitirqd); | ||
1556 | |||
1557 | if(likely(owner)) | ||
1558 | { | ||
1559 | sem = &tsk_rt(owner)->klitirqd_sem; | ||
1560 | } | ||
1561 | else | ||
1562 | { | ||
1563 | // We had the rug pulled out from under us. Abort attempt | ||
1564 | // to reacquire the lock since our client no longer needs us. | ||
1565 | TRACE_CUR("No longer needs to reacquire klitirqd_sem!\n"); | ||
1566 | atomic_set(&tsk_rt(t)->klitirqd_sem_stat, NOT_HELD); | ||
1567 | return(0); | ||
1568 | } | ||
1569 | } | ||
1570 | |||
1571 | //TRACE_CUR("Trying to reacquire semaphore of %s/%d\n", owner->comm, owner->pid); | ||
1572 | __down_and_reset_and_set_stat(t, REACQUIRING, HELD, sem); | ||
1573 | //TRACE_CUR("Reacquired semaphore %s/%d\n", owner->comm, owner->pid); | ||
1574 | } | ||
1575 | /* | ||
1576 | else if(is_realtime(t)) | ||
1577 | { | ||
1578 | TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klitirqd_sem_stat); | ||
1579 | } | ||
1580 | */ | ||
1581 | |||
1582 | return(ret); | ||
1583 | } | ||
1584 | |||
diff --git a/litmus/locking.c b/litmus/locking.c index 0c1aa6aa40b7..b3279c1930b7 100644 --- a/litmus/locking.c +++ b/litmus/locking.c | |||
@@ -121,7 +121,6 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq) | |||
121 | return(t); | 121 | return(t); |
122 | } | 122 | } |
123 | 123 | ||
124 | |||
125 | #else | 124 | #else |
126 | 125 | ||
127 | struct fdso_ops generic_lock_ops = {}; | 126 | struct fdso_ops generic_lock_ops = {}; |
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c new file mode 100644 index 000000000000..d17152138c63 --- /dev/null +++ b/litmus/nvidia_info.c | |||
@@ -0,0 +1,536 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/semaphore.h> | ||
3 | #include <linux/pci.h> | ||
4 | |||
5 | #include <litmus/sched_trace.h> | ||
6 | #include <litmus/nvidia_info.h> | ||
7 | #include <litmus/litmus.h> | ||
8 | |||
9 | typedef unsigned char NvV8; /* "void": enumerated or multiple fields */ | ||
10 | typedef unsigned short NvV16; /* "void": enumerated or multiple fields */ | ||
11 | typedef unsigned char NvU8; /* 0 to 255 */ | ||
12 | typedef unsigned short NvU16; /* 0 to 65535 */ | ||
13 | typedef signed char NvS8; /* -128 to 127 */ | ||
14 | typedef signed short NvS16; /* -32768 to 32767 */ | ||
15 | typedef float NvF32; /* IEEE Single Precision (S1E8M23) */ | ||
16 | typedef double NvF64; /* IEEE Double Precision (S1E11M52) */ | ||
17 | typedef unsigned int NvV32; /* "void": enumerated or multiple fields */ | ||
18 | typedef unsigned int NvU32; /* 0 to 4294967295 */ | ||
19 | typedef unsigned long long NvU64; /* 0 to 18446744073709551615 */ | ||
20 | typedef union | ||
21 | { | ||
22 | volatile NvV8 Reg008[1]; | ||
23 | volatile NvV16 Reg016[1]; | ||
24 | volatile NvV32 Reg032[1]; | ||
25 | } litmus_nv_hwreg_t, * litmus_nv_phwreg_t; | ||
26 | |||
27 | typedef struct | ||
28 | { | ||
29 | NvU64 address; | ||
30 | NvU64 size; | ||
31 | NvU32 offset; | ||
32 | NvU32 *map; | ||
33 | litmus_nv_phwreg_t map_u; | ||
34 | } litmus_nv_aperture_t; | ||
35 | |||
36 | typedef struct | ||
37 | { | ||
38 | void *priv; /* private data */ | ||
39 | void *os_state; /* os-specific device state */ | ||
40 | |||
41 | int rmInitialized; | ||
42 | int flags; | ||
43 | |||
44 | /* PCI config info */ | ||
45 | NvU32 domain; | ||
46 | NvU16 bus; | ||
47 | NvU16 slot; | ||
48 | NvU16 vendor_id; | ||
49 | NvU16 device_id; | ||
50 | NvU16 subsystem_id; | ||
51 | NvU32 gpu_id; | ||
52 | void *handle; | ||
53 | |||
54 | NvU32 pci_cfg_space[16]; | ||
55 | |||
56 | /* physical characteristics */ | ||
57 | litmus_nv_aperture_t bars[3]; | ||
58 | litmus_nv_aperture_t *regs; | ||
59 | litmus_nv_aperture_t *fb, ud; | ||
60 | litmus_nv_aperture_t agp; | ||
61 | |||
62 | NvU32 interrupt_line; | ||
63 | |||
64 | NvU32 agp_config; | ||
65 | NvU32 agp_status; | ||
66 | |||
67 | NvU32 primary_vga; | ||
68 | |||
69 | NvU32 sim_env; | ||
70 | |||
71 | NvU32 rc_timer_enabled; | ||
72 | |||
73 | /* list of events allocated for this device */ | ||
74 | void *event_list; | ||
75 | |||
76 | void *kern_mappings; | ||
77 | |||
78 | } litmus_nv_state_t; | ||
79 | |||
80 | typedef struct work_struct litmus_nv_task_t; | ||
81 | |||
82 | typedef struct litmus_nv_work_s { | ||
83 | litmus_nv_task_t task; | ||
84 | void *data; | ||
85 | } litmus_nv_work_t; | ||
86 | |||
87 | typedef struct litmus_nv_linux_state_s { | ||
88 | litmus_nv_state_t nv_state; | ||
89 | atomic_t usage_count; | ||
90 | |||
91 | struct pci_dev *dev; | ||
92 | void *agp_bridge; | ||
93 | void *alloc_queue; | ||
94 | |||
95 | void *timer_sp; | ||
96 | void *isr_sp; | ||
97 | void *pci_cfgchk_sp; | ||
98 | void *isr_bh_sp; | ||
99 | |||
100 | #ifdef CONFIG_CUDA_4_0 | ||
101 | char registry_keys[512]; | ||
102 | #endif | ||
103 | |||
104 | /* keep track of any pending bottom halves */ | ||
105 | struct tasklet_struct tasklet; | ||
106 | litmus_nv_work_t work; | ||
107 | |||
108 | /* get a timer callback every second */ | ||
109 | struct timer_list rc_timer; | ||
110 | |||
111 | /* lock for linux-specific data, not used by core rm */ | ||
112 | struct semaphore ldata_lock; | ||
113 | |||
114 | /* lock for linux-specific alloc queue */ | ||
115 | struct semaphore at_lock; | ||
116 | |||
117 | #if 0 | ||
118 | #if defined(NV_USER_MAP) | ||
119 | /* list of user mappings */ | ||
120 | struct nv_usermap_s *usermap_list; | ||
121 | |||
122 | /* lock for VMware-specific mapping list */ | ||
123 | struct semaphore mt_lock; | ||
124 | #endif /* defined(NV_USER_MAP) */ | ||
125 | #if defined(NV_PM_SUPPORT_OLD_STYLE_APM) | ||
126 | void *apm_nv_dev; | ||
127 | #endif | ||
128 | #endif | ||
129 | |||
130 | NvU32 device_num; | ||
131 | struct litmus_nv_linux_state_s *next; | ||
132 | } litmus_nv_linux_state_t; | ||
133 | |||
134 | void dump_nvidia_info(const struct tasklet_struct *t) | ||
135 | { | ||
136 | litmus_nv_state_t* nvstate = NULL; | ||
137 | litmus_nv_linux_state_t* linuxstate = NULL; | ||
138 | struct pci_dev* pci = NULL; | ||
139 | |||
140 | nvstate = (litmus_nv_state_t*)(t->data); | ||
141 | |||
142 | if(nvstate) | ||
143 | { | ||
144 | TRACE("NV State:\n" | ||
145 | "\ttasklet ptr = %p\n" | ||
146 | "\tstate ptr = %p\n" | ||
147 | "\tprivate data ptr = %p\n" | ||
148 | "\tos state ptr = %p\n" | ||
149 | "\tdomain = %u\n" | ||
150 | "\tbus = %u\n" | ||
151 | "\tslot = %u\n" | ||
152 | "\tvender_id = %u\n" | ||
153 | "\tdevice_id = %u\n" | ||
154 | "\tsubsystem_id = %u\n" | ||
155 | "\tgpu_id = %u\n" | ||
156 | "\tinterrupt_line = %u\n", | ||
157 | t, | ||
158 | nvstate, | ||
159 | nvstate->priv, | ||
160 | nvstate->os_state, | ||
161 | nvstate->domain, | ||
162 | nvstate->bus, | ||
163 | nvstate->slot, | ||
164 | nvstate->vendor_id, | ||
165 | nvstate->device_id, | ||
166 | nvstate->subsystem_id, | ||
167 | nvstate->gpu_id, | ||
168 | nvstate->interrupt_line); | ||
169 | |||
170 | linuxstate = container_of(nvstate, litmus_nv_linux_state_t, nv_state); | ||
171 | } | ||
172 | else | ||
173 | { | ||
174 | TRACE("INVALID NVSTATE????\n"); | ||
175 | } | ||
176 | |||
177 | if(linuxstate) | ||
178 | { | ||
179 | int ls_offset = (void*)(&(linuxstate->device_num)) - (void*)(linuxstate); | ||
180 | int ns_offset_raw = (void*)(&(linuxstate->device_num)) - (void*)(&(linuxstate->nv_state)); | ||
181 | int ns_offset_desired = (void*)(&(linuxstate->device_num)) - (void*)(nvstate); | ||
182 | |||
183 | |||
184 | TRACE("LINUX NV State:\n" | ||
185 | "\tlinux nv state ptr: %p\n" | ||
186 | "\taddress of tasklet: %p\n" | ||
187 | "\taddress of work: %p\n" | ||
188 | "\tusage_count: %d\n" | ||
189 | "\tdevice_num: %u\n" | ||
190 | "\ttasklet addr == this tasklet: %d\n" | ||
191 | "\tpci: %p\n", | ||
192 | linuxstate, | ||
193 | &(linuxstate->tasklet), | ||
194 | &(linuxstate->work), | ||
195 | atomic_read(&(linuxstate->usage_count)), | ||
196 | linuxstate->device_num, | ||
197 | (t == &(linuxstate->tasklet)), | ||
198 | linuxstate->dev); | ||
199 | |||
200 | pci = linuxstate->dev; | ||
201 | |||
202 | TRACE("Offsets:\n" | ||
203 | "\tOffset from LinuxState: %d, %x\n" | ||
204 | "\tOffset from NVState: %d, %x\n" | ||
205 | "\tOffset from parameter: %d, %x\n" | ||
206 | "\tdevice_num: %u\n", | ||
207 | ls_offset, ls_offset, | ||
208 | ns_offset_raw, ns_offset_raw, | ||
209 | ns_offset_desired, ns_offset_desired, | ||
210 | *((u32*)((void*)nvstate + ns_offset_desired))); | ||
211 | } | ||
212 | else | ||
213 | { | ||
214 | TRACE("INVALID LINUXNVSTATE?????\n"); | ||
215 | } | ||
216 | |||
217 | #if 0 | ||
218 | if(pci) | ||
219 | { | ||
220 | TRACE("PCI DEV Info:\n" | ||
221 | "pci device ptr: %p\n" | ||
222 | "\tdevfn = %d\n" | ||
223 | "\tvendor = %d\n" | ||
224 | "\tdevice = %d\n" | ||
225 | "\tsubsystem_vendor = %d\n" | ||
226 | "\tsubsystem_device = %d\n" | ||
227 | "\tslot # = %d\n", | ||
228 | pci, | ||
229 | pci->devfn, | ||
230 | pci->vendor, | ||
231 | pci->device, | ||
232 | pci->subsystem_vendor, | ||
233 | pci->subsystem_device, | ||
234 | pci->slot->number); | ||
235 | } | ||
236 | else | ||
237 | { | ||
238 | TRACE("INVALID PCIDEV PTR?????\n"); | ||
239 | } | ||
240 | #endif | ||
241 | } | ||
242 | |||
243 | static struct module* nvidia_mod = NULL; | ||
244 | int init_nvidia_info(void) | ||
245 | { | ||
246 | mutex_lock(&module_mutex); | ||
247 | nvidia_mod = find_module("nvidia"); | ||
248 | mutex_unlock(&module_mutex); | ||
249 | if(nvidia_mod != NULL) | ||
250 | { | ||
251 | TRACE("%s : Found NVIDIA module. Core Code: %p to %p\n", __FUNCTION__, | ||
252 | (void*)(nvidia_mod->module_core), | ||
253 | (void*)(nvidia_mod->module_core) + nvidia_mod->core_size); | ||
254 | init_nv_device_reg(); | ||
255 | return(0); | ||
256 | } | ||
257 | else | ||
258 | { | ||
259 | TRACE("%s : Could not find NVIDIA module! Loaded?\n", __FUNCTION__); | ||
260 | return(-1); | ||
261 | } | ||
262 | } | ||
263 | |||
264 | |||
265 | /* works with pointers to static data inside the module too. */ | ||
266 | int is_nvidia_func(void* func_addr) | ||
267 | { | ||
268 | int ret = 0; | ||
269 | if(nvidia_mod) | ||
270 | { | ||
271 | ret = within_module_core((long unsigned int)func_addr, nvidia_mod); | ||
272 | /* | ||
273 | if(ret) | ||
274 | { | ||
275 | TRACE("%s : %p is in NVIDIA module: %d\n", | ||
276 | __FUNCTION__, func_addr, ret); | ||
277 | }*/ | ||
278 | } | ||
279 | |||
280 | return(ret); | ||
281 | } | ||
282 | |||
283 | u32 get_tasklet_nv_device_num(const struct tasklet_struct *t) | ||
284 | { | ||
285 | // life is too short to use hard-coded offsets. update this later. | ||
286 | litmus_nv_state_t* nvstate = (litmus_nv_state_t*)(t->data); | ||
287 | litmus_nv_linux_state_t* linuxstate = container_of(nvstate, litmus_nv_linux_state_t, nv_state); | ||
288 | |||
289 | BUG_ON(linuxstate->device_num >= NV_DEVICE_NUM); | ||
290 | |||
291 | return(linuxstate->device_num); | ||
292 | |||
293 | //int DEVICE_NUM_OFFSET = (void*)(&(linuxstate->device_num)) - (void*)(nvstate); | ||
294 | |||
295 | #if 0 | ||
296 | // offset determined through observed behavior of the NV driver. | ||
297 | //const int DEVICE_NUM_OFFSET = 0x480; // CUDA 4.0 RC1 | ||
298 | //const int DEVICE_NUM_OFFSET = 0x510; // CUDA 4.0 RC2 | ||
299 | |||
300 | void* state = (void*)(t->data); | ||
301 | void* device_num_ptr = state + DEVICE_NUM_OFFSET; | ||
302 | |||
303 | //dump_nvidia_info(t); | ||
304 | return(*((u32*)device_num_ptr)); | ||
305 | #endif | ||
306 | } | ||
307 | |||
308 | u32 get_work_nv_device_num(const struct work_struct *t) | ||
309 | { | ||
310 | // offset determined through observed behavior of the NV driver. | ||
311 | const int DEVICE_NUM_OFFSET = sizeof(struct work_struct); | ||
312 | void* state = (void*)(t); | ||
313 | void** device_num_ptr = state + DEVICE_NUM_OFFSET; | ||
314 | return(*((u32*)(*device_num_ptr))); | ||
315 | } | ||
316 | |||
317 | |||
318 | |||
319 | typedef struct { | ||
320 | raw_spinlock_t lock; | ||
321 | struct task_struct *device_owner; | ||
322 | }nv_device_registry_t; | ||
323 | |||
324 | static nv_device_registry_t NV_DEVICE_REG[NV_DEVICE_NUM]; | ||
325 | |||
326 | int init_nv_device_reg(void) | ||
327 | { | ||
328 | int i; | ||
329 | |||
330 | //memset(NV_DEVICE_REG, 0, sizeof(NV_DEVICE_REG)); | ||
331 | |||
332 | for(i = 0; i < NV_DEVICE_NUM; ++i) | ||
333 | { | ||
334 | raw_spin_lock_init(&NV_DEVICE_REG[i].lock); | ||
335 | NV_DEVICE_REG[i].device_owner = NULL; | ||
336 | } | ||
337 | |||
338 | return(1); | ||
339 | } | ||
340 | |||
341 | /* use to get the nv_device_id for a given owner. | ||
342 | (returns -1 if the associated device id cannot be found) */ | ||
343 | /* | ||
344 | int get_nv_device_id(struct task_struct* owner) | ||
345 | { | ||
346 | int i; | ||
347 | if(!owner) | ||
348 | { | ||
349 | return(-1); | ||
350 | } | ||
351 | for(i = 0; i < NV_DEVICE_NUM; ++i) | ||
352 | { | ||
353 | if(NV_DEVICE_REG[i].device_owner == owner) | ||
354 | return(i); | ||
355 | } | ||
356 | return(-1); | ||
357 | } | ||
358 | */ | ||
359 | |||
360 | |||
361 | |||
362 | static int __reg_nv_device(int reg_device_id) | ||
363 | { | ||
364 | int ret = 0; | ||
365 | struct task_struct* old = | ||
366 | cmpxchg(&NV_DEVICE_REG[reg_device_id].device_owner, | ||
367 | NULL, | ||
368 | current); | ||
369 | |||
370 | mb(); | ||
371 | |||
372 | if(likely(old == NULL)) | ||
373 | { | ||
374 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
375 | down_and_set_stat(current, HELD, &tsk_rt(current)->klitirqd_sem); | ||
376 | #endif | ||
377 | TRACE_CUR("%s: device %d registered.\n", __FUNCTION__, reg_device_id); | ||
378 | } | ||
379 | else | ||
380 | { | ||
381 | TRACE_CUR("%s: device %d is already in use!\n", __FUNCTION__, reg_device_id); | ||
382 | ret = -EBUSY; | ||
383 | } | ||
384 | |||
385 | return(ret); | ||
386 | |||
387 | |||
388 | |||
389 | #if 0 | ||
390 | //unsigned long flags; | ||
391 | //raw_spin_lock_irqsave(&NV_DEVICE_REG[reg_device_id].lock, flags); | ||
392 | //lock_nv_registry(reg_device_id, &flags); | ||
393 | |||
394 | if(likely(NV_DEVICE_REG[reg_device_id].device_owner == NULL)) | ||
395 | { | ||
396 | NV_DEVICE_REG[reg_device_id].device_owner = current; | ||
397 | mb(); // needed? | ||
398 | |||
399 | // release spin lock before chance of going to sleep. | ||
400 | //raw_spin_unlock_irqrestore(&NV_DEVICE_REG[reg_device_id].lock, flags); | ||
401 | //unlock_nv_registry(reg_device_id, &flags); | ||
402 | |||
403 | down_and_set_stat(current, HELD, &tsk_rt(current)->klitirqd_sem); | ||
404 | TRACE_CUR("%s: device %d registered.\n", __FUNCTION__, reg_device_id); | ||
405 | return(0); | ||
406 | } | ||
407 | else | ||
408 | { | ||
409 | //raw_spin_unlock_irqrestore(&NV_DEVICE_REG[reg_device_id].lock, flags); | ||
410 | //unlock_nv_registry(reg_device_id, &flags); | ||
411 | |||
412 | TRACE_CUR("%s: device %d is already in use!\n", __FUNCTION__, reg_device_id); | ||
413 | return(-EBUSY); | ||
414 | } | ||
415 | #endif | ||
416 | } | ||
417 | |||
418 | static int __clear_reg_nv_device(int de_reg_device_id) | ||
419 | { | ||
420 | int ret = 0; | ||
421 | struct task_struct* old; | ||
422 | |||
423 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
424 | unsigned long flags; | ||
425 | struct task_struct* klitirqd_th = get_klitirqd(de_reg_device_id); | ||
426 | lock_nv_registry(de_reg_device_id, &flags); | ||
427 | #endif | ||
428 | |||
429 | old = cmpxchg(&NV_DEVICE_REG[de_reg_device_id].device_owner, | ||
430 | current, | ||
431 | NULL); | ||
432 | |||
433 | mb(); | ||
434 | |||
435 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
436 | if(likely(old == current)) | ||
437 | { | ||
438 | flush_pending(klitirqd_th, current); | ||
439 | //unlock_nv_registry(de_reg_device_id, &flags); | ||
440 | |||
441 | up_and_set_stat(current, NOT_HELD, &tsk_rt(current)->klitirqd_sem); | ||
442 | |||
443 | unlock_nv_registry(de_reg_device_id, &flags); | ||
444 | ret = 0; | ||
445 | |||
446 | TRACE_CUR("%s: semaphore released.\n",__FUNCTION__); | ||
447 | } | ||
448 | else | ||
449 | { | ||
450 | unlock_nv_registry(de_reg_device_id, &flags); | ||
451 | ret = -EINVAL; | ||
452 | |||
453 | if(old) | ||
454 | TRACE_CUR("%s: device %d is not registered for this process's use! %s/%d is!\n", | ||
455 | __FUNCTION__, de_reg_device_id, old->comm, old->pid); | ||
456 | else | ||
457 | TRACE_CUR("%s: device %d is not registered for this process's use! No one is!\n", | ||
458 | __FUNCTION__, de_reg_device_id); | ||
459 | } | ||
460 | #endif | ||
461 | |||
462 | return(ret); | ||
463 | } | ||
464 | |||
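The ownership handoff in __reg_nv_device() and __clear_reg_nv_device() boils down to a cmpxchg on the per-device owner pointer: cmpxchg(&owner, NULL, current) claims the device only if it is free, and cmpxchg(&owner, current, NULL) releases it only if the caller is still the registered owner. A user-space C11 sketch of that claim/release pattern; the types and names are hypothetical and not part of the patch:

#include <stdatomic.h>
#include <stddef.h>

struct task;                               /* stand-in for struct task_struct */

static _Atomic(struct task *) device_owner = NULL;

/* returns 0 on success, -1 if someone else already owns the device */
static int claim_device(struct task *me)
{
    struct task *expected = NULL;
    return atomic_compare_exchange_strong(&device_owner, &expected, me) ? 0 : -1;
}

/* returns 0 on success, -1 if 'me' was not the registered owner */
static int release_device(struct task *me)
{
    struct task *expected = me;
    return atomic_compare_exchange_strong(&device_owner, &expected, NULL) ? 0 : -1;
}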
465 | |||
466 | int reg_nv_device(int reg_device_id, int reg_action) | ||
467 | { | ||
468 | int ret; | ||
469 | |||
470 | if((reg_device_id < NV_DEVICE_NUM) && (reg_device_id >= 0)) | ||
471 | { | ||
472 | if(reg_action) | ||
473 | ret = __reg_nv_device(reg_device_id); | ||
474 | else | ||
475 | ret = __clear_reg_nv_device(reg_device_id); | ||
476 | } | ||
477 | else | ||
478 | { | ||
479 | ret = -ENODEV; | ||
480 | } | ||
481 | |||
482 | return(ret); | ||
483 | } | ||
484 | |||
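A hedged sketch of the calling convention for reg_nv_device(): a non-zero reg_action registers the calling task for the device and zero de-registers it; the return value is 0 on success, -EBUSY if another task already holds the device, and -ENODEV for an out-of-range device id. The surrounding code is purely illustrative:

/* illustrative caller; in practice the device id comes from user space */
int gpu = 0;
int err = reg_nv_device(gpu, 1);          /* non-zero action: register */

if (err == 0) {
    /* current now owns GPU 0; interrupts and tasklets for this device
     * are attributed to it until it de-registers. */
    reg_nv_device(gpu, 0);                /* zero action: de-register */
} else if (err == -EBUSY) {
    /* some other task already registered this device */
} /* -ENODEV: gpu was outside [0, NV_DEVICE_NUM) */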
485 | /* use to get the owner of the given nv_device_id. */ | ||
486 | struct task_struct* get_nv_device_owner(u32 target_device_id) | ||
487 | { | ||
488 | struct task_struct* owner; | ||
489 | BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
490 | owner = NV_DEVICE_REG[target_device_id].device_owner; | ||
491 | return(owner); | ||
492 | } | ||
493 | |||
494 | void lock_nv_registry(u32 target_device_id, unsigned long* flags) | ||
495 | { | ||
496 | BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
497 | |||
498 | if(in_interrupt()) | ||
499 | TRACE("Locking registry for %d.\n", target_device_id); | ||
500 | else | ||
501 | TRACE_CUR("Locking registry for %d.\n", target_device_id); | ||
502 | |||
503 | raw_spin_lock_irqsave(&NV_DEVICE_REG[target_device_id].lock, *flags); | ||
504 | } | ||
505 | |||
506 | void unlock_nv_registry(u32 target_device_id, unsigned long* flags) | ||
507 | { | ||
508 | BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
509 | |||
510 | if(in_interrupt()) | ||
511 | TRACE("Unlocking registry for %d.\n", target_device_id); | ||
512 | else | ||
513 | TRACE_CUR("Unlocking registry for %d.\n", target_device_id); | ||
514 | |||
515 | raw_spin_unlock_irqrestore(&NV_DEVICE_REG[target_device_id].lock, *flags); | ||
516 | } | ||
517 | |||
518 | |||
519 | void increment_nv_int_count(u32 device) | ||
520 | { | ||
521 | unsigned long flags; | ||
522 | struct task_struct* owner; | ||
523 | |||
524 | lock_nv_registry(device, &flags); | ||
525 | |||
526 | owner = NV_DEVICE_REG[device].device_owner; | ||
527 | if(owner) | ||
528 | { | ||
529 | atomic_inc(&tsk_rt(owner)->nv_int_count); | ||
530 | } | ||
531 | |||
532 | unlock_nv_registry(device, &flags); | ||
533 | } | ||
534 | EXPORT_SYMBOL(increment_nv_int_count); | ||
535 | |||
536 | |||
diff --git a/litmus/preempt.c b/litmus/preempt.c index 5704d0bf4c0b..28368d5bc046 100644 --- a/litmus/preempt.c +++ b/litmus/preempt.c | |||
@@ -30,6 +30,7 @@ void sched_state_will_schedule(struct task_struct* tsk) | |||
30 | /* Litmus tasks should never be subject to a remote | 30 | /* Litmus tasks should never be subject to a remote |
31 | * set_tsk_need_resched(). */ | 31 | * set_tsk_need_resched(). */ |
32 | BUG_ON(is_realtime(tsk)); | 32 | BUG_ON(is_realtime(tsk)); |
33 | |||
33 | #ifdef CONFIG_PREEMPT_STATE_TRACE | 34 | #ifdef CONFIG_PREEMPT_STATE_TRACE |
34 | TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n", | 35 | TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n", |
35 | __builtin_return_address(0)); | 36 | __builtin_return_address(0)); |
@@ -45,13 +46,17 @@ void sched_state_ipi(void) | |||
45 | /* Cause scheduler to be invoked. | 46 | /* Cause scheduler to be invoked. |
46 | * This will cause a transition to WILL_SCHEDULE. */ | 47 | * This will cause a transition to WILL_SCHEDULE. */ |
47 | set_tsk_need_resched(current); | 48 | set_tsk_need_resched(current); |
49 | /* | ||
48 | TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n", | 50 | TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n", |
49 | current->comm, current->pid); | 51 | current->comm, current->pid); |
52 | */ | ||
50 | } else { | 53 | } else { |
51 | /* ignore */ | 54 | /* ignore */ |
55 | /* | ||
52 | TRACE_STATE("ignoring IPI in state %x (%s)\n", | 56 | TRACE_STATE("ignoring IPI in state %x (%s)\n", |
53 | get_sched_state(), | 57 | get_sched_state(), |
54 | sched_state_name(get_sched_state())); | 58 | sched_state_name(get_sched_state())); |
59 | */ | ||
55 | } | 60 | } |
56 | } | 61 | } |
57 | 62 | ||
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 480c62bc895b..3251fb1602f8 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/percpu.h> | 29 | #include <linux/percpu.h> |
30 | #include <linux/sched.h> | 30 | #include <linux/sched.h> |
31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
32 | #include <linux/uaccess.h> | ||
32 | 33 | ||
33 | #include <linux/module.h> | 34 | #include <linux/module.h> |
34 | 35 | ||
@@ -49,7 +50,23 @@ | |||
49 | 50 | ||
50 | /* to configure the cluster size */ | 51 | /* to configure the cluster size */ |
51 | #include <litmus/litmus_proc.h> | 52 | #include <litmus/litmus_proc.h> |
52 | #include <linux/uaccess.h> | 53 | |
54 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
55 | #include <litmus/affinity.h> | ||
56 | #endif | ||
57 | |||
58 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
59 | #include <litmus/litmus_softirq.h> | ||
60 | #endif | ||
61 | |||
62 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
63 | #include <linux/interrupt.h> | ||
64 | #include <litmus/trace.h> | ||
65 | #endif | ||
66 | |||
67 | #ifdef CONFIG_LITMUS_NVIDIA | ||
68 | #include <litmus/nvidia_info.h> | ||
69 | #endif | ||
53 | 70 | ||
54 | /* Reference configuration variable. Determines which cache level is used to | 71 | /* Reference configuration variable. Determines which cache level is used to |
55 | * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that | 72 | * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that |
@@ -83,6 +100,15 @@ DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries); | |||
83 | #define test_will_schedule(cpu) \ | 100 | #define test_will_schedule(cpu) \ |
84 | (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule)) | 101 | (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule)) |
85 | 102 | ||
103 | |||
104 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
105 | struct tasklet_head | ||
106 | { | ||
107 | struct tasklet_struct *head; | ||
108 | struct tasklet_struct **tail; | ||
109 | }; | ||
110 | #endif | ||
111 | |||
86 | /* | 112 | /* |
87 | * In C-EDF there is a cedf domain _per_ cluster | 113 | * In C-EDF there is a cedf domain _per_ cluster |
88 | * The number of clusters is dynamically determined accordingly to the | 114 | * The number of clusters is dynamically determined accordingly to the |
@@ -100,6 +126,10 @@ typedef struct clusterdomain { | |||
100 | struct bheap cpu_heap; | 126 | struct bheap cpu_heap; |
101 | /* lock for this cluster */ | 127 | /* lock for this cluster */ |
102 | #define cluster_lock domain.ready_lock | 128 | #define cluster_lock domain.ready_lock |
129 | |||
130 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
131 | struct tasklet_head pending_tasklets; | ||
132 | #endif | ||
103 | } cedf_domain_t; | 133 | } cedf_domain_t; |
104 | 134 | ||
105 | /* a cedf_domain per cluster; allocation is done at init/activation time */ | 135 | /* a cedf_domain per cluster; allocation is done at init/activation time */ |
@@ -208,7 +238,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked, | |||
208 | } | 238 | } |
209 | 239 | ||
210 | /* unlink - Make sure a task is not linked any longer to an entry | 240 | /* unlink - Make sure a task is not linked any longer to an entry |
211 | * where it was linked before. Must hold cedf_lock. | 241 | * where it was linked before. Must hold cluster_lock. |
212 | */ | 242 | */ |
213 | static noinline void unlink(struct task_struct* t) | 243 | static noinline void unlink(struct task_struct* t) |
214 | { | 244 | { |
@@ -244,7 +274,7 @@ static void preempt(cpu_entry_t *entry) | |||
244 | } | 274 | } |
245 | 275 | ||
246 | /* requeue - Put an unlinked task into gsn-edf domain. | 276 | /* requeue - Put an unlinked task into gsn-edf domain. |
247 | * Caller must hold cedf_lock. | 277 | * Caller must hold cluster_lock. |
248 | */ | 278 | */ |
249 | static noinline void requeue(struct task_struct* task) | 279 | static noinline void requeue(struct task_struct* task) |
250 | { | 280 | { |
@@ -339,13 +369,17 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | |||
339 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); | 369 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); |
340 | } | 370 | } |
341 | 371 | ||
342 | /* caller holds cedf_lock */ | 372 | /* caller holds cluster_lock */ |
343 | static noinline void job_completion(struct task_struct *t, int forced) | 373 | static noinline void job_completion(struct task_struct *t, int forced) |
344 | { | 374 | { |
345 | BUG_ON(!t); | 375 | BUG_ON(!t); |
346 | 376 | ||
347 | sched_trace_task_completion(t, forced); | 377 | sched_trace_task_completion(t, forced); |
348 | 378 | ||
379 | #ifdef CONFIG_LITMUS_NVIDIA | ||
380 | atomic_set(&tsk_rt(t)->nv_int_count, 0); | ||
381 | #endif | ||
382 | |||
349 | TRACE_TASK(t, "job_completion().\n"); | 383 | TRACE_TASK(t, "job_completion().\n"); |
350 | 384 | ||
351 | /* set flags */ | 385 | /* set flags */ |
@@ -389,6 +423,461 @@ static void cedf_tick(struct task_struct* t) | |||
389 | } | 423 | } |
390 | } | 424 | } |
391 | 425 | ||
426 | |||
427 | |||
428 | |||
429 | |||
430 | |||
431 | |||
432 | |||
433 | |||
434 | |||
435 | |||
436 | |||
437 | |||
438 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
439 | |||
440 | |||
441 | static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed) | ||
442 | { | ||
443 | if (!atomic_read(&tasklet->count)) { | ||
444 | if(tasklet->owner) { | ||
445 | sched_trace_tasklet_begin(tasklet->owner); | ||
446 | } | ||
447 | |||
448 | if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) | ||
449 | { | ||
450 | BUG(); | ||
451 | } | ||
452 | TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", | ||
453 | __FUNCTION__, | ||
454 | (tasklet->owner) ? tasklet->owner->pid : -1, | ||
455 | (tasklet->owner) ? 0 : 1); | ||
456 | tasklet->func(tasklet->data); | ||
457 | tasklet_unlock(tasklet); | ||
458 | |||
459 | if(tasklet->owner) { | ||
460 | sched_trace_tasklet_end(tasklet->owner, flushed); | ||
461 | } | ||
462 | } | ||
463 | else { | ||
464 | BUG(); | ||
465 | } | ||
466 | } | ||
467 | |||
468 | |||
469 | static void __extract_tasklets(cedf_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets) | ||
470 | { | ||
471 | struct tasklet_struct* step; | ||
472 | struct tasklet_struct* tasklet; | ||
473 | struct tasklet_struct* prev; | ||
474 | |||
475 | task_tasklets->head = NULL; | ||
476 | task_tasklets->tail = &(task_tasklets->head); | ||
477 | |||
478 | prev = NULL; | ||
479 | for(step = cluster->pending_tasklets.head; step != NULL; step = step->next) | ||
480 | { | ||
481 | if(step->owner == task) | ||
482 | { | ||
483 | TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid); | ||
484 | |||
485 | tasklet = step; | ||
486 | |||
487 | if(prev) { | ||
488 | prev->next = tasklet->next; | ||
489 | } | ||
490 | else if(cluster->pending_tasklets.head == tasklet) { | ||
491 | // we're at the head. | ||
492 | cluster->pending_tasklets.head = tasklet->next; | ||
493 | } | ||
494 | |||
495 | if(cluster->pending_tasklets.tail == &(tasklet->next)) { | ||
496 | // we're at the tail | ||
497 | if(prev) { | ||
498 | cluster->pending_tasklets.tail = &(prev->next); | ||
499 | } | ||
500 | else { | ||
501 | cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head); | ||
502 | } | ||
503 | } | ||
504 | |||
505 | tasklet->next = NULL; | ||
506 | *(task_tasklets->tail) = tasklet; | ||
507 | task_tasklets->tail = &(tasklet->next); | ||
508 | } | ||
509 | else { | ||
510 | prev = step; | ||
511 | } | ||
512 | } | ||
513 | } | ||
514 | |||
515 | static void flush_tasklets(cedf_domain_t* cluster, struct task_struct* task) | ||
516 | { | ||
517 | #if 0 | ||
518 | unsigned long flags; | ||
519 | struct tasklet_head task_tasklets; | ||
520 | struct tasklet_struct* step; | ||
521 | |||
522 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); | ||
523 | __extract_tasklets(cluster, task, &task_tasklets); | ||
524 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); | ||
525 | |||
526 | if(cluster->pending_tasklets.head != NULL) { | ||
527 | TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid); | ||
528 | } | ||
529 | |||
530 | // now execute any flushed tasklets. | ||
531 | for(step = cluster->pending_tasklets.head; step != NULL; /**/) | ||
532 | { | ||
533 | struct tasklet_struct* temp = step->next; | ||
534 | |||
535 | step->next = NULL; | ||
536 | __do_lit_tasklet(step, 1ul); | ||
537 | |||
538 | step = temp; | ||
539 | } | ||
540 | #endif | ||
541 | |||
542 | // lazy flushing. | ||
543 | // just change ownership to NULL and let an idle processor | ||
544 | // take care of it. :P | ||
545 | |||
546 | struct tasklet_struct* step; | ||
547 | unsigned long flags; | ||
548 | |||
549 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); | ||
550 | |||
551 | for(step = cluster->pending_tasklets.head; step != NULL; step = step->next) | ||
552 | { | ||
553 | if(step->owner == task) | ||
554 | { | ||
555 | TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid); | ||
556 | step->owner = NULL; | ||
557 | } | ||
558 | } | ||
559 | |||
560 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); | ||
561 | } | ||
562 | |||
563 | |||
564 | static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task) | ||
565 | { | ||
566 | int work_to_do = 1; | ||
567 | struct tasklet_struct *tasklet = NULL; | ||
568 | //struct tasklet_struct *step; | ||
569 | unsigned long flags; | ||
570 | |||
571 | while(work_to_do) { | ||
572 | |||
573 | TS_NV_SCHED_BOTISR_START; | ||
574 | |||
575 | // remove tasklet at head of list if it has higher priority. | ||
576 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); | ||
577 | |||
578 | /* | ||
579 | step = cluster->pending_tasklets.head; | ||
580 | TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); | ||
581 | while(step != NULL){ | ||
582 | TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); | ||
583 | step = step->next; | ||
584 | } | ||
585 | TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); | ||
586 | TRACE("%s: done.\n", __FUNCTION__); | ||
587 | */ | ||
588 | |||
589 | if(cluster->pending_tasklets.head != NULL) { | ||
590 | // remove tasklet at head. | ||
591 | tasklet = cluster->pending_tasklets.head; | ||
592 | |||
593 | if(edf_higher_prio(tasklet->owner, sched_task)) { | ||
594 | |||
595 | if(NULL == tasklet->next) { | ||
596 | // tasklet is at the head, list only has one element | ||
597 | TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1); | ||
598 | cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head); | ||
599 | } | ||
600 | |||
601 | // remove the tasklet from the queue | ||
602 | cluster->pending_tasklets.head = tasklet->next; | ||
603 | |||
604 | TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1); | ||
605 | } | ||
606 | else { | ||
607 | TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1, smp_processor_id()); | ||
608 | tasklet = NULL; | ||
609 | } | ||
610 | } | ||
611 | else { | ||
612 | TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); | ||
613 | } | ||
614 | |||
615 | |||
616 | /* | ||
617 | step = cluster->pending_tasklets.head; | ||
618 | TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); | ||
619 | while(step != NULL){ | ||
620 | TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); | ||
621 | step = step->next; | ||
622 | } | ||
623 | TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); | ||
624 | TRACE("%s: done.\n", __FUNCTION__); | ||
625 | */ | ||
626 | |||
627 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); | ||
628 | |||
629 | |||
630 | TS_NV_SCHED_BOTISR_END; | ||
631 | |||
632 | if(tasklet) { | ||
633 | __do_lit_tasklet(tasklet, 0ul); | ||
634 | tasklet = NULL; | ||
635 | } | ||
636 | else { | ||
637 | work_to_do = 0; | ||
638 | } | ||
639 | } | ||
640 | |||
641 | //TRACE("%s: exited.\n", __FUNCTION__); | ||
642 | } | ||
643 | |||
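do_lit_tasklets() above drains the pending list by popping the head only while the head's owner outranks the task scheduled on this CPU, and it always drops the cluster lock before invoking the handler. A compact user-space sketch of that drain pattern, with integer priorities (larger = more urgent) standing in for edf_higher_prio() and a pthread mutex in place of the cluster lock; all names are illustrative (build with gcc -pthread).

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct item {
	int prio;			/* stands in for the owner's EDF priority */
	struct item *next;
};

static struct item *head;
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

static void push(int prio)
{
	struct item *it = malloc(sizeof(*it));
	if (!it)
		return;
	it->prio = prio;
	pthread_mutex_lock(&q_lock);
	it->next = head;
	head = it;
	pthread_mutex_unlock(&q_lock);
}

/* Pop and run items while the head outranks cur_prio.  The lock is
 * released around the callback, mirroring how the tasklet body runs
 * outside the cluster lock. */
static void drain(int cur_prio, void (*run)(struct item *))
{
	for (;;) {
		struct item *it = NULL;

		pthread_mutex_lock(&q_lock);
		if (head && head->prio > cur_prio) {
			it = head;
			head = it->next;
		}
		pthread_mutex_unlock(&q_lock);

		if (!it)
			break;		/* queue empty or head not eligible */
		run(it);		/* handler runs without the lock held */
		free(it);
	}
}

static void show(struct item *it)
{
	printf("ran item with prio %d\n", it->prio);
}

int main(void)
{
	push(3); push(7); push(5);	/* list becomes 5 -> 7 -> 3 */
	drain(4, show);			/* runs 5 and 7, leaves 3 queued */
	return 0;
}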
644 | |||
645 | static void run_tasklets(struct task_struct* sched_task) | ||
646 | { | ||
647 | cedf_domain_t* cluster; | ||
648 | |||
649 | #if 0 | ||
650 | int task_is_rt = is_realtime(sched_task); | ||
651 | cedf_domain_t* cluster; | ||
652 | |||
653 | if(is_realtime(sched_task)) { | ||
654 | cluster = task_cpu_cluster(sched_task); | ||
655 | } | ||
656 | else { | ||
657 | cluster = remote_cluster(get_cpu()); | ||
658 | } | ||
659 | |||
660 | if(cluster && cluster->pending_tasklets.head != NULL) { | ||
661 | TRACE("%s: There are tasklets to process.\n", __FUNCTION__); | ||
662 | |||
663 | do_lit_tasklets(cluster, sched_task); | ||
664 | } | ||
665 | |||
666 | if(!task_is_rt) { | ||
667 | put_cpu_no_resched(); | ||
668 | } | ||
669 | #else | ||
670 | |||
671 | preempt_disable(); | ||
672 | |||
673 | cluster = (is_realtime(sched_task)) ? | ||
674 | task_cpu_cluster(sched_task) : | ||
675 | remote_cluster(smp_processor_id()); | ||
676 | |||
677 | if(cluster && cluster->pending_tasklets.head != NULL) { | ||
678 | TRACE("%s: There are tasklets to process.\n", __FUNCTION__); | ||
679 | do_lit_tasklets(cluster, sched_task); | ||
680 | } | ||
681 | |||
682 | preempt_enable_no_resched(); | ||
683 | |||
684 | #endif | ||
685 | } | ||
686 | |||
687 | |||
688 | static void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster) | ||
689 | { | ||
690 | struct tasklet_struct* step; | ||
691 | |||
692 | /* | ||
693 | step = cluster->pending_tasklets.head; | ||
694 | TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); | ||
695 | while(step != NULL){ | ||
696 | TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); | ||
697 | step = step->next; | ||
698 | } | ||
699 | TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); | ||
700 | TRACE("%s: done.\n", __FUNCTION__); | ||
701 | */ | ||
702 | |||
703 | |||
704 | tasklet->next = NULL; // make sure there are no old values floating around | ||
705 | |||
706 | step = cluster->pending_tasklets.head; | ||
707 | if(step == NULL) { | ||
708 | TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid); | ||
709 | // queue is empty: head and tail coincide, so append at the tail. | ||
710 | *(cluster->pending_tasklets.tail) = tasklet; | ||
711 | cluster->pending_tasklets.tail = &(tasklet->next); | ||
712 | } | ||
713 | else if((*(cluster->pending_tasklets.tail) != NULL) && | ||
714 | edf_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) { | ||
715 | // insert at tail. | ||
716 | TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid); | ||
717 | |||
718 | *(cluster->pending_tasklets.tail) = tasklet; | ||
719 | cluster->pending_tasklets.tail = &(tasklet->next); | ||
720 | } | ||
721 | else { | ||
722 | |||
723 | //WARN_ON(1 == 1); | ||
724 | |||
725 | // insert the tasklet somewhere in the middle. | ||
726 | |||
727 | TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__); | ||
728 | |||
729 | while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) { | ||
730 | step = step->next; | ||
731 | } | ||
732 | |||
733 | // insert tasklet right before step->next. | ||
734 | |||
735 | TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, | ||
736 | tasklet->owner->pid, | ||
737 | (step->owner) ? | ||
738 | step->owner->pid : | ||
739 | -1, | ||
740 | (step->next) ? | ||
741 | ((step->next->owner) ? | ||
742 | step->next->owner->pid : | ||
743 | -1) : | ||
744 | -1); | ||
745 | |||
746 | tasklet->next = step->next; | ||
747 | step->next = tasklet; | ||
748 | |||
749 | // patch up the head if needed. | ||
750 | if(cluster->pending_tasklets.head == step) | ||
751 | { | ||
752 | TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid); | ||
753 | cluster->pending_tasklets.head = tasklet; | ||
754 | } | ||
755 | } | ||
756 | |||
757 | /* | ||
758 | step = cluster->pending_tasklets.head; | ||
759 | TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); | ||
760 | while(step != NULL){ | ||
761 | TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); | ||
762 | step = step->next; | ||
763 | } | ||
764 | TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); | ||
765 | TRACE("%s: done.\n", __FUNCTION__); | ||
766 | */ | ||
767 | |||
768 | // TODO: Maintain this list in priority order. | ||
769 | // tasklet->next = NULL; | ||
770 | // *(cluster->pending_tasklets.tail) = tasklet; | ||
771 | // cluster->pending_tasklets.tail = &tasklet->next; | ||
772 | } | ||
773 | |||
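__add_pai_tasklet() keeps the per-cluster pending list sorted by owner priority and tracks the tail as a pointer-to-pointer so that appends of the lowest-priority work stay O(1). A standalone sketch of one way to do such a sorted insert with the same pointer-to-pointer idiom, handling the insert-at-head case explicitly; integer priorities (larger = more urgent) stand in for edf_higher_prio(), and every name here is illustrative rather than taken from the patch.

#include <stdio.h>

struct node {
	int prio;
	struct node *next;
};

struct pqueue {
	struct node *head;
	struct node **tail;	/* address of the last node's 'next' field (or of head) */
};

static void pqueue_init(struct pqueue *q)
{
	q->head = NULL;
	q->tail = &q->head;
}

/* Insert n so the list stays sorted, highest priority first. */
static void pqueue_insert(struct pqueue *q, struct node *n)
{
	struct node **pos = &q->head;

	/* advance past every node that still outranks n */
	while (*pos && (*pos)->prio >= n->prio)
		pos = &(*pos)->next;

	n->next = *pos;
	*pos = n;
	if (!n->next)			/* n became the last node: move the cached tail */
		q->tail = &n->next;
}

/* O(1) append for a node already known to have the lowest priority. */
static void pqueue_append(struct pqueue *q, struct node *n)
{
	n->next = NULL;
	*(q->tail) = n;
	q->tail = &n->next;
}

int main(void)
{
	struct pqueue q;
	struct node a = { .prio = 5 }, b = { .prio = 9 }, c = { .prio = 7 };
	struct node d = { .prio = 1 };
	struct node *it;

	pqueue_init(&q);
	pqueue_insert(&q, &a);
	pqueue_insert(&q, &b);
	pqueue_insert(&q, &c);
	pqueue_append(&q, &d);

	for (it = q.head; it; it = it->next)
		printf("prio %d\n", it->prio);	/* 9, 7, 5, 1 */
	return 0;
}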
774 | static int enqueue_pai_tasklet(struct tasklet_struct* tasklet) | ||
775 | { | ||
776 | cedf_domain_t *cluster = NULL; | ||
777 | cpu_entry_t *targetCPU = NULL; | ||
778 | int thisCPU; | ||
779 | int runLocal = 0; | ||
780 | int runNow = 0; | ||
781 | unsigned long flags; | ||
782 | |||
783 | if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner))) | ||
784 | { | ||
785 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
786 | return 0; | ||
787 | } | ||
788 | |||
789 | cluster = task_cpu_cluster(tasklet->owner); | ||
790 | |||
791 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); | ||
792 | |||
793 | thisCPU = smp_processor_id(); | ||
794 | |||
795 | #if 1 | ||
796 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
797 | { | ||
798 | cpu_entry_t* affinity = NULL; | ||
799 | |||
800 | // use this CPU if it is in our cluster and isn't running any RT work. | ||
801 | if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(cedf_cpu_entries).linked == NULL)) { | ||
802 | affinity = &(__get_cpu_var(cedf_cpu_entries)); | ||
803 | } | ||
804 | else { | ||
805 | // this CPU is busy or shouldn't run tasklets from this cluster. | ||
806 | // look for an available nearby CPU. | ||
807 | // NOTE: Affinity towards owner and not this CPU. Is this right? | ||
808 | affinity = | ||
809 | cedf_get_nearest_available_cpu(cluster, | ||
810 | &per_cpu(cedf_cpu_entries, task_cpu(tasklet->owner))); | ||
811 | } | ||
812 | |||
813 | targetCPU = affinity; | ||
814 | } | ||
815 | #endif | ||
816 | #endif | ||
817 | |||
818 | if (targetCPU == NULL) { | ||
819 | targetCPU = lowest_prio_cpu(cluster); | ||
820 | } | ||
821 | |||
822 | if (edf_higher_prio(tasklet->owner, targetCPU->linked)) { | ||
823 | if (thisCPU == targetCPU->cpu) { | ||
824 | TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__); | ||
825 | runLocal = 1; | ||
826 | runNow = 1; | ||
827 | } | ||
828 | else { | ||
829 | TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__); | ||
830 | runLocal = 0; | ||
831 | runNow = 1; | ||
832 | } | ||
833 | } | ||
834 | else { | ||
835 | runLocal = 0; | ||
836 | runNow = 0; | ||
837 | } | ||
838 | |||
839 | if(!runLocal) { | ||
840 | // enqueue the tasklet | ||
841 | __add_pai_tasklet(tasklet, cluster); | ||
842 | } | ||
843 | |||
844 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); | ||
845 | |||
846 | |||
847 | if (runLocal /*&& runNow */) { // runNow == 1 is implied | ||
848 | TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__); | ||
849 | __do_lit_tasklet(tasklet, 0ul); | ||
850 | } | ||
851 | else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied | ||
852 | TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu); | ||
853 | preempt(targetCPU); // need to be protected by cluster_lock? | ||
854 | } | ||
855 | else { | ||
856 | TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__); | ||
857 | } | ||
858 | |||
859 | return(1); // success | ||
860 | } | ||
861 | |||
862 | |||
863 | #endif | ||
864 | |||
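enqueue_pai_tasklet() above boils down to a three-way decision: run the handler inline when the local CPU is the chosen target and the tasklet's owner outranks whatever is linked there, queue it and kick a remote CPU when a remote target should be preempted, or simply queue it for later otherwise. A toy classification of that decision, with integer priorities standing in for edf_higher_prio(); the enum and function names are assumptions made for the example, not kernel symbols.

#include <stdio.h>

enum pai_action {
	PAI_RUN_LOCAL,	/* execute the handler right here, right now        */
	PAI_RUN_REMOTE,	/* queue it and trigger a preemption on the target  */
	PAI_DEFER	/* queue it; it runs once its owner's prio suffices */
};

/* Decide what to do with a bottom half whose owner has priority
 * owner_prio, given the priority of the job linked to the chosen
 * target CPU (0 meaning idle) and whether that CPU is the local one.
 * Larger numbers mean higher priority. */
static enum pai_action classify(int owner_prio, int target_linked_prio,
				int target_is_local)
{
	if (owner_prio <= target_linked_prio)
		return PAI_DEFER;
	return target_is_local ? PAI_RUN_LOCAL : PAI_RUN_REMOTE;
}

int main(void)
{
	printf("%d\n", classify(10, 0, 1));	/* PAI_RUN_LOCAL  */
	printf("%d\n", classify(10, 3, 0));	/* PAI_RUN_REMOTE */
	printf("%d\n", classify(2, 5, 1));	/* PAI_DEFER      */
	return 0;
}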
865 | |||
866 | |||
867 | |||
868 | |||
869 | |||
870 | |||
871 | |||
872 | |||
873 | |||
874 | |||
875 | |||
876 | |||
877 | |||
878 | |||
879 | |||
880 | |||
392 | /* Getting schedule() right is a bit tricky. schedule() may not make any | 881 | /* Getting schedule() right is a bit tricky. schedule() may not make any |
393 | * assumptions on the state of the current task since it may be called for a | 882 | * assumptions on the state of the current task since it may be called for a |
394 | * number of reasons. The reasons include a scheduler_tick() determined that it | 883 | * number of reasons. The reasons include a scheduler_tick() determined that it |
@@ -514,7 +1003,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) | |||
514 | raw_spin_unlock(&cluster->cluster_lock); | 1003 | raw_spin_unlock(&cluster->cluster_lock); |
515 | 1004 | ||
516 | #ifdef WANT_ALL_SCHED_EVENTS | 1005 | #ifdef WANT_ALL_SCHED_EVENTS |
517 | TRACE("cedf_lock released, next=0x%p\n", next); | 1006 | TRACE("cluster_lock released, next=0x%p\n", next); |
518 | 1007 | ||
519 | if (next) | 1008 | if (next) |
520 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | 1009 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); |
@@ -522,7 +1011,6 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) | |||
522 | TRACE("becomes idle at %llu.\n", litmus_clock()); | 1011 | TRACE("becomes idle at %llu.\n", litmus_clock()); |
523 | #endif | 1012 | #endif |
524 | 1013 | ||
525 | |||
526 | return next; | 1014 | return next; |
527 | } | 1015 | } |
528 | 1016 | ||
@@ -586,7 +1074,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running) | |||
586 | static void cedf_task_wake_up(struct task_struct *task) | 1074 | static void cedf_task_wake_up(struct task_struct *task) |
587 | { | 1075 | { |
588 | unsigned long flags; | 1076 | unsigned long flags; |
589 | lt_t now; | 1077 | //lt_t now; |
590 | cedf_domain_t *cluster; | 1078 | cedf_domain_t *cluster; |
591 | 1079 | ||
592 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | 1080 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); |
@@ -594,6 +1082,8 @@ static void cedf_task_wake_up(struct task_struct *task) | |||
594 | cluster = task_cpu_cluster(task); | 1082 | cluster = task_cpu_cluster(task); |
595 | 1083 | ||
596 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); | 1084 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); |
1085 | |||
1086 | #if 0 // sporadic task model | ||
597 | /* We need to take suspensions because of semaphores into | 1087 | /* We need to take suspensions because of semaphores into |
598 | * account! If a job resumes after being suspended due to acquiring | 1088 | * account! If a job resumes after being suspended due to acquiring |
599 | * a semaphore, it should never be treated as a new job release. | 1089 | * a semaphore, it should never be treated as a new job release. |
@@ -615,7 +1105,13 @@ static void cedf_task_wake_up(struct task_struct *task) | |||
615 | } | 1105 | } |
616 | } | 1106 | } |
617 | } | 1107 | } |
618 | cedf_job_arrival(task); | 1108 | #endif |
1109 | |||
1110 | set_rt_flags(task, RT_F_RUNNING); // periodic model | ||
1111 | |||
1112 | if(tsk_rt(task)->linked_on == NO_CPU) | ||
1113 | cedf_job_arrival(task); | ||
1114 | |||
619 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); | 1115 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); |
620 | } | 1116 | } |
621 | 1117 | ||
@@ -642,6 +1138,10 @@ static void cedf_task_exit(struct task_struct * t) | |||
642 | unsigned long flags; | 1138 | unsigned long flags; |
643 | cedf_domain_t *cluster = task_cpu_cluster(t); | 1139 | cedf_domain_t *cluster = task_cpu_cluster(t); |
644 | 1140 | ||
1141 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
1142 | flush_tasklets(cluster, t); | ||
1143 | #endif | ||
1144 | |||
645 | /* unlink if necessary */ | 1145 | /* unlink if necessary */ |
646 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); | 1146 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); |
647 | unlink(t); | 1147 | unlink(t); |
@@ -662,6 +1162,711 @@ static long cedf_admit_task(struct task_struct* tsk) | |||
662 | return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL; | 1162 | return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL; |
663 | } | 1163 | } |
664 | 1164 | ||
1165 | |||
1166 | |||
1167 | #ifdef CONFIG_LITMUS_LOCKING | ||
1168 | |||
1169 | #include <litmus/fdso.h> | ||
1170 | |||
1171 | |||
1172 | static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
1173 | { | ||
1174 | int linked_on; | ||
1175 | int check_preempt = 0; | ||
1176 | |||
1177 | cedf_domain_t* cluster = task_cpu_cluster(t); | ||
1178 | |||
1179 | if(prio_inh != NULL) | ||
1180 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | ||
1181 | else | ||
1182 | TRACE_TASK(t, "inherits priority from %p\n", prio_inh); | ||
1183 | |||
1184 | sched_trace_eff_prio_change(t, prio_inh); | ||
1185 | |||
1186 | tsk_rt(t)->inh_task = prio_inh; | ||
1187 | |||
1188 | linked_on = tsk_rt(t)->linked_on; | ||
1189 | |||
1190 | /* If it is scheduled, then we need to reorder the CPU heap. */ | ||
1191 | if (linked_on != NO_CPU) { | ||
1192 | TRACE_TASK(t, "%s: linked on %d\n", | ||
1193 | __FUNCTION__, linked_on); | ||
1194 | /* Holder is scheduled; need to re-order CPUs. | ||
1195 | * We can't use heap_decrease() here since | ||
1196 | * the cpu_heap is ordered in reverse direction, so | ||
1197 | * it is actually an increase. */ | ||
1198 | bheap_delete(cpu_lower_prio, &cluster->cpu_heap, | ||
1199 | per_cpu(cedf_cpu_entries, linked_on).hn); | ||
1200 | bheap_insert(cpu_lower_prio, &cluster->cpu_heap, | ||
1201 | per_cpu(cedf_cpu_entries, linked_on).hn); | ||
1202 | } else { | ||
1203 | /* holder may be queued: first stop queue changes */ | ||
1204 | raw_spin_lock(&cluster->domain.release_lock); | ||
1205 | if (is_queued(t)) { | ||
1206 | TRACE_TASK(t, "%s: is queued\n", __FUNCTION__); | ||
1207 | |||
1208 | /* We need to update the position of holder in some | ||
1209 | * heap. Note that this could be a release heap if | ||
1210 | * budget enforcement is used and this job overran. */ | ||
1211 | check_preempt = !bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node); | ||
1212 | |||
1213 | } else { | ||
1214 | /* Nothing to do: if it is not queued and not linked | ||
1215 | * then it is either sleeping or currently being moved | ||
1216 | * by other code (e.g., a timer interrupt handler) that | ||
1217 | * will use the correct priority when enqueuing the | ||
1218 | * task. */ | ||
1219 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", __FUNCTION__); | ||
1220 | } | ||
1221 | raw_spin_unlock(&cluster->domain.release_lock); | ||
1222 | |||
1223 | /* If holder was enqueued in a release heap, then the following | ||
1224 | * preemption check is pointless, but we can't easily detect | ||
1225 | * that case. If you want to fix this, then consider that | ||
1226 | * simply adding a state flag requires O(n) time to update when | ||
1227 | * releasing n tasks, which conflicts with the goal to have | ||
1228 | * O(log n) merges. */ | ||
1229 | if (check_preempt) { | ||
1230 | /* heap_decrease() hit the top level of the heap: make | ||
1231 | * sure preemption checks get the right task, not the | ||
1232 | * potentially stale cache. */ | ||
1233 | bheap_uncache_min(edf_ready_order, &cluster->domain.ready_queue); | ||
1234 | check_for_preemptions(cluster); | ||
1235 | } | ||
1236 | } | ||
1237 | } | ||
1238 | |||
1239 | /* called with IRQs off */ | ||
1240 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
1241 | { | ||
1242 | cedf_domain_t* cluster = task_cpu_cluster(t); | ||
1243 | |||
1244 | raw_spin_lock(&cluster->cluster_lock); | ||
1245 | |||
1246 | __set_priority_inheritance(t, prio_inh); | ||
1247 | |||
1248 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1249 | if(tsk_rt(t)->cur_klitirqd != NULL) | ||
1250 | { | ||
1251 | TRACE_TASK(t, "%s/%d inherits a new priority!\n", | ||
1252 | tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); | ||
1253 | |||
1254 | __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh); | ||
1255 | } | ||
1256 | #endif | ||
1257 | |||
1258 | raw_spin_unlock(&cluster->cluster_lock); | ||
1259 | } | ||
1260 | |||
1261 | |||
1262 | /* called with IRQs off */ | ||
1263 | static void __clear_priority_inheritance(struct task_struct* t) | ||
1264 | { | ||
1265 | TRACE_TASK(t, "priority restored\n"); | ||
1266 | |||
1267 | if(tsk_rt(t)->scheduled_on != NO_CPU) | ||
1268 | { | ||
1269 | sched_trace_eff_prio_change(t, NULL); | ||
1270 | |||
1271 | tsk_rt(t)->inh_task = NULL; | ||
1272 | |||
1273 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
1274 | * since the priority was effectively lowered. */ | ||
1275 | unlink(t); | ||
1276 | cedf_job_arrival(t); | ||
1277 | } | ||
1278 | else | ||
1279 | { | ||
1280 | __set_priority_inheritance(t, NULL); | ||
1281 | } | ||
1282 | |||
1283 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1284 | if(tsk_rt(t)->cur_klitirqd != NULL) | ||
1285 | { | ||
1286 | TRACE_TASK(t, "%s/%d inheritance set back to owner.\n", | ||
1287 | tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); | ||
1288 | |||
1289 | if(tsk_rt(tsk_rt(t)->cur_klitirqd)->scheduled_on != NO_CPU) | ||
1290 | { | ||
1291 | sched_trace_eff_prio_change(tsk_rt(t)->cur_klitirqd, t); | ||
1292 | |||
1293 | tsk_rt(tsk_rt(t)->cur_klitirqd)->inh_task = t; | ||
1294 | |||
1295 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
1296 | * since the priority was effectively lowered. */ | ||
1297 | unlink(tsk_rt(t)->cur_klitirqd); | ||
1298 | cedf_job_arrival(tsk_rt(t)->cur_klitirqd); | ||
1299 | } | ||
1300 | else | ||
1301 | { | ||
1302 | __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, t); | ||
1303 | } | ||
1304 | } | ||
1305 | #endif | ||
1306 | } | ||
1307 | |||
1308 | /* called with IRQs off */ | ||
1309 | static void clear_priority_inheritance(struct task_struct* t) | ||
1310 | { | ||
1311 | cedf_domain_t* cluster = task_cpu_cluster(t); | ||
1312 | |||
1313 | raw_spin_lock(&cluster->cluster_lock); | ||
1314 | __clear_priority_inheritance(t); | ||
1315 | raw_spin_unlock(&cluster->cluster_lock); | ||
1316 | } | ||
1317 | |||
1318 | |||
1319 | |||
1320 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1321 | /* called with IRQs off */ | ||
1322 | static void set_priority_inheritance_klitirqd(struct task_struct* klitirqd, | ||
1323 | struct task_struct* old_owner, | ||
1324 | struct task_struct* new_owner) | ||
1325 | { | ||
1326 | cedf_domain_t* cluster = task_cpu_cluster(klitirqd); | ||
1327 | |||
1328 | BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); | ||
1329 | |||
1330 | raw_spin_lock(&cluster->cluster_lock); | ||
1331 | |||
1332 | if(old_owner != new_owner) | ||
1333 | { | ||
1334 | if(old_owner) | ||
1335 | { | ||
1336 | // unreachable? | ||
1337 | tsk_rt(old_owner)->cur_klitirqd = NULL; | ||
1338 | } | ||
1339 | |||
1340 | TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n", | ||
1341 | new_owner->comm, new_owner->pid); | ||
1342 | |||
1343 | tsk_rt(new_owner)->cur_klitirqd = klitirqd; | ||
1344 | } | ||
1345 | |||
1346 | __set_priority_inheritance(klitirqd, | ||
1347 | (tsk_rt(new_owner)->inh_task == NULL) ? | ||
1348 | new_owner : | ||
1349 | tsk_rt(new_owner)->inh_task); | ||
1350 | |||
1351 | raw_spin_unlock(&cluster->cluster_lock); | ||
1352 | } | ||
1353 | |||
1354 | /* called with IRQs off */ | ||
1355 | static void clear_priority_inheritance_klitirqd(struct task_struct* klitirqd, | ||
1356 | struct task_struct* old_owner) | ||
1357 | { | ||
1358 | cedf_domain_t* cluster = task_cpu_cluster(klitirqd); | ||
1359 | |||
1360 | BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); | ||
1361 | |||
1362 | raw_spin_lock(&cluster->cluster_lock); | ||
1363 | |||
1364 | TRACE_TASK(klitirqd, "priority restored\n"); | ||
1365 | |||
1366 | if(tsk_rt(klitirqd)->scheduled_on != NO_CPU) | ||
1367 | { | ||
1368 | tsk_rt(klitirqd)->inh_task = NULL; | ||
1369 | |||
1370 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
1371 | * since the priority was effectively lowered. */ | ||
1372 | unlink(klitirqd); | ||
1373 | cedf_job_arrival(klitirqd); | ||
1374 | } | ||
1375 | else | ||
1376 | { | ||
1377 | __set_priority_inheritance(klitirqd, NULL); | ||
1378 | } | ||
1379 | |||
1380 | tsk_rt(old_owner)->cur_klitirqd = NULL; | ||
1381 | |||
1382 | raw_spin_unlock(&cluster->cluster_lock); | ||
1383 | } | ||
1384 | #endif // CONFIG_LITMUS_SOFTIRQD | ||
1385 | |||
1386 | |||
1387 | /* ******************** KFMLP support ********************** */ | ||
1388 | |||
1389 | /* struct for semaphore with priority inheritance */ | ||
1390 | struct kfmlp_queue | ||
1391 | { | ||
1392 | wait_queue_head_t wait; | ||
1393 | struct task_struct* owner; | ||
1394 | struct task_struct* hp_waiter; | ||
1395 | int count; /* number of waiters + holder */ | ||
1396 | }; | ||
1397 | |||
1398 | struct kfmlp_semaphore | ||
1399 | { | ||
1400 | struct litmus_lock litmus_lock; | ||
1401 | |||
1402 | spinlock_t lock; | ||
1403 | |||
1404 | int num_resources; /* aka k */ | ||
1405 | struct kfmlp_queue *queues; /* array */ | ||
1406 | struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */ | ||
1407 | }; | ||
1408 | |||
1409 | static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock) | ||
1410 | { | ||
1411 | return container_of(lock, struct kfmlp_semaphore, litmus_lock); | ||
1412 | } | ||
1413 | |||
1414 | static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem, | ||
1415 | struct kfmlp_queue* queue) | ||
1416 | { | ||
1417 | return (queue - &sem->queues[0]); | ||
1418 | } | ||
1419 | |||
1420 | static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem, | ||
1421 | struct task_struct* holder) | ||
1422 | { | ||
1423 | int i; | ||
1424 | for(i = 0; i < sem->num_resources; ++i) | ||
1425 | if(sem->queues[i].owner == holder) | ||
1426 | return(&sem->queues[i]); | ||
1427 | return(NULL); | ||
1428 | } | ||
1429 | |||
1430 | /* caller is responsible for locking */ | ||
1431 | static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue, | ||
1432 | struct task_struct *skip) | ||
1433 | { | ||
1434 | struct list_head *pos; | ||
1435 | struct task_struct *queued, *found = NULL; | ||
1436 | |||
1437 | list_for_each(pos, &kqueue->wait.task_list) { | ||
1438 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
1439 | task_list)->private; | ||
1440 | |||
1441 | /* Compare task prios, find high prio task. */ | ||
1442 | if (queued != skip && edf_higher_prio(queued, found)) | ||
1443 | found = queued; | ||
1444 | } | ||
1445 | return found; | ||
1446 | } | ||
1447 | |||
1448 | static inline struct kfmlp_queue* kfmlp_find_shortest( | ||
1449 | struct kfmlp_semaphore* sem, | ||
1450 | struct kfmlp_queue* search_start) | ||
1451 | { | ||
1452 | // we start our search at search_start instead of at the beginning of the | ||
1453 | // queue list to load-balance across all resources. | ||
1454 | struct kfmlp_queue* step = search_start; | ||
1455 | struct kfmlp_queue* shortest = sem->shortest_queue; | ||
1456 | |||
1457 | do | ||
1458 | { | ||
1459 | step = (step+1 != &sem->queues[sem->num_resources]) ? | ||
1460 | step+1 : &sem->queues[0]; | ||
1461 | if(step->count < shortest->count) | ||
1462 | { | ||
1463 | shortest = step; | ||
1464 | if(step->count == 0) | ||
1465 | break; /* can't get any shorter */ | ||
1466 | } | ||
1467 | }while(step != search_start); | ||
1468 | |||
1469 | return(shortest); | ||
1470 | } | ||
1471 | |||
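kfmlp_find_shortest() scans the k replica queues in a ring starting just after search_start, so successive lookups do not always favor queue 0, and it stops early as soon as it sees an empty queue. A small sketch of that circular scan over an array of queue lengths; the function and parameter names are illustrative.

#include <stdio.h>

/* Return the index of the shortest queue, scanning circularly from the
 * slot after 'start' and stopping early on an empty queue. */
static int find_shortest(const int *count, int k, int start, int cur_shortest)
{
	int shortest = cur_shortest;
	int step = start;

	do {
		step = (step + 1 == k) ? 0 : step + 1;
		if (count[step] < count[shortest]) {
			shortest = step;
			if (count[step] == 0)
				break;	/* cannot get any shorter */
		}
	} while (step != start);

	return shortest;
}

int main(void)
{
	int count[4] = { 2, 1, 3, 0 };
	/* start the scan after queue 1, with queue 1 as the current best */
	printf("shortest = %d\n", find_shortest(count, 4, 1, 1));	/* 3 */
	return 0;
}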
1472 | static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem) | ||
1473 | { | ||
1474 | /* must hold sem->lock */ | ||
1475 | |||
1476 | struct kfmlp_queue *my_queue = NULL; | ||
1477 | struct task_struct *max_hp = NULL; | ||
1478 | |||
1479 | |||
1480 | struct list_head *pos; | ||
1481 | struct task_struct *queued; | ||
1482 | int i; | ||
1483 | |||
1484 | for(i = 0; i < sem->num_resources; ++i) | ||
1485 | { | ||
1486 | if( (sem->queues[i].count > 1) && | ||
1487 | ((my_queue == NULL) || | ||
1488 | (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) ) | ||
1489 | { | ||
1490 | my_queue = &sem->queues[i]; | ||
1491 | } | ||
1492 | } | ||
1493 | |||
1494 | if(my_queue) | ||
1495 | { | ||
1496 | cedf_domain_t* cluster; | ||
1497 | |||
1498 | max_hp = my_queue->hp_waiter; | ||
1499 | BUG_ON(!max_hp); | ||
1500 | |||
1501 | TRACE_CUR("queue %d: stealing %s/%d from queue %d\n", | ||
1502 | kfmlp_get_idx(sem, my_queue), | ||
1503 | max_hp->comm, max_hp->pid, | ||
1504 | kfmlp_get_idx(sem, my_queue)); | ||
1505 | |||
1506 | my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp); | ||
1507 | |||
1508 | /* | ||
1509 | if(my_queue->hp_waiter) | ||
1510 | TRACE_CUR("queue %d: new hp_waiter is %s/%d\n", | ||
1511 | kfmlp_get_idx(sem, my_queue), | ||
1512 | my_queue->hp_waiter->comm, | ||
1513 | my_queue->hp_waiter->pid); | ||
1514 | else | ||
1515 | TRACE_CUR("queue %d: new hp_waiter is %p\n", | ||
1516 | kfmlp_get_idx(sem, my_queue), NULL); | ||
1517 | */ | ||
1518 | |||
1519 | cluster = task_cpu_cluster(max_hp); | ||
1520 | |||
1521 | raw_spin_lock(&cluster->cluster_lock); | ||
1522 | |||
1523 | /* | ||
1524 | if(my_queue->owner) | ||
1525 | TRACE_CUR("queue %d: owner is %s/%d\n", | ||
1526 | kfmlp_get_idx(sem, my_queue), | ||
1527 | my_queue->owner->comm, | ||
1528 | my_queue->owner->pid); | ||
1529 | else | ||
1530 | TRACE_CUR("queue %d: owner is %p\n", | ||
1531 | kfmlp_get_idx(sem, my_queue), | ||
1532 | NULL); | ||
1533 | */ | ||
1534 | |||
1535 | if(tsk_rt(my_queue->owner)->inh_task == max_hp) | ||
1536 | { | ||
1537 | __clear_priority_inheritance(my_queue->owner); | ||
1538 | if(my_queue->hp_waiter != NULL) | ||
1539 | { | ||
1540 | __set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); | ||
1541 | } | ||
1542 | } | ||
1543 | raw_spin_unlock(&cluster->cluster_lock); | ||
1544 | |||
1545 | list_for_each(pos, &my_queue->wait.task_list) | ||
1546 | { | ||
1547 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
1548 | task_list)->private; | ||
1549 | /* Compare task prios, find high prio task. */ | ||
1550 | if (queued == max_hp) | ||
1551 | { | ||
1552 | /* | ||
1553 | TRACE_CUR("queue %d: found entry in wait queue. REMOVING!\n", | ||
1554 | kfmlp_get_idx(sem, my_queue)); | ||
1555 | */ | ||
1556 | __remove_wait_queue(&my_queue->wait, | ||
1557 | list_entry(pos, wait_queue_t, task_list)); | ||
1558 | break; | ||
1559 | } | ||
1560 | } | ||
1561 | --(my_queue->count); | ||
1562 | } | ||
1563 | |||
1564 | return(max_hp); | ||
1565 | } | ||
1566 | |||
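kfmlp_remove_hp_waiter() steals from whichever queue still has a waiter besides its holder and whose highest-priority waiter is the most urgent. A toy selection function over per-queue counts and waiter priorities, with integer priorities in place of edf_higher_prio(); the names are illustrative.

#include <stdio.h>

/* Pick the queue to steal from: among queues with at least one waiter
 * besides the holder (count > 1), choose the one whose highest-priority
 * waiter is the most urgent.  Returns -1 if there is nothing to steal.
 * Larger prio values are more urgent. */
static int pick_steal_victim(const int *count, const int *hp_waiter_prio, int k)
{
	int victim = -1;
	int i;

	for (i = 0; i < k; i++) {
		if (count[i] > 1 &&
		    (victim < 0 || hp_waiter_prio[i] > hp_waiter_prio[victim]))
			victim = i;
	}
	return victim;
}

int main(void)
{
	int count[3]   = { 1, 3, 2 };	/* queue 0 has no waiters */
	int hp_prio[3] = { 9, 4, 6 };
	printf("steal from queue %d\n", pick_steal_victim(count, hp_prio, 3)); /* 2 */
	return 0;
}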
1567 | int cedf_kfmlp_lock(struct litmus_lock* l) | ||
1568 | { | ||
1569 | struct task_struct* t = current; | ||
1570 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1571 | struct kfmlp_queue* my_queue; | ||
1572 | wait_queue_t wait; | ||
1573 | unsigned long flags; | ||
1574 | |||
1575 | if (!is_realtime(t)) | ||
1576 | return -EPERM; | ||
1577 | |||
1578 | spin_lock_irqsave(&sem->lock, flags); | ||
1579 | |||
1580 | my_queue = sem->shortest_queue; | ||
1581 | |||
1582 | if (my_queue->owner) { | ||
1583 | /* resource is not free => must suspend and wait */ | ||
1584 | TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n", | ||
1585 | kfmlp_get_idx(sem, my_queue)); | ||
1586 | |||
1587 | init_waitqueue_entry(&wait, t); | ||
1588 | |||
1589 | /* FIXME: interruptible would be nice some day */ | ||
1590 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
1591 | |||
1592 | __add_wait_queue_tail_exclusive(&my_queue->wait, &wait); | ||
1593 | |||
1594 | /* check if we need to activate priority inheritance */ | ||
1595 | if (edf_higher_prio(t, my_queue->hp_waiter)) | ||
1596 | { | ||
1597 | my_queue->hp_waiter = t; | ||
1598 | if (edf_higher_prio(t, my_queue->owner)) | ||
1599 | { | ||
1600 | set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); | ||
1601 | } | ||
1602 | } | ||
1603 | |||
1604 | ++(my_queue->count); | ||
1605 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
1606 | |||
1607 | /* release lock before sleeping */ | ||
1608 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1609 | |||
1610 | /* We depend on the FIFO order. Thus, we don't need to recheck | ||
1611 | * when we wake up; we are guaranteed to have the lock since | ||
1612 | * there is only one wake up per release (or steal). | ||
1613 | */ | ||
1614 | schedule(); | ||
1615 | |||
1616 | |||
1617 | if(my_queue->owner == t) | ||
1618 | { | ||
1619 | TRACE_CUR("queue %d: acquired through waiting\n", | ||
1620 | kfmlp_get_idx(sem, my_queue)); | ||
1621 | } | ||
1622 | else | ||
1623 | { | ||
1624 | /* this case may happen if our wait entry was stolen | ||
1625 | between queues. record where we went.*/ | ||
1626 | my_queue = kfmlp_get_queue(sem, t); | ||
1627 | BUG_ON(!my_queue); | ||
1628 | TRACE_CUR("queue %d: acquired through stealing\n", | ||
1629 | kfmlp_get_idx(sem, my_queue)); | ||
1630 | } | ||
1631 | } | ||
1632 | else | ||
1633 | { | ||
1634 | TRACE_CUR("queue %d: acquired immediately\n", | ||
1635 | kfmlp_get_idx(sem, my_queue)); | ||
1636 | |||
1637 | my_queue->owner = t; | ||
1638 | |||
1639 | ++(my_queue->count); | ||
1640 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
1641 | |||
1642 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1643 | } | ||
1644 | |||
1645 | return kfmlp_get_idx(sem, my_queue); | ||
1646 | } | ||
1647 | |||
1648 | int cedf_kfmlp_unlock(struct litmus_lock* l) | ||
1649 | { | ||
1650 | struct task_struct *t = current, *next; | ||
1651 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1652 | struct kfmlp_queue *my_queue; | ||
1653 | unsigned long flags; | ||
1654 | int err = 0; | ||
1655 | |||
1656 | spin_lock_irqsave(&sem->lock, flags); | ||
1657 | |||
1658 | my_queue = kfmlp_get_queue(sem, t); | ||
1659 | |||
1660 | if (!my_queue) { | ||
1661 | err = -EINVAL; | ||
1662 | goto out; | ||
1663 | } | ||
1664 | |||
1665 | /* check if there are jobs waiting for this resource */ | ||
1666 | next = __waitqueue_remove_first(&my_queue->wait); | ||
1667 | if (next) { | ||
1668 | /* | ||
1669 | TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n", | ||
1670 | kfmlp_get_idx(sem, my_queue), | ||
1671 | next->comm, next->pid); | ||
1672 | */ | ||
1673 | /* next becomes the resource holder */ | ||
1674 | my_queue->owner = next; | ||
1675 | |||
1676 | --(my_queue->count); | ||
1677 | if(my_queue->count < sem->shortest_queue->count) | ||
1678 | { | ||
1679 | sem->shortest_queue = my_queue; | ||
1680 | } | ||
1681 | |||
1682 | TRACE_CUR("queue %d: lock ownership passed to %s/%d\n", | ||
1683 | kfmlp_get_idx(sem, my_queue), next->comm, next->pid); | ||
1684 | |||
1685 | /* determine new hp_waiter if necessary */ | ||
1686 | if (next == my_queue->hp_waiter) { | ||
1687 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
1688 | /* next has the highest priority --- it doesn't need to | ||
1689 | * inherit. However, we need to make sure that the | ||
1690 | * next-highest priority in the queue is reflected in | ||
1691 | * hp_waiter. */ | ||
1692 | my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next); | ||
1693 | if (my_queue->hp_waiter) | ||
1694 | TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue)); | ||
1695 | else | ||
1696 | TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue)); | ||
1697 | } else { | ||
1698 | /* Well, if next is not the highest-priority waiter, | ||
1699 | * then it ought to inherit the highest-priority | ||
1700 | * waiter's priority. */ | ||
1701 | set_priority_inheritance(next, my_queue->hp_waiter); | ||
1702 | } | ||
1703 | |||
1704 | /* wake up next */ | ||
1705 | wake_up_process(next); | ||
1706 | } | ||
1707 | else | ||
1708 | { | ||
1709 | TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue)); | ||
1710 | |||
1711 | next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */ | ||
1712 | |||
1713 | /* | ||
1714 | if(next) | ||
1715 | TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n", | ||
1716 | kfmlp_get_idx(sem, my_queue), | ||
1717 | next->comm, next->pid); | ||
1718 | */ | ||
1719 | |||
1720 | my_queue->owner = next; | ||
1721 | |||
1722 | if(next) | ||
1723 | { | ||
1724 | TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n", | ||
1725 | kfmlp_get_idx(sem, my_queue), | ||
1726 | next->comm, next->pid); | ||
1727 | |||
1728 | /* wake up next */ | ||
1729 | wake_up_process(next); | ||
1730 | } | ||
1731 | else | ||
1732 | { | ||
1733 | TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue)); | ||
1734 | |||
1735 | --(my_queue->count); | ||
1736 | if(my_queue->count < sem->shortest_queue->count) | ||
1737 | { | ||
1738 | sem->shortest_queue = my_queue; | ||
1739 | } | ||
1740 | } | ||
1741 | } | ||
1742 | |||
1743 | /* we lose the benefit of priority inheritance (if any) */ | ||
1744 | if (tsk_rt(t)->inh_task) | ||
1745 | clear_priority_inheritance(t); | ||
1746 | |||
1747 | out: | ||
1748 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1749 | |||
1750 | return err; | ||
1751 | } | ||
1752 | |||
1753 | int cedf_kfmlp_close(struct litmus_lock* l) | ||
1754 | { | ||
1755 | struct task_struct *t = current; | ||
1756 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1757 | struct kfmlp_queue *my_queue; | ||
1758 | unsigned long flags; | ||
1759 | |||
1760 | int owner; | ||
1761 | |||
1762 | spin_lock_irqsave(&sem->lock, flags); | ||
1763 | |||
1764 | my_queue = kfmlp_get_queue(sem, t); | ||
1765 | owner = (my_queue) ? (my_queue->owner == t) : 0; | ||
1766 | |||
1767 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1768 | |||
1769 | if (owner) | ||
1770 | cedf_kfmlp_unlock(l); | ||
1771 | |||
1772 | return 0; | ||
1773 | } | ||
1774 | |||
1775 | void cedf_kfmlp_free(struct litmus_lock* l) | ||
1776 | { | ||
1777 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1778 | kfree(sem->queues); | ||
1779 | kfree(sem); | ||
1780 | } | ||
1781 | |||
1782 | static struct litmus_lock_ops cedf_kfmlp_lock_ops = { | ||
1783 | .close = cedf_kfmlp_close, | ||
1784 | .lock = cedf_kfmlp_lock, | ||
1785 | .unlock = cedf_kfmlp_unlock, | ||
1786 | .deallocate = cedf_kfmlp_free, | ||
1787 | }; | ||
1788 | |||
1789 | static struct litmus_lock* cedf_new_kfmlp(void* __user arg, int* ret_code) | ||
1790 | { | ||
1791 | struct kfmlp_semaphore* sem; | ||
1792 | int num_resources = 0; | ||
1793 | int i; | ||
1794 | |||
1795 | if(!access_ok(VERIFY_READ, arg, sizeof(num_resources))) | ||
1796 | { | ||
1797 | *ret_code = -EINVAL; | ||
1798 | return(NULL); | ||
1799 | } | ||
1800 | if(__copy_from_user(&num_resources, arg, sizeof(num_resources))) | ||
1801 | { | ||
1802 | *ret_code = -EINVAL; | ||
1803 | return(NULL); | ||
1804 | } | ||
1805 | if(num_resources < 1) | ||
1806 | { | ||
1807 | *ret_code = -EINVAL; | ||
1808 | return(NULL); | ||
1809 | } | ||
1810 | |||
1811 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
1812 | if(!sem) | ||
1813 | { | ||
1814 | *ret_code = -ENOMEM; | ||
1815 | return NULL; | ||
1816 | } | ||
1817 | |||
1818 | sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL); | ||
1819 | if(!sem->queues) | ||
1820 | { | ||
1821 | kfree(sem); | ||
1822 | *ret_code = -ENOMEM; | ||
1823 | return NULL; | ||
1824 | } | ||
1825 | |||
1826 | sem->litmus_lock.ops = &cedf_kfmlp_lock_ops; | ||
1827 | spin_lock_init(&sem->lock); | ||
1828 | sem->num_resources = num_resources; | ||
1829 | |||
1830 | for(i = 0; i < num_resources; ++i) | ||
1831 | { | ||
1832 | sem->queues[i].owner = NULL; | ||
1833 | sem->queues[i].hp_waiter = NULL; | ||
1834 | init_waitqueue_head(&sem->queues[i].wait); | ||
1835 | sem->queues[i].count = 0; | ||
1836 | } | ||
1837 | |||
1838 | sem->shortest_queue = &sem->queues[0]; | ||
1839 | |||
1840 | *ret_code = 0; | ||
1841 | return &sem->litmus_lock; | ||
1842 | } | ||
1843 | |||
1844 | |||
1845 | /* **** lock constructor **** */ | ||
1846 | |||
1847 | static long cedf_allocate_lock(struct litmus_lock **lock, int type, | ||
1848 | void* __user arg) | ||
1849 | { | ||
1850 | int err = -ENXIO; | ||
1851 | |||
1852 | /* C-EDF currently only supports the KFMLP for global resources | ||
1853 | WITHIN a given cluster. DO NOT USE CROSS-CLUSTER! */ | ||
1854 | switch (type) { | ||
1855 | case KFMLP_SEM: | ||
1856 | *lock = cedf_new_kfmlp(arg, &err); | ||
1857 | break; | ||
1858 | }; | ||
1859 | |||
1860 | return err; | ||
1861 | } | ||
1862 | |||
1863 | #endif // CONFIG_LITMUS_LOCKING | ||
1864 | |||
1865 | |||
1866 | |||
1867 | |||
1868 | |||
1869 | |||
665 | /* total number of cluster */ | 1870 | /* total number of cluster */ |
666 | static int num_clusters; | 1871 | static int num_clusters; |
667 | /* we do not support cluster of different sizes */ | 1872 | /* we do not support cluster of different sizes */ |
@@ -755,6 +1960,13 @@ static long cedf_activate_plugin(void) | |||
755 | bheap_init(&(cedf[i].cpu_heap)); | 1960 | bheap_init(&(cedf[i].cpu_heap)); |
756 | edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs); | 1961 | edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs); |
757 | 1962 | ||
1963 | |||
1964 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
1965 | cedf[i].pending_tasklets.head = NULL; | ||
1966 | cedf[i].pending_tasklets.tail = &(cedf[i].pending_tasklets.head); | ||
1967 | #endif | ||
1968 | |||
1969 | |||
758 | if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC)) | 1970 | if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC)) |
759 | return -ENOMEM; | 1971 | return -ENOMEM; |
760 | #ifdef CONFIG_RELEASE_MASTER | 1972 | #ifdef CONFIG_RELEASE_MASTER |
@@ -812,6 +2024,40 @@ static long cedf_activate_plugin(void) | |||
812 | break; | 2024 | break; |
813 | } | 2025 | } |
814 | } | 2026 | } |
2027 | |||
2028 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
2029 | { | ||
2030 | /* distribute the daemons evenly across the clusters. */ | ||
2031 | int* affinity = kmalloc(NR_LITMUS_SOFTIRQD * sizeof(int), GFP_ATOMIC); | ||
2032 | int num_daemons_per_cluster = NR_LITMUS_SOFTIRQD / num_clusters; | ||
2033 | int left_over = NR_LITMUS_SOFTIRQD % num_clusters; | ||
2034 | |||
2035 | int daemon = 0; | ||
2036 | for(i = 0; i < num_clusters; ++i) | ||
2037 | { | ||
2038 | int num_on_this_cluster = num_daemons_per_cluster; | ||
2039 | if(left_over) | ||
2040 | { | ||
2041 | ++num_on_this_cluster; | ||
2042 | --left_over; | ||
2043 | } | ||
2044 | |||
2045 | for(j = 0; j < num_on_this_cluster; ++j) | ||
2046 | { | ||
2047 | // first CPU of this cluster | ||
2048 | affinity[daemon++] = i*cluster_size; | ||
2049 | } | ||
2050 | } | ||
2051 | |||
2052 | spawn_klitirqd(affinity); | ||
2053 | |||
2054 | kfree(affinity); | ||
2055 | } | ||
2056 | #endif | ||
2057 | |||
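The activation hook above spreads the NR_LITMUS_SOFTIRQD klitirqd daemons across the clusters, giving the first few clusters one extra daemon when the division is uneven, and pins each daemon to the first CPU of its cluster. A quick stand-alone sketch of that distribution; the constants and variable names are assumptions chosen for the example.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const int nr_daemons = 7, num_clusters = 3, cluster_size = 4;
	int per_cluster = nr_daemons / num_clusters;
	int left_over   = nr_daemons % num_clusters;
	int *affinity   = malloc(nr_daemons * sizeof(int));
	int daemon = 0, i, j;

	if (!affinity)
		return 1;

	for (i = 0; i < num_clusters; i++) {
		int on_this_cluster = per_cluster + (left_over ? 1 : 0);
		if (left_over)
			left_over--;
		for (j = 0; j < on_this_cluster; j++)
			affinity[daemon++] = i * cluster_size;	/* first CPU of cluster i */
	}

	for (i = 0; i < nr_daemons; i++)
		printf("daemon %d -> CPU %d\n", i, affinity[i]);
	/* 7 daemons over 3 clusters of 4 CPUs: CPUs 0,0,0, 4,4, 8,8 */

	free(affinity);
	return 0;
}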
2058 | #ifdef CONFIG_LITMUS_NVIDIA | ||
2059 | init_nvidia_info(); | ||
2060 | #endif | ||
815 | 2061 | ||
816 | free_cpumask_var(mask); | 2062 | free_cpumask_var(mask); |
817 | clusters_allocated = 1; | 2063 | clusters_allocated = 1; |
@@ -831,6 +2077,19 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { | |||
831 | .task_block = cedf_task_block, | 2077 | .task_block = cedf_task_block, |
832 | .admit_task = cedf_admit_task, | 2078 | .admit_task = cedf_admit_task, |
833 | .activate_plugin = cedf_activate_plugin, | 2079 | .activate_plugin = cedf_activate_plugin, |
2080 | #ifdef CONFIG_LITMUS_LOCKING | ||
2081 | .allocate_lock = cedf_allocate_lock, | ||
2082 | .set_prio_inh = set_priority_inheritance, | ||
2083 | .clear_prio_inh = clear_priority_inheritance, | ||
2084 | #endif | ||
2085 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
2086 | .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd, | ||
2087 | .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd, | ||
2088 | #endif | ||
2089 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
2090 | .enqueue_pai_tasklet = enqueue_pai_tasklet, | ||
2091 | .run_tasklets = run_tasklets, | ||
2092 | #endif | ||
834 | }; | 2093 | }; |
835 | 2094 | ||
836 | static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL; | 2095 | static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL; |
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 6ed504f4750e..3ddab5875c8c 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -12,6 +12,8 @@ | |||
12 | #include <linux/percpu.h> | 12 | #include <linux/percpu.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/uaccess.h> | ||
16 | |||
15 | 17 | ||
16 | #include <litmus/litmus.h> | 18 | #include <litmus/litmus.h> |
17 | #include <litmus/jobs.h> | 19 | #include <litmus/jobs.h> |
@@ -30,6 +32,24 @@ | |||
30 | 32 | ||
31 | #include <linux/module.h> | 33 | #include <linux/module.h> |
32 | 34 | ||
35 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
36 | #include <litmus/affinity.h> | ||
37 | #endif | ||
38 | |||
39 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
40 | #include <litmus/litmus_softirq.h> | ||
41 | #endif | ||
42 | |||
43 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
44 | #include <linux/interrupt.h> | ||
45 | #include <litmus/trace.h> | ||
46 | #endif | ||
47 | |||
48 | #ifdef CONFIG_LITMUS_NVIDIA | ||
49 | #include <litmus/nvidia_info.h> | ||
50 | #endif | ||
51 | |||
52 | |||
33 | /* Overview of GSN-EDF operations. | 53 | /* Overview of GSN-EDF operations. |
34 | * | 54 | * |
35 | * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This | 55 | * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This |
@@ -116,6 +136,16 @@ static struct bheap gsnedf_cpu_heap; | |||
116 | static rt_domain_t gsnedf; | 136 | static rt_domain_t gsnedf; |
117 | #define gsnedf_lock (gsnedf.ready_lock) | 137 | #define gsnedf_lock (gsnedf.ready_lock) |
118 | 138 | ||
139 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
140 | struct tasklet_head | ||
141 | { | ||
142 | struct tasklet_struct *head; | ||
143 | struct tasklet_struct **tail; | ||
144 | }; | ||
145 | |||
146 | struct tasklet_head gsnedf_pending_tasklets; | ||
147 | #endif | ||
148 | |||
119 | 149 | ||
120 | /* Uncomment this if you want to see all scheduling decisions in the | 150 | /* Uncomment this if you want to see all scheduling decisions in the |
121 | * TRACE() log. | 151 | * TRACE() log. |
@@ -313,7 +343,7 @@ static void check_for_preemptions(void) | |||
313 | static noinline void gsnedf_job_arrival(struct task_struct* task) | 343 | static noinline void gsnedf_job_arrival(struct task_struct* task) |
314 | { | 344 | { |
315 | BUG_ON(!task); | 345 | BUG_ON(!task); |
316 | 346 | ||
317 | requeue(task); | 347 | requeue(task); |
318 | check_for_preemptions(); | 348 | check_for_preemptions(); |
319 | } | 349 | } |
@@ -334,9 +364,13 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | |||
334 | static noinline void job_completion(struct task_struct *t, int forced) | 364 | static noinline void job_completion(struct task_struct *t, int forced) |
335 | { | 365 | { |
336 | BUG_ON(!t); | 366 | BUG_ON(!t); |
337 | 367 | ||
338 | sched_trace_task_completion(t, forced); | 368 | sched_trace_task_completion(t, forced); |
339 | 369 | ||
370 | #ifdef CONFIG_LITMUS_NVIDIA | ||
371 | atomic_set(&tsk_rt(t)->nv_int_count, 0); | ||
372 | #endif | ||
373 | |||
340 | TRACE_TASK(t, "job_completion().\n"); | 374 | TRACE_TASK(t, "job_completion().\n"); |
341 | 375 | ||
342 | /* set flags */ | 376 | /* set flags */ |
@@ -379,6 +413,414 @@ static void gsnedf_tick(struct task_struct* t) | |||
379 | } | 413 | } |
380 | } | 414 | } |
381 | 415 | ||
416 | |||
417 | |||
418 | |||
419 | |||
420 | |||
421 | |||
422 | |||
423 | |||
424 | |||
425 | |||
426 | |||
427 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
428 | |||
429 | |||
430 | static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed) | ||
431 | { | ||
432 | if (!atomic_read(&tasklet->count)) { | ||
433 | sched_trace_tasklet_begin(tasklet->owner); | ||
434 | |||
435 | if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) | ||
436 | { | ||
437 | BUG(); | ||
438 | } | ||
439 | TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed); | ||
440 | tasklet->func(tasklet->data); | ||
441 | tasklet_unlock(tasklet); | ||
442 | |||
443 | sched_trace_tasklet_end(tasklet->owner, flushed); | ||
444 | } | ||
445 | else { | ||
446 | BUG(); | ||
447 | } | ||
448 | } | ||
449 | |||
450 | |||
451 | static void __extract_tasklets(struct task_struct* task, struct tasklet_head* task_tasklets) | ||
452 | { | ||
453 | struct tasklet_struct* step; | ||
454 | struct tasklet_struct* tasklet; | ||
455 | struct tasklet_struct* prev; | ||
456 | |||
457 | task_tasklets->head = NULL; | ||
458 | task_tasklets->tail = &(task_tasklets->head); | ||
459 | |||
460 | prev = NULL; | ||
461 | for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next) | ||
462 | { | ||
463 | if(step->owner == task) | ||
464 | { | ||
465 | TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid); | ||
466 | |||
467 | tasklet = step; | ||
468 | |||
469 | if(prev) { | ||
470 | prev->next = tasklet->next; | ||
471 | } | ||
472 | else if(gsnedf_pending_tasklets.head == tasklet) { | ||
473 | // we're at the head. | ||
474 | gsnedf_pending_tasklets.head = tasklet->next; | ||
475 | } | ||
476 | |||
477 | if(gsnedf_pending_tasklets.tail == &(tasklet->next)) { | ||
478 | // we're at the tail | ||
479 | if(prev) { | ||
480 | gsnedf_pending_tasklets.tail = &(prev->next); | ||
481 | } | ||
482 | else { | ||
483 | gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); | ||
484 | } | ||
485 | } | ||
486 | |||
487 | tasklet->next = NULL; | ||
488 | *(task_tasklets->tail) = tasklet; | ||
489 | task_tasklets->tail = &(tasklet->next); | ||
490 | } | ||
491 | else { | ||
492 | prev = step; | ||
493 | } | ||
494 | } | ||
495 | } | ||
496 | |||
497 | static void flush_tasklets(struct task_struct* task) | ||
498 | { | ||
499 | unsigned long flags; | ||
500 | struct tasklet_head task_tasklets; | ||
501 | struct tasklet_struct* step; | ||
502 | |||
503 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
504 | __extract_tasklets(task, &task_tasklets); | ||
505 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
506 | |||
507 | if(task_tasklets.head != NULL) { | ||
508 | TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid); | ||
509 | } | ||
510 | |||
511 | // now execute any flushed tasklets. | ||
512 | for(step = task_tasklets.head; step != NULL; /**/) | ||
513 | { | ||
514 | struct tasklet_struct* temp = step->next; | ||
515 | |||
516 | step->next = NULL; | ||
517 | __do_lit_tasklet(step, 1ul); | ||
518 | |||
519 | step = temp; | ||
520 | } | ||
521 | } | ||
522 | |||
523 | |||
524 | static void do_lit_tasklets(struct task_struct* sched_task) | ||
525 | { | ||
526 | int work_to_do = 1; | ||
527 | struct tasklet_struct *tasklet = NULL; | ||
528 | //struct tasklet_struct *step; | ||
529 | unsigned long flags; | ||
530 | |||
531 | while(work_to_do) { | ||
532 | |||
533 | TS_NV_SCHED_BOTISR_START; | ||
534 | |||
535 | // remove tasklet at head of list if it has higher priority. | ||
536 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
537 | |||
538 | /* | ||
539 | step = gsnedf_pending_tasklets.head; | ||
540 | TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); | ||
541 | while(step != NULL){ | ||
542 | TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); | ||
543 | step = step->next; | ||
544 | } | ||
545 | TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1); | ||
546 | TRACE("%s: done.\n", __FUNCTION__); | ||
547 | */ | ||
548 | |||
549 | |||
550 | if(gsnedf_pending_tasklets.head != NULL) { | ||
551 | // remove tasklet at head. | ||
552 | tasklet = gsnedf_pending_tasklets.head; | ||
553 | |||
554 | if(edf_higher_prio(tasklet->owner, sched_task)) { | ||
555 | |||
556 | if(NULL == tasklet->next) { | ||
557 | // tasklet is at the head, list only has one element | ||
558 | TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); | ||
559 | gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); | ||
560 | } | ||
561 | |||
562 | // remove the tasklet from the queue | ||
563 | gsnedf_pending_tasklets.head = tasklet->next; | ||
564 | |||
565 | TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); | ||
566 | } | ||
567 | else { | ||
568 | TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id()); | ||
569 | tasklet = NULL; | ||
570 | } | ||
571 | } | ||
572 | else { | ||
573 | TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); | ||
574 | } | ||
575 | |||
576 | |||
577 | /* | ||
578 | step = gsnedf_pending_tasklets.head; | ||
579 | TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); | ||
580 | while(step != NULL){ | ||
581 | TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); | ||
582 | step = step->next; | ||
583 | } | ||
584 | TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1); | ||
585 | TRACE("%s: done.\n", __FUNCTION__); | ||
586 | */ | ||
587 | |||
588 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
589 | |||
590 | TS_NV_SCHED_BOTISR_END; | ||
591 | |||
592 | if(tasklet) { | ||
593 | __do_lit_tasklet(tasklet, 0ul); | ||
594 | tasklet = NULL; | ||
595 | } | ||
596 | else { | ||
597 | work_to_do = 0; | ||
598 | } | ||
599 | } | ||
600 | |||
601 | //TRACE("%s: exited.\n", __FUNCTION__); | ||
602 | } | ||
603 | |||
604 | |||
605 | static void run_tasklets(struct task_struct* sched_task) | ||
606 | { | ||
607 | #if 0 | ||
608 | int task_is_rt = is_realtime(sched_task); | ||
609 | cedf_domain_t* cluster; | ||
610 | |||
611 | if(is_realtime(sched_task)) { | ||
612 | cluster = task_cpu_cluster(sched_task); | ||
613 | } | ||
614 | else { | ||
615 | cluster = remote_cluster(get_cpu()); | ||
616 | } | ||
617 | |||
618 | if(cluster && gsnedf_pending_tasklets.head != NULL) { | ||
619 | TRACE("%s: There are tasklets to process.\n", __FUNCTION__); | ||
620 | |||
621 | do_lit_tasklets(cluster, sched_task); | ||
622 | } | ||
623 | |||
624 | if(!task_is_rt) { | ||
625 | put_cpu_no_resched(); | ||
626 | } | ||
627 | #else | ||
628 | |||
629 | preempt_disable(); | ||
630 | |||
631 | if(gsnedf_pending_tasklets.head != NULL) { | ||
632 | TRACE("%s: There are tasklets to process.\n", __FUNCTION__); | ||
633 | do_lit_tasklets(sched_task); | ||
634 | } | ||
635 | |||
636 | preempt_enable_no_resched(); | ||
637 | |||
638 | #endif | ||
639 | } | ||
640 | |||
641 | |||
642 | static void __add_pai_tasklet(struct tasklet_struct* tasklet) | ||
643 | { | ||
644 | struct tasklet_struct* step; | ||
645 | |||
646 | /* | ||
647 | step = gsnedf_pending_tasklets.head; | ||
648 | TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); | ||
649 | while(step != NULL){ | ||
650 | TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); | ||
651 | step = step->next; | ||
652 | } | ||
653 | TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1); | ||
654 | TRACE("%s: done.\n", __FUNCTION__); | ||
655 | */ | ||
656 | |||
657 | |||
658 | tasklet->next = NULL; // make sure there are no old values floating around | ||
659 | |||
660 | step = gsnedf_pending_tasklets.head; | ||
661 | if(step == NULL) { | ||
662 | TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid); | ||
663 | // insert at tail. | ||
664 | *(gsnedf_pending_tasklets.tail) = tasklet; | ||
665 | gsnedf_pending_tasklets.tail = &(tasklet->next); | ||
666 | } | ||
667 | else if((*(gsnedf_pending_tasklets.tail) != NULL) && | ||
668 | edf_higher_prio((*(gsnedf_pending_tasklets.tail))->owner, tasklet->owner)) { | ||
669 | // insert at tail. | ||
670 | TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid); | ||
671 | |||
672 | *(gsnedf_pending_tasklets.tail) = tasklet; | ||
673 | gsnedf_pending_tasklets.tail = &(tasklet->next); | ||
674 | } | ||
675 | else { | ||
676 | |||
677 | //WARN_ON(1 == 1); | ||
678 | |||
679 | // insert the tasklet somewhere in the middle. | ||
680 | |||
681 | TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__); | ||
682 | |||
683 | while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) { | ||
684 | step = step->next; | ||
685 | } | ||
686 | |||
687 | // insert tasklet right before step->next. | ||
688 | |||
689 | TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1); | ||
690 | |||
691 | tasklet->next = step->next; | ||
692 | step->next = tasklet; | ||
693 | |||
694 | // if the tasklet out-prioritizes the old head, move it to the front instead. | ||
695 | if((gsnedf_pending_tasklets.head == step) && edf_higher_prio(tasklet->owner, step->owner)) { | ||
696 | TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid); | ||
697 | step->next = tasklet->next; | ||
698 | tasklet->next = step; | ||
699 | gsnedf_pending_tasklets.head = tasklet; | ||
700 | } | ||
700 | } | ||
701 | |||
702 | /* | ||
703 | step = gsnedf_pending_tasklets.head; | ||
704 | TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); | ||
705 | while(step != NULL){ | ||
706 | TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); | ||
707 | step = step->next; | ||
708 | } | ||
709 | TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1); | ||
710 | TRACE("%s: done.\n", __FUNCTION__); | ||
711 | */ | ||
712 | |||
713 | // NOTE: the list above is maintained in priority order; the old unordered append is kept below for reference. | ||
714 | // tasklet->next = NULL; | ||
715 | // *(gsnedf_pending_tasklets.tail) = tasklet; | ||
716 | // gsnedf_pending_tasklets.tail = &tasklet->next; | ||
717 | } | ||
718 | |||
719 | static int enqueue_pai_tasklet(struct tasklet_struct* tasklet) | ||
720 | { | ||
721 | cpu_entry_t *targetCPU = NULL; | ||
722 | int thisCPU; | ||
723 | int runLocal = 0; | ||
724 | int runNow = 0; | ||
725 | unsigned long flags; | ||
726 | |||
727 | if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner))) | ||
728 | { | ||
729 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
730 | return 0; | ||
731 | } | ||
732 | |||
733 | |||
734 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
735 | |||
736 | thisCPU = smp_processor_id(); | ||
737 | |||
738 | #if 1 | ||
739 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
740 | { | ||
741 | cpu_entry_t* affinity = NULL; | ||
742 | |||
743 | // use this CPU if it is in our cluster and isn't running any RT work. | ||
744 | if( | ||
745 | #ifdef CONFIG_RELEASE_MASTER | ||
746 | (thisCPU != gsnedf.release_master) && | ||
747 | #endif | ||
748 | (__get_cpu_var(gsnedf_cpu_entries).linked == NULL)) { | ||
749 | affinity = &(__get_cpu_var(gsnedf_cpu_entries)); | ||
750 | } | ||
751 | else { | ||
752 | // this CPU is busy or should not run the tasklet. | ||
753 | // look for an available nearby CPU. | ||
754 | // NOTE: Affinity towards owner and not this CPU. Is this right? | ||
755 | affinity = | ||
756 | gsnedf_get_nearest_available_cpu( | ||
757 | &per_cpu(gsnedf_cpu_entries, task_cpu(tasklet->owner))); | ||
758 | } | ||
759 | |||
760 | targetCPU = affinity; | ||
761 | } | ||
762 | #endif | ||
763 | #endif | ||
764 | |||
765 | if (targetCPU == NULL) { | ||
766 | targetCPU = lowest_prio_cpu(); | ||
767 | } | ||
768 | |||
769 | if (edf_higher_prio(tasklet->owner, targetCPU->linked)) { | ||
770 | if (thisCPU == targetCPU->cpu) { | ||
771 | TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__); | ||
772 | runLocal = 1; | ||
773 | runNow = 1; | ||
774 | } | ||
775 | else { | ||
776 | TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__); | ||
777 | runLocal = 0; | ||
778 | runNow = 1; | ||
779 | } | ||
780 | } | ||
781 | else { | ||
782 | runLocal = 0; | ||
783 | runNow = 0; | ||
784 | } | ||
785 | |||
786 | if(!runLocal) { | ||
787 | // enqueue the tasklet | ||
788 | __add_pai_tasklet(tasklet); | ||
789 | } | ||
790 | |||
791 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
792 | |||
793 | |||
794 | if (runLocal /*&& runNow */) { // runNow == 1 is implied | ||
795 | TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__); | ||
796 | __do_lit_tasklet(tasklet, 0ul); | ||
797 | } | ||
798 | else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied | ||
799 | TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu); | ||
800 | preempt(targetCPU); // does this need to be protected by gsnedf_lock? | ||
801 | } | ||
802 | else { | ||
803 | TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__); | ||
804 | } | ||
805 | |||
806 | return(1); // success | ||
807 | } | ||
808 | |||
809 | |||
810 | #endif | ||
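The PAI block above keeps pending GPU tasklets in a singly-linked list ordered by the EDF priority of the owning task, and do_lit_tasklets() only pops the head tasklet while its owner out-prioritizes the task currently scheduled on this CPU. Below is a small user-space model of that list discipline, a sketch only: plain integers stand in for edf_higher_prio(), the names are illustrative, and the kernel additionally threads a tail pointer through the last node's next field.

#include <stdio.h>
#include <stddef.h>

/* Model of the PAI pending-tasklet list: sorted by owner priority
 * (higher value = more urgent here), with a tail pointer kept for
 * O(1) bookkeeping. */
struct lit_tasklet {
    int owner_prio;
    struct lit_tasklet *next;
};

struct tasklet_head {
    struct lit_tasklet *head;
    struct lit_tasklet **tail;
};

static void tasklet_head_init(struct tasklet_head *q)
{
    q->head = NULL;
    q->tail = &q->head;
}

static void add_pai_tasklet(struct tasklet_head *q, struct lit_tasklet *t)
{
    t->next = NULL;
    if (!q->head || t->owner_prio > q->head->owner_prio) {
        /* empty list, or t out-prioritizes the current head */
        t->next = q->head;
        q->head = t;
    } else {
        /* walk to the last node that still out-prioritizes t */
        struct lit_tasklet *step = q->head;
        while (step->next && step->next->owner_prio > t->owner_prio)
            step = step->next;
        t->next = step->next;
        step->next = t;
    }
    if (!t->next)                    /* t is now the last node: update the tail */
        q->tail = &t->next;
}

/* Pop the head only if it out-prioritizes the currently scheduled task. */
static struct lit_tasklet *pop_if_higher(struct tasklet_head *q, int sched_prio)
{
    struct lit_tasklet *t = q->head;
    if (!t || t->owner_prio <= sched_prio)
        return NULL;
    q->head = t->next;
    if (!q->head)                    /* list became empty: reset the tail */
        q->tail = &q->head;
    t->next = NULL;
    return t;
}

int main(void)
{
    struct tasklet_head q;
    struct lit_tasklet a = {3}, b = {7}, c = {5};
    tasklet_head_init(&q);
    add_pai_tasklet(&q, &a);
    add_pai_tasklet(&q, &b);
    add_pai_tasklet(&q, &c);         /* queue order becomes: 7, 5, 3 */
    struct lit_tasklet *t;
    while ((t = pop_if_higher(&q, 4)))
        printf("ran tasklet with owner prio %d\n", t->owner_prio);
    return 0;
}

With the sample priorities in main(), only the two tasklets that beat the scheduled priority of 4 are run; the third stays queued, mirroring the "does not have priority to run on this CPU" path above.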
811 | |||
812 | |||
813 | |||
814 | |||
815 | |||
816 | |||
817 | |||
818 | |||
819 | |||
820 | |||
821 | |||
822 | |||
823 | |||
382 | /* Getting schedule() right is a bit tricky. schedule() may not make any | 824 | /* Getting schedule() right is a bit tricky. schedule() may not make any |
383 | * assumptions on the state of the current task since it may be called for a | 825 | * assumptions on the state of the current task since it may be called for a |
384 | * number of reasons. The reasons include a scheduler_tick() determined that it | 826 | * number of reasons. The reasons include a scheduler_tick() determined that it |
@@ -437,17 +879,19 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
437 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); | 879 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); |
438 | #endif | 880 | #endif |
439 | 881 | ||
882 | /* | ||
440 | if (exists) | 883 | if (exists) |
441 | TRACE_TASK(prev, | 884 | TRACE_TASK(prev, |
442 | "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " | 885 | "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " |
443 | "state:%d sig:%d\n", | 886 | "state:%d sig:%d\n", |
444 | blocks, out_of_time, np, sleep, preempt, | 887 | blocks, out_of_time, np, sleep, preempt, |
445 | prev->state, signal_pending(prev)); | 888 | prev->state, signal_pending(prev)); |
889 | */ | ||
890 | |||
446 | if (entry->linked && preempt) | 891 | if (entry->linked && preempt) |
447 | TRACE_TASK(prev, "will be preempted by %s/%d\n", | 892 | TRACE_TASK(prev, "will be preempted by %s/%d\n", |
448 | entry->linked->comm, entry->linked->pid); | 893 | entry->linked->comm, entry->linked->pid); |
449 | 894 | ||
450 | |||
451 | /* If a task blocks we have no choice but to reschedule. | 895 | /* If a task blocks we have no choice but to reschedule. |
452 | */ | 896 | */ |
453 | if (blocks) | 897 | if (blocks) |
@@ -492,12 +936,15 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
492 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | 936 | entry->scheduled->rt_param.scheduled_on = NO_CPU; |
493 | TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); | 937 | TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); |
494 | } | 938 | } |
495 | } else | 939 | } |
940 | else | ||
941 | { | ||
496 | /* Only override Linux scheduler if we have a real-time task | 942 | /* Only override Linux scheduler if we have a real-time task |
497 | * scheduled that needs to continue. | 943 | * scheduled that needs to continue. |
498 | */ | 944 | */ |
499 | if (exists) | 945 | if (exists) |
500 | next = prev; | 946 | next = prev; |
947 | } | ||
501 | 948 | ||
502 | sched_state_task_picked(); | 949 | sched_state_task_picked(); |
503 | 950 | ||
@@ -522,8 +969,9 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
522 | static void gsnedf_finish_switch(struct task_struct *prev) | 969 | static void gsnedf_finish_switch(struct task_struct *prev) |
523 | { | 970 | { |
524 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | 971 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); |
525 | 972 | ||
526 | entry->scheduled = is_realtime(current) ? current : NULL; | 973 | entry->scheduled = is_realtime(current) ? current : NULL; |
974 | |||
527 | #ifdef WANT_ALL_SCHED_EVENTS | 975 | #ifdef WANT_ALL_SCHED_EVENTS |
528 | TRACE_TASK(prev, "switched away from\n"); | 976 | TRACE_TASK(prev, "switched away from\n"); |
529 | #endif | 977 | #endif |
@@ -572,11 +1020,14 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) | |||
572 | static void gsnedf_task_wake_up(struct task_struct *task) | 1020 | static void gsnedf_task_wake_up(struct task_struct *task) |
573 | { | 1021 | { |
574 | unsigned long flags; | 1022 | unsigned long flags; |
575 | lt_t now; | 1023 | //lt_t now; |
576 | 1024 | ||
577 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | 1025 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); |
578 | 1026 | ||
579 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 1027 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
1028 | |||
1029 | |||
1030 | #if 0 // sporadic task model | ||
580 | /* We need to take suspensions because of semaphores into | 1031 | /* We need to take suspensions because of semaphores into |
581 | * account! If a job resumes after being suspended due to acquiring | 1032 | * account! If a job resumes after being suspended due to acquiring |
582 | * a semaphore, it should never be treated as a new job release. | 1033 | * a semaphore, it should never be treated as a new job release. |
@@ -598,19 +1049,26 @@ static void gsnedf_task_wake_up(struct task_struct *task) | |||
598 | } | 1049 | } |
599 | } | 1050 | } |
600 | } | 1051 | } |
1052 | #else // periodic task model | ||
1053 | set_rt_flags(task, RT_F_RUNNING); | ||
1054 | #endif | ||
1055 | |||
601 | gsnedf_job_arrival(task); | 1056 | gsnedf_job_arrival(task); |
602 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1057 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
603 | } | 1058 | } |
604 | 1059 | ||
605 | static void gsnedf_task_block(struct task_struct *t) | 1060 | static void gsnedf_task_block(struct task_struct *t) |
606 | { | 1061 | { |
1062 | // TODO: is this called on preemption?? | ||
607 | unsigned long flags; | 1063 | unsigned long flags; |
608 | 1064 | ||
609 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); | 1065 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); |
610 | 1066 | ||
611 | /* unlink if necessary */ | 1067 | /* unlink if necessary */ |
612 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 1068 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
1069 | |||
613 | unlink(t); | 1070 | unlink(t); |
1071 | |||
614 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1072 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
615 | 1073 | ||
616 | BUG_ON(!is_realtime(t)); | 1074 | BUG_ON(!is_realtime(t)); |
@@ -621,6 +1079,10 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
621 | { | 1079 | { |
622 | unsigned long flags; | 1080 | unsigned long flags; |
623 | 1081 | ||
1082 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
1083 | flush_tasklets(t); | ||
1084 | #endif | ||
1085 | |||
624 | /* unlink if necessary */ | 1086 | /* unlink if necessary */ |
625 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 1087 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
626 | unlink(t); | 1088 | unlink(t); |
@@ -629,7 +1091,7 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
629 | tsk_rt(t)->scheduled_on = NO_CPU; | 1091 | tsk_rt(t)->scheduled_on = NO_CPU; |
630 | } | 1092 | } |
631 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1093 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
632 | 1094 | ||
633 | BUG_ON(!is_realtime(t)); | 1095 | BUG_ON(!is_realtime(t)); |
634 | TRACE_TASK(t, "RIP\n"); | 1096 | TRACE_TASK(t, "RIP\n"); |
635 | } | 1097 | } |
@@ -644,51 +1106,53 @@ static long gsnedf_admit_task(struct task_struct* tsk) | |||
644 | 1106 | ||
645 | #include <litmus/fdso.h> | 1107 | #include <litmus/fdso.h> |
646 | 1108 | ||
647 | /* called with IRQs off */ | 1109 | |
648 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 1110 | static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) |
649 | { | 1111 | { |
650 | int linked_on; | 1112 | int linked_on; |
651 | int check_preempt = 0; | 1113 | int check_preempt = 0; |
652 | 1114 | ||
653 | raw_spin_lock(&gsnedf_lock); | 1115 | if(prio_inh != NULL) |
654 | 1116 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | |
655 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | 1117 | else |
1118 | TRACE_TASK(t, "inherits priority from %p\n", prio_inh); | ||
1119 | |||
1120 | sched_trace_eff_prio_change(t, prio_inh); | ||
1121 | |||
656 | tsk_rt(t)->inh_task = prio_inh; | 1122 | tsk_rt(t)->inh_task = prio_inh; |
657 | 1123 | ||
658 | linked_on = tsk_rt(t)->linked_on; | 1124 | linked_on = tsk_rt(t)->linked_on; |
659 | 1125 | ||
660 | /* If it is scheduled, then we need to reorder the CPU heap. */ | 1126 | /* If it is scheduled, then we need to reorder the CPU heap. */ |
661 | if (linked_on != NO_CPU) { | 1127 | if (linked_on != NO_CPU) { |
662 | TRACE_TASK(t, "%s: linked on %d\n", | 1128 | TRACE_TASK(t, "%s: linked on %d\n", |
663 | __FUNCTION__, linked_on); | 1129 | __FUNCTION__, linked_on); |
664 | /* Holder is scheduled; need to re-order CPUs. | 1130 | /* Holder is scheduled; need to re-order CPUs. |
665 | * We can't use heap_decrease() here since | 1131 | * We can't use heap_decrease() here since |
666 | * the cpu_heap is ordered in reverse direction, so | 1132 | * the cpu_heap is ordered in reverse direction, so |
667 | * it is actually an increase. */ | 1133 | * it is actually an increase. */ |
668 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, | 1134 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, |
669 | gsnedf_cpus[linked_on]->hn); | 1135 | gsnedf_cpus[linked_on]->hn); |
670 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, | 1136 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, |
671 | gsnedf_cpus[linked_on]->hn); | 1137 | gsnedf_cpus[linked_on]->hn); |
672 | } else { | 1138 | } else { |
673 | /* holder may be queued: first stop queue changes */ | 1139 | /* holder may be queued: first stop queue changes */ |
674 | raw_spin_lock(&gsnedf.release_lock); | 1140 | raw_spin_lock(&gsnedf.release_lock); |
675 | if (is_queued(t)) { | 1141 | if (is_queued(t)) { |
676 | TRACE_TASK(t, "%s: is queued\n", | 1142 | TRACE_TASK(t, "%s: is queued\n", __FUNCTION__); |
677 | __FUNCTION__); | 1143 | |
678 | /* We need to update the position of holder in some | 1144 | /* We need to update the position of holder in some |
679 | * heap. Note that this could be a release heap if | 1145 | * heap. Note that this could be a release heap if
680 | * budget enforcement is used and this job overran. */ | 1146 | * budget enforcement is used and this job overran. */ |
681 | check_preempt = | 1147 | check_preempt = !bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node); |
682 | !bheap_decrease(edf_ready_order, | 1148 | |
683 | tsk_rt(t)->heap_node); | ||
684 | } else { | 1149 | } else { |
685 | /* Nothing to do: if it is not queued and not linked | 1150 | /* Nothing to do: if it is not queued and not linked |
686 | * then it is either sleeping or currently being moved | 1151 | * then it is either sleeping or currently being moved |
687 | * by other code (e.g., a timer interrupt handler) that | 1152 | * by other code (e.g., a timer interrupt handler) that |
688 | * will use the correct priority when enqueuing the | 1153 | * will use the correct priority when enqueuing the |
689 | * task. */ | 1154 | * task. */ |
690 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", | 1155 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", __FUNCTION__); |
691 | __FUNCTION__); | ||
692 | } | 1156 | } |
693 | raw_spin_unlock(&gsnedf.release_lock); | 1157 | raw_spin_unlock(&gsnedf.release_lock); |
694 | 1158 | ||
@@ -702,34 +1166,148 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct* | |||
702 | /* heap_decrease() hit the top level of the heap: make | 1166 | /* heap_decrease() hit the top level of the heap: make |
703 | * sure preemption checks get the right task, not the | 1167 | * sure preemption checks get the right task, not the |
704 | * potentially stale cache. */ | 1168 | * potentially stale cache. */ |
705 | bheap_uncache_min(edf_ready_order, | 1169 | bheap_uncache_min(edf_ready_order, &gsnedf.ready_queue); |
706 | &gsnedf.ready_queue); | ||
707 | check_for_preemptions(); | 1170 | check_for_preemptions(); |
708 | } | 1171 | } |
709 | } | 1172 | } |
1173 | } | ||
1174 | |||
1175 | /* called with IRQs off */ | ||
1176 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
1177 | { | ||
1178 | raw_spin_lock(&gsnedf_lock); | ||
710 | 1179 | ||
1180 | __set_priority_inheritance(t, prio_inh); | ||
1181 | |||
1182 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1183 | if(tsk_rt(t)->cur_klitirqd != NULL) | ||
1184 | { | ||
1185 | TRACE_TASK(t, "%s/%d inherits a new priority!\n", | ||
1186 | tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); | ||
1187 | |||
1188 | __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh); | ||
1189 | } | ||
1190 | #endif | ||
1191 | |||
711 | raw_spin_unlock(&gsnedf_lock); | 1192 | raw_spin_unlock(&gsnedf_lock); |
712 | } | 1193 | } |
713 | 1194 | ||
1195 | |||
1196 | /* called with IRQs off */ | ||
1197 | static void __clear_priority_inheritance(struct task_struct* t) | ||
1198 | { | ||
1199 | TRACE_TASK(t, "priority restored\n"); | ||
1200 | |||
1201 | if(tsk_rt(t)->scheduled_on != NO_CPU) | ||
1202 | { | ||
1203 | sched_trace_eff_prio_change(t, NULL); | ||
1204 | |||
1205 | tsk_rt(t)->inh_task = NULL; | ||
1206 | |||
1207 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
1208 | * since the priority was effectively lowered. */ | ||
1209 | unlink(t); | ||
1210 | gsnedf_job_arrival(t); | ||
1211 | } | ||
1212 | else | ||
1213 | { | ||
1214 | __set_priority_inheritance(t, NULL); | ||
1215 | } | ||
1216 | |||
1217 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1218 | if(tsk_rt(t)->cur_klitirqd != NULL) | ||
1219 | { | ||
1220 | TRACE_TASK(t, "%s/%d inheritance set back to owner.\n", | ||
1221 | tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); | ||
1222 | |||
1223 | if(tsk_rt(tsk_rt(t)->cur_klitirqd)->scheduled_on != NO_CPU) | ||
1224 | { | ||
1225 | sched_trace_eff_prio_change(tsk_rt(t)->cur_klitirqd, t); | ||
1226 | |||
1227 | tsk_rt(tsk_rt(t)->cur_klitirqd)->inh_task = t; | ||
1228 | |||
1229 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
1230 | * since the priority was effectively lowered. */ | ||
1231 | unlink(tsk_rt(t)->cur_klitirqd); | ||
1232 | gsnedf_job_arrival(tsk_rt(t)->cur_klitirqd); | ||
1233 | } | ||
1234 | else | ||
1235 | { | ||
1236 | __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, t); | ||
1237 | } | ||
1238 | } | ||
1239 | #endif | ||
1240 | } | ||
1241 | |||
714 | /* called with IRQs off */ | 1242 | /* called with IRQs off */ |
715 | static void clear_priority_inheritance(struct task_struct* t) | 1243 | static void clear_priority_inheritance(struct task_struct* t) |
716 | { | 1244 | { |
717 | raw_spin_lock(&gsnedf_lock); | 1245 | raw_spin_lock(&gsnedf_lock); |
1246 | __clear_priority_inheritance(t); | ||
1247 | raw_spin_unlock(&gsnedf_lock); | ||
1248 | } | ||
718 | 1249 | ||
719 | /* A job only stops inheriting a priority when it releases a | 1250 | #ifdef CONFIG_LITMUS_SOFTIRQD |
720 | * resource. Thus we can make the following assumption.*/ | 1251 | /* called with IRQs off */ |
721 | BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU); | 1252 | static void set_priority_inheritance_klitirqd(struct task_struct* klitirqd, |
722 | 1253 | struct task_struct* old_owner, | |
723 | TRACE_TASK(t, "priority restored\n"); | 1254 | struct task_struct* new_owner) |
724 | tsk_rt(t)->inh_task = NULL; | 1255 | { |
1256 | BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); | ||
1257 | |||
1258 | raw_spin_lock(&gsnedf_lock); | ||
1259 | |||
1260 | if(old_owner != new_owner) | ||
1261 | { | ||
1262 | if(old_owner) | ||
1263 | { | ||
1264 | // unreachable? | ||
1265 | tsk_rt(old_owner)->cur_klitirqd = NULL; | ||
1266 | } | ||
1267 | |||
1268 | TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n", | ||
1269 | new_owner->comm, new_owner->pid); | ||
725 | 1270 | ||
726 | /* Check if rescheduling is necessary. We can't use heap_decrease() | 1271 | tsk_rt(new_owner)->cur_klitirqd = klitirqd; |
727 | * since the priority was effectively lowered. */ | 1272 | } |
728 | unlink(t); | 1273 | |
729 | gsnedf_job_arrival(t); | 1274 | __set_priority_inheritance(klitirqd, |
1275 | (tsk_rt(new_owner)->inh_task == NULL) ? | ||
1276 | new_owner : | ||
1277 | tsk_rt(new_owner)->inh_task); | ||
1278 | |||
1279 | raw_spin_unlock(&gsnedf_lock); | ||
1280 | } | ||
730 | 1281 | ||
1282 | /* called with IRQs off */ | ||
1283 | static void clear_priority_inheritance_klitirqd(struct task_struct* klitirqd, | ||
1284 | struct task_struct* old_owner) | ||
1285 | { | ||
1286 | BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); | ||
1287 | |||
1288 | raw_spin_lock(&gsnedf_lock); | ||
1289 | |||
1290 | TRACE_TASK(klitirqd, "priority restored\n"); | ||
1291 | |||
1292 | if(tsk_rt(klitirqd)->scheduled_on != NO_CPU) | ||
1293 | { | ||
1294 | tsk_rt(klitirqd)->inh_task = NULL; | ||
1295 | |||
1296 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
1297 | * since the priority was effectively lowered. */ | ||
1298 | unlink(klitirqd); | ||
1299 | gsnedf_job_arrival(klitirqd); | ||
1300 | } | ||
1301 | else | ||
1302 | { | ||
1303 | __set_priority_inheritance(klitirqd, NULL); | ||
1304 | } | ||
1305 | |||
1306 | tsk_rt(old_owner)->cur_klitirqd = NULL; | ||
1307 | |||
731 | raw_spin_unlock(&gsnedf_lock); | 1308 | raw_spin_unlock(&gsnedf_lock); |
732 | } | 1309 | } |
1310 | #endif | ||
733 | 1311 | ||
734 | 1312 | ||
735 | /* ******************** FMLP support ********************** */ | 1313 | /* ******************** FMLP support ********************** */ |
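The hunk above splits priority inheritance into __set_priority_inheritance()/__clear_priority_inheritance() so that the same boost can be propagated to a klitirqd proxy thread currently handling interrupts on the task's behalf, and dropped back to the owner's own priority when the boost ends. A compact user-space sketch of that propagation follows; the field and function names are illustrative (smaller number = higher priority here), and the real code compares tasks with edf_higher_prio() and re-links them in the ready queue and CPU heap rather than just storing a pointer.

#include <stdio.h>
#include <stddef.h>

/* Sketch: boosting a task also boosts its interrupt-handling proxy
 * (klitirqd); clearing the boost lets the proxy fall back to the owner. */
struct rt_task {
    const char *name;
    int base_prio;               /* smaller number = higher priority */
    struct rt_task *inh_task;    /* task whose priority we currently inherit */
    struct rt_task *cur_proxy;   /* klitirqd thread working on our behalf, if any */
};

static int eff_prio(const struct rt_task *t)
{
    return t->inh_task ? t->inh_task->base_prio : t->base_prio;
}

static void set_prio_inh(struct rt_task *t, struct rt_task *donor)
{
    t->inh_task = donor;
    if (t->cur_proxy)                    /* propagate to the proxy thread */
        t->cur_proxy->inh_task = donor;
}

static void clear_prio_inh(struct rt_task *t)
{
    t->inh_task = NULL;
    if (t->cur_proxy)                    /* proxy inheritance set back to owner */
        t->cur_proxy->inh_task = t;
}

int main(void)
{
    struct rt_task proxy = { "klitirqd",  100, NULL, NULL };
    struct rt_task owner = { "gpu_task",   50, NULL, &proxy };
    struct rt_task donor = { "hp_waiter",  10, NULL, NULL };

    set_prio_inh(&owner, &donor);
    printf("%s eff prio: %d, %s eff prio: %d\n",
           owner.name, eff_prio(&owner), proxy.name, eff_prio(&proxy));
    clear_prio_inh(&owner);
    printf("%s eff prio: %d, %s eff prio: %d\n",
           owner.name, eff_prio(&owner), proxy.name, eff_prio(&proxy));
    return 0;
}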
@@ -932,11 +1510,483 @@ static struct litmus_lock* gsnedf_new_fmlp(void) | |||
932 | return &sem->litmus_lock; | 1510 | return &sem->litmus_lock; |
933 | } | 1511 | } |
934 | 1512 | ||
1513 | |||
1514 | |||
1515 | |||
1516 | |||
1517 | |||
1518 | |||
1519 | /* ******************** KFMLP support ********************** */ | ||
1520 | |||
1521 | /* struct for semaphore with priority inheritance */ | ||
1522 | struct kfmlp_queue | ||
1523 | { | ||
1524 | wait_queue_head_t wait; | ||
1525 | struct task_struct* owner; | ||
1526 | struct task_struct* hp_waiter; | ||
1527 | int count; /* number of waiters + holder */ | ||
1528 | }; | ||
1529 | |||
1530 | struct kfmlp_semaphore | ||
1531 | { | ||
1532 | struct litmus_lock litmus_lock; | ||
1533 | |||
1534 | spinlock_t lock; | ||
1535 | |||
1536 | int num_resources; /* aka k */ | ||
1537 | |||
1538 | struct kfmlp_queue *queues; /* array */ | ||
1539 | struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */ | ||
1540 | }; | ||
1541 | |||
1542 | static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock) | ||
1543 | { | ||
1544 | return container_of(lock, struct kfmlp_semaphore, litmus_lock); | ||
1545 | } | ||
1546 | |||
1547 | static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem, | ||
1548 | struct kfmlp_queue* queue) | ||
1549 | { | ||
1550 | return (queue - &sem->queues[0]); | ||
1551 | } | ||
1552 | |||
1553 | static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem, | ||
1554 | struct task_struct* holder) | ||
1555 | { | ||
1556 | int i; | ||
1557 | for(i = 0; i < sem->num_resources; ++i) | ||
1558 | if(sem->queues[i].owner == holder) | ||
1559 | return(&sem->queues[i]); | ||
1560 | return(NULL); | ||
1561 | } | ||
1562 | |||
1563 | /* caller is responsible for locking */ | ||
1564 | static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue, | ||
1565 | struct task_struct *skip) | ||
1566 | { | ||
1567 | struct list_head *pos; | ||
1568 | struct task_struct *queued, *found = NULL; | ||
1569 | |||
1570 | list_for_each(pos, &kqueue->wait.task_list) { | ||
1571 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
1572 | task_list)->private; | ||
1573 | |||
1574 | /* Compare task prios, find high prio task. */ | ||
1575 | if (queued != skip && edf_higher_prio(queued, found)) | ||
1576 | found = queued; | ||
1577 | } | ||
1578 | return found; | ||
1579 | } | ||
1580 | |||
1581 | static inline struct kfmlp_queue* kfmlp_find_shortest( | ||
1582 | struct kfmlp_semaphore* sem, | ||
1583 | struct kfmlp_queue* search_start) | ||
1584 | { | ||
1585 | // we start our search at search_start instead of at the beginning of the | ||
1586 | // queue list to load-balance across all resources. | ||
1587 | struct kfmlp_queue* step = search_start; | ||
1588 | struct kfmlp_queue* shortest = sem->shortest_queue; | ||
1589 | |||
1590 | do | ||
1591 | { | ||
1592 | step = (step+1 != &sem->queues[sem->num_resources]) ? | ||
1593 | step+1 : &sem->queues[0]; | ||
1594 | |||
1595 | if(step->count < shortest->count) | ||
1596 | { | ||
1597 | shortest = step; | ||
1598 | if(step->count == 0) | ||
1599 | break; /* can't get any shorter */ | ||
1600 | } | ||
1601 | |||
1602 | }while(step != search_start); | ||
1603 | |||
1604 | return(shortest); | ||
1605 | } | ||
1606 | |||
1607 | static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem) | ||
1608 | { | ||
1609 | /* must hold sem->lock */ | ||
1610 | |||
1611 | struct kfmlp_queue *my_queue = NULL; | ||
1612 | struct task_struct *max_hp = NULL; | ||
1613 | |||
1614 | |||
1615 | struct list_head *pos; | ||
1616 | struct task_struct *queued; | ||
1617 | int i; | ||
1618 | |||
1619 | for(i = 0; i < sem->num_resources; ++i) | ||
1620 | { | ||
1621 | if( (sem->queues[i].count > 1) && | ||
1622 | ((my_queue == NULL) || | ||
1623 | (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) ) | ||
1624 | { | ||
1625 | my_queue = &sem->queues[i]; | ||
1626 | } | ||
1627 | } | ||
1628 | |||
1629 | if(my_queue) | ||
1630 | { | ||
1631 | max_hp = my_queue->hp_waiter; | ||
1632 | |||
1633 | BUG_ON(!max_hp); | ||
1634 | |||
1635 | TRACE_CUR("queue %d: stealing %s/%d from queue %d\n", | ||
1636 | kfmlp_get_idx(sem, my_queue), | ||
1637 | max_hp->comm, max_hp->pid, | ||
1638 | kfmlp_get_idx(sem, my_queue)); | ||
1639 | |||
1640 | my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp); | ||
1641 | |||
1642 | /* | ||
1643 | if(my_queue->hp_waiter) | ||
1644 | TRACE_CUR("queue %d: new hp_waiter is %s/%d\n", | ||
1645 | kfmlp_get_idx(sem, my_queue), | ||
1646 | my_queue->hp_waiter->comm, | ||
1647 | my_queue->hp_waiter->pid); | ||
1648 | else | ||
1649 | TRACE_CUR("queue %d: new hp_waiter is %p\n", | ||
1650 | kfmlp_get_idx(sem, my_queue), NULL); | ||
1651 | */ | ||
1652 | |||
1653 | raw_spin_lock(&gsnedf_lock); | ||
1654 | |||
1655 | /* | ||
1656 | if(my_queue->owner) | ||
1657 | TRACE_CUR("queue %d: owner is %s/%d\n", | ||
1658 | kfmlp_get_idx(sem, my_queue), | ||
1659 | my_queue->owner->comm, | ||
1660 | my_queue->owner->pid); | ||
1661 | else | ||
1662 | TRACE_CUR("queue %d: owner is %p\n", | ||
1663 | kfmlp_get_idx(sem, my_queue), | ||
1664 | NULL); | ||
1665 | */ | ||
1666 | |||
1667 | if(tsk_rt(my_queue->owner)->inh_task == max_hp) | ||
1668 | { | ||
1669 | __clear_priority_inheritance(my_queue->owner); | ||
1670 | if(my_queue->hp_waiter != NULL) | ||
1671 | { | ||
1672 | __set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); | ||
1673 | } | ||
1674 | } | ||
1675 | raw_spin_unlock(&gsnedf_lock); | ||
1676 | |||
1677 | list_for_each(pos, &my_queue->wait.task_list) | ||
1678 | { | ||
1679 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
1680 | task_list)->private; | ||
1681 | /* Compare task prios, find high prio task. */ | ||
1682 | if (queued == max_hp) | ||
1683 | { | ||
1684 | /* | ||
1685 | TRACE_CUR("queue %d: found entry in wait queue. REMOVING!\n", | ||
1686 | kfmlp_get_idx(sem, my_queue)); | ||
1687 | */ | ||
1688 | __remove_wait_queue(&my_queue->wait, | ||
1689 | list_entry(pos, wait_queue_t, task_list)); | ||
1690 | break; | ||
1691 | } | ||
1692 | } | ||
1693 | --(my_queue->count); | ||
1694 | } | ||
1695 | |||
1696 | return(max_hp); | ||
1697 | } | ||
1698 | |||
1699 | int gsnedf_kfmlp_lock(struct litmus_lock* l) | ||
1700 | { | ||
1701 | struct task_struct* t = current; | ||
1702 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1703 | struct kfmlp_queue* my_queue; | ||
1704 | wait_queue_t wait; | ||
1705 | unsigned long flags; | ||
1706 | |||
1707 | if (!is_realtime(t)) | ||
1708 | return -EPERM; | ||
1709 | |||
1710 | spin_lock_irqsave(&sem->lock, flags); | ||
1711 | |||
1712 | my_queue = sem->shortest_queue; | ||
1713 | |||
1714 | if (my_queue->owner) { | ||
1715 | /* resource is not free => must suspend and wait */ | ||
1716 | TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n", | ||
1717 | kfmlp_get_idx(sem, my_queue)); | ||
1718 | |||
1719 | init_waitqueue_entry(&wait, t); | ||
1720 | |||
1721 | /* FIXME: interruptible would be nice some day */ | ||
1722 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
1723 | |||
1724 | __add_wait_queue_tail_exclusive(&my_queue->wait, &wait); | ||
1725 | |||
1726 | /* check if we need to activate priority inheritance */ | ||
1727 | if (edf_higher_prio(t, my_queue->hp_waiter)) | ||
1728 | { | ||
1729 | my_queue->hp_waiter = t; | ||
1730 | if (edf_higher_prio(t, my_queue->owner)) | ||
1731 | { | ||
1732 | set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); | ||
1733 | } | ||
1734 | } | ||
1735 | |||
1736 | ++(my_queue->count); | ||
1737 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
1738 | |||
1739 | /* release lock before sleeping */ | ||
1740 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1741 | |||
1742 | /* We depend on the FIFO order. Thus, we don't need to recheck | ||
1743 | * when we wake up; we are guaranteed to have the lock since | ||
1744 | * there is only one wake up per release (or steal). | ||
1745 | */ | ||
1746 | schedule(); | ||
1747 | |||
1748 | |||
1749 | if(my_queue->owner == t) | ||
1750 | { | ||
1751 | TRACE_CUR("queue %d: acquired through waiting\n", | ||
1752 | kfmlp_get_idx(sem, my_queue)); | ||
1753 | } | ||
1754 | else | ||
1755 | { | ||
1756 | /* this case may happen if our wait entry was stolen | ||
1757 | between queues. record where we went. */ | ||
1758 | my_queue = kfmlp_get_queue(sem, t); | ||
1759 | |||
1760 | BUG_ON(!my_queue); | ||
1761 | TRACE_CUR("queue %d: acquired through stealing\n", | ||
1762 | kfmlp_get_idx(sem, my_queue)); | ||
1763 | } | ||
1764 | } | ||
1765 | else | ||
1766 | { | ||
1767 | TRACE_CUR("queue %d: acquired immediately\n", | ||
1768 | kfmlp_get_idx(sem, my_queue)); | ||
1769 | |||
1770 | my_queue->owner = t; | ||
1771 | |||
1772 | ++(my_queue->count); | ||
1773 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
1774 | |||
1775 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1776 | } | ||
1777 | |||
1778 | return kfmlp_get_idx(sem, my_queue); | ||
1779 | } | ||
1780 | |||
1781 | int gsnedf_kfmlp_unlock(struct litmus_lock* l) | ||
1782 | { | ||
1783 | struct task_struct *t = current, *next; | ||
1784 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1785 | struct kfmlp_queue *my_queue; | ||
1786 | unsigned long flags; | ||
1787 | int err = 0; | ||
1788 | |||
1789 | spin_lock_irqsave(&sem->lock, flags); | ||
1790 | |||
1791 | my_queue = kfmlp_get_queue(sem, t); | ||
1792 | |||
1793 | if (!my_queue) { | ||
1794 | err = -EINVAL; | ||
1795 | goto out; | ||
1796 | } | ||
1797 | |||
1798 | /* check if there are jobs waiting for this resource */ | ||
1799 | next = __waitqueue_remove_first(&my_queue->wait); | ||
1800 | if (next) { | ||
1801 | /* | ||
1802 | TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n", | ||
1803 | kfmlp_get_idx(sem, my_queue), | ||
1804 | next->comm, next->pid); | ||
1805 | */ | ||
1806 | /* next becomes the resource holder */ | ||
1807 | my_queue->owner = next; | ||
1808 | |||
1809 | --(my_queue->count); | ||
1810 | // the '=' in '<=' is a crude heuristic to build affinity | ||
1811 | // until tasks can tell us where they last ran... | ||
1812 | if(my_queue->count <= sem->shortest_queue->count) | ||
1813 | { | ||
1814 | sem->shortest_queue = my_queue; | ||
1815 | } | ||
1816 | |||
1817 | TRACE_CUR("queue %d: lock ownership passed to %s/%d\n", | ||
1818 | kfmlp_get_idx(sem, my_queue), next->comm, next->pid); | ||
1819 | |||
1820 | /* determine new hp_waiter if necessary */ | ||
1821 | if (next == my_queue->hp_waiter) { | ||
1822 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
1823 | /* next has the highest priority --- it doesn't need to | ||
1824 | * inherit. However, we need to make sure that the | ||
1825 | * next-highest priority in the queue is reflected in | ||
1826 | * hp_waiter. */ | ||
1827 | my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next); | ||
1828 | if (my_queue->hp_waiter) | ||
1829 | TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue)); | ||
1830 | else | ||
1831 | TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue)); | ||
1832 | } else { | ||
1833 | /* Well, if next is not the highest-priority waiter, | ||
1834 | * then it ought to inherit the highest-priority | ||
1835 | * waiter's priority. */ | ||
1836 | set_priority_inheritance(next, my_queue->hp_waiter); | ||
1837 | } | ||
1838 | |||
1839 | /* wake up next */ | ||
1840 | wake_up_process(next); | ||
1841 | } | ||
1842 | else | ||
1843 | { | ||
1844 | TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue)); | ||
1845 | |||
1846 | next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */ | ||
1847 | |||
1848 | /* | ||
1849 | if(next) | ||
1850 | TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n", | ||
1851 | kfmlp_get_idx(sem, my_queue), | ||
1852 | next->comm, next->pid); | ||
1853 | */ | ||
1854 | |||
1855 | my_queue->owner = next; | ||
1856 | |||
1857 | if(next) | ||
1858 | { | ||
1859 | TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n", | ||
1860 | kfmlp_get_idx(sem, my_queue), | ||
1861 | next->comm, next->pid); | ||
1862 | |||
1863 | /* wake up next */ | ||
1864 | wake_up_process(next); | ||
1865 | } | ||
1866 | else | ||
1867 | { | ||
1868 | TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue)); | ||
1869 | |||
1870 | --(my_queue->count); | ||
1871 | // the '=' in '<=' is a crude heuristic to build affinity | ||
1872 | // until tasks can tell us where they last ran... | ||
1873 | if(my_queue->count <= sem->shortest_queue->count) | ||
1874 | { | ||
1875 | sem->shortest_queue = my_queue; | ||
1876 | } | ||
1877 | } | ||
1878 | } | ||
1879 | |||
1880 | /* we lose the benefit of priority inheritance (if any) */ | ||
1881 | if (tsk_rt(t)->inh_task) | ||
1882 | clear_priority_inheritance(t); | ||
1883 | |||
1884 | out: | ||
1885 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1886 | |||
1887 | return err; | ||
1888 | } | ||
1889 | |||
1890 | int gsnedf_kfmlp_close(struct litmus_lock* l) | ||
1891 | { | ||
1892 | struct task_struct *t = current; | ||
1893 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1894 | struct kfmlp_queue *my_queue; | ||
1895 | unsigned long flags; | ||
1896 | |||
1897 | int owner; | ||
1898 | |||
1899 | spin_lock_irqsave(&sem->lock, flags); | ||
1900 | |||
1901 | my_queue = kfmlp_get_queue(sem, t); | ||
1902 | owner = (my_queue) ? (my_queue->owner == t) : 0; | ||
1903 | |||
1904 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1905 | |||
1906 | if (owner) | ||
1907 | gsnedf_kfmlp_unlock(l); | ||
1908 | |||
1909 | return 0; | ||
1910 | } | ||
1911 | |||
1912 | void gsnedf_kfmlp_free(struct litmus_lock* l) | ||
1913 | { | ||
1914 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1915 | kfree(sem->queues); | ||
1916 | kfree(sem); | ||
1917 | } | ||
1918 | |||
1919 | static struct litmus_lock_ops gsnedf_kfmlp_lock_ops = { | ||
1920 | .close = gsnedf_kfmlp_close, | ||
1921 | .lock = gsnedf_kfmlp_lock, | ||
1922 | .unlock = gsnedf_kfmlp_unlock, | ||
1923 | .deallocate = gsnedf_kfmlp_free, | ||
1924 | }; | ||
1925 | |||
1926 | static struct litmus_lock* gsnedf_new_kfmlp(void* __user arg, int* ret_code) | ||
1927 | { | ||
1928 | struct kfmlp_semaphore* sem; | ||
1929 | int num_resources = 0; | ||
1930 | int i; | ||
1931 | |||
1932 | if(!access_ok(VERIFY_READ, arg, sizeof(num_resources))) | ||
1933 | { | ||
1934 | *ret_code = -EINVAL; | ||
1935 | return(NULL); | ||
1936 | } | ||
1937 | if(__copy_from_user(&num_resources, arg, sizeof(num_resources))) | ||
1938 | { | ||
1939 | *ret_code = -EINVAL; | ||
1940 | return(NULL); | ||
1941 | } | ||
1942 | if(num_resources < 1) | ||
1943 | { | ||
1944 | *ret_code = -EINVAL; | ||
1945 | return(NULL); | ||
1946 | } | ||
1947 | |||
1948 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
1949 | if(!sem) | ||
1950 | { | ||
1951 | *ret_code = -ENOMEM; | ||
1952 | return NULL; | ||
1953 | } | ||
1954 | |||
1955 | sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL); | ||
1956 | if(!sem->queues) | ||
1957 | { | ||
1958 | kfree(sem); | ||
1959 | *ret_code = -ENOMEM; | ||
1960 | return NULL; | ||
1961 | } | ||
1962 | |||
1963 | sem->litmus_lock.ops = &gsnedf_kfmlp_lock_ops; | ||
1964 | spin_lock_init(&sem->lock); | ||
1965 | sem->num_resources = num_resources; | ||
1966 | |||
1967 | for(i = 0; i < num_resources; ++i) | ||
1968 | { | ||
1969 | sem->queues[i].owner = NULL; | ||
1970 | sem->queues[i].hp_waiter = NULL; | ||
1971 | init_waitqueue_head(&sem->queues[i].wait); | ||
1972 | sem->queues[i].count = 0; | ||
1973 | } | ||
1974 | |||
1975 | sem->shortest_queue = &sem->queues[0]; | ||
1976 | |||
1977 | *ret_code = 0; | ||
1978 | return &sem->litmus_lock; | ||
1979 | } | ||
1980 | |||
1981 | |||
1982 | |||
1983 | |||
1984 | |||
935 | /* **** lock constructor **** */ | 1985 | /* **** lock constructor **** */ |
936 | 1986 | ||
937 | 1987 | ||
938 | static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | 1988 | static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, |
939 | void* __user unused) | 1989 | void* __user arg) |
940 | { | 1990 | { |
941 | int err = -ENXIO; | 1991 | int err = -ENXIO; |
942 | 1992 | ||
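Within the KFMLP hunk above, kfmlp_find_shortest() starts scanning just after the caller's queue (to spread requests across the k replicas), wraps around the queue array, and stops early once it finds an empty queue, since nothing can be shorter. A stand-alone sketch of that search, with simplified types standing in for struct kfmlp_queue and illustrative names:

#include <stdio.h>

/* Sketch of the shortest-replica-queue search: scan the k queues starting
 * just after search_start, wrapping around, and return the one with the
 * fewest waiters + holder; a count of zero ends the search early. */
struct kqueue { int count; };

static struct kqueue *find_shortest(struct kqueue *queues, int k,
                                    struct kqueue *search_start,
                                    struct kqueue *current_shortest)
{
    struct kqueue *step = search_start;
    struct kqueue *shortest = current_shortest;

    do {
        step = (step + 1 != &queues[k]) ? step + 1 : &queues[0];  /* wrap around */
        if (step->count < shortest->count) {
            shortest = step;
            if (step->count == 0)
                break;                   /* can't get any shorter */
        }
    } while (step != search_start);

    return shortest;
}

int main(void)
{
    struct kqueue q[3] = { {2}, {1}, {3} };
    struct kqueue *s = find_shortest(q, 3, &q[0], &q[0]);
    printf("shortest queue index: %ld (count %d)\n", (long)(s - q), s->count);
    return 0;
}

In gsnedf_kfmlp_lock() the result is cached in sem->shortest_queue so the next requester does not have to rescan, and gsnedf_kfmlp_unlock() updates the cache with the '<=' heuristic noted in its comments.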
@@ -951,7 +2001,10 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | |||
951 | else | 2001 | else |
952 | err = -ENOMEM; | 2002 | err = -ENOMEM; |
953 | break; | 2003 | break; |
954 | 2004 | ||
2005 | case KFMLP_SEM: | ||
2006 | *lock = gsnedf_new_kfmlp(arg, &err); | ||
2007 | break; | ||
955 | }; | 2008 | }; |
956 | 2009 | ||
957 | return err; | 2010 | return err; |
@@ -959,7 +2012,6 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | |||
959 | 2012 | ||
960 | #endif | 2013 | #endif |
961 | 2014 | ||
962 | |||
963 | static long gsnedf_activate_plugin(void) | 2015 | static long gsnedf_activate_plugin(void) |
964 | { | 2016 | { |
965 | int cpu; | 2017 | int cpu; |
@@ -986,6 +2038,20 @@ static long gsnedf_activate_plugin(void) | |||
986 | } | 2038 | } |
987 | #endif | 2039 | #endif |
988 | } | 2040 | } |
2041 | |||
2042 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
2043 | gsnedf_pending_tasklets.head = NULL; | ||
2044 | gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); | ||
2045 | #endif | ||
2046 | |||
2047 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
2048 | spawn_klitirqd(NULL); | ||
2049 | #endif | ||
2050 | |||
2051 | #ifdef CONFIG_LITMUS_NVIDIA | ||
2052 | init_nvidia_info(); | ||
2053 | #endif | ||
2054 | |||
989 | return 0; | 2055 | return 0; |
990 | } | 2056 | } |
991 | 2057 | ||
@@ -1003,7 +2069,17 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | |||
1003 | .admit_task = gsnedf_admit_task, | 2069 | .admit_task = gsnedf_admit_task, |
1004 | .activate_plugin = gsnedf_activate_plugin, | 2070 | .activate_plugin = gsnedf_activate_plugin, |
1005 | #ifdef CONFIG_LITMUS_LOCKING | 2071 | #ifdef CONFIG_LITMUS_LOCKING |
1006 | .allocate_lock = gsnedf_allocate_lock, | 2072 | .allocate_lock = gsnedf_allocate_lock, |
2073 | .set_prio_inh = set_priority_inheritance, | ||
2074 | .clear_prio_inh = clear_priority_inheritance, | ||
2075 | #endif | ||
2076 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
2077 | .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd, | ||
2078 | .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd, | ||
2079 | #endif | ||
2080 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
2081 | .enqueue_pai_tasklet = enqueue_pai_tasklet, | ||
2082 | .run_tasklets = run_tasklets, | ||
1007 | #endif | 2083 | #endif |
1008 | }; | 2084 | }; |
1009 | 2085 | ||
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index 5a15ce938984..9a6fe487718e 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c | |||
@@ -103,7 +103,9 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) | |||
103 | } | 103 | } |
104 | #ifdef __ARCH_WANT_UNLOCKED_CTXSW | 104 | #ifdef __ARCH_WANT_UNLOCKED_CTXSW |
105 | if (next->oncpu) | 105 | if (next->oncpu) |
106 | { | ||
106 | TRACE_TASK(next, "waiting for !oncpu"); | 107 | TRACE_TASK(next, "waiting for !oncpu"); |
108 | } | ||
107 | while (next->oncpu) { | 109 | while (next->oncpu) { |
108 | cpu_relax(); | 110 | cpu_relax(); |
109 | mb(); | 111 | mb(); |
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index 00a1900d6457..c910a08b7049 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c | |||
@@ -118,6 +118,40 @@ static long litmus_dummy_allocate_lock(struct litmus_lock **lock, int type, | |||
118 | return -ENXIO; | 118 | return -ENXIO; |
119 | } | 119 | } |
120 | 120 | ||
121 | static void litmus_dummy_set_prio_inh(struct task_struct* a, struct task_struct* b) | ||
122 | { | ||
123 | } | ||
124 | |||
125 | static void litmus_dummy_clear_prio_inh(struct task_struct* t) | ||
126 | { | ||
127 | } | ||
128 | |||
129 | #endif | ||
130 | |||
131 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
132 | static void litmus_dummy_set_prio_inh_klitirq(struct task_struct* klitirqd, | ||
133 | struct task_struct* old_owner, | ||
134 | struct task_struct* new_owner) | ||
135 | { | ||
136 | } | ||
137 | |||
138 | static void litmus_dummy_clear_prio_inh_klitirqd(struct task_struct* klitirqd, | ||
139 | struct task_struct* old_owner) | ||
140 | { | ||
141 | } | ||
142 | #endif | ||
143 | |||
144 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
145 | static int litmus_dummy_enqueue_pai_tasklet(struct tasklet_struct* t) | ||
146 | { | ||
147 | TRACE("%s: PAI Tasklet unsupported in this plugin!!!!!!\n", __FUNCTION__); | ||
148 | return(0); // failure. | ||
149 | } | ||
150 | |||
151 | static void litmus_dummy_run_tasklets(struct task_struct* t) | ||
152 | { | ||
153 | //TRACE("%s: PAI Tasklet unsupported in this plugin!!!!!!\n", __FUNCTION__); | ||
154 | } | ||
121 | #endif | 155 | #endif |
122 | 156 | ||
123 | 157 | ||
@@ -138,6 +172,16 @@ struct sched_plugin linux_sched_plugin = { | |||
138 | .deactivate_plugin = litmus_dummy_deactivate_plugin, | 172 | .deactivate_plugin = litmus_dummy_deactivate_plugin, |
139 | #ifdef CONFIG_LITMUS_LOCKING | 173 | #ifdef CONFIG_LITMUS_LOCKING |
140 | .allocate_lock = litmus_dummy_allocate_lock, | 174 | .allocate_lock = litmus_dummy_allocate_lock, |
175 | .set_prio_inh = litmus_dummy_set_prio_inh, | ||
176 | .clear_prio_inh = litmus_dummy_clear_prio_inh, | ||
177 | #endif | ||
178 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
179 | .set_prio_inh_klitirqd = litmus_dummy_set_prio_inh_klitirq, | ||
180 | .clear_prio_inh_klitirqd = litmus_dummy_clear_prio_inh_klitirqd, | ||
181 | #endif | ||
182 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
183 | .enqueue_pai_tasklet = litmus_dummy_enqueue_pai_tasklet, | ||
184 | .run_tasklets = litmus_dummy_run_tasklets, | ||
141 | #endif | 185 | #endif |
142 | .admit_task = litmus_dummy_admit_task | 186 | .admit_task = litmus_dummy_admit_task |
143 | }; | 187 | }; |
@@ -176,6 +220,8 @@ int register_sched_plugin(struct sched_plugin* plugin) | |||
176 | CHECK(deactivate_plugin); | 220 | CHECK(deactivate_plugin); |
177 | #ifdef CONFIG_LITMUS_LOCKING | 221 | #ifdef CONFIG_LITMUS_LOCKING |
178 | CHECK(allocate_lock); | 222 | CHECK(allocate_lock); |
223 | CHECK(set_prio_inh); | ||
224 | CHECK(clear_prio_inh); | ||
179 | #endif | 225 | #endif |
180 | CHECK(admit_task); | 226 | CHECK(admit_task); |
181 | 227 | ||
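The sched_plugin.c hunk above gives the new callbacks no-op litmus_dummy_* implementations and adds CHECK() lines for them in register_sched_plugin(), so a plugin that does not support klitirqd inheritance or PAI tasklets still exposes callable function pointers (that CHECK() substitutes the dummy when a plugin leaves a pointer NULL is assumed here; the macro body is outside this diff). A user-space sketch of the same fallback pattern, with illustrative struct and function names rather than LITMUS's actual API:

#include <stdio.h>

struct tasklet;   /* opaque for this sketch */

struct plugin_ops {
    int  (*enqueue_pai_tasklet)(struct tasklet *t);
    void (*run_tasklets)(void);
};

static int dummy_enqueue_pai_tasklet(struct tasklet *t)
{
    (void)t;
    printf("PAI tasklets unsupported by this plugin\n");
    return 0;                            /* 0 = failure, caller falls back */
}

static void dummy_run_tasklets(void) { /* nothing to do */ }

static void register_plugin(struct plugin_ops *ops)
{
    /* fill any missing callback with its dummy so callers never see NULL */
    if (!ops->enqueue_pai_tasklet)
        ops->enqueue_pai_tasklet = dummy_enqueue_pai_tasklet;
    if (!ops->run_tasklets)
        ops->run_tasklets = dummy_run_tasklets;
}

int main(void)
{
    struct plugin_ops my_plugin = { 0 };   /* plugin without PAI support */
    register_plugin(&my_plugin);
    my_plugin.enqueue_pai_tasklet(NULL);   /* safe: dummy handles it */
    my_plugin.run_tasklets();
    return 0;
}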
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index 5ef8d09ab41f..d079df2b292a 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | #include <linux/percpu.h> | 9 | #include <linux/percpu.h> |
10 | #include <linux/hardirq.h> | ||
10 | 11 | ||
11 | #include <litmus/ftdev.h> | 12 | #include <litmus/ftdev.h> |
12 | #include <litmus/litmus.h> | 13 | #include <litmus/litmus.h> |
@@ -16,13 +17,13 @@ | |||
16 | #include <litmus/ftdev.h> | 17 | #include <litmus/ftdev.h> |
17 | 18 | ||
18 | 19 | ||
19 | #define NO_EVENTS (1 << CONFIG_SCHED_TASK_TRACE_SHIFT) | 20 | #define NUM_EVENTS (1 << (CONFIG_SCHED_TASK_TRACE_SHIFT+11)) |
20 | 21 | ||
21 | #define now() litmus_clock() | 22 | #define now() litmus_clock() |
22 | 23 | ||
23 | struct local_buffer { | 24 | struct local_buffer { |
24 | struct st_event_record record[NO_EVENTS]; | 25 | struct st_event_record record[NUM_EVENTS]; |
25 | char flag[NO_EVENTS]; | 26 | char flag[NUM_EVENTS]; |
26 | struct ft_buffer ftbuf; | 27 | struct ft_buffer ftbuf; |
27 | }; | 28 | }; |
28 | 29 | ||
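The NUM_EVENTS change above enlarges the per-CPU sched_trace buffer by a factor of 2^11 = 2048 relative to the old NO_EVENTS value. A quick sketch of the resulting sizes follows; the CONFIG_SCHED_TASK_TRACE_SHIFT value and the per-record byte count below are made-up example numbers, not taken from this tree.

#include <stdio.h>

/* Buffer-size arithmetic behind the NUM_EVENTS change above, using
 * hypothetical example values for the Kconfig shift and record size. */
#define SCHED_TASK_TRACE_SHIFT 9                  /* hypothetical Kconfig value */
#define OLD_NO_EVENTS  (1 << SCHED_TASK_TRACE_SHIFT)
#define NEW_NUM_EVENTS (1 << (SCHED_TASK_TRACE_SHIFT + 11))

int main(void)
{
    const unsigned long rec_size = 24;            /* assumed bytes per event record */
    printf("old events/CPU: %d (~%lu KiB)\n",
           OLD_NO_EVENTS, OLD_NO_EVENTS * rec_size / 1024);
    printf("new events/CPU: %d (~%lu MiB)\n",
           NEW_NUM_EVENTS, NEW_NUM_EVENTS * rec_size / (1024 * 1024));
    return 0;
}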
@@ -41,7 +42,7 @@ static int __init init_sched_task_trace(void) | |||
41 | int i, ok = 0, err; | 42 | int i, ok = 0, err; |
42 | printk("Allocated %u sched_trace_xxx() events per CPU " | 43 | printk("Allocated %u sched_trace_xxx() events per CPU " |
43 | "(buffer size: %d bytes)\n", | 44 | "(buffer size: %d bytes)\n", |
44 | NO_EVENTS, (int) sizeof(struct local_buffer)); | 45 | NUM_EVENTS, (int) sizeof(struct local_buffer)); |
45 | 46 | ||
46 | err = ftdev_init(&st_dev, THIS_MODULE, | 47 | err = ftdev_init(&st_dev, THIS_MODULE, |
47 | num_online_cpus(), "sched_trace"); | 48 | num_online_cpus(), "sched_trace"); |
@@ -50,7 +51,7 @@ static int __init init_sched_task_trace(void) | |||
50 | 51 | ||
51 | for (i = 0; i < st_dev.minor_cnt; i++) { | 52 | for (i = 0; i < st_dev.minor_cnt; i++) { |
52 | buf = &per_cpu(st_event_buffer, i); | 53 | buf = &per_cpu(st_event_buffer, i); |
53 | ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS, | 54 | ok += init_ft_buffer(&buf->ftbuf, NUM_EVENTS, |
54 | sizeof(struct st_event_record), | 55 | sizeof(struct st_event_record), |
55 | buf->flag, | 56 | buf->flag, |
56 | buf->record); | 57 | buf->record); |
@@ -154,7 +155,8 @@ feather_callback void do_sched_trace_task_switch_to(unsigned long id, | |||
154 | { | 155 | { |
155 | struct task_struct *t = (struct task_struct*) _task; | 156 | struct task_struct *t = (struct task_struct*) _task; |
156 | struct st_event_record* rec; | 157 | struct st_event_record* rec; |
157 | if (is_realtime(t)) { | 158 | //if (is_realtime(t)) /* comment out to trace EVERYTHING */ |
159 | { | ||
158 | rec = get_record(ST_SWITCH_TO, t); | 160 | rec = get_record(ST_SWITCH_TO, t); |
159 | if (rec) { | 161 | if (rec) { |
160 | rec->data.switch_to.when = now(); | 162 | rec->data.switch_to.when = now(); |
@@ -169,7 +171,8 @@ feather_callback void do_sched_trace_task_switch_away(unsigned long id, | |||
169 | { | 171 | { |
170 | struct task_struct *t = (struct task_struct*) _task; | 172 | struct task_struct *t = (struct task_struct*) _task; |
171 | struct st_event_record* rec; | 173 | struct st_event_record* rec; |
172 | if (is_realtime(t)) { | 174 | //if (is_realtime(t)) /* comment out to trace EVERYTHING */ |
175 | { | ||
173 | rec = get_record(ST_SWITCH_AWAY, t); | 176 | rec = get_record(ST_SWITCH_AWAY, t); |
174 | if (rec) { | 177 | if (rec) { |
175 | rec->data.switch_away.when = now(); | 178 | rec->data.switch_away.when = now(); |
@@ -188,6 +191,9 @@ feather_callback void do_sched_trace_task_completion(unsigned long id, | |||
188 | if (rec) { | 191 | if (rec) { |
189 | rec->data.completion.when = now(); | 192 | rec->data.completion.when = now(); |
190 | rec->data.completion.forced = forced; | 193 | rec->data.completion.forced = forced; |
194 | #ifdef CONFIG_LITMUS_NVIDIA | ||
195 | rec->data.completion.nv_int_count = (u16)atomic_read(&tsk_rt(t)->nv_int_count); | ||
196 | #endif | ||
191 | put_record(rec); | 197 | put_record(rec); |
192 | } | 198 | } |
193 | } | 199 | } |
@@ -239,3 +245,215 @@ feather_callback void do_sched_trace_action(unsigned long id, | |||
239 | put_record(rec); | 245 | put_record(rec); |
240 | } | 246 | } |
241 | } | 247 | } |
248 | |||
249 | |||
250 | feather_callback void do_sched_trace_tasklet_release(unsigned long id, | ||
251 | unsigned long _owner) | ||
252 | { | ||
253 | struct task_struct *t = (struct task_struct*) _owner; | ||
254 | struct st_event_record *rec = get_record(ST_TASKLET_RELEASE, t); | ||
255 | |||
256 | if (rec) { | ||
257 | rec->data.tasklet_release.when = now(); | ||
258 | put_record(rec); | ||
259 | } | ||
260 | } | ||
261 | |||
262 | |||
263 | feather_callback void do_sched_trace_tasklet_begin(unsigned long id, | ||
264 | unsigned long _owner) | ||
265 | { | ||
266 | struct task_struct *t = (struct task_struct*) _owner; | ||
267 | struct st_event_record *rec = get_record(ST_TASKLET_BEGIN, t); | ||
268 | |||
269 | if (rec) { | ||
270 | rec->data.tasklet_begin.when = now(); | ||
271 | |||
272 | if(!in_interrupt()) | ||
273 | rec->data.tasklet_begin.exe_pid = current->pid; | ||
274 | else | ||
275 | rec->data.tasklet_begin.exe_pid = 0; | ||
276 | |||
277 | put_record(rec); | ||
278 | } | ||
279 | } | ||
280 | EXPORT_SYMBOL(do_sched_trace_tasklet_begin); | ||
281 | |||
282 | |||
283 | feather_callback void do_sched_trace_tasklet_end(unsigned long id, | ||
284 | unsigned long _owner, | ||
285 | unsigned long _flushed) | ||
286 | { | ||
287 | struct task_struct *t = (struct task_struct*) _owner; | ||
288 | struct st_event_record *rec = get_record(ST_TASKLET_END, t); | ||
289 | |||
290 | if (rec) { | ||
291 | rec->data.tasklet_end.when = now(); | ||
292 | rec->data.tasklet_end.flushed = _flushed; | ||
293 | |||
294 | if(!in_interrupt()) | ||
295 | rec->data.tasklet_end.exe_pid = current->pid; | ||
296 | else | ||
297 | rec->data.tasklet_end.exe_pid = 0; | ||
298 | |||
299 | put_record(rec); | ||
300 | } | ||
301 | } | ||
302 | EXPORT_SYMBOL(do_sched_trace_tasklet_end); | ||
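These records are emitted through the sched_trace_tasklet_begin()/sched_trace_tasklet_end() wrappers (see the external shims in litmus/sched_trace_external.c below). A hypothetical call site that brackets execution of a tasklet owned by real-time task 'owner' might look like:

sched_trace_tasklet_begin(owner);
t->func(t->data);                     /* run the deferred tasklet body */
sched_trace_tasklet_end(owner, 0ul);  /* flushed = 0: ran normally */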
303 | |||
304 | |||
305 | feather_callback void do_sched_trace_work_release(unsigned long id, | ||
306 | unsigned long _owner) | ||
307 | { | ||
308 | struct task_struct *t = (struct task_struct*) _owner; | ||
309 | struct st_event_record *rec = get_record(ST_WORK_RELEASE, t); | ||
310 | |||
311 | if (rec) { | ||
312 | rec->data.work_release.when = now(); | ||
313 | put_record(rec); | ||
314 | } | ||
315 | } | ||
316 | |||
317 | |||
318 | feather_callback void do_sched_trace_work_begin(unsigned long id, | ||
319 | unsigned long _owner, | ||
320 | unsigned long _exe) | ||
321 | { | ||
322 | struct task_struct *t = (struct task_struct*) _owner; | ||
323 | struct st_event_record *rec = get_record(ST_WORK_BEGIN, t); | ||
324 | |||
325 | if (rec) { | ||
326 | struct task_struct *exe = (struct task_struct*) _exe; | ||
327 | rec->data.work_begin.exe_pid = exe->pid; | ||
328 | rec->data.work_begin.when = now(); | ||
329 | put_record(rec); | ||
330 | } | ||
331 | } | ||
332 | EXPORT_SYMBOL(do_sched_trace_work_begin); | ||
333 | |||
334 | |||
335 | feather_callback void do_sched_trace_work_end(unsigned long id, | ||
336 | unsigned long _owner, | ||
337 | unsigned long _exe, | ||
338 | unsigned long _flushed) | ||
339 | { | ||
340 | struct task_struct *t = (struct task_struct*) _owner; | ||
341 | struct st_event_record *rec = get_record(ST_WORK_END, t); | ||
342 | |||
343 | if (rec) { | ||
344 | struct task_struct *exe = (struct task_struct*) _exe; | ||
345 | rec->data.work_end.exe_pid = exe->pid; | ||
346 | rec->data.work_end.flushed = _flushed; | ||
347 | rec->data.work_end.when = now(); | ||
348 | put_record(rec); | ||
349 | } | ||
350 | } | ||
351 | EXPORT_SYMBOL(do_sched_trace_work_end); | ||
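The work-item events mirror the tasklet events but additionally record which thread executed the deferred work. A hypothetical call site, following the argument order of the wrappers in litmus/sched_trace_external.c:

sched_trace_work_begin(owner, current);
work->func(work);                           /* run the deferred work body */
sched_trace_work_end(owner, current, 0ul);  /* flushed = 0: ran normally */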
352 | |||
353 | |||
354 | feather_callback void do_sched_trace_eff_prio_change(unsigned long id, | ||
355 | unsigned long _task, | ||
356 | unsigned long _inh) | ||
357 | { | ||
358 | struct task_struct *t = (struct task_struct*) _task; | ||
359 | struct st_event_record *rec = get_record(ST_EFF_PRIO_CHANGE, t); | ||
360 | |||
361 | if (rec) { | ||
362 | struct task_struct *inh = (struct task_struct*) _inh; | ||
363 | rec->data.effective_priority_change.when = now(); | ||
364 | rec->data.effective_priority_change.inh_pid = (inh != NULL) ? | ||
365 | inh->pid : | ||
366 | 0xffff; | ||
367 | |||
368 | put_record(rec); | ||
369 | } | ||
370 | } | ||
371 | |||
372 | /* pray for no nesting of nv interrupts on same CPU... */ | ||
373 | struct tracing_interrupt_map | ||
374 | { | ||
375 | int active; | ||
376 | int count; | ||
377 | unsigned long data[128]; // assume nesting less than 128... | ||
378 | unsigned long serial[128]; | ||
379 | }; | ||
380 | DEFINE_PER_CPU(struct tracing_interrupt_map, active_interrupt_tracing); | ||
381 | |||
382 | |||
383 | DEFINE_PER_CPU(u32, intCounter); | ||
384 | |||
385 | feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id, | ||
386 | unsigned long _device) | ||
387 | { | ||
388 | struct st_event_record *rec; | ||
389 | u32 serialNum; | ||
390 | |||
391 | { | ||
392 | u32* serial; | ||
393 | struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id()); | ||
394 | if(int_map->active == 0xcafebabe) | ||
395 | { | ||
396 | int_map->count++; | ||
397 | } | ||
398 | else | ||
399 | { | ||
400 | int_map->active = 0xcafebabe; | ||
401 | int_map->count = 1; | ||
402 | } | ||
403 | //int_map->data[int_map->count-1] = _device; | ||
404 | |||
405 | serial = &per_cpu(intCounter, smp_processor_id()); | ||
406 | *serial += num_online_cpus(); | ||
407 | serialNum = *serial; | ||
408 | int_map->serial[int_map->count-1] = serialNum; | ||
409 | } | ||
410 | |||
411 | rec = get_record(ST_NV_INTERRUPT_BEGIN, NULL); | ||
412 | if(rec) { | ||
413 | u32 device = _device; | ||
414 | rec->data.nv_interrupt_begin.when = now(); | ||
415 | rec->data.nv_interrupt_begin.device = device; | ||
416 | rec->data.nv_interrupt_begin.serialNumber = serialNum; | ||
417 | put_record(rec); | ||
418 | } | ||
419 | } | ||
420 | EXPORT_SYMBOL(do_sched_trace_nv_interrupt_begin); | ||
421 | |||
422 | /* | ||
423 | int is_interrupt_tracing_active(void) | ||
424 | { | ||
425 | struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id()); | ||
426 | if(int_map->active == 0xcafebabe) | ||
427 | return 1; | ||
428 | return 0; | ||
429 | } | ||
430 | */ | ||
431 | |||
432 | feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id, unsigned long _device) | ||
433 | { | ||
434 | struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id()); | ||
435 | if(int_map->active == 0xcafebabe) | ||
436 | { | ||
437 | struct st_event_record *rec = get_record(ST_NV_INTERRUPT_END, NULL); | ||
438 | |||
439 | int_map->count--; | ||
440 | if(int_map->count == 0) | ||
441 | int_map->active = 0; | ||
442 | |||
443 | if(rec) { | ||
444 | u32 device = _device; | ||
445 | rec->data.nv_interrupt_end.when = now(); | ||
446 | //rec->data.nv_interrupt_end.device = int_map->data[int_map->count]; | ||
447 | rec->data.nv_interrupt_end.device = device; | ||
448 | rec->data.nv_interrupt_end.serialNumber = int_map->serial[int_map->count]; | ||
449 | put_record(rec); | ||
450 | } | ||
451 | } | ||
452 | } | ||
453 | EXPORT_SYMBOL(do_sched_trace_nv_interrupt_end); | ||
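Interrupt-handling code is expected to bracket each GPU interrupt with these two events so that begin/end records can be paired via the per-CPU serial counter (which advances by num_online_cpus() per event). A hypothetical ISR-side usage, with 'device_id' standing in for the GPU number supplied by the caller:

sched_trace_nv_interrupt_begin((u32)device_id);
/* ... top-half / bottom-half processing for this device ... */
sched_trace_nv_interrupt_end((u32)device_id);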
454 | |||
455 | |||
456 | |||
457 | |||
458 | |||
459 | |||
diff --git a/litmus/sched_trace_external.c b/litmus/sched_trace_external.c new file mode 100644 index 000000000000..cf8e1d78aa77 --- /dev/null +++ b/litmus/sched_trace_external.c | |||
@@ -0,0 +1,64 @@ | |||
1 | #include <linux/module.h> | ||
2 | |||
3 | #include <litmus/trace.h> | ||
4 | #include <litmus/sched_trace.h> | ||
5 | #include <litmus/litmus.h> | ||
6 | |||
7 | void __sched_trace_tasklet_begin_external(struct task_struct* t) | ||
8 | { | ||
9 | sched_trace_tasklet_begin(t); | ||
10 | } | ||
11 | EXPORT_SYMBOL(__sched_trace_tasklet_begin_external); | ||
12 | |||
13 | void __sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed) | ||
14 | { | ||
15 | sched_trace_tasklet_end(t, flushed); | ||
16 | } | ||
17 | EXPORT_SYMBOL(__sched_trace_tasklet_end_external); | ||
18 | |||
19 | |||
20 | |||
21 | void __sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e) | ||
22 | { | ||
23 | sched_trace_work_begin(t, e); | ||
24 | } | ||
25 | EXPORT_SYMBOL(__sched_trace_work_begin_external); | ||
26 | |||
27 | void __sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f) | ||
28 | { | ||
29 | sched_trace_work_end(t, e, f); | ||
30 | } | ||
31 | EXPORT_SYMBOL(__sched_trace_work_end_external); | ||
32 | |||
33 | |||
34 | |||
35 | void __sched_trace_nv_interrupt_begin_external(u32 device) | ||
36 | { | ||
37 | //unsigned long _device = device; | ||
38 | sched_trace_nv_interrupt_begin((unsigned long)device); | ||
39 | } | ||
40 | EXPORT_SYMBOL(__sched_trace_nv_interrupt_begin_external); | ||
41 | |||
42 | void __sched_trace_nv_interrupt_end_external(u32 device) | ||
43 | { | ||
44 | //unsigned long _device = device; | ||
45 | sched_trace_nv_interrupt_end((unsigned long)device); | ||
46 | } | ||
47 | EXPORT_SYMBOL(__sched_trace_nv_interrupt_end_external); | ||
48 | |||
49 | |||
50 | #ifdef CONFIG_LITMUS_NVIDIA | ||
51 | |||
52 | #define EXX_TS(evt) \ | ||
53 | void __##evt(void) { evt; } \ | ||
54 | EXPORT_SYMBOL(__##evt); | ||
55 | |||
56 | EXX_TS(TS_NV_TOPISR_START) | ||
57 | EXX_TS(TS_NV_TOPISR_END) | ||
58 | EXX_TS(TS_NV_BOTISR_START) | ||
59 | EXX_TS(TS_NV_BOTISR_END) | ||
60 | EXX_TS(TS_NV_RELEASE_BOTISR_START) | ||
61 | EXX_TS(TS_NV_RELEASE_BOTISR_END) | ||
62 | |||
63 | #endif | ||
64 | |||
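For reference, each EXX_TS() instantiation above expands to an exported no-argument wrapper around the corresponding Feather-Trace timestamp macro, e.g.:

void __TS_NV_TOPISR_START(void) { TS_NV_TOPISR_START; }
EXPORT_SYMBOL(__TS_NV_TOPISR_START);

presumably so that code outside the core kernel (such as the NVIDIA interrupt glue) can emit these timestamps without including the timestamp macros directly.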