author | Glenn Elliott <gelliott@cs.unc.edu> | 2011-06-02 16:06:05 -0400 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2011-06-02 16:06:05 -0400 |
commit | 3d5537c160c1484e8d562b9828baf679cc53f67a (patch) | |
tree | b595364f1b0f94ac2426c8315bc5967debc7bbb0 /litmus | |
parent | 7d754596756240fa918b94cd0c3011c77a638987 (diff) |
Full patch for klitirqd with Nvidia GPU support.
Diffstat (limited to 'litmus')
-rw-r--r-- | litmus/Kconfig | 89 | ||||
-rw-r--r-- | litmus/Makefile | 4 | ||||
-rw-r--r-- | litmus/affinity.c | 49 | ||||
-rw-r--r-- | litmus/edf_common.c | 6 | ||||
-rw-r--r-- | litmus/fdso.c | 1 | ||||
-rw-r--r-- | litmus/litmus.c | 82 | ||||
-rw-r--r-- | litmus/litmus_proc.c | 17 | ||||
-rw-r--r-- | litmus/litmus_softirq.c | 1579 | ||||
-rw-r--r-- | litmus/locking.c | 1 | ||||
-rw-r--r-- | litmus/nvidia_info.c | 526 | ||||
-rw-r--r-- | litmus/preempt.c | 7 | ||||
-rw-r--r-- | litmus/sched_cedf.c | 852 | ||||
-rw-r--r-- | litmus/sched_gsn_edf.c | 756 | ||||
-rw-r--r-- | litmus/sched_litmus.c | 2 | ||||
-rw-r--r-- | litmus/sched_plugin.c | 29 | ||||
-rw-r--r-- | litmus/sched_task_trace.c | 216 | ||||
-rw-r--r-- | litmus/sched_trace_external.c | 45 |
17 files changed, 4184 insertions, 77 deletions
diff --git a/litmus/Kconfig b/litmus/Kconfig index ad8dc8308cf0..7e865d4dd703 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig | |||
@@ -62,6 +62,25 @@ config LITMUS_LOCKING | |||
62 | 62 | ||
63 | endmenu | 63 | endmenu |
64 | 64 | ||
65 | menu "Performance Enhancements" | ||
66 | |||
67 | config SCHED_CPU_AFFINITY | ||
68 | bool "Local Migration Affinity" | ||
69 | default y | ||
70 | help | ||
71 | Rescheduled tasks prefer CPUs near their previously used CPU. This | ||
72 | may improve performance through possible preservation of cache affinity. | ||
73 | |||
74 | Warning: May make bugs harder to find since tasks may migrate less often. | ||
75 | |||
76 | NOTES: | ||
77 | * Pfair/PD^2 does not support this option. | ||
78 | * Only x86 currently supported. | ||
79 | |||
80 | Say Yes if unsure. | ||
81 | |||
82 | endmenu | ||
83 | |||
65 | menu "Tracing" | 84 | menu "Tracing" |
66 | 85 | ||
67 | config FEATHER_TRACE | 86 | config FEATHER_TRACE |
@@ -182,4 +201,74 @@ config SCHED_DEBUG_TRACE_CALLER | |||
182 | 201 | ||
183 | endmenu | 202 | endmenu |
184 | 203 | ||
204 | menu "Interrupt Handling" | ||
205 | |||
206 | config LITMUS_THREAD_ALL_SOFTIRQ | ||
207 | bool "Process all softirqs in ksoftirqd threads." | ||
208 | default n | ||
209 | help | ||
210 | (Experimental) Thread all softirqs to ksoftirqd | ||
211 | daemon threads, similar to PREEMPT_RT. I/O | ||
212 | throughput will drop with this enabled, but | ||
213 | latencies due to interrupts will be reduced. | ||
214 | |||
215 | WARNING: Timer responsiveness will likely be | ||
216 | decreased as timer callbacks are also threaded. | ||
217 | This is unlike PREEMPT_RT's hardirqs. | ||
218 | |||
219 | If unsure, say No. | ||
220 | |||
221 | config LITMUS_SOFTIRQD | ||
222 | bool "Spawn klitirqd interrupt handling threads." | ||
223 | depends on LITMUS_LOCKING | ||
224 | default n | ||
225 | help | ||
226 | Create klitirqd interrupt handling threads. Work must be | ||
227 | specifically dispatched to these workers. (Softirqs for | ||
228 | Litmus tasks are not magically redirected to klitirqd.) | ||
229 | |||
230 | G-EDF ONLY for now! | ||
231 | |||
232 | If unsure, say No. | ||
233 | |||
234 | config NR_LITMUS_SOFTIRQD | ||
235 | int "Number of klitirqd." | ||
236 | depends on LITMUS_SOFTIRQD | ||
237 | range 1 4096 | ||
238 | default "1" | ||
239 | help | ||
240 | Should be <= the number of CPUs in your system. | ||
241 | |||
242 | config LITMUS_NVIDIA | ||
243 | bool "Litmus handling of NVIDIA interrupts." | ||
244 | depends on LITMUS_SOFTIRQD | ||
245 | default n | ||
246 | help | ||
247 | Direct tasklets from NVIDIA devices to Litmus's klitirqd. | ||
248 | |||
249 | If unsure, say No. | ||
250 | |||
251 | choice | ||
252 | prompt "CUDA/Driver Version Support" | ||
253 | default CUDA_4_0 | ||
254 | depends on LITMUS_NVIDIA | ||
255 | help | ||
256 | Select the version of CUDA/driver to support. | ||
257 | |||
258 | config CUDA_4_0 | ||
259 | bool "CUDA 4.0" | ||
260 | depends on LITMUS_NVIDIA | ||
261 | help | ||
262 | Support CUDA 4.0 RC2 (dev. driver version: x86_64-270.40) | ||
263 | |||
264 | config CUDA_3_2 | ||
265 | bool "CUDA 3.2" | ||
266 | depends on LITMUS_NVIDIA | ||
267 | help | ||
268 | Support CUDA 3.2 (dev. driver version: x86_64-260.24) | ||
269 | |||
270 | endchoice | ||
271 | |||
272 | endmenu | ||
273 | |||
185 | endmenu | 274 | endmenu |
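The LITMUS_SOFTIRQD help text above stresses that work must be explicitly dispatched to the klitirqd daemons. Below is a minimal, hypothetical driver-side sketch of such a dispatch (not part of this patch): it assumes the tasklet_struct::owner field this patch adds elsewhere in the tree, plus the __litmus_tasklet_schedule()/klitirqd_is_ready() interface from litmus/litmus_softirq.c further down; the device-to-daemon mapping (k_id) and the bottom-half body are placeholders.

#include <linux/interrupt.h>
#include <linux/sched.h>
#include <litmus/litmus_softirq.h>

static struct tasklet_struct my_tasklet;

static void my_bottom_half(unsigned long data)
{
	/* deferred interrupt work, executed at the owner's priority */
}

/* 'owner' must be a real-time task; 'k_id' selects one of the
 * NR_LITMUS_SOFTIRQD daemons. */
static void dispatch_to_klitirqd(struct task_struct *owner, unsigned int k_id)
{
	tasklet_init(&my_tasklet, my_bottom_half, 0);
	my_tasklet.owner = owner;  /* the klitirqd daemon inherits this task's priority */

	/* fall back to Linux's softirq path if the daemons are not up,
	 * or if the daemon rejects the tasklet (e.g. pending work objects). */
	if (!klitirqd_is_ready() || !__litmus_tasklet_schedule(&my_tasklet, k_id))
		tasklet_schedule(&my_tasklet);
}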
diff --git a/litmus/Makefile b/litmus/Makefile index ad9936e07b83..892e01c2e1b3 100644 --- a/litmus/Makefile +++ b/litmus/Makefile | |||
@@ -21,8 +21,12 @@ obj-y = sched_plugin.o litmus.o \ | |||
21 | 21 | ||
22 | obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o | 22 | obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o |
23 | obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o | 23 | obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o |
24 | obj-$(CONFIG_SCHED_CPU_AFFINITY) += affinity.o | ||
24 | 25 | ||
25 | obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o | 26 | obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o |
26 | obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o | 27 | obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o |
27 | obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o | 28 | obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o |
28 | obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o | 29 | obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o |
30 | |||
31 | obj-$(CONFIG_LITMUS_SOFTIRQD) += litmus_softirq.o | ||
32 | obj-$(CONFIG_LITMUS_NVIDIA) += nvidia_info.o sched_trace_external.o | ||
diff --git a/litmus/affinity.c b/litmus/affinity.c new file mode 100644 index 000000000000..3b430d18885b --- /dev/null +++ b/litmus/affinity.c | |||
@@ -0,0 +1,49 @@ | |||
1 | #include <linux/cpu.h> | ||
2 | |||
3 | #include <litmus/affinity.h> | ||
4 | |||
5 | struct neighborhood neigh_info[NR_CPUS]; | ||
6 | |||
7 | /* called by _init_litmus() */ | ||
8 | void init_topology(void) | ||
9 | { | ||
10 | int cpu; | ||
11 | int i; | ||
12 | int chk; | ||
13 | int depth = num_cache_leaves; | ||
14 | |||
15 | if(depth > NUM_CACHE_LEVELS) | ||
16 | depth = NUM_CACHE_LEVELS; | ||
17 | |||
18 | for_each_online_cpu(cpu) | ||
19 | { | ||
20 | for(i = 0; i < depth; ++i) | ||
21 | { | ||
22 | long unsigned int firstbits; | ||
23 | |||
24 | chk = get_shared_cpu_map((struct cpumask *)&neigh_info[cpu].neighbors[i], cpu, i); | ||
25 | if(chk) /* failed */ | ||
26 | { | ||
27 | neigh_info[cpu].size[i] = 0; | ||
28 | } | ||
29 | else | ||
30 | { | ||
31 | /* size = num bits in mask */ | ||
32 | neigh_info[cpu].size[i] = cpumask_weight((struct cpumask *)&neigh_info[cpu].neighbors[i]); | ||
33 | } | ||
34 | firstbits = *neigh_info[cpu].neighbors[i]->bits; | ||
35 | printk("CPU %d has %d neighbors at level %d. (mask = %lx)\n", | ||
36 | cpu, neigh_info[cpu].size[i], i, firstbits); | ||
37 | } | ||
38 | |||
39 | /* set data for non-existent levels */ | ||
40 | for(; i < NUM_CACHE_LEVELS; ++i) | ||
41 | { | ||
42 | neigh_info[cpu].size[i] = 0; | ||
43 | |||
44 | printk("CPU %d has %d neighbors at level %d. (mask = %lx)\n", | ||
45 | cpu, neigh_info[cpu].size[i], i, 0lu); | ||
46 | } | ||
47 | } | ||
48 | } | ||
49 | |||
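For context, here is a hypothetical consumer of the neigh_info table built above (not part of this patch). It walks the recorded cache levels from closest to farthest and returns the first neighboring CPU accepted by a caller-supplied predicate; it assumes litmus/affinity.h exports struct neighborhood, NUM_CACHE_LEVELS, and the neigh_info array defined in this file.

#include <linux/cpumask.h>
#include <litmus/affinity.h>

typedef int (*cpu_ok_t)(int cpu);

/* Returns a CPU that shares a cache with 'last_cpu' and satisfies 'cpu_ok',
 * or -1 if no such neighbor exists. */
static int find_nearby_cpu(int last_cpu, cpu_ok_t cpu_ok)
{
	int level, cpu;

	for (level = 0; level < NUM_CACHE_LEVELS; ++level) {
		struct neighborhood *n = &neigh_info[last_cpu];

		if (n->size[level] == 0)
			continue;  /* this cache level is not present */

		for_each_cpu(cpu, (struct cpumask *)&n->neighbors[level]) {
			if (cpu != last_cpu && cpu_ok(cpu))
				return cpu;
		}
	}
	return -1;  /* no suitable neighbor; caller may pick any CPU */
}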
diff --git a/litmus/edf_common.c b/litmus/edf_common.c index 9b44dc2d8d1e..fbd67ab5f467 100644 --- a/litmus/edf_common.c +++ b/litmus/edf_common.c | |||
@@ -65,6 +65,12 @@ int edf_higher_prio(struct task_struct* first, | |||
65 | 65 | ||
66 | 66 | ||
67 | return !is_realtime(second_task) || | 67 | return !is_realtime(second_task) || |
68 | |||
69 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
70 | /* proxy threads always lose w/o inheritance. */ | ||
71 | (first_task->rt_param.is_proxy_thread < | ||
72 | second_task->rt_param.is_proxy_thread) || | ||
73 | #endif | ||
68 | 74 | ||
69 | /* is the deadline of the first task earlier? | 75 | /* is the deadline of the first task earlier? |
70 | * Then it has higher priority. | 76 | * Then it has higher priority. |
diff --git a/litmus/fdso.c b/litmus/fdso.c index aa7b384264e3..2b7f9ba85857 100644 --- a/litmus/fdso.c +++ b/litmus/fdso.c | |||
@@ -22,6 +22,7 @@ extern struct fdso_ops generic_lock_ops; | |||
22 | 22 | ||
23 | static const struct fdso_ops* fdso_ops[] = { | 23 | static const struct fdso_ops* fdso_ops[] = { |
24 | &generic_lock_ops, /* FMLP_SEM */ | 24 | &generic_lock_ops, /* FMLP_SEM */ |
25 | &generic_lock_ops, /* KFMLP_SEM */ | ||
25 | &generic_lock_ops, /* SRP_SEM */ | 26 | &generic_lock_ops, /* SRP_SEM */ |
26 | }; | 27 | }; |
27 | 28 | ||
diff --git a/litmus/litmus.c b/litmus/litmus.c index 26938acacafc..29363c6ad565 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -17,6 +17,14 @@ | |||
17 | #include <litmus/litmus_proc.h> | 17 | #include <litmus/litmus_proc.h> |
18 | #include <litmus/sched_trace.h> | 18 | #include <litmus/sched_trace.h> |
19 | 19 | ||
20 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
21 | #include <litmus/affinity.h> | ||
22 | #endif | ||
23 | |||
24 | #ifdef CONFIG_LITMUS_NVIDIA | ||
25 | #include <litmus/nvidia_info.h> | ||
26 | #endif | ||
27 | |||
20 | /* Number of RT tasks that exist in the system */ | 28 | /* Number of RT tasks that exist in the system */ |
21 | atomic_t rt_task_count = ATOMIC_INIT(0); | 29 | atomic_t rt_task_count = ATOMIC_INIT(0); |
22 | static DEFINE_RAW_SPINLOCK(task_transition_lock); | 30 | static DEFINE_RAW_SPINLOCK(task_transition_lock); |
@@ -47,6 +55,28 @@ void bheap_node_free(struct bheap_node* hn) | |||
47 | struct release_heap* release_heap_alloc(int gfp_flags); | 55 | struct release_heap* release_heap_alloc(int gfp_flags); |
48 | void release_heap_free(struct release_heap* rh); | 56 | void release_heap_free(struct release_heap* rh); |
49 | 57 | ||
58 | #ifdef CONFIG_LITMUS_NVIDIA | ||
59 | /* | ||
60 | * sys_register_nv_device | ||
61 | @nv_device_id: The Nvidia device id that the task wants to register | ||
62 | * @reg_action: set to '1' to register the specified device. zero otherwise. | ||
63 | Syscall for registering the task's designated Nvidia device in the NV_DEVICE_REG array. | ||
64 | Returns EFAULT if nv_device_id is out of range, | ||
65 | 0 on success. | ||
66 | */ | ||
67 | asmlinkage long sys_register_nv_device(int nv_device_id, int reg_action) | ||
68 | { | ||
69 | /* register the device to caller (aka 'current') */ | ||
70 | return(reg_nv_device(nv_device_id, reg_action)); | ||
71 | } | ||
72 | #else | ||
73 | asmlinkage long sys_register_nv_device(int nv_device_id, int reg_action) | ||
74 | { | ||
75 | return(-EINVAL); | ||
76 | } | ||
77 | #endif | ||
78 | |||
79 | |||
50 | /* | 80 | /* |
51 | * sys_set_task_rt_param | 81 | * sys_set_task_rt_param |
52 | * @pid: Pid of the task which scheduling parameters must be changed | 82 | * @pid: Pid of the task which scheduling parameters must be changed |
@@ -115,7 +145,7 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param) | |||
115 | tp.cls != RT_CLASS_BEST_EFFORT) | 145 | tp.cls != RT_CLASS_BEST_EFFORT) |
116 | { | 146 | { |
117 | printk(KERN_INFO "litmus: real-time task %d rejected " | 147 | printk(KERN_INFO "litmus: real-time task %d rejected " |
118 | "because its class is invalid\n"); | 148 | "because its class is invalid\n", pid); |
119 | goto out_unlock; | 149 | goto out_unlock; |
120 | } | 150 | } |
121 | if (tp.budget_policy != NO_ENFORCEMENT && | 151 | if (tp.budget_policy != NO_ENFORCEMENT && |
@@ -131,6 +161,22 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param) | |||
131 | 161 | ||
132 | target->rt_param.task_params = tp; | 162 | target->rt_param.task_params = tp; |
133 | 163 | ||
164 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
165 | /* proxy thread off by default */ | ||
166 | target->rt_param.is_proxy_thread = 0; | ||
167 | target->rt_param.cur_klitirqd = NULL; | ||
168 | //init_MUTEX(&target->rt_param.klitirqd_sem); | ||
169 | mutex_init(&target->rt_param.klitirqd_sem); | ||
170 | //init_completion(&target->rt_param.klitirqd_sem); | ||
171 | //target->rt_param.klitirqd_sem_stat = NOT_HELD; | ||
172 | atomic_set(&target->rt_param.klitirqd_sem_stat, NOT_HELD); | ||
173 | #endif | ||
174 | |||
175 | #ifdef CONFIG_LITMUS_NVIDIA | ||
176 | atomic_set(&target->rt_param.nv_int_count, 0); | ||
177 | #endif | ||
178 | |||
179 | |||
134 | retval = 0; | 180 | retval = 0; |
135 | out_unlock: | 181 | out_unlock: |
136 | read_unlock_irq(&tasklist_lock); | 182 | read_unlock_irq(&tasklist_lock); |
@@ -265,6 +311,7 @@ asmlinkage long sys_query_job_no(unsigned int __user *job) | |||
265 | return retval; | 311 | return retval; |
266 | } | 312 | } |
267 | 313 | ||
314 | |||
268 | /* sys_null_call() is only used for determining raw system call | 315 | /* sys_null_call() is only used for determining raw system call |
269 | * overheads (kernel entry, kernel exit). It has no useful side effects. | 316 | * overheads (kernel entry, kernel exit). It has no useful side effects. |
270 | * If ts is non-NULL, then the current Feather-Trace time is recorded. | 317 | * If ts is non-NULL, then the current Feather-Trace time is recorded. |
@@ -278,7 +325,7 @@ asmlinkage long sys_null_call(cycles_t __user *ts) | |||
278 | now = get_cycles(); | 325 | now = get_cycles(); |
279 | ret = put_user(now, ts); | 326 | ret = put_user(now, ts); |
280 | } | 327 | } |
281 | 328 | ||
282 | return ret; | 329 | return ret; |
283 | } | 330 | } |
284 | 331 | ||
@@ -299,6 +346,20 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
299 | * at this point in time. | 346 | * at this point in time. |
300 | */ | 347 | */ |
301 | WARN_ON(p->rt_param.inh_task); | 348 | WARN_ON(p->rt_param.inh_task); |
349 | |||
350 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
351 | /* We probably should not have any tasklets executing for | ||
352 | * us at this time. | ||
353 | */ | ||
354 | WARN_ON(p->rt_param.cur_klitirqd); | ||
355 | WARN_ON(atomic_read(&p->rt_param.klitirqd_sem_stat) == HELD); | ||
356 | |||
357 | if(p->rt_param.cur_klitirqd) | ||
358 | flush_pending(p->rt_param.cur_klitirqd, p); | ||
359 | |||
360 | if(atomic_read(&p->rt_param.klitirqd_sem_stat) == HELD) | ||
361 | up_and_set_stat(p, NOT_HELD, &p->rt_param.klitirqd_sem); | ||
362 | #endif | ||
302 | 363 | ||
303 | /* Cleanup everything else. */ | 364 | /* Cleanup everything else. */ |
304 | memset(&p->rt_param, 0, sizeof(p->rt_param)); | 365 | memset(&p->rt_param, 0, sizeof(p->rt_param)); |
@@ -399,7 +460,7 @@ static void synch_on_plugin_switch(void* info) | |||
399 | */ | 460 | */ |
400 | int switch_sched_plugin(struct sched_plugin* plugin) | 461 | int switch_sched_plugin(struct sched_plugin* plugin) |
401 | { | 462 | { |
402 | unsigned long flags; | 463 | //unsigned long flags; |
403 | int ret = 0; | 464 | int ret = 0; |
404 | 465 | ||
405 | BUG_ON(!plugin); | 466 | BUG_ON(!plugin); |
@@ -413,8 +474,15 @@ int switch_sched_plugin(struct sched_plugin* plugin) | |||
413 | while (atomic_read(&cannot_use_plugin) < num_online_cpus()) | 474 | while (atomic_read(&cannot_use_plugin) < num_online_cpus()) |
414 | cpu_relax(); | 475 | cpu_relax(); |
415 | 476 | ||
477 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
478 | if(!klitirqd_is_dead()) | ||
479 | { | ||
480 | kill_klitirqd(); | ||
481 | } | ||
482 | #endif | ||
483 | |||
416 | /* stop task transitions */ | 484 | /* stop task transitions */ |
417 | raw_spin_lock_irqsave(&task_transition_lock, flags); | 485 | //raw_spin_lock_irqsave(&task_transition_lock, flags); |
418 | 486 | ||
419 | /* don't switch if there are active real-time tasks */ | 487 | /* don't switch if there are active real-time tasks */ |
420 | if (atomic_read(&rt_task_count) == 0) { | 488 | if (atomic_read(&rt_task_count) == 0) { |
@@ -432,7 +500,7 @@ int switch_sched_plugin(struct sched_plugin* plugin) | |||
432 | } else | 500 | } else |
433 | ret = -EBUSY; | 501 | ret = -EBUSY; |
434 | out: | 502 | out: |
435 | raw_spin_unlock_irqrestore(&task_transition_lock, flags); | 503 | //raw_spin_unlock_irqrestore(&task_transition_lock, flags); |
436 | atomic_set(&cannot_use_plugin, 0); | 504 | atomic_set(&cannot_use_plugin, 0); |
437 | return ret; | 505 | return ret; |
438 | } | 506 | } |
@@ -540,6 +608,10 @@ static int __init _init_litmus(void) | |||
540 | 608 | ||
541 | init_litmus_proc(); | 609 | init_litmus_proc(); |
542 | 610 | ||
611 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
612 | init_topology(); | ||
613 | #endif | ||
614 | |||
543 | return 0; | 615 | return 0; |
544 | } | 616 | } |
545 | 617 | ||
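A hypothetical user-space sketch of the sys_register_nv_device() syscall added above (not part of this patch). __NR_register_nv_device is a placeholder, since the actual syscall number lives in the LITMUS^RT syscall headers rather than in this diff, and reg_action == 0 presumably drops the registration ("zero otherwise" in the comment above).

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_register_nv_device
#define __NR_register_nv_device 0  /* placeholder; see the LITMUS^RT unistd headers */
#endif

int main(void)
{
	int gpu = 0;
	long ret;

	/* reg_action = 1: register GPU 0 to the calling task */
	ret = syscall(__NR_register_nv_device, gpu, 1);
	if (ret != 0)
		fprintf(stderr, "register_nv_device failed: %ld\n", ret);

	/* ... launch GPU work here ... */

	/* reg_action = 0: presumably releases the registration when done */
	syscall(__NR_register_nv_device, gpu, 0);
	return 0;
}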
diff --git a/litmus/litmus_proc.c b/litmus/litmus_proc.c index 4bf725a36c9c..381513366c7a 100644 --- a/litmus/litmus_proc.c +++ b/litmus/litmus_proc.c | |||
@@ -20,11 +20,18 @@ static struct proc_dir_entry *litmus_dir = NULL, | |||
20 | #ifdef CONFIG_RELEASE_MASTER | 20 | #ifdef CONFIG_RELEASE_MASTER |
21 | *release_master_file = NULL, | 21 | *release_master_file = NULL, |
22 | #endif | 22 | #endif |
23 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
24 | *klitirqd_file = NULL, | ||
25 | #endif | ||
23 | *plugs_file = NULL; | 26 | *plugs_file = NULL; |
24 | 27 | ||
25 | /* in litmus/sync.c */ | 28 | /* in litmus/sync.c */ |
26 | int count_tasks_waiting_for_release(void); | 29 | int count_tasks_waiting_for_release(void); |
27 | 30 | ||
31 | extern int proc_read_klitirqd_stats(char *page, char **start, | ||
32 | off_t off, int count, | ||
33 | int *eof, void *data); | ||
34 | |||
28 | static int proc_read_stats(char *page, char **start, | 35 | static int proc_read_stats(char *page, char **start, |
29 | off_t off, int count, | 36 | off_t off, int count, |
30 | int *eof, void *data) | 37 | int *eof, void *data) |
@@ -161,6 +168,12 @@ int __init init_litmus_proc(void) | |||
161 | release_master_file->write_proc = proc_write_release_master; | 168 | release_master_file->write_proc = proc_write_release_master; |
162 | #endif | 169 | #endif |
163 | 170 | ||
171 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
172 | klitirqd_file = | ||
173 | create_proc_read_entry("klitirqd_stats", 0444, litmus_dir, | ||
174 | proc_read_klitirqd_stats, NULL); | ||
175 | #endif | ||
176 | |||
164 | stat_file = create_proc_read_entry("stats", 0444, litmus_dir, | 177 | stat_file = create_proc_read_entry("stats", 0444, litmus_dir, |
165 | proc_read_stats, NULL); | 178 | proc_read_stats, NULL); |
166 | 179 | ||
@@ -187,6 +200,10 @@ void exit_litmus_proc(void) | |||
187 | remove_proc_entry("stats", litmus_dir); | 200 | remove_proc_entry("stats", litmus_dir); |
188 | if (curr_file) | 201 | if (curr_file) |
189 | remove_proc_entry("active_plugin", litmus_dir); | 202 | remove_proc_entry("active_plugin", litmus_dir); |
203 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
204 | if (klitirqd_file) | ||
205 | remove_proc_entry("klitirqd_stats", litmus_dir); | ||
206 | #endif | ||
190 | #ifdef CONFIG_RELEASE_MASTER | 207 | #ifdef CONFIG_RELEASE_MASTER |
191 | if (release_master_file) | 208 | if (release_master_file) |
192 | remove_proc_entry("release_master", litmus_dir); | 209 | remove_proc_entry("release_master", litmus_dir); |
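A small, hypothetical user-space sketch (not part of this patch) that dumps the /proc/litmus/klitirqd_stats entry registered above; its contents are produced by proc_read_klitirqd_stats() in litmus/litmus_softirq.c below.

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/litmus/klitirqd_stats", "r");

	if (!f) {
		perror("klitirqd_stats (is CONFIG_LITMUS_SOFTIRQD enabled?)");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);  /* per-daemon owner and pending counts */
	fclose(f);
	return 0;
}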
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c new file mode 100644 index 000000000000..271e770dbaea --- /dev/null +++ b/litmus/litmus_softirq.c | |||
@@ -0,0 +1,1579 @@ | |||
1 | #include <linux/interrupt.h> | ||
2 | #include <linux/percpu.h> | ||
3 | #include <linux/cpu.h> | ||
4 | #include <linux/kthread.h> | ||
5 | #include <linux/ftrace.h> | ||
6 | #include <linux/smp.h> | ||
7 | #include <linux/slab.h> | ||
8 | #include <linux/mutex.h> | ||
9 | |||
10 | #include <linux/sched.h> | ||
11 | #include <linux/cpuset.h> | ||
12 | |||
13 | #include <litmus/litmus.h> | ||
14 | #include <litmus/sched_trace.h> | ||
15 | #include <litmus/jobs.h> | ||
16 | #include <litmus/sched_plugin.h> | ||
17 | #include <litmus/litmus_softirq.h> | ||
18 | |||
19 | /* TODO: Remove unneeded mb() and other barriers. */ | ||
20 | |||
21 | |||
22 | /* counts number of daemons ready to handle litmus irqs. */ | ||
23 | static atomic_t num_ready_klitirqds = ATOMIC_INIT(0); | ||
24 | |||
25 | enum pending_flags | ||
26 | { | ||
27 | LIT_TASKLET_LOW = 0x1, | ||
28 | LIT_TASKLET_HI = LIT_TASKLET_LOW<<1, | ||
29 | LIT_WORK = LIT_TASKLET_HI<<1 | ||
30 | }; | ||
31 | |||
32 | /* only support tasklet processing for now. */ | ||
33 | struct tasklet_head | ||
34 | { | ||
35 | struct tasklet_struct *head; | ||
36 | struct tasklet_struct **tail; | ||
37 | }; | ||
38 | |||
39 | struct klitirqd_info | ||
40 | { | ||
41 | struct task_struct* klitirqd; | ||
42 | struct task_struct* current_owner; | ||
43 | int terminating; | ||
44 | |||
45 | |||
46 | raw_spinlock_t lock; | ||
47 | |||
48 | u32 pending; | ||
49 | atomic_t num_hi_pending; | ||
50 | atomic_t num_low_pending; | ||
51 | atomic_t num_work_pending; | ||
52 | |||
53 | /* in order of priority */ | ||
54 | struct tasklet_head pending_tasklets_hi; | ||
55 | struct tasklet_head pending_tasklets; | ||
56 | struct list_head worklist; | ||
57 | }; | ||
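/* Added for clarity (not in the original patch): 'pending' is a bitmask of
 * the pending_flags above, and 'current_owner' is the real-time task whose
 * priority this daemon currently inherits while it serves that task's
 * tasklets and work objects. */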
58 | |||
59 | /* one list for each klitirqd */ | ||
60 | static struct klitirqd_info klitirqds[NR_LITMUS_SOFTIRQD]; | ||
61 | |||
62 | |||
63 | |||
64 | |||
65 | |||
66 | int proc_read_klitirqd_stats(char *page, char **start, | ||
67 | off_t off, int count, | ||
68 | int *eof, void *data) | ||
69 | { | ||
70 | int len = snprintf(page, PAGE_SIZE, | ||
71 | "num ready klitirqds: %d\n\n", | ||
72 | atomic_read(&num_ready_klitirqds)); | ||
73 | |||
74 | if(klitirqd_is_ready()) | ||
75 | { | ||
76 | int i; | ||
77 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
78 | { | ||
79 | len += | ||
80 | snprintf(page + len - 1, PAGE_SIZE, /* -1 to strip off \0 */ | ||
81 | "klitirqd_th%d: %s/%d\n" | ||
82 | "\tcurrent_owner: %s/%d\n" | ||
83 | "\tpending: %x\n" | ||
84 | "\tnum hi: %d\n" | ||
85 | "\tnum low: %d\n" | ||
86 | "\tnum work: %d\n\n", | ||
87 | i, | ||
88 | klitirqds[i].klitirqd->comm, klitirqds[i].klitirqd->pid, | ||
89 | (klitirqds[i].current_owner != NULL) ? | ||
90 | klitirqds[i].current_owner->comm : "(null)", | ||
91 | (klitirqds[i].current_owner != NULL) ? | ||
92 | klitirqds[i].current_owner->pid : 0, | ||
93 | klitirqds[i].pending, | ||
94 | atomic_read(&klitirqds[i].num_hi_pending), | ||
95 | atomic_read(&klitirqds[i].num_low_pending), | ||
96 | atomic_read(&klitirqds[i].num_work_pending)); | ||
97 | } | ||
98 | } | ||
99 | |||
100 | return(len); | ||
101 | } | ||
102 | |||
103 | |||
104 | |||
105 | |||
106 | |||
107 | #if 0 | ||
108 | static atomic_t dump_id = ATOMIC_INIT(0); | ||
109 | |||
110 | static void __dump_state(struct klitirqd_info* which, const char* caller) | ||
111 | { | ||
112 | struct tasklet_struct* list; | ||
113 | |||
114 | int id = atomic_inc_return(&dump_id); | ||
115 | |||
116 | //if(in_interrupt()) | ||
117 | { | ||
118 | if(which->current_owner) | ||
119 | { | ||
120 | TRACE("(id: %d caller: %s)\n" | ||
121 | "klitirqd: %s/%d\n" | ||
122 | "current owner: %s/%d\n" | ||
123 | "pending: %x\n", | ||
124 | id, caller, | ||
125 | which->klitirqd->comm, which->klitirqd->pid, | ||
126 | which->current_owner->comm, which->current_owner->pid, | ||
127 | which->pending); | ||
128 | } | ||
129 | else | ||
130 | { | ||
131 | TRACE("(id: %d caller: %s)\n" | ||
132 | "klitirqd: %s/%d\n" | ||
133 | "current owner: %p\n" | ||
134 | "pending: %x\n", | ||
135 | id, caller, | ||
136 | which->klitirqd->comm, which->klitirqd->pid, | ||
137 | NULL, | ||
138 | which->pending); | ||
139 | } | ||
140 | |||
141 | list = which->pending_tasklets.head; | ||
142 | while(list) | ||
143 | { | ||
144 | struct tasklet_struct *t = list; | ||
145 | list = list->next; /* advance */ | ||
146 | if(t->owner) | ||
147 | TRACE("(id: %d caller: %s) Tasklet: %x, Owner = %s/%d\n", id, caller, t, t->owner->comm, t->owner->pid); | ||
148 | else | ||
149 | TRACE("(id: %d caller: %s) Tasklet: %x, Owner = %p\n", id, caller, t, NULL); | ||
150 | } | ||
151 | } | ||
152 | } | ||
153 | |||
154 | static void dump_state(struct klitirqd_info* which, const char* caller) | ||
155 | { | ||
156 | unsigned long flags; | ||
157 | |||
158 | raw_spin_lock_irqsave(&which->lock, flags); | ||
159 | __dump_state(which, caller); | ||
160 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
161 | } | ||
162 | #endif | ||
163 | |||
164 | |||
165 | /* forward declarations */ | ||
166 | static void ___litmus_tasklet_schedule(struct tasklet_struct *t, | ||
167 | struct klitirqd_info *which, | ||
168 | int wakeup); | ||
169 | static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t, | ||
170 | struct klitirqd_info *which, | ||
171 | int wakeup); | ||
172 | static void ___litmus_schedule_work(struct work_struct *w, | ||
173 | struct klitirqd_info *which, | ||
174 | int wakeup); | ||
175 | |||
176 | |||
177 | |||
178 | inline unsigned int klitirqd_id(struct task_struct* tsk) | ||
179 | { | ||
180 | int i; | ||
181 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
182 | { | ||
183 | if(klitirqds[i].klitirqd == tsk) | ||
184 | { | ||
185 | return i; | ||
186 | } | ||
187 | } | ||
188 | |||
189 | BUG(); | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | |||
195 | inline static u32 litirq_pending_hi_irqoff(struct klitirqd_info* which) | ||
196 | { | ||
197 | return (which->pending & LIT_TASKLET_HI); | ||
198 | } | ||
199 | |||
200 | inline static u32 litirq_pending_low_irqoff(struct klitirqd_info* which) | ||
201 | { | ||
202 | return (which->pending & LIT_TASKLET_LOW); | ||
203 | } | ||
204 | |||
205 | inline static u32 litirq_pending_work_irqoff(struct klitirqd_info* which) | ||
206 | { | ||
207 | return (which->pending & LIT_WORK); | ||
208 | } | ||
209 | |||
210 | inline static u32 litirq_pending_irqoff(struct klitirqd_info* which) | ||
211 | { | ||
212 | return(which->pending); | ||
213 | } | ||
214 | |||
215 | |||
216 | inline static u32 litirq_pending(struct klitirqd_info* which) | ||
217 | { | ||
218 | unsigned long flags; | ||
219 | u32 pending; | ||
220 | |||
221 | raw_spin_lock_irqsave(&which->lock, flags); | ||
222 | pending = litirq_pending_irqoff(which); | ||
223 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
224 | |||
225 | return pending; | ||
226 | }; | ||
227 | |||
228 | inline static u32 litirq_pending_with_owner(struct klitirqd_info* which, struct task_struct* owner) | ||
229 | { | ||
230 | unsigned long flags; | ||
231 | u32 pending; | ||
232 | |||
233 | raw_spin_lock_irqsave(&which->lock, flags); | ||
234 | pending = litirq_pending_irqoff(which); | ||
235 | if(pending) | ||
236 | { | ||
237 | if(which->current_owner != owner) | ||
238 | { | ||
239 | pending = 0; // owner switch! | ||
240 | } | ||
241 | } | ||
242 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
243 | |||
244 | return pending; | ||
245 | } | ||
246 | |||
247 | |||
248 | inline static u32 litirq_pending_and_sem_and_owner(struct klitirqd_info* which, | ||
249 | struct mutex** sem, | ||
250 | struct task_struct** t) | ||
251 | { | ||
252 | unsigned long flags; | ||
253 | u32 pending; | ||
254 | |||
255 | /* init values */ | ||
256 | *sem = NULL; | ||
257 | *t = NULL; | ||
258 | |||
259 | raw_spin_lock_irqsave(&which->lock, flags); | ||
260 | |||
261 | pending = litirq_pending_irqoff(which); | ||
262 | if(pending) | ||
263 | { | ||
264 | if(which->current_owner != NULL) | ||
265 | { | ||
266 | *t = which->current_owner; | ||
267 | *sem = &tsk_rt(which->current_owner)->klitirqd_sem; | ||
268 | } | ||
269 | else | ||
270 | { | ||
271 | BUG(); | ||
272 | } | ||
273 | } | ||
274 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
275 | |||
276 | if(likely(*sem)) | ||
277 | { | ||
278 | return pending; | ||
279 | } | ||
280 | else | ||
281 | { | ||
282 | return 0; | ||
283 | } | ||
284 | } | ||
285 | |||
286 | /* returns true if the next piece of work to do is from a different owner. | ||
287 | */ | ||
288 | static int tasklet_ownership_change( | ||
289 | struct klitirqd_info* which, | ||
290 | enum pending_flags taskletQ) | ||
291 | { | ||
292 | /* this function doesn't have to look at work objects since they have | ||
293 | priority below tasklets. */ | ||
294 | |||
295 | unsigned long flags; | ||
296 | int ret = 0; | ||
297 | |||
298 | raw_spin_lock_irqsave(&which->lock, flags); | ||
299 | |||
300 | switch(taskletQ) | ||
301 | { | ||
302 | case LIT_TASKLET_HI: | ||
303 | if(litirq_pending_hi_irqoff(which)) | ||
304 | { | ||
305 | ret = (which->pending_tasklets_hi.head->owner != | ||
306 | which->current_owner); | ||
307 | } | ||
308 | break; | ||
309 | case LIT_TASKLET_LOW: | ||
310 | if(litirq_pending_low_irqoff(which)) | ||
311 | { | ||
312 | ret = (which->pending_tasklets.head->owner != | ||
313 | which->current_owner); | ||
314 | } | ||
315 | break; | ||
316 | default: | ||
317 | break; | ||
318 | } | ||
319 | |||
320 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
321 | |||
322 | TRACE_TASK(which->klitirqd, "ownership change needed: %d\n", ret); | ||
323 | |||
324 | return ret; | ||
325 | } | ||
326 | |||
327 | |||
328 | static void __reeval_prio(struct klitirqd_info* which) | ||
329 | { | ||
330 | struct task_struct* next_owner = NULL; | ||
331 | struct task_struct* klitirqd = which->klitirqd; | ||
332 | |||
333 | /* Check in prio-order */ | ||
334 | u32 pending = litirq_pending_irqoff(which); | ||
335 | |||
336 | //__dump_state(which, "__reeval_prio: before"); | ||
337 | |||
338 | if(pending) | ||
339 | { | ||
340 | if(pending & LIT_TASKLET_HI) | ||
341 | { | ||
342 | next_owner = which->pending_tasklets_hi.head->owner; | ||
343 | } | ||
344 | else if(pending & LIT_TASKLET_LOW) | ||
345 | { | ||
346 | next_owner = which->pending_tasklets.head->owner; | ||
347 | } | ||
348 | else if(pending & LIT_WORK) | ||
349 | { | ||
350 | struct work_struct* work = | ||
351 | list_first_entry(&which->worklist, struct work_struct, entry); | ||
352 | next_owner = work->owner; | ||
353 | } | ||
354 | } | ||
355 | |||
356 | if(next_owner != which->current_owner) | ||
357 | { | ||
358 | struct task_struct* old_owner = which->current_owner; | ||
359 | |||
360 | /* bind the next owner. */ | ||
361 | which->current_owner = next_owner; | ||
362 | mb(); | ||
363 | |||
364 | if(next_owner != NULL) | ||
365 | { | ||
366 | if(!in_interrupt()) | ||
367 | { | ||
368 | TRACE_CUR("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__, | ||
369 | ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->comm, | ||
370 | ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->pid, | ||
371 | next_owner->comm, next_owner->pid); | ||
372 | } | ||
373 | else | ||
374 | { | ||
375 | TRACE("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__, | ||
376 | ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->comm, | ||
377 | ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->pid, | ||
378 | next_owner->comm, next_owner->pid); | ||
379 | } | ||
380 | |||
381 | litmus->set_prio_inh_klitirqd(klitirqd, old_owner, next_owner); | ||
382 | } | ||
383 | else | ||
384 | { | ||
385 | if(likely(!in_interrupt())) | ||
386 | { | ||
387 | TRACE_CUR("%s: Ownership change: %s/%d to NULL (reverting)\n", | ||
388 | __FUNCTION__, klitirqd->comm, klitirqd->pid); | ||
389 | } | ||
390 | else | ||
391 | { | ||
392 | // is this a bug? | ||
393 | TRACE("%s: Ownership change: %s/%d to NULL (reverting)\n", | ||
394 | __FUNCTION__, klitirqd->comm, klitirqd->pid); | ||
395 | } | ||
396 | |||
397 | BUG_ON(pending != 0); | ||
398 | litmus->clear_prio_inh_klitirqd(klitirqd, old_owner); | ||
399 | } | ||
400 | } | ||
401 | |||
402 | //__dump_state(which, "__reeval_prio: after"); | ||
403 | } | ||
404 | |||
405 | static void reeval_prio(struct klitirqd_info* which) | ||
406 | { | ||
407 | unsigned long flags; | ||
408 | |||
409 | raw_spin_lock_irqsave(&which->lock, flags); | ||
410 | __reeval_prio(which); | ||
411 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
412 | } | ||
413 | |||
414 | |||
415 | static void wakeup_litirqd_locked(struct klitirqd_info* which) | ||
416 | { | ||
417 | /* Interrupts are disabled: no need to stop preemption */ | ||
418 | if (which && which->klitirqd) | ||
419 | { | ||
420 | __reeval_prio(which); /* configure the proper priority */ | ||
421 | |||
422 | if(which->klitirqd->state != TASK_RUNNING) | ||
423 | { | ||
424 | TRACE("%s: Waking up klitirqd: %s/%d\n", __FUNCTION__, | ||
425 | which->klitirqd->comm, which->klitirqd->pid); | ||
426 | |||
427 | wake_up_process(which->klitirqd); | ||
428 | } | ||
429 | } | ||
430 | } | ||
431 | |||
432 | |||
433 | static void do_lit_tasklet(struct klitirqd_info* which, | ||
434 | struct tasklet_head* pending_tasklets) | ||
435 | { | ||
436 | unsigned long flags; | ||
437 | struct tasklet_struct *list; | ||
438 | atomic_t* count; | ||
439 | |||
440 | raw_spin_lock_irqsave(&which->lock, flags); | ||
441 | |||
442 | //__dump_state(which, "do_lit_tasklet: before steal"); | ||
443 | |||
444 | /* copy out the tasklets for our private use. */ | ||
445 | list = pending_tasklets->head; | ||
446 | pending_tasklets->head = NULL; | ||
447 | pending_tasklets->tail = &pending_tasklets->head; | ||
448 | |||
449 | /* remove pending flag */ | ||
450 | which->pending &= (pending_tasklets == &which->pending_tasklets) ? | ||
451 | ~LIT_TASKLET_LOW : | ||
452 | ~LIT_TASKLET_HI; | ||
453 | |||
454 | count = (pending_tasklets == &which->pending_tasklets) ? | ||
455 | &which->num_low_pending: | ||
456 | &which->num_hi_pending; | ||
457 | |||
458 | //__dump_state(which, "do_lit_tasklet: after steal"); | ||
459 | |||
460 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
461 | |||
462 | |||
463 | while(list) | ||
464 | { | ||
465 | struct tasklet_struct *t = list; | ||
466 | |||
467 | /* advance, lest we forget */ | ||
468 | list = list->next; | ||
469 | |||
470 | /* execute tasklet if it has my priority and is free */ | ||
471 | if ((t->owner == which->current_owner) && tasklet_trylock(t)) { | ||
472 | if (!atomic_read(&t->count)) { | ||
473 | if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) | ||
474 | { | ||
475 | BUG(); | ||
476 | } | ||
477 | TRACE_CUR("%s: Invoking tasklet.\n", __FUNCTION__); | ||
478 | t->func(t->data); | ||
479 | tasklet_unlock(t); | ||
480 | |||
481 | atomic_dec(count); | ||
482 | |||
483 | continue; /* process more tasklets */ | ||
484 | } | ||
485 | tasklet_unlock(t); | ||
486 | } | ||
487 | |||
488 | TRACE_CUR("%s: Could not invoke tasklet. Requeuing.\n", __FUNCTION__); | ||
489 | |||
490 | /* couldn't process tasklet. put it back at the end of the queue. */ | ||
491 | if(pending_tasklets == &which->pending_tasklets) | ||
492 | ___litmus_tasklet_schedule(t, which, 0); | ||
493 | else | ||
494 | ___litmus_tasklet_hi_schedule(t, which, 0); | ||
495 | } | ||
496 | } | ||
497 | |||
498 | |||
499 | // returns 1 if priorities need to be changed to continue processing | ||
500 | // pending tasklets. | ||
501 | static int do_litirq(struct klitirqd_info* which) | ||
502 | { | ||
503 | u32 pending; | ||
504 | int resched = 0; | ||
505 | |||
506 | if(in_interrupt()) | ||
507 | { | ||
508 | TRACE("%s: exiting early: in interrupt context!\n", __FUNCTION__); | ||
509 | return(0); | ||
510 | } | ||
511 | |||
512 | if(which->klitirqd != current) | ||
513 | { | ||
514 | TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n", | ||
515 | __FUNCTION__, current->comm, current->pid, | ||
516 | which->klitirqd->comm, which->klitirqd->pid); | ||
517 | return(0); | ||
518 | } | ||
519 | |||
520 | if(!is_realtime(current)) | ||
521 | { | ||
522 | TRACE_CUR("%s: exiting early: klitirqd is not real-time. Sched Policy = %d\n", | ||
523 | __FUNCTION__, current->policy); | ||
524 | return(0); | ||
525 | } | ||
526 | |||
527 | |||
528 | /* We only handle tasklets & work objects, no need for RCU triggers? */ | ||
529 | |||
530 | pending = litirq_pending(which); | ||
531 | if(pending) | ||
532 | { | ||
533 | /* extract the work to do and do it! */ | ||
534 | if(pending & LIT_TASKLET_HI) | ||
535 | { | ||
536 | TRACE_CUR("%s: Invoking HI tasklets.\n", __FUNCTION__); | ||
537 | do_lit_tasklet(which, &which->pending_tasklets_hi); | ||
538 | resched = tasklet_ownership_change(which, LIT_TASKLET_HI); | ||
539 | |||
540 | if(resched) | ||
541 | { | ||
542 | TRACE_CUR("%s: HI tasklets of another owner remain. " | ||
543 | "Skipping any LOW tasklets.\n", __FUNCTION__); | ||
544 | } | ||
545 | } | ||
546 | |||
547 | if(!resched && (pending & LIT_TASKLET_LOW)) | ||
548 | { | ||
549 | TRACE_CUR("%s: Invoking LOW tasklets.\n", __FUNCTION__); | ||
550 | do_lit_tasklet(which, &which->pending_tasklets); | ||
551 | resched = tasklet_ownership_change(which, LIT_TASKLET_LOW); | ||
552 | |||
553 | if(resched) | ||
554 | { | ||
555 | TRACE_CUR("%s: LOW tasklets of another owner remain. " | ||
556 | "Skipping any work objects.\n", __FUNCTION__); | ||
557 | } | ||
558 | } | ||
559 | } | ||
560 | |||
561 | return(resched); | ||
562 | } | ||
563 | |||
564 | |||
565 | static void do_work(struct klitirqd_info* which) | ||
566 | { | ||
567 | unsigned long flags; | ||
568 | work_func_t f; | ||
569 | struct work_struct* work; | ||
570 | |||
571 | // only execute one work-queue item to yield to tasklets. | ||
572 | // ...is this a good idea, or should we just batch them? | ||
573 | raw_spin_lock_irqsave(&which->lock, flags); | ||
574 | |||
575 | if(!litirq_pending_work_irqoff(which)) | ||
576 | { | ||
577 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
578 | goto no_work; | ||
579 | } | ||
580 | |||
581 | work = list_first_entry(&which->worklist, struct work_struct, entry); | ||
582 | list_del_init(&work->entry); | ||
583 | |||
584 | if(list_empty(&which->worklist)) | ||
585 | { | ||
586 | which->pending &= ~LIT_WORK; | ||
587 | } | ||
588 | |||
589 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
590 | |||
591 | |||
592 | |||
593 | /* safe to read current_owner outside of lock since only this thread | ||
594 | may write to the pointer. */ | ||
595 | if(work->owner == which->current_owner) | ||
596 | { | ||
597 | TRACE_CUR("%s: Invoking work object.\n", __FUNCTION__); | ||
598 | // do the work! | ||
599 | work_clear_pending(work); | ||
600 | f = work->func; | ||
601 | f(work); /* can't touch 'work' after this point, | ||
602 | the user may have freed it. */ | ||
603 | |||
604 | atomic_dec(&which->num_work_pending); | ||
605 | } | ||
606 | else | ||
607 | { | ||
608 | TRACE_CUR("%s: Could not invoke work object. Requeuing.\n", | ||
609 | __FUNCTION__); | ||
610 | ___litmus_schedule_work(work, which, 0); | ||
611 | } | ||
612 | |||
613 | no_work: | ||
614 | return; | ||
615 | } | ||
616 | |||
617 | |||
618 | static int set_litmus_daemon_sched(void) | ||
619 | { | ||
620 | /* set up a daemon job that will never complete. | ||
621 | it should only ever run on behalf of another | ||
622 | real-time task. | ||
623 | |||
624 | TODO: Transition to a new job whenever a | ||
625 | new tasklet is handled */ | ||
626 | |||
627 | int ret = 0; | ||
628 | |||
629 | struct rt_task tp = { | ||
630 | .exec_cost = 0, | ||
631 | .period = 1000000000, /* dummy 1 second period */ | ||
632 | .phase = 0, | ||
633 | .cpu = task_cpu(current), | ||
634 | .budget_policy = NO_ENFORCEMENT, | ||
635 | .cls = RT_CLASS_BEST_EFFORT | ||
636 | }; | ||
637 | |||
638 | struct sched_param param = { .sched_priority = 0}; | ||
639 | |||
640 | |||
641 | /* set task params, mark as proxy thread, and init other data */ | ||
642 | tsk_rt(current)->task_params = tp; | ||
643 | tsk_rt(current)->is_proxy_thread = 1; | ||
644 | tsk_rt(current)->cur_klitirqd = NULL; | ||
645 | //init_MUTEX(&tsk_rt(current)->klitirqd_sem); | ||
646 | mutex_init(&tsk_rt(current)->klitirqd_sem); | ||
647 | //init_completion(&tsk_rt(current)->klitirqd_sem); | ||
648 | atomic_set(&tsk_rt(current)->klitirqd_sem_stat, NOT_HELD); | ||
649 | |||
650 | /* inform the OS we're SCHED_LITMUS -- | ||
651 | sched_setscheduler_nocheck() calls litmus_admit_task(). */ | ||
652 | sched_setscheduler_nocheck(current, SCHED_LITMUS, ¶m); | ||
653 | |||
654 | return ret; | ||
655 | } | ||
656 | |||
657 | static void enter_execution_phase(struct klitirqd_info* which, | ||
658 | struct mutex* sem, | ||
659 | struct task_struct* t) | ||
660 | { | ||
661 | TRACE_CUR("%s: Trying to enter execution phase. " | ||
662 | "Acquiring semaphore of %s/%d\n", __FUNCTION__, | ||
663 | t->comm, t->pid); | ||
664 | down_and_set_stat(current, HELD, sem); | ||
665 | TRACE_CUR("%s: Execution phase entered! " | ||
666 | "Acquired semaphore of %s/%d\n", __FUNCTION__, | ||
667 | t->comm, t->pid); | ||
668 | } | ||
669 | |||
670 | static void exit_execution_phase(struct klitirqd_info* which, | ||
671 | struct mutex* sem, | ||
672 | struct task_struct* t) | ||
673 | { | ||
674 | TRACE_CUR("%s: Exiting execution phase. " | ||
675 | "Releasing semaphore of %s/%d\n", __FUNCTION__, | ||
676 | t->comm, t->pid); | ||
677 | if(atomic_read(&tsk_rt(current)->klitirqd_sem_stat) == HELD) | ||
678 | { | ||
679 | up_and_set_stat(current, NOT_HELD, sem); | ||
680 | TRACE_CUR("%s: Execution phase exited! " | ||
681 | "Released semaphore of %s/%d\n", __FUNCTION__, | ||
682 | t->comm, t->pid); | ||
683 | } | ||
684 | else | ||
685 | { | ||
686 | TRACE_CUR("%s: COULDN'T RELEASE SEMAPHORE BECAUSE ONE IS NOT HELD!\n", __FUNCTION__); | ||
687 | } | ||
688 | } | ||
689 | |||
690 | /* main loop for klitsoftirqd */ | ||
691 | static int run_klitirqd(void* unused) | ||
692 | { | ||
693 | struct klitirqd_info* which = &klitirqds[klitirqd_id(current)]; | ||
694 | struct mutex* sem; | ||
695 | struct task_struct* owner; | ||
696 | |||
697 | int rt_status = set_litmus_daemon_sched(); | ||
698 | |||
699 | if(rt_status != 0) | ||
700 | { | ||
701 | TRACE_CUR("%s: Failed to transition to rt-task.\n", __FUNCTION__); | ||
702 | goto rt_failed; | ||
703 | } | ||
704 | |||
705 | atomic_inc(&num_ready_klitirqds); | ||
706 | |||
707 | set_current_state(TASK_INTERRUPTIBLE); | ||
708 | |||
709 | while (!kthread_should_stop()) | ||
710 | { | ||
711 | preempt_disable(); | ||
712 | if (!litirq_pending(which)) | ||
713 | { | ||
714 | /* sleep for work */ | ||
715 | TRACE_CUR("%s: No more tasklets or work objects. Going to sleep.\n", | ||
716 | __FUNCTION__); | ||
717 | preempt_enable_no_resched(); | ||
718 | schedule(); | ||
719 | |||
720 | if(kthread_should_stop()) /* bail out */ | ||
721 | { | ||
722 | TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__); | ||
723 | continue; | ||
724 | } | ||
725 | |||
726 | preempt_disable(); | ||
727 | } | ||
728 | |||
729 | __set_current_state(TASK_RUNNING); | ||
730 | |||
731 | while (litirq_pending_and_sem_and_owner(which, &sem, &owner)) | ||
732 | { | ||
733 | int needs_resched = 0; | ||
734 | |||
735 | preempt_enable_no_resched(); | ||
736 | |||
737 | BUG_ON(sem == NULL); | ||
738 | |||
739 | // wait to enter execution phase; wait for 'current_owner' to block. | ||
740 | enter_execution_phase(which, sem, owner); | ||
741 | |||
742 | if(kthread_should_stop()) | ||
743 | { | ||
744 | TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__); | ||
745 | break; | ||
746 | } | ||
747 | |||
748 | preempt_disable(); | ||
749 | |||
750 | /* Double check that there's still pending work and the owner hasn't | ||
751 | * changed. Pending items may have been flushed while we were sleeping. | ||
752 | */ | ||
753 | if(litirq_pending_with_owner(which, owner)) | ||
754 | { | ||
755 | TRACE_CUR("%s: Executing tasklets and/or work objects.\n", | ||
756 | __FUNCTION__); | ||
757 | |||
758 | needs_resched = do_litirq(which); | ||
759 | |||
760 | preempt_enable_no_resched(); | ||
761 | |||
762 | // work objects are preemptible. | ||
763 | if(!needs_resched) | ||
764 | { | ||
765 | do_work(which); | ||
766 | } | ||
767 | |||
768 | // exit execution phase. | ||
769 | exit_execution_phase(which, sem, owner); | ||
770 | |||
771 | TRACE_CUR("%s: Setting up next priority.\n", __FUNCTION__); | ||
772 | reeval_prio(which); /* check if we need to change priority here */ | ||
773 | } | ||
774 | else | ||
775 | { | ||
776 | TRACE_CUR("%s: Pending work was flushed! Prev owner was %s/%d\n", | ||
777 | __FUNCTION__, | ||
778 | owner->comm, owner->pid); | ||
779 | preempt_enable_no_resched(); | ||
780 | |||
781 | // exit execution phase. | ||
782 | exit_execution_phase(which, sem, owner); | ||
783 | } | ||
784 | |||
785 | cond_resched(); | ||
786 | preempt_disable(); | ||
787 | } | ||
788 | preempt_enable(); | ||
789 | set_current_state(TASK_INTERRUPTIBLE); | ||
790 | } | ||
791 | __set_current_state(TASK_RUNNING); | ||
792 | |||
793 | atomic_dec(&num_ready_klitirqds); | ||
794 | |||
795 | rt_failed: | ||
796 | litmus_exit_task(current); | ||
797 | |||
798 | return rt_status; | ||
799 | } | ||
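/* Added for clarity (not in the original patch), a summary of run_klitirqd():
 * the daemon sleeps until tasklets or work objects are pending, then
 * (1) acquires the current owner's klitirqd_sem to enter its execution phase,
 * (2) drains HI tasklets, then LOW tasklets, then at most one work object,
 *     requeuing anything whose owner changed in the meantime,
 * (3) releases the semaphore and calls reeval_prio() so its inherited
 *     priority tracks the owner of the next pending item.
 * flush_pending() below hands leftover items back to Linux when an owner
 * exits or the daemons are terminated. */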
800 | |||
801 | |||
802 | struct klitirqd_launch_data | ||
803 | { | ||
804 | int* cpu_affinity; | ||
805 | struct work_struct work; | ||
806 | }; | ||
807 | |||
808 | /* executed by a kworker from workqueues */ | ||
809 | static void launch_klitirqd(struct work_struct *work) | ||
810 | { | ||
811 | int i; | ||
812 | |||
813 | struct klitirqd_launch_data* launch_data = | ||
814 | container_of(work, struct klitirqd_launch_data, work); | ||
815 | |||
816 | TRACE("%s: Creating %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); | ||
817 | |||
818 | /* create the daemon threads */ | ||
819 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
820 | { | ||
821 | if(launch_data->cpu_affinity) | ||
822 | { | ||
823 | klitirqds[i].klitirqd = | ||
824 | kthread_create( | ||
825 | run_klitirqd, | ||
826 | /* treat the affinity as a pointer, we'll cast it back later */ | ||
827 | (void*)(long long)launch_data->cpu_affinity[i], | ||
828 | "klitirqd_th%d/%d", | ||
829 | i, | ||
830 | launch_data->cpu_affinity[i]); | ||
831 | |||
832 | /* litmus will put it in the right cluster. */ | ||
833 | kthread_bind(klitirqds[i].klitirqd, launch_data->cpu_affinity[i]); | ||
834 | } | ||
835 | else | ||
836 | { | ||
837 | klitirqds[i].klitirqd = | ||
838 | kthread_create( | ||
839 | run_klitirqd, | ||
840 | /* treat the affinity as a pointer, we'll cast it back later */ | ||
841 | (void*)(long long)(-1), | ||
842 | "klitirqd_th%d", | ||
843 | i); | ||
844 | } | ||
845 | } | ||
846 | |||
847 | TRACE("%s: Launching %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); | ||
848 | |||
849 | /* unleash the daemons */ | ||
850 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
851 | { | ||
852 | wake_up_process(klitirqds[i].klitirqd); | ||
853 | } | ||
854 | |||
855 | if(launch_data->cpu_affinity) | ||
856 | kfree(launch_data->cpu_affinity); | ||
857 | kfree(launch_data); | ||
858 | } | ||
859 | |||
860 | |||
861 | void spawn_klitirqd(int* affinity) | ||
862 | { | ||
863 | int i; | ||
864 | struct klitirqd_launch_data* delayed_launch; | ||
865 | |||
866 | if(atomic_read(&num_ready_klitirqds) != 0) | ||
867 | { | ||
868 | TRACE("%s: At least one klitirqd is already running! Need to call kill_klitirqd()?\n", __FUNCTION__); | ||
869 | return; | ||
870 | } | ||
871 | |||
872 | /* init the tasklet & work queues */ | ||
873 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
874 | { | ||
875 | klitirqds[i].terminating = 0; | ||
876 | klitirqds[i].pending = 0; | ||
877 | |||
878 | klitirqds[i].num_hi_pending.counter = 0; | ||
879 | klitirqds[i].num_low_pending.counter = 0; | ||
880 | klitirqds[i].num_work_pending.counter = 0; | ||
881 | |||
882 | klitirqds[i].pending_tasklets_hi.head = NULL; | ||
883 | klitirqds[i].pending_tasklets_hi.tail = &klitirqds[i].pending_tasklets_hi.head; | ||
884 | |||
885 | klitirqds[i].pending_tasklets.head = NULL; | ||
886 | klitirqds[i].pending_tasklets.tail = &klitirqds[i].pending_tasklets.head; | ||
887 | |||
888 | INIT_LIST_HEAD(&klitirqds[i].worklist); | ||
889 | |||
890 | raw_spin_lock_init(&klitirqds[i].lock); | ||
891 | } | ||
892 | |||
893 | /* wait to flush the initializations to memory since other threads | ||
894 | will access it. */ | ||
895 | mb(); | ||
896 | |||
897 | /* tell a work queue to launch the threads. we can't make scheduling | ||
898 | calls since we're in an atomic state. */ | ||
899 | TRACE("%s: Setting callback up to launch klitirqds\n", __FUNCTION__); | ||
900 | delayed_launch = kmalloc(sizeof(struct klitirqd_launch_data), GFP_ATOMIC); | ||
901 | if(affinity) | ||
902 | { | ||
903 | delayed_launch->cpu_affinity = | ||
904 | kmalloc(sizeof(int)*NR_LITMUS_SOFTIRQD, GFP_ATOMIC); | ||
905 | |||
906 | memcpy(delayed_launch->cpu_affinity, affinity, | ||
907 | sizeof(int)*NR_LITMUS_SOFTIRQD); | ||
908 | } | ||
909 | else | ||
910 | { | ||
911 | delayed_launch->cpu_affinity = NULL; | ||
912 | } | ||
913 | INIT_WORK(&delayed_launch->work, launch_klitirqd); | ||
914 | schedule_work(&delayed_launch->work); | ||
915 | } | ||
916 | |||
917 | |||
918 | void kill_klitirqd(void) | ||
919 | { | ||
920 | if(!klitirqd_is_dead()) | ||
921 | { | ||
922 | int i; | ||
923 | |||
924 | TRACE("%s: Killing %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); | ||
925 | |||
926 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
927 | { | ||
928 | if(klitirqds[i].terminating != 1) | ||
929 | { | ||
930 | klitirqds[i].terminating = 1; | ||
931 | mb(); /* just to be sure? */ | ||
932 | flush_pending(klitirqds[i].klitirqd, NULL); | ||
933 | |||
934 | /* signal termination */ | ||
935 | kthread_stop(klitirqds[i].klitirqd); | ||
936 | } | ||
937 | } | ||
938 | } | ||
939 | } | ||
940 | |||
941 | |||
942 | int klitirqd_is_ready(void) | ||
943 | { | ||
944 | return(atomic_read(&num_ready_klitirqds) == NR_LITMUS_SOFTIRQD); | ||
945 | } | ||
946 | |||
947 | int klitirqd_is_dead(void) | ||
948 | { | ||
949 | return(atomic_read(&num_ready_klitirqds) == 0); | ||
950 | } | ||
951 | |||
952 | |||
953 | struct task_struct* get_klitirqd(unsigned int k_id) | ||
954 | { | ||
955 | return(klitirqds[k_id].klitirqd); | ||
956 | } | ||
957 | |||
958 | |||
959 | void flush_pending(struct task_struct* klitirqd_thread, | ||
960 | struct task_struct* owner) | ||
961 | { | ||
962 | unsigned int k_id = klitirqd_id(klitirqd_thread); | ||
963 | struct klitirqd_info *which = &klitirqds[k_id]; | ||
964 | |||
965 | unsigned long flags; | ||
966 | struct tasklet_struct *list; | ||
967 | |||
968 | u32 work_flushed = 0; | ||
969 | |||
970 | raw_spin_lock_irqsave(&which->lock, flags); | ||
971 | |||
972 | //__dump_state(which, "flush_pending: before"); | ||
973 | |||
974 | // flush hi tasklets. | ||
975 | if(litirq_pending_hi_irqoff(which)) | ||
976 | { | ||
977 | which->pending &= ~LIT_TASKLET_HI; | ||
978 | |||
979 | list = which->pending_tasklets_hi.head; | ||
980 | which->pending_tasklets_hi.head = NULL; | ||
981 | which->pending_tasklets_hi.tail = &which->pending_tasklets_hi.head; | ||
982 | |||
983 | TRACE("%s: Handing HI tasklets back to Linux.\n", __FUNCTION__); | ||
984 | |||
985 | while(list) | ||
986 | { | ||
987 | struct tasklet_struct *t = list; | ||
988 | list = list->next; | ||
989 | |||
990 | if(likely((t->owner == owner) || (owner == NULL))) | ||
991 | { | ||
992 | if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) | ||
993 | { | ||
994 | BUG(); | ||
995 | } | ||
996 | |||
997 | work_flushed |= LIT_TASKLET_HI; | ||
998 | |||
999 | t->owner = NULL; | ||
1000 | |||
1001 | // WTF? | ||
1002 | if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | ||
1003 | { | ||
1004 | atomic_dec(&which->num_hi_pending); | ||
1005 | ___tasklet_hi_schedule(t); | ||
1006 | } | ||
1007 | else | ||
1008 | { | ||
1009 | TRACE("%s: dropped hi tasklet??\n", __FUNCTION__); | ||
1010 | BUG(); | ||
1011 | } | ||
1012 | } | ||
1013 | else | ||
1014 | { | ||
1015 | TRACE("%s: Could not flush a HI tasklet.\n", __FUNCTION__); | ||
1016 | // put back on queue. | ||
1017 | ___litmus_tasklet_hi_schedule(t, which, 0); | ||
1018 | } | ||
1019 | } | ||
1020 | } | ||
1021 | |||
1022 | // flush low tasklets. | ||
1023 | if(litirq_pending_low_irqoff(which)) | ||
1024 | { | ||
1025 | which->pending &= ~LIT_TASKLET_LOW; | ||
1026 | |||
1027 | list = which->pending_tasklets.head; | ||
1028 | which->pending_tasklets.head = NULL; | ||
1029 | which->pending_tasklets.tail = &which->pending_tasklets.head; | ||
1030 | |||
1031 | TRACE("%s: Handing LOW tasklets back to Linux.\n", __FUNCTION__); | ||
1032 | |||
1033 | while(list) | ||
1034 | { | ||
1035 | struct tasklet_struct *t = list; | ||
1036 | list = list->next; | ||
1037 | |||
1038 | if(likely((t->owner == owner) || (owner == NULL))) | ||
1039 | { | ||
1040 | if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) | ||
1041 | { | ||
1042 | BUG(); | ||
1043 | } | ||
1044 | |||
1045 | work_flushed |= LIT_TASKLET_LOW; | ||
1046 | |||
1047 | t->owner = NULL; | ||
1048 | sched_trace_tasklet_end(owner, 1ul); | ||
1049 | |||
1050 | if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | ||
1051 | { | ||
1052 | atomic_dec(&which->num_low_pending); | ||
1053 | ___tasklet_schedule(t); | ||
1054 | } | ||
1055 | else | ||
1056 | { | ||
1057 | TRACE("%s: dropped tasklet??\n", __FUNCTION__); | ||
1058 | BUG(); | ||
1059 | } | ||
1060 | } | ||
1061 | else | ||
1062 | { | ||
1063 | TRACE("%s: Could not flush a LOW tasklet.\n", __FUNCTION__); | ||
1064 | // put back on queue | ||
1065 | ___litmus_tasklet_schedule(t, which, 0); | ||
1066 | } | ||
1067 | } | ||
1068 | } | ||
1069 | |||
1070 | // flush work objects | ||
1071 | if(litirq_pending_work_irqoff(which)) | ||
1072 | { | ||
1073 | which->pending &= ~LIT_WORK; | ||
1074 | |||
1075 | TRACE("%s: Handing work objects back to Linux.\n", __FUNCTION__); | ||
1076 | |||
1077 | while(!list_empty(&which->worklist)) | ||
1078 | { | ||
1079 | struct work_struct* work = | ||
1080 | list_first_entry(&which->worklist, struct work_struct, entry); | ||
1081 | list_del_init(&work->entry); | ||
1082 | |||
1083 | if(likely((work->owner == owner) || (owner == NULL))) | ||
1084 | { | ||
1085 | work_flushed |= LIT_WORK; | ||
1086 | atomic_dec(&which->num_work_pending); | ||
1087 | |||
1088 | work->owner = NULL; | ||
1089 | sched_trace_work_end(owner, current, 1ul); | ||
1090 | __schedule_work(work); | ||
1091 | } | ||
1092 | else | ||
1093 | { | ||
1094 | TRACE("%s: Could not flush a work object.\n", __FUNCTION__); | ||
1095 | // put back on queue | ||
1096 | ___litmus_schedule_work(work, which, 0); | ||
1097 | } | ||
1098 | } | ||
1099 | } | ||
1100 | |||
1101 | //__dump_state(which, "flush_pending: after (before reeval prio)"); | ||
1102 | |||
1103 | |||
1104 | mb(); /* commit changes to pending flags */ | ||
1105 | |||
1106 | /* reset the scheduling priority */ | ||
1107 | if(work_flushed) | ||
1108 | { | ||
1109 | __reeval_prio(which); | ||
1110 | |||
1111 | /* Try to offload flushed tasklets to Linux's ksoftirqd. */ | ||
1112 | if(work_flushed & (LIT_TASKLET_LOW | LIT_TASKLET_HI)) | ||
1113 | { | ||
1114 | wakeup_softirqd(); | ||
1115 | } | ||
1116 | } | ||
1117 | else | ||
1118 | { | ||
1119 | TRACE_CUR("%s: no work flushed, so __reeval_prio() skipped\n", __FUNCTION__); | ||
1120 | } | ||
1121 | |||
1122 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
1123 | } | ||
1124 | |||
1125 | |||
1126 | |||
1127 | |||
1128 | static void ___litmus_tasklet_schedule(struct tasklet_struct *t, | ||
1129 | struct klitirqd_info *which, | ||
1130 | int wakeup) | ||
1131 | { | ||
1132 | unsigned long flags; | ||
1133 | u32 old_pending; | ||
1134 | |||
1135 | t->next = NULL; | ||
1136 | |||
1137 | raw_spin_lock_irqsave(&which->lock, flags); | ||
1138 | |||
1139 | //__dump_state(which, "___litmus_tasklet_schedule: before queuing"); | ||
1140 | |||
1141 | *(which->pending_tasklets.tail) = t; | ||
1142 | which->pending_tasklets.tail = &t->next; | ||
1143 | |||
1144 | old_pending = which->pending; | ||
1145 | which->pending |= LIT_TASKLET_LOW; | ||
1146 | |||
1147 | atomic_inc(&which->num_low_pending); | ||
1148 | |||
1149 | mb(); | ||
1150 | |||
1151 | if(!old_pending && wakeup) | ||
1152 | { | ||
1153 | wakeup_litirqd_locked(which); /* wake up the klitirqd */ | ||
1154 | } | ||
1155 | |||
1156 | //__dump_state(which, "___litmus_tasklet_schedule: after queuing"); | ||
1157 | |||
1158 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
1159 | } | ||
1160 | |||
1161 | int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id) | ||
1162 | { | ||
1163 | int ret = 0; /* assume failure */ | ||
1164 | if(unlikely((t->owner == NULL) || !is_realtime(t->owner))) | ||
1165 | { | ||
1166 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
1167 | BUG(); | ||
1168 | } | ||
1169 | |||
1170 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | ||
1171 | { | ||
1172 | TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id); | ||
1173 | BUG(); | ||
1174 | } | ||
1175 | |||
1176 | if(likely(!klitirqds[k_id].terminating)) | ||
1177 | { | ||
1178 | /* Can't accept tasklets while we're processing a workqueue | ||
1179 | because they're handled by the same thread. This case is | ||
1180 | very RARE. | ||
1181 | |||
1182 | TODO: Use a separate thread for work objects!!!!!! | ||
1183 | */ | ||
1184 | if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0)) | ||
1185 | { | ||
1186 | ret = 1; | ||
1187 | ___litmus_tasklet_schedule(t, &klitirqds[k_id], 1); | ||
1188 | } | ||
1189 | else | ||
1190 | { | ||
1191 | TRACE("%s: rejected tasklet because of pending work.\n", | ||
1192 | __FUNCTION__); | ||
1193 | } | ||
1194 | } | ||
1195 | return(ret); | ||
1196 | } | ||
1197 | |||
1198 | EXPORT_SYMBOL(__litmus_tasklet_schedule); | ||
1199 | |||
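Note that __litmus_tasklet_schedule() returns 0 when the daemon rejects the tasklet (a missing or non-real-time owner BUG()s, but a terminating daemon or pending work objects cause a plain rejection), so callers need a fallback path. A minimal sketch, assuming the caller already knows the owning real-time task and the klitirqd index (the NVIDIA glue derives both elsewhere in the patch):

/* Illustrative sketch only; not part of the patch.  The helper name, the
 * owner argument, and the k_id choice are assumptions for the example. */
static void dispatch_bh(struct tasklet_struct *t,
                        struct task_struct *owner,
                        unsigned int k_id)
{
	t->owner = owner;  /* must be a real-time task, or the call BUG()s */
	if (!__litmus_tasklet_schedule(t, k_id))
		tasklet_schedule(t);  /* rejected: fall back to the stock softirq path */
}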
1200 | |||
1201 | static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t, | ||
1202 | struct klitirqd_info *which, | ||
1203 | int wakeup) | ||
1204 | { | ||
1205 | unsigned long flags; | ||
1206 | u32 old_pending; | ||
1207 | |||
1208 | t->next = NULL; | ||
1209 | |||
1210 | raw_spin_lock_irqsave(&which->lock, flags); | ||
1211 | |||
1212 | *(which->pending_tasklets_hi.tail) = t; | ||
1213 | which->pending_tasklets_hi.tail = &t->next; | ||
1214 | |||
1215 | old_pending = which->pending; | ||
1216 | which->pending |= LIT_TASKLET_HI; | ||
1217 | |||
1218 | atomic_inc(&which->num_hi_pending); | ||
1219 | |||
1220 | mb(); | ||
1221 | |||
1222 | if(!old_pending && wakeup) | ||
1223 | { | ||
1224 | wakeup_litirqd_locked(which); /* wake up the klitirqd */ | ||
1225 | } | ||
1226 | |||
1227 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
1228 | } | ||
1229 | |||
1230 | int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id) | ||
1231 | { | ||
1232 | int ret = 0; /* assume failure */ | ||
1233 | if(unlikely((t->owner == NULL) || !is_realtime(t->owner))) | ||
1234 | { | ||
1235 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
1236 | BUG(); | ||
1237 | } | ||
1238 | |||
1239 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | ||
1240 | { | ||
1241 | TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id); | ||
1242 | BUG(); | ||
1243 | } | ||
1244 | |||
1245 | if(unlikely(!klitirqd_is_ready())) | ||
1246 | { | ||
1247 | TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id); | ||
1248 | BUG(); | ||
1249 | } | ||
1250 | |||
1251 | if(likely(!klitirqds[k_id].terminating)) | ||
1252 | { | ||
1253 | if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0)) | ||
1254 | { | ||
1255 | ret = 1; | ||
1256 | ___litmus_tasklet_hi_schedule(t, &klitirqds[k_id], 1); | ||
1257 | } | ||
1258 | else | ||
1259 | { | ||
1260 | TRACE("%s: rejected tasklet because of pending work.\n", | ||
1261 | __FUNCTION__); | ||
1262 | } | ||
1263 | } | ||
1264 | return(ret); | ||
1265 | } | ||
1266 | |||
1267 | EXPORT_SYMBOL(__litmus_tasklet_hi_schedule); | ||
1268 | |||
1269 | |||
1270 | int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k_id) | ||
1271 | { | ||
1272 | int ret = 0; /* assume failure */ | ||
1273 | u32 old_pending; | ||
1274 | |||
1275 | BUG_ON(!irqs_disabled()); | ||
1276 | |||
1277 | if(unlikely((t->owner == NULL) || !is_realtime(t->owner))) | ||
1278 | { | ||
1279 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
1280 | BUG(); | ||
1281 | } | ||
1282 | |||
1283 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | ||
1284 | { | ||
1285 | TRACE("%s: No klitirqd_th%u!\n", __FUNCTION__, k_id); | ||
1286 | BUG(); | ||
1287 | } | ||
1288 | |||
1289 | if(unlikely(!klitirqd_is_ready())) | ||
1290 | { | ||
1291 | TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id); | ||
1292 | BUG(); | ||
1293 | } | ||
1294 | |||
1295 | if(likely(!klitirqds[k_id].terminating)) | ||
1296 | { | ||
1297 | raw_spin_lock(&klitirqds[k_id].lock); | ||
1298 | |||
1299 | if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0)) | ||
1300 | { | ||
1301 | ret = 1; // success! | ||
1302 | |||
1303 | t->next = klitirqds[k_id].pending_tasklets_hi.head; | ||
1304 | klitirqds[k_id].pending_tasklets_hi.head = t; | ||
1305 | |||
1306 | old_pending = klitirqds[k_id].pending; | ||
1307 | klitirqds[k_id].pending |= LIT_TASKLET_HI; | ||
1308 | |||
1309 | atomic_inc(&klitirqds[k_id].num_hi_pending); | ||
1310 | |||
1311 | mb(); | ||
1312 | |||
1313 | if(!old_pending) | ||
1314 | wakeup_litirqd_locked(&klitirqds[k_id]); /* wake up the klitirqd */ | ||
1315 | } | ||
1316 | else | ||
1317 | { | ||
1318 | TRACE("%s: rejected tasklet because of pending work.\n", | ||
1319 | __FUNCTION__); | ||
1320 | } | ||
1321 | |||
1322 | raw_spin_unlock(&klitirqds[k_id].lock); | ||
1323 | } | ||
1324 | return(ret); | ||
1325 | } | ||
1326 | |||
1327 | EXPORT_SYMBOL(__litmus_tasklet_hi_schedule_first); | ||
1328 | |||
1329 | |||
1330 | |||
1331 | static void ___litmus_schedule_work(struct work_struct *w, | ||
1332 | struct klitirqd_info *which, | ||
1333 | int wakeup) | ||
1334 | { | ||
1335 | unsigned long flags; | ||
1336 | u32 old_pending; | ||
1337 | |||
1338 | raw_spin_lock_irqsave(&which->lock, flags); | ||
1339 | |||
1340 | work_pending(w); | ||
1341 | list_add_tail(&w->entry, &which->worklist); | ||
1342 | |||
1343 | old_pending = which->pending; | ||
1344 | which->pending |= LIT_WORK; | ||
1345 | |||
1346 | atomic_inc(&which->num_work_pending); | ||
1347 | |||
1348 | mb(); | ||
1349 | |||
1350 | if(!old_pending && wakeup) | ||
1351 | { | ||
1352 | wakeup_litirqd_locked(which); /* wakeup the klitirqd */ | ||
1353 | } | ||
1354 | |||
1355 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
1356 | } | ||
1357 | |||
1358 | int __litmus_schedule_work(struct work_struct *w, unsigned int k_id) | ||
1359 | { | ||
1360 | int ret = 1; /* assume success */ | ||
1361 | if(unlikely(w->owner == NULL) || !is_realtime(w->owner)) | ||
1362 | { | ||
1363 | TRACE("%s: No owner associated with this work object!\n", __FUNCTION__); | ||
1364 | BUG(); | ||
1365 | } | ||
1366 | |||
1367 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | ||
1368 | { | ||
1369 | TRACE("%s: No klitirqd_th%u!\n", k_id); | ||
1370 | BUG(); | ||
1371 | } | ||
1372 | |||
1373 | if(unlikely(!klitirqd_is_ready())) | ||
1374 | { | ||
1375 | TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id); | ||
1376 | BUG(); | ||
1377 | } | ||
1378 | |||
1379 | if(likely(!klitirqds[k_id].terminating)) | ||
1380 | ___litmus_schedule_work(w, &klitirqds[k_id], 1); | ||
1381 | else | ||
1382 | ret = 0; | ||
1383 | return(ret); | ||
1384 | } | ||
1385 | EXPORT_SYMBOL(__litmus_schedule_work); | ||
1386 | |||
1387 | |||
1388 | static int set_klitirqd_sem_status(unsigned long stat) | ||
1389 | { | ||
1390 | TRACE_CUR("SETTING STATUS FROM %d TO %d\n", | ||
1391 | atomic_read(&tsk_rt(current)->klitirqd_sem_stat), | ||
1392 | stat); | ||
1393 | atomic_set(&tsk_rt(current)->klitirqd_sem_stat, stat); | ||
1394 | //mb(); | ||
1395 | |||
1396 | return(0); | ||
1397 | } | ||
1398 | |||
1399 | static int set_klitirqd_sem_status_if_not_held(unsigned long stat) | ||
1400 | { | ||
1401 | if(atomic_read(&tsk_rt(current)->klitirqd_sem_stat) != HELD) | ||
1402 | { | ||
1403 | return(set_klitirqd_sem_status(stat)); | ||
1404 | } | ||
1405 | return(-1); | ||
1406 | } | ||
1407 | |||
1408 | |||
1409 | void __down_and_reset_and_set_stat(struct task_struct* t, | ||
1410 | enum klitirqd_sem_status to_reset, | ||
1411 | enum klitirqd_sem_status to_set, | ||
1412 | struct mutex* sem) | ||
1413 | { | ||
1414 | #if 0 | ||
1415 | struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem); | ||
1416 | struct task_struct* task = container_of(param, struct task_struct, rt_param); | ||
1417 | |||
1418 | TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n", | ||
1419 | __FUNCTION__, task->comm, task->pid); | ||
1420 | #endif | ||
1421 | |||
1422 | mutex_lock_sfx(sem, | ||
1423 | set_klitirqd_sem_status_if_not_held, to_reset, | ||
1424 | set_klitirqd_sem_status, to_set); | ||
1425 | #if 0 | ||
1426 | TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n", | ||
1427 | __FUNCTION__, task->comm, task->pid); | ||
1428 | #endif | ||
1429 | } | ||
1430 | |||
1431 | void down_and_set_stat(struct task_struct* t, | ||
1432 | enum klitirqd_sem_status to_set, | ||
1433 | struct mutex* sem) | ||
1434 | { | ||
1435 | #if 0 | ||
1436 | struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem); | ||
1437 | struct task_struct* task = container_of(param, struct task_struct, rt_param); | ||
1438 | |||
1439 | TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n", | ||
1440 | __FUNCTION__, task->comm, task->pid); | ||
1441 | #endif | ||
1442 | |||
1443 | mutex_lock_sfx(sem, | ||
1444 | NULL, 0, | ||
1445 | set_klitirqd_sem_status, to_set); | ||
1446 | |||
1447 | #if 0 | ||
1448 | TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n", | ||
1449 | __FUNCTION__, task->comm, task->pid); | ||
1450 | #endif | ||
1451 | } | ||
1452 | |||
1453 | |||
1454 | void up_and_set_stat(struct task_struct* t, | ||
1455 | enum klitirqd_sem_status to_set, | ||
1456 | struct mutex* sem) | ||
1457 | { | ||
1458 | #if 0 | ||
1459 | struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem); | ||
1460 | struct task_struct* task = container_of(param, struct task_struct, rt_param); | ||
1461 | |||
1462 | TRACE_CUR("%s: entered. Unlocking semaphore of %s/%d\n", | ||
1463 | __FUNCTION__, | ||
1464 | task->comm, task->pid); | ||
1465 | #endif | ||
1466 | |||
1467 | mutex_unlock_sfx(sem, NULL, 0, | ||
1468 | set_klitirqd_sem_status, to_set); | ||
1469 | |||
1470 | #if 0 | ||
1471 | TRACE_CUR("%s: exiting. Unlocked semaphore of %s/%d\n", | ||
1472 | __FUNCTION__, | ||
1473 | task->comm, task->pid); | ||
1474 | #endif | ||
1475 | } | ||
1476 | |||
1477 | |||
1478 | |||
1479 | void release_klitirqd_lock(struct task_struct* t) | ||
1480 | { | ||
1481 | if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klitirqd_sem_stat) == HELD)) | ||
1482 | { | ||
1483 | struct mutex* sem; | ||
1484 | struct task_struct* owner = t; | ||
1485 | |||
1486 | if(t->state == TASK_RUNNING) | ||
1487 | { | ||
1488 | TRACE_TASK(t, "NOT giving up klitirqd_sem because we're not blocked!\n"); | ||
1489 | return; | ||
1490 | } | ||
1491 | |||
1492 | if(likely(!tsk_rt(t)->is_proxy_thread)) | ||
1493 | { | ||
1494 | sem = &tsk_rt(t)->klitirqd_sem; | ||
1495 | } | ||
1496 | else | ||
1497 | { | ||
1498 | unsigned int k_id = klitirqd_id(t); | ||
1499 | owner = klitirqds[k_id].current_owner; | ||
1500 | |||
1501 | BUG_ON(t != klitirqds[k_id].klitirqd); | ||
1502 | |||
1503 | if(likely(owner)) | ||
1504 | { | ||
1505 | sem = &tsk_rt(owner)->klitirqd_sem; | ||
1506 | } | ||
1507 | else | ||
1508 | { | ||
1509 | BUG(); | ||
1510 | |||
1511 | // We had the rug pulled out from under us. Abort attempt | ||
1512 | // to reacquire the lock since our client no longer needs us. | ||
1513 | TRACE_CUR("HUH?! How did this happen?\n"); | ||
1514 | atomic_set(&tsk_rt(t)->klitirqd_sem_stat, NOT_HELD); | ||
1515 | return; | ||
1516 | } | ||
1517 | } | ||
1518 | |||
1519 | //TRACE_CUR("Releasing semaphore of %s/%d...\n", owner->comm, owner->pid); | ||
1520 | up_and_set_stat(t, NEED_TO_REACQUIRE, sem); | ||
1521 | //TRACE_CUR("Semaphore of %s/%d released!\n", owner->comm, owner->pid); | ||
1522 | } | ||
1523 | /* | ||
1524 | else if(is_realtime(t)) | ||
1525 | { | ||
1526 | TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klitirqd_sem_stat); | ||
1527 | } | ||
1528 | */ | ||
1529 | } | ||
1530 | |||
1531 | int reacquire_klitirqd_lock(struct task_struct* t) | ||
1532 | { | ||
1533 | int ret = 0; | ||
1534 | |||
1535 | if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klitirqd_sem_stat) == NEED_TO_REACQUIRE)) | ||
1536 | { | ||
1537 | struct mutex* sem; | ||
1538 | struct task_struct* owner = t; | ||
1539 | |||
1540 | if(likely(!tsk_rt(t)->is_proxy_thread)) | ||
1541 | { | ||
1542 | sem = &tsk_rt(t)->klitirqd_sem; | ||
1543 | } | ||
1544 | else | ||
1545 | { | ||
1546 | unsigned int k_id = klitirqd_id(t); | ||
1547 | //struct task_struct* owner = klitirqds[k_id].current_owner; | ||
1548 | owner = klitirqds[k_id].current_owner; | ||
1549 | |||
1550 | BUG_ON(t != klitirqds[k_id].klitirqd); | ||
1551 | |||
1552 | if(likely(owner)) | ||
1553 | { | ||
1554 | sem = &tsk_rt(owner)->klitirqd_sem; | ||
1555 | } | ||
1556 | else | ||
1557 | { | ||
1558 | // We had the rug pulled out from under us. Abort attempt | ||
1559 | // to reacquire the lock since our client no longer needs us. | ||
1560 | TRACE_CUR("No longer needs to reacquire klitirqd_sem!\n"); | ||
1561 | atomic_set(&tsk_rt(t)->klitirqd_sem_stat, NOT_HELD); | ||
1562 | return(0); | ||
1563 | } | ||
1564 | } | ||
1565 | |||
1566 | //TRACE_CUR("Trying to reacquire semaphore of %s/%d\n", owner->comm, owner->pid); | ||
1567 | __down_and_reset_and_set_stat(t, REACQUIRING, HELD, sem); | ||
1568 | //TRACE_CUR("Reacquired semaphore %s/%d\n", owner->comm, owner->pid); | ||
1569 | } | ||
1570 | /* | ||
1571 | else if(is_realtime(t)) | ||
1572 | { | ||
1573 | TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klitirqd_sem_stat); | ||
1574 | } | ||
1575 | */ | ||
1576 | |||
1577 | return(ret); | ||
1578 | } | ||
1579 | |||
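For orientation, the two hooks above are meant to bracket scheduling decisions: an owner that blocks hands klitirqd_sem back so the daemon serving its GPU can keep running, and takes the semaphore back when it resumes. A minimal sketch of the assumed call pattern and the klitirqd_sem_stat transitions it drives; the real call sites live in the scheduler path, not in this file:

/* Illustrative sketch only; not part of the patch. */
static void klitirqd_sem_hooks_example(struct task_struct *prev,
                                       struct task_struct *next)
{
	/* prev is blocking while it holds klitirqd_sem: up_and_set_stat()
	 * moves its klitirqd_sem_stat from HELD to NEED_TO_REACQUIRE. */
	release_klitirqd_lock(prev);

	/* next is resuming: __down_and_reset_and_set_stat() marks it
	 * REACQUIRING while it waits and HELD once it owns the semaphore. */
	reacquire_klitirqd_lock(next);
}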
diff --git a/litmus/locking.c b/litmus/locking.c index 2693f1aca859..cfce98e7480d 100644 --- a/litmus/locking.c +++ b/litmus/locking.c | |||
@@ -121,7 +121,6 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq) | |||
121 | return(t); | 121 | return(t); |
122 | } | 122 | } |
123 | 123 | ||
124 | |||
125 | #else | 124 | #else |
126 | 125 | ||
127 | struct fdso_ops generic_lock_ops = {}; | 126 | struct fdso_ops generic_lock_ops = {}; |
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c new file mode 100644 index 000000000000..78f035244d21 --- /dev/null +++ b/litmus/nvidia_info.c | |||
@@ -0,0 +1,526 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/semaphore.h> | ||
3 | #include <linux/pci.h> | ||
4 | |||
5 | #include <litmus/sched_trace.h> | ||
6 | #include <litmus/nvidia_info.h> | ||
7 | #include <litmus/litmus.h> | ||
8 | |||
9 | typedef unsigned char NvV8; /* "void": enumerated or multiple fields */ | ||
10 | typedef unsigned short NvV16; /* "void": enumerated or multiple fields */ | ||
11 | typedef unsigned char NvU8; /* 0 to 255 */ | ||
12 | typedef unsigned short NvU16; /* 0 to 65535 */ | ||
13 | typedef signed char NvS8; /* -128 to 127 */ | ||
14 | typedef signed short NvS16; /* -32768 to 32767 */ | ||
15 | typedef float NvF32; /* IEEE Single Precision (S1E8M23) */ | ||
16 | typedef double NvF64; /* IEEE Double Precision (S1E11M52) */ | ||
17 | typedef unsigned int NvV32; /* "void": enumerated or multiple fields */ | ||
18 | typedef unsigned int NvU32; /* 0 to 4294967295 */ | ||
19 | typedef unsigned long long NvU64; /* 0 to 18446744073709551615 */ | ||
20 | typedef union | ||
21 | { | ||
22 | volatile NvV8 Reg008[1]; | ||
23 | volatile NvV16 Reg016[1]; | ||
24 | volatile NvV32 Reg032[1]; | ||
25 | } litmus_nv_hwreg_t, * litmus_nv_phwreg_t; | ||
26 | |||
27 | typedef struct | ||
28 | { | ||
29 | NvU64 address; | ||
30 | NvU64 size; | ||
31 | NvU32 offset; | ||
32 | NvU32 *map; | ||
33 | litmus_nv_phwreg_t map_u; | ||
34 | } litmus_nv_aperture_t; | ||
35 | |||
36 | typedef struct | ||
37 | { | ||
38 | void *priv; /* private data */ | ||
39 | void *os_state; /* os-specific device state */ | ||
40 | |||
41 | int rmInitialized; | ||
42 | int flags; | ||
43 | |||
44 | /* PCI config info */ | ||
45 | NvU32 domain; | ||
46 | NvU16 bus; | ||
47 | NvU16 slot; | ||
48 | NvU16 vendor_id; | ||
49 | NvU16 device_id; | ||
50 | NvU16 subsystem_id; | ||
51 | NvU32 gpu_id; | ||
52 | void *handle; | ||
53 | |||
54 | NvU32 pci_cfg_space[16]; | ||
55 | |||
56 | /* physical characteristics */ | ||
57 | litmus_nv_aperture_t bars[3]; | ||
58 | litmus_nv_aperture_t *regs; | ||
59 | litmus_nv_aperture_t *fb, ud; | ||
60 | litmus_nv_aperture_t agp; | ||
61 | |||
62 | NvU32 interrupt_line; | ||
63 | |||
64 | NvU32 agp_config; | ||
65 | NvU32 agp_status; | ||
66 | |||
67 | NvU32 primary_vga; | ||
68 | |||
69 | NvU32 sim_env; | ||
70 | |||
71 | NvU32 rc_timer_enabled; | ||
72 | |||
73 | /* list of events allocated for this device */ | ||
74 | void *event_list; | ||
75 | |||
76 | void *kern_mappings; | ||
77 | |||
78 | } litmus_nv_state_t; | ||
79 | |||
80 | typedef struct work_struct litmus_nv_task_t; | ||
81 | |||
82 | typedef struct litmus_nv_work_s { | ||
83 | litmus_nv_task_t task; | ||
84 | void *data; | ||
85 | } litmus_nv_work_t; | ||
86 | |||
87 | typedef struct litmus_nv_linux_state_s { | ||
88 | litmus_nv_state_t nv_state; | ||
89 | atomic_t usage_count; | ||
90 | |||
91 | struct pci_dev *dev; | ||
92 | void *agp_bridge; | ||
93 | void *alloc_queue; | ||
94 | |||
95 | void *timer_sp; | ||
96 | void *isr_sp; | ||
97 | void *pci_cfgchk_sp; | ||
98 | void *isr_bh_sp; | ||
99 | |||
100 | #ifdef CONFIG_CUDA_4_0 | ||
101 | char registry_keys[512]; | ||
102 | #endif | ||
103 | |||
104 | /* keep track of any pending bottom halves */ | ||
105 | struct tasklet_struct tasklet; | ||
106 | litmus_nv_work_t work; | ||
107 | |||
108 | /* get a timer callback every second */ | ||
109 | struct timer_list rc_timer; | ||
110 | |||
111 | /* lock for linux-specific data, not used by core rm */ | ||
112 | struct semaphore ldata_lock; | ||
113 | |||
114 | /* lock for linux-specific alloc queue */ | ||
115 | struct semaphore at_lock; | ||
116 | |||
117 | #if 0 | ||
118 | #if defined(NV_USER_MAP) | ||
119 | /* list of user mappings */ | ||
120 | struct nv_usermap_s *usermap_list; | ||
121 | |||
122 | /* lock for VMware-specific mapping list */ | ||
123 | struct semaphore mt_lock; | ||
124 | #endif /* defined(NV_USER_MAP) */ | ||
125 | #if defined(NV_PM_SUPPORT_OLD_STYLE_APM) | ||
126 | void *apm_nv_dev; | ||
127 | #endif | ||
128 | #endif | ||
129 | |||
130 | NvU32 device_num; | ||
131 | struct litmus_nv_linux_state_s *next; | ||
132 | } litmus_nv_linux_state_t; | ||
133 | |||
134 | void dump_nvidia_info(const struct tasklet_struct *t) | ||
135 | { | ||
136 | litmus_nv_state_t* nvstate = NULL; | ||
137 | litmus_nv_linux_state_t* linuxstate = NULL; | ||
138 | struct pci_dev* pci = NULL; | ||
139 | |||
140 | nvstate = (litmus_nv_state_t*)(t->data); | ||
141 | |||
142 | if(nvstate) | ||
143 | { | ||
144 | TRACE("NV State:\n" | ||
145 | "\ttasklet ptr = %p\n" | ||
146 | "\tstate ptr = %p\n" | ||
147 | "\tprivate data ptr = %p\n" | ||
148 | "\tos state ptr = %p\n" | ||
149 | "\tdomain = %u\n" | ||
150 | "\tbus = %u\n" | ||
151 | "\tslot = %u\n" | ||
152 | "\tvender_id = %u\n" | ||
153 | "\tdevice_id = %u\n" | ||
154 | "\tsubsystem_id = %u\n" | ||
155 | "\tgpu_id = %u\n" | ||
156 | "\tinterrupt_line = %u\n", | ||
157 | t, | ||
158 | nvstate, | ||
159 | nvstate->priv, | ||
160 | nvstate->os_state, | ||
161 | nvstate->domain, | ||
162 | nvstate->bus, | ||
163 | nvstate->slot, | ||
164 | nvstate->vendor_id, | ||
165 | nvstate->device_id, | ||
166 | nvstate->subsystem_id, | ||
167 | nvstate->gpu_id, | ||
168 | nvstate->interrupt_line); | ||
169 | |||
170 | linuxstate = container_of(nvstate, litmus_nv_linux_state_t, nv_state); | ||
171 | } | ||
172 | else | ||
173 | { | ||
174 | TRACE("INVALID NVSTATE????\n"); | ||
175 | } | ||
176 | |||
177 | if(linuxstate) | ||
178 | { | ||
179 | int ls_offset = (void*)(&(linuxstate->device_num)) - (void*)(linuxstate); | ||
180 | int ns_offset_raw = (void*)(&(linuxstate->device_num)) - (void*)(&(linuxstate->nv_state)); | ||
181 | int ns_offset_desired = (void*)(&(linuxstate->device_num)) - (void*)(nvstate); | ||
182 | |||
183 | |||
184 | TRACE("LINUX NV State:\n" | ||
185 | "\tlinux nv state ptr: %p\n" | ||
186 | "\taddress of tasklet: %p\n" | ||
187 | "\taddress of work: %p\n" | ||
188 | "\tusage_count: %d\n" | ||
189 | "\tdevice_num: %u\n" | ||
190 | "\ttasklet addr == this tasklet: %d\n" | ||
191 | "\tpci: %p\n", | ||
192 | linuxstate, | ||
193 | &(linuxstate->tasklet), | ||
194 | &(linuxstate->work), | ||
195 | atomic_read(&(linuxstate->usage_count)), | ||
196 | linuxstate->device_num, | ||
197 | (t == &(linuxstate->tasklet)), | ||
198 | linuxstate->dev); | ||
199 | |||
200 | pci = linuxstate->dev; | ||
201 | |||
202 | TRACE("Offsets:\n" | ||
203 | "\tOffset from LinuxState: %d, %x\n" | ||
204 | "\tOffset from NVState: %d, %x\n" | ||
205 | "\tOffset from parameter: %d, %x\n" | ||
206 | "\tdevice_num: %u\n", | ||
207 | ls_offset, ls_offset, | ||
208 | ns_offset_raw, ns_offset_raw, | ||
209 | ns_offset_desired, ns_offset_desired, | ||
210 | *((u32*)((void*)nvstate + ns_offset_desired))); | ||
211 | } | ||
212 | else | ||
213 | { | ||
214 | TRACE("INVALID LINUXNVSTATE?????\n"); | ||
215 | } | ||
216 | |||
217 | #if 0 | ||
218 | if(pci) | ||
219 | { | ||
220 | TRACE("PCI DEV Info:\n" | ||
221 | "pci device ptr: %p\n" | ||
222 | "\tdevfn = %d\n" | ||
223 | "\tvendor = %d\n" | ||
224 | "\tdevice = %d\n" | ||
225 | "\tsubsystem_vendor = %d\n" | ||
226 | "\tsubsystem_device = %d\n" | ||
227 | "\tslot # = %d\n", | ||
228 | pci, | ||
229 | pci->devfn, | ||
230 | pci->vendor, | ||
231 | pci->device, | ||
232 | pci->subsystem_vendor, | ||
233 | pci->subsystem_device, | ||
234 | pci->slot->number); | ||
235 | } | ||
236 | else | ||
237 | { | ||
238 | TRACE("INVALID PCIDEV PTR?????\n"); | ||
239 | } | ||
240 | #endif | ||
241 | } | ||
242 | |||
243 | static struct module* nvidia_mod = NULL; | ||
244 | int init_nvidia_info(void) | ||
245 | { | ||
246 | mutex_lock(&module_mutex); | ||
247 | nvidia_mod = find_module("nvidia"); | ||
248 | mutex_unlock(&module_mutex); | ||
249 | if(nvidia_mod != NULL) | ||
250 | { | ||
251 | TRACE("%s : Found NVIDIA module. Core Code: %p to %p\n", __FUNCTION__, | ||
252 | (void*)(nvidia_mod->module_core), | ||
253 | (void*)(nvidia_mod->module_core) + nvidia_mod->core_size); | ||
254 | init_nv_device_reg(); | ||
255 | return(0); | ||
256 | } | ||
257 | else | ||
258 | { | ||
259 | TRACE("%s : Could not find NVIDIA module! Loaded?\n", __FUNCTION__); | ||
260 | return(-1); | ||
261 | } | ||
262 | } | ||
263 | |||
264 | |||
265 | /* works with pointers to static data inside the module too. */ | ||
266 | int is_nvidia_func(void* func_addr) | ||
267 | { | ||
268 | int ret = 0; | ||
269 | if(nvidia_mod) | ||
270 | { | ||
271 | ret = within_module_core((long unsigned int)func_addr, nvidia_mod); | ||
272 | /* | ||
273 | if(ret) | ||
274 | { | ||
275 | TRACE("%s : %p is in NVIDIA module: %d\n", | ||
276 | __FUNCTION__, func_addr, ret); | ||
277 | }*/ | ||
278 | } | ||
279 | |||
280 | return(ret); | ||
281 | } | ||
282 | |||
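The check above is what lets interrupt-interception code decide whether a bottom half belongs to the NVIDIA driver at all. A minimal sketch of that decision, assuming a hypothetical nv_device_to_klitirqd_id() mapping (the patch's actual device-to-daemon mapping is not part of this file):

/* Illustrative sketch only; not part of the patch.
 * nv_device_to_klitirqd_id() is hypothetical. */
static int maybe_redirect_nv_tasklet(struct tasklet_struct *t)
{
	u32 dev;

	if (!is_nvidia_func((void*)t->func))
		return 0;  /* not an NVIDIA bottom half; leave it on the normal path */

	dev = get_tasklet_nv_device_num(t);
	t->owner = get_nv_device_owner(dev);
	if (t->owner == NULL || !is_realtime(t->owner))
		return 0;  /* no registered real-time owner */

	return __litmus_tasklet_schedule(t, nv_device_to_klitirqd_id(dev));
}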
283 | u32 get_tasklet_nv_device_num(const struct tasklet_struct *t) | ||
284 | { | ||
285 | // life is too short to use hard-coded offsets. update this later. | ||
286 | litmus_nv_state_t* nvstate = (litmus_nv_state_t*)(t->data); | ||
287 | litmus_nv_linux_state_t* linuxstate = container_of(nvstate, litmus_nv_linux_state_t, nv_state); | ||
288 | |||
289 | BUG_ON(linuxstate->device_num >= NV_DEVICE_NUM); | ||
290 | |||
291 | return(linuxstate->device_num); | ||
292 | |||
293 | //int DEVICE_NUM_OFFSET = (void*)(&(linuxstate->device_num)) - (void*)(nvstate); | ||
294 | |||
295 | #if 0 | ||
296 | // offset determined through observed behavior of the NV driver. | ||
297 | //const int DEVICE_NUM_OFFSET = 0x480; // CUDA 4.0 RC1 | ||
298 | //const int DEVICE_NUM_OFFSET = 0x510; // CUDA 4.0 RC2 | ||
299 | |||
300 | void* state = (void*)(t->data); | ||
301 | void* device_num_ptr = state + DEVICE_NUM_OFFSET; | ||
302 | |||
303 | //dump_nvidia_info(t); | ||
304 | return(*((u32*)device_num_ptr)); | ||
305 | #endif | ||
306 | } | ||
307 | |||
308 | u32 get_work_nv_device_num(const struct work_struct *t) | ||
309 | { | ||
310 | // offset determined through observed behavior of the NV driver. | ||
311 | const int DEVICE_NUM_OFFSET = sizeof(struct work_struct); | ||
312 | void* state = (void*)(t); | ||
313 | void** device_num_ptr = state + DEVICE_NUM_OFFSET; | ||
314 | return(*((u32*)(*device_num_ptr))); | ||
315 | } | ||
316 | |||
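The magic constant above works because the work item is the first member of litmus_nv_work_t with the data pointer immediately after it, so sizeof(struct work_struct) is effectively offsetof(litmus_nv_work_t, data). A sketch of the same lookup written with container_of(), which makes that layout assumption explicit (illustrative, not a replacement used by the patch):

/* Illustrative sketch only; not part of the patch. */
static u32 get_work_nv_device_num_alt(struct work_struct *t)
{
	litmus_nv_work_t* nv_work = container_of(t, litmus_nv_work_t, task);

	/* data is assumed to point at a structure whose first word is the
	 * device number, exactly as the offset-based version assumes. */
	return *((u32*)nv_work->data);
}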
317 | |||
318 | |||
319 | typedef struct { | ||
320 | raw_spinlock_t lock; | ||
321 | struct task_struct *device_owner; | ||
322 | }nv_device_registry_t; | ||
323 | |||
324 | static nv_device_registry_t NV_DEVICE_REG[NV_DEVICE_NUM]; | ||
325 | |||
326 | int init_nv_device_reg(void) | ||
327 | { | ||
328 | int i; | ||
329 | |||
330 | //memset(NV_DEVICE_REG, 0, sizeof(NV_DEVICE_REG)); | ||
331 | |||
332 | for(i = 0; i < NV_DEVICE_NUM; ++i) | ||
333 | { | ||
334 | raw_spin_lock_init(&NV_DEVICE_REG[i].lock); | ||
335 | NV_DEVICE_REG[i].device_owner = NULL; | ||
336 | } | ||
337 | |||
338 | return(1); | ||
339 | } | ||
340 | |||
341 | /* Use to look up the nv_device_id for a given owner. | ||
342 | (Returns -1 if no associated device id can be found.) */ | ||
343 | /* | ||
344 | int get_nv_device_id(struct task_struct* owner) | ||
345 | { | ||
346 | int i; | ||
347 | if(!owner) | ||
348 | { | ||
349 | return(-1); | ||
350 | } | ||
351 | for(i = 0; i < NV_DEVICE_NUM; ++i) | ||
352 | { | ||
353 | if(NV_DEVICE_REG[i].device_owner == owner) | ||
354 | return(i); | ||
355 | } | ||
356 | return(-1); | ||
357 | } | ||
358 | */ | ||
359 | |||
360 | |||
361 | |||
362 | static int __reg_nv_device(int reg_device_id) | ||
363 | { | ||
364 | struct task_struct* old = | ||
365 | cmpxchg(&NV_DEVICE_REG[reg_device_id].device_owner, | ||
366 | NULL, | ||
367 | current); | ||
368 | |||
369 | mb(); | ||
370 | |||
371 | if(likely(old == NULL)) | ||
372 | { | ||
373 | down_and_set_stat(current, HELD, &tsk_rt(current)->klitirqd_sem); | ||
374 | TRACE_CUR("%s: device %d registered.\n", __FUNCTION__, reg_device_id); | ||
375 | return(0); | ||
376 | } | ||
377 | else | ||
378 | { | ||
379 | TRACE_CUR("%s: device %d is already in use!\n", __FUNCTION__, reg_device_id); | ||
380 | return(-EBUSY); | ||
381 | } | ||
382 | |||
383 | #if 0 | ||
384 | //unsigned long flags; | ||
385 | //raw_spin_lock_irqsave(&NV_DEVICE_REG[reg_device_id].lock, flags); | ||
386 | //lock_nv_registry(reg_device_id, &flags); | ||
387 | |||
388 | if(likely(NV_DEVICE_REG[reg_device_id].device_owner == NULL)) | ||
389 | { | ||
390 | NV_DEVICE_REG[reg_device_id].device_owner = current; | ||
391 | mb(); // needed? | ||
392 | |||
393 | // release spin lock before chance of going to sleep. | ||
394 | //raw_spin_unlock_irqrestore(&NV_DEVICE_REG[reg_device_id].lock, flags); | ||
395 | //unlock_nv_registry(reg_device_id, &flags); | ||
396 | |||
397 | down_and_set_stat(current, HELD, &tsk_rt(current)->klitirqd_sem); | ||
398 | TRACE_CUR("%s: device %d registered.\n", __FUNCTION__, reg_device_id); | ||
399 | return(0); | ||
400 | } | ||
401 | else | ||
402 | { | ||
403 | //raw_spin_unlock_irqrestore(&NV_DEVICE_REG[reg_device_id].lock, flags); | ||
404 | //unlock_nv_registry(reg_device_id, &flags); | ||
405 | |||
406 | TRACE_CUR("%s: device %d is already in use!\n", __FUNCTION__, reg_device_id); | ||
407 | return(-EBUSY); | ||
408 | } | ||
409 | #endif | ||
410 | } | ||
411 | |||
412 | static int __clear_reg_nv_device(int de_reg_device_id) | ||
413 | { | ||
414 | int ret; | ||
415 | unsigned long flags; | ||
416 | struct task_struct* klitirqd_th = get_klitirqd(de_reg_device_id); | ||
417 | struct task_struct* old; | ||
418 | |||
419 | lock_nv_registry(de_reg_device_id, &flags); | ||
420 | |||
421 | old = cmpxchg(&NV_DEVICE_REG[de_reg_device_id].device_owner, | ||
422 | current, | ||
423 | NULL); | ||
424 | |||
425 | mb(); | ||
426 | |||
427 | if(likely(old == current)) | ||
428 | { | ||
429 | flush_pending(klitirqd_th, current); | ||
430 | //unlock_nv_registry(de_reg_device_id, &flags); | ||
431 | |||
432 | up_and_set_stat(current, NOT_HELD, &tsk_rt(current)->klitirqd_sem); | ||
433 | |||
434 | unlock_nv_registry(de_reg_device_id, &flags); | ||
435 | ret = 0; | ||
436 | |||
437 | TRACE_CUR("%s: semaphore released.\n",__FUNCTION__); | ||
438 | } | ||
439 | else | ||
440 | { | ||
441 | unlock_nv_registry(de_reg_device_id, &flags); | ||
442 | ret = -EINVAL; | ||
443 | |||
444 | if(old) | ||
445 | TRACE_CUR("%s: device %d is not registered for this process's use! %s/%d is!\n", | ||
446 | __FUNCTION__, de_reg_device_id, old->comm, old->pid); | ||
447 | else | ||
448 | TRACE_CUR("%s: device %d is not registered for this process's use! No one is!\n", | ||
449 | __FUNCTION__, de_reg_device_id); | ||
450 | } | ||
451 | |||
452 | return(ret); | ||
453 | } | ||
454 | |||
455 | |||
456 | int reg_nv_device(int reg_device_id, int reg_action) | ||
457 | { | ||
458 | int ret; | ||
459 | |||
460 | if((reg_device_id < NV_DEVICE_NUM) && (reg_device_id >= 0)) | ||
461 | { | ||
462 | if(reg_action) | ||
463 | ret = __reg_nv_device(reg_device_id); | ||
464 | else | ||
465 | ret = __clear_reg_nv_device(reg_device_id); | ||
466 | } | ||
467 | else | ||
468 | { | ||
469 | ret = -ENODEV; | ||
470 | } | ||
471 | |||
472 | return(ret); | ||
473 | } | ||
474 | |||
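reg_nv_device() is the single entry point for both directions: a non-zero reg_action claims the device for the calling task (and acquires its klitirqd_sem via down_and_set_stat()), while zero releases it. A minimal sketch of the expected calling pattern, assuming the function is reached from task context such as a LITMUS system call (how it is actually exposed is outside this file):

/* Illustrative sketch only; not part of the patch. */
static int gpu_critical_section_example(int device)
{
	int err = reg_nv_device(device, 1);  /* claim device, acquire klitirqd_sem */
	if (err)
		return err;  /* -EBUSY if another task owns it, -ENODEV if out of range */

	/* ... submit GPU work; interrupts for this device are now charged
	 * to the calling task via increment_nv_int_count() ... */

	return reg_nv_device(device, 0);  /* flush pending klitirqd work, release */
}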
475 | /* Use to get the owner of the given nv_device_id. */ | ||
476 | struct task_struct* get_nv_device_owner(u32 target_device_id) | ||
477 | { | ||
478 | struct task_struct* owner; | ||
479 | BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
480 | owner = NV_DEVICE_REG[target_device_id].device_owner; | ||
481 | return(owner); | ||
482 | } | ||
483 | |||
484 | void lock_nv_registry(u32 target_device_id, unsigned long* flags) | ||
485 | { | ||
486 | BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
487 | |||
488 | if(in_interrupt()) | ||
489 | TRACE("Locking registry for %d.\n", target_device_id); | ||
490 | else | ||
491 | TRACE_CUR("Locking registry for %d.\n", target_device_id); | ||
492 | |||
493 | raw_spin_lock_irqsave(&NV_DEVICE_REG[target_device_id].lock, *flags); | ||
494 | } | ||
495 | |||
496 | void unlock_nv_registry(u32 target_device_id, unsigned long* flags) | ||
497 | { | ||
498 | BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
499 | |||
500 | if(in_interrupt()) | ||
501 | TRACE("Unlocking registry for %d.\n", target_device_id); | ||
502 | else | ||
503 | TRACE_CUR("Unlocking registry for %d.\n", target_device_id); | ||
504 | |||
505 | raw_spin_unlock_irqrestore(&NV_DEVICE_REG[target_device_id].lock, *flags); | ||
506 | } | ||
507 | |||
508 | |||
509 | void increment_nv_int_count(u32 device) | ||
510 | { | ||
511 | unsigned long flags; | ||
512 | struct task_struct* owner; | ||
513 | |||
514 | lock_nv_registry(device, &flags); | ||
515 | |||
516 | owner = NV_DEVICE_REG[device].device_owner; | ||
517 | if(owner) | ||
518 | { | ||
519 | atomic_inc(&tsk_rt(owner)->nv_int_count); | ||
520 | } | ||
521 | |||
522 | unlock_nv_registry(device, &flags); | ||
523 | } | ||
524 | EXPORT_SYMBOL(increment_nv_int_count); | ||
525 | |||
526 | |||
diff --git a/litmus/preempt.c b/litmus/preempt.c index ebe2e3461895..08b98c3b57bf 100644 --- a/litmus/preempt.c +++ b/litmus/preempt.c | |||
@@ -30,8 +30,11 @@ void sched_state_will_schedule(struct task_struct* tsk) | |||
30 | /* Litmus tasks should never be subject to a remote | 30 | /* Litmus tasks should never be subject to a remote |
31 | * set_tsk_need_resched(). */ | 31 | * set_tsk_need_resched(). */ |
32 | BUG_ON(is_realtime(tsk)); | 32 | BUG_ON(is_realtime(tsk)); |
33 | |||
34 | /* | ||
33 | TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n", | 35 | TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n", |
34 | __builtin_return_address(0)); | 36 | __builtin_return_address(0)); |
37 | */ | ||
35 | } | 38 | } |
36 | 39 | ||
37 | /* Called by the IPI handler after another CPU called smp_send_resched(). */ | 40 | /* Called by the IPI handler after another CPU called smp_send_resched(). */ |
@@ -43,13 +46,17 @@ void sched_state_ipi(void) | |||
43 | /* Cause scheduler to be invoked. | 46 | /* Cause scheduler to be invoked. |
44 | * This will cause a transition to WILL_SCHEDULE. */ | 47 | * This will cause a transition to WILL_SCHEDULE. */ |
45 | set_tsk_need_resched(current); | 48 | set_tsk_need_resched(current); |
49 | /* | ||
46 | TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n", | 50 | TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n", |
47 | current->comm, current->pid); | 51 | current->comm, current->pid); |
52 | */ | ||
48 | } else { | 53 | } else { |
49 | /* ignore */ | 54 | /* ignore */ |
55 | /* | ||
50 | TRACE_STATE("ignoring IPI in state %x (%s)\n", | 56 | TRACE_STATE("ignoring IPI in state %x (%s)\n", |
51 | get_sched_state(), | 57 | get_sched_state(), |
52 | sched_state_name(get_sched_state())); | 58 | sched_state_name(get_sched_state())); |
59 | */ | ||
53 | } | 60 | } |
54 | } | 61 | } |
55 | 62 | ||
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 73fe1c442a0d..9b0a8d3b624d 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/percpu.h> | 29 | #include <linux/percpu.h> |
30 | #include <linux/sched.h> | 30 | #include <linux/sched.h> |
31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
32 | #include <linux/uaccess.h> | ||
32 | 33 | ||
33 | #include <linux/module.h> | 34 | #include <linux/module.h> |
34 | 35 | ||
@@ -45,7 +46,18 @@ | |||
45 | 46 | ||
46 | /* to configure the cluster size */ | 47 | /* to configure the cluster size */ |
47 | #include <litmus/litmus_proc.h> | 48 | #include <litmus/litmus_proc.h> |
48 | #include <linux/uaccess.h> | 49 | |
50 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
51 | #include <litmus/affinity.h> | ||
52 | #endif | ||
53 | |||
54 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
55 | #include <litmus/litmus_softirq.h> | ||
56 | #endif | ||
57 | |||
58 | #ifdef CONFIG_LITMUS_NVIDIA | ||
59 | #include <litmus/nvidia_info.h> | ||
60 | #endif | ||
49 | 61 | ||
50 | /* Reference configuration variable. Determines which cache level is used to | 62 | /* Reference configuration variable. Determines which cache level is used to |
51 | * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that | 63 | * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that |
@@ -95,7 +107,7 @@ typedef struct clusterdomain { | |||
95 | struct bheap_node *heap_node; | 107 | struct bheap_node *heap_node; |
96 | struct bheap cpu_heap; | 108 | struct bheap cpu_heap; |
97 | /* lock for this cluster */ | 109 | /* lock for this cluster */ |
98 | #define lock domain.ready_lock | 110 | #define cedf_lock domain.ready_lock |
99 | } cedf_domain_t; | 111 | } cedf_domain_t; |
100 | 112 | ||
101 | /* a cedf_domain per cluster; allocation is done at init/activation time */ | 113 | /* a cedf_domain per cluster; allocation is done at init/activation time */ |
@@ -257,21 +269,50 @@ static noinline void requeue(struct task_struct* task) | |||
257 | } | 269 | } |
258 | } | 270 | } |
259 | 271 | ||
272 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
273 | static cpu_entry_t* cedf_get_nearest_available_cpu( | ||
274 | cedf_domain_t *cluster, cpu_entry_t* start) | ||
275 | { | ||
276 | cpu_entry_t* affinity; | ||
277 | |||
278 | get_nearest_available_cpu(affinity, start, cedf_cpu_entries, -1); | ||
279 | |||
280 | /* make sure CPU is in our cluster */ | ||
281 | if(affinity && cpu_isset(affinity->cpu, *cluster->cpu_map)) | ||
282 | return(affinity); | ||
283 | else | ||
284 | return(NULL); | ||
285 | } | ||
286 | #endif | ||
287 | |||
288 | |||
260 | /* check for any necessary preemptions */ | 289 | /* check for any necessary preemptions */ |
261 | static void check_for_preemptions(cedf_domain_t *cluster) | 290 | static void check_for_preemptions(cedf_domain_t *cluster) |
262 | { | 291 | { |
263 | struct task_struct *task; | 292 | struct task_struct *task; |
264 | cpu_entry_t* last; | 293 | cpu_entry_t *last; |
265 | 294 | ||
266 | for(last = lowest_prio_cpu(cluster); | 295 | for(last = lowest_prio_cpu(cluster); |
267 | edf_preemption_needed(&cluster->domain, last->linked); | 296 | edf_preemption_needed(&cluster->domain, last->linked); |
268 | last = lowest_prio_cpu(cluster)) { | 297 | last = lowest_prio_cpu(cluster)) { |
269 | /* preemption necessary */ | 298 | /* preemption necessary */ |
270 | task = __take_ready(&cluster->domain); | 299 | task = __take_ready(&cluster->domain); |
271 | TRACE("check_for_preemptions: attempting to link task %d to %d\n", | 300 | #ifdef CONFIG_SCHED_CPU_AFFINITY |
272 | task->pid, last->cpu); | 301 | { |
302 | cpu_entry_t* affinity = | ||
303 | cedf_get_nearest_available_cpu(cluster, | ||
304 | &per_cpu(cedf_cpu_entries, task_cpu(task))); | ||
305 | if(affinity) | ||
306 | last = affinity; | ||
307 | else if(last->linked) | ||
308 | requeue(last->linked); | ||
309 | } | ||
310 | #else | ||
273 | if (last->linked) | 311 | if (last->linked) |
274 | requeue(last->linked); | 312 | requeue(last->linked); |
313 | #endif | ||
314 | TRACE("check_for_preemptions: attempting to link task %d to %d\n", | ||
315 | task->pid, last->cpu); | ||
275 | link_task_to_cpu(task, last); | 316 | link_task_to_cpu(task, last); |
276 | preempt(last); | 317 | preempt(last); |
277 | } | 318 | } |
@@ -292,12 +333,12 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | |||
292 | cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain); | 333 | cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain); |
293 | unsigned long flags; | 334 | unsigned long flags; |
294 | 335 | ||
295 | raw_spin_lock_irqsave(&cluster->lock, flags); | 336 | raw_spin_lock_irqsave(&cluster->cedf_lock, flags); |
296 | 337 | ||
297 | __merge_ready(&cluster->domain, tasks); | 338 | __merge_ready(&cluster->domain, tasks); |
298 | check_for_preemptions(cluster); | 339 | check_for_preemptions(cluster); |
299 | 340 | ||
300 | raw_spin_unlock_irqrestore(&cluster->lock, flags); | 341 | raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags); |
301 | } | 342 | } |
302 | 343 | ||
303 | /* caller holds cedf_lock */ | 344 | /* caller holds cedf_lock */ |
@@ -307,6 +348,10 @@ static noinline void job_completion(struct task_struct *t, int forced) | |||
307 | 348 | ||
308 | sched_trace_task_completion(t, forced); | 349 | sched_trace_task_completion(t, forced); |
309 | 350 | ||
351 | #ifdef CONFIG_LITMUS_NVIDIA | ||
352 | atomic_set(&tsk_rt(t)->nv_int_count, 0); | ||
353 | #endif | ||
354 | |||
310 | TRACE_TASK(t, "job_completion().\n"); | 355 | TRACE_TASK(t, "job_completion().\n"); |
311 | 356 | ||
312 | /* set flags */ | 357 | /* set flags */ |
@@ -378,7 +423,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) | |||
378 | int out_of_time, sleep, preempt, np, exists, blocks; | 423 | int out_of_time, sleep, preempt, np, exists, blocks; |
379 | struct task_struct* next = NULL; | 424 | struct task_struct* next = NULL; |
380 | 425 | ||
381 | raw_spin_lock(&cluster->lock); | 426 | raw_spin_lock(&cluster->cedf_lock); |
382 | clear_will_schedule(); | 427 | clear_will_schedule(); |
383 | 428 | ||
384 | /* sanity checking */ | 429 | /* sanity checking */ |
@@ -462,7 +507,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) | |||
462 | next = prev; | 507 | next = prev; |
463 | 508 | ||
464 | sched_state_task_picked(); | 509 | sched_state_task_picked(); |
465 | raw_spin_unlock(&cluster->lock); | 510 | raw_spin_unlock(&cluster->cedf_lock); |
466 | 511 | ||
467 | #ifdef WANT_ALL_SCHED_EVENTS | 512 | #ifdef WANT_ALL_SCHED_EVENTS |
468 | TRACE("cedf_lock released, next=0x%p\n", next); | 513 | TRACE("cedf_lock released, next=0x%p\n", next); |
@@ -504,7 +549,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running) | |||
504 | /* the cluster doesn't change even if t is running */ | 549 | /* the cluster doesn't change even if t is running */ |
505 | cluster = task_cpu_cluster(t); | 550 | cluster = task_cpu_cluster(t); |
506 | 551 | ||
507 | raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags); | 552 | raw_spin_lock_irqsave(&cluster->cedf_lock, flags); |
508 | 553 | ||
509 | /* setup job params */ | 554 | /* setup job params */ |
510 | release_at(t, litmus_clock()); | 555 | release_at(t, litmus_clock()); |
@@ -521,20 +566,22 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running) | |||
521 | t->rt_param.linked_on = NO_CPU; | 566 | t->rt_param.linked_on = NO_CPU; |
522 | 567 | ||
523 | cedf_job_arrival(t); | 568 | cedf_job_arrival(t); |
524 | raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags); | 569 | raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags); |
525 | } | 570 | } |
526 | 571 | ||
527 | static void cedf_task_wake_up(struct task_struct *task) | 572 | static void cedf_task_wake_up(struct task_struct *task) |
528 | { | 573 | { |
529 | unsigned long flags; | 574 | unsigned long flags; |
530 | lt_t now; | 575 | //lt_t now; |
531 | cedf_domain_t *cluster; | 576 | cedf_domain_t *cluster; |
532 | 577 | ||
533 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | 578 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); |
534 | 579 | ||
535 | cluster = task_cpu_cluster(task); | 580 | cluster = task_cpu_cluster(task); |
536 | 581 | ||
537 | raw_spin_lock_irqsave(&cluster->lock, flags); | 582 | raw_spin_lock_irqsave(&cluster->cedf_lock, flags); |
583 | |||
584 | #if 0 // sporadic task model | ||
538 | /* We need to take suspensions because of semaphores into | 585 | /* We need to take suspensions because of semaphores into |
539 | * account! If a job resumes after being suspended due to acquiring | 586 | * account! If a job resumes after being suspended due to acquiring |
540 | * a semaphore, it should never be treated as a new job release. | 587 | * a semaphore, it should never be treated as a new job release. |
@@ -556,8 +603,17 @@ static void cedf_task_wake_up(struct task_struct *task) | |||
556 | } | 603 | } |
557 | } | 604 | } |
558 | } | 605 | } |
559 | cedf_job_arrival(task); | 606 | #endif |
560 | raw_spin_unlock_irqrestore(&cluster->lock, flags); | 607 | |
608 | //BUG_ON(tsk_rt(task)->linked_on != NO_CPU); | ||
609 | set_rt_flags(task, RT_F_RUNNING); // periodic model | ||
610 | |||
611 | if(tsk_rt(task)->linked_on == NO_CPU) | ||
612 | cedf_job_arrival(task); | ||
613 | else | ||
614 | TRACE("WTF, mate?!\n"); | ||
615 | |||
616 | raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags); | ||
561 | } | 617 | } |
562 | 618 | ||
563 | static void cedf_task_block(struct task_struct *t) | 619 | static void cedf_task_block(struct task_struct *t) |
@@ -570,9 +626,9 @@ static void cedf_task_block(struct task_struct *t) | |||
570 | cluster = task_cpu_cluster(t); | 626 | cluster = task_cpu_cluster(t); |
571 | 627 | ||
572 | /* unlink if necessary */ | 628 | /* unlink if necessary */ |
573 | raw_spin_lock_irqsave(&cluster->lock, flags); | 629 | raw_spin_lock_irqsave(&cluster->cedf_lock, flags); |
574 | unlink(t); | 630 | unlink(t); |
575 | raw_spin_unlock_irqrestore(&cluster->lock, flags); | 631 | raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags); |
576 | 632 | ||
577 | BUG_ON(!is_realtime(t)); | 633 | BUG_ON(!is_realtime(t)); |
578 | } | 634 | } |
@@ -584,7 +640,7 @@ static void cedf_task_exit(struct task_struct * t) | |||
584 | cedf_domain_t *cluster = task_cpu_cluster(t); | 640 | cedf_domain_t *cluster = task_cpu_cluster(t); |
585 | 641 | ||
586 | /* unlink if necessary */ | 642 | /* unlink if necessary */ |
587 | raw_spin_lock_irqsave(&cluster->lock, flags); | 643 | raw_spin_lock_irqsave(&cluster->cedf_lock, flags); |
588 | unlink(t); | 644 | unlink(t); |
589 | if (tsk_rt(t)->scheduled_on != NO_CPU) { | 645 | if (tsk_rt(t)->scheduled_on != NO_CPU) { |
590 | cpu_entry_t *cpu; | 646 | cpu_entry_t *cpu; |
@@ -592,7 +648,7 @@ static void cedf_task_exit(struct task_struct * t) | |||
592 | cpu->scheduled = NULL; | 648 | cpu->scheduled = NULL; |
593 | tsk_rt(t)->scheduled_on = NO_CPU; | 649 | tsk_rt(t)->scheduled_on = NO_CPU; |
594 | } | 650 | } |
595 | raw_spin_unlock_irqrestore(&cluster->lock, flags); | 651 | raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags); |
596 | 652 | ||
597 | BUG_ON(!is_realtime(t)); | 653 | BUG_ON(!is_realtime(t)); |
598 | TRACE_TASK(t, "RIP\n"); | 654 | TRACE_TASK(t, "RIP\n"); |
@@ -603,6 +659,721 @@ static long cedf_admit_task(struct task_struct* tsk) | |||
603 | return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL; | 659 | return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL; |
604 | } | 660 | } |
605 | 661 | ||
662 | |||
663 | |||
664 | |||
665 | |||
666 | |||
667 | |||
668 | |||
669 | |||
670 | |||
671 | |||
672 | |||
673 | |||
674 | #ifdef CONFIG_LITMUS_LOCKING | ||
675 | |||
676 | #include <litmus/fdso.h> | ||
677 | |||
678 | |||
679 | static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
680 | { | ||
681 | int linked_on; | ||
682 | int check_preempt = 0; | ||
683 | |||
684 | cedf_domain_t* cluster = task_cpu_cluster(t); | ||
685 | |||
686 | if(prio_inh != NULL) | ||
687 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | ||
688 | else | ||
689 | TRACE_TASK(t, "inherits priority from %p\n", prio_inh); | ||
690 | |||
691 | sched_trace_eff_prio_change(t, prio_inh); | ||
692 | |||
693 | tsk_rt(t)->inh_task = prio_inh; | ||
694 | |||
695 | linked_on = tsk_rt(t)->linked_on; | ||
696 | |||
697 | /* If it is scheduled, then we need to reorder the CPU heap. */ | ||
698 | if (linked_on != NO_CPU) { | ||
699 | TRACE_TASK(t, "%s: linked on %d\n", | ||
700 | __FUNCTION__, linked_on); | ||
701 | /* Holder is scheduled; need to re-order CPUs. | ||
702 | * We can't use heap_decrease() here since | ||
703 | * the cpu_heap is ordered in reverse direction, so | ||
704 | * it is actually an increase. */ | ||
705 | bheap_delete(cpu_lower_prio, &cluster->cpu_heap, | ||
706 | per_cpu(cedf_cpu_entries, linked_on).hn); | ||
707 | bheap_insert(cpu_lower_prio, &cluster->cpu_heap, | ||
708 | per_cpu(cedf_cpu_entries, linked_on).hn); | ||
709 | } else { | ||
710 | /* holder may be queued: first stop queue changes */ | ||
711 | raw_spin_lock(&cluster->domain.release_lock); | ||
712 | if (is_queued(t)) { | ||
713 | TRACE_TASK(t, "%s: is queued\n", __FUNCTION__); | ||
714 | |||
715 | /* We need to update the position of holder in some | ||
716 | * heap. Note that this could be a release heap if | ||
717 | * budget enforcement is used and this job overran. */ | ||
718 | check_preempt = !bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node); | ||
719 | |||
720 | } else { | ||
721 | /* Nothing to do: if it is not queued and not linked | ||
722 | * then it is either sleeping or currently being moved | ||
723 | * by other code (e.g., a timer interrupt handler) that | ||
724 | * will use the correct priority when enqueuing the | ||
725 | * task. */ | ||
726 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", __FUNCTION__); | ||
727 | } | ||
728 | raw_spin_unlock(&cluster->domain.release_lock); | ||
729 | |||
730 | /* If holder was enqueued in a release heap, then the following | ||
731 | * preemption check is pointless, but we can't easily detect | ||
732 | * that case. If you want to fix this, then consider that | ||
733 | * simply adding a state flag requires O(n) time to update when | ||
734 | * releasing n tasks, which conflicts with the goal to have | ||
735 | * O(log n) merges. */ | ||
736 | if (check_preempt) { | ||
737 | /* heap_decrease() hit the top level of the heap: make | ||
738 | * sure preemption checks get the right task, not the | ||
739 | * potentially stale cache. */ | ||
740 | bheap_uncache_min(edf_ready_order, &cluster->domain.ready_queue); | ||
741 | check_for_preemptions(cluster); | ||
742 | } | ||
743 | } | ||
744 | } | ||
745 | |||
746 | /* called with IRQs off */ | ||
747 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
748 | { | ||
749 | cedf_domain_t* cluster = task_cpu_cluster(t); | ||
750 | |||
751 | raw_spin_lock(&cluster->cedf_lock); | ||
752 | |||
753 | __set_priority_inheritance(t, prio_inh); | ||
754 | |||
755 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
756 | if(tsk_rt(t)->cur_klitirqd != NULL) | ||
757 | { | ||
758 | TRACE_TASK(t, "%s/%d inherits a new priority!\n", | ||
759 | tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); | ||
760 | |||
761 | __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh); | ||
762 | } | ||
763 | #endif | ||
764 | |||
765 | raw_spin_unlock(&cluster->cedf_lock); | ||
766 | } | ||
767 | |||
768 | |||
769 | /* called with IRQs off */ | ||
770 | static void __clear_priority_inheritance(struct task_struct* t) | ||
771 | { | ||
772 | TRACE_TASK(t, "priority restored\n"); | ||
773 | |||
774 | if(tsk_rt(t)->scheduled_on != NO_CPU) | ||
775 | { | ||
776 | sched_trace_eff_prio_change(t, NULL); | ||
777 | |||
778 | tsk_rt(t)->inh_task = NULL; | ||
779 | |||
780 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
781 | * since the priority was effectively lowered. */ | ||
782 | unlink(t); | ||
783 | cedf_job_arrival(t); | ||
784 | } | ||
785 | else | ||
786 | { | ||
787 | __set_priority_inheritance(t, NULL); | ||
788 | } | ||
789 | |||
790 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
791 | if(tsk_rt(t)->cur_klitirqd != NULL) | ||
792 | { | ||
793 | TRACE_TASK(t, "%s/%d inheritance set back to owner.\n", | ||
794 | tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); | ||
795 | |||
796 | if(tsk_rt(tsk_rt(t)->cur_klitirqd)->scheduled_on != NO_CPU) | ||
797 | { | ||
798 | sched_trace_eff_prio_change(tsk_rt(t)->cur_klitirqd, t); | ||
799 | |||
800 | tsk_rt(tsk_rt(t)->cur_klitirqd)->inh_task = t; | ||
801 | |||
802 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
803 | * since the priority was effectively lowered. */ | ||
804 | unlink(tsk_rt(t)->cur_klitirqd); | ||
805 | cedf_job_arrival(tsk_rt(t)->cur_klitirqd); | ||
806 | } | ||
807 | else | ||
808 | { | ||
809 | __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, t); | ||
810 | } | ||
811 | } | ||
812 | #endif | ||
813 | } | ||
814 | |||
815 | /* called with IRQs off */ | ||
816 | static void clear_priority_inheritance(struct task_struct* t) | ||
817 | { | ||
818 | cedf_domain_t* cluster = task_cpu_cluster(t); | ||
819 | |||
820 | raw_spin_lock(&cluster->cedf_lock); | ||
821 | __clear_priority_inheritance(t); | ||
822 | raw_spin_unlock(&cluster->cedf_lock); | ||
823 | } | ||
824 | |||
825 | |||
826 | |||
827 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
828 | /* called with IRQs off */ | ||
829 | static void set_priority_inheritance_klitirqd(struct task_struct* klitirqd, | ||
830 | struct task_struct* old_owner, | ||
831 | struct task_struct* new_owner) | ||
832 | { | ||
833 | cedf_domain_t* cluster = task_cpu_cluster(klitirqd); | ||
834 | |||
835 | BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); | ||
836 | |||
837 | raw_spin_lock(&cluster->cedf_lock); | ||
838 | |||
839 | if(old_owner != new_owner) | ||
840 | { | ||
841 | if(old_owner) | ||
842 | { | ||
843 | // unreachable? | ||
844 | tsk_rt(old_owner)->cur_klitirqd = NULL; | ||
845 | } | ||
846 | |||
847 | TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n", | ||
848 | new_owner->comm, new_owner->pid); | ||
849 | |||
850 | tsk_rt(new_owner)->cur_klitirqd = klitirqd; | ||
851 | } | ||
852 | |||
853 | __set_priority_inheritance(klitirqd, | ||
854 | (tsk_rt(new_owner)->inh_task == NULL) ? | ||
855 | new_owner : | ||
856 | tsk_rt(new_owner)->inh_task); | ||
857 | |||
858 | raw_spin_unlock(&cluster->cedf_lock); | ||
859 | } | ||
860 | |||
861 | /* called with IRQs off */ | ||
862 | static void clear_priority_inheritance_klitirqd(struct task_struct* klitirqd, | ||
863 | struct task_struct* old_owner) | ||
864 | { | ||
865 | cedf_domain_t* cluster = task_cpu_cluster(klitirqd); | ||
866 | |||
867 | BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); | ||
868 | |||
869 | raw_spin_lock(&cluster->cedf_lock); | ||
870 | |||
871 | TRACE_TASK(klitirqd, "priority restored\n"); | ||
872 | |||
873 | if(tsk_rt(klitirqd)->scheduled_on != NO_CPU) | ||
874 | { | ||
875 | tsk_rt(klitirqd)->inh_task = NULL; | ||
876 | |||
877 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
878 | * since the priority was effectively lowered. */ | ||
879 | unlink(klitirqd); | ||
880 | cedf_job_arrival(klitirqd); | ||
881 | } | ||
882 | else | ||
883 | { | ||
884 | __set_priority_inheritance(klitirqd, NULL); | ||
885 | } | ||
886 | |||
887 | tsk_rt(old_owner)->cur_klitirqd = NULL; | ||
888 | |||
889 | raw_spin_unlock(&cluster->cedf_lock); | ||
890 | } | ||
891 | #endif // CONFIG_LITMUS_SOFTIRQD | ||
892 | |||
893 | |||
894 | /* ******************** KFMLP support ********************** */ | ||
895 | |||
896 | /* struct for semaphore with priority inheritance */ | ||
897 | struct kfmlp_queue | ||
898 | { | ||
899 | wait_queue_head_t wait; | ||
900 | struct task_struct* owner; | ||
901 | struct task_struct* hp_waiter; | ||
902 | int count; /* number of waiters + holder */ | ||
903 | }; | ||
904 | |||
905 | struct kfmlp_semaphore | ||
906 | { | ||
907 | struct litmus_lock litmus_lock; | ||
908 | |||
909 | spinlock_t lock; | ||
910 | |||
911 | int num_resources; /* aka k */ | ||
912 | struct kfmlp_queue *queues; /* array */ | ||
913 | struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */ | ||
914 | }; | ||
915 | |||
916 | static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock) | ||
917 | { | ||
918 | return container_of(lock, struct kfmlp_semaphore, litmus_lock); | ||
919 | } | ||
920 | |||
921 | static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem, | ||
922 | struct kfmlp_queue* queue) | ||
923 | { | ||
924 | return (queue - &sem->queues[0]); | ||
925 | } | ||
926 | |||
927 | static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem, | ||
928 | struct task_struct* holder) | ||
929 | { | ||
930 | int i; | ||
931 | for(i = 0; i < sem->num_resources; ++i) | ||
932 | if(sem->queues[i].owner == holder) | ||
933 | return(&sem->queues[i]); | ||
934 | return(NULL); | ||
935 | } | ||
936 | |||
937 | /* caller is responsible for locking */ | ||
938 | static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue, | ||
939 | struct task_struct *skip) | ||
940 | { | ||
941 | struct list_head *pos; | ||
942 | struct task_struct *queued, *found = NULL; | ||
943 | |||
944 | list_for_each(pos, &kqueue->wait.task_list) { | ||
945 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
946 | task_list)->private; | ||
947 | |||
948 | /* Compare task prios, find high prio task. */ | ||
949 | if (queued != skip && edf_higher_prio(queued, found)) | ||
950 | found = queued; | ||
951 | } | ||
952 | return found; | ||
953 | } | ||
954 | |||
955 | static inline struct kfmlp_queue* kfmlp_find_shortest( | ||
956 | struct kfmlp_semaphore* sem, | ||
957 | struct kfmlp_queue* search_start) | ||
958 | { | ||
959 | // we start our search at search_start instead of at the beginning of the | ||
960 | // queue list to load-balance across all resources. | ||
961 | struct kfmlp_queue* step = search_start; | ||
962 | struct kfmlp_queue* shortest = sem->shortest_queue; | ||
963 | |||
964 | do | ||
965 | { | ||
966 | step = (step+1 != &sem->queues[sem->num_resources]) ? | ||
967 | step+1 : &sem->queues[0]; | ||
968 | if(step->count < shortest->count) | ||
969 | { | ||
970 | shortest = step; | ||
971 | if(step->count == 0) | ||
972 | break; /* can't get any shorter */ | ||
973 | } | ||
974 | }while(step != search_start); | ||
975 | |||
976 | return(shortest); | ||
977 | } | ||
978 | |||
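The wrap-around search above is easier to see with concrete numbers. A small sketch, under assumed queue lengths, of the walk it performs and the invariant it preserves:

/* Illustrative sketch only; not part of the patch.  The caller must hold
 * sem->lock, as at the real call sites. */
static void kfmlp_find_shortest_example(struct kfmlp_semaphore* sem)
{
	/* With num_resources == 4, queue lengths {2, 1, 0, 3}, and the walk
	 * starting after queues[3], the loop visits queues[0] (2), queues[1]
	 * (1), then queues[2] (0) and breaks early at the empty queue. */
	struct kfmlp_queue* q = kfmlp_find_shortest(sem, &sem->queues[3]);

	/* The result is never longer than the cached shortest queue. */
	BUG_ON(q->count > sem->shortest_queue->count);
}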
979 | static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem) | ||
980 | { | ||
981 | /* must hold sem->lock */ | ||
982 | |||
983 | struct kfmlp_queue *my_queue = NULL; | ||
984 | struct task_struct *max_hp = NULL; | ||
985 | |||
986 | |||
987 | struct list_head *pos; | ||
988 | struct task_struct *queued; | ||
989 | int i; | ||
990 | |||
991 | for(i = 0; i < sem->num_resources; ++i) | ||
992 | { | ||
993 | if( (sem->queues[i].count > 1) && | ||
994 | ((my_queue == NULL) || | ||
995 | (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) ) | ||
996 | { | ||
997 | my_queue = &sem->queues[i]; | ||
998 | } | ||
999 | } | ||
1000 | |||
1001 | if(my_queue) | ||
1002 | { | ||
1003 | cedf_domain_t* cluster; | ||
1004 | |||
1005 | max_hp = my_queue->hp_waiter; | ||
1006 | BUG_ON(!max_hp); | ||
1007 | |||
1008 | TRACE_CUR("queue %d: stealing %s/%d from queue %d\n", | ||
1009 | kfmlp_get_idx(sem, my_queue), | ||
1010 | max_hp->comm, max_hp->pid, | ||
1011 | kfmlp_get_idx(sem, my_queue)); | ||
1012 | |||
1013 | my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp); | ||
1014 | |||
1015 | /* | ||
1016 | if(my_queue->hp_waiter) | ||
1017 | TRACE_CUR("queue %d: new hp_waiter is %s/%d\n", | ||
1018 | kfmlp_get_idx(sem, my_queue), | ||
1019 | my_queue->hp_waiter->comm, | ||
1020 | my_queue->hp_waiter->pid); | ||
1021 | else | ||
1022 | TRACE_CUR("queue %d: new hp_waiter is %p\n", | ||
1023 | kfmlp_get_idx(sem, my_queue), NULL); | ||
1024 | */ | ||
1025 | |||
1026 | cluster = task_cpu_cluster(max_hp); | ||
1027 | |||
1028 | raw_spin_lock(&cluster->cedf_lock); | ||
1029 | |||
1030 | /* | ||
1031 | if(my_queue->owner) | ||
1032 | TRACE_CUR("queue %d: owner is %s/%d\n", | ||
1033 | kfmlp_get_idx(sem, my_queue), | ||
1034 | my_queue->owner->comm, | ||
1035 | my_queue->owner->pid); | ||
1036 | else | ||
1037 | TRACE_CUR("queue %d: owner is %p\n", | ||
1038 | kfmlp_get_idx(sem, my_queue), | ||
1039 | NULL); | ||
1040 | */ | ||
1041 | |||
1042 | if(tsk_rt(my_queue->owner)->inh_task == max_hp) | ||
1043 | { | ||
1044 | __clear_priority_inheritance(my_queue->owner); | ||
1045 | if(my_queue->hp_waiter != NULL) | ||
1046 | { | ||
1047 | __set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); | ||
1048 | } | ||
1049 | } | ||
1050 | raw_spin_unlock(&cluster->cedf_lock); | ||
1051 | |||
1052 | list_for_each(pos, &my_queue->wait.task_list) | ||
1053 | { | ||
1054 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
1055 | task_list)->private; | ||
1056 | /* Compare task prios, find high prio task. */ | ||
1057 | if (queued == max_hp) | ||
1058 | { | ||
1059 | /* | ||
1060 | TRACE_CUR("queue %d: found entry in wait queue. REMOVING!\n", | ||
1061 | kfmlp_get_idx(sem, my_queue)); | ||
1062 | */ | ||
1063 | __remove_wait_queue(&my_queue->wait, | ||
1064 | list_entry(pos, wait_queue_t, task_list)); | ||
1065 | break; | ||
1066 | } | ||
1067 | } | ||
1068 | --(my_queue->count); | ||
1069 | } | ||
1070 | |||
1071 | return(max_hp); | ||
1072 | } | ||
1073 | |||
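kfmlp_remove_hp_waiter() above decides where to steal from: only queues with count > 1 (an owner plus at least one waiter) are candidates, and among those the queue whose hp_waiter has the highest EDF priority is chosen. A stand-alone sketch of that selection rule follows, with absolute deadlines standing in for edf_higher_prio() (the structs and the deadline comparison are illustrative, not the kernel's).

    #include <stddef.h>

    /* Illustrative stand-ins; the kernel compares full EDF priorities. */
    struct waiter { long long deadline; };
    struct queue  { int count; struct waiter *hp_waiter; };

    /* Earlier absolute deadline means higher priority; NULL is lowest. */
    int higher_prio(const struct waiter *a, const struct waiter *b)
    {
        if (!a) return 0;
        if (!b) return 1;
        return a->deadline < b->deadline;
    }

    /* Mirrors the candidate filter in kfmlp_remove_hp_waiter(): skip queues
     * with no waiters to spare, keep the one with the hottest hp_waiter. */
    struct queue* pick_victim(struct queue *queues, int k)
    {
        struct queue *victim = NULL;
        int i;

        for (i = 0; i < k; ++i)
            if (queues[i].count > 1 &&
                (!victim || higher_prio(queues[i].hp_waiter, victim->hp_waiter)))
                victim = &queues[i];

        return victim; /* NULL if there is nothing worth stealing */
    }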
1074 | int cedf_kfmlp_lock(struct litmus_lock* l) | ||
1075 | { | ||
1076 | struct task_struct* t = current; | ||
1077 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1078 | struct kfmlp_queue* my_queue; | ||
1079 | wait_queue_t wait; | ||
1080 | unsigned long flags; | ||
1081 | |||
1082 | if (!is_realtime(t)) | ||
1083 | return -EPERM; | ||
1084 | |||
1085 | spin_lock_irqsave(&sem->lock, flags); | ||
1086 | |||
1087 | my_queue = sem->shortest_queue; | ||
1088 | |||
1089 | if (my_queue->owner) { | ||
1090 | /* resource is not free => must suspend and wait */ | ||
1091 | TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n", | ||
1092 | kfmlp_get_idx(sem, my_queue)); | ||
1093 | |||
1094 | init_waitqueue_entry(&wait, t); | ||
1095 | |||
1096 | /* FIXME: interruptible would be nice some day */ | ||
1097 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
1098 | |||
1099 | __add_wait_queue_tail_exclusive(&my_queue->wait, &wait); | ||
1100 | |||
1101 | /* check if we need to activate priority inheritance */ | ||
1102 | if (edf_higher_prio(t, my_queue->hp_waiter)) | ||
1103 | { | ||
1104 | my_queue->hp_waiter = t; | ||
1105 | if (edf_higher_prio(t, my_queue->owner)) | ||
1106 | { | ||
1107 | set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); | ||
1108 | } | ||
1109 | } | ||
1110 | |||
1111 | ++(my_queue->count); | ||
1112 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
1113 | |||
1114 | /* release lock before sleeping */ | ||
1115 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1116 | |||
1117 | /* We depend on the FIFO order. Thus, we don't need to recheck | ||
1118 | * when we wake up; we are guaranteed to have the lock since | ||
1119 | * there is only one wake up per release (or steal). | ||
1120 | */ | ||
1121 | schedule(); | ||
1122 | |||
1123 | |||
1124 | if(my_queue->owner == t) | ||
1125 | { | ||
1126 | TRACE_CUR("queue %d: acquired through waiting\n", | ||
1127 | kfmlp_get_idx(sem, my_queue)); | ||
1128 | } | ||
1129 | else | ||
1130 | { | ||
1131 | /* this case may happen if our wait entry was stolen | ||
1132 | between queues. Record where we went. */ | ||
1133 | my_queue = kfmlp_get_queue(sem, t); | ||
1134 | BUG_ON(!my_queue); | ||
1135 | TRACE_CUR("queue %d: acquired through stealing\n", | ||
1136 | kfmlp_get_idx(sem, my_queue)); | ||
1137 | } | ||
1138 | } | ||
1139 | else | ||
1140 | { | ||
1141 | TRACE_CUR("queue %d: acquired immediately\n", | ||
1142 | kfmlp_get_idx(sem, my_queue)); | ||
1143 | |||
1144 | my_queue->owner = t; | ||
1145 | |||
1146 | ++(my_queue->count); | ||
1147 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
1148 | |||
1149 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1150 | } | ||
1151 | |||
1152 | return kfmlp_get_idx(sem, my_queue); | ||
1153 | } | ||
1154 | |||
1155 | int cedf_kfmlp_unlock(struct litmus_lock* l) | ||
1156 | { | ||
1157 | struct task_struct *t = current, *next; | ||
1158 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1159 | struct kfmlp_queue *my_queue; | ||
1160 | unsigned long flags; | ||
1161 | int err = 0; | ||
1162 | |||
1163 | spin_lock_irqsave(&sem->lock, flags); | ||
1164 | |||
1165 | my_queue = kfmlp_get_queue(sem, t); | ||
1166 | |||
1167 | if (!my_queue) { | ||
1168 | err = -EINVAL; | ||
1169 | goto out; | ||
1170 | } | ||
1171 | |||
1172 | /* check if there are jobs waiting for this resource */ | ||
1173 | next = __waitqueue_remove_first(&my_queue->wait); | ||
1174 | if (next) { | ||
1175 | /* | ||
1176 | TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n", | ||
1177 | kfmlp_get_idx(sem, my_queue), | ||
1178 | next->comm, next->pid); | ||
1179 | */ | ||
1180 | /* next becomes the resource holder */ | ||
1181 | my_queue->owner = next; | ||
1182 | |||
1183 | --(my_queue->count); | ||
1184 | if(my_queue->count < sem->shortest_queue->count) | ||
1185 | { | ||
1186 | sem->shortest_queue = my_queue; | ||
1187 | } | ||
1188 | |||
1189 | TRACE_CUR("queue %d: lock ownership passed to %s/%d\n", | ||
1190 | kfmlp_get_idx(sem, my_queue), next->comm, next->pid); | ||
1191 | |||
1192 | /* determine new hp_waiter if necessary */ | ||
1193 | if (next == my_queue->hp_waiter) { | ||
1194 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
1195 | /* next has the highest priority --- it doesn't need to | ||
1196 | * inherit. However, we need to make sure that the | ||
1197 | * next-highest priority in the queue is reflected in | ||
1198 | * hp_waiter. */ | ||
1199 | my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next); | ||
1200 | if (my_queue->hp_waiter) | ||
1201 | TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue)); | ||
1202 | else | ||
1203 | TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue)); | ||
1204 | } else { | ||
1205 | /* Well, if next is not the highest-priority waiter, | ||
1206 | * then it ought to inherit the highest-priority | ||
1207 | * waiter's priority. */ | ||
1208 | set_priority_inheritance(next, my_queue->hp_waiter); | ||
1209 | } | ||
1210 | |||
1211 | /* wake up next */ | ||
1212 | wake_up_process(next); | ||
1213 | } | ||
1214 | else | ||
1215 | { | ||
1216 | TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue)); | ||
1217 | |||
1218 | next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */ | ||
1219 | |||
1220 | /* | ||
1221 | if(next) | ||
1222 | TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n", | ||
1223 | kfmlp_get_idx(sem, my_queue), | ||
1224 | next->comm, next->pid); | ||
1225 | */ | ||
1226 | |||
1227 | my_queue->owner = next; | ||
1228 | |||
1229 | if(next) | ||
1230 | { | ||
1231 | TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n", | ||
1232 | kfmlp_get_idx(sem, my_queue), | ||
1233 | next->comm, next->pid); | ||
1234 | |||
1235 | /* wake up next */ | ||
1236 | wake_up_process(next); | ||
1237 | } | ||
1238 | else | ||
1239 | { | ||
1240 | TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue)); | ||
1241 | |||
1242 | --(my_queue->count); | ||
1243 | if(my_queue->count < sem->shortest_queue->count) | ||
1244 | { | ||
1245 | sem->shortest_queue = my_queue; | ||
1246 | } | ||
1247 | } | ||
1248 | } | ||
1249 | |||
1250 | /* we lose the benefit of priority inheritance (if any) */ | ||
1251 | if (tsk_rt(t)->inh_task) | ||
1252 | clear_priority_inheritance(t); | ||
1253 | |||
1254 | out: | ||
1255 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1256 | |||
1257 | return err; | ||
1258 | } | ||
1259 | |||
1260 | int cedf_kfmlp_close(struct litmus_lock* l) | ||
1261 | { | ||
1262 | struct task_struct *t = current; | ||
1263 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1264 | struct kfmlp_queue *my_queue; | ||
1265 | unsigned long flags; | ||
1266 | |||
1267 | int owner; | ||
1268 | |||
1269 | spin_lock_irqsave(&sem->lock, flags); | ||
1270 | |||
1271 | my_queue = kfmlp_get_queue(sem, t); | ||
1272 | owner = (my_queue) ? (my_queue->owner == t) : 0; | ||
1273 | |||
1274 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1275 | |||
1276 | if (owner) | ||
1277 | cedf_kfmlp_unlock(l); | ||
1278 | |||
1279 | return 0; | ||
1280 | } | ||
1281 | |||
1282 | void cedf_kfmlp_free(struct litmus_lock* l) | ||
1283 | { | ||
1284 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1285 | kfree(sem->queues); | ||
1286 | kfree(sem); | ||
1287 | } | ||
1288 | |||
1289 | static struct litmus_lock_ops cedf_kfmlp_lock_ops = { | ||
1290 | .close = cedf_kfmlp_close, | ||
1291 | .lock = cedf_kfmlp_lock, | ||
1292 | .unlock = cedf_kfmlp_unlock, | ||
1293 | .deallocate = cedf_kfmlp_free, | ||
1294 | }; | ||
1295 | |||
1296 | static struct litmus_lock* cedf_new_kfmlp(void* __user arg, int* ret_code) | ||
1297 | { | ||
1298 | struct kfmlp_semaphore* sem; | ||
1299 | int num_resources = 0; | ||
1300 | int i; | ||
1301 | |||
1302 | if(!access_ok(VERIFY_READ, arg, sizeof(num_resources))) | ||
1303 | { | ||
1304 | *ret_code = -EINVAL; | ||
1305 | return(NULL); | ||
1306 | } | ||
1307 | if(__copy_from_user(&num_resources, arg, sizeof(num_resources))) | ||
1308 | { | ||
1309 | *ret_code = -EINVAL; | ||
1310 | return(NULL); | ||
1311 | } | ||
1312 | if(num_resources < 1) | ||
1313 | { | ||
1314 | *ret_code = -EINVAL; | ||
1315 | return(NULL); | ||
1316 | } | ||
1317 | |||
1318 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
1319 | if(!sem) | ||
1320 | { | ||
1321 | *ret_code = -ENOMEM; | ||
1322 | return NULL; | ||
1323 | } | ||
1324 | |||
1325 | sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL); | ||
1326 | if(!sem->queues) | ||
1327 | { | ||
1328 | kfree(sem); | ||
1329 | *ret_code = -ENOMEM; | ||
1330 | return NULL; | ||
1331 | } | ||
1332 | |||
1333 | sem->litmus_lock.ops = &cedf_kfmlp_lock_ops; | ||
1334 | spin_lock_init(&sem->lock); | ||
1335 | sem->num_resources = num_resources; | ||
1336 | |||
1337 | for(i = 0; i < num_resources; ++i) | ||
1338 | { | ||
1339 | sem->queues[i].owner = NULL; | ||
1340 | sem->queues[i].hp_waiter = NULL; | ||
1341 | init_waitqueue_head(&sem->queues[i].wait); | ||
1342 | sem->queues[i].count = 0; | ||
1343 | } | ||
1344 | |||
1345 | sem->shortest_queue = &sem->queues[0]; | ||
1346 | |||
1347 | *ret_code = 0; | ||
1348 | return &sem->litmus_lock; | ||
1349 | } | ||
1350 | |||
1351 | |||
1352 | /* **** lock constructor **** */ | ||
1353 | |||
1354 | static long cedf_allocate_lock(struct litmus_lock **lock, int type, | ||
1355 | void* __user arg) | ||
1356 | { | ||
1357 | int err = -ENXIO; | ||
1358 | |||
1359 | /* C-EDF currently only supports the k-FMLP for global resources | ||
1360 | WITHIN a given cluster. DO NOT USE CROSS-CLUSTER! */ | ||
1361 | switch (type) { | ||
1362 | case KFMLP_SEM: | ||
1363 | *lock = cedf_new_kfmlp(arg, &err); | ||
1364 | break; | ||
1365 | }; | ||
1366 | |||
1367 | return err; | ||
1368 | } | ||
1369 | |||
1370 | #endif // CONFIG_LITMUS_LOCKING | ||
1371 | |||
1372 | |||
1373 | |||
1374 | |||
1375 | |||
1376 | |||
606 | /* total number of clusters */ | 1377 | /* total number of clusters */ |
607 | static int num_clusters; | 1378 | static int num_clusters; |
608 | /* we do not support clusters of different sizes */ | 1379 | /* we do not support clusters of different sizes */ |
@@ -746,6 +1517,40 @@ static long cedf_activate_plugin(void) | |||
746 | break; | 1517 | break; |
747 | } | 1518 | } |
748 | } | 1519 | } |
1520 | |||
1521 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1522 | { | ||
1523 | /* distribute the daemons evenly across the clusters. */ | ||
1524 | int* affinity = kmalloc(NR_LITMUS_SOFTIRQD * sizeof(int), GFP_ATOMIC); | ||
1525 | int num_daemons_per_cluster = NR_LITMUS_SOFTIRQD / num_clusters; | ||
1526 | int left_over = NR_LITMUS_SOFTIRQD % num_clusters; | ||
1527 | |||
1528 | int daemon = 0; | ||
1529 | for(i = 0; i < num_clusters; ++i) | ||
1530 | { | ||
1531 | int num_on_this_cluster = num_daemons_per_cluster; | ||
1532 | if(left_over) | ||
1533 | { | ||
1534 | ++num_on_this_cluster; | ||
1535 | --left_over; | ||
1536 | } | ||
1537 | |||
1538 | for(j = 0; j < num_on_this_cluster; ++j) | ||
1539 | { | ||
1540 | // first CPU of this cluster | ||
1541 | affinity[daemon++] = i*cluster_size; | ||
1542 | } | ||
1543 | } | ||
1544 | |||
1545 | spawn_klitirqd(affinity); | ||
1546 | |||
1547 | kfree(affinity); | ||
1548 | } | ||
1549 | #endif | ||
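The #ifdef block above spreads the NR_LITMUS_SOFTIRQD daemons as evenly as possible over the clusters (the remainder goes to the first few clusters) and pins each daemon to the first CPU of its cluster. The same arithmetic in isolation, with made-up example figures rather than the kernel's configuration:

    #include <stdio.h>

    int main(void)
    {
        /* Example values only: 9 daemons over 4 clusters of 2 CPUs each. */
        int nr_daemons = 9, num_clusters = 4, cluster_size = 2;
        int per_cluster = nr_daemons / num_clusters;  /* 2 */
        int left_over   = nr_daemons % num_clusters;  /* 1 extra daemon to hand out */
        int daemon = 0, i, j;

        for (i = 0; i < num_clusters; ++i) {
            int on_this_cluster = per_cluster;
            if (left_over) {      /* remainder goes to the earliest clusters */
                ++on_this_cluster;
                --left_over;
            }
            for (j = 0; j < on_this_cluster; ++j)
                printf("daemon %d -> CPU %d\n", daemon++, i * cluster_size);
        }
        /* Prints three daemons on CPU 0, then two each on CPUs 2, 4 and 6. */
        return 0;
    }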
1550 | |||
1551 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1552 | init_nvidia_info(); | ||
1553 | #endif | ||
749 | 1554 | ||
750 | free_cpumask_var(mask); | 1555 | free_cpumask_var(mask); |
751 | clusters_allocated = 1; | 1556 | clusters_allocated = 1; |
@@ -765,6 +1570,15 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { | |||
765 | .task_block = cedf_task_block, | 1570 | .task_block = cedf_task_block, |
766 | .admit_task = cedf_admit_task, | 1571 | .admit_task = cedf_admit_task, |
767 | .activate_plugin = cedf_activate_plugin, | 1572 | .activate_plugin = cedf_activate_plugin, |
1573 | #ifdef CONFIG_LITMUS_LOCKING | ||
1574 | .allocate_lock = cedf_allocate_lock, | ||
1575 | .set_prio_inh = set_priority_inheritance, | ||
1576 | .clear_prio_inh = clear_priority_inheritance, | ||
1577 | #endif | ||
1578 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1579 | .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd, | ||
1580 | .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd, | ||
1581 | #endif | ||
768 | }; | 1582 | }; |
769 | 1583 | ||
770 | static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL; | 1584 | static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL; |
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 3092797480f8..d04e0703c154 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -12,6 +12,8 @@ | |||
12 | #include <linux/percpu.h> | 12 | #include <linux/percpu.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/uaccess.h> | ||
16 | |||
15 | 17 | ||
16 | #include <litmus/litmus.h> | 18 | #include <litmus/litmus.h> |
17 | #include <litmus/jobs.h> | 19 | #include <litmus/jobs.h> |
@@ -25,6 +27,19 @@ | |||
25 | 27 | ||
26 | #include <linux/module.h> | 28 | #include <linux/module.h> |
27 | 29 | ||
30 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
31 | #include <litmus/affinity.h> | ||
32 | #endif | ||
33 | |||
34 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
35 | #include <litmus/litmus_softirq.h> | ||
36 | #endif | ||
37 | |||
38 | #ifdef CONFIG_LITMUS_NVIDIA | ||
39 | #include <litmus/nvidia_info.h> | ||
40 | #endif | ||
41 | |||
42 | |||
28 | /* Overview of GSN-EDF operations. | 43 | /* Overview of GSN-EDF operations. |
29 | * | 44 | * |
30 | * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This | 45 | * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This |
@@ -253,21 +268,52 @@ static noinline void requeue(struct task_struct* task) | |||
253 | } | 268 | } |
254 | } | 269 | } |
255 | 270 | ||
271 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
272 | static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start) | ||
273 | { | ||
274 | cpu_entry_t* affinity; | ||
275 | |||
276 | get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries, | ||
277 | #ifdef CONFIG_RELEASE_MASTER | ||
278 | gsnedf.release_master | ||
279 | #else | ||
280 | -1 | ||
281 | #endif | ||
282 | ); | ||
283 | |||
284 | return(affinity); | ||
285 | } | ||
286 | #endif | ||
287 | |||
256 | /* check for any necessary preemptions */ | 288 | /* check for any necessary preemptions */ |
257 | static void check_for_preemptions(void) | 289 | static void check_for_preemptions(void) |
258 | { | 290 | { |
259 | struct task_struct *task; | 291 | struct task_struct *task; |
260 | cpu_entry_t* last; | 292 | cpu_entry_t *last; |
261 | 293 | ||
262 | for(last = lowest_prio_cpu(); | 294 | for(last = lowest_prio_cpu(); |
263 | edf_preemption_needed(&gsnedf, last->linked); | 295 | edf_preemption_needed(&gsnedf, last->linked); |
264 | last = lowest_prio_cpu()) { | 296 | last = lowest_prio_cpu()) { |
265 | /* preemption necessary */ | 297 | /* preemption necessary */ |
266 | task = __take_ready(&gsnedf); | 298 | task = __take_ready(&gsnedf); |
267 | TRACE("check_for_preemptions: attempting to link task %d to %d\n", | 299 | |
268 | task->pid, last->cpu); | 300 | #ifdef CONFIG_SCHED_CPU_AFFINITY |
301 | { | ||
302 | cpu_entry_t* affinity = gsnedf_get_nearest_available_cpu( | ||
303 | &per_cpu(gsnedf_cpu_entries, task_cpu(task))); | ||
304 | if(affinity) | ||
305 | last = affinity; | ||
306 | else if(last->linked) | ||
307 | requeue(last->linked); | ||
308 | } | ||
309 | #else | ||
269 | if (last->linked) | 310 | if (last->linked) |
270 | requeue(last->linked); | 311 | requeue(last->linked); |
312 | #endif | ||
313 | |||
314 | TRACE("check_for_preemptions: attempting to link task %d to %d\n", | ||
315 | task->pid, last->cpu); | ||
316 | |||
271 | link_task_to_cpu(task, last); | 317 | link_task_to_cpu(task, last); |
272 | preempt(last); | 318 | preempt(last); |
273 | } | 319 | } |
@@ -277,7 +323,7 @@ static void check_for_preemptions(void) | |||
277 | static noinline void gsnedf_job_arrival(struct task_struct* task) | 323 | static noinline void gsnedf_job_arrival(struct task_struct* task) |
278 | { | 324 | { |
279 | BUG_ON(!task); | 325 | BUG_ON(!task); |
280 | 326 | ||
281 | requeue(task); | 327 | requeue(task); |
282 | check_for_preemptions(); | 328 | check_for_preemptions(); |
283 | } | 329 | } |
@@ -298,9 +344,13 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | |||
298 | static noinline void job_completion(struct task_struct *t, int forced) | 344 | static noinline void job_completion(struct task_struct *t, int forced) |
299 | { | 345 | { |
300 | BUG_ON(!t); | 346 | BUG_ON(!t); |
301 | 347 | ||
302 | sched_trace_task_completion(t, forced); | 348 | sched_trace_task_completion(t, forced); |
303 | 349 | ||
350 | #ifdef CONFIG_LITMUS_NVIDIA | ||
351 | atomic_set(&tsk_rt(t)->nv_int_count, 0); | ||
352 | #endif | ||
353 | |||
304 | TRACE_TASK(t, "job_completion().\n"); | 354 | TRACE_TASK(t, "job_completion().\n"); |
305 | 355 | ||
306 | /* set flags */ | 356 | /* set flags */ |
@@ -401,17 +451,19 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
401 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); | 451 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); |
402 | #endif | 452 | #endif |
403 | 453 | ||
454 | /* | ||
404 | if (exists) | 455 | if (exists) |
405 | TRACE_TASK(prev, | 456 | TRACE_TASK(prev, |
406 | "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " | 457 | "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " |
407 | "state:%d sig:%d\n", | 458 | "state:%d sig:%d\n", |
408 | blocks, out_of_time, np, sleep, preempt, | 459 | blocks, out_of_time, np, sleep, preempt, |
409 | prev->state, signal_pending(prev)); | 460 | prev->state, signal_pending(prev)); |
461 | */ | ||
462 | |||
410 | if (entry->linked && preempt) | 463 | if (entry->linked && preempt) |
411 | TRACE_TASK(prev, "will be preempted by %s/%d\n", | 464 | TRACE_TASK(prev, "will be preempted by %s/%d\n", |
412 | entry->linked->comm, entry->linked->pid); | 465 | entry->linked->comm, entry->linked->pid); |
413 | 466 | ||
414 | |||
415 | /* If a task blocks we have no choice but to reschedule. | 467 | /* If a task blocks we have no choice but to reschedule. |
416 | */ | 468 | */ |
417 | if (blocks) | 469 | if (blocks) |
@@ -456,12 +508,15 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
456 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | 508 | entry->scheduled->rt_param.scheduled_on = NO_CPU; |
457 | TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); | 509 | TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); |
458 | } | 510 | } |
459 | } else | 511 | } |
512 | else | ||
513 | { | ||
460 | /* Only override Linux scheduler if we have a real-time task | 514 | /* Only override Linux scheduler if we have a real-time task |
461 | * scheduled that needs to continue. | 515 | * scheduled that needs to continue. |
462 | */ | 516 | */ |
463 | if (exists) | 517 | if (exists) |
464 | next = prev; | 518 | next = prev; |
519 | } | ||
465 | 520 | ||
466 | sched_state_task_picked(); | 521 | sched_state_task_picked(); |
467 | 522 | ||
@@ -486,8 +541,9 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
486 | static void gsnedf_finish_switch(struct task_struct *prev) | 541 | static void gsnedf_finish_switch(struct task_struct *prev) |
487 | { | 542 | { |
488 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | 543 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); |
489 | 544 | ||
490 | entry->scheduled = is_realtime(current) ? current : NULL; | 545 | entry->scheduled = is_realtime(current) ? current : NULL; |
546 | |||
491 | #ifdef WANT_ALL_SCHED_EVENTS | 547 | #ifdef WANT_ALL_SCHED_EVENTS |
492 | TRACE_TASK(prev, "switched away from\n"); | 548 | TRACE_TASK(prev, "switched away from\n"); |
493 | #endif | 549 | #endif |
@@ -536,11 +592,14 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) | |||
536 | static void gsnedf_task_wake_up(struct task_struct *task) | 592 | static void gsnedf_task_wake_up(struct task_struct *task) |
537 | { | 593 | { |
538 | unsigned long flags; | 594 | unsigned long flags; |
539 | lt_t now; | 595 | lt_t now; |
540 | 596 | ||
541 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | 597 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); |
542 | 598 | ||
543 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 599 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
600 | |||
601 | |||
602 | #if 0 // sporadic task model | ||
544 | /* We need to take suspensions because of semaphores into | 603 | /* We need to take suspensions because of semaphores into |
545 | * account! If a job resumes after being suspended due to acquiring | 604 | * account! If a job resumes after being suspended due to acquiring |
546 | * a semaphore, it should never be treated as a new job release. | 605 | * a semaphore, it should never be treated as a new job release. |
@@ -562,19 +621,26 @@ static void gsnedf_task_wake_up(struct task_struct *task) | |||
562 | } | 621 | } |
563 | } | 622 | } |
564 | } | 623 | } |
624 | #else // periodic task model | ||
625 | set_rt_flags(task, RT_F_RUNNING); | ||
626 | #endif | ||
627 | |||
565 | gsnedf_job_arrival(task); | 628 | gsnedf_job_arrival(task); |
566 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 629 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
567 | } | 630 | } |
568 | 631 | ||
569 | static void gsnedf_task_block(struct task_struct *t) | 632 | static void gsnedf_task_block(struct task_struct *t) |
570 | { | 633 | { |
634 | // TODO: is this called on preemption?? | ||
571 | unsigned long flags; | 635 | unsigned long flags; |
572 | 636 | ||
573 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); | 637 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); |
574 | 638 | ||
575 | /* unlink if necessary */ | 639 | /* unlink if necessary */ |
576 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 640 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
641 | |||
577 | unlink(t); | 642 | unlink(t); |
643 | |||
578 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 644 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
579 | 645 | ||
580 | BUG_ON(!is_realtime(t)); | 646 | BUG_ON(!is_realtime(t)); |
@@ -608,51 +674,53 @@ static long gsnedf_admit_task(struct task_struct* tsk) | |||
608 | 674 | ||
609 | #include <litmus/fdso.h> | 675 | #include <litmus/fdso.h> |
610 | 676 | ||
611 | /* called with IRQs off */ | 677 | |
612 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 678 | static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) |
613 | { | 679 | { |
614 | int linked_on; | 680 | int linked_on; |
615 | int check_preempt = 0; | 681 | int check_preempt = 0; |
616 | 682 | ||
617 | raw_spin_lock(&gsnedf_lock); | 683 | if(prio_inh != NULL) |
618 | 684 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | |
619 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | 685 | else |
686 | TRACE_TASK(t, "inherits priority from %p\n", prio_inh); | ||
687 | |||
688 | sched_trace_eff_prio_change(t, prio_inh); | ||
689 | |||
620 | tsk_rt(t)->inh_task = prio_inh; | 690 | tsk_rt(t)->inh_task = prio_inh; |
621 | 691 | ||
622 | linked_on = tsk_rt(t)->linked_on; | 692 | linked_on = tsk_rt(t)->linked_on; |
623 | 693 | ||
624 | /* If it is scheduled, then we need to reorder the CPU heap. */ | 694 | /* If it is scheduled, then we need to reorder the CPU heap. */ |
625 | if (linked_on != NO_CPU) { | 695 | if (linked_on != NO_CPU) { |
626 | TRACE_TASK(t, "%s: linked on %d\n", | 696 | TRACE_TASK(t, "%s: linked on %d\n", |
627 | __FUNCTION__, linked_on); | 697 | __FUNCTION__, linked_on); |
628 | /* Holder is scheduled; need to re-order CPUs. | 698 | /* Holder is scheduled; need to re-order CPUs. |
629 | * We can't use heap_decrease() here since | 699 | * We can't use heap_decrease() here since |
630 | * the cpu_heap is ordered in reverse direction, so | 700 | * the cpu_heap is ordered in reverse direction, so |
631 | * it is actually an increase. */ | 701 | * it is actually an increase. */ |
632 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, | 702 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, |
633 | gsnedf_cpus[linked_on]->hn); | 703 | gsnedf_cpus[linked_on]->hn); |
634 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, | 704 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, |
635 | gsnedf_cpus[linked_on]->hn); | 705 | gsnedf_cpus[linked_on]->hn); |
636 | } else { | 706 | } else { |
637 | /* holder may be queued: first stop queue changes */ | 707 | /* holder may be queued: first stop queue changes */ |
638 | raw_spin_lock(&gsnedf.release_lock); | 708 | raw_spin_lock(&gsnedf.release_lock); |
639 | if (is_queued(t)) { | 709 | if (is_queued(t)) { |
640 | TRACE_TASK(t, "%s: is queued\n", | 710 | TRACE_TASK(t, "%s: is queued\n", __FUNCTION__); |
641 | __FUNCTION__); | 711 | |
642 | /* We need to update the position of holder in some | 712 | /* We need to update the position of holder in some |
643 | * heap. Note that this could be a release heap if | 713 | * heap. Note that this could be a release heap if |
644 | * budget enforcement is used and this job overran. */ | 714 | * budget enforcement is used and this job overran. */ |
645 | check_preempt = | 715 | check_preempt = !bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node); |
646 | !bheap_decrease(edf_ready_order, | 716 | |
647 | tsk_rt(t)->heap_node); | ||
648 | } else { | 717 | } else { |
649 | /* Nothing to do: if it is not queued and not linked | 718 | /* Nothing to do: if it is not queued and not linked |
650 | * then it is either sleeping or currently being moved | 719 | * then it is either sleeping or currently being moved |
651 | * by other code (e.g., a timer interrupt handler) that | 720 | * by other code (e.g., a timer interrupt handler) that |
652 | * will use the correct priority when enqueuing the | 721 | * will use the correct priority when enqueuing the |
653 | * task. */ | 722 | * task. */ |
654 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", | 723 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", __FUNCTION__); |
655 | __FUNCTION__); | ||
656 | } | 724 | } |
657 | raw_spin_unlock(&gsnedf.release_lock); | 725 | raw_spin_unlock(&gsnedf.release_lock); |
658 | 726 | ||
@@ -666,34 +734,148 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct* | |||
666 | /* heap_decrease() hit the top level of the heap: make | 734 | /* heap_decrease() hit the top level of the heap: make |
667 | * sure preemption checks get the right task, not the | 735 | * sure preemption checks get the right task, not the |
668 | * potentially stale cache. */ | 736 | * potentially stale cache. */ |
669 | bheap_uncache_min(edf_ready_order, | 737 | bheap_uncache_min(edf_ready_order, &gsnedf.ready_queue); |
670 | &gsnedf.ready_queue); | ||
671 | check_for_preemptions(); | 738 | check_for_preemptions(); |
672 | } | 739 | } |
673 | } | 740 | } |
741 | } | ||
674 | 742 | ||
743 | /* called with IRQs off */ | ||
744 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
745 | { | ||
746 | raw_spin_lock(&gsnedf_lock); | ||
747 | |||
748 | __set_priority_inheritance(t, prio_inh); | ||
749 | |||
750 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
751 | if(tsk_rt(t)->cur_klitirqd != NULL) | ||
752 | { | ||
753 | TRACE_TASK(t, "%s/%d inherits a new priority!\n", | ||
754 | tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); | ||
755 | |||
756 | __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh); | ||
757 | } | ||
758 | #endif | ||
759 | |||
675 | raw_spin_unlock(&gsnedf_lock); | 760 | raw_spin_unlock(&gsnedf_lock); |
676 | } | 761 | } |
677 | 762 | ||
763 | |||
764 | /* called with IRQs off */ | ||
765 | static void __clear_priority_inheritance(struct task_struct* t) | ||
766 | { | ||
767 | TRACE_TASK(t, "priority restored\n"); | ||
768 | |||
769 | if(tsk_rt(t)->scheduled_on != NO_CPU) | ||
770 | { | ||
771 | sched_trace_eff_prio_change(t, NULL); | ||
772 | |||
773 | tsk_rt(t)->inh_task = NULL; | ||
774 | |||
775 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
776 | * since the priority was effectively lowered. */ | ||
777 | unlink(t); | ||
778 | gsnedf_job_arrival(t); | ||
779 | } | ||
780 | else | ||
781 | { | ||
782 | __set_priority_inheritance(t, NULL); | ||
783 | } | ||
784 | |||
785 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
786 | if(tsk_rt(t)->cur_klitirqd != NULL) | ||
787 | { | ||
788 | TRACE_TASK(t, "%s/%d inheritance set back to owner.\n", | ||
789 | tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); | ||
790 | |||
791 | if(tsk_rt(tsk_rt(t)->cur_klitirqd)->scheduled_on != NO_CPU) | ||
792 | { | ||
793 | sched_trace_eff_prio_change(tsk_rt(t)->cur_klitirqd, t); | ||
794 | |||
795 | tsk_rt(tsk_rt(t)->cur_klitirqd)->inh_task = t; | ||
796 | |||
797 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
798 | * since the priority was effectively lowered. */ | ||
799 | unlink(tsk_rt(t)->cur_klitirqd); | ||
800 | gsnedf_job_arrival(tsk_rt(t)->cur_klitirqd); | ||
801 | } | ||
802 | else | ||
803 | { | ||
804 | __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, t); | ||
805 | } | ||
806 | } | ||
807 | #endif | ||
808 | } | ||
809 | |||
678 | /* called with IRQs off */ | 810 | /* called with IRQs off */ |
679 | static void clear_priority_inheritance(struct task_struct* t) | 811 | static void clear_priority_inheritance(struct task_struct* t) |
680 | { | 812 | { |
681 | raw_spin_lock(&gsnedf_lock); | 813 | raw_spin_lock(&gsnedf_lock); |
814 | __clear_priority_inheritance(t); | ||
815 | raw_spin_unlock(&gsnedf_lock); | ||
816 | } | ||
682 | 817 | ||
683 | /* A job only stops inheriting a priority when it releases a | 818 | #ifdef CONFIG_LITMUS_SOFTIRQD |
684 | * resource. Thus we can make the following assumption.*/ | 819 | /* called with IRQs off */ |
685 | BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU); | 820 | static void set_priority_inheritance_klitirqd(struct task_struct* klitirqd, |
686 | 821 | struct task_struct* old_owner, | |
687 | TRACE_TASK(t, "priority restored\n"); | 822 | struct task_struct* new_owner) |
688 | tsk_rt(t)->inh_task = NULL; | 823 | { |
824 | BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); | ||
825 | |||
826 | raw_spin_lock(&gsnedf_lock); | ||
827 | |||
828 | if(old_owner != new_owner) | ||
829 | { | ||
830 | if(old_owner) | ||
831 | { | ||
832 | // unreachable? | ||
833 | tsk_rt(old_owner)->cur_klitirqd = NULL; | ||
834 | } | ||
835 | |||
836 | TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n", | ||
837 | new_owner->comm, new_owner->pid); | ||
689 | 838 | ||
690 | /* Check if rescheduling is necessary. We can't use heap_decrease() | 839 | tsk_rt(new_owner)->cur_klitirqd = klitirqd; |
691 | * since the priority was effectively lowered. */ | 840 | } |
692 | unlink(t); | 841 | |
693 | gsnedf_job_arrival(t); | 842 | __set_priority_inheritance(klitirqd, |
843 | (tsk_rt(new_owner)->inh_task == NULL) ? | ||
844 | new_owner : | ||
845 | tsk_rt(new_owner)->inh_task); | ||
846 | |||
847 | raw_spin_unlock(&gsnedf_lock); | ||
848 | } | ||
694 | 849 | ||
850 | /* called with IRQs off */ | ||
851 | static void clear_priority_inheritance_klitirqd(struct task_struct* klitirqd, | ||
852 | struct task_struct* old_owner) | ||
853 | { | ||
854 | BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); | ||
855 | |||
856 | raw_spin_lock(&gsnedf_lock); | ||
857 | |||
858 | TRACE_TASK(klitirqd, "priority restored\n"); | ||
859 | |||
860 | if(tsk_rt(klitirqd)->scheduled_on != NO_CPU) | ||
861 | { | ||
862 | tsk_rt(klitirqd)->inh_task = NULL; | ||
863 | |||
864 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
865 | * since the priority was effectively lowered. */ | ||
866 | unlink(klitirqd); | ||
867 | gsnedf_job_arrival(klitirqd); | ||
868 | } | ||
869 | else | ||
870 | { | ||
871 | __set_priority_inheritance(klitirqd, NULL); | ||
872 | } | ||
873 | |||
874 | tsk_rt(old_owner)->cur_klitirqd = NULL; | ||
875 | |||
695 | raw_spin_unlock(&gsnedf_lock); | 876 | raw_spin_unlock(&gsnedf_lock); |
696 | } | 877 | } |
878 | #endif | ||
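The klitirqd hooks above chain priority inheritance through the daemon: when a daemon starts serving a task, it inherits that task's effective priority, i.e. the task itself unless the task is already inheriting from someone else, in which case the donor is passed through. A minimal restatement of that rule (the struct below is a sketch standing in for the relevant rt_param fields, not the kernel's definition):

    #include <stddef.h>

    /* Sketch only: stands in for the inh_task field of struct rt_param. */
    struct rt_task {
        struct rt_task *inh_task; /* task whose priority is currently inherited, or NULL */
    };

    /* Donor used when a klitirqd daemon begins serving `owner`: pass the
     * owner's inherited priority through if it has one, else the owner. */
    struct rt_task* klitirqd_prio_donor(struct rt_task *owner)
    {
        return owner->inh_task ? owner->inh_task : owner;
    }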
697 | 879 | ||
698 | 880 | ||
699 | /* ******************** FMLP support ********************** */ | 881 | /* ******************** FMLP support ********************** */ |
@@ -892,11 +1074,477 @@ static struct litmus_lock* gsnedf_new_fmlp(void) | |||
892 | return &sem->litmus_lock; | 1074 | return &sem->litmus_lock; |
893 | } | 1075 | } |
894 | 1076 | ||
1077 | |||
1078 | |||
1079 | |||
1080 | |||
1081 | |||
1082 | |||
1083 | /* ******************** KFMLP support ********************** */ | ||
1084 | |||
1085 | /* struct for semaphore with priority inheritance */ | ||
1086 | struct kfmlp_queue | ||
1087 | { | ||
1088 | wait_queue_head_t wait; | ||
1089 | struct task_struct* owner; | ||
1090 | struct task_struct* hp_waiter; | ||
1091 | int count; /* number of waiters + holder */ | ||
1092 | }; | ||
1093 | |||
1094 | struct kfmlp_semaphore | ||
1095 | { | ||
1096 | struct litmus_lock litmus_lock; | ||
1097 | |||
1098 | spinlock_t lock; | ||
1099 | |||
1100 | int num_resources; /* aka k */ | ||
1101 | |||
1102 | struct kfmlp_queue *queues; /* array */ | ||
1103 | struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */ | ||
1104 | }; | ||
1105 | |||
1106 | static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock) | ||
1107 | { | ||
1108 | return container_of(lock, struct kfmlp_semaphore, litmus_lock); | ||
1109 | } | ||
1110 | |||
1111 | static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem, | ||
1112 | struct kfmlp_queue* queue) | ||
1113 | { | ||
1114 | return (queue - &sem->queues[0]); | ||
1115 | } | ||
1116 | |||
1117 | static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem, | ||
1118 | struct task_struct* holder) | ||
1119 | { | ||
1120 | int i; | ||
1121 | for(i = 0; i < sem->num_resources; ++i) | ||
1122 | if(sem->queues[i].owner == holder) | ||
1123 | return(&sem->queues[i]); | ||
1124 | return(NULL); | ||
1125 | } | ||
1126 | |||
1127 | /* caller is responsible for locking */ | ||
1128 | static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue, | ||
1129 | struct task_struct *skip) | ||
1130 | { | ||
1131 | struct list_head *pos; | ||
1132 | struct task_struct *queued, *found = NULL; | ||
1133 | |||
1134 | list_for_each(pos, &kqueue->wait.task_list) { | ||
1135 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
1136 | task_list)->private; | ||
1137 | |||
1138 | /* Compare task prios, find high prio task. */ | ||
1139 | if (queued != skip && edf_higher_prio(queued, found)) | ||
1140 | found = queued; | ||
1141 | } | ||
1142 | return found; | ||
1143 | } | ||
1144 | |||
1145 | static inline struct kfmlp_queue* kfmlp_find_shortest( | ||
1146 | struct kfmlp_semaphore* sem, | ||
1147 | struct kfmlp_queue* search_start) | ||
1148 | { | ||
1149 | // we start our search at search_start instead of at the beginning of the | ||
1150 | // queue list to load-balance across all resources. | ||
1151 | struct kfmlp_queue* step = search_start; | ||
1152 | struct kfmlp_queue* shortest = sem->shortest_queue; | ||
1153 | |||
1154 | do | ||
1155 | { | ||
1156 | step = (step+1 != &sem->queues[sem->num_resources]) ? | ||
1157 | step+1 : &sem->queues[0]; | ||
1158 | if(step->count < shortest->count) | ||
1159 | { | ||
1160 | shortest = step; | ||
1161 | if(step->count == 0) | ||
1162 | break; /* can't get any shorter */ | ||
1163 | } | ||
1164 | }while(step != search_start); | ||
1165 | |||
1166 | return(shortest); | ||
1167 | } | ||
1168 | |||
1169 | static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem) | ||
1170 | { | ||
1171 | /* must hold sem->lock */ | ||
1172 | |||
1173 | struct kfmlp_queue *my_queue = NULL; | ||
1174 | struct task_struct *max_hp = NULL; | ||
1175 | |||
1176 | |||
1177 | struct list_head *pos; | ||
1178 | struct task_struct *queued; | ||
1179 | int i; | ||
1180 | |||
1181 | for(i = 0; i < sem->num_resources; ++i) | ||
1182 | { | ||
1183 | if( (sem->queues[i].count > 1) && | ||
1184 | ((my_queue == NULL) || | ||
1185 | (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) ) | ||
1186 | { | ||
1187 | my_queue = &sem->queues[i]; | ||
1188 | } | ||
1189 | } | ||
1190 | |||
1191 | if(my_queue) | ||
1192 | { | ||
1193 | max_hp = my_queue->hp_waiter; | ||
1194 | |||
1195 | BUG_ON(!max_hp); | ||
1196 | |||
1197 | TRACE_CUR("queue %d: stealing %s/%d from queue %d\n", | ||
1198 | kfmlp_get_idx(sem, my_queue), | ||
1199 | max_hp->comm, max_hp->pid, | ||
1200 | kfmlp_get_idx(sem, my_queue)); | ||
1201 | |||
1202 | my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp); | ||
1203 | |||
1204 | /* | ||
1205 | if(my_queue->hp_waiter) | ||
1206 | TRACE_CUR("queue %d: new hp_waiter is %s/%d\n", | ||
1207 | kfmlp_get_idx(sem, my_queue), | ||
1208 | my_queue->hp_waiter->comm, | ||
1209 | my_queue->hp_waiter->pid); | ||
1210 | else | ||
1211 | TRACE_CUR("queue %d: new hp_waiter is %p\n", | ||
1212 | kfmlp_get_idx(sem, my_queue), NULL); | ||
1213 | */ | ||
1214 | |||
1215 | raw_spin_lock(&gsnedf_lock); | ||
1216 | |||
1217 | /* | ||
1218 | if(my_queue->owner) | ||
1219 | TRACE_CUR("queue %d: owner is %s/%d\n", | ||
1220 | kfmlp_get_idx(sem, my_queue), | ||
1221 | my_queue->owner->comm, | ||
1222 | my_queue->owner->pid); | ||
1223 | else | ||
1224 | TRACE_CUR("queue %d: owner is %p\n", | ||
1225 | kfmlp_get_idx(sem, my_queue), | ||
1226 | NULL); | ||
1227 | */ | ||
1228 | |||
1229 | if(tsk_rt(my_queue->owner)->inh_task == max_hp) | ||
1230 | { | ||
1231 | __clear_priority_inheritance(my_queue->owner); | ||
1232 | if(my_queue->hp_waiter != NULL) | ||
1233 | { | ||
1234 | __set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); | ||
1235 | } | ||
1236 | } | ||
1237 | raw_spin_unlock(&gsnedf_lock); | ||
1238 | |||
1239 | list_for_each(pos, &my_queue->wait.task_list) | ||
1240 | { | ||
1241 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
1242 | task_list)->private; | ||
1243 | /* Compare task prios, find high prio task. */ | ||
1244 | if (queued == max_hp) | ||
1245 | { | ||
1246 | /* | ||
1247 | TRACE_CUR("queue %d: found entry in wait queue. REMOVING!\n", | ||
1248 | kfmlp_get_idx(sem, my_queue)); | ||
1249 | */ | ||
1250 | __remove_wait_queue(&my_queue->wait, | ||
1251 | list_entry(pos, wait_queue_t, task_list)); | ||
1252 | break; | ||
1253 | } | ||
1254 | } | ||
1255 | --(my_queue->count); | ||
1256 | } | ||
1257 | |||
1258 | return(max_hp); | ||
1259 | } | ||
1260 | |||
1261 | int gsnedf_kfmlp_lock(struct litmus_lock* l) | ||
1262 | { | ||
1263 | struct task_struct* t = current; | ||
1264 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1265 | struct kfmlp_queue* my_queue; | ||
1266 | wait_queue_t wait; | ||
1267 | unsigned long flags; | ||
1268 | |||
1269 | if (!is_realtime(t)) | ||
1270 | return -EPERM; | ||
1271 | |||
1272 | spin_lock_irqsave(&sem->lock, flags); | ||
1273 | |||
1274 | my_queue = sem->shortest_queue; | ||
1275 | |||
1276 | if (my_queue->owner) { | ||
1277 | /* resource is not free => must suspend and wait */ | ||
1278 | TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n", | ||
1279 | kfmlp_get_idx(sem, my_queue)); | ||
1280 | |||
1281 | init_waitqueue_entry(&wait, t); | ||
1282 | |||
1283 | /* FIXME: interruptible would be nice some day */ | ||
1284 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
1285 | |||
1286 | __add_wait_queue_tail_exclusive(&my_queue->wait, &wait); | ||
1287 | |||
1288 | /* check if we need to activate priority inheritance */ | ||
1289 | if (edf_higher_prio(t, my_queue->hp_waiter)) | ||
1290 | { | ||
1291 | my_queue->hp_waiter = t; | ||
1292 | if (edf_higher_prio(t, my_queue->owner)) | ||
1293 | { | ||
1294 | set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); | ||
1295 | } | ||
1296 | } | ||
1297 | |||
1298 | ++(my_queue->count); | ||
1299 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
1300 | |||
1301 | /* release lock before sleeping */ | ||
1302 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1303 | |||
1304 | /* We depend on the FIFO order. Thus, we don't need to recheck | ||
1305 | * when we wake up; we are guaranteed to have the lock since | ||
1306 | * there is only one wake up per release (or steal). | ||
1307 | */ | ||
1308 | schedule(); | ||
1309 | |||
1310 | |||
1311 | if(my_queue->owner == t) | ||
1312 | { | ||
1313 | TRACE_CUR("queue %d: acquired through waiting\n", | ||
1314 | kfmlp_get_idx(sem, my_queue)); | ||
1315 | } | ||
1316 | else | ||
1317 | { | ||
1318 | /* this case may happen if our wait entry was stolen | ||
1319 | between queues. record where we went. */ | ||
1320 | my_queue = kfmlp_get_queue(sem, t); | ||
1321 | |||
1322 | BUG_ON(!my_queue); | ||
1323 | TRACE_CUR("queue %d: acquired through stealing\n", | ||
1324 | kfmlp_get_idx(sem, my_queue)); | ||
1325 | } | ||
1326 | } | ||
1327 | else | ||
1328 | { | ||
1329 | TRACE_CUR("queue %d: acquired immediately\n", | ||
1330 | kfmlp_get_idx(sem, my_queue)); | ||
1331 | |||
1332 | my_queue->owner = t; | ||
1333 | |||
1334 | ++(my_queue->count); | ||
1335 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
1336 | |||
1337 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1338 | } | ||
1339 | |||
1340 | return kfmlp_get_idx(sem, my_queue); | ||
1341 | } | ||
1342 | |||
1343 | int gsnedf_kfmlp_unlock(struct litmus_lock* l) | ||
1344 | { | ||
1345 | struct task_struct *t = current, *next; | ||
1346 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1347 | struct kfmlp_queue *my_queue; | ||
1348 | unsigned long flags; | ||
1349 | int err = 0; | ||
1350 | |||
1351 | spin_lock_irqsave(&sem->lock, flags); | ||
1352 | |||
1353 | my_queue = kfmlp_get_queue(sem, t); | ||
1354 | |||
1355 | if (!my_queue) { | ||
1356 | err = -EINVAL; | ||
1357 | goto out; | ||
1358 | } | ||
1359 | |||
1360 | /* check if there are jobs waiting for this resource */ | ||
1361 | next = __waitqueue_remove_first(&my_queue->wait); | ||
1362 | if (next) { | ||
1363 | /* | ||
1364 | TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n", | ||
1365 | kfmlp_get_idx(sem, my_queue), | ||
1366 | next->comm, next->pid); | ||
1367 | */ | ||
1368 | /* next becomes the resource holder */ | ||
1369 | my_queue->owner = next; | ||
1370 | |||
1371 | --(my_queue->count); | ||
1372 | if(my_queue->count < sem->shortest_queue->count) | ||
1373 | { | ||
1374 | sem->shortest_queue = my_queue; | ||
1375 | } | ||
1376 | |||
1377 | TRACE_CUR("queue %d: lock ownership passed to %s/%d\n", | ||
1378 | kfmlp_get_idx(sem, my_queue), next->comm, next->pid); | ||
1379 | |||
1380 | /* determine new hp_waiter if necessary */ | ||
1381 | if (next == my_queue->hp_waiter) { | ||
1382 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
1383 | /* next has the highest priority --- it doesn't need to | ||
1384 | * inherit. However, we need to make sure that the | ||
1385 | * next-highest priority in the queue is reflected in | ||
1386 | * hp_waiter. */ | ||
1387 | my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next); | ||
1388 | if (my_queue->hp_waiter) | ||
1389 | TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue)); | ||
1390 | else | ||
1391 | TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue)); | ||
1392 | } else { | ||
1393 | /* Well, if next is not the highest-priority waiter, | ||
1394 | * then it ought to inherit the highest-priority | ||
1395 | * waiter's priority. */ | ||
1396 | set_priority_inheritance(next, my_queue->hp_waiter); | ||
1397 | } | ||
1398 | |||
1399 | /* wake up next */ | ||
1400 | wake_up_process(next); | ||
1401 | } | ||
1402 | else | ||
1403 | { | ||
1404 | TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue)); | ||
1405 | |||
1406 | next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */ | ||
1407 | |||
1408 | /* | ||
1409 | if(next) | ||
1410 | TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n", | ||
1411 | kfmlp_get_idx(sem, my_queue), | ||
1412 | next->comm, next->pid); | ||
1413 | */ | ||
1414 | |||
1415 | my_queue->owner = next; | ||
1416 | |||
1417 | if(next) | ||
1418 | { | ||
1419 | TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n", | ||
1420 | kfmlp_get_idx(sem, my_queue), | ||
1421 | next->comm, next->pid); | ||
1422 | |||
1423 | /* wake up next */ | ||
1424 | wake_up_process(next); | ||
1425 | } | ||
1426 | else | ||
1427 | { | ||
1428 | TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue)); | ||
1429 | |||
1430 | --(my_queue->count); | ||
1431 | if(my_queue->count < sem->shortest_queue->count) | ||
1432 | { | ||
1433 | sem->shortest_queue = my_queue; | ||
1434 | } | ||
1435 | } | ||
1436 | } | ||
1437 | |||
1438 | /* we lose the benefit of priority inheritance (if any) */ | ||
1439 | if (tsk_rt(t)->inh_task) | ||
1440 | clear_priority_inheritance(t); | ||
1441 | |||
1442 | out: | ||
1443 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1444 | |||
1445 | return err; | ||
1446 | } | ||
1447 | |||
1448 | int gsnedf_kfmlp_close(struct litmus_lock* l) | ||
1449 | { | ||
1450 | struct task_struct *t = current; | ||
1451 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1452 | struct kfmlp_queue *my_queue; | ||
1453 | unsigned long flags; | ||
1454 | |||
1455 | int owner; | ||
1456 | |||
1457 | spin_lock_irqsave(&sem->lock, flags); | ||
1458 | |||
1459 | my_queue = kfmlp_get_queue(sem, t); | ||
1460 | owner = (my_queue) ? (my_queue->owner == t) : 0; | ||
1461 | |||
1462 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1463 | |||
1464 | if (owner) | ||
1465 | gsnedf_kfmlp_unlock(l); | ||
1466 | |||
1467 | return 0; | ||
1468 | } | ||
1469 | |||
1470 | void gsnedf_kfmlp_free(struct litmus_lock* l) | ||
1471 | { | ||
1472 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1473 | kfree(sem->queues); | ||
1474 | kfree(sem); | ||
1475 | } | ||
1476 | |||
1477 | static struct litmus_lock_ops gsnedf_kfmlp_lock_ops = { | ||
1478 | .close = gsnedf_kfmlp_close, | ||
1479 | .lock = gsnedf_kfmlp_lock, | ||
1480 | .unlock = gsnedf_kfmlp_unlock, | ||
1481 | .deallocate = gsnedf_kfmlp_free, | ||
1482 | }; | ||
1483 | |||
1484 | static struct litmus_lock* gsnedf_new_kfmlp(void* __user arg, int* ret_code) | ||
1485 | { | ||
1486 | struct kfmlp_semaphore* sem; | ||
1487 | int num_resources = 0; | ||
1488 | int i; | ||
1489 | |||
1490 | if(!access_ok(VERIFY_READ, arg, sizeof(num_resources))) | ||
1491 | { | ||
1492 | *ret_code = -EINVAL; | ||
1493 | return(NULL); | ||
1494 | } | ||
1495 | if(__copy_from_user(&num_resources, arg, sizeof(num_resources))) | ||
1496 | { | ||
1497 | *ret_code = -EINVAL; | ||
1498 | return(NULL); | ||
1499 | } | ||
1500 | if(num_resources < 1) | ||
1501 | { | ||
1502 | *ret_code = -EINVAL; | ||
1503 | return(NULL); | ||
1504 | } | ||
1505 | |||
1506 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
1507 | if(!sem) | ||
1508 | { | ||
1509 | *ret_code = -ENOMEM; | ||
1510 | return NULL; | ||
1511 | } | ||
1512 | |||
1513 | sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL); | ||
1514 | if(!sem->queues) | ||
1515 | { | ||
1516 | kfree(sem); | ||
1517 | *ret_code = -ENOMEM; | ||
1518 | return NULL; | ||
1519 | } | ||
1520 | |||
1521 | sem->litmus_lock.ops = &gsnedf_kfmlp_lock_ops; | ||
1522 | spin_lock_init(&sem->lock); | ||
1523 | sem->num_resources = num_resources; | ||
1524 | |||
1525 | for(i = 0; i < num_resources; ++i) | ||
1526 | { | ||
1527 | sem->queues[i].owner = NULL; | ||
1528 | sem->queues[i].hp_waiter = NULL; | ||
1529 | init_waitqueue_head(&sem->queues[i].wait); | ||
1530 | sem->queues[i].count = 0; | ||
1531 | } | ||
1532 | |||
1533 | sem->shortest_queue = &sem->queues[0]; | ||
1534 | |||
1535 | *ret_code = 0; | ||
1536 | return &sem->litmus_lock; | ||
1537 | } | ||
1538 | |||
1539 | |||
1540 | |||
1541 | |||
1542 | |||
895 | /* **** lock constructor **** */ | 1543 | /* **** lock constructor **** */ |
896 | 1544 | ||
897 | 1545 | ||
898 | static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | 1546 | static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, |
899 | void* __user unused) | 1547 | void* __user arg) |
900 | { | 1548 | { |
901 | int err = -ENXIO; | 1549 | int err = -ENXIO; |
902 | 1550 | ||
@@ -911,7 +1559,10 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | |||
911 | else | 1559 | else |
912 | err = -ENOMEM; | 1560 | err = -ENOMEM; |
913 | break; | 1561 | break; |
914 | 1562 | ||
1563 | case KFMLP_SEM: | ||
1564 | *lock = gsnedf_new_kfmlp(arg, &err); | ||
1565 | break; | ||
915 | }; | 1566 | }; |
916 | 1567 | ||
917 | return err; | 1568 | return err; |
@@ -919,7 +1570,6 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | |||
919 | 1570 | ||
920 | #endif | 1571 | #endif |
921 | 1572 | ||
922 | |||
923 | static long gsnedf_activate_plugin(void) | 1573 | static long gsnedf_activate_plugin(void) |
924 | { | 1574 | { |
925 | int cpu; | 1575 | int cpu; |
@@ -946,6 +1596,15 @@ static long gsnedf_activate_plugin(void) | |||
946 | } | 1596 | } |
947 | #endif | 1597 | #endif |
948 | } | 1598 | } |
1599 | |||
1600 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1601 | spawn_klitirqd(NULL); | ||
1602 | #endif | ||
1603 | |||
1604 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1605 | init_nvidia_info(); | ||
1606 | #endif | ||
1607 | |||
949 | return 0; | 1608 | return 0; |
950 | } | 1609 | } |
951 | 1610 | ||
@@ -963,8 +1622,15 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | |||
963 | .admit_task = gsnedf_admit_task, | 1622 | .admit_task = gsnedf_admit_task, |
964 | .activate_plugin = gsnedf_activate_plugin, | 1623 | .activate_plugin = gsnedf_activate_plugin, |
965 | #ifdef CONFIG_LITMUS_LOCKING | 1624 | #ifdef CONFIG_LITMUS_LOCKING |
966 | .allocate_lock = gsnedf_allocate_lock, | 1625 | .allocate_lock = gsnedf_allocate_lock, |
1626 | .set_prio_inh = set_priority_inheritance, | ||
1627 | .clear_prio_inh = clear_priority_inheritance, | ||
1628 | #endif | ||
1629 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1630 | .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd, | ||
1631 | .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd, | ||
967 | #endif | 1632 | #endif |
1633 | |||
968 | }; | 1634 | }; |
969 | 1635 | ||
970 | 1636 | ||
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index e6952896dc4b..1bca2e1a33cd 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c | |||
@@ -103,7 +103,9 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) | |||
103 | } | 103 | } |
104 | #ifdef __ARCH_WANT_UNLOCKED_CTXSW | 104 | #ifdef __ARCH_WANT_UNLOCKED_CTXSW |
105 | if (next->oncpu) | 105 | if (next->oncpu) |
106 | { | ||
106 | TRACE_TASK(next, "waiting for !oncpu"); | 107 | TRACE_TASK(next, "waiting for !oncpu"); |
108 | } | ||
107 | while (next->oncpu) { | 109 | while (next->oncpu) { |
108 | cpu_relax(); | 110 | cpu_relax(); |
109 | mb(); | 111 | mb(); |
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index d54886df1f57..8802670a4b0b 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c | |||
@@ -129,6 +129,27 @@ static long litmus_dummy_allocate_lock(struct litmus_lock **lock, int type, | |||
129 | return -ENXIO; | 129 | return -ENXIO; |
130 | } | 130 | } |
131 | 131 | ||
132 | static void litmus_dummy_set_prio_inh(struct task_struct* a, struct task_struct* b) | ||
133 | { | ||
134 | } | ||
135 | |||
136 | static void litmus_dummy_clear_prio_inh(struct task_struct* t) | ||
137 | { | ||
138 | } | ||
139 | |||
140 | #endif | ||
141 | |||
142 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
143 | static void litmus_dummy_set_prio_inh_klitirqd(struct task_struct* klitirqd, | ||
144 | struct task_struct* old_owner, | ||
145 | struct task_struct* new_owner) | ||
146 | { | ||
147 | } | ||
148 | |||
149 | static void litmus_dummy_clear_prio_inh_klitirqd(struct task_struct* klitirqd, | ||
150 | struct task_struct* old_owner) | ||
151 | { | ||
152 | } | ||
132 | #endif | 153 | #endif |
133 | 154 | ||
134 | 155 | ||
@@ -149,6 +170,12 @@ struct sched_plugin linux_sched_plugin = { | |||
149 | .deactivate_plugin = litmus_dummy_deactivate_plugin, | 170 | .deactivate_plugin = litmus_dummy_deactivate_plugin, |
150 | #ifdef CONFIG_LITMUS_LOCKING | 171 | #ifdef CONFIG_LITMUS_LOCKING |
151 | .allocate_lock = litmus_dummy_allocate_lock, | 172 | .allocate_lock = litmus_dummy_allocate_lock, |
173 | .set_prio_inh = litmus_dummy_set_prio_inh, | ||
174 | .clear_prio_inh = litmus_dummy_clear_prio_inh, | ||
175 | #endif | ||
176 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
177 | .set_prio_inh_klitirqd = litmus_dummy_set_prio_inh_klitirqd, | ||
178 | .clear_prio_inh_klitirqd = litmus_dummy_clear_prio_inh_klitirqd, | ||
152 | #endif | 179 | #endif |
153 | .admit_task = litmus_dummy_admit_task | 180 | .admit_task = litmus_dummy_admit_task |
154 | }; | 181 | }; |
@@ -187,6 +214,8 @@ int register_sched_plugin(struct sched_plugin* plugin) | |||
187 | CHECK(deactivate_plugin); | 214 | CHECK(deactivate_plugin); |
188 | #ifdef CONFIG_LITMUS_LOCKING | 215 | #ifdef CONFIG_LITMUS_LOCKING |
189 | CHECK(allocate_lock); | 216 | CHECK(allocate_lock); |
217 | CHECK(set_prio_inh); | ||
218 | CHECK(clear_prio_inh); | ||
190 | #endif | 219 | #endif |
191 | CHECK(admit_task); | 220 | CHECK(admit_task); |
192 | 221 | ||
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index 5ef8d09ab41f..7aeb99b668d3 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | #include <linux/percpu.h> | 9 | #include <linux/percpu.h> |
10 | #include <linux/hardirq.h> | ||
10 | 11 | ||
11 | #include <litmus/ftdev.h> | 12 | #include <litmus/ftdev.h> |
12 | #include <litmus/litmus.h> | 13 | #include <litmus/litmus.h> |
@@ -16,13 +17,13 @@ | |||
16 | #include <litmus/ftdev.h> | 17 | #include <litmus/ftdev.h> |
17 | 18 | ||
18 | 19 | ||
19 | #define NO_EVENTS (1 << CONFIG_SCHED_TASK_TRACE_SHIFT) | 20 | #define NUM_EVENTS (1 << (CONFIG_SCHED_TASK_TRACE_SHIFT+11)) |
20 | 21 | ||
21 | #define now() litmus_clock() | 22 | #define now() litmus_clock() |
22 | 23 | ||
23 | struct local_buffer { | 24 | struct local_buffer { |
24 | struct st_event_record record[NO_EVENTS]; | 25 | struct st_event_record record[NUM_EVENTS]; |
25 | char flag[NO_EVENTS]; | 26 | char flag[NUM_EVENTS]; |
26 | struct ft_buffer ftbuf; | 27 | struct ft_buffer ftbuf; |
27 | }; | 28 | }; |
28 | 29 | ||
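The redefinition above grows the per-CPU sched_trace buffer by a factor of 2^11 = 2048 so that the new tasklet and work-item events do not overflow it. For illustration only (the actual value of CONFIG_SCHED_TASK_TRACE_SHIFT is set in Kconfig; 9 is assumed here, not taken from this patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned shift = 9; /* assumed example value of CONFIG_SCHED_TASK_TRACE_SHIFT */
        unsigned old_events = 1u << shift;         /* 512 records per CPU before */
        unsigned new_events = 1u << (shift + 11);  /* 1,048,576 records per CPU after */

        printf("%u -> %u events per CPU (x%u)\n",
               old_events, new_events, new_events / old_events);
        return 0;
    }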
@@ -41,7 +42,7 @@ static int __init init_sched_task_trace(void) | |||
41 | int i, ok = 0, err; | 42 | int i, ok = 0, err; |
42 | printk("Allocated %u sched_trace_xxx() events per CPU " | 43 | printk("Allocated %u sched_trace_xxx() events per CPU " |
43 | "(buffer size: %d bytes)\n", | 44 | "(buffer size: %d bytes)\n", |
44 | NO_EVENTS, (int) sizeof(struct local_buffer)); | 45 | NUM_EVENTS, (int) sizeof(struct local_buffer)); |
45 | 46 | ||
46 | err = ftdev_init(&st_dev, THIS_MODULE, | 47 | err = ftdev_init(&st_dev, THIS_MODULE, |
47 | num_online_cpus(), "sched_trace"); | 48 | num_online_cpus(), "sched_trace"); |
@@ -50,7 +51,7 @@ static int __init init_sched_task_trace(void) | |||
50 | 51 | ||
51 | for (i = 0; i < st_dev.minor_cnt; i++) { | 52 | for (i = 0; i < st_dev.minor_cnt; i++) { |
52 | buf = &per_cpu(st_event_buffer, i); | 53 | buf = &per_cpu(st_event_buffer, i); |
53 | ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS, | 54 | ok += init_ft_buffer(&buf->ftbuf, NUM_EVENTS, |
54 | sizeof(struct st_event_record), | 55 | sizeof(struct st_event_record), |
55 | buf->flag, | 56 | buf->flag, |
56 | buf->record); | 57 | buf->record); |
@@ -154,7 +155,8 @@ feather_callback void do_sched_trace_task_switch_to(unsigned long id, | |||
154 | { | 155 | { |
155 | struct task_struct *t = (struct task_struct*) _task; | 156 | struct task_struct *t = (struct task_struct*) _task; |
156 | struct st_event_record* rec; | 157 | struct st_event_record* rec; |
157 | if (is_realtime(t)) { | 158 | //if (is_realtime(t)) /* comment out to trace EVERYTHING */ |
159 | { | ||
158 | rec = get_record(ST_SWITCH_TO, t); | 160 | rec = get_record(ST_SWITCH_TO, t); |
159 | if (rec) { | 161 | if (rec) { |
160 | rec->data.switch_to.when = now(); | 162 | rec->data.switch_to.when = now(); |
@@ -169,7 +171,8 @@ feather_callback void do_sched_trace_task_switch_away(unsigned long id, | |||
169 | { | 171 | { |
170 | struct task_struct *t = (struct task_struct*) _task; | 172 | struct task_struct *t = (struct task_struct*) _task; |
171 | struct st_event_record* rec; | 173 | struct st_event_record* rec; |
172 | if (is_realtime(t)) { | 174 | //if (is_realtime(t)) /* comment out to trace EVERYTHING */ |
175 | { | ||
173 | rec = get_record(ST_SWITCH_AWAY, t); | 176 | rec = get_record(ST_SWITCH_AWAY, t); |
174 | if (rec) { | 177 | if (rec) { |
175 | rec->data.switch_away.when = now(); | 178 | rec->data.switch_away.when = now(); |
@@ -188,6 +191,7 @@ feather_callback void do_sched_trace_task_completion(unsigned long id, | |||
188 | if (rec) { | 191 | if (rec) { |
189 | rec->data.completion.when = now(); | 192 | rec->data.completion.when = now(); |
190 | rec->data.completion.forced = forced; | 193 | rec->data.completion.forced = forced; |
194 | rec->data.completion.nv_int_count = (u16)atomic_read(&tsk_rt(t)->nv_int_count); | ||
191 | put_record(rec); | 195 | put_record(rec); |
192 | } | 196 | } |
193 | } | 197 | } |
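The completion record now also carries the number of NV interrupts attributed to the task during the job. Note the (u16) cast: atomic_read() returns an int, so per-job counts above 2^16 - 1 = 65,535 silently wrap; the narrower field keeps the record compact.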
@@ -239,3 +243,201 @@ feather_callback void do_sched_trace_action(unsigned long id, | |||
239 | put_record(rec); | 243 | put_record(rec); |
240 | } | 244 | } |
241 | } | 245 | } |
246 | |||
247 | |||
248 | feather_callback void do_sched_trace_tasklet_release(unsigned long id, | ||
249 | unsigned long _owner) | ||
250 | { | ||
251 | struct task_struct *t = (struct task_struct*) _owner; | ||
252 | struct st_event_record *rec = get_record(ST_TASKLET_RELEASE, t); | ||
253 | |||
254 | if (rec) { | ||
255 | rec->data.tasklet_release.when = now(); | ||
256 | put_record(rec); | ||
257 | } | ||
258 | } | ||
259 | |||
260 | |||
261 | feather_callback void do_sched_trace_tasklet_begin(unsigned long id, | ||
262 | unsigned long _owner) | ||
263 | { | ||
264 | struct task_struct *t = (struct task_struct*) _owner; | ||
265 | struct st_event_record *rec = get_record(ST_TASKLET_BEGIN, t); | ||
266 | |||
267 | if (rec) { | ||
268 | rec->data.tasklet_begin.when = now(); | ||
269 | |||
270 | if(!in_interrupt()) | ||
271 | rec->data.tasklet_begin.exe_pid = current->pid; | ||
272 | else | ||
273 | rec->data.tasklet_begin.exe_pid = 0; | ||
274 | |||
275 | put_record(rec); | ||
276 | } | ||
277 | } | ||
278 | EXPORT_SYMBOL(do_sched_trace_tasklet_begin); | ||
279 | |||
280 | |||
281 | feather_callback void do_sched_trace_tasklet_end(unsigned long id, | ||
282 | unsigned long _owner, | ||
283 | unsigned long _flushed) | ||
284 | { | ||
285 | struct task_struct *t = (struct task_struct*) _owner; | ||
286 | struct st_event_record *rec = get_record(ST_TASKLET_END, t); | ||
287 | |||
288 | if (rec) { | ||
289 | rec->data.tasklet_end.when = now(); | ||
290 | rec->data.tasklet_end.flushed = _flushed; | ||
291 | |||
292 | if(!in_interrupt()) | ||
293 | rec->data.tasklet_end.exe_pid = current->pid; | ||
294 | else | ||
295 | rec->data.tasklet_end.exe_pid = 0; | ||
296 | |||
297 | put_record(rec); | ||
298 | } | ||
299 | } | ||
300 | EXPORT_SYMBOL(do_sched_trace_tasklet_end); | ||
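In the two tasklet events, exe_pid records which context executed the tasklet: current->pid when it ran in process context (e.g. inside a klitirqd thread) and 0 when it ran from true interrupt context, so a trace consumer can tell threaded from unthreaded execution apart. A hedged sketch of the intended call pattern around a tasklet's function; the real call sites live in the klitirqd code elsewhere in this patch, and the owner bookkeeping on tasklet_struct is assumed from that code:

    /* sketch of a threaded executor bracketing one tasklet (owner field assumed) */
    sched_trace_tasklet_begin(t->owner);      /* exe_pid := current->pid (process context) */
    t->func(t->data);                         /* run the deferred work */
    sched_trace_tasklet_end(t->owner, 0ul);   /* flushed == 0: executed, not discarded */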
301 | |||
302 | |||
303 | feather_callback void do_sched_trace_work_release(unsigned long id, | ||
304 | unsigned long _owner) | ||
305 | { | ||
306 | struct task_struct *t = (struct task_struct*) _owner; | ||
307 | struct st_event_record *rec = get_record(ST_WORK_RELEASE, t); | ||
308 | |||
309 | if (rec) { | ||
310 | rec->data.work_release.when = now(); | ||
311 | put_record(rec); | ||
312 | } | ||
313 | } | ||
314 | |||
315 | |||
316 | feather_callback void do_sched_trace_work_begin(unsigned long id, | ||
317 | unsigned long _owner, | ||
318 | unsigned long _exe) | ||
319 | { | ||
320 | struct task_struct *t = (struct task_struct*) _owner; | ||
321 | struct st_event_record *rec = get_record(ST_WORK_BEGIN, t); | ||
322 | |||
323 | if (rec) { | ||
324 | struct task_struct *exe = (struct task_struct*) _exe; | ||
325 | rec->data.work_begin.exe_pid = exe->pid; | ||
326 | rec->data.work_begin.when = now(); | ||
327 | put_record(rec); | ||
328 | } | ||
329 | } | ||
330 | EXPORT_SYMBOL(do_sched_trace_work_begin); | ||
331 | |||
332 | |||
333 | feather_callback void do_sched_trace_work_end(unsigned long id, | ||
334 | unsigned long _owner, | ||
335 | unsigned long _exe, | ||
336 | unsigned long _flushed) | ||
337 | { | ||
338 | struct task_struct *t = (struct task_struct*) _owner; | ||
339 | struct st_event_record *rec = get_record(ST_WORK_END, t); | ||
340 | |||
341 | if (rec) { | ||
342 | struct task_struct *exe = (struct task_struct*) _exe; | ||
343 | rec->data.work_end.exe_pid = exe->pid; | ||
344 | rec->data.work_end.flushed = _flushed; | ||
345 | rec->data.work_end.when = now(); | ||
346 | put_record(rec); | ||
347 | } | ||
348 | } | ||
349 | EXPORT_SYMBOL(do_sched_trace_work_end); | ||
350 | |||
351 | |||
352 | feather_callback void do_sched_trace_eff_prio_change(unsigned long id, | ||
353 | unsigned long _task, | ||
354 | unsigned long _inh) | ||
355 | { | ||
356 | struct task_struct *t = (struct task_struct*) _task; | ||
357 | struct st_event_record *rec = get_record(ST_EFF_PRIO_CHANGE, t); | ||
358 | |||
359 | if (rec) { | ||
360 | struct task_struct *inh = (struct task_struct*) _inh; | ||
361 | rec->data.effective_priority_change.when = now(); | ||
362 | rec->data.effective_priority_change.inh_pid = (inh != NULL) ? | ||
363 | inh->pid : | ||
364 | 0xffff; | ||
365 | |||
366 | put_record(rec); | ||
367 | } | ||
368 | } | ||
369 | |||
370 | |||
371 | /* pray for no nesting of nv interrupts on same CPU... */ | ||
372 | struct tracing_interrupt_map | ||
373 | { | ||
374 | int active; | ||
375 | int count; | ||
376 | unsigned long data[128]; // assume nesting less than 128... | ||
377 | }; | ||
378 | DEFINE_PER_CPU(struct tracing_interrupt_map, active_interrupt_tracing); | ||
379 | |||
380 | feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id, | ||
381 | unsigned long _device) | ||
382 | { | ||
383 | struct st_event_record *rec; | ||
384 | |||
385 | { | ||
386 | struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id()); | ||
387 | if(int_map->active == 0xcafebabe) | ||
388 | { | ||
389 | int_map->count++; | ||
390 | } | ||
391 | else | ||
392 | { | ||
393 | int_map->active = 0xcafebabe; | ||
394 | int_map->count = 1; | ||
395 | } | ||
396 | int_map->data[int_map->count-1] = _device; | ||
397 | } | ||
398 | |||
399 | rec = get_record(ST_NV_INTERRUPT_BEGIN, NULL); | ||
400 | if(rec) { | ||
401 | u32 device = _device; | ||
402 | rec->data.nv_interrupt_begin.when = now(); | ||
403 | rec->data.nv_interrupt_begin.device = device; | ||
404 | put_record(rec); | ||
405 | } | ||
406 | } | ||
407 | EXPORT_SYMBOL(do_sched_trace_nv_interrupt_begin); | ||
408 | |||
409 | /* | ||
410 | int is_interrupt_tracing_active(void) | ||
411 | { | ||
412 | struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id()); | ||
413 | if(int_map->active == 0xcafebabe) | ||
414 | return 1; | ||
415 | return 0; | ||
416 | } | ||
417 | */ | ||
418 | |||
419 | feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id, unsigned long unused) | ||
420 | { | ||
421 | struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id()); | ||
422 | if(int_map->active == 0xcafebabe) | ||
423 | { | ||
424 | struct st_event_record *rec = get_record(ST_NV_INTERRUPT_END, NULL); | ||
425 | |||
426 | int_map->count--; | ||
427 | if(int_map->count == 0) | ||
428 | int_map->active = 0; | ||
429 | |||
430 | if(rec) { | ||
431 | rec->data.nv_interrupt_end.when = now(); | ||
432 | rec->data.nv_interrupt_end.device = int_map->data[int_map->count]; | ||
433 | put_record(rec); | ||
434 | } | ||
435 | } | ||
436 | } | ||
437 | EXPORT_SYMBOL(do_sched_trace_nv_interrupt_end); | ||
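The begin/end pair keeps a small per-CPU stack of device IDs so that nested NV interrupts, should they occur on one CPU, are attributed to the correct device on the way back out. Below is a self-contained userspace mock of that bookkeeping, handy for convincing yourself of the LIFO pairing; the 0xcafebabe sentinel, the 128-entry array and the push/pop order mirror the kernel code above, everything else is illustrative:

    #include <stdio.h>

    /* mirrors struct tracing_interrupt_map above; `active` is an int,
     * exactly as in the kernel code */
    struct tracing_interrupt_map {
            int active;
            int count;
            unsigned long data[128];
    };

    static struct tracing_interrupt_map map;   /* stands in for the per-CPU variable */

    static void nv_begin(unsigned long device)
    {
            if (map.active == 0xcafebabe)
                    map.count++;                   /* nested interrupt */
            else {
                    map.active = 0xcafebabe;       /* first level on this CPU */
                    map.count = 1;
            }
            map.data[map.count - 1] = device;      /* push the device id */
    }

    static void nv_end(void)
    {
            if (map.active != 0xcafebabe)
                    return;                        /* unmatched end: ignored, as in the kernel */
            map.count--;
            if (map.count == 0)
                    map.active = 0;
            printf("nv_interrupt_end for device %lu\n", map.data[map.count]); /* pop */
    }

    int main(void)
    {
            nv_begin(1);
            nv_begin(2);   /* nested on the same CPU */
            nv_end();      /* reports device 2 */
            nv_end();      /* reports device 1 */
            return 0;
    }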
438 | |||
439 | |||
440 | |||
441 | |||
442 | |||
443 | |||
diff --git a/litmus/sched_trace_external.c b/litmus/sched_trace_external.c new file mode 100644 index 000000000000..d7d7d8bae298 --- /dev/null +++ b/litmus/sched_trace_external.c | |||
@@ -0,0 +1,45 @@ | |||
1 | #include <linux/module.h> | ||
2 | |||
3 | #include <litmus/sched_trace.h> | ||
4 | #include <litmus/litmus.h> | ||
5 | |||
6 | void __sched_trace_tasklet_begin_external(struct task_struct* t) | ||
7 | { | ||
8 | sched_trace_tasklet_begin(t); | ||
9 | } | ||
10 | EXPORT_SYMBOL(__sched_trace_tasklet_begin_external); | ||
11 | |||
12 | void __sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed) | ||
13 | { | ||
14 | sched_trace_tasklet_end(t, flushed); | ||
15 | } | ||
16 | EXPORT_SYMBOL(__sched_trace_tasklet_end_external); | ||
17 | |||
18 | |||
19 | |||
20 | void __sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e) | ||
21 | { | ||
22 | sched_trace_work_begin(t, e); | ||
23 | } | ||
24 | EXPORT_SYMBOL(__sched_trace_work_begin_external); | ||
25 | |||
26 | void __sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f) | ||
27 | { | ||
28 | sched_trace_work_end(t, e, f); | ||
29 | } | ||
30 | EXPORT_SYMBOL(__sched_trace_work_end_external); | ||
31 | |||
32 | |||
33 | |||
34 | void __sched_trace_nv_interrupt_begin_external(u32 device) | ||
35 | { | ||
36 | unsigned long _device = device; | ||
37 | sched_trace_nv_interrupt_begin(_device); | ||
38 | } | ||
39 | EXPORT_SYMBOL(__sched_trace_nv_interrupt_begin_external); | ||
40 | |||
41 | void __sched_trace_nv_interrupt_end_external(void) | ||
42 | { | ||
43 | sched_trace_nv_interrupt_end(); | ||
44 | } | ||
45 | EXPORT_SYMBOL(__sched_trace_nv_interrupt_end_external); | ||
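sched_trace_external.c exists so that code which cannot pull in the LITMUS sched_trace macros directly (here, the GPU interrupt-handling glue) can still emit these events through ordinary exported symbols. A hedged sketch of how such an external caller would use them; only the five wrapper signatures above are taken from the patch, while the header name and the surrounding driver code are assumptions:

    /* hypothetical external module / driver glue */
    #include <linux/interrupt.h>
    #include <linux/sched.h>
    #include <litmus/sched_trace_external.h>   /* assumed header declaring the wrappers */

    static irqreturn_t my_nv_isr(int irq, void *dev_id)
    {
            u32 device = 0;   /* illustrative: would be derived from dev_id */

            __sched_trace_nv_interrupt_begin_external(device);
            /* ... acknowledge the GPU, hand bottom-half work to klitirqd ... */
            __sched_trace_nv_interrupt_end_external();

            return IRQ_HANDLED;
    }

    /* bracketing a work item run by `current` on behalf of `owner` */
    static void my_bottom_half(struct task_struct *owner)
    {
            __sched_trace_work_begin_external(owner, current);
            /* ... perform the deferred GPU work ... */
            __sched_trace_work_end_external(owner, current, 0 /* not flushed */);
    }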