author     Glenn Elliott <gelliott@cs.unc.edu>  2012-05-26 17:29:58 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-05-26 17:29:58 -0400
commit     a463f9a9e04385f0729f7435a0a6dff7d89b25de (patch)
tree       00ff42c305926c800e18b13df8440a4de1a1a041 /litmus/litmus.c
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
GPUSync patch for Litmus 2012.1.
Diffstat (limited to 'litmus/litmus.c')
-rw-r--r--  litmus/litmus.c  126
1 file changed, 123 insertions(+), 3 deletions(-)
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 301390148d02..d1f836c8af6e 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -21,6 +21,10 @@
 #include <litmus/affinity.h>
 #endif

+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/nvidia_info.h>
+#endif
+
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
 static DEFINE_RAW_SPINLOCK(task_transition_lock);
@@ -51,6 +55,28 @@ void bheap_node_free(struct bheap_node* hn)
 struct release_heap* release_heap_alloc(int gfp_flags);
 void release_heap_free(struct release_heap* rh);

+#ifdef CONFIG_LITMUS_NVIDIA
+/*
+ * sys_register_nv_device
+ * @nv_device_id: the Nvidia device ID that the task wants to register
+ * @reg_action: set to '1' to register the specified device; zero to unregister.
+ * Syscall for registering a task's designated Nvidia device in the NV_DEVICE_REG array.
+ * Returns -EFAULT if nv_device_id is out of range,
+ * 0 on success.
+ */
+asmlinkage long sys_register_nv_device(int nv_device_id, int reg_action)
+{
+        /* register the device to the caller (aka 'current') */
+        return reg_nv_device(nv_device_id, reg_action, current);
+}
+#else
+asmlinkage long sys_register_nv_device(int nv_device_id, int reg_action)
+{
+        return -EINVAL;
+}
+#endif
+
+
 /*
  * sys_set_task_rt_param
  * @pid: Pid of the task which scheduling parameters must be changed
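From userspace, the sys_register_nv_device() syscall added above would be reached through syscall(2). Below is a minimal sketch of a caller; __NR_register_nv_device is a placeholder, since the actual syscall number is assigned in the LITMUS^RT syscall table and does not appear in this diff.

    /* Hypothetical userspace caller of sys_register_nv_device().
     * __NR_register_nv_device is a placeholder, not the real number. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #define __NR_register_nv_device 400 /* placeholder syscall number */

    int main(void)
    {
        /* register GPU 0 with the calling task (reg_action == 1) */
        if (syscall(__NR_register_nv_device, 0, 1) != 0)
            perror("register_nv_device");

        /* ... submit GPU work ... */

        /* unregister on the way out (reg_action == 0) */
        syscall(__NR_register_nv_device, 0, 0);
        return 0;
    }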
@@ -269,6 +295,7 @@ asmlinkage long sys_query_job_no(unsigned int __user *job)
 	return retval;
 }

+
 /* sys_null_call() is only used for determining raw system call
  * overheads (kernel entry, kernel exit). It has no useful side effects.
  * If ts is non-NULL, then the current Feather-Trace time is recorded.
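Since sys_null_call() exists purely to measure kernel entry/exit cost, a userspace benchmark only needs to invoke it in a tight loop and average the elapsed time. A sketch, again with a placeholder syscall number, passing a NULL timestamp pointer so no Feather-Trace sample is recorded:

    /* Sketch: estimate raw syscall overhead via sys_null_call(). */
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #define __NR_null_call 401 /* placeholder syscall number */
    #define ITERATIONS 100000

    int main(void)
    {
        struct timespec start, end;
        long ns;
        int i;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (i = 0; i < ITERATIONS; i++)
            syscall(__NR_null_call, NULL); /* NULL ts: no timestamp */
        clock_gettime(CLOCK_MONOTONIC, &end);

        ns = (end.tv_sec - start.tv_sec) * 1000000000L
            + (end.tv_nsec - start.tv_nsec);
        printf("avg kernel entry/exit: %ld ns\n", ns / ITERATIONS);
        return 0;
    }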
@@ -286,12 +313,42 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
 	return ret;
 }

+
+#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING)
+void init_gpu_affinity_state(struct task_struct* p)
+{
+	// under-damped
+	//p->rt_param.gpu_fb_param_a = _frac(14008, 10000);
+	//p->rt_param.gpu_fb_param_b = _frac(16024, 10000);
+
+	// empirical
+	p->rt_param.gpu_fb_param_a[0] = _frac(7550, 10000);
+	p->rt_param.gpu_fb_param_b[0] = _frac(45800, 10000);
+
+	p->rt_param.gpu_fb_param_a[1] = _frac(8600, 10000);
+	p->rt_param.gpu_fb_param_b[1] = _frac(40000, 10000);
+
+	p->rt_param.gpu_fb_param_a[2] = _frac(6890, 10000);
+	p->rt_param.gpu_fb_param_b[2] = _frac(40000, 10000);
+
+	p->rt_param.gpu_fb_param_a[3] = _frac(7580, 10000);
+	p->rt_param.gpu_fb_param_b[3] = _frac(34590, 10000);
+
+	p->rt_param.gpu_migration = MIG_NONE;
+	p->rt_param.last_gpu = -1;
+}
+#endif
+
 /* p is a real-time task. Re-init its state as a best-effort task. */
 static void reinit_litmus_state(struct task_struct* p, int restore)
 {
 	struct rt_task user_config = {};
 	void* ctrl_page = NULL;

+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	binheap_order_t prio_order = NULL;
+#endif
+
 	if (restore) {
 		/* Save user-space provided configuration data
 		 * and allocated page. */
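The gpu_fb_param_a/gpu_fb_param_b pairs set above are feedback-controller coefficients, one pair per migration distance, stored as fixed-point fractions: _frac(7550, 10000) encodes the rational 0.7550. The sketch below shows one plausible reading of that representation, assuming a binary fixed-point type with 10 fractional bits; the real fp_t/_frac() definitions live in the LITMUS^RT fixed-point math header and may differ in detail.

    /* Illustrative binary fixed-point fraction, in the spirit of
     * LITMUS^RT's fp_t; FP_SHIFT = 10 is an assumption. */
    #include <stdio.h>

    #define FP_SHIFT 10

    typedef struct { long val; } fp_t;

    static fp_t _frac(long num, long denom)
    {
        fp_t r = { (num << FP_SHIFT) / denom };
        return r;
    }

    static double fp_to_double(fp_t f)
    {
        return (double) f.val / (1 << FP_SHIFT);
    }

    int main(void)
    {
        fp_t a = _frac(7550, 10000);
        printf("a ~= %f\n", fp_to_double(a)); /* prints ~0.755 */
        return 0;
    }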
@@ -299,11 +356,38 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 		ctrl_page = p->rt_param.ctrl_page;
 	}

+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	prio_order = p->rt_param.hp_blocked_tasks.compare;
+#endif
+
 	/* We probably should not be inheriting any task's priority
 	 * at this point in time.
 	 */
 	WARN_ON(p->rt_param.inh_task);

+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	WARN_ON(p->rt_param.blocked_lock);
+	WARN_ON(!binheap_empty(&p->rt_param.hp_blocked_tasks));
+#endif
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	/* We probably should not have any tasklets executing for
+	 * us at this time.
+	 */
+	WARN_ON(p->rt_param.cur_klitirqd);
+	WARN_ON(atomic_read(&p->rt_param.klitirqd_sem_stat) == HELD);
+
+	if (p->rt_param.cur_klitirqd)
+		flush_pending(p->rt_param.cur_klitirqd, p);
+
+	if (atomic_read(&p->rt_param.klitirqd_sem_stat) == HELD)
+		up_and_set_stat(p, NOT_HELD, &p->rt_param.klitirqd_sem);
+#endif
+
+#ifdef CONFIG_LITMUS_NVIDIA
+	WARN_ON(p->rt_param.held_gpus != 0);
+#endif
+
 	/* Cleanup everything else. */
 	memset(&p->rt_param, 0, sizeof(p->rt_param));
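These reinit_litmus_state() changes extend an existing save/clear/restore idiom (continued in the next hunk): anything that must survive the memset() of rt_param, the user configuration, the control page, and now the binheap comparator, is copied to a local first and reinstalled afterwards. A condensed sketch of the idiom, with illustrative field names rather than the actual rt_param layout:

    /* Save/clear/restore idiom from reinit_litmus_state(); the
     * struct below is illustrative, not the real rt_param. */
    #include <string.h>

    typedef int (*order_fn)(const void *, const void *);

    struct rt_state {
        void *ctrl_page;   /* preserved only when restoring */
        order_fn cmp;      /* always preserved across reinit */
        int scratch[16];   /* everything else is wiped */
    };

    static void reinit(struct rt_state *s, int restore)
    {
        void *ctrl = NULL;
        order_fn cmp = s->cmp;       /* save before the wipe */

        if (restore)
            ctrl = s->ctrl_page;

        memset(s, 0, sizeof(*s));    /* clear everything */

        if (restore)
            s->ctrl_page = ctrl;     /* reinstall saved fields */
        s->cmp = cmp;
    }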
@@ -312,6 +396,15 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 		p->rt_param.task_params = user_config;
 		p->rt_param.ctrl_page = ctrl_page;
 	}
+
+#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING)
+	init_gpu_affinity_state(p);
+#endif
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	INIT_BINHEAP_HANDLE(&p->rt_param.hp_blocked_tasks, prio_order);
+	raw_spin_lock_init(&p->rt_param.hp_blocked_tasks_lock);
+#endif
 }

 long litmus_admit_task(struct task_struct* tsk)
@@ -358,6 +451,26 @@ long litmus_admit_task(struct task_struct* tsk)
 		bheap_node_init(&tsk_rt(tsk)->heap_node, tsk);
 	}

+
+#ifdef CONFIG_LITMUS_NVIDIA
+	atomic_set(&tsk_rt(tsk)->nv_int_count, 0);
+#endif
+#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING)
+	init_gpu_affinity_state(tsk);
+#endif
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	tsk_rt(tsk)->blocked_lock = NULL;
+	raw_spin_lock_init(&tsk_rt(tsk)->hp_blocked_tasks_lock);
+	//INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, prio_order); // done by scheduler
+#endif
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	/* proxy thread off by default */
+	tsk_rt(tsk)->is_proxy_thread = 0;
+	tsk_rt(tsk)->cur_klitirqd = NULL;
+	mutex_init(&tsk_rt(tsk)->klitirqd_sem);
+	atomic_set(&tsk_rt(tsk)->klitirqd_sem_stat, NOT_HELD);
+#endif
+
 	retval = litmus->admit_task(tsk);

 	if (!retval) {
@@ -403,7 +516,7 @@ static void synch_on_plugin_switch(void* info)
  */
 int switch_sched_plugin(struct sched_plugin* plugin)
 {
-	unsigned long flags;
+	//unsigned long flags;
 	int ret = 0;

 	BUG_ON(!plugin);
@@ -417,8 +530,15 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 	while (atomic_read(&cannot_use_plugin) < num_online_cpus())
 		cpu_relax();

+#ifdef CONFIG_LITMUS_SOFTIRQD
+	if (!klitirqd_is_dead())
+	{
+		kill_klitirqd();
+	}
+#endif
+
 	/* stop task transitions */
-	raw_spin_lock_irqsave(&task_transition_lock, flags);
+	//raw_spin_lock_irqsave(&task_transition_lock, flags);

 	/* don't switch if there are active real-time tasks */
 	if (atomic_read(&rt_task_count) == 0) {
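The surrounding code relies on a simple rendezvous: synch_on_plugin_switch() runs on every CPU and bumps cannot_use_plugin, while the initiating CPU spins until the count covers all online CPUs, performs the switch, and then zeroes the counter to release everyone. A userspace sketch of the same pattern using C11 atomics (the kernel version uses atomic_t, cpu_relax(), and IPIs):

    /* Rendezvous pattern used by switch_sched_plugin(), sketched
     * with C11 atomics; NCPUS stands in for num_online_cpus(). */
    #include <stdatomic.h>

    #define NCPUS 4

    static atomic_int cannot_use_plugin = 0;

    /* runs on every participating CPU */
    void synch_on_plugin_switch(void)
    {
        atomic_fetch_add(&cannot_use_plugin, 1);
        while (atomic_load(&cannot_use_plugin) > 0)
            ; /* spin until the switch completes */
    }

    /* runs on the initiating CPU */
    void do_plugin_switch(void)
    {
        while (atomic_load(&cannot_use_plugin) < NCPUS)
            ; /* wait for all CPUs to check in */
        /* ... safe to swap the scheduler plugin here ... */
        atomic_store(&cannot_use_plugin, 0); /* release spinners */
    }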
@@ -436,7 +556,7 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 	} else
 		ret = -EBUSY;
 out:
-	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
+	//raw_spin_unlock_irqrestore(&task_transition_lock, flags);
 	atomic_set(&cannot_use_plugin, 0);
 	return ret;
 }