Diffstat (limited to 'litmus/litmus.c')
-rw-r--r--   litmus/litmus.c   280
1 file changed, 247 insertions(+), 33 deletions(-)
diff --git a/litmus/litmus.c b/litmus/litmus.c
index dc94be71bfb6..2911e7ec7029 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -23,6 +23,14 @@
 #include <litmus/affinity.h>
 #endif
 
+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/nvidia_info.h>
+#endif
+
+#ifdef CONFIG_REALTIME_AUX_TASKS
+#include <litmus/aux_tasks.h>
+#endif
+
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
 
@@ -135,6 +143,16 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
 		       pid, tp.budget_policy);
 		goto out_unlock;
 	}
+	if (tp.budget_signal_policy != NO_SIGNALS &&
+	    tp.budget_signal_policy != QUANTUM_SIGNALS &&
+	    tp.budget_signal_policy != PRECISE_SIGNALS)
+	{
+		printk(KERN_INFO "litmus: real-time task %d rejected "
+		       "because unsupported budget signalling policy "
+		       "specified (%d)\n",
+		       pid, tp.budget_signal_policy);
+		goto out_unlock;
+	}
 
 	target->rt_param.task_params = tp;
 
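Note: the new check mirrors the budget_policy validation directly above it; sys_set_rt_task_param() now also rejects a task whose budget_signal_policy is not one of NO_SIGNALS, QUANTUM_SIGNALS, or PRECISE_SIGNALS. Below is a minimal, hedged user-space sketch of how a task might request budget-overrun signals before admission. The liblitmus helpers used (set_rt_task_param(), task_mode(), ms2ns(), gettid()) are standard liblitmus calls; the budget_signal_policy field and the PRECISE_SIGNALS constant on the user-space side are assumptions taken from this patch set, not upstream liblitmus.

    /* Hedged sketch: request precise budget-overrun signals before becoming a
     * real-time task.  Assumes the user-space struct rt_task mirrors the
     * kernel's, including the budget_signal_policy field added by this patch. */
    #include <string.h>
    #include <litmus.h>

    int become_rt_with_budget_signals(void)
    {
            struct rt_task param;

            memset(&param, 0, sizeof(param));
            param.exec_cost = ms2ns(10);                   /* e = 10 ms  */
            param.period    = ms2ns(100);                  /* p = 100 ms */
            param.budget_policy = PRECISE_ENFORCEMENT;
            param.budget_signal_policy = PRECISE_SIGNALS;  /* field from this patch */

            /* sys_set_rt_task_param() runs the checks shown in the hunk above. */
            if (set_rt_task_param(gettid(), &param) != 0)
                    return -1;
            return task_mode(LITMUS_RT_TASK);
    }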
@@ -272,6 +290,7 @@ asmlinkage long sys_query_job_no(unsigned int __user *job)
 	return retval;
 }
 
+
 /* sys_null_call() is only used for determining raw system call
  * overheads (kernel entry, kernel exit). It has no useful side effects.
  * If ts is non-NULL, then the current Feather-Trace time is recorded.
@@ -289,12 +308,117 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
 	return ret;
 }
 
+
+asmlinkage long sys_sched_trace_event(int event, struct st_inject_args __user *__args)
+{
+	long retval = 0;
+	struct task_struct* t = current;
+
+	struct st_inject_args args;
+
+	if (is_realtime(t)) {
+		printk(KERN_WARNING "Only non-real-time tasks may inject sched_trace events.\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	if (__args && copy_from_user(&args, __args, sizeof(args))) {
+		retval = -EFAULT;
+		goto out;
+	}
+
+	switch(event) {
+		/*************************************/
+		/* events that don't need parameters */
+		/*************************************/
+		case ST_INJECT_NAME:
+			sched_trace_task_name(t);
+			break;
+		case ST_INJECT_PARAM:
+			/* presumes sporadic_task_ns() has already been called
+			 * and valid data has been initialized even if the calling
+			 * task is SCHED_NORMAL. */
+			sched_trace_task_param(t);
+			break;
+
+		/*******************************/
+		/* events that need parameters */
+		/*******************************/
+		case ST_INJECT_COMPLETION:
+			if (!__args) {
+				retval = -EINVAL;
+				goto out;
+			}
+
+			/* slam in the data */
+			t->rt_param.job_params.job_no = args.job_no;
+
+			sched_trace_task_completion(t, 0);
+			break;
+		case ST_INJECT_RELEASE:
+			if (!__args) {
+				retval = -EINVAL;
+				goto out;
+			}
+
+			/* slam in the data */
+			tsk_rt(t)->job_params.release = args.release;
+			tsk_rt(t)->job_params.deadline = args.deadline;
+
+			sched_trace_task_release(t);
+			break;
+
+		/**********************/
+		/* unsupported events */
+		/**********************/
+		default:
+			retval = -EINVAL;
+			break;
+	}
+
+out:
+	return retval;
+}
+
+
+#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING)
+void init_gpu_affinity_state(struct task_struct* p)
+{
+	// under-damped
+	//p->rt_param.gpu_fb_param_a = _frac(14008, 10000);
+	//p->rt_param.gpu_fb_param_b = _frac(16024, 10000);
+
+#if 0
+	// emperical;
+	p->rt_param.gpu_fb_param_a[0] = _frac(7550, 10000);
+	p->rt_param.gpu_fb_param_b[0] = _frac(45800, 10000);
+
+	p->rt_param.gpu_fb_param_a[1] = _frac(8600, 10000);
+	p->rt_param.gpu_fb_param_b[1] = _frac(40000, 10000);
+
+	p->rt_param.gpu_fb_param_a[2] = _frac(6890, 10000);
+	p->rt_param.gpu_fb_param_b[2] = _frac(40000, 10000);
+
+	p->rt_param.gpu_fb_param_a[3] = _frac(7580, 10000);
+	p->rt_param.gpu_fb_param_b[3] = _frac(34590, 10000);
+#endif
+	p->rt_param.gpu_migration = MIG_NONE;
+	p->rt_param.last_gpu = -1;
+}
+#endif
+
 /* p is a real-time task. Re-init its state as a best-effort task. */
 static void reinit_litmus_state(struct task_struct* p, int restore)
 {
 	struct rt_task user_config = {};
 	void* ctrl_page = NULL;
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	binheap_order_t prio_order = NULL;
+#endif
+
+	TRACE_TASK(p, "reinit_litmus_state: restore = %d\n", restore);
+
 	if (restore) {
 		/* Safe user-space provided configuration data.
 		 * and allocated page. */
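Note: sys_sched_trace_event() lets a non-real-time helper inject synthetic sched_trace records. ST_INJECT_NAME and ST_INJECT_PARAM take no arguments; ST_INJECT_COMPLETION and ST_INJECT_RELEASE require a struct st_inject_args carrying job_no or release/deadline. A hedged user-space sketch follows; the ST_INJECT_* constants come from the patched kernel headers, while the syscall-number macro (__NR_sched_trace_event) and the exact st_inject_args layout shown here are assumptions based on this patch, not a documented interface.

    /* Hedged sketch: inject a synthetic release and completion for the current
     * (non-real-time) helper task.  Assumes a raw syscall wrapper and the
     * st_inject_args layout introduced by this patch set. */
    #include <unistd.h>
    #include <sys/syscall.h>

    struct st_inject_args {             /* assumed layout, mirrors the kernel patch */
            unsigned long long release;     /* lt_t in the kernel */
            unsigned long long deadline;    /* lt_t in the kernel */
            unsigned int job_no;
    };

    static long sched_trace_event(int event, struct st_inject_args *args)
    {
            /* __NR_sched_trace_event stands for whatever syscall number the
             * patched kernel assigns to sys_sched_trace_event(). */
            return syscall(__NR_sched_trace_event, event, args);
    }

    void inject_job(unsigned int job_no,
                    unsigned long long release, unsigned long long deadline)
    {
            struct st_inject_args args = {
                    .release = release, .deadline = deadline, .job_no = job_no,
            };

            sched_trace_event(ST_INJECT_RELEASE, &args);     /* uses release/deadline */
            sched_trace_event(ST_INJECT_COMPLETION, &args);  /* uses job_no */
    }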
@@ -302,48 +426,57 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 		ctrl_page = p->rt_param.ctrl_page;
 	}
 
+#ifdef CONFIG_LITMUS_NVIDIA
+	WARN_ON(p->rt_param.held_gpus != 0);
+#endif
+
+#ifdef CONFIG_LITMUS_LOCKING
 	/* We probably should not be inheriting any task's priority
 	 * at this point in time.
 	 */
 	WARN_ON(p->rt_param.inh_task);
+#endif
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	prio_order = p->rt_param.hp_blocked_tasks.compare;
+#endif
 
 	/* Cleanup everything else. */
 	memset(&p->rt_param, 0, sizeof(p->rt_param));
 
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	/* also clear out the aux_data. the !restore case is only called on
+	 * fork (initial thread creation). */
+	if (!restore) {
+		memset(&p->aux_data, 0, sizeof(p->aux_data));
+	}
+#endif
+
 	/* Restore preserved fields. */
 	if (restore) {
 		p->rt_param.task_params = user_config;
 		p->rt_param.ctrl_page = ctrl_page;
 	}
-}
 
-long litmus_admit_task(struct task_struct* tsk)
-{
-	long retval = 0;
-
-	BUG_ON(is_realtime(tsk));
+#ifdef CONFIG_LITMUS_NVIDIA
+	INIT_BINHEAP_NODE(&p->rt_param.gpu_owner_node);
+#endif
+
+#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING)
+	init_gpu_affinity_state(p);
+#endif
 
-	tsk_rt(tsk)->heap_node = NULL;
-	tsk_rt(tsk)->rel_heap = NULL;
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	INIT_BINHEAP_HANDLE(&p->rt_param.hp_blocked_tasks, prio_order);
+	raw_spin_lock_init(&p->rt_param.hp_blocked_tasks_lock);
+#endif
+}
 
-	if (get_rt_relative_deadline(tsk) == 0 ||
-	    get_exec_cost(tsk) >
-	    min(get_rt_relative_deadline(tsk), get_rt_period(tsk)) ) {
-		TRACE_TASK(tsk,
-			"litmus admit: invalid task parameters "
-			"(e = %lu, p = %lu, d = %lu)\n",
-			get_exec_cost(tsk), get_rt_period(tsk),
-			get_rt_relative_deadline(tsk));
-		retval = -EINVAL;
-		goto out;
-	}
 
-	if (!cpu_online(get_partition(tsk))) {
-		TRACE_TASK(tsk, "litmus admit: cpu %d is not online\n",
-			get_partition(tsk));
-		retval = -EINVAL;
-		goto out;
-	}
+
+long __litmus_admit_task(struct task_struct* tsk)
+{
+	long retval = 0;
 
 	INIT_LIST_HEAD(&tsk_rt(tsk)->list);
 
@@ -360,6 +493,17 @@ long litmus_admit_task(struct task_struct* tsk)
 		bheap_node_init(&tsk_rt(tsk)->heap_node, tsk);
 	}
 
+#ifdef CONFIG_LITMUS_NVIDIA
+	atomic_set(&tsk_rt(tsk)->nv_int_count, 0);
+#endif
+#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING)
+	init_gpu_affinity_state(tsk);
+#endif
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	tsk_rt(tsk)->blocked_lock = NULL;
+	raw_spin_lock_init(&tsk_rt(tsk)->hp_blocked_tasks_lock);
+#endif
+
 	preempt_disable();
 
 	retval = litmus->admit_task(tsk);
@@ -372,14 +516,56 @@ long litmus_admit_task(struct task_struct* tsk)
 
 	preempt_enable();
 
-out:
 	if (retval) {
 		bheap_node_free(tsk_rt(tsk)->heap_node);
 		release_heap_free(tsk_rt(tsk)->rel_heap);
 	}
+
+out:
+	return retval;
+}
+
+long litmus_admit_task(struct task_struct* tsk)
+{
+	long retval = 0;
+
+	BUG_ON(is_realtime(tsk));
+
+	if (get_rt_relative_deadline(tsk) == 0 ||
+	    get_exec_cost(tsk) >
+	    min(get_rt_relative_deadline(tsk), get_rt_period(tsk)) ) {
+		TRACE_TASK(tsk,
+			"litmus admit: invalid task parameters "
+			"(e = %lu, p = %lu, d = %lu)\n",
+			get_exec_cost(tsk), get_rt_period(tsk),
+			get_rt_relative_deadline(tsk));
+		retval = -EINVAL;
+		goto out;
+	}
+
+	if (!cpu_online(get_partition(tsk))) {
+		TRACE_TASK(tsk, "litmus admit: cpu %d is not online\n",
+			get_partition(tsk));
+		retval = -EINVAL;
+		goto out;
+	}
+
+	retval = __litmus_admit_task(tsk);
+
+out:
 	return retval;
 }
 
+void litmus_pre_exit_task(struct task_struct* tsk)
+{
+	if (is_realtime(tsk)) {
+		if (tsk_rt(tsk)->rsrc_exit_cb) {
+			int ret = tsk_rt(tsk)->rsrc_exit_cb(tsk);
+			WARN_ON(ret != 0);
+		}
+	}
+}
+
 void litmus_exit_task(struct task_struct* tsk)
 {
 	if (is_realtime(tsk)) {
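Note: after this split, litmus_admit_task() keeps only the parameter sanity checks and delegates allocation and plugin admission to __litmus_admit_task(). The admission test requires d != 0 and e <= min(d, p), where e is the execution cost, p the period, and d the relative deadline; for example, (e, p, d) = (10 ms, 100 ms, 50 ms) is admissible, while (60 ms, 100 ms, 50 ms) is rejected because e > d.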
@@ -388,7 +574,7 @@ void litmus_exit_task(struct task_struct* tsk)
 		litmus->task_exit(tsk);
 
 		BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node));
-		bheap_node_free(tsk_rt(tsk)->heap_node);
+	    bheap_node_free(tsk_rt(tsk)->heap_node);
 		release_heap_free(tsk_rt(tsk)->rel_heap);
 
 		atomic_dec(&rt_task_count);
@@ -406,14 +592,19 @@ static int do_plugin_switch(void *_plugin)
 		ret = litmus->deactivate_plugin();
 		if (0 != ret)
 			goto out;
-		ret = plugin->activate_plugin();
+
+		litmus = plugin; /* optimistic switch */
+		mb();
+
+		ret = litmus->activate_plugin();
 		if (0 != ret) {
 			printk(KERN_INFO "Can't activate %s (%d).\n",
-			       plugin->plugin_name, ret);
-			plugin = &linux_sched_plugin;
+			       litmus->plugin_name, ret);
+			litmus = &linux_sched_plugin; /* fail to Linux */
+			ret = litmus->activate_plugin();
+			BUG_ON(ret);
 		}
-		printk(KERN_INFO "Switching to LITMUS^RT plugin %s.\n", plugin->plugin_name);
-		litmus = plugin;
+		printk(KERN_INFO "Switched to LITMUS^RT plugin %s.\n", litmus->plugin_name);
 	} else
 		ret = -EBUSY;
 out:
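Note: the rewritten do_plugin_switch() installs the new plugin optimistically (litmus = plugin before activation) and, if activate_plugin() fails, falls back to the stock Linux plugin rather than leaving the old pointer in place. In stock LITMUS^RT, a switch is normally requested from user space by writing the plugin name to /proc/litmus/active_plugin (the liblitmus setsched helper does this); the sketch below is a hedged illustration of that request path, not part of this patch.

    /* Hedged sketch: ask the kernel to switch the active scheduling plugin.
     * switch_sched_plugin() then runs do_plugin_switch() under stop_machine()
     * once no real-time tasks remain.  Requires root. */
    #include <stdio.h>

    int request_plugin(const char *name)   /* e.g. "GSN-EDF" or "Linux" */
    {
            FILE *f = fopen("/proc/litmus/active_plugin", "w");
            if (!f)
                    return -1;
            fprintf(f, "%s", name);
            return fclose(f);
    }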
@@ -429,6 +620,12 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 {
 	BUG_ON(!plugin);
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	if (!klmirqd_is_dead()) {
+		kill_klmirqd();
+	}
+#endif
+
 	if (atomic_read(&rt_task_count) == 0)
 		return stop_machine(do_plugin_switch, plugin, NULL);
 	else
@@ -441,18 +638,33 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 void litmus_fork(struct task_struct* p)
 {
 	if (is_realtime(p)) {
+		TRACE_TASK(p, "fork, is real-time\n");
+
 		/* clean out any litmus related state, don't preserve anything */
 		reinit_litmus_state(p, 0);
+
 		/* Don't let the child be a real-time task. */
 		p->sched_reset_on_fork = 1;
-	} else
+
+	} else {
 		/* non-rt tasks might have ctrl_page set */
 		tsk_rt(p)->ctrl_page = NULL;
 
+		reinit_litmus_state(p, 0);
+	}
+
 	/* od tables are never inherited across a fork */
 	p->od_table = NULL;
 }
 
+/* Called right before copy_process() returns a forked thread. */
+void litmus_post_fork_thread(struct task_struct* p)
+{
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	make_aux_task_if_required(p);
+#endif
+}
+
 /* Called upon execve().
  * current is doing the exec.
  * Don't let address space specific stuff leak.
@@ -486,8 +698,10 @@ void exit_litmus(struct task_struct *dead_tsk)
 	}
 
 	/* main cleanup only for RT tasks */
-	if (is_realtime(dead_tsk))
+	if (is_realtime(dead_tsk)) {
+		litmus_pre_exit_task(dead_tsk); /* todo: double check that no Linux rq lock is held */
 		litmus_exit_task(dead_tsk);
+	}
 }
 
 