Diffstat (limited to 'litmus')
-rw-r--r--  litmus/ftdev.c         |  1
-rw-r--r--  litmus/litmus.c        | 10
-rw-r--r--  litmus/rt_domain.c     | 18
-rw-r--r--  litmus/sched_cedf.c    | 25
-rw-r--r--  litmus/sched_gsn_edf.c | 36
-rw-r--r--  litmus/sched_litmus.c  | 37
-rw-r--r--  litmus/sched_pfair.c   | 25
-rw-r--r--  litmus/sched_plugin.c  | 14
-rw-r--r--  litmus/sched_psn_edf.c | 24
-rw-r--r--  litmus/sched_trace.c   | 40
10 files changed, 108 insertions(+), 122 deletions(-)
diff --git a/litmus/ftdev.c b/litmus/ftdev.c
index 8b2d74d816a2..51dafaebf8a6 100644
--- a/litmus/ftdev.c
+++ b/litmus/ftdev.c
@@ -1,5 +1,6 @@
 #include <linux/sched.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
 #include <linux/cdev.h>
 #include <asm/uaccess.h>
 #include <linux/module.h>
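
A note on the new #include <linux/slab.h> lines in this and later files: around 2.6.34 the kernel stopped providing slab.h implicitly through other headers (percpu.h in particular), so every file that calls kmalloc()/kfree() must include it directly. A minimal sketch of the dependency, with illustrative names not taken from this patch:

#include <linux/slab.h>

static void *example_node_alloc(void)
{
	/* GFP_ATOMIC, as used by the admission code in litmus.c below */
	return kmalloc(sizeof(long), GFP_ATOMIC);
}
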
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 5bf848386e1c..b71fc819eb51 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -23,7 +23,7 @@
 
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(task_transition_lock);
+static DEFINE_RAW_SPINLOCK(task_transition_lock);
 /* synchronize plugin switching */
 atomic_t cannot_use_plugin = ATOMIC_INIT(0);
 
@@ -330,7 +330,7 @@ long litmus_admit_task(struct task_struct* tsk)
 	INIT_LIST_HEAD(&tsk_rt(tsk)->list);
 
 	/* avoid scheduler plugin changing underneath us */
-	spin_lock_irqsave(&task_transition_lock, flags);
+	raw_spin_lock_irqsave(&task_transition_lock, flags);
 
 	/* allocate heap node for this task */
 	tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
@@ -357,7 +357,7 @@ long litmus_admit_task(struct task_struct* tsk)
 	}
 
 out_unlock:
-	spin_unlock_irqrestore(&task_transition_lock, flags);
+	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
 out:
 	return retval;
 }
@@ -403,7 +403,7 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 	smp_call_function(synch_on_plugin_switch, NULL, 0);
 
 	/* stop task transitions */
-	spin_lock_irqsave(&task_transition_lock, flags);
+	raw_spin_lock_irqsave(&task_transition_lock, flags);
 
 	/* don't switch if there are active real-time tasks */
 	if (atomic_read(&rt_task_count) == 0) {
@@ -421,7 +421,7 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 	} else
 		ret = -EBUSY;
 out:
-	spin_unlock_irqrestore(&task_transition_lock, flags);
+	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
 	atomic_set(&cannot_use_plugin, 0);
 	return ret;
 }
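
The spin_lock -> raw_spin_lock conversions that make up most of this patch track the spinlock split introduced in mainline 2.6.33: spinlock_t may become a sleeping lock under PREEMPT_RT, while raw_spinlock_t always busy-waits, which is what scheduler-internal critical sections require. A minimal sketch of the pattern (illustrative names, not from this patch):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;

	/* disables local interrupts and always spins, even on -rt */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... scheduler-critical work, no sleeping allowed ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}
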
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 609ff0f82abb..8d5db6050723 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -53,11 +53,11 @@ static enum hrtimer_restart on_release_timer(struct hrtimer *timer)
 
 	rh = container_of(timer, struct release_heap, timer);
 
-	spin_lock_irqsave(&rh->dom->release_lock, flags);
+	raw_spin_lock_irqsave(&rh->dom->release_lock, flags);
 	TRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock);
 	/* remove from release queue */
 	list_del(&rh->list);
-	spin_unlock_irqrestore(&rh->dom->release_lock, flags);
+	raw_spin_unlock_irqrestore(&rh->dom->release_lock, flags);
 	TRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock);
 
 	/* call release callback */
@@ -185,20 +185,20 @@ static void arm_release_timer(rt_domain_t *_rt)
 		list_del(pos);
 
 		/* put into release heap while holding release_lock */
-		spin_lock(&rt->release_lock);
+		raw_spin_lock(&rt->release_lock);
 		TRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock);
 
 		rh = get_release_heap(rt, t, 0);
 		if (!rh) {
 			/* need to use our own, but drop lock first */
-			spin_unlock(&rt->release_lock);
+			raw_spin_unlock(&rt->release_lock);
 			TRACE_TASK(t, "Dropped release_lock 0x%p\n",
 				   &rt->release_lock);
 
 			reinit_release_heap(t);
 			TRACE_TASK(t, "release_heap ready\n");
 
-			spin_lock(&rt->release_lock);
+			raw_spin_lock(&rt->release_lock);
 			TRACE_TASK(t, "Re-acquired release_lock 0x%p\n",
 				   &rt->release_lock);
 
@@ -207,7 +207,7 @@ static void arm_release_timer(rt_domain_t *_rt)
 		bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node);
 		TRACE_TASK(t, "arm_release_timer(): added to release heap\n");
 
-		spin_unlock(&rt->release_lock);
+		raw_spin_unlock(&rt->release_lock);
 		TRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock);
 
 		/* To avoid arming the timer multiple times, we only let the
@@ -258,9 +258,9 @@ void rt_domain_init(rt_domain_t *rt,
 	for (i = 0; i < RELEASE_QUEUE_SLOTS; i++)
 		INIT_LIST_HEAD(&rt->release_queue.slot[i]);
 
-	spin_lock_init(&rt->ready_lock);
-	spin_lock_init(&rt->release_lock);
-	spin_lock_init(&rt->tobe_lock);
+	raw_spin_lock_init(&rt->ready_lock);
+	raw_spin_lock_init(&rt->release_lock);
+	raw_spin_lock_init(&rt->tobe_lock);
 
 	rt->check_resched = check;
 	rt->release_jobs = release;
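
The rt_domain locks in particular must be raw because on_release_timer() is an hrtimer callback and runs in hard interrupt context, where sleeping is forbidden. A sketch of that constraint, reusing the illustrative example_lock from the earlier sketch:

#include <linux/hrtimer.h>
#include <linux/spinlock.h>

static enum hrtimer_restart example_timer_cb(struct hrtimer *timer)
{
	unsigned long flags;

	/* hard IRQ context: only locks that never sleep are legal here;
	 * on PREEMPT_RT a plain spinlock_t could sleep. */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... pull released jobs off the release queue ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);

	return HRTIMER_NORESTART;
}
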
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index e57a11afda16..f5b77080cc4f 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -28,6 +28,7 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -285,12 +286,12 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
 	unsigned long flags;
 
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 
 	__merge_ready(&cluster->domain, tasks);
 	check_for_preemptions(cluster);
 
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 }
 
 /* caller holds cedf_lock */
@@ -371,7 +372,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt, np, exists, blocks;
 	struct task_struct* next = NULL;
 
-	spin_lock(&cluster->lock);
+	raw_spin_lock(&cluster->lock);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -454,7 +455,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
-	spin_unlock(&cluster->lock);
+	raw_spin_unlock(&cluster->lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("cedf_lock released, next=0x%p\n", next);
@@ -496,7 +497,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
 
-	spin_lock_irqsave(&cluster->domain.ready_lock, flags);
+	raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -513,7 +514,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	cedf_job_arrival(t);
-	spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
+	raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
 }
 
 static void cedf_task_wake_up(struct task_struct *task)
@@ -526,7 +527,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 
 	cluster = task_cpu_cluster(task);
 
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -549,7 +550,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 		}
 	}
 	cedf_job_arrival(task);
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 }
 
 static void cedf_task_block(struct task_struct *t)
@@ -562,9 +563,9 @@ static void cedf_task_block(struct task_struct *t)
 	cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	unlink(t);
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -576,13 +577,13 @@ static void cedf_task_exit(struct task_struct * t)
 	cedf_domain_t *cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		cluster->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 6137c74729cb..c0c63eba70ce 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -297,12 +297,12 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 
 	__merge_ready(rt, tasks);
 	check_for_preemptions();
 
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 /* caller holds gsnedf_lock */
@@ -388,7 +388,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (gsnedf.release_master == entry->cpu)
 		return NULL;
 
-	spin_lock(&gsnedf_lock);
+	raw_spin_lock(&gsnedf_lock);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -471,7 +471,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
-	spin_unlock(&gsnedf_lock);
+	raw_spin_unlock(&gsnedf_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("gsnedf_lock released, next=0x%p\n", next);
@@ -509,7 +509,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 
 	TRACE("gsn edf: task new %d\n", t->pid);
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -532,7 +532,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	gsnedf_job_arrival(t);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 static void gsnedf_task_wake_up(struct task_struct *task)
@@ -542,7 +542,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -565,7 +565,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 		}
 	}
 	gsnedf_job_arrival(task);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 static void gsnedf_task_block(struct task_struct *t)
@@ -575,9 +575,9 @@ static void gsnedf_task_block(struct task_struct *t)
 	TRACE_TASK(t, "block at %llu\n", litmus_clock());
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	unlink(t);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -588,13 +588,13 @@ static void gsnedf_task_exit(struct task_struct * t)
 	unsigned long flags;
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
@@ -630,7 +630,7 @@ static void update_queue_position(struct task_struct *holder)
 			 gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
 	} else {
 		/* holder may be queued: first stop queue changes */
-		spin_lock(&gsnedf.release_lock);
+		raw_spin_lock(&gsnedf.release_lock);
 		if (is_queued(holder)) {
 			TRACE_TASK(holder, "%s: is queued\n",
 				   __FUNCTION__);
@@ -648,7 +648,7 @@ static void update_queue_position(struct task_struct *holder)
 			TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
 				   __FUNCTION__);
 		}
-		spin_unlock(&gsnedf.release_lock);
+		raw_spin_unlock(&gsnedf.release_lock);
 
 		/* If holder was enqueued in a release heap, then the following
 		 * preemption check is pointless, but we can't easily detect
@@ -682,7 +682,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 	if (edf_higher_prio(new_waiter, sem->hp.task)) {
 		TRACE_TASK(new_waiter, " boosts priority via %p\n", sem);
 		/* called with IRQs disabled */
-		spin_lock(&gsnedf_lock);
+		raw_spin_lock(&gsnedf_lock);
 		/* store new highest-priority task */
 		sem->hp.task = new_waiter;
 		if (sem->holder) {
@@ -694,7 +694,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 			sem->holder->rt_param.inh_task = new_waiter;
 			update_queue_position(sem->holder);
 		}
-		spin_unlock(&gsnedf_lock);
+		raw_spin_unlock(&gsnedf_lock);
 	}
 
 	return 0;
@@ -740,7 +740,7 @@ static long gsnedf_return_priority(struct pi_semaphore *sem)
 
 	if (t->rt_param.inh_task) {
 		/* interrupts already disabled by PI code */
-		spin_lock(&gsnedf_lock);
+		raw_spin_lock(&gsnedf_lock);
 
 		/* Reset inh_task to NULL. */
 		t->rt_param.inh_task = NULL;
@@ -748,7 +748,7 @@ static long gsnedf_return_priority(struct pi_semaphore *sem)
 		/* Check if rescheduling is necessary */
 		unlink(t);
 		gsnedf_job_arrival(t);
-		spin_unlock(&gsnedf_lock);
+		raw_spin_unlock(&gsnedf_lock);
 	}
 
 	return ret;
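
Note the plain raw_spin_lock(&gsnedf_lock) calls in the priority-inheritance paths above: as the retained comments say, interrupts are already disabled by the PI/FMLP code, so the _irqsave variant would be redundant. A sketch of the two-level pattern this relies on (illustrative names, my reading of the comments rather than code from the patch):

static void example_pi_boost(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* outer code disables IRQs once */
	raw_spin_lock(&example_lock);	/* inner code may then lock plainly */
	/* ... update inherited priority ... */
	raw_spin_unlock(&example_lock);
	local_irq_restore(flags);
}
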
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 889d300760f9..81ea464a81bc 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -62,7 +62,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 	 */
 	was_running = is_running(prev);
 	mb();
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 
 	/* Don't race with a concurrent switch. This could deadlock in
 	 * the case of cross or circular migrations. It's the job of
@@ -93,7 +93,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 			next = NULL;
 
 			/* bail out */
-			spin_lock(&rq->lock);
+			raw_spin_lock(&rq->lock);
 			return next;
 		}
 	}
@@ -141,7 +141,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 			next = NULL;
 		}
 		/* release the other CPU's runqueue, but keep ours */
-		spin_unlock(&other_rq->lock);
+		raw_spin_unlock(&other_rq->lock);
 	}
 	if (next) {
 		next->rt_param.stack_in_use = rq->cpu;
@@ -152,7 +152,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 }
 
 static void enqueue_task_litmus(struct rq *rq, struct task_struct *p,
-				int wakeup)
+				int wakeup, bool head)
 {
 	if (wakeup) {
 		sched_trace_task_resume(p);
@@ -245,7 +245,7 @@ static void prio_changed_litmus(struct rq *rq, struct task_struct *p,
 {
 }
 
-unsigned int get_rr_interval_litmus(struct task_struct *p)
+unsigned int get_rr_interval_litmus(struct rq *rq, struct task_struct *p)
 {
 	/* return infinity */
 	return 0;
@@ -263,31 +263,16 @@ static void set_curr_task_litmus(struct rq *rq)
 
 
 #ifdef CONFIG_SMP
-/* execve tries to rebalance task in this scheduling domain */
+/* execve tries to rebalance task in this scheduling domain.
+ * We don't care about the scheduling domain; can gets called from
+ * exec, fork, wakeup.
+ */
 static int select_task_rq_litmus(struct task_struct *p, int sd_flag, int flags)
 {
 	/* preemption is already disabled.
 	 * We don't want to change cpu here
 	 */
-	return smp_processor_id();
-}
-
-/* we don't repartition at runtime */
-
-static unsigned long
-load_balance_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		unsigned long max_load_move,
-		struct sched_domain *sd, enum cpu_idle_type idle,
-		int *all_pinned, int *this_best_prio)
-{
-	return 0;
-}
-
-static int
-move_one_task_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	return 0;
+	return task_cpu(p);
 }
 #endif
 
@@ -305,8 +290,6 @@ const struct sched_class litmus_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_litmus,
 
-	.load_balance		= load_balance_litmus,
-	.move_one_task		= move_one_task_litmus,
 	.pre_schedule		= pre_schedule_litmus,
 #endif
 
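
The sched_litmus.c hunks track sched_class interface churn in the upstream kernel (roughly the 2.6.32 to 2.6.34 window, judging from the hunks themselves): enqueue_task() gained a bool head argument, get_rr_interval() gained a struct rq * argument, select_task_rq() now reports the task's current CPU, and the .load_balance/.move_one_task hooks were dropped from the core scheduler, so their no-op implementations go away. For reference, the updated hook signatures as they appear in this patch:

static void enqueue_task_litmus(struct rq *rq, struct task_struct *p,
				int wakeup, bool head);
static unsigned int get_rr_interval_litmus(struct rq *rq,
					   struct task_struct *p);
static int select_task_rq_litmus(struct task_struct *p, int sd_flag,
				 int flags);
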
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index 2ea39223e7f0..ea77d3295290 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -12,6 +12,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/list.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -415,7 +416,7 @@ static void schedule_next_quantum(quanta_t time)
 	/* called with interrupts disabled */
 	PTRACE("--- Q %lu at %llu PRE-SPIN\n",
 	       time, litmus_clock());
-	spin_lock(&pfair_lock);
+	raw_spin_lock(&pfair_lock);
 	PTRACE("<<< Q %lu at %llu\n",
 	       time, litmus_clock());
 
@@ -448,7 +449,7 @@ static void schedule_next_quantum(quanta_t time)
 	}
 	PTRACE(">>> Q %lu at %llu\n",
 	       time, litmus_clock());
-	spin_unlock(&pfair_lock);
+	raw_spin_unlock(&pfair_lock);
 }
 
 static noinline void wait_for_quantum(quanta_t q, struct pfair_state* state)
@@ -564,7 +565,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 	int blocks;
 	struct task_struct* next = NULL;
 
-	spin_lock(&pfair_lock);
+	raw_spin_lock(&pfair_lock);
 
 	blocks = is_realtime(prev) && !is_running(prev);
 
@@ -577,7 +578,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 		tsk_rt(next)->scheduled_on = state->cpu;
 	}
 
-	spin_unlock(&pfair_lock);
+	raw_spin_unlock(&pfair_lock);
 
 	if (next)
 		TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n",
@@ -594,7 +595,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)
 
 	TRACE("pfair: task new %d state:%d\n", t->pid, t->state);
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 	if (running)
 		t->rt_param.scheduled_on = task_cpu(t);
 	else
@@ -605,7 +606,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)
 	pfair_add_release(t);
 	check_preempt(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 }
 
 static void pfair_task_wake_up(struct task_struct *t)
@@ -616,7 +617,7 @@ static void pfair_task_wake_up(struct task_struct *t)
 	TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n",
 		   litmus_clock(), cur_release(t), pfair_time);
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 
 	/* It is a little unclear how to deal with Pfair
 	 * tasks that block for a while and then wake. For now,
@@ -637,7 +638,7 @@ static void pfair_task_wake_up(struct task_struct *t)
 
 	check_preempt(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 	TRACE_TASK(t, "wake up done at %llu\n", litmus_clock());
 }
 
@@ -661,12 +662,12 @@ static void pfair_task_exit(struct task_struct * t)
 	 * might not be the same as the CPU that the PFAIR scheduler
 	 * has chosen for it.
 	 */
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 
 	TRACE_TASK(t, "RIP, state:%d\n", t->state);
 	drop_all_references(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 
 	kfree(t->rt_param.pfair);
 	t->rt_param.pfair = NULL;
@@ -680,7 +681,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
 
 	BUG_ON(!is_realtime(task));
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 	release_at(task, start);
 	release = time2quanta(start, CEIL);
 
@@ -698,7 +699,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
 	 */
 	tsk_pfair(task)->sporadic_release = 0;
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 }
 
 static void init_subtask(struct subtask* sub, unsigned long i,
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index 3767b30e610a..3543b7baff53 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -187,7 +187,7 @@ struct sched_plugin *litmus = &linux_sched_plugin;
 
 /* the list of registered scheduling plugins */
 static LIST_HEAD(sched_plugins);
-static DEFINE_SPINLOCK(sched_plugins_lock);
+static DEFINE_RAW_SPINLOCK(sched_plugins_lock);
 
 #define CHECK(func) {\
 	if (!plugin->func) \
@@ -220,9 +220,9 @@ int register_sched_plugin(struct sched_plugin* plugin)
 	if (!plugin->release_at)
 		plugin->release_at = release_at;
 
-	spin_lock(&sched_plugins_lock);
+	raw_spin_lock(&sched_plugins_lock);
 	list_add(&plugin->list, &sched_plugins);
-	spin_unlock(&sched_plugins_lock);
+	raw_spin_unlock(&sched_plugins_lock);
 
 	return 0;
 }
@@ -234,7 +234,7 @@ struct sched_plugin* find_sched_plugin(const char* name)
 	struct list_head *pos;
 	struct sched_plugin *plugin;
 
-	spin_lock(&sched_plugins_lock);
+	raw_spin_lock(&sched_plugins_lock);
 	list_for_each(pos, &sched_plugins) {
 		plugin = list_entry(pos, struct sched_plugin, list);
 		if (!strcmp(plugin->plugin_name, name))
@@ -243,7 +243,7 @@ struct sched_plugin* find_sched_plugin(const char* name)
 	plugin = NULL;
 
 out_unlock:
-	spin_unlock(&sched_plugins_lock);
+	raw_spin_unlock(&sched_plugins_lock);
 	return plugin;
 }
 
@@ -253,13 +253,13 @@ int print_sched_plugins(char* buf, int max)
 	struct list_head *pos;
 	struct sched_plugin *plugin;
 
-	spin_lock(&sched_plugins_lock);
+	raw_spin_lock(&sched_plugins_lock);
 	list_for_each(pos, &sched_plugins) {
 		plugin = list_entry(pos, struct sched_plugin, list);
 		count += snprintf(buf + count, max - count, "%s\n", plugin->plugin_name);
 		if (max - count <= 0)
 			break;
 	}
-	spin_unlock(&sched_plugins_lock);
+	raw_spin_unlock(&sched_plugins_lock);
 	return count;
 }
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index af0b30cb8b89..e50b27391d21 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -131,7 +131,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt,
 		np, exists, blocks, resched;
 
-	spin_lock(&pedf->slock);
+	raw_spin_lock(&pedf->slock);
 
 	/* sanity checking
 	 * differently from gedf, when a task exits (dead)
@@ -203,7 +203,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	}
 
 	pedf->scheduled = next;
-	spin_unlock(&pedf->slock);
+	raw_spin_unlock(&pedf->slock);
 
 	return next;
 }
@@ -226,7 +226,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* The task should be running in the queue, otherwise signal
 	 * code will try to wake it up with fatal consequences.
 	 */
-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	if (running) {
 		/* there shouldn't be anything else running at the time */
 		BUG_ON(pedf->scheduled);
@@ -236,7 +236,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 		/* maybe we have to reschedule */
 		preempt(pedf);
 	}
-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
 
 static void psnedf_task_wake_up(struct task_struct *task)
@@ -247,7 +247,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	lt_t now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	BUG_ON(is_queued(task));
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
@@ -272,7 +272,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	if (pedf->scheduled != task)
 		requeue(task, edf);
 
-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
 }
 
@@ -291,7 +291,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	psnedf_domain_t* pedf = task_pedf(t);
 	rt_domain_t* edf;
 
-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	if (is_queued(t)) {
 		/* dequeue */
 		edf = task_edf(t);
@@ -303,7 +303,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	TRACE_TASK(t, "RIP, now reschedule\n");
 
 	preempt(pedf);
-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
 
 #ifdef CONFIG_FMLP
@@ -323,7 +323,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 	edf = task_edf(new_waiter);
 
 	/* interrupts already disabled */
-	spin_lock(&pedf->slock);
+	raw_spin_lock(&pedf->slock);
 
 	/* store new highest-priority task */
 	sem->hp.cpu_task[cpu] = new_waiter;
@@ -348,7 +348,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 		if (edf_preemption_needed(edf, current))
 			preempt(pedf);
 
-		spin_unlock(&pedf->slock);
+		raw_spin_unlock(&pedf->slock);
 	}
 
 	return 0;
@@ -415,7 +415,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 	/* Always check for delayed preemptions that might have become
 	 * necessary due to non-preemptive execution.
 	 */
-	spin_lock(&pedf->slock);
+	raw_spin_lock(&pedf->slock);
 
 	/* Reset inh_task to NULL. */
 	current->rt_param.inh_task = NULL;
@@ -424,7 +424,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 	if (edf_preemption_needed(edf, current))
 		preempt(pedf);
 
-	spin_unlock(&pedf->slock);
+	raw_spin_unlock(&pedf->slock);
 
 
 	return ret;
diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c
index ad0b138d4b01..1fa2094b0495 100644
--- a/litmus/sched_trace.c
+++ b/litmus/sched_trace.c
@@ -5,6 +5,7 @@
 #include <linux/semaphore.h>
 
 #include <linux/fs.h>
+#include <linux/slab.h>
 #include <linux/miscdevice.h>
 #include <asm/uaccess.h>
 #include <linux/module.h>
@@ -32,7 +33,7 @@ typedef struct {
 	rwlock_t del_lock;
 
 	/* the buffer */
-	struct kfifo *kfifo;
+	struct kfifo kfifo;
 } ring_buffer_t;
 
 /* Main buffer structure */
@@ -49,25 +50,26 @@ typedef struct {
 void rb_init(ring_buffer_t* buf)
 {
 	rwlock_init(&buf->del_lock);
-	buf->kfifo = NULL;
 }
 
 int rb_alloc_buf(ring_buffer_t* buf, unsigned int size)
 {
 	unsigned long flags;
+	int ret = 0;
 
 	write_lock_irqsave(&buf->del_lock, flags);
 
-	buf->kfifo = kfifo_alloc(size, GFP_ATOMIC, NULL);
+	/* kfifo size must be a power of 2
+	 * atm kfifo alloc is automatically rounding the size
+	 */
+	ret = kfifo_alloc(&buf->kfifo, size, GFP_ATOMIC);
 
 	write_unlock_irqrestore(&buf->del_lock, flags);
 
-	if(IS_ERR(buf->kfifo)) {
+	if(ret < 0)
 		printk(KERN_ERR "kfifo_alloc failed\n");
-		return PTR_ERR(buf->kfifo);
-	}
 
-	return 0;
+	return ret;
 }
 
 int rb_free_buf(ring_buffer_t* buf)
@@ -76,10 +78,8 @@ int rb_free_buf(ring_buffer_t* buf)
 
 	write_lock_irqsave(&buf->del_lock, flags);
 
-	BUG_ON(!buf->kfifo);
-	kfifo_free(buf->kfifo);
-
-	buf->kfifo = NULL;
+	BUG_ON(!kfifo_initialized(&buf->kfifo));
+	kfifo_free(&buf->kfifo);
 
 	write_unlock_irqrestore(&buf->del_lock, flags);
 
@@ -98,12 +98,12 @@ int rb_put(ring_buffer_t* buf, char* mem, size_t len)
 
 	read_lock_irqsave(&buf->del_lock, flags);
 
-	if (!buf->kfifo) {
+	if (!kfifo_initialized(&buf->kfifo)) {
 		error = -ENODEV;
 		goto out;
 	}
 
-	if((__kfifo_put(buf->kfifo, mem, len)) < len) {
+	if((kfifo_in(&buf->kfifo, mem, len)) < len) {
 		error = -ENOMEM;
 		goto out;
 	}
@@ -120,12 +120,12 @@ int rb_get(ring_buffer_t* buf, char* mem, size_t len)
 	int error = 0;
 
 	read_lock_irqsave(&buf->del_lock, flags);
-	if (!buf->kfifo) {
+	if (!kfifo_initialized(&buf->kfifo)) {
 		error = -ENODEV;
 		goto out;
 	}
 
-	error = __kfifo_get(buf->kfifo, (unsigned char*)mem, len);
+	error = kfifo_out(&buf->kfifo, (unsigned char*)mem, len);
 
  out:
 	read_unlock_irqrestore(&buf->del_lock, flags);
@@ -135,7 +135,7 @@ int rb_get(ring_buffer_t* buf, char* mem, size_t len)
 /*
  * Device Driver management
  */
-static spinlock_t log_buffer_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_RAW_SPINLOCK(log_buffer_lock);
 static trace_buffer_t log_buffer;
 
 static void init_log_buffer(void)
@@ -170,12 +170,12 @@ void sched_trace_log_message(const char* fmt, ...)
 	buf = __get_cpu_var(fmt_buffer);
 	len = vscnprintf(buf, MSG_SIZE, fmt, args);
 
-	spin_lock(&log_buffer_lock);
+	raw_spin_lock(&log_buffer_lock);
 	/* Don't copy the trailing null byte, we don't want null bytes
 	 * in a text file.
 	 */
 	rb_put(&log_buffer.buf, buf, len);
-	spin_unlock(&log_buffer_lock);
+	raw_spin_unlock(&log_buffer_lock);
 
 	local_irq_restore(flags);
 	va_end(args);
@@ -265,8 +265,8 @@ static int log_open(struct inode *in, struct file *filp)
 	filp->private_data = tbuf;
 
 	printk(KERN_DEBUG
-	       "sched_trace kfifo at 0x%p with buffer starting at: 0x%p\n",
-	       tbuf->buf.kfifo, &((tbuf->buf.kfifo)->buffer));
+	       "sched_trace kfifo with buffer starting at: 0x%p\n",
+	       (tbuf->buf.kfifo).buffer);
 
 	/* override printk() */
 	trace_override++;
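
The sched_trace.c hunks port the ring buffer to the kfifo API reworked in 2.6.33: struct kfifo is now embedded by value, kfifo_alloc() initializes it in place and returns an error code instead of an ERR_PTR, and __kfifo_put()/__kfifo_get() become kfifo_in()/kfifo_out(). A self-contained sketch of the new usage (illustrative names, not from this patch):

#include <linux/kfifo.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct example_log {
	struct kfifo fifo;	/* embedded, no pointer indirection */
};

static int example_log_init(struct example_log *log, unsigned int size)
{
	/* size is rounded up to a power of two by kfifo_alloc() itself */
	return kfifo_alloc(&log->fifo, size, GFP_KERNEL);
}

static void example_log_put(struct example_log *log,
			    const char *msg, unsigned int len)
{
	/* kfifo_in() returns how many bytes were actually stored */
	if (kfifo_in(&log->fifo, msg, len) < len)
		printk(KERN_WARNING "example_log: buffer full\n");
}
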