Diffstat (limited to 'kernel')
 kernel/fork.c      |   6
 kernel/lockdep.c   |   7
 kernel/mutex.c     | 125
 kernel/sched.c     |  25
 kernel/semaphore.c |  13
 kernel/softirq.c   | 335
 kernel/workqueue.c |  71
 7 files changed, 516 insertions(+), 66 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 25c6111fe3a6..7491c4f5e78c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1370,8 +1370,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	write_unlock_irq(&tasklist_lock);
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
-	if (clone_flags & CLONE_THREAD)
+	if (clone_flags & CLONE_THREAD) {
 		threadgroup_fork_read_unlock(current);
+#ifdef CONFIG_REALTIME_AUX_TASKS
+		litmus_post_fork_thread(p);
+#endif
+	}
 	perf_event_fork(p);
 	return p;
 
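The hook above runs only for CLONE_THREAD children and compiles away entirely when CONFIG_REALTIME_AUX_TASKS is unset. A minimal sketch of the usual stub-header idiom that would let such call sites drop the inline #ifdef; the header name and the void return type are assumptions, not part of this patch:

/* Hypothetical header, e.g. <litmus/aux_tasks.h>: when aux-task support
 * is compiled out, a no-op stub lets callers skip the #ifdef entirely. */
#ifdef CONFIG_REALTIME_AUX_TASKS
void litmus_post_fork_thread(struct task_struct *p);
#else
static inline void litmus_post_fork_thread(struct task_struct *p)
{
	/* no aux-task bookkeeping without CONFIG_REALTIME_AUX_TASKS */
}
#endif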
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 298c9276dfdb..2bdcdc3691e5 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -542,7 +542,7 @@ static void print_lock(struct held_lock *hlock)
 	print_ip_sym(hlock->acquire_ip);
 }
 
-static void lockdep_print_held_locks(struct task_struct *curr)
+void lockdep_print_held_locks(struct task_struct *curr)
 {
 	int i, depth = curr->lockdep_depth;
 
@@ -558,6 +558,7 @@ static void lockdep_print_held_locks(struct task_struct *curr)
 		print_lock(curr->held_locks + i);
 	}
 }
+EXPORT_SYMBOL(lockdep_print_held_locks);
 
 static void print_kernel_version(void)
 {
@@ -583,6 +584,10 @@ static int static_obj(void *obj)
 		end   = (unsigned long) &_end,
 		addr  = (unsigned long) obj;
 
+	/* GLENN: short-circuit the check below and treat every object as
+	 * static, so lockdep never rejects dynamically allocated keys. */
+	return 1;
+
 	/*
 	 * static variable?
 	 */
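Two effects of the lockdep.c changes: lockdep_print_held_locks() becomes callable from modules, and static_obj() now unconditionally reports every object as static, which silences lockdep's "non-static key" complaints for dynamically allocated lock objects (a debugging shortcut, as the GLENN marker suggests, not a general fix). A minimal sketch of using the newly exported helper; the wrapper is hypothetical and assumes a matching extern declaration:

#include <linux/sched.h>

extern void lockdep_print_held_locks(struct task_struct *curr);

/* Hypothetical debug aid: dump the locks the current task holds
 * before it blocks on a LITMUS^RT synchronization primitive. */
static void debug_dump_held_locks(void)
{
#ifdef CONFIG_LOCKDEP
	lockdep_print_held_locks(current);
#endif
}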
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d607ed5dd441..2f363b9bfc1f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -498,3 +498,128 @@ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
 	return 1;
 }
 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
+
+void mutex_lock_sfx(struct mutex *lock,
+		    side_effect_t pre, unsigned long pre_arg,
+		    side_effect_t post, unsigned long post_arg)
+{
+	long state = TASK_UNINTERRUPTIBLE;
+	unsigned int subclass = 0;	/* no lockdep subclass */
+	unsigned long ip = _RET_IP_;	/* acquisition site for lockdep */
+
+	struct task_struct *task = current;
+	struct mutex_waiter waiter;
+	unsigned long flags;
+
+	preempt_disable();
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
+
+	spin_lock_mutex(&lock->wait_lock, flags);
+
+	if (pre) {
+		if (unlikely(pre(pre_arg))) {
+			/* Aborting here confuses lockdep's
+			 * CONFIG_PROVE_LOCKING: mutex_acquire() was
+			 * already recorded above. */
+			spin_unlock_mutex(&lock->wait_lock, flags);
+			preempt_enable();
+			return;
+		}
+	}
+
+	debug_mutex_lock_common(lock, &waiter);
+	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+
+	/* add waiting tasks to the end of the waitqueue (FIFO): */
+	list_add_tail(&waiter.list, &lock->wait_list);
+	waiter.task = task;
+
+	if (atomic_xchg(&lock->count, -1) == 1)
+		goto done;
+
+	lock_contended(&lock->dep_map, ip);
+
+	for (;;) {
+		/*
+		 * Let's try to take the lock again - this is needed even if
+		 * we get here for the first time (shortly after failing to
+		 * acquire the lock), to make sure that we get a wakeup once
+		 * it's unlocked. Later on, if we sleep, this is the
+		 * operation that gives us the lock. We xchg it to -1, so
+		 * that when we release the lock, we properly wake up the
+		 * other waiters:
+		 */
+		if (atomic_xchg(&lock->count, -1) == 1)
+			break;
+
+		__set_task_state(task, state);
+
+		/* didn't get the lock, go to sleep: */
+		spin_unlock_mutex(&lock->wait_lock, flags);
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+		spin_lock_mutex(&lock->wait_lock, flags);
+	}
+
+done:
+	lock_acquired(&lock->dep_map, ip);
+	/* got the lock - rejoice! */
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_set_owner(lock);
+
+	/* set it to 0 if there are no waiters left: */
+	if (likely(list_empty(&lock->wait_list)))
+		atomic_set(&lock->count, 0);
+
+	if (post)
+		post(post_arg);
+
+	spin_unlock_mutex(&lock->wait_lock, flags);
+
+	debug_mutex_free_waiter(&waiter);
+	preempt_enable();
+}
+EXPORT_SYMBOL(mutex_lock_sfx);
+
+void mutex_unlock_sfx(struct mutex *lock,
+		      side_effect_t pre, unsigned long pre_arg,
+		      side_effect_t post, unsigned long post_arg)
+{
+	unsigned long flags;
+
+	spin_lock_mutex(&lock->wait_lock, flags);
+
+	if (pre)
+		pre(pre_arg);
+
+	mutex_release(&lock->dep_map, 1, _RET_IP_);
+	debug_mutex_unlock(lock);
+
+	/*
+	 * some architectures leave the lock unlocked in the fastpath failure
+	 * case, others need to leave it locked. In the latter case we have to
+	 * unlock it here
+	 */
+	if (__mutex_slowpath_needs_to_unlock())
+		atomic_set(&lock->count, 1);
+
+	if (!list_empty(&lock->wait_list)) {
+		/* get the first entry from the wait-list: */
+		struct mutex_waiter *waiter =
+				list_entry(lock->wait_list.next,
+					   struct mutex_waiter, list);
+
+		debug_mutex_wake_waiter(lock, waiter);
+
+		wake_up_process(waiter->task);
+	}
+
+	if (post)
+		post(post_arg);
+
+	spin_unlock_mutex(&lock->wait_lock, flags);
+}
+EXPORT_SYMBOL(mutex_unlock_sfx);
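mutex_lock_sfx()/mutex_unlock_sfx() execute caller-supplied side effects while lock->wait_lock is held: the pre callback may veto the slow-path acquisition by returning non-zero, and post runs atomically with the count update. A usage sketch, assuming side_effect_t is typedef'd elsewhere in this patch as int (*)(unsigned long); the callbacks below are illustrative only:

/* Illustrative callbacks, assuming:
 *   typedef int (*side_effect_t)(unsigned long); */
static int note_blocking(unsigned long arg)
{
	/* runs under lock->wait_lock, before enqueueing as a waiter */
	return 0;	/* non-zero would abort mutex_lock_sfx() */
}

static int notify_release(unsigned long arg)
{
	/* runs under lock->wait_lock, atomically with the unlock */
	return 0;
}

static void example(struct mutex *m)
{
	mutex_lock_sfx(m, note_blocking, 0, NULL, 0);
	/* ... critical section ... */
	mutex_unlock_sfx(m, notify_release, 0, NULL, 0);
}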
diff --git a/kernel/sched.c b/kernel/sched.c
index c4b6bd5151ff..e29a97235f26 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -86,6 +86,10 @@
 #include <litmus/sched_trace.h>
 #include <litmus/trace.h>
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+#include <litmus/litmus_softirq.h>
+#endif
+
 static void litmus_tick(struct rq*, struct task_struct*);
 
 /*
@@ -2703,8 +2707,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	unsigned long flags;
 	int cpu, success = 0;
 
-	if (is_realtime(p))
+	if (is_realtime(p)) {
 		TRACE_TASK(p, "try_to_wake_up() state:%d\n", p->state);
+	}
 
 	smp_wmb();
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -4319,6 +4325,7 @@ pick_next_task(struct rq *rq)
 	BUG(); /* the idle class will always have a runnable task */
 }
 
+
 /*
  * schedule() is the main scheduler function.
  */
@@ -4434,10 +4441,16 @@ litmus_need_resched_nonpreemptible:
 	if (need_resched())
 		goto need_resched;
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	litmus->run_tasklets(prev);
+#endif
+
 	srp_ceiling_block();
 }
 EXPORT_SYMBOL(schedule);
 
+
+
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
@@ -4581,6 +4594,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 	}
 }
 
+
 /**
  * __wake_up - wake up threads blocked on a waitqueue.
  * @q: the waitqueue
@@ -4756,6 +4770,12 @@ void __sched wait_for_completion(struct completion *x)
 }
 EXPORT_SYMBOL(wait_for_completion);
 
+void __sched __wait_for_completion_locked(struct completion *x)
+{
+	do_wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(__wait_for_completion_locked);
+
 /**
  * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
  * @x: holds the state of this particular completion
@@ -5258,6 +5278,9 @@ recheck:
 		if (retval)
 			return retval;
 	}
+	else if (p->policy == SCHED_LITMUS) {
+		litmus_pre_exit_task(p);
+	}
 
 	/*
 	 * make sure no PI-waiters arrive (or leave) while we are
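__wait_for_completion_locked() is wait_for_completion() minus the internal spin_lock_irq()/spin_unlock_irq() pair, so a caller that already holds x->wait.lock can sleep on the completion without double-acquiring it. A sketch of the implied calling convention, assuming do_wait_for_common()'s stock behavior of dropping and retaking x->wait.lock around schedule():

#include <linux/completion.h>

extern void __wait_for_completion_locked(struct completion *x);

static void example_locked_wait(struct completion *x)
{
	spin_lock_irq(&x->wait.lock);
	/* ... examine or update state guarded by x->wait.lock ... */
	__wait_for_completion_locked(x);	/* sleeps; the lock is dropped
						 * and retaken internally */
	spin_unlock_irq(&x->wait.lock);
}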
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 94a62c0d4ade..c947a046a6d7 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -33,11 +33,11 @@
 #include <linux/spinlock.h>
 #include <linux/ftrace.h>
 
-static noinline void __down(struct semaphore *sem);
+noinline void __down(struct semaphore *sem);
 static noinline int __down_interruptible(struct semaphore *sem);
 static noinline int __down_killable(struct semaphore *sem);
 static noinline int __down_timeout(struct semaphore *sem, long jiffies);
-static noinline void __up(struct semaphore *sem);
+noinline void __up(struct semaphore *sem);
 
 /**
  * down - acquire the semaphore
@@ -190,11 +190,13 @@ EXPORT_SYMBOL(up);
 
 /* Functions for the contended case */
 
+/*
 struct semaphore_waiter {
 	struct list_head list;
 	struct task_struct *task;
 	int up;
 };
+ */
 
 /*
  * Because this function is inlined, the 'state' parameter will be
@@ -233,10 +235,12 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 	return -EINTR;
 }
 
-static noinline void __sched __down(struct semaphore *sem)
+noinline void __sched __down(struct semaphore *sem)
 {
 	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
+EXPORT_SYMBOL(__down);
+
 
 static noinline int __sched __down_interruptible(struct semaphore *sem)
 {
@@ -253,7 +257,7 @@ static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
 	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
 }
 
-static noinline void __sched __up(struct semaphore *sem)
+noinline void __sched __up(struct semaphore *sem)
 {
 	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
 						struct semaphore_waiter, list);
@@ -261,3 +265,4 @@ static noinline void __sched __up(struct semaphore *sem)
 	waiter->up = 1;
 	wake_up_process(waiter->task);
 }
+EXPORT_SYMBOL(__up);
\ No newline at end of file
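Un-static'ing and exporting __down()/__up() lets code outside kernel/semaphore.c compose custom semaphore variants on top of the stock wait-list machinery; both are called with sem->lock held, exactly as down()/up() call them (the local struct semaphore_waiter definition is commented out, presumably because it moved to a header). A hedged sketch of a down() variant following that contract; down_checked() and its callback are hypothetical:

#include <linux/semaphore.h>

extern void __down(struct semaphore *sem);	/* sleeps; enter with sem->lock held */

/* Hypothetical: run a caller-specific hook under sem->lock before
 * possibly blocking, mirroring the structure of down(). */
static void down_checked(struct semaphore *sem, void (*pre)(void))
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (pre)
		pre();		/* e.g. record blocking for a scheduler */
	if (likely(sem->count > 0))
		sem->count--;
	else
		__down(sem);	/* enqueue and sleep, as in down() */
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}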
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 2f2df08df395..ea438a8635d0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,6 +29,15 @@
 #include <trace/events/irq.h>
 
 #include <asm/irq.h>
+
+#include <litmus/litmus.h>
+#include <litmus/sched_trace.h>
+
+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/nvidia_info.h>
+#include <litmus/trace.h>
+#endif
+
 /*
  - No shared variables, all the data are CPU local.
  - If a softirq needs serialization, let it serialize itself
@@ -67,7 +76,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
  * to the pending events, so lets the scheduler to balance
  * the softirq load for us.
  */
-static void wakeup_softirqd(void)
+void wakeup_softirqd(void)
 {
 	/* Interrupts are disabled: no need to stop preemption */
 	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
@@ -193,6 +202,7 @@ void local_bh_enable_ip(unsigned long ip)
 }
 EXPORT_SYMBOL(local_bh_enable_ip);
 
+
 /*
  * We restart softirq processing MAX_SOFTIRQ_RESTART times,
  * and we fall back to softirqd after that.
@@ -206,10 +216,10 @@ EXPORT_SYMBOL(local_bh_enable_ip);
 
 asmlinkage void __do_softirq(void)
 {
 	struct softirq_action *h;
 	__u32 pending;
 	int max_restart = MAX_SOFTIRQ_RESTART;
 	int cpu;
 
 	/* Mark Feather-Trace samples as "disturbed". */
 	ft_irq_fired();
@@ -217,57 +227,57 @@ asmlinkage void __do_softirq(void)
 	pending = local_softirq_pending();
 	account_system_vtime(current);
 
 	__local_bh_disable((unsigned long)__builtin_return_address(0),
 				SOFTIRQ_OFFSET);
 	lockdep_softirq_enter();
 
 	cpu = smp_processor_id();
 restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
 
 	local_irq_enable();
 
 	h = softirq_vec;
 
 	do {
 		if (pending & 1) {
 			unsigned int vec_nr = h - softirq_vec;
 			int prev_count = preempt_count();
 
 			kstat_incr_softirqs_this_cpu(vec_nr);
 
 			trace_softirq_entry(vec_nr);
 			h->action(h);
 			trace_softirq_exit(vec_nr);
 			if (unlikely(prev_count != preempt_count())) {
 				printk(KERN_ERR "huh, entered softirq %u %s %p"
 				       "with preempt_count %08x,"
 				       " exited with %08x?\n", vec_nr,
 				       softirq_to_name[vec_nr], h->action,
 				       prev_count, preempt_count());
 				preempt_count() = prev_count;
 			}
 
 			rcu_bh_qs(cpu);
 		}
 		h++;
 		pending >>= 1;
 	} while (pending);
 
 	local_irq_disable();
 
 	pending = local_softirq_pending();
 	if (pending && --max_restart)
 		goto restart;
 
 	if (pending)
 		wakeup_softirqd();
 
 	lockdep_softirq_exit();
 
 	account_system_vtime(current);
 	__local_bh_enable(SOFTIRQ_OFFSET);
 }
272 282
273#ifndef __ARCH_HAS_DO_SOFTIRQ 283#ifndef __ARCH_HAS_DO_SOFTIRQ
@@ -396,17 +406,103 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
396/* 406/*
397 * Tasklets 407 * Tasklets
398 */ 408 */
399struct tasklet_head
400{
401 struct tasklet_struct *head;
402 struct tasklet_struct **tail;
403};
404 409
405static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); 410static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
406static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); 411static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
407 412
413#ifdef CONFIG_LITMUS_NVIDIA
414static int __do_nv_now(struct tasklet_struct* tasklet)
415{
416 int success = 1;
417
418 if(tasklet_trylock(tasklet)) {
419 if (!atomic_read(&tasklet->count)) {
420 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) {
421 BUG();
422 }
423 tasklet->func(tasklet->data);
424 tasklet_unlock(tasklet);
425 }
426 else {
427 success = 0;
428 }
429
430 tasklet_unlock(tasklet);
431 }
432 else {
433 success = 0;
434 }
435
436 return success;
437}
438#endif
439
440
408void __tasklet_schedule(struct tasklet_struct *t) 441void __tasklet_schedule(struct tasklet_struct *t)
409{ 442{
443#ifdef CONFIG_LITMUS_NVIDIA
444 if(is_nvidia_func(t->func))
445 {
446#if 1
447 // do nvidia tasklets right away and return
448 if(__do_nv_now(t))
449 return;
450#else
451 u32 nvidia_device = get_tasklet_nv_device_num(t);
452 // TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
453 // __FUNCTION__, nvidia_device,litmus_clock());
454
455 unsigned long flags;
456 struct task_struct* device_owner;
457
458 lock_nv_registry(nvidia_device, &flags);
459
460 device_owner = get_nv_max_device_owner(nvidia_device);
461
462 if(device_owner==NULL)
463 {
464 t->owner = NULL;
465 }
466 else
467 {
468 if(is_realtime(device_owner))
469 {
470 TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
471 __FUNCTION__, nvidia_device,litmus_clock());
472 TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
473 __FUNCTION__,device_owner->pid,nvidia_device);
474
475 t->owner = device_owner;
476 sched_trace_tasklet_release(t->owner);
477
478 if(likely(_litmus_tasklet_schedule(t,nvidia_device)))
479 {
480 unlock_nv_registry(nvidia_device, &flags);
481 return;
482 }
483 else
484 {
485 t->owner = NULL; /* fall through to normal scheduling */
486 }
487 }
488 else
489 {
490 t->owner = NULL;
491 }
492 }
493 unlock_nv_registry(nvidia_device, &flags);
494#endif
495 }
496
497#endif
498
499 ___tasklet_schedule(t);
500}
501EXPORT_SYMBOL(__tasklet_schedule);
502
503
504void ___tasklet_schedule(struct tasklet_struct *t)
505{
410 unsigned long flags; 506 unsigned long flags;
411 507
412 local_irq_save(flags); 508 local_irq_save(flags);
@@ -416,11 +512,71 @@ void __tasklet_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(___tasklet_schedule);
 
-EXPORT_SYMBOL(__tasklet_schedule);
 
 void __tasklet_hi_schedule(struct tasklet_struct *t)
 {
+#ifdef CONFIG_LITMUS_NVIDIA
+	if (is_nvidia_func(t->func)) {
+#if 1
+		/* run NVIDIA tasklets right away and return */
+		if (__do_nv_now(t))
+			return;
+#else
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		unsigned long flags;
+		struct task_struct *device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_max_device_owner(nvidia_device);
+
+		if (device_owner == NULL) {
+			t->owner = NULL;
+		} else if (is_realtime(device_owner)) {
+			TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
+			      __FUNCTION__, nvidia_device, litmus_clock());
+			TRACE("%s: owner task %d of NVIDIA device %u is a RT task\n",
+			      __FUNCTION__, device_owner->pid, nvidia_device);
+
+			t->owner = device_owner;
+			sched_trace_tasklet_release(t->owner);
+
+			if (likely(_litmus_tasklet_hi_schedule(t, nvidia_device))) {
+				unlock_nv_registry(nvidia_device, &flags);
+				return;
+			}
+			t->owner = NULL; /* fall through to normal scheduling */
+		} else {
+			t->owner = NULL;
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+#endif
+	}
+#endif
+
+	___tasklet_hi_schedule(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule);
+
+void ___tasklet_hi_schedule(struct tasklet_struct *t)
+{
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -430,19 +586,78 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(HI_SOFTIRQ);
 	local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL(__tasklet_hi_schedule);
+EXPORT_SYMBOL(___tasklet_hi_schedule);
 
 void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
 	BUG_ON(!irqs_disabled());
+#ifdef CONFIG_LITMUS_NVIDIA
+	if (is_nvidia_func(t->func)) {
+#if 1
+		/* run NVIDIA tasklets right away and return */
+		if (__do_nv_now(t))
+			return;
+#else
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		unsigned long flags;
+		struct task_struct *device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_max_device_owner(nvidia_device);
+
+		if (device_owner == NULL) {
+			t->owner = NULL;
+		} else if (is_realtime(device_owner)) {
+			TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
+			      __FUNCTION__, nvidia_device, litmus_clock());
+			TRACE("%s: owner task %d of NVIDIA device %u is a RT task\n",
+			      __FUNCTION__, device_owner->pid, nvidia_device);
+
+			t->owner = device_owner;
+			sched_trace_tasklet_release(t->owner);
+
+			if (likely(_litmus_tasklet_hi_schedule_first(t, nvidia_device))) {
+				unlock_nv_registry(nvidia_device, &flags);
+				return;
+			}
+			t->owner = NULL; /* fall through to normal scheduling */
+		} else {
+			t->owner = NULL;
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+#endif
+	}
+#endif
+
+	___tasklet_hi_schedule_first(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+void ___tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+	BUG_ON(!irqs_disabled());
 
 	t->next = __this_cpu_read(tasklet_hi_vec.head);
 	__this_cpu_write(tasklet_hi_vec.head, t);
 	__raise_softirq_irqoff(HI_SOFTIRQ);
 }
-
-EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+EXPORT_SYMBOL(___tasklet_hi_schedule_first);
 
 static void tasklet_action(struct softirq_action *a)
 {
@@ -498,6 +713,7 @@ static void tasklet_hi_action(struct softirq_action *a)
 		if (!atomic_read(&t->count)) {
 			if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
 				BUG();
+
 			t->func(t->data);
 			tasklet_unlock(t);
 			continue;
@@ -521,8 +737,13 @@ void tasklet_init(struct tasklet_struct *t,
 	t->next = NULL;
 	t->state = 0;
 	atomic_set(&t->count, 0);
+
 	t->func = func;
 	t->data = data;
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	t->owner = NULL;
+#endif
 }
 
 EXPORT_SYMBOL(tasklet_init);
@@ -537,6 +758,7 @@ void tasklet_kill(struct tasklet_struct *t)
 			yield();
 		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
 	}
+
 	tasklet_unlock_wait(t);
 	clear_bit(TASKLET_STATE_SCHED, &t->state);
 }
@@ -811,6 +1033,7 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
 	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
 		if (*i == t) {
 			*i = t->next;
+
 			/* If this was the tail element, move the tail ptr */
 			if (*i == NULL)
 				per_cpu(tasklet_vec, cpu).tail = i;
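The resulting structure: every public tasklet entry point first checks is_nvidia_func(); with the live "#if 1" branch, a recognized NVIDIA tasklet is executed immediately via __do_nv_now() in the caller's context, and everything else falls through to the renamed stock paths ___tasklet_schedule(), ___tasklet_hi_schedule(), and ___tasklet_hi_schedule_first(). A sketch of what that implies for a driver; the ISR and handler below are hypothetical:

#include <linux/interrupt.h>

static void nv_bottom_half(unsigned long data);		/* hypothetical */
static DECLARE_TASKLET(nv_bh, nv_bottom_half, 0);

static irqreturn_t nv_isr(int irq, void *dev)		/* hypothetical */
{
	/* If nv_bottom_half is recognized by is_nvidia_func(), this call
	 * may execute the handler body synchronously rather than raising
	 * TASKLET_SOFTIRQ. */
	tasklet_schedule(&nv_bh);
	return IRQ_HANDLED;
}

Running the handler synchronously trades softirq deferral for more predictable interference, but it also means the handler can now run in hardirq context rather than softirq context.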
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0400553f0d04..6b59d59ce3cf 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -44,6 +44,13 @@
 
 #include "workqueue_sched.h"
 
+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/litmus.h>
+#include <litmus/sched_trace.h>
+#include <litmus/nvidia_info.h>
+#endif
+
+
 enum {
 	/* global_cwq flags */
 	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
@@ -1047,9 +1054,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 		work_flags |= WORK_STRUCT_DELAYED;
 		worklist = &cwq->delayed_works;
 	}
-
 	insert_work(cwq, work, worklist, work_flags);
-
 	spin_unlock_irqrestore(&gcwq->lock, flags);
 }
 
@@ -2687,10 +2692,70 @@ EXPORT_SYMBOL(cancel_delayed_work_sync);
  */
 int schedule_work(struct work_struct *work)
 {
-	return queue_work(system_wq, work);
+#if 0
+#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
+	if (is_nvidia_func(work->func)) {
+		u32 nvidiaDevice = get_work_nv_device_num(work);
+
+		/* 1) Ask LITMUS which task owns GPU <nvidiaDevice>
+		 *    (API to be defined). */
+		unsigned long flags;
+		struct task_struct *device_owner;
+
+		lock_nv_registry(nvidiaDevice, &flags);
+
+		device_owner = get_nv_max_device_owner(nvidiaDevice);
+
+		/* 2) If there is an owner, set work->owner to the owner's
+		 *    task struct. */
+		if (device_owner == NULL) {
+			work->owner = NULL;
+		} else if (is_realtime(device_owner)) {
+			TRACE("%s: Handling NVIDIA work for device %u at %llu\n",
+			      __FUNCTION__, nvidiaDevice, litmus_clock());
+			TRACE("%s: owner task %d of NVIDIA device %u is a RT task\n",
+			      __FUNCTION__, device_owner->pid, nvidiaDevice);
+
+			/* 3) Call litmus_schedule_work() and return (don't
+			 *    execute the rest of schedule_work()). */
+			work->owner = device_owner;
+			sched_trace_work_release(work->owner);
+			if (likely(litmus_schedule_work(work, nvidiaDevice))) {
+				unlock_nv_registry(nvidiaDevice, &flags);
+				return 1;
+			}
+			work->owner = NULL; /* fall through to normal work scheduling */
+		} else {
+			work->owner = NULL;
+		}
+		unlock_nv_registry(nvidiaDevice, &flags);
+	}
+#endif
+#endif
+	return __schedule_work(work);
 }
 EXPORT_SYMBOL(schedule_work);
 
+int __schedule_work(struct work_struct *work)
+{
+	return queue_work(system_wq, work);
+}
+EXPORT_SYMBOL(__schedule_work);
+
 /*
  * schedule_work_on - put work task on a specific cpu
  * @cpu: cpu to put the work task on
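schedule_work() gets the same split as the tasklet paths: the stock enqueue survives as __schedule_work(), and the outer "#if 0" compiles the NVIDIA redirection out entirely, so every call currently falls straight through to system_wq. A small usage sketch; the work item below is hypothetical:

#include <linux/workqueue.h>

static void flush_logs(struct work_struct *w)
{
	/* ... deferred processing ... */
}
static DECLARE_WORK(log_work, flush_logs);

static void kick_logger(void)
{
	/* Normal callers keep using schedule_work(); code that must never
	 * be redirected to a LITMUS^RT worker could call __schedule_work()
	 * directly, which always queues on system_wq. */
	schedule_work(&log_work);
}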