path: root/kernel
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/lockdep.c      3
-rw-r--r--  kernel/mutex.c      125
-rw-r--r--  kernel/sched.c       27
-rw-r--r--  kernel/semaphore.c   13
-rw-r--r--  kernel/softirq.c    256
-rw-r--r--  kernel/workqueue.c   71
6 files changed, 469 insertions, 26 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 298c9276dfdb..3f2f54a49001 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -542,7 +542,7 @@ static void print_lock(struct held_lock *hlock)
 	print_ip_sym(hlock->acquire_ip);
 }
 
-static void lockdep_print_held_locks(struct task_struct *curr)
+void lockdep_print_held_locks(struct task_struct *curr)
 {
 	int i, depth = curr->lockdep_depth;
 
@@ -558,6 +558,7 @@ static void lockdep_print_held_locks(struct task_struct *curr)
 		print_lock(curr->held_locks + i);
 	}
 }
+EXPORT_SYMBOL(lockdep_print_held_locks);
 
 static void print_kernel_version(void)
 {
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d607ed5dd441..2f363b9bfc1f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -498,3 +498,128 @@ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
 	return 1;
 }
 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
+
+
+
+
+void mutex_lock_sfx(struct mutex *lock,
+		    side_effect_t pre, unsigned long pre_arg,
+		    side_effect_t post, unsigned long post_arg)
+{
+	long state = TASK_UNINTERRUPTIBLE;
+
+	struct task_struct *task = current;
+	struct mutex_waiter waiter;
+	unsigned long flags;
+
+	preempt_disable();
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+
+	spin_lock_mutex(&lock->wait_lock, flags);
+
+	if(pre)
+	{
+		if(unlikely(pre(pre_arg)))
+		{
+			/* note: bailing out here may confuse lockdep's CONFIG_PROVE_LOCKING */
+			spin_unlock_mutex(&lock->wait_lock, flags);
+			preempt_enable();
+			return;
+		}
+	}
+
+	debug_mutex_lock_common(lock, &waiter);
+	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+
+	/* add waiting tasks to the end of the waitqueue (FIFO): */
+	list_add_tail(&waiter.list, &lock->wait_list);
+	waiter.task = task;
+
+	if (atomic_xchg(&lock->count, -1) == 1)
+		goto done;
+
+	lock_contended(&lock->dep_map, _RET_IP_);
+
+	for (;;) {
+		/*
+		 * Let's try to take the lock again - this is needed even if
+		 * we get here for the first time (shortly after failing to
+		 * acquire the lock), to make sure that we get a wakeup once
+		 * it's unlocked. Later on, if we sleep, this is the
+		 * operation that gives us the lock. We xchg it to -1, so
+		 * that when we release the lock, we properly wake up the
+		 * other waiters:
+		 */
+		if (atomic_xchg(&lock->count, -1) == 1)
+			break;
+
+		__set_task_state(task, state);
+
+		/* didn't get the lock, go to sleep: */
+		spin_unlock_mutex(&lock->wait_lock, flags);
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+		spin_lock_mutex(&lock->wait_lock, flags);
+	}
+
+done:
+	lock_acquired(&lock->dep_map, _RET_IP_);
+	/* got the lock - rejoice! */
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_set_owner(lock);
+
+	/* set it to 0 if there are no waiters left: */
+	if (likely(list_empty(&lock->wait_list)))
+		atomic_set(&lock->count, 0);
+
+	if(post)
+		post(post_arg);
+
+	spin_unlock_mutex(&lock->wait_lock, flags);
+
+	debug_mutex_free_waiter(&waiter);
+	preempt_enable();
+}
+EXPORT_SYMBOL(mutex_lock_sfx);
+
+void mutex_unlock_sfx(struct mutex *lock,
+		      side_effect_t pre, unsigned long pre_arg,
+		      side_effect_t post, unsigned long post_arg)
+{
+	unsigned long flags;
+
+	spin_lock_mutex(&lock->wait_lock, flags);
+
+	if(pre)
+		pre(pre_arg);
+
+	//mutex_release(&lock->dep_map, nested, _RET_IP_);
+	mutex_release(&lock->dep_map, 1, _RET_IP_);
+	debug_mutex_unlock(lock);
+
+	/*
+	 * some architectures leave the lock unlocked in the fastpath failure
+	 * case, others need to leave it locked. In the latter case we have to
+	 * unlock it here
+	 */
+	if (__mutex_slowpath_needs_to_unlock())
+		atomic_set(&lock->count, 1);
+
+	if (!list_empty(&lock->wait_list)) {
+		/* get the first entry from the wait-list: */
+		struct mutex_waiter *waiter =
+				list_entry(lock->wait_list.next,
+					   struct mutex_waiter, list);
+
+		debug_mutex_wake_waiter(lock, waiter);
+
+		wake_up_process(waiter->task);
+	}
+
+	if(post)
+		post(post_arg);
+
+	spin_unlock_mutex(&lock->wait_lock, flags);
+}
+EXPORT_SYMBOL(mutex_unlock_sfx);
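
A hedged usage sketch for the new mutex_lock_sfx()/mutex_unlock_sfx() entry points. The callback names below are hypothetical, and side_effect_t is assumed to be roughly unsigned long (*)(unsigned long), as the calls above imply. Both callbacks run under lock->wait_lock with interrupts off, so they must not sleep; a non-zero return from a 'pre' callback in mutex_lock_sfx() aborts the acquisition.

/* Hypothetical illustration only -- not part of this patch. */
#include <linux/mutex.h>
#include <linux/atomic.h>

static atomic_t acquisitions = ATOMIC_INIT(0);

/* Runs atomically with the lock handoff; must not block. */
static unsigned long count_acquisition(unsigned long arg)
{
	atomic_inc((atomic_t *)arg);
	return 0;
}

static void locked_update(struct mutex *m)
{
	/* No 'pre' side effect; bump the counter as a 'post' side effect,
	 * i.e. under m->wait_lock, atomically with becoming the owner. */
	mutex_lock_sfx(m, NULL, 0, count_acquisition, (unsigned long)&acquisitions);

	/* ... critical section ... */

	/* Plain unlock with no extra side effects. */
	mutex_unlock_sfx(m, NULL, 0, NULL, 0);
}
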
diff --git a/kernel/sched.c b/kernel/sched.c
index baaca61bc3a3..f3d9a69a3777 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -83,6 +83,10 @@
 #include <litmus/sched_trace.h>
 #include <litmus/trace.h>
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+#include <litmus/litmus_softirq.h>
+#endif
+
 static void litmus_tick(struct rq*, struct task_struct*);
 
 #define CREATE_TRACE_POINTS
@@ -4305,6 +4309,7 @@ pick_next_task(struct rq *rq)
 	BUG(); /* the idle class will always have a runnable task */
 }
 
+
 /*
  * schedule() is the main scheduler function.
  */
@@ -4323,6 +4328,10 @@ need_resched:
 	rcu_note_context_switch(cpu);
 	prev = rq->curr;
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	release_klitirqd_lock(prev);
+#endif
+
 	/* LITMUS^RT: quickly re-evaluate the scheduling decision
 	 * if the previous one is no longer valid after CTX.
 	 */
@@ -4411,13 +4420,24 @@ litmus_need_resched_nonpreemptible:
 		goto litmus_need_resched_nonpreemptible;
 
 	preempt_enable_no_resched();
+
 	if (need_resched())
 		goto need_resched;
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	reacquire_klitirqd_lock(prev);
+#endif
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	litmus->run_tasklets(prev);
+#endif
+
 	srp_ceiling_block();
 }
 EXPORT_SYMBOL(schedule);
 
+
+
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
@@ -4561,6 +4581,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 	}
 }
 
+
 /**
  * __wake_up - wake up threads blocked on a waitqueue.
  * @q: the waitqueue
@@ -4747,6 +4768,12 @@ void __sched wait_for_completion(struct completion *x)
 }
 EXPORT_SYMBOL(wait_for_completion);
 
+void __sched __wait_for_completion_locked(struct completion *x)
+{
+	do_wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(__wait_for_completion_locked);
+
 /**
  * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
  * @x: holds the state of this particular completion
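
Unlike wait_for_completion(), the new __wait_for_completion_locked() calls do_wait_for_common() without taking x->wait.lock itself, so it appears intended for callers that already hold that spinlock (the common helper drops and re-takes it around the actual sleep). A hedged sketch of such a caller, with hypothetical names:

/* Hypothetical illustration only -- not part of this patch. */
#include <linux/completion.h>
#include <linux/spinlock.h>

static void wait_with_bookkeeping(struct completion *done, int *nwaiters)
{
	spin_lock_irq(&done->wait.lock);
	(*nwaiters)++;			/* updated atomically with the enqueue */
	__wait_for_completion_locked(done);
	(*nwaiters)--;			/* still under wait.lock on return */
	spin_unlock_irq(&done->wait.lock);
}
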
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 94a62c0d4ade..c947a046a6d7 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -33,11 +33,11 @@
 #include <linux/spinlock.h>
 #include <linux/ftrace.h>
 
-static noinline void __down(struct semaphore *sem);
+noinline void __down(struct semaphore *sem);
 static noinline int __down_interruptible(struct semaphore *sem);
 static noinline int __down_killable(struct semaphore *sem);
 static noinline int __down_timeout(struct semaphore *sem, long jiffies);
-static noinline void __up(struct semaphore *sem);
+noinline void __up(struct semaphore *sem);
 
 /**
  * down - acquire the semaphore
@@ -190,11 +190,13 @@ EXPORT_SYMBOL(up);
 
 /* Functions for the contended case */
 
+/*
 struct semaphore_waiter {
 	struct list_head list;
 	struct task_struct *task;
 	int up;
 };
+ */
 
 /*
  * Because this function is inlined, the 'state' parameter will be
@@ -233,10 +235,12 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 		return -EINTR;
 }
 
-static noinline void __sched __down(struct semaphore *sem)
+noinline void __sched __down(struct semaphore *sem)
 {
 	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
+EXPORT_SYMBOL(__down);
+
 
 static noinline int __sched __down_interruptible(struct semaphore *sem)
 {
@@ -253,7 +257,7 @@ static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
 	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
 }
 
-static noinline void __sched __up(struct semaphore *sem)
+noinline void __sched __up(struct semaphore *sem)
 {
 	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
 						struct semaphore_waiter, list);
@@ -261,3 +265,4 @@ static noinline void __sched __up(struct semaphore *sem)
 	waiter->up = 1;
 	wake_up_process(waiter->task);
 }
+EXPORT_SYMBOL(__up);
\ No newline at end of file
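
__down() and __up() are the contended-path helpers that down()/up() invoke while holding sem->lock with interrupts disabled; exporting them lets out-of-file code insert its own bookkeeping at that point. A hedged sketch with a hypothetical caller; depending on the base kernel version sem->lock may be a raw spinlock, in which case the raw_spin_lock_irqsave() variants apply:

/* Hypothetical illustration only -- not part of this patch. */
#include <linux/semaphore.h>
#include <linux/spinlock.h>

static unsigned long slowpath_hits;

static void down_counted(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0)) {
		sem->count--;
	} else {
		slowpath_hits++;	/* recorded atomically with the decision to sleep */
		__down(sem);		/* sleeps; releases and re-takes sem->lock internally */
	}
	spin_unlock_irqrestore(&sem->lock, flags);
}
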
diff --git a/kernel/softirq.c b/kernel/softirq.c
index fca82c32042b..48d6bde692a1 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,6 +29,15 @@
 #include <trace/events/irq.h>
 
 #include <asm/irq.h>
+
+#include <litmus/litmus.h>
+#include <litmus/sched_trace.h>
+
+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/nvidia_info.h>
+#include <litmus/trace.h>
+#endif
+
 /*
  - No shared variables, all the data are CPU local.
  - If a softirq needs serialization, let it serialize itself
@@ -67,7 +76,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
  * to the pending events, so lets the scheduler to balance
  * the softirq load for us.
  */
-static void wakeup_softirqd(void)
+void wakeup_softirqd(void)
 {
 	/* Interrupts are disabled: no need to stop preemption */
 	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
@@ -193,6 +202,7 @@ void local_bh_enable_ip(unsigned long ip)
 }
 EXPORT_SYMBOL(local_bh_enable_ip);
 
+
 /*
  * We restart softirq processing MAX_SOFTIRQ_RESTART times,
  * and we fall back to softirqd after that.
@@ -204,14 +214,15 @@ EXPORT_SYMBOL(local_bh_enable_ip);
  */
 #define MAX_SOFTIRQ_RESTART 10
 
-asmlinkage void __do_softirq(void)
+static void ____do_softirq(void)
 {
-	struct softirq_action *h;
 	__u32 pending;
-	int max_restart = MAX_SOFTIRQ_RESTART;
+
+	struct softirq_action *h;
 	int cpu;
 
 	pending = local_softirq_pending();
+
 	account_system_vtime(current);
 
 	__local_bh_disable((unsigned long)__builtin_return_address(0),
@@ -219,14 +230,13 @@ asmlinkage void __do_softirq(void)
 	lockdep_softirq_enter();
 
 	cpu = smp_processor_id();
-restart:
-	/* Reset the pending bitmask before enabling irqs */
-	set_softirq_pending(0);
 
+	set_softirq_pending(0);
+
 	local_irq_enable();
 
 	h = softirq_vec;
 
 	do {
 		if (pending & 1) {
 			unsigned int vec_nr = h - softirq_vec;
@@ -245,14 +255,23 @@ restart:
 				       prev_count, preempt_count());
 				preempt_count() = prev_count;
 			}
 
 			rcu_bh_qs(cpu);
 		}
 		h++;
 		pending >>= 1;
 	} while (pending);
 
 	local_irq_disable();
+}
+
+static void ___do_softirq(void)
+{
+	int max_restart = MAX_SOFTIRQ_RESTART;
+	__u32 pending;
+
+restart:
+	____do_softirq();
 
 	pending = local_softirq_pending();
 	if (pending && --max_restart)
@@ -260,9 +279,38 @@ restart:
 
 	if (pending)
 		wakeup_softirqd();
+}
 
+asmlinkage void __do_softirq(void)
+{
+#ifdef LITMUS_THREAD_ALL_SOFTIRQ
+	/* Skip straight to wakeup_softirqd() if we're using
+	   LITMUS_THREAD_ALL_SOFTIRQ (unless there's really high-priority stuff waiting). */
+	struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+
+	if(tsk)
+	{
+		__u32 pending = local_softirq_pending();
+		const __u32 high_prio_softirq = (1<<HI_SOFTIRQ) | (1<<TIMER_SOFTIRQ) | (1<<HRTIMER_SOFTIRQ);
+		if(pending && !(pending & high_prio_softirq))
+		{
+			wakeup_softirqd();
+			return;
+		}
+	}
+#endif
+
+	/*
+	 * 'immediate' softirq execution:
+	 */
+	__local_bh_disable((unsigned long)__builtin_return_address(0),
+				SOFTIRQ_OFFSET);
+	lockdep_softirq_enter();
+
+	___do_softirq();
+
 	lockdep_softirq_exit();
 
 	account_system_vtime(current);
 	__local_bh_enable(SOFTIRQ_OFFSET);
 }
@@ -402,8 +450,65 @@ struct tasklet_head
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
 
+
 void __tasklet_schedule(struct tasklet_struct *t)
 {
+#ifdef CONFIG_LITMUS_NVIDIA
+	if(is_nvidia_func(t->func))
+	{
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		// TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		//	 __FUNCTION__, nvidia_device,litmus_clock());
+
+		unsigned long flags;
+		struct task_struct* device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_device_owner(nvidia_device);
+
+		if(device_owner==NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if(is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
+					__FUNCTION__, nvidia_device,litmus_clock());
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+					__FUNCTION__,device_owner->pid,nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+
+				if(likely(_litmus_tasklet_schedule(t,nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+	}
+#endif
+
+	___tasklet_schedule(t);
+}
+EXPORT_SYMBOL(__tasklet_schedule);
+
+
+void ___tasklet_schedule(struct tasklet_struct *t)
+{
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -413,11 +518,65 @@ void __tasklet_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(___tasklet_schedule);
 
-EXPORT_SYMBOL(__tasklet_schedule);
 
 void __tasklet_hi_schedule(struct tasklet_struct *t)
 {
+#ifdef CONFIG_LITMUS_NVIDIA
+	if(is_nvidia_func(t->func))
+	{
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		// TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		//	__FUNCTION__, nvidia_device,litmus_clock());
+
+		unsigned long flags;
+		struct task_struct* device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_device_owner(nvidia_device);
+
+		if(device_owner==NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if( is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u\tat %llu\n",
+					__FUNCTION__, nvidia_device,litmus_clock());
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+					__FUNCTION__,device_owner->pid,nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+				if(likely(_litmus_tasklet_hi_schedule(t,nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+	}
+#endif
+
+	___tasklet_hi_schedule(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule);
+
+void ___tasklet_hi_schedule(struct tasklet_struct* t)
+{
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -427,19 +586,72 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(HI_SOFTIRQ);
 	local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL(__tasklet_hi_schedule);
+EXPORT_SYMBOL(___tasklet_hi_schedule);
 
 void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
 	BUG_ON(!irqs_disabled());
+#ifdef CONFIG_LITMUS_NVIDIA
+	if(is_nvidia_func(t->func))
+	{
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		// TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		//	__FUNCTION__, nvidia_device,litmus_clock());
+		unsigned long flags;
+		struct task_struct* device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_device_owner(nvidia_device);
+
+		if(device_owner==NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if(is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
+					__FUNCTION__, nvidia_device,litmus_clock());
+
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+					__FUNCTION__,device_owner->pid,nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+				if(likely(_litmus_tasklet_hi_schedule_first(t,nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+	}
+#endif
+
+	___tasklet_hi_schedule_first(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+void ___tasklet_hi_schedule_first(struct tasklet_struct* t)
+{
+	BUG_ON(!irqs_disabled());
 
 	t->next = __this_cpu_read(tasklet_hi_vec.head);
 	__this_cpu_write(tasklet_hi_vec.head, t);
 	__raise_softirq_irqoff(HI_SOFTIRQ);
 }
-
-EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+EXPORT_SYMBOL(___tasklet_hi_schedule_first);
 
 static void tasklet_action(struct softirq_action *a)
 {
@@ -495,6 +707,7 @@ static void tasklet_hi_action(struct softirq_action *a)
 			if (!atomic_read(&t->count)) {
 				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
 					BUG();
+
 				t->func(t->data);
 				tasklet_unlock(t);
 				continue;
@@ -518,8 +731,13 @@ void tasklet_init(struct tasklet_struct *t,
 	t->next = NULL;
 	t->state = 0;
 	atomic_set(&t->count, 0);
+
 	t->func = func;
 	t->data = data;
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	t->owner = NULL;
+#endif
 }
 
 EXPORT_SYMBOL(tasklet_init);
@@ -534,6 +752,7 @@ void tasklet_kill(struct tasklet_struct *t)
 			yield();
 		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
 	}
+
 	tasklet_unlock_wait(t);
 	clear_bit(TASKLET_STATE_SCHED, &t->state);
 }
@@ -808,6 +1027,7 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
 	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
 		if (*i == t) {
 			*i = t->next;
+
 			/* If this was the tail element, move the tail ptr */
 			if (*i == NULL)
 				per_cpu(tasklet_vec, cpu).tail = i;
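
The three NVIDIA interception blocks above share one shape: look up the device owner under the registry lock, hand the tasklet to LITMUS if the owner is a real-time task, otherwise clear t->owner and fall back to the regular path. Below is a hedged refactoring sketch of that shared shape; the helper name, the callback typedef, and the assumption that the _litmus_tasklet_*schedule() functions return non-zero on success (as the likely() checks above suggest) are all hypothetical:

#ifdef CONFIG_LITMUS_NVIDIA
/* Hypothetical refactoring sketch -- not part of this patch. */
typedef int (*litmus_tasklet_sched_fn)(struct tasklet_struct *t, u32 device);

static int nv_route_tasklet(struct tasklet_struct *t, litmus_tasklet_sched_fn litmus_sched)
{
	u32 device;
	unsigned long flags;
	struct task_struct *owner;
	int routed = 0;

	if (!is_nvidia_func(t->func))
		return 0;

	device = get_tasklet_nv_device_num(t);
	lock_nv_registry(device, &flags);

	owner = get_nv_device_owner(device);
	if (owner && is_realtime(owner)) {
		t->owner = owner;
		sched_trace_tasklet_release(t->owner);
		routed = litmus_sched(t, device);	/* non-zero: LITMUS accepted it */
	}
	if (!routed)
		t->owner = NULL;	/* caller falls back to the normal softirq path */

	unlock_nv_registry(device, &flags);
	return routed;
}
#endif

With such a helper, __tasklet_schedule() would reduce to calling ___tasklet_schedule(t) only when nv_route_tasklet(t, _litmus_tasklet_schedule) returns zero, and likewise for the _hi variants.
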
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0400553f0d04..2ceb7b43a045 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -44,6 +44,13 @@
 
 #include "workqueue_sched.h"
 
+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/litmus.h>
+#include <litmus/sched_trace.h>
+#include <litmus/nvidia_info.h>
+#endif
+
+
 enum {
 	/* global_cwq flags */
 	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
@@ -1047,9 +1054,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 		work_flags |= WORK_STRUCT_DELAYED;
 		worklist = &cwq->delayed_works;
 	}
-
 	insert_work(cwq, work, worklist, work_flags);
-
 	spin_unlock_irqrestore(&gcwq->lock, flags);
 }
 
@@ -2687,10 +2692,70 @@ EXPORT_SYMBOL(cancel_delayed_work_sync);
  */
 int schedule_work(struct work_struct *work)
 {
-	return queue_work(system_wq, work);
+#if 0
+#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
+	if(is_nvidia_func(work->func))
+	{
+		u32 nvidiaDevice = get_work_nv_device_num(work);
+
+		//1) Ask Litmus which task owns GPU <nvidiaDevice>. (API to be defined.)
+		unsigned long flags;
+		struct task_struct* device_owner;
+
+		lock_nv_registry(nvidiaDevice, &flags);
+
+		device_owner = get_nv_device_owner(nvidiaDevice);
+
+		//2) If there is an owner, set work->owner to the owner's task struct.
+		if(device_owner==NULL)
+		{
+			work->owner = NULL;
+			//TRACE("%s: the owner task of NVIDIA Device %u is NULL\n",__FUNCTION__,nvidiaDevice);
+		}
+		else
+		{
+			if( is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA work for device\t%u\tat\t%llu\n",
+					__FUNCTION__, nvidiaDevice,litmus_clock());
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+					__FUNCTION__,
+					device_owner->pid,
+					nvidiaDevice);
+
+				//3) Call litmus_schedule_work() and return (don't execute the rest
+				// of schedule_work()).
+				work->owner = device_owner;
+				sched_trace_work_release(work->owner);
+				if(likely(litmus_schedule_work(work, nvidiaDevice)))
+				{
+					unlock_nv_registry(nvidiaDevice, &flags);
+					return 1;
+				}
+				else
+				{
+					work->owner = NULL; /* fall through to normal work scheduling */
+				}
+			}
+			else
+			{
+				work->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidiaDevice, &flags);
+	}
+#endif
+#endif
+	return(__schedule_work(work));
 }
 EXPORT_SYMBOL(schedule_work);
 
+int __schedule_work(struct work_struct* work)
+{
+	return queue_work(system_wq, work);
+}
+EXPORT_SYMBOL(__schedule_work);
+
 /*
  * schedule_work_on - put work task on a specific cpu
  * @cpu: cpu to put the work task on
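
Since the NVIDIA branch above is compiled out with #if 0, schedule_work() still ends up in queue_work(system_wq, ...) via the new __schedule_work() helper, so existing callers are unaffected. A small hedged usage sketch with hypothetical names:

/* Hypothetical illustration only -- not part of this patch. */
#include <linux/workqueue.h>
#include <linux/kernel.h>

static void my_deferred_fn(struct work_struct *w)
{
	pr_info("deferred work ran\n");
}

static DECLARE_WORK(my_work, my_deferred_fn);

static void kick_deferred_work(void)
{
	schedule_work(&my_work);	/* same call site as before this patch */
}
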