Diffstat (limited to 'kernel')
-rw-r--r--	kernel/lockdep.c	3
-rw-r--r--	kernel/mutex.c	141
-rw-r--r--	kernel/sched.c	23
-rw-r--r--	kernel/semaphore.c	13
-rw-r--r--	kernel/softirq.c	278
-rw-r--r--	kernel/workqueue.c	70
6 files changed, 493 insertions(+), 35 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f2852a510232..ebff2cf715c5 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -530,7 +530,7 @@ static void print_lock(struct held_lock *hlock)
 	print_ip_sym(hlock->acquire_ip);
 }
 
-static void lockdep_print_held_locks(struct task_struct *curr)
+void lockdep_print_held_locks(struct task_struct *curr)
 {
 	int i, depth = curr->lockdep_depth;
 
@@ -546,6 +546,7 @@ static void lockdep_print_held_locks(struct task_struct *curr)
 		print_lock(curr->held_locks + i);
 	}
 }
+EXPORT_SYMBOL(lockdep_print_held_locks);
 
 static void print_kernel_version(void)
 {
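Note on the lockdep.c hunks above: the patch drops the static qualifier and exports lockdep_print_held_locks() so code outside lockdep.c can dump a task's held locks. A minimal sketch of a caller, assuming the matching extern declaration is added to a header (that declaration is not part of this diff) and CONFIG_LOCKDEP is enabled:

#include <linux/sched.h>

/* hypothetical declaration; the real one would live in a lockdep header */
extern void lockdep_print_held_locks(struct task_struct *curr);

/* hypothetical debug helper: dump the locks held by the current task */
static void debug_dump_held_locks(void)
{
	lockdep_print_held_locks(current);
}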
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 200407c1502f..435685ecd068 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -496,3 +496,144 @@ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
 	return 1;
 }
 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
+
+
+
+
+
+
+
+
+/* based on __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_) */
+
+void mutex_lock_sfx(struct mutex *lock,
+		    side_effect_t pre, unsigned long pre_arg,
+		    side_effect_t post, unsigned long post_arg)
+{
+	long state = TASK_UNINTERRUPTIBLE;
+	unsigned int subclass = 0;
+	unsigned long ip = _RET_IP_;
+
+
+	struct task_struct *task = current;
+	struct mutex_waiter waiter;
+	unsigned long flags;
+
+	preempt_disable();
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
+
+	spin_lock_mutex(&lock->wait_lock, flags);
+
+	if (pre)
+	{
+		if (unlikely(pre(pre_arg)))
+		{
+			/* note: bailing out here may confuse lockdep's CONFIG_PROVE_LOCKING */
+			spin_unlock_mutex(&lock->wait_lock, flags);
+			preempt_enable();
+			return;
+		}
+	}
+
+	debug_mutex_lock_common(lock, &waiter);
+	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+
+	/* add waiting tasks to the end of the waitqueue (FIFO): */
+	list_add_tail(&waiter.list, &lock->wait_list);
+	waiter.task = task;
+
+	if (atomic_xchg(&lock->count, -1) == 1)
+		goto done;
+
+	lock_contended(&lock->dep_map, ip);
+
+	for (;;) {
+		/*
+		 * Let's try to take the lock again - this is needed even if
+		 * we get here for the first time (shortly after failing to
+		 * acquire the lock), to make sure that we get a wakeup once
+		 * it's unlocked. Later on, if we sleep, this is the
+		 * operation that gives us the lock. We xchg it to -1, so
+		 * that when we release the lock, we properly wake up the
+		 * other waiters:
+		 */
+		if (atomic_xchg(&lock->count, -1) == 1)
+			break;
+
+		__set_task_state(task, state);
+
+		/* didn't get the lock, go to sleep: */
+		spin_unlock_mutex(&lock->wait_lock, flags);
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+		spin_lock_mutex(&lock->wait_lock, flags);
+	}
+
+done:
+	lock_acquired(&lock->dep_map, ip);
+	/* got the lock - rejoice! */
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_set_owner(lock);
+
+	/* set it to 0 if there are no waiters left: */
+	if (likely(list_empty(&lock->wait_list)))
+		atomic_set(&lock->count, 0);
+
+	if (post)
+		post(post_arg);
+
+	spin_unlock_mutex(&lock->wait_lock, flags);
+
+	debug_mutex_free_waiter(&waiter);
+	preempt_enable();
+
+	/* return 0; */
+}
+EXPORT_SYMBOL(mutex_lock_sfx);
+
+
+
+/* based on __mutex_unlock_common_slowpath(lock_count, 1) */
+
+void mutex_unlock_sfx(struct mutex *lock,
+		      side_effect_t pre, unsigned long pre_arg,
+		      side_effect_t post, unsigned long post_arg)
+{
+	/* struct mutex *lock = container_of(lock_count, struct mutex, count); */
+	unsigned long flags;
+
+	spin_lock_mutex(&lock->wait_lock, flags);
+
+	if (pre)
+		pre(pre_arg);
+
+	/* mutex_release(&lock->dep_map, nested, _RET_IP_); */
+	mutex_release(&lock->dep_map, 1, _RET_IP_);
+	debug_mutex_unlock(lock);
+
+	/*
+	 * some architectures leave the lock unlocked in the fastpath failure
+	 * case, others need to leave it locked. In the latter case we have to
+	 * unlock it here
+	 */
+	if (__mutex_slowpath_needs_to_unlock())
+		atomic_set(&lock->count, 1);
+
+	if (!list_empty(&lock->wait_list)) {
+		/* get the first entry from the wait-list: */
+		struct mutex_waiter *waiter =
+				list_entry(lock->wait_list.next,
+					   struct mutex_waiter, list);
+
+		debug_mutex_wake_waiter(lock, waiter);
+
+		wake_up_process(waiter->task);
+	}
+
+	if (post)
+		post(post_arg);
+
+	spin_unlock_mutex(&lock->wait_lock, flags);
+}
+EXPORT_SYMBOL(mutex_unlock_sfx);
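The two functions above mirror the mutex slowpath but run caller-supplied callbacks under lock->wait_lock: pre can veto the slowpath acquisition in mutex_lock_sfx() and runs before the wakeup in mutex_unlock_sfx(), while post runs just before wait_lock is released. A minimal caller sketch, assuming side_effect_t is a function pointer of the form int (*)(unsigned long) defined in a LITMUS header that is not part of this diff:

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

/* pre callback for mutex_lock_sfx(): a non-zero return makes the
 * lock routine bail out early without queueing the caller */
static int demo_pre(unsigned long arg)
{
	return 0;	/* always proceed */
}

/* post callback: runs while lock->wait_lock is still held */
static int demo_post(unsigned long arg)
{
	return 0;
}

static void demo_critical_section(void)
{
	mutex_lock_sfx(&demo_lock, demo_pre, 0, demo_post, 0);
	/* ... critical section ... */
	mutex_unlock_sfx(&demo_lock, demo_pre, 0, demo_post, 0);
}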
diff --git a/kernel/sched.c b/kernel/sched.c
index c5d775079027..3162605ffc91 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -82,6 +82,10 @@
 #include <litmus/sched_trace.h>
 #include <litmus/trace.h>
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+#include <litmus/litmus_softirq.h>
+#endif
+
 static void litmus_tick(struct rq*, struct task_struct*);
 
 #define CREATE_TRACE_POINTS
@@ -3789,6 +3793,7 @@ pick_next_task(struct rq *rq)
 	}
 }
 
+
 /*
  * schedule() is the main scheduler function.
  */
@@ -3807,6 +3812,10 @@ need_resched:
 	rcu_note_context_switch(cpu);
 	prev = rq->curr;
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	release_klitirqd_lock(prev);
+#endif
+
 	release_kernel_lock(prev);
 need_resched_nonpreemptible:
 	TS_SCHED_START;
@@ -3882,15 +3891,20 @@ need_resched_nonpreemptible:
 
 	if (sched_state_validate_switch() || unlikely(reacquire_kernel_lock(prev)))
 		goto need_resched_nonpreemptible;
 
 	preempt_enable_no_resched();
+
 	if (need_resched())
 		goto need_resched;
 
+	reacquire_klitirqd_lock(prev);
+
 	srp_ceiling_block();
 }
 EXPORT_SYMBOL(schedule);
 
+
+
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
  * Look out! "owner" is an entirely speculative pointer
@@ -4051,6 +4065,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 	}
 }
 
+
 /**
  * __wake_up - wake up threads blocked on a waitqueue.
  * @q: the waitqueue
@@ -4236,6 +4251,12 @@ void __sched wait_for_completion(struct completion *x)
 }
 EXPORT_SYMBOL(wait_for_completion);
 
+void __sched __wait_for_completion_locked(struct completion *x)
+{
+	do_wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(__wait_for_completion_locked);
+
 /**
  * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
  * @x: holds the state of this particular completion
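The new __wait_for_completion_locked() above wraps do_wait_for_common() without taking x->wait.lock itself, unlike wait_for_completion(). An illustrative caller therefore has to hold that lock around the call; a hedged sketch:

#include <linux/completion.h>

/* illustrative only: the caller enters with x->wait.lock held;
 * do_wait_for_common() drops and retakes it around schedule() */
static void demo_wait_locked(struct completion *x)
{
	spin_lock_irq(&x->wait.lock);
	/* ... inspect or update state protected by the same lock ... */
	__wait_for_completion_locked(x);
	spin_unlock_irq(&x->wait.lock);
}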
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 94a62c0d4ade..c947a046a6d7 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -33,11 +33,11 @@
 #include <linux/spinlock.h>
 #include <linux/ftrace.h>
 
-static noinline void __down(struct semaphore *sem);
+noinline void __down(struct semaphore *sem);
 static noinline int __down_interruptible(struct semaphore *sem);
 static noinline int __down_killable(struct semaphore *sem);
 static noinline int __down_timeout(struct semaphore *sem, long jiffies);
-static noinline void __up(struct semaphore *sem);
+noinline void __up(struct semaphore *sem);
 
 /**
  * down - acquire the semaphore
@@ -190,11 +190,13 @@ EXPORT_SYMBOL(up);
 
 /* Functions for the contended case */
 
+/*
 struct semaphore_waiter {
 	struct list_head list;
 	struct task_struct *task;
 	int up;
 };
+ */
 
 /*
  * Because this function is inlined, the 'state' parameter will be
@@ -233,10 +235,12 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 	return -EINTR;
 }
 
-static noinline void __sched __down(struct semaphore *sem)
+noinline void __sched __down(struct semaphore *sem)
 {
 	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
+EXPORT_SYMBOL(__down);
+
 
 static noinline int __sched __down_interruptible(struct semaphore *sem)
 {
@@ -253,7 +257,7 @@ static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
 	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
 }
 
-static noinline void __sched __up(struct semaphore *sem)
+noinline void __sched __up(struct semaphore *sem)
 {
 	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
 						struct semaphore_waiter, list);
@@ -261,3 +265,4 @@ static noinline void __sched __up(struct semaphore *sem)
 	waiter->up = 1;
 	wake_up_process(waiter->task);
 }
+EXPORT_SYMBOL(__up);
\ No newline at end of file
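With __down() and __up() un-static'd and exported above, code outside semaphore.c can reuse the contended-path helpers. Both expect to be called with sem->lock held and interrupts disabled, exactly as down() and up() call them. A hedged sketch of such a caller, assuming matching extern declarations exist in a header (not shown in this diff):

#include <linux/semaphore.h>

extern void __down(struct semaphore *sem);
extern void __up(struct semaphore *sem);

/* mirrors what down() does internally; __down() sleeps and drops/retakes
 * sem->lock around the actual schedule_timeout() */
static void demo_down(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		__down(sem);
	spin_unlock_irqrestore(&sem->lock, flags);
}

/* mirrors up(): wake the first waiter if there is one */
static void demo_up(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up(sem);
	spin_unlock_irqrestore(&sem->lock, flags);
}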
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 07b4f1b1a73a..be4b8fab3637 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,6 +29,14 @@
 #include <trace/events/irq.h>
 
 #include <asm/irq.h>
+
+#include <litmus/litmus.h>
+#include <litmus/sched_trace.h>
+
+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/nvidia_info.h>
+#endif
+
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -54,7 +62,7 @@ EXPORT_SYMBOL(irq_stat);
 
 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
-static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+static DEFINE_PER_CPU(struct task_struct *, ksoftirqd) = NULL;
 
 char *softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
@@ -177,6 +185,7 @@ void local_bh_enable_ip(unsigned long ip)
 }
 EXPORT_SYMBOL(local_bh_enable_ip);
 
+
 /*
  * We restart softirq processing MAX_SOFTIRQ_RESTART times,
  * and we fall back to softirqd after that.
@@ -187,34 +196,30 @@ EXPORT_SYMBOL(local_bh_enable_ip);
  * should not be able to lock up the box.
  */
 #define MAX_SOFTIRQ_RESTART 10
-
-asmlinkage void __do_softirq(void)
+static void ____do_softirq(void)
 {
-	struct softirq_action *h;
 	__u32 pending;
-	int max_restart = MAX_SOFTIRQ_RESTART;
+
+	struct softirq_action *h;
 	int cpu;
 
 	pending = local_softirq_pending();
+
 	account_system_vtime(current);
 
-	__local_bh_disable((unsigned long)__builtin_return_address(0));
-	lockdep_softirq_enter();
-
 	cpu = smp_processor_id();
-restart:
-	/* Reset the pending bitmask before enabling irqs */
-	set_softirq_pending(0);
 
+	set_softirq_pending(0);
+
 	local_irq_enable();
 
 	h = softirq_vec;
 
 	do {
 		if (pending & 1) {
 			int prev_count = preempt_count();
 			kstat_incr_softirqs_this_cpu(h - softirq_vec);
 
 			trace_softirq_entry(h, softirq_vec);
 			h->action(h);
 			trace_softirq_exit(h, softirq_vec);
@@ -226,26 +231,70 @@ restart:
 				       h->action, prev_count, preempt_count());
 				preempt_count() = prev_count;
 			}
 
 			rcu_bh_qs(cpu);
 		}
 		h++;
 		pending >>= 1;
 	} while (pending);
 
 	local_irq_disable();
+}
+
+static void ___do_softirq(void)
+{
+	__u32 pending;
+
+	/* struct softirq_action *h; */
+	int max_restart = MAX_SOFTIRQ_RESTART;
+	/* int cpu; */
+
+	pending = local_softirq_pending();
+
+restart:
+	____do_softirq();
 
 	pending = local_softirq_pending();
 	if (pending && --max_restart)
 		goto restart;
 
 	if (pending)
+	{
 		wakeup_softirqd();
+	}
+}
 
+asmlinkage void __do_softirq(void)
+{
+#ifdef LITMUS_THREAD_ALL_SOFTIRQ
+	/* Skip straight to wakeup_softirqd() if we're using
+	   LITMUS_THREAD_ALL_SOFTIRQ (unless really high-priority work is waiting). */
+	struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+
+	if (tsk)
+	{
+		__u32 pending = local_softirq_pending();
+		const __u32 high_prio_softirq = (1<<HI_SOFTIRQ) | (1<<TIMER_SOFTIRQ) | (1<<HRTIMER_SOFTIRQ);
+		if (pending && !(pending & high_prio_softirq))
+		{
+			wakeup_softirqd();
+			return;
+		}
+	}
+#endif
+
+	/*
+	 * 'immediate' softirq execution:
+	 */
+	__local_bh_disable((unsigned long)__builtin_return_address(0));
+	lockdep_softirq_enter();
+
+	___do_softirq();
+
 	lockdep_softirq_exit();
 
 	account_system_vtime(current);
 	_local_bh_enable();
 }
 
 #ifndef __ARCH_HAS_DO_SOFTIRQ
@@ -357,8 +406,64 @@ struct tasklet_head
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
 
+
 void __tasklet_schedule(struct tasklet_struct *t)
 {
+#ifdef CONFIG_LITMUS_NVIDIA
+	if (is_nvidia_func(t->func))
+	{
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		/* TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		   __FUNCTION__, nvidia_device, litmus_clock()); */
+
+		unsigned long flags;
+		struct task_struct *device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_device_owner(nvidia_device);
+
+		if (device_owner == NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if (is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
+				      __FUNCTION__, nvidia_device, litmus_clock());
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+				      __FUNCTION__, device_owner->pid, nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+				if (likely(_litmus_tasklet_schedule(t, nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+	}
+#endif
+
+	___tasklet_schedule(t);
+}
+EXPORT_SYMBOL(__tasklet_schedule);
+
+
+void ___tasklet_schedule(struct tasklet_struct *t)
+{
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -368,11 +473,65 @@ void __tasklet_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(___tasklet_schedule);
 
-EXPORT_SYMBOL(__tasklet_schedule);
 
 void __tasklet_hi_schedule(struct tasklet_struct *t)
 {
+#ifdef CONFIG_LITMUS_NVIDIA
+	if (is_nvidia_func(t->func))
+	{
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		/* TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		   __FUNCTION__, nvidia_device, litmus_clock()); */
+
+		unsigned long flags;
+		struct task_struct *device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_device_owner(nvidia_device);
+
+		if (device_owner == NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if (is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u\tat %llu\n",
+				      __FUNCTION__, nvidia_device, litmus_clock());
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+				      __FUNCTION__, device_owner->pid, nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+				if (likely(_litmus_tasklet_hi_schedule(t, nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+	}
+#endif
+
+	___tasklet_hi_schedule(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule);
+
+void ___tasklet_hi_schedule(struct tasklet_struct *t)
+{
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -382,19 +541,72 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(HI_SOFTIRQ);
 	local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL(__tasklet_hi_schedule);
+EXPORT_SYMBOL(___tasklet_hi_schedule);
 
 void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
 	BUG_ON(!irqs_disabled());
+#ifdef CONFIG_LITMUS_NVIDIA
+	if (is_nvidia_func(t->func))
+	{
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		/* TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		   __FUNCTION__, nvidia_device, litmus_clock()); */
+		unsigned long flags;
+		struct task_struct *device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_device_owner(nvidia_device);
+
+		if (device_owner == NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if (is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
+				      __FUNCTION__, nvidia_device, litmus_clock());
+
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+				      __FUNCTION__, device_owner->pid, nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+				if (likely(_litmus_tasklet_hi_schedule_first(t, nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+	}
+#endif
+
+	___tasklet_hi_schedule_first(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+void ___tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+	BUG_ON(!irqs_disabled());
 
 	t->next = __get_cpu_var(tasklet_hi_vec).head;
 	__get_cpu_var(tasklet_hi_vec).head = t;
 	__raise_softirq_irqoff(HI_SOFTIRQ);
 }
-
-EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+EXPORT_SYMBOL(___tasklet_hi_schedule_first);
 
 static void tasklet_action(struct softirq_action *a)
 {
@@ -450,6 +662,7 @@ static void tasklet_hi_action(struct softirq_action *a)
 		if (!atomic_read(&t->count)) {
 			if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
 				BUG();
+
 			t->func(t->data);
 			tasklet_unlock(t);
 			continue;
@@ -473,8 +686,13 @@ void tasklet_init(struct tasklet_struct *t,
 	t->next = NULL;
 	t->state = 0;
 	atomic_set(&t->count, 0);
+
 	t->func = func;
 	t->data = data;
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	t->owner = NULL;
+#endif
 }
 
 EXPORT_SYMBOL(tasklet_init);
@@ -489,6 +707,7 @@ void tasklet_kill(struct tasklet_struct *t)
 			yield();
 		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
 	}
+
 	tasklet_unlock_wait(t);
 	clear_bit(TASKLET_STATE_SCHED, &t->state);
 }
@@ -694,6 +913,8 @@ void __init softirq_init(void)
 
 static int run_ksoftirqd(void * __bind_cpu)
 {
+	unsigned long flags;
+
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	while (!kthread_should_stop()) {
@@ -712,7 +933,11 @@ static int run_ksoftirqd(void * __bind_cpu)
 			   don't process */
 			if (cpu_is_offline((long)__bind_cpu))
 				goto wait_to_die;
-			do_softirq();
+
+			local_irq_save(flags);
+			____do_softirq();
+			local_irq_restore(flags);
+
 			preempt_enable_no_resched();
 			cond_resched();
 			preempt_disable();
@@ -760,6 +985,7 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
 	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
 		if (*i == t) {
 			*i = t->next;
+
 			/* If this was the tail element, move the tail ptr */
 			if (*i == NULL)
 				per_cpu(tasklet_vec, cpu).tail = i;
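For tasklets whose handler is not an NVIDIA function, none of the new branches above fire: __tasklet_schedule() falls straight through to ___tasklet_schedule(), which is the old body. An illustrative, hypothetical driver fragment that is unaffected by the routing:

#include <linux/interrupt.h>

static void demo_bh(unsigned long data)
{
	/* deferred bottom-half work */
}
static DECLARE_TASKLET(demo_tasklet, demo_bh, 0);

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	/* tasklet_schedule() ends up in __tasklet_schedule(); since demo_bh
	 * is not an NVIDIA function, the CONFIG_LITMUS_NVIDIA block is
	 * skipped and ___tasklet_schedule() queues it exactly as before */
	tasklet_schedule(&demo_tasklet);
	return IRQ_HANDLED;
}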
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f77afd939229..8139208eaee1 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -47,6 +47,13 @@
 
 #include "workqueue_sched.h"
 
+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/litmus.h>
+#include <litmus/sched_trace.h>
+#include <litmus/nvidia_info.h>
+#endif
+
+
 enum {
 	/* global_cwq flags */
 	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
@@ -1010,9 +1017,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 		work_flags |= WORK_STRUCT_DELAYED;
 		worklist = &cwq->delayed_works;
 	}
-
 	insert_work(cwq, work, worklist, work_flags);
-
 	spin_unlock_irqrestore(&gcwq->lock, flags);
 }
 
@@ -2526,10 +2531,69 @@ EXPORT_SYMBOL(cancel_delayed_work_sync);
  */
 int schedule_work(struct work_struct *work)
 {
-	return queue_work(system_wq, work);
+#ifdef CONFIG_LITMUS_NVIDIA
+	if (is_nvidia_func(work->func))
+	{
+		u32 nvidiaDevice = get_work_nv_device_num(work);
+
+		/* 1) Ask Litmus which task owns GPU <nvidiaDevice>. (API to be defined.) */
+		unsigned long flags;
+		struct task_struct *device_owner;
+
+		lock_nv_registry(nvidiaDevice, &flags);
+
+		device_owner = get_nv_device_owner(nvidiaDevice);
+
+		/* 2) If there is an owner, set work->owner to the owner's task struct. */
+		if (device_owner == NULL)
+		{
+			work->owner = NULL;
+			/* TRACE("%s: the owner task of NVIDIA Device %u is NULL\n", __FUNCTION__, nvidiaDevice); */
+		}
+		else
+		{
+			if (is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA work for device\t%u\tat\t%llu\n",
+				      __FUNCTION__, nvidiaDevice, litmus_clock());
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+				      __FUNCTION__,
+				      device_owner->pid,
+				      nvidiaDevice);
+
+				/* 3) Call litmus_schedule_work() and return (don't
+				      execute the rest of schedule_work()). */
+				work->owner = device_owner;
+				sched_trace_work_release(work->owner);
+				if (likely(litmus_schedule_work(work, nvidiaDevice)))
+				{
+					unlock_nv_registry(nvidiaDevice, &flags);
+					return 1;
+				}
+				else
+				{
+					work->owner = NULL; /* fall through to normal work scheduling */
+				}
+			}
+			else
+			{
+				work->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidiaDevice, &flags);
+	}
+#endif
+
+	return __schedule_work(work);
 }
 EXPORT_SYMBOL(schedule_work);
 
+int __schedule_work(struct work_struct *work)
+{
+	return queue_work(system_wq, work);
+}
+EXPORT_SYMBOL(__schedule_work);
+
 /*
  * schedule_work_on - put work task on a specific cpu
  * @cpu: cpu to put the work task on
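The same routing idea applies to work items: schedule_work() only diverts NVIDIA-related work to LITMUS, and everything else reaches __schedule_work(), which is the original queue_work(system_wq, ...) body. A small illustrative user, unaffected by the patch:

#include <linux/workqueue.h>

static void demo_work_fn(struct work_struct *w)
{
	/* deferred processing in process context */
}
static DECLARE_WORK(demo_work, demo_work_fn);

static void demo_defer(void)
{
	/* non-NVIDIA work: falls through to __schedule_work(), i.e.
	 * queue_work(system_wq, &demo_work), exactly as before the patch */
	schedule_work(&demo_work);
}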