Diffstat (limited to 'kernel')
-rw-r--r--  kernel/lockdep.c   |   7
-rw-r--r--  kernel/mutex.c     | 125
-rw-r--r--  kernel/sched.c     |  27
-rw-r--r--  kernel/semaphore.c |  13
-rw-r--r--  kernel/softirq.c   | 322
-rw-r--r--  kernel/workqueue.c |  71
6 files changed, 504 insertions(+), 61 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 298c9276dfdb..2bdcdc3691e5 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -542,7 +542,7 @@ static void print_lock(struct held_lock *hlock)
 	print_ip_sym(hlock->acquire_ip);
 }
 
-static void lockdep_print_held_locks(struct task_struct *curr)
+void lockdep_print_held_locks(struct task_struct *curr)
 {
 	int i, depth = curr->lockdep_depth;
 
@@ -558,6 +558,7 @@ static void lockdep_print_held_locks(struct task_struct *curr)
 		print_lock(curr->held_locks + i);
 	}
 }
+EXPORT_SYMBOL(lockdep_print_held_locks);
 
 static void print_kernel_version(void)
 {
@@ -583,6 +584,10 @@ static int static_obj(void *obj)
 		end = (unsigned long) &_end,
 		addr = (unsigned long) obj;
 
+	// GLENN
+	return 1;
+
+
 	/*
 	 * static variable?
 	 */
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d607ed5dd441..2f363b9bfc1f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -498,3 +498,128 @@ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
 	return 1;
 }
 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
+
+
+
+
+void mutex_lock_sfx(struct mutex *lock,
+		    side_effect_t pre, unsigned long pre_arg,
+		    side_effect_t post, unsigned long post_arg)
+{
+	long state = TASK_UNINTERRUPTIBLE;
+
+	struct task_struct *task = current;
+	struct mutex_waiter waiter;
+	unsigned long flags;
+
+	preempt_disable();
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
+
+	spin_lock_mutex(&lock->wait_lock, flags);
+
+	if(pre)
+	{
+		if(unlikely(pre(pre_arg)))
+		{
+			// this will fuck with lockdep's CONFIG_PROVE_LOCKING...
+			spin_unlock_mutex(&lock->wait_lock, flags);
+			preempt_enable();
+			return;
+		}
+	}
+
+	debug_mutex_lock_common(lock, &waiter);
+	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+
+	/* add waiting tasks to the end of the waitqueue (FIFO): */
+	list_add_tail(&waiter.list, &lock->wait_list);
+	waiter.task = task;
+
+	if (atomic_xchg(&lock->count, -1) == 1)
+		goto done;
+
+	lock_contended(&lock->dep_map, ip);
+
+	for (;;) {
+		/*
+		 * Lets try to take the lock again - this is needed even if
+		 * we get here for the first time (shortly after failing to
+		 * acquire the lock), to make sure that we get a wakeup once
+		 * it's unlocked. Later on, if we sleep, this is the
+		 * operation that gives us the lock. We xchg it to -1, so
+		 * that when we release the lock, we properly wake up the
+		 * other waiters:
+		 */
+		if (atomic_xchg(&lock->count, -1) == 1)
+			break;
+
+		__set_task_state(task, state);
+
+		/* didnt get the lock, go to sleep: */
+		spin_unlock_mutex(&lock->wait_lock, flags);
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+		spin_lock_mutex(&lock->wait_lock, flags);
+	}
+
+done:
+	lock_acquired(&lock->dep_map, ip);
+	/* got the lock - rejoice! */
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_set_owner(lock);
+
+	/* set it to 0 if there are no waiters left: */
+	if (likely(list_empty(&lock->wait_list)))
+		atomic_set(&lock->count, 0);
+
+	if(post)
+		post(post_arg);
+
+	spin_unlock_mutex(&lock->wait_lock, flags);
+
+	debug_mutex_free_waiter(&waiter);
+	preempt_enable();
+}
+EXPORT_SYMBOL(mutex_lock_sfx);
+
+void mutex_unlock_sfx(struct mutex *lock,
+		      side_effect_t pre, unsigned long pre_arg,
+		      side_effect_t post, unsigned long post_arg)
+{
+	unsigned long flags;
+
+	spin_lock_mutex(&lock->wait_lock, flags);
+
+	if(pre)
+		pre(pre_arg);
+
+	//mutex_release(&lock->dep_map, nested, _RET_IP_);
+	mutex_release(&lock->dep_map, 1, _RET_IP_);
+	debug_mutex_unlock(lock);
+
+	/*
+	 * some architectures leave the lock unlocked in the fastpath failure
+	 * case, others need to leave it locked. In the later case we have to
+	 * unlock it here
+	 */
+	if (__mutex_slowpath_needs_to_unlock())
+		atomic_set(&lock->count, 1);
+
+	if (!list_empty(&lock->wait_list)) {
+		/* get the first entry from the wait-list: */
+		struct mutex_waiter *waiter =
+				list_entry(lock->wait_list.next,
+					   struct mutex_waiter, list);
+
+		debug_mutex_wake_waiter(lock, waiter);
+
+		wake_up_process(waiter->task);
+	}
+
+	if(post)
+		post(post_arg);
+
+	spin_unlock_mutex(&lock->wait_lock, flags);
+}
+EXPORT_SYMBOL(mutex_unlock_sfx);
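The two new entry points wrap the standard mutex slow path with optional callbacks that run while lock->wait_lock is held: pre can veto the acquisition in mutex_lock_sfx() (a nonzero return aborts), and post runs after the lock state has changed but before wait_lock is dropped. A minimal caller sketch follows; it assumes side_effect_t is a callback typedef roughly of the form unsigned long (*)(unsigned long) declared in the LITMUS headers (not shown in this diff), and the callback and lock names are illustrative only.

/* Hypothetical usage of mutex_lock_sfx()/mutex_unlock_sfx(); pre_cb, post_cb
 * and demo_lock are illustration names, not symbols added by this patch. */
static unsigned long pre_cb(unsigned long arg)
{
	/* called with demo_lock.wait_lock held; nonzero would abort the lock */
	return 0;
}

static unsigned long post_cb(unsigned long arg)
{
	/* called with demo_lock.wait_lock still held, after the state change */
	return 0;
}

static DEFINE_MUTEX(demo_lock);

static void demo(void)
{
	mutex_lock_sfx(&demo_lock, pre_cb, 0, post_cb, 0);
	/* ... critical section ... */
	mutex_unlock_sfx(&demo_lock, NULL, 0, NULL, 0);	/* both callbacks are optional */
}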
diff --git a/kernel/sched.c b/kernel/sched.c
index baaca61bc3a3..f3d9a69a3777 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -83,6 +83,10 @@
 #include <litmus/sched_trace.h>
 #include <litmus/trace.h>
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+#include <litmus/litmus_softirq.h>
+#endif
+
 static void litmus_tick(struct rq*, struct task_struct*);
 
 #define CREATE_TRACE_POINTS
@@ -4305,6 +4309,7 @@ pick_next_task(struct rq *rq)
 	BUG(); /* the idle class will always have a runnable task */
 }
 
+
 /*
  * schedule() is the main scheduler function.
  */
@@ -4323,6 +4328,10 @@ need_resched:
 	rcu_note_context_switch(cpu);
 	prev = rq->curr;
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	release_klitirqd_lock(prev);
+#endif
+
 	/* LITMUS^RT: quickly re-evaluate the scheduling decision
 	 * if the previous one is no longer valid after CTX.
 	 */
@@ -4411,13 +4420,24 @@ litmus_need_resched_nonpreemptible:
 		goto litmus_need_resched_nonpreemptible;
 
 	preempt_enable_no_resched();
+
 	if (need_resched())
 		goto need_resched;
 
+#ifdef LITMUS_SOFTIRQD
+	reacquire_klitirqd_lock(prev);
+#endif
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	litmus->run_tasklets(prev);
+#endif
+
 	srp_ceiling_block();
 }
 EXPORT_SYMBOL(schedule);
 
+
+
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
@@ -4561,6 +4581,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 	}
 }
 
+
 /**
  * __wake_up - wake up threads blocked on a waitqueue.
  * @q: the waitqueue
@@ -4747,6 +4768,12 @@ void __sched wait_for_completion(struct completion *x)
 }
 EXPORT_SYMBOL(wait_for_completion);
 
+void __sched __wait_for_completion_locked(struct completion *x)
+{
+	do_wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(__wait_for_completion_locked);
+
 /**
  * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
  * @x: holds the state of this particular completion
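Unlike wait_for_completion(), the new __wait_for_completion_locked() jumps straight to do_wait_for_common(), so the caller is expected to already hold x->wait.lock. A minimal sketch of the intended calling convention, assuming the usual do_wait_for_common() behaviour (wait.lock held on entry, dropped around the sleep, re-held on return); the completion and the surrounding critical section are illustrative.

/* Hypothetical caller: waits while keeping other state that is guarded by
 * the completion's wait.lock consistent with the wait itself. */
static DECLARE_COMPLETION(demo_done);

static void wait_under_wait_lock(void)
{
	spin_lock_irq(&demo_done.wait.lock);
	/* ... examine or update state guarded by demo_done.wait.lock ... */
	__wait_for_completion_locked(&demo_done);	/* may drop/re-take wait.lock while sleeping */
	/* wait.lock is held again here */
	spin_unlock_irq(&demo_done.wait.lock);
}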
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 94a62c0d4ade..c947a046a6d7 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -33,11 +33,11 @@
 #include <linux/spinlock.h>
 #include <linux/ftrace.h>
 
-static noinline void __down(struct semaphore *sem);
+noinline void __down(struct semaphore *sem);
 static noinline int __down_interruptible(struct semaphore *sem);
 static noinline int __down_killable(struct semaphore *sem);
 static noinline int __down_timeout(struct semaphore *sem, long jiffies);
-static noinline void __up(struct semaphore *sem);
+noinline void __up(struct semaphore *sem);
 
 /**
  * down - acquire the semaphore
@@ -190,11 +190,13 @@ EXPORT_SYMBOL(up);
 
 /* Functions for the contended case */
 
+/*
 struct semaphore_waiter {
 	struct list_head list;
 	struct task_struct *task;
 	int up;
 };
+*/
 
 /*
  * Because this function is inlined, the 'state' parameter will be
@@ -233,10 +235,12 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 	return -EINTR;
 }
 
-static noinline void __sched __down(struct semaphore *sem)
+noinline void __sched __down(struct semaphore *sem)
 {
 	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
+EXPORT_SYMBOL(__down);
+
 
 static noinline int __sched __down_interruptible(struct semaphore *sem)
 {
@@ -253,7 +257,7 @@ static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
 	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
 }
 
-static noinline void __sched __up(struct semaphore *sem)
+noinline void __sched __up(struct semaphore *sem)
 {
 	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
 						struct semaphore_waiter, list);
@@ -261,3 +265,4 @@ static noinline void __sched __up(struct semaphore *sem)
 	waiter->up = 1;
 	wake_up_process(waiter->task);
 }
+EXPORT_SYMBOL(__up);
\ No newline at end of file
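Exporting __down() and __up() exposes the contended slow paths, so code outside semaphore.c can assemble its own down()/up() variants (the struct semaphore_waiter definition commented out above presumably moves to a shared header elsewhere in the patch, which is not shown here). A rough sketch of such a wrapper, mirroring the stock down() fast path; the raw_spin_lock spelling of sem->lock matches mainline semaphore.c of this kernel generation but is an assumption here, and down_with_hook() is purely illustrative.

/* down()-style acquire that can run extra bookkeeping under sem->lock. */
void down_with_hook(struct semaphore *sem, void (*hook)(struct semaphore *))
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (hook)
		hook(sem);		/* custom accounting while sem->lock is held */
	if (likely(sem->count > 0))
		sem->count--;		/* uncontended fast path */
	else
		__down(sem);		/* exported contended slow path (sleeps) */
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}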
diff --git a/kernel/softirq.c b/kernel/softirq.c
index fca82c32042b..5ce271675662 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,6 +29,15 @@
 #include <trace/events/irq.h>
 
 #include <asm/irq.h>
+
+#include <litmus/litmus.h>
+#include <litmus/sched_trace.h>
+
+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/nvidia_info.h>
+#include <litmus/trace.h>
+#endif
+
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -67,7 +76,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
  * to the pending events, so lets the scheduler to balance
  * the softirq load for us.
  */
-static void wakeup_softirqd(void)
+void wakeup_softirqd(void)
 {
 	/* Interrupts are disabled: no need to stop preemption */
 	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
@@ -193,6 +202,7 @@ void local_bh_enable_ip(unsigned long ip)
 }
 EXPORT_SYMBOL(local_bh_enable_ip);
 
+
 /*
  * We restart softirq processing MAX_SOFTIRQ_RESTART times,
  * and we fall back to softirqd after that.
@@ -206,65 +216,65 @@ EXPORT_SYMBOL(local_bh_enable_ip);
 
 asmlinkage void __do_softirq(void)
 {
 	struct softirq_action *h;
 	__u32 pending;
 	int max_restart = MAX_SOFTIRQ_RESTART;
 	int cpu;
 
 	pending = local_softirq_pending();
 	account_system_vtime(current);
 
 	__local_bh_disable((unsigned long)__builtin_return_address(0),
 				SOFTIRQ_OFFSET);
 	lockdep_softirq_enter();
 
 	cpu = smp_processor_id();
 restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
 
 	local_irq_enable();
 
 	h = softirq_vec;
-
-	do {
-		if (pending & 1) {
-			unsigned int vec_nr = h - softirq_vec;
-			int prev_count = preempt_count();
-
-			kstat_incr_softirqs_this_cpu(vec_nr);
-
-			trace_softirq_entry(vec_nr);
-			h->action(h);
-			trace_softirq_exit(vec_nr);
-			if (unlikely(prev_count != preempt_count())) {
-				printk(KERN_ERR "huh, entered softirq %u %s %p"
-				       "with preempt_count %08x,"
-				       " exited with %08x?\n", vec_nr,
-				       softirq_to_name[vec_nr], h->action,
-				       prev_count, preempt_count());
-				preempt_count() = prev_count;
-			}
-
-			rcu_bh_qs(cpu);
-		}
-		h++;
-		pending >>= 1;
-	} while (pending);
-
-	local_irq_disable();
-
-	pending = local_softirq_pending();
-	if (pending && --max_restart)
-		goto restart;
-
-	if (pending)
-		wakeup_softirqd();
-
-	lockdep_softirq_exit();
-
-	account_system_vtime(current);
-	__local_bh_enable(SOFTIRQ_OFFSET);
+
+	do {
+		if (pending & 1) {
+			unsigned int vec_nr = h - softirq_vec;
+			int prev_count = preempt_count();
+
+			kstat_incr_softirqs_this_cpu(vec_nr);
+
+			trace_softirq_entry(vec_nr);
+			h->action(h);
+			trace_softirq_exit(vec_nr);
+			if (unlikely(prev_count != preempt_count())) {
+				printk(KERN_ERR "huh, entered softirq %u %s %p"
+				       "with preempt_count %08x,"
+				       " exited with %08x?\n", vec_nr,
+				       softirq_to_name[vec_nr], h->action,
+				       prev_count, preempt_count());
+				preempt_count() = prev_count;
+			}
+
+			rcu_bh_qs(cpu);
+		}
+		h++;
+		pending >>= 1;
+	} while (pending);
+
+	local_irq_disable();
+
+	pending = local_softirq_pending();
+	if (pending && --max_restart)
+		goto restart;
+
+	if (pending)
+		wakeup_softirqd();
+
+	lockdep_softirq_exit();
+
+	account_system_vtime(current);
+	__local_bh_enable(SOFTIRQ_OFFSET);
 }
 
 #ifndef __ARCH_HAS_DO_SOFTIRQ
@@ -402,8 +412,99 @@ struct tasklet_head
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
 
+#ifdef CONFIG_LITMUS_NVIDIA
+static int __do_nv_now(struct tasklet_struct* tasklet)
+{
+	int success = 1;
+
+	if(tasklet_trylock(tasklet)) {
+		if (!atomic_read(&tasklet->count)) {
+			if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) {
+				BUG();
+			}
+			tasklet->func(tasklet->data);
+			tasklet_unlock(tasklet);
+		}
+		else {
+			success = 0;
+		}
+
+		tasklet_unlock(tasklet);
+	}
+	else {
+		success = 0;
+	}
+
+	return success;
+}
+#endif
+
+
 void __tasklet_schedule(struct tasklet_struct *t)
 {
+#ifdef CONFIG_LITMUS_NVIDIA
+	if(is_nvidia_func(t->func))
+	{
+#if 0
+		// do nvidia tasklets right away and return
+		if(__do_nv_now(t))
+			return;
+#else
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		// TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		//	 __FUNCTION__, nvidia_device,litmus_clock());
+
+		unsigned long flags;
+		struct task_struct* device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_max_device_owner(nvidia_device);
+
+		if(device_owner==NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if(is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
+					__FUNCTION__, nvidia_device,litmus_clock());
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+					__FUNCTION__,device_owner->pid,nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+
+				if(likely(_litmus_tasklet_schedule(t,nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+#endif
+	}
+
+#endif
+
+	___tasklet_schedule(t);
+}
+EXPORT_SYMBOL(__tasklet_schedule);
+
+
+void ___tasklet_schedule(struct tasklet_struct *t)
+{
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -413,11 +514,65 @@ void __tasklet_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(___tasklet_schedule);
 
-EXPORT_SYMBOL(__tasklet_schedule);
 
 void __tasklet_hi_schedule(struct tasklet_struct *t)
 {
+#ifdef CONFIG_LITMUS_NVIDIA
+	if(is_nvidia_func(t->func))
+	{
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		// TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		//	 __FUNCTION__, nvidia_device,litmus_clock());
+
+		unsigned long flags;
+		struct task_struct* device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_max_device_owner(nvidia_device);
+
+		if(device_owner==NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if( is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u\tat %llu\n",
+					__FUNCTION__, nvidia_device,litmus_clock());
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+					__FUNCTION__,device_owner->pid,nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+				if(likely(_litmus_tasklet_hi_schedule(t,nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+	}
+#endif
+
+	___tasklet_hi_schedule(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule);
+
+void ___tasklet_hi_schedule(struct tasklet_struct* t)
+{
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -427,19 +582,72 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(HI_SOFTIRQ);
 	local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL(__tasklet_hi_schedule);
+EXPORT_SYMBOL(___tasklet_hi_schedule);
 
 void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
 	BUG_ON(!irqs_disabled());
+#ifdef CONFIG_LITMUS_NVIDIA
+	if(is_nvidia_func(t->func))
+	{
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		// TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		//	 __FUNCTION__, nvidia_device,litmus_clock());
+		unsigned long flags;
+		struct task_struct* device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_max_device_owner(nvidia_device);
+
+		if(device_owner==NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if(is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
+					__FUNCTION__, nvidia_device,litmus_clock());
+
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+					__FUNCTION__,device_owner->pid,nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+				if(likely(_litmus_tasklet_hi_schedule_first(t,nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+	}
+#endif
+
+	___tasklet_hi_schedule_first(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+void ___tasklet_hi_schedule_first(struct tasklet_struct* t)
+{
+	BUG_ON(!irqs_disabled());
 
 	t->next = __this_cpu_read(tasklet_hi_vec.head);
 	__this_cpu_write(tasklet_hi_vec.head, t);
 	__raise_softirq_irqoff(HI_SOFTIRQ);
 }
-
-EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+EXPORT_SYMBOL(___tasklet_hi_schedule_first);
 
 static void tasklet_action(struct softirq_action *a)
 {
@@ -495,6 +703,7 @@ static void tasklet_hi_action(struct softirq_action *a)
 		if (!atomic_read(&t->count)) {
 			if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
 				BUG();
+
 			t->func(t->data);
 			tasklet_unlock(t);
 			continue;
@@ -518,8 +727,13 @@ void tasklet_init(struct tasklet_struct *t,
 	t->next = NULL;
 	t->state = 0;
 	atomic_set(&t->count, 0);
+
 	t->func = func;
 	t->data = data;
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	t->owner = NULL;
+#endif
 }
 
 EXPORT_SYMBOL(tasklet_init);
@@ -534,6 +748,7 @@ void tasklet_kill(struct tasklet_struct *t)
 			yield();
 		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
 	}
+
 	tasklet_unlock_wait(t);
 	clear_bit(TASKLET_STATE_SCHED, &t->state);
 }
@@ -808,6 +1023,7 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
 	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
 		if (*i == t) {
 			*i = t->next;
+
 			/* If this was the tail element, move the tail ptr */
 			if (*i == NULL)
 				per_cpu(tasklet_vec, cpu).tail = i;
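The three tasklet entry points above (__tasklet_schedule, __tasklet_hi_schedule, __tasklet_hi_schedule_first) all prepend the same intercept-or-fall-back preamble before calling the renamed ___tasklet_*() originals. A condensed restatement of that shape, with the helper name try_litmus_dispatch invented for illustration and CONFIG_LITMUS_NVIDIA assumed (so that t->owner exists):

/* Hypothetical condensation of the dispatch pattern used above. */
static int try_litmus_dispatch(struct tasklet_struct *t)
{
	u32 dev;
	unsigned long flags;
	struct task_struct *owner;
	int handled = 0;

	if (!is_nvidia_func(t->func))
		return 0;			/* not an NVIDIA callback: use the normal path */

	dev = get_tasklet_nv_device_num(t);
	lock_nv_registry(dev, &flags);

	owner = get_nv_max_device_owner(dev);
	if (owner && is_realtime(owner)) {
		t->owner = owner;		/* charge the GPU-holding real-time task */
		sched_trace_tasklet_release(t->owner);
		handled = _litmus_tasklet_schedule(t, dev);	/* or the _hi/_hi_first variant */
		if (!handled)
			t->owner = NULL;	/* klitirqd declined: fall back */
	} else {
		t->owner = NULL;		/* no real-time owner: leave it to ksoftirqd */
	}

	unlock_nv_registry(dev, &flags);
	return handled;
}

When this returns 0, the callers above simply continue into the renamed ___tasklet_schedule()/___tasklet_hi_schedule()/___tasklet_hi_schedule_first() bodies, which retain the old mainline behaviour.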
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0400553f0d04..6b59d59ce3cf 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -44,6 +44,13 @@
 
 #include "workqueue_sched.h"
 
+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/litmus.h>
+#include <litmus/sched_trace.h>
+#include <litmus/nvidia_info.h>
+#endif
+
+
 enum {
 	/* global_cwq flags */
 	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
@@ -1047,9 +1054,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 		work_flags |= WORK_STRUCT_DELAYED;
 		worklist = &cwq->delayed_works;
 	}
-
 	insert_work(cwq, work, worklist, work_flags);
-
 	spin_unlock_irqrestore(&gcwq->lock, flags);
 }
 
@@ -2687,10 +2692,70 @@ EXPORT_SYMBOL(cancel_delayed_work_sync);
  */
 int schedule_work(struct work_struct *work)
 {
-	return queue_work(system_wq, work);
+#if 0
+#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
+	if(is_nvidia_func(work->func))
+	{
+		u32 nvidiaDevice = get_work_nv_device_num(work);
+
+		//1) Ask Litmus which task owns GPU <nvidiaDevice>. (API to be defined.)
+		unsigned long flags;
+		struct task_struct* device_owner;
+
+		lock_nv_registry(nvidiaDevice, &flags);
+
+		device_owner = get_nv_max_device_owner(nvidiaDevice);
+
+		//2) If there is an owner, set work->owner to the owner's task struct.
+		if(device_owner==NULL)
+		{
+			work->owner = NULL;
+			//TRACE("%s: the owner task of NVIDIA Device %u is NULL\n",__FUNCTION__,nvidiaDevice);
+		}
+		else
+		{
+			if( is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA work for device\t%u\tat\t%llu\n",
+					__FUNCTION__, nvidiaDevice,litmus_clock());
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+					__FUNCTION__,
+					device_owner->pid,
+					nvidiaDevice);
+
+				//3) Call litmus_schedule_work() and return (don't execute the rest
+				//	of schedule_schedule()).
+				work->owner = device_owner;
+				sched_trace_work_release(work->owner);
+				if(likely(litmus_schedule_work(work, nvidiaDevice)))
+				{
+					unlock_nv_registry(nvidiaDevice, &flags);
+					return 1;
+				}
+				else
+				{
+					work->owner = NULL; /* fall through to normal work scheduling */
+				}
+			}
+			else
+			{
+				work->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidiaDevice, &flags);
+	}
+#endif
+#endif
+	return(__schedule_work(work));
 }
 EXPORT_SYMBOL(schedule_work);
 
+int __schedule_work(struct work_struct* work)
+{
+	return queue_work(system_wq, work);
+}
+EXPORT_SYMBOL(__schedule_work);
+
 /*
  * schedule_work_on - put work task on a specific cpu
  * @cpu: cpu to put the work task on
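Because the whole NVIDIA branch in schedule_work() sits under #if 0, the compiled result of this hunk is only the split into a hook point plus the original body, roughly as sketched below (this restates the hunk above rather than adding anything new):

int schedule_work(struct work_struct *work)
{
	/* LITMUS NVIDIA redirection would go here; it is disabled via #if 0 above */
	return __schedule_work(work);
}

int __schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);	/* unchanged mainline behaviour */
}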