Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/completion.h |  1
-rw-r--r--   include/linux/interrupt.h  | 10
-rw-r--r--   include/linux/mutex.h      | 10
-rw-r--r--   include/linux/semaphore.h  |  9
-rw-r--r--   include/linux/workqueue.h  | 18
5 files changed, 47 insertions(+), 1 deletion(-)
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 9d727271c9fe..cff405c4dd3a 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -76,6 +76,7 @@ static inline void init_completion(struct completion *x)
 	init_waitqueue_head(&x->wait);
 }
 
+extern void __wait_for_completion_locked(struct completion *);
 extern void wait_for_completion(struct completion *);
 extern int wait_for_completion_interruptible(struct completion *x);
 extern int wait_for_completion_killable(struct completion *x);
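The completion.h change only adds a declaration; the body of __wait_for_completion_locked() lives elsewhere in the patch. Below is a minimal sketch of the completion API this declaration sits next to; the note about the new helper's purpose is an assumption based on its naming, not something this header states.

/* Sketch of the existing completion API.  ASSUMPTION: the new
 * __wait_for_completion_locked() (declared above, defined outside this
 * diff) is a variant for callers that already hold the completion's
 * wait-queue lock. */
#include <linux/completion.h>

static DECLARE_COMPLETION(job_done);

static void producer(void)
{
	/* ... produce the result ... */
	complete(&job_done);			/* wake one waiter */
}

static int consumer(void)
{
	/* existing API: returns -ERESTARTSYS if interrupted by a signal */
	return wait_for_completion_interruptible(&job_done);
}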
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index f6efed0039ed..57a7bc8807be 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -445,6 +445,7 @@ static inline void __raise_softirq_irqoff(unsigned int nr)
 
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
+extern void wakeup_softirqd(void);
 
 /* This is the worklist that queues up per-cpu softirq work.
  *
@@ -500,6 +501,10 @@ struct tasklet_struct
 	atomic_t count;
 	void (*func)(unsigned long);
 	unsigned long data;
+
+#if defined(CONFIG_LITMUS_SOFTIRQD) || defined(CONFIG_LITMUS_PAI_SOFTIRQD)
+	struct task_struct *owner;
+#endif
 };
 
 #define DECLARE_TASKLET(name, func, data) \
@@ -537,6 +542,7 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
 #define tasklet_unlock(t) do { } while (0)
 #endif
 
+extern void ___tasklet_schedule(struct tasklet_struct *t);
 extern void __tasklet_schedule(struct tasklet_struct *t);
 
 static inline void tasklet_schedule(struct tasklet_struct *t)
@@ -545,6 +551,7 @@ static inline void tasklet_schedule(struct tasklet_struct *t)
 		__tasklet_schedule(t);
 }
 
+extern void ___tasklet_hi_schedule(struct tasklet_struct *t);
 extern void __tasklet_hi_schedule(struct tasklet_struct *t);
 
 static inline void tasklet_hi_schedule(struct tasklet_struct *t)
@@ -553,6 +560,7 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t)
 		__tasklet_hi_schedule(t);
 }
 
+extern void ___tasklet_hi_schedule_first(struct tasklet_struct *t);
 extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
 
 /*
@@ -582,7 +590,7 @@ static inline void tasklet_disable(struct tasklet_struct *t)
 }
 
 static inline void tasklet_enable(struct tasklet_struct *t)
 {
 	smp_mb__before_atomic_dec();
 	atomic_dec(&t->count);
 }
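The interrupt.h changes export the raw softirq/tasklet entry points (wakeup_softirqd() and the triple-underscore schedule variants) and add an owner task pointer to struct tasklet_struct under the LITMUS^RT config options. A hedged sketch of how a driver might fill in the new field follows; the idea that ->owner names the task the tasklet's execution is charged to is an assumption, and rt_client is a hypothetical pointer the driver would track itself.

/* Sketch only.  ASSUMPTION: the LITMUS^RT softirq daemons read
 * tasklet_struct::owner to attribute bottom-half execution to a
 * real-time task; this header does not state that policy. */
#include <linux/interrupt.h>
#include <linux/sched.h>

static struct task_struct *rt_client;	/* hypothetical: set when the RT task registers */

static void my_bh(unsigned long data)
{
	/* deferred bottom-half work */
}

static DECLARE_TASKLET(my_tasklet, my_bh, 0);

static void kick_bottom_half(void)
{
#if defined(CONFIG_LITMUS_SOFTIRQD) || defined(CONFIG_LITMUS_PAI_SOFTIRQD)
	my_tasklet.owner = rt_client;	/* new field from this patch */
#endif
	tasklet_schedule(&my_tasklet);	/* unchanged caller-facing API */
}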
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index a940fe435aca..cb47debbf24d 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -126,6 +126,15 @@ static inline int mutex_is_locked(struct mutex *lock)
 	return atomic_read(&lock->count) != 1;
 }
 
+/* return non-zero to abort. only pre-side-effects may abort */
+typedef int (*side_effect_t)(unsigned long);
+extern void mutex_lock_sfx(struct mutex *lock,
+			   side_effect_t pre, unsigned long pre_arg,
+			   side_effect_t post, unsigned long post_arg);
+extern void mutex_unlock_sfx(struct mutex *lock,
+			     side_effect_t pre, unsigned long pre_arg,
+			     side_effect_t post, unsigned long post_arg);
+
 /*
  * See kernel/mutex.c for detailed documentation of these APIs.
  * Also see Documentation/mutex-design.txt.
@@ -153,6 +162,7 @@ extern void mutex_lock(struct mutex *lock);
 extern int __must_check mutex_lock_interruptible(struct mutex *lock);
 extern int __must_check mutex_lock_killable(struct mutex *lock);
 
+
 # define mutex_lock_nested(lock, subclass)			mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass)	mutex_lock_interruptible(lock)
 # define mutex_lock_killable_nested(lock, subclass)		mutex_lock_killable(lock)
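mutex_lock_sfx()/mutex_unlock_sfx() take optional pre/post callbacks that run around the lock operation; per the comment in the hunk above, only the pre side-effect may abort by returning non-zero. Below is a sketch of a caller grounded only in these declarations; exactly what the callbacks may do, and what an abort means for the lock operation, is defined in kernel/mutex.c and is assumed here.

/* Sketch of a mutex_lock_sfx()/mutex_unlock_sfx() caller. */
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);

static int pre_cb(unsigned long arg)
{
	/* runs before the lock/unlock takes effect; returning non-zero
	 * aborts (only meaningful for the pre side-effect, per the header) */
	return 0;
}

static int post_cb(unsigned long arg)
{
	/* runs after the lock/unlock takes effect */
	return 0;
}

static void critical_section(unsigned long token)
{
	mutex_lock_sfx(&my_lock, pre_cb, token, post_cb, token);
	/* ... protected work ... */
	mutex_unlock_sfx(&my_lock, pre_cb, token, post_cb, token);
}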
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 39fa04966aa8..c83fc2b65f01 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -43,4 +43,13 @@ extern int __must_check down_trylock(struct semaphore *sem);
 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
 extern void up(struct semaphore *sem);
 
+extern void __down(struct semaphore *sem);
+extern void __up(struct semaphore *sem);
+
+struct semaphore_waiter {
+	struct list_head list;
+	struct task_struct *task;
+	int up;
+};
+
 #endif /* __LINUX_SEMAPHORE_H */
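Exporting __down()/__up() and struct semaphore_waiter exposes the semaphore slow path that was previously private to kernel/semaphore.c, presumably so LITMUS^RT locking code can manage the wait list itself. The sketch below mirrors what the stock up() implementation does with these pieces; it illustrates the existing pattern, not the LITMUS^RT code.

/* Mirrors the stock up() slow path in kernel/semaphore.c: if the wait
 * list is non-empty, __up() hands the semaphore to the first queued
 * semaphore_waiter instead of bumping the count. */
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/list.h>

static void demo_up(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (list_empty(&sem->wait_list))
		sem->count++;		/* nobody waiting: just release */
	else
		__up(sem);		/* wake the first semaphore_waiter */
	spin_unlock_irqrestore(&sem->lock, flags);
}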
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f584aba78ca9..1ec2ec7d4e3b 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -83,6 +83,9 @@ struct work_struct {
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map lockdep_map;
 #endif
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	struct task_struct *owner;
+#endif
 };
 
 #define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
@@ -115,11 +118,25 @@ struct execute_work {
 #define __WORK_INIT_LOCKDEP_MAP(n, k)
 #endif
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+#define __WORK_INIT_OWNER() \
+	.owner = NULL,
+
+#define PREPARE_OWNER(_work, _owner) \
+	do { \
+		(_work)->owner = (_owner); \
+	} while(0)
+#else
+#define __WORK_INIT_OWNER()
+#define PREPARE_OWNER(_work, _owner)
+#endif
+
 #define __WORK_INITIALIZER(n, f) {				\
 	.data = WORK_DATA_STATIC_INIT(),			\
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
 	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
+	__WORK_INIT_OWNER()					\
 	}
 
 #define __DELAYED_WORK_INITIALIZER(n, f) {			\
@@ -357,6 +374,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
 
+extern int __schedule_work(struct work_struct *work);
 extern int schedule_work(struct work_struct *work);
 extern int schedule_work_on(int cpu, struct work_struct *work);
 extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
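From a client's point of view, the workqueue changes add an owner field (statically initialized to NULL by __WORK_INIT_OWNER()) and a PREPARE_OWNER() helper to stamp a work item with a task before it is queued; __schedule_work() appears to be the lower-level path that schedule_work() now builds on, though its implementation is not part of this header. A hedged sketch:

/* Sketch of the new owner-tracking hooks.  PREPARE_OWNER() compiles to a
 * no-op unless CONFIG_LITMUS_SOFTIRQD is set (see the #else branch above).
 * ASSUMPTION: the owner is the real-time task the deferred work should be
 * charged to; this header does not define that policy. */
#include <linux/workqueue.h>
#include <linux/sched.h>

static void my_work_fn(struct work_struct *w)
{
	/* deferred work runs here */
}

static DECLARE_WORK(my_work, my_work_fn);	/* static init picks up __WORK_INIT_OWNER() */

static void submit_for(struct task_struct *client)
{
	PREPARE_OWNER(&my_work, client);	/* new helper from this patch */
	schedule_work(&my_work);		/* unchanged caller-facing API */
}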