diff options
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/Makefile | 2 | ||||
-rw-r--r-- | kernel/cgroup.c | 7 | ||||
-rw-r--r-- | kernel/semaphore.c | 264 | ||||
-rw-r--r-- | kernel/signal.c | 71 | ||||
-rw-r--r-- | kernel/time/tick-broadcast.c | 2 | ||||
-rw-r--r-- | kernel/time/tick-common.c | 4 | ||||
-rw-r--r-- | kernel/time/tick-oneshot.c | 2 |
7 files changed, 318 insertions, 34 deletions
diff --git a/kernel/Makefile b/kernel/Makefile index 05c8003718ee..6c5f081132a4 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -8,7 +8,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ | |||
8 | signal.o sys.o kmod.o workqueue.o pid.o \ | 8 | signal.o sys.o kmod.o workqueue.o pid.o \ |
9 | rcupdate.o extable.o params.o posix-timers.o \ | 9 | rcupdate.o extable.o params.o posix-timers.o \ |
10 | kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ | 10 | kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ |
11 | hrtimer.o rwsem.o nsproxy.o srcu.o \ | 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ |
12 | notifier.o ksysfs.o pm_qos_params.o | 12 | notifier.o ksysfs.o pm_qos_params.o |
13 | 13 | ||
14 | obj-$(CONFIG_SYSCTL) += sysctl_check.o | 14 | obj-$(CONFIG_SYSCTL) += sysctl_check.o |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 2727f9238359..6d8de051382b 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -1722,7 +1722,12 @@ void cgroup_enable_task_cg_lists(void) | |||
1722 | use_task_css_set_links = 1; | 1722 | use_task_css_set_links = 1; |
1723 | do_each_thread(g, p) { | 1723 | do_each_thread(g, p) { |
1724 | task_lock(p); | 1724 | task_lock(p); |
1725 | if (list_empty(&p->cg_list)) | 1725 | /* |
1726 | * We should check if the process is exiting, otherwise | ||
1727 | * it will race with cgroup_exit() in that the list | ||
1728 | * entry won't be deleted though the process has exited. | ||
1729 | */ | ||
1730 | if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list)) | ||
1726 | list_add(&p->cg_list, &p->cgroups->tasks); | 1731 | list_add(&p->cg_list, &p->cgroups->tasks); |
1727 | task_unlock(p); | 1732 | task_unlock(p); |
1728 | } while_each_thread(g, p); | 1733 | } while_each_thread(g, p); |
diff --git a/kernel/semaphore.c b/kernel/semaphore.c new file mode 100644 index 000000000000..5c2942e768cd --- /dev/null +++ b/kernel/semaphore.c | |||
@@ -0,0 +1,264 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008 Intel Corporation | ||
3 | * Author: Matthew Wilcox <willy@linux.intel.com> | ||
4 | * | ||
5 | * Distributed under the terms of the GNU GPL, version 2 | ||
6 | * | ||
7 | * This file implements counting semaphores. | ||
8 | * A counting semaphore may be acquired 'n' times before sleeping. | ||
9 | * See mutex.c for single-acquisition sleeping locks which enforce | ||
10 | * rules which allow code to be debugged more easily. | ||
11 | */ | ||
12 | |||
13 | /* | ||
14 | * Some notes on the implementation: | ||
15 | * | ||
16 | * The spinlock controls access to the other members of the semaphore. | ||
17 | * down_trylock() and up() can be called from interrupt context, so we | ||
18 | * have to disable interrupts when taking the lock. It turns out various | ||
19 | * parts of the kernel expect to be able to use down() on a semaphore in | ||
20 | * interrupt context when they know it will succeed, so we have to use | ||
21 | * irqsave variants for down(), down_interruptible() and down_killable() | ||
22 | * too. | ||
23 | * | ||
24 | * The ->count variable represents how many more tasks can acquire this | ||
25 | * semaphore. If it's zero, there may be tasks waiting on the wait_list. | ||
26 | */ | ||
27 | |||
28 | #include <linux/compiler.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/sched.h> | ||
32 | #include <linux/semaphore.h> | ||
33 | #include <linux/spinlock.h> | ||
34 | |||
35 | static noinline void __down(struct semaphore *sem); | ||
36 | static noinline int __down_interruptible(struct semaphore *sem); | ||
37 | static noinline int __down_killable(struct semaphore *sem); | ||
38 | static noinline int __down_timeout(struct semaphore *sem, long jiffies); | ||
39 | static noinline void __up(struct semaphore *sem); | ||
40 | |||
41 | /** | ||
42 | * down - acquire the semaphore | ||
43 | * @sem: the semaphore to be acquired | ||
44 | * | ||
45 | * Acquires the semaphore. If no more tasks are allowed to acquire the | ||
46 | * semaphore, calling this function will put the task to sleep until the | ||
47 | * semaphore is released. | ||
48 | * | ||
49 | * Use of this function is deprecated, please use down_interruptible() or | ||
50 | * down_killable() instead. | ||
51 | */ | ||
52 | void down(struct semaphore *sem) | ||
53 | { | ||
54 | unsigned long flags; | ||
55 | |||
56 | spin_lock_irqsave(&sem->lock, flags); | ||
57 | if (likely(sem->count > 0)) | ||
58 | sem->count--; | ||
59 | else | ||
60 | __down(sem); | ||
61 | spin_unlock_irqrestore(&sem->lock, flags); | ||
62 | } | ||
63 | EXPORT_SYMBOL(down); | ||
64 | |||
65 | /** | ||
66 | * down_interruptible - acquire the semaphore unless interrupted | ||
67 | * @sem: the semaphore to be acquired | ||
68 | * | ||
69 | * Attempts to acquire the semaphore. If no more tasks are allowed to | ||
70 | * acquire the semaphore, calling this function will put the task to sleep. | ||
71 | * If the sleep is interrupted by a signal, this function will return -EINTR. | ||
72 | * If the semaphore is successfully acquired, this function returns 0. | ||
73 | */ | ||
74 | int down_interruptible(struct semaphore *sem) | ||
75 | { | ||
76 | unsigned long flags; | ||
77 | int result = 0; | ||
78 | |||
79 | spin_lock_irqsave(&sem->lock, flags); | ||
80 | if (likely(sem->count > 0)) | ||
81 | sem->count--; | ||
82 | else | ||
83 | result = __down_interruptible(sem); | ||
84 | spin_unlock_irqrestore(&sem->lock, flags); | ||
85 | |||
86 | return result; | ||
87 | } | ||
88 | EXPORT_SYMBOL(down_interruptible); | ||
89 | |||
90 | /** | ||
91 | * down_killable - acquire the semaphore unless killed | ||
92 | * @sem: the semaphore to be acquired | ||
93 | * | ||
94 | * Attempts to acquire the semaphore. If no more tasks are allowed to | ||
95 | * acquire the semaphore, calling this function will put the task to sleep. | ||
96 | * If the sleep is interrupted by a fatal signal, this function will return | ||
97 | * -EINTR. If the semaphore is successfully acquired, this function returns | ||
98 | * 0. | ||
99 | */ | ||
100 | int down_killable(struct semaphore *sem) | ||
101 | { | ||
102 | unsigned long flags; | ||
103 | int result = 0; | ||
104 | |||
105 | spin_lock_irqsave(&sem->lock, flags); | ||
106 | if (likely(sem->count > 0)) | ||
107 | sem->count--; | ||
108 | else | ||
109 | result = __down_killable(sem); | ||
110 | spin_unlock_irqrestore(&sem->lock, flags); | ||
111 | |||
112 | return result; | ||
113 | } | ||
114 | EXPORT_SYMBOL(down_killable); | ||
115 | |||
116 | /** | ||
117 | * down_trylock - try to acquire the semaphore, without waiting | ||
118 | * @sem: the semaphore to be acquired | ||
119 | * | ||
120 | * Try to acquire the semaphore atomically. Returns 0 if the mutex has | ||
121 | * been acquired successfully or 1 if it it cannot be acquired. | ||
122 | * | ||
123 | * NOTE: This return value is inverted from both spin_trylock and | ||
124 | * mutex_trylock! Be careful about this when converting code. | ||
125 | * | ||
126 | * Unlike mutex_trylock, this function can be used from interrupt context, | ||
127 | * and the semaphore can be released by any task or interrupt. | ||
128 | */ | ||
129 | int down_trylock(struct semaphore *sem) | ||
130 | { | ||
131 | unsigned long flags; | ||
132 | int count; | ||
133 | |||
134 | spin_lock_irqsave(&sem->lock, flags); | ||
135 | count = sem->count - 1; | ||
136 | if (likely(count >= 0)) | ||
137 | sem->count = count; | ||
138 | spin_unlock_irqrestore(&sem->lock, flags); | ||
139 | |||
140 | return (count < 0); | ||
141 | } | ||
142 | EXPORT_SYMBOL(down_trylock); | ||
143 | |||
144 | /** | ||
145 | * down_timeout - acquire the semaphore within a specified time | ||
146 | * @sem: the semaphore to be acquired | ||
147 | * @jiffies: how long to wait before failing | ||
148 | * | ||
149 | * Attempts to acquire the semaphore. If no more tasks are allowed to | ||
150 | * acquire the semaphore, calling this function will put the task to sleep. | ||
151 | * If the semaphore is not released within the specified number of jiffies, | ||
152 | * this function returns -ETIME. It returns 0 if the semaphore was acquired. | ||
153 | */ | ||
154 | int down_timeout(struct semaphore *sem, long jiffies) | ||
155 | { | ||
156 | unsigned long flags; | ||
157 | int result = 0; | ||
158 | |||
159 | spin_lock_irqsave(&sem->lock, flags); | ||
160 | if (likely(sem->count > 0)) | ||
161 | sem->count--; | ||
162 | else | ||
163 | result = __down_timeout(sem, jiffies); | ||
164 | spin_unlock_irqrestore(&sem->lock, flags); | ||
165 | |||
166 | return result; | ||
167 | } | ||
168 | EXPORT_SYMBOL(down_timeout); | ||
169 | |||
170 | /** | ||
171 | * up - release the semaphore | ||
172 | * @sem: the semaphore to release | ||
173 | * | ||
174 | * Release the semaphore. Unlike mutexes, up() may be called from any | ||
175 | * context and even by tasks which have never called down(). | ||
176 | */ | ||
177 | void up(struct semaphore *sem) | ||
178 | { | ||
179 | unsigned long flags; | ||
180 | |||
181 | spin_lock_irqsave(&sem->lock, flags); | ||
182 | if (likely(list_empty(&sem->wait_list))) | ||
183 | sem->count++; | ||
184 | else | ||
185 | __up(sem); | ||
186 | spin_unlock_irqrestore(&sem->lock, flags); | ||
187 | } | ||
188 | EXPORT_SYMBOL(up); | ||
189 | |||
190 | /* Functions for the contended case */ | ||
191 | |||
192 | struct semaphore_waiter { | ||
193 | struct list_head list; | ||
194 | struct task_struct *task; | ||
195 | int up; | ||
196 | }; | ||
197 | |||
198 | /* | ||
199 | * Because this function is inlined, the 'state' parameter will be | ||
200 | * constant, and thus optimised away by the compiler. Likewise the | ||
201 | * 'timeout' parameter for the cases without timeouts. | ||
202 | */ | ||
203 | static inline int __sched __down_common(struct semaphore *sem, long state, | ||
204 | long timeout) | ||
205 | { | ||
206 | struct task_struct *task = current; | ||
207 | struct semaphore_waiter waiter; | ||
208 | |||
209 | list_add_tail(&waiter.list, &sem->wait_list); | ||
210 | waiter.task = task; | ||
211 | waiter.up = 0; | ||
212 | |||
213 | for (;;) { | ||
214 | if (state == TASK_INTERRUPTIBLE && signal_pending(task)) | ||
215 | goto interrupted; | ||
216 | if (state == TASK_KILLABLE && fatal_signal_pending(task)) | ||
217 | goto interrupted; | ||
218 | if (timeout <= 0) | ||
219 | goto timed_out; | ||
220 | __set_task_state(task, state); | ||
221 | spin_unlock_irq(&sem->lock); | ||
222 | timeout = schedule_timeout(timeout); | ||
223 | spin_lock_irq(&sem->lock); | ||
224 | if (waiter.up) | ||
225 | return 0; | ||
226 | } | ||
227 | |||
228 | timed_out: | ||
229 | list_del(&waiter.list); | ||
230 | return -ETIME; | ||
231 | |||
232 | interrupted: | ||
233 | list_del(&waiter.list); | ||
234 | return -EINTR; | ||
235 | } | ||
236 | |||
237 | static noinline void __sched __down(struct semaphore *sem) | ||
238 | { | ||
239 | __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); | ||
240 | } | ||
241 | |||
242 | static noinline int __sched __down_interruptible(struct semaphore *sem) | ||
243 | { | ||
244 | return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); | ||
245 | } | ||
246 | |||
247 | static noinline int __sched __down_killable(struct semaphore *sem) | ||
248 | { | ||
249 | return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT); | ||
250 | } | ||
251 | |||
252 | static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies) | ||
253 | { | ||
254 | return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies); | ||
255 | } | ||
256 | |||
257 | static noinline void __sched __up(struct semaphore *sem) | ||
258 | { | ||
259 | struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, | ||
260 | struct semaphore_waiter, list); | ||
261 | list_del(&waiter->list); | ||
262 | waiter->up = 1; | ||
263 | wake_up_process(waiter->task); | ||
264 | } | ||
diff --git a/kernel/signal.c b/kernel/signal.c index 6af1210092c3..cc8303cd093d 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -1757,6 +1757,45 @@ static int do_signal_stop(int signr) | |||
1757 | return 1; | 1757 | return 1; |
1758 | } | 1758 | } |
1759 | 1759 | ||
1760 | static int ptrace_signal(int signr, siginfo_t *info, | ||
1761 | struct pt_regs *regs, void *cookie) | ||
1762 | { | ||
1763 | if (!(current->ptrace & PT_PTRACED)) | ||
1764 | return signr; | ||
1765 | |||
1766 | ptrace_signal_deliver(regs, cookie); | ||
1767 | |||
1768 | /* Let the debugger run. */ | ||
1769 | ptrace_stop(signr, 0, info); | ||
1770 | |||
1771 | /* We're back. Did the debugger cancel the sig? */ | ||
1772 | signr = current->exit_code; | ||
1773 | if (signr == 0) | ||
1774 | return signr; | ||
1775 | |||
1776 | current->exit_code = 0; | ||
1777 | |||
1778 | /* Update the siginfo structure if the signal has | ||
1779 | changed. If the debugger wanted something | ||
1780 | specific in the siginfo structure then it should | ||
1781 | have updated *info via PTRACE_SETSIGINFO. */ | ||
1782 | if (signr != info->si_signo) { | ||
1783 | info->si_signo = signr; | ||
1784 | info->si_errno = 0; | ||
1785 | info->si_code = SI_USER; | ||
1786 | info->si_pid = task_pid_vnr(current->parent); | ||
1787 | info->si_uid = current->parent->uid; | ||
1788 | } | ||
1789 | |||
1790 | /* If the (new) signal is now blocked, requeue it. */ | ||
1791 | if (sigismember(¤t->blocked, signr)) { | ||
1792 | specific_send_sig_info(signr, info, current); | ||
1793 | signr = 0; | ||
1794 | } | ||
1795 | |||
1796 | return signr; | ||
1797 | } | ||
1798 | |||
1760 | int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, | 1799 | int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, |
1761 | struct pt_regs *regs, void *cookie) | 1800 | struct pt_regs *regs, void *cookie) |
1762 | { | 1801 | { |
@@ -1785,36 +1824,10 @@ relock: | |||
1785 | if (!signr) | 1824 | if (!signr) |
1786 | break; /* will return 0 */ | 1825 | break; /* will return 0 */ |
1787 | 1826 | ||
1788 | if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) { | 1827 | if (signr != SIGKILL) { |
1789 | ptrace_signal_deliver(regs, cookie); | 1828 | signr = ptrace_signal(signr, info, regs, cookie); |
1790 | 1829 | if (!signr) | |
1791 | /* Let the debugger run. */ | ||
1792 | ptrace_stop(signr, 0, info); | ||
1793 | |||
1794 | /* We're back. Did the debugger cancel the sig? */ | ||
1795 | signr = current->exit_code; | ||
1796 | if (signr == 0) | ||
1797 | continue; | ||
1798 | |||
1799 | current->exit_code = 0; | ||
1800 | |||
1801 | /* Update the siginfo structure if the signal has | ||
1802 | changed. If the debugger wanted something | ||
1803 | specific in the siginfo structure then it should | ||
1804 | have updated *info via PTRACE_SETSIGINFO. */ | ||
1805 | if (signr != info->si_signo) { | ||
1806 | info->si_signo = signr; | ||
1807 | info->si_errno = 0; | ||
1808 | info->si_code = SI_USER; | ||
1809 | info->si_pid = task_pid_vnr(current->parent); | ||
1810 | info->si_uid = current->parent->uid; | ||
1811 | } | ||
1812 | |||
1813 | /* If the (new) signal is now blocked, requeue it. */ | ||
1814 | if (sigismember(¤t->blocked, signr)) { | ||
1815 | specific_send_sig_info(signr, info, current); | ||
1816 | continue; | 1830 | continue; |
1817 | } | ||
1818 | } | 1831 | } |
1819 | 1832 | ||
1820 | ka = ¤t->sighand->action[signr-1]; | 1833 | ka = ¤t->sighand->action[signr-1]; |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index e1bd50cbbf5d..fdfa0c745bb6 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/cpu.h> | 14 | #include <linux/cpu.h> |
15 | #include <linux/err.h> | 15 | #include <linux/err.h> |
16 | #include <linux/hrtimer.h> | 16 | #include <linux/hrtimer.h> |
17 | #include <linux/irq.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/percpu.h> | 18 | #include <linux/percpu.h> |
19 | #include <linux/profile.h> | 19 | #include <linux/profile.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 1bea399a9ef0..4f3886562b8c 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -14,12 +14,14 @@ | |||
14 | #include <linux/cpu.h> | 14 | #include <linux/cpu.h> |
15 | #include <linux/err.h> | 15 | #include <linux/err.h> |
16 | #include <linux/hrtimer.h> | 16 | #include <linux/hrtimer.h> |
17 | #include <linux/irq.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/percpu.h> | 18 | #include <linux/percpu.h> |
19 | #include <linux/profile.h> | 19 | #include <linux/profile.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/tick.h> | 21 | #include <linux/tick.h> |
22 | 22 | ||
23 | #include <asm/irq_regs.h> | ||
24 | |||
23 | #include "tick-internal.h" | 25 | #include "tick-internal.h" |
24 | 26 | ||
25 | /* | 27 | /* |
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index 0258d3115d54..450c04935b66 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/cpu.h> | 14 | #include <linux/cpu.h> |
15 | #include <linux/err.h> | 15 | #include <linux/err.h> |
16 | #include <linux/hrtimer.h> | 16 | #include <linux/hrtimer.h> |
17 | #include <linux/irq.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/percpu.h> | 18 | #include <linux/percpu.h> |
19 | #include <linux/profile.h> | 19 | #include <linux/profile.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |