Diffstat (limited to 'kernel')
-rw-r--r--	kernel/compat.c    |  2
-rw-r--r--	kernel/cpu.c       | 12
-rw-r--r--	kernel/exit.c      |  1
-rw-r--r--	kernel/fork.c      |  3
-rw-r--r--	kernel/module.c    |  2
-rw-r--r--	kernel/taskstats.c | 70
-rw-r--r--	kernel/time/ntp.c  |  2
-rw-r--r--	kernel/tsacct.c    |  7
-rw-r--r--	kernel/workqueue.c |  6
9 files changed, 48 insertions(+), 57 deletions(-)
diff --git a/kernel/compat.c b/kernel/compat.c
index 75573e5d27b0..d4898aad6cfa 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -678,7 +678,7 @@ int get_compat_sigevent(struct sigevent *event,
 		? -EFAULT : 0;
 }
 
-long compat_get_bitmap(unsigned long *mask, compat_ulong_t __user *umask,
+long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
 		       unsigned long bitmap_size)
 {
 	int i, j;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 27dd3ee47099..663c920b2234 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -150,18 +150,18 @@ static int _cpu_down(unsigned int cpu)
 	p = __stop_machine_run(take_cpu_down, NULL, cpu);
 	mutex_unlock(&cpu_bitmask_lock);
 
-	if (IS_ERR(p)) {
+	if (IS_ERR(p) || cpu_online(cpu)) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
 				(void *)(long)cpu) == NOTIFY_BAD)
 			BUG();
 
-		err = PTR_ERR(p);
-		goto out_allowed;
-	}
-
-	if (cpu_online(cpu))
+		if (IS_ERR(p)) {
+			err = PTR_ERR(p);
+			goto out_allowed;
+		}
 		goto out_thread;
+	}
 
 	/* Wait for it to sleep (leaving idle task). */
 	while (!idle_cpu(cpu))
diff --git a/kernel/exit.c b/kernel/exit.c
index f250a5e3e281..06de6c4e8ca3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -128,6 +128,7 @@ static void __exit_signal(struct task_struct *tsk)
 	flush_sigqueue(&tsk->pending);
 	if (sig) {
 		flush_sigqueue(&sig->shared_pending);
+		taskstats_tgid_free(sig);
 		__cleanup_signal(sig);
 	}
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 29ebb30850ed..3da978eec791 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -830,7 +830,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 	if (clone_flags & CLONE_THREAD) {
 		atomic_inc(&current->signal->count);
 		atomic_inc(&current->signal->live);
-		taskstats_tgid_alloc(current->signal);
+		taskstats_tgid_alloc(current);
 		return 0;
 	}
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
@@ -897,7 +897,6 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 void __cleanup_signal(struct signal_struct *sig)
 {
 	exit_thread_group_keys(sig);
-	taskstats_tgid_free(sig);
 	kmem_cache_free(signal_cachep, sig);
 }
 
diff --git a/kernel/module.c b/kernel/module.c
index 67009bd56c52..5072a943fe35 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1342,7 +1342,7 @@ static void set_license(struct module *mod, const char *license)
 
 	if (!license_is_gpl_compatible(license)) {
 		if (!(tainted & TAINT_PROPRIETARY_MODULE))
-			printk(KERN_WARNING "%s: module license '%s' taints"
+			printk(KERN_WARNING "%s: module license '%s' taints "
 				"kernel.\n", mod->name, license);
 		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
 	}
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 5d6a8c54ee85..2039585ec5e1 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -77,7 +77,8 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
 	/*
 	 * If new attributes are added, please revisit this allocation
 	 */
-	skb = nlmsg_new(genlmsg_total_size(size), GFP_KERNEL);
+	size = nlmsg_total_size(genlmsg_total_size(size));
+	skb = nlmsg_new(size, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 
@@ -174,21 +175,19 @@ static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
 	up_write(&listeners->sem);
 }
 
-static int fill_pid(pid_t pid, struct task_struct *pidtsk,
+static int fill_pid(pid_t pid, struct task_struct *tsk,
 		struct taskstats *stats)
 {
 	int rc = 0;
-	struct task_struct *tsk = pidtsk;
 
-	if (!pidtsk) {
-		read_lock(&tasklist_lock);
+	if (!tsk) {
+		rcu_read_lock();
 		tsk = find_task_by_pid(pid);
-		if (!tsk) {
-			read_unlock(&tasklist_lock);
+		if (tsk)
+			get_task_struct(tsk);
+		rcu_read_unlock();
+		if (!tsk)
 			return -ESRCH;
-		}
-		get_task_struct(tsk);
-		read_unlock(&tasklist_lock);
 	} else
 		get_task_struct(tsk);
 
@@ -214,39 +213,30 @@ static int fill_pid(pid_t pid, struct task_struct *pidtsk,
 
 }
 
-static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
+static int fill_tgid(pid_t tgid, struct task_struct *first,
 		struct taskstats *stats)
 {
-	struct task_struct *tsk, *first;
+	struct task_struct *tsk;
 	unsigned long flags;
+	int rc = -ESRCH;
 
 	/*
 	 * Add additional stats from live tasks except zombie thread group
 	 * leaders who are already counted with the dead tasks
 	 */
-	first = tgidtsk;
-	if (!first) {
-		read_lock(&tasklist_lock);
+	rcu_read_lock();
+	if (!first)
 		first = find_task_by_pid(tgid);
-		if (!first) {
-			read_unlock(&tasklist_lock);
-			return -ESRCH;
-		}
-		get_task_struct(first);
-		read_unlock(&tasklist_lock);
-	} else
-		get_task_struct(first);
 
-	/* Start with stats from dead tasks */
-	spin_lock_irqsave(&first->signal->stats_lock, flags);
+	if (!first || !lock_task_sighand(first, &flags))
+		goto out;
+
 	if (first->signal->stats)
 		memcpy(stats, first->signal->stats, sizeof(*stats));
-	spin_unlock_irqrestore(&first->signal->stats_lock, flags);
 
 	tsk = first;
-	read_lock(&tasklist_lock);
 	do {
-		if (tsk->exit_state == EXIT_ZOMBIE && thread_group_leader(tsk))
+		if (tsk->exit_state)
 			continue;
 		/*
 		 * Accounting subsystem can call its functions here to
@@ -257,15 +247,18 @@ static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
 		delayacct_add_tsk(stats, tsk);
 
 	} while_each_thread(first, tsk);
-	read_unlock(&tasklist_lock);
-	stats->version = TASKSTATS_VERSION;
 
+	unlock_task_sighand(first, &flags);
+	rc = 0;
+out:
+	rcu_read_unlock();
+
+	stats->version = TASKSTATS_VERSION;
 	/*
 	 * Accounting subsytems can also add calls here to modify
 	 * fields of taskstats.
 	 */
-
-	return 0;
+	return rc;
 }
 
 
@@ -273,7 +266,7 @@ static void fill_tgid_exit(struct task_struct *tsk)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&tsk->signal->stats_lock, flags);
+	spin_lock_irqsave(&tsk->sighand->siglock, flags);
 	if (!tsk->signal->stats)
 		goto ret;
 
@@ -285,7 +278,7 @@ static void fill_tgid_exit(struct task_struct *tsk)
 	 */
 	delayacct_add_tsk(tsk->signal->stats, tsk);
 ret:
-	spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
+	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 	return;
 }
 
@@ -419,7 +412,7 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 	return send_reply(rep_skb, info->snd_pid);
 
 nla_put_failure:
-	return genlmsg_cancel(rep_skb, reply);
+	rc = genlmsg_cancel(rep_skb, reply);
 err:
 	nlmsg_free(rep_skb);
 	return rc;
@@ -461,15 +454,10 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
 	size_t size;
 	int is_thread_group;
 	struct nlattr *na;
-	unsigned long flags;
 
 	if (!family_registered || !tidstats)
 		return;
 
-	spin_lock_irqsave(&tsk->signal->stats_lock, flags);
-	is_thread_group = tsk->signal->stats ? 1 : 0;
-	spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
-
 	rc = 0;
 	/*
 	 * Size includes space for nested attributes
@@ -477,6 +465,7 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
 	size = nla_total_size(sizeof(u32)) +
 		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
 
+	is_thread_group = (tsk->signal->stats != NULL);
 	if (is_thread_group)
 		size = 2 * size;	/* PID + STATS + TGID + STATS */
 
@@ -519,7 +508,6 @@ send:
 
 nla_put_failure:
 	genlmsg_cancel(rep_skb, reply);
-	goto ret;
 err_skb:
 	nlmsg_free(rep_skb);
 ret:
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 47195fa0ec4f..3afeaa3a73f9 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -161,9 +161,9 @@ void second_overflow(void)
 			time_adjust += MAX_TICKADJ;
 			tick_length -= MAX_TICKADJ_SCALED;
 		} else {
-			time_adjust = 0;
 			tick_length += (s64)(time_adjust * NSEC_PER_USEC /
 					HZ) << TICK_LENGTH_SHIFT;
+			time_adjust = 0;
 		}
 	}
 }
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index db443221ba5b..65a5036a3d95 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -36,7 +36,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
 
 	/* calculate task elapsed time in timespec */
 	do_posix_clock_monotonic_gettime(&uptime);
-	ts = timespec_sub(uptime, current->group_leader->start_time);
+	ts = timespec_sub(uptime, tsk->start_time);
 	/* rebase elapsed time to usec */
 	ac_etime = timespec_to_ns(&ts);
 	do_div(ac_etime, NSEC_PER_USEC);
@@ -58,7 +58,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
 	stats->ac_uid	 = tsk->uid;
 	stats->ac_gid	 = tsk->gid;
 	stats->ac_pid	 = tsk->pid;
-	stats->ac_ppid	 = (tsk->parent) ? tsk->parent->pid : 0;
+	rcu_read_lock();
+	stats->ac_ppid	 = pid_alive(tsk) ?
+				rcu_dereference(tsk->real_parent)->tgid : 0;
+	rcu_read_unlock();
 	stats->ac_utime	 = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
 	stats->ac_stime	 = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
 	stats->ac_minflt = tsk->min_flt;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3df9bfc7ff78..17c2f03d2c27 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -99,7 +99,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  * @wq: workqueue to use
  * @work: work to queue
  *
- * Returns non-zero if it was successfully added.
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
  *
  * We queue the work to the CPU it was submitted, but there is no
  * guarantee that it will be processed by that CPU.
@@ -138,7 +138,7 @@ static void delayed_work_timer_fn(unsigned long __data)
  * @work: work to queue
  * @delay: number of jiffies to wait before queueing
  *
- * Returns non-zero if it was successfully added.
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
 			struct work_struct *work, unsigned long delay)
@@ -169,7 +169,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * @work: work to queue
  * @delay: number of jiffies to wait before queueing
  *
- * Returns non-zero if it was successfully added.
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct work_struct *work, unsigned long delay)