Diffstat (limited to 'kernel')

 kernel/Makefile            |  4
 kernel/audit_tree.c        | 91
 kernel/auditfilter.c       | 14
 kernel/cgroup_freezer.c    | 19
 kernel/cpuset.c            | 12
 kernel/exit.c              |  9
 kernel/fork.c              | 11
 kernel/hrtimer.c           |  9
 kernel/kprobes.c           | 23
 kernel/posix-cpu-timers.c  |  7
 kernel/power/main.c        |  2
 kernel/relay.c             |  9
 kernel/sched.c             | 18
 kernel/sched_debug.c       |  5
 kernel/sched_stats.h       | 15
 kernel/stop_machine.c      |  5

 16 files changed, 154 insertions(+), 99 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 9a3ec66a9d84..19fad003b19d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -11,8 +11,6 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
             hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
             notifier.o ksysfs.o pm_qos_params.o sched_clock.o
 
-CFLAGS_REMOVE_sched.o = -mno-spe
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
 CFLAGS_REMOVE_lockdep.o = -pg
@@ -21,7 +19,7 @@ CFLAGS_REMOVE_mutex-debug.o = -pg
 CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
-CFLAGS_REMOVE_sched.o = -mno-spe -pg
+CFLAGS_REMOVE_sched.o = -pg
 endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 8ba0e0d934f2..8b509441f49a 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -24,6 +24,7 @@ struct audit_chunk {
         struct list_head trees;         /* with root here */
         int dead;
         int count;
+        atomic_long_t refs;
         struct rcu_head head;
         struct node {
                 struct list_head list;
@@ -56,7 +57,8 @@ static LIST_HEAD(prune_list);
  * tree is refcounted; one reference for "some rules on rules_list refer to
  * it", one for each chunk with pointer to it.
  *
- * chunk is refcounted by embedded inotify_watch.
+ * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
+ * of watch contributes 1 to .refs).
  *
  * node.index allows to get from node.list to containing chunk.
  * MSB of that sucker is stolen to mark taggings that we might have to
@@ -121,6 +123,7 @@ static struct audit_chunk *alloc_chunk(int count)
         INIT_LIST_HEAD(&chunk->hash);
         INIT_LIST_HEAD(&chunk->trees);
         chunk->count = count;
+        atomic_long_set(&chunk->refs, 1);
         for (i = 0; i < count; i++) {
                 INIT_LIST_HEAD(&chunk->owners[i].list);
                 chunk->owners[i].index = i;
@@ -129,9 +132,8 @@ static struct audit_chunk *alloc_chunk(int count)
         return chunk;
 }
 
-static void __free_chunk(struct rcu_head *rcu)
+static void free_chunk(struct audit_chunk *chunk)
 {
-        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
         int i;
 
         for (i = 0; i < chunk->count; i++) {
@@ -141,14 +143,16 @@ static void __free_chunk(struct rcu_head *rcu)
         kfree(chunk);
 }
 
-static inline void free_chunk(struct audit_chunk *chunk)
+void audit_put_chunk(struct audit_chunk *chunk)
 {
-        call_rcu(&chunk->head, __free_chunk);
+        if (atomic_long_dec_and_test(&chunk->refs))
+                free_chunk(chunk);
 }
 
-void audit_put_chunk(struct audit_chunk *chunk)
+static void __put_chunk(struct rcu_head *rcu)
 {
-        put_inotify_watch(&chunk->watch);
+        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
+        audit_put_chunk(chunk);
 }
 
 enum {HASH_SIZE = 128};
@@ -176,7 +180,7 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 
         list_for_each_entry_rcu(p, list, hash) {
                 if (p->watch.inode == inode) {
-                        get_inotify_watch(&p->watch);
+                        atomic_long_inc(&p->refs);
                         return p;
                 }
         }
@@ -194,17 +198,49 @@ int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
 
 /* tagging and untagging inodes with trees */
 
-static void untag_chunk(struct audit_chunk *chunk, struct node *p)
+static struct audit_chunk *find_chunk(struct node *p)
+{
+        int index = p->index & ~(1U<<31);
+        p -= index;
+        return container_of(p, struct audit_chunk, owners[0]);
+}
+
+static void untag_chunk(struct node *p)
 {
+        struct audit_chunk *chunk = find_chunk(p);
         struct audit_chunk *new;
         struct audit_tree *owner;
         int size = chunk->count - 1;
         int i, j;
 
+        if (!pin_inotify_watch(&chunk->watch)) {
+                /*
+                 * Filesystem is shutting down; all watches are getting
+                 * evicted, just take it off the node list for this
+                 * tree and let the eviction logics take care of the
+                 * rest.
+                 */
+                owner = p->owner;
+                if (owner->root == chunk) {
+                        list_del_init(&owner->same_root);
+                        owner->root = NULL;
+                }
+                list_del_init(&p->list);
+                p->owner = NULL;
+                put_tree(owner);
+                return;
+        }
+
+        spin_unlock(&hash_lock);
+
+        /*
+         * pin_inotify_watch() succeeded, so the watch won't go away
+         * from under us.
+         */
         mutex_lock(&chunk->watch.inode->inotify_mutex);
         if (chunk->dead) {
                 mutex_unlock(&chunk->watch.inode->inotify_mutex);
-                return;
+                goto out;
         }
 
         owner = p->owner;
@@ -221,7 +257,7 @@ static void untag_chunk(struct audit_chunk *chunk, struct node *p)
                 inotify_evict_watch(&chunk->watch);
                 mutex_unlock(&chunk->watch.inode->inotify_mutex);
                 put_inotify_watch(&chunk->watch);
-                return;
+                goto out;
         }
 
         new = alloc_chunk(size);
@@ -263,7 +299,7 @@ static void untag_chunk(struct audit_chunk *chunk, struct node *p)
         inotify_evict_watch(&chunk->watch);
         mutex_unlock(&chunk->watch.inode->inotify_mutex);
         put_inotify_watch(&chunk->watch);
-        return;
+        goto out;
 
 Fallback:
         // do the best we can
@@ -277,6 +313,9 @@ Fallback:
         put_tree(owner);
         spin_unlock(&hash_lock);
         mutex_unlock(&chunk->watch.inode->inotify_mutex);
+out:
+        unpin_inotify_watch(&chunk->watch);
+        spin_lock(&hash_lock);
 }
 
 static int create_chunk(struct inode *inode, struct audit_tree *tree)
@@ -387,13 +426,6 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
         return 0;
 }
 
-static struct audit_chunk *find_chunk(struct node *p)
-{
-        int index = p->index & ~(1U<<31);
-        p -= index;
-        return container_of(p, struct audit_chunk, owners[0]);
-}
-
 static void kill_rules(struct audit_tree *tree)
 {
         struct audit_krule *rule, *next;
@@ -431,17 +463,10 @@ static void prune_one(struct audit_tree *victim)
         spin_lock(&hash_lock);
         while (!list_empty(&victim->chunks)) {
                 struct node *p;
-                struct audit_chunk *chunk;
 
                 p = list_entry(victim->chunks.next, struct node, list);
-                chunk = find_chunk(p);
-                get_inotify_watch(&chunk->watch);
-                spin_unlock(&hash_lock);
-
-                untag_chunk(chunk, p);
 
-                put_inotify_watch(&chunk->watch);
-                spin_lock(&hash_lock);
+                untag_chunk(p);
         }
         spin_unlock(&hash_lock);
         put_tree(victim);
@@ -469,7 +494,6 @@ static void trim_marked(struct audit_tree *tree)
 
         while (!list_empty(&tree->chunks)) {
                 struct node *node;
-                struct audit_chunk *chunk;
 
                 node = list_entry(tree->chunks.next, struct node, list);
 
@@ -477,14 +501,7 @@ static void trim_marked(struct audit_tree *tree)
                 if (!(node->index & (1U<<31)))
                         break;
 
-                chunk = find_chunk(node);
-                get_inotify_watch(&chunk->watch);
-                spin_unlock(&hash_lock);
-
-                untag_chunk(chunk, node);
-
-                put_inotify_watch(&chunk->watch);
-                spin_lock(&hash_lock);
+                untag_chunk(node);
         }
         if (!tree->root && !tree->goner) {
                 tree->goner = 1;
@@ -878,7 +895,7 @@ static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
 static void destroy_watch(struct inotify_watch *watch)
 {
         struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
-        free_chunk(chunk);
+        call_rcu(&chunk->head, __put_chunk);
 }
 
 static const struct inotify_operations rtree_inotify_ops = {
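The audit_tree.c changes replace "chunk is pinned by its inotify watch" with a split scheme: RCU hash lookups bump the new atomic_long_t .refs, the watch's final put is routed through call_rcu() into audit_put_chunk(), and untag_chunk() pins the watch only across the work that needs it. Below is a minimal userspace sketch of the counted-reference half only, with illustrative names (chunk_get/chunk_put) and the RCU deferral left out; it is not the kernel code itself.

/* Userspace sketch: object freed on the last counted reference. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct chunk {
        atomic_long refs;       /* starts at 1, owned by the "watch" */
        int payload;
};

static struct chunk *chunk_alloc(int payload)
{
        struct chunk *c = malloc(sizeof(*c));

        if (!c)
                return NULL;
        atomic_init(&c->refs, 1);
        c->payload = payload;
        return c;
}

static void chunk_get(struct chunk *c)
{
        atomic_fetch_add(&c->refs, 1); /* like atomic_long_inc(&p->refs) */
}

static void chunk_put(struct chunk *c)
{
        /* like audit_put_chunk(): free on the 1 -> 0 transition */
        if (atomic_fetch_sub(&c->refs, 1) == 1)
                free(c);
}

int main(void)
{
        struct chunk *c = chunk_alloc(42);

        if (!c)
                return 1;
        chunk_get(c);           /* a lookup takes its own reference */
        printf("payload=%d\n", c->payload);
        chunk_put(c);           /* lookup done */
        chunk_put(c);           /* "watch" gone: object freed */
        return 0;
}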
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index b7d354e2b0ef..9fd85a4640a0 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1094,8 +1094,8 @@ static void audit_inotify_unregister(struct list_head *in_list)
         list_for_each_entry_safe(p, n, in_list, ilist) {
                 list_del(&p->ilist);
                 inotify_rm_watch(audit_ih, &p->wdata);
-                /* the put matching the get in audit_do_del_rule() */
-                put_inotify_watch(&p->wdata);
+                /* the unpin matching the pin in audit_do_del_rule() */
+                unpin_inotify_watch(&p->wdata);
         }
 }
 
@@ -1389,9 +1389,13 @@ static inline int audit_del_rule(struct audit_entry *entry,
                                 /* Put parent on the inotify un-registration
                                  * list. Grab a reference before releasing
                                  * audit_filter_mutex, to be released in
-                                 * audit_inotify_unregister(). */
-                                list_add(&parent->ilist, &inotify_list);
-                                get_inotify_watch(&parent->wdata);
+                                 * audit_inotify_unregister().
+                                 * If filesystem is going away, just leave
+                                 * the sucker alone, eviction will take
+                                 * care of it.
+                                 */
+                                if (pin_inotify_watch(&parent->wdata))
+                                        list_add(&parent->ilist, &inotify_list);
                         }
                 }
         }
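pin_inotify_watch(), which replaces get_inotify_watch() above, is a conditional grab: it refuses to resurrect a watch whose refcount has already reached zero (as happens during umount) and reports that failure so the caller can skip the deferred unregister. A userspace sketch of such an "increment unless zero" primitive, under illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

struct watch {
        atomic_long count;
};

/* Like pin_inotify_watch(): never resurrects a zero refcount. */
static bool watch_pin(struct watch *w)
{
        long old = atomic_load(&w->count);

        while (old > 0)
                if (atomic_compare_exchange_weak(&w->count, &old, old + 1))
                        return true;    /* pinned */
        return false;   /* already on its way out: leave it alone */
}

static void watch_unpin(struct watch *w)
{
        atomic_fetch_sub(&w->count, 1);
}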
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 7fa476f01d05..fb249e2bcada 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -184,9 +184,20 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 {
         struct freezer *freezer;
 
-        task_lock(task);
+        /*
+         * No lock is needed, since the task isn't on tasklist yet,
+         * so it can't be moved to another cgroup, which means the
+         * freezer won't be removed and will be valid during this
+         * function call.
+         */
         freezer = task_freezer(task);
-        task_unlock(task);
+
+        /*
+         * The root cgroup is non-freezable, so we can skip the
+         * following check.
+         */
+        if (!freezer->css.cgroup->parent)
+                return;
 
         spin_lock_irq(&freezer->lock);
         BUG_ON(freezer->state == CGROUP_FROZEN);
@@ -331,7 +342,7 @@ static int freezer_write(struct cgroup *cgroup,
         else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
                 goal_state = CGROUP_FROZEN;
         else
-                return -EIO;
+                return -EINVAL;
 
         if (!cgroup_lock_live_group(cgroup))
                 return -ENODEV;
@@ -350,6 +361,8 @@ static struct cftype files[] = {
 
 static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
 {
+        if (!cgroup->parent)
+                return 0;
         return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
 }
 
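Two independent fixes here: freezer_fork() can run lockless because a freshly forked task is not yet on the tasklist (and the unfreezable root cgroup is now skipped outright), while freezer_write() returns -EINVAL for an unrecognized state string, since the defect is in the caller's argument rather than the I/O path. A sketch of the fixed parsing, assuming an illustrative helper name:

#include <errno.h>
#include <string.h>

enum freezer_state { CGROUP_THAWED, CGROUP_FREEZING, CGROUP_FROZEN };

static const char *freezer_state_strs[] = { "THAWED", "FREEZING", "FROZEN" };

static int parse_freezer_state(const char *buffer, enum freezer_state *out)
{
        if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
                *out = CGROUP_THAWED;
        else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
                *out = CGROUP_FROZEN;  /* FREEZING cannot be requested */
        else
                return -EINVAL;        /* bad user input, not an I/O error */
        return 0;
}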
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 3e00526f52ec..81fc6791a296 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -587,7 +587,6 @@ static int generate_sched_domains(cpumask_t **domains,
         int ndoms;              /* number of sched domains in result */
         int nslot;              /* next empty doms[] cpumask_t slot */
 
-        ndoms = 0;
         doms = NULL;
         dattr = NULL;
         csa = NULL;
@@ -674,10 +673,8 @@ restart:
          * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
          */
         doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
-        if (!doms) {
-                ndoms = 0;
+        if (!doms)
                 goto done;
-        }
 
         /*
          * The rest of the code, including the scheduler, can deal with
@@ -732,6 +729,13 @@ restart:
 done:
         kfree(csa);
 
+        /*
+         * Fallback to the default domain if kmalloc() failed.
+         * See comments in partition_sched_domains().
+         */
+        if (doms == NULL)
+                ndoms = 1;
+
         *domains    = doms;
         *attributes = dattr;
         return ndoms;
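The cpuset hunks set up a contract between generate_sched_domains() and partition_sched_domains(): on kmalloc() failure, report ndoms == 1 with a NULL array, and let the caller substitute its static fallback domain. A simplified sketch of both halves, with mask_t standing in for cpumask_t and all names illustrative:

#include <stdlib.h>

typedef struct { unsigned long bits[4]; } mask_t;

static mask_t fallback_doms;    /* analogue of the scheduler's fallback */

/* Returns the domain count; *domains == NULL means "use the fallback". */
static int generate_domains(mask_t **domains, int wanted)
{
        mask_t *doms = malloc(sizeof(*doms) * wanted);

        /* ... on success, fill doms[0 .. wanted-1] here ... */

        if (doms == NULL)
                wanted = 1;     /* fallback to the single default domain */
        *domains = doms;
        return wanted;
}

static void partition_domains(int ndoms, mask_t *doms)
{
        if (doms == NULL) {     /* caller-side half of the contract */
                ndoms = 1;
                doms = &fallback_doms;
        }
        /* ... rebuild scheduling domains from doms[0 .. ndoms-1] ... */
        (void)ndoms;
        (void)doms;
}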
diff --git a/kernel/exit.c b/kernel/exit.c
index ae2b92be5fae..2d8be7ebb0f7 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -40,7 +40,6 @@
 #include <linux/cn_proc.h>
 #include <linux/mutex.h>
 #include <linux/futex.h>
-#include <linux/compat.h>
 #include <linux/pipe_fs_i.h>
 #include <linux/audit.h> /* for audit_free() */
 #include <linux/resource.h>
@@ -1059,14 +1058,6 @@ NORET_TYPE void do_exit(long code)
                 exit_itimers(tsk->signal);
         }
         acct_collect(code, group_dead);
-#ifdef CONFIG_FUTEX
-        if (unlikely(tsk->robust_list))
-                exit_robust_list(tsk);
-#ifdef CONFIG_COMPAT
-        if (unlikely(tsk->compat_robust_list))
-                compat_exit_robust_list(tsk);
-#endif
-#endif
         if (group_dead)
                 tty_audit_exit();
         if (unlikely(tsk->audit_context))
diff --git a/kernel/fork.c b/kernel/fork.c
index f6083561dfe0..2a372a0e206f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -40,6 +40,7 @@
 #include <linux/jiffies.h>
 #include <linux/tracehook.h>
 #include <linux/futex.h>
+#include <linux/compat.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/rcupdate.h>
 #include <linux/ptrace.h>
@@ -519,6 +520,16 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 {
         struct completion *vfork_done = tsk->vfork_done;
 
+        /* Get rid of any futexes when releasing the mm */
+#ifdef CONFIG_FUTEX
+        if (unlikely(tsk->robust_list))
+                exit_robust_list(tsk);
+#ifdef CONFIG_COMPAT
+        if (unlikely(tsk->compat_robust_list))
+                compat_exit_robust_list(tsk);
+#endif
+#endif
+
         /* Get rid of any cached register state */
         deactivate_mm(tsk, mm);
 
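Paired with the exit.c hunk, this moves the robust-futex walk from do_exit() into mm_release(), which runs on every path that drops an mm, exec as well as exit, and while the user memory the lists point into is still mapped; that is also why <linux/compat.h> migrates from exit.c to fork.c. A small sketch of the nested compile-time gating idiom, with illustrative HAVE_* macros standing in for CONFIG_FUTEX/CONFIG_COMPAT:

#include <stdio.h>

struct task {
        void *robust_list;
        void *compat_robust_list;
};

static void release_robust_lists(struct task *t)
{
#ifdef HAVE_FEATURE
        if (t->robust_list)
                printf("walking native robust list\n");
#ifdef HAVE_COMPAT
        if (t->compat_robust_list)
                printf("walking 32-bit compat robust list\n");
#endif
#endif
        (void)t;        /* silence unused warning when gated out */
}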
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 95d3949f2ae5..47e63349d1b2 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -664,14 +664,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 
         /* Timer is expired, act upon the callback mode */
         switch(timer->cb_mode) {
-        case HRTIMER_CB_IRQSAFE_NO_RESTART:
-                debug_hrtimer_deactivate(timer);
-                /*
-                 * We can call the callback from here. No restart
-                 * happens, so no danger of recursion
-                 */
-                BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
-                return 1;
         case HRTIMER_CB_IRQSAFE_PERCPU:
         case HRTIMER_CB_IRQSAFE_UNLOCKED:
                 /*
@@ -683,7 +675,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                  */
                 debug_hrtimer_deactivate(timer);
                 return 1;
-        case HRTIMER_CB_IRQSAFE:
         case HRTIMER_CB_SOFTIRQ:
                 /*
                  * Move everything else into the softirq pending list !
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 8b57a2597f21..9f8a3f25259a 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -72,7 +72,7 @@ static bool kprobe_enabled;
 DEFINE_MUTEX(kprobe_mutex);             /* Protects kprobe_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
-        spinlock_t lock ____cacheline_aligned;
+        spinlock_t lock ____cacheline_aligned_in_smp;
 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
 
 static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
@@ -613,30 +613,37 @@ static int __kprobes __register_kprobe(struct kprobe *p,
                 return -EINVAL;
         p->addr = addr;
 
-        if (!kernel_text_address((unsigned long) p->addr) ||
-            in_kprobes_functions((unsigned long) p->addr))
+        preempt_disable();
+        if (!__kernel_text_address((unsigned long) p->addr) ||
+            in_kprobes_functions((unsigned long) p->addr)) {
+                preempt_enable();
                 return -EINVAL;
+        }
 
         p->mod_refcounted = 0;
 
         /*
          * Check if are we probing a module.
          */
-        probed_mod = module_text_address((unsigned long) p->addr);
+        probed_mod = __module_text_address((unsigned long) p->addr);
         if (probed_mod) {
-                struct module *calling_mod = module_text_address(called_from);
+                struct module *calling_mod;
+                calling_mod = __module_text_address(called_from);
                 /*
                  * We must allow modules to probe themself and in this case
                  * avoid incrementing the module refcount, so as to allow
                  * unloading of self probing modules.
                  */
                 if (calling_mod && calling_mod != probed_mod) {
-                        if (unlikely(!try_module_get(probed_mod)))
+                        if (unlikely(!try_module_get(probed_mod))) {
+                                preempt_enable();
                                 return -EINVAL;
+                        }
                         p->mod_refcounted = 1;
                 } else
                         probed_mod = NULL;
         }
+        preempt_enable();
 
         p->nmissed = 0;
         INIT_LIST_HEAD(&p->list);
@@ -718,6 +725,10 @@ static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
         struct kprobe *old_p;
 
         if (p->mod_refcounted) {
+                /*
+                 * Since we've already incremented refcount,
+                 * we don't need to disable preemption.
+                 */
                 mod = module_text_address((unsigned long)p->addr);
                 if (mod)
                         module_put(mod);
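__register_kprobe() now uses __module_text_address() (no reference taken) inside a preempt_disable()/preempt_enable() pair: module unload synchronizes through stop_machine(), so a preempt-off region keeps the looked-up module alive until try_module_get() takes a durable reference. A userspace sketch of that lookup-then-pin ordering, with a rwlock standing in for the preempt-off region and all names illustrative:

#include <pthread.h>
#include <stdatomic.h>

struct module {
        atomic_long refs;
};

static pthread_rwlock_t unload_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct module the_module = { 1 };

static struct module *lookup_module(unsigned long addr)
{
        (void)addr;
        return &the_module;     /* stand-in address-to-module lookup */
}

static struct module *pin_module_for(unsigned long addr)
{
        struct module *m;

        pthread_rwlock_rdlock(&unload_lock);    /* ~ preempt_disable() */
        m = lookup_module(addr);
        if (m)  /* teardown is held off, so a plain increment suffices */
                atomic_fetch_add(&m->refs, 1);  /* ~ try_module_get() */
        pthread_rwlock_unlock(&unload_lock);    /* ~ preempt_enable() */
        return m;
}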
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 153dcb2639c3..895337b16a24 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1308,9 +1308,10 @@ static inline int task_cputime_expired(const struct task_cputime *sample,
  */
 static inline int fastpath_timer_check(struct task_struct *tsk)
 {
-        struct signal_struct *sig = tsk->signal;
+        struct signal_struct *sig;
 
-        if (unlikely(!sig))
+        /* tsk == current, ensure it is safe to use ->signal/sighand */
+        if (unlikely(tsk->exit_state))
                 return 0;
 
         if (!task_cputime_zero(&tsk->cputime_expires)) {
@@ -1323,6 +1324,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
                 if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                         return 1;
         }
+
+        sig = tsk->signal;
         if (!task_cputime_zero(&sig->cputime_expires)) {
                 struct task_cputime group_sample;
 
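Since fastpath_timer_check() always runs on current, a nonzero tsk->exit_state is a stable "teardown has begun" flag that can be tested before anything dereferences ->signal; the old code NULL-tested the very pointer that exit invalidates. The shape of the fix in miniature, with illustrative types:

#include <stddef.h>

struct signal_state { int pending; };

struct task {
        int exit_state;                 /* nonzero once exit begins */
        struct signal_state *signal;    /* released during exit */
};

/* Caller guarantees t == current, so t->exit_state is stable here. */
static int fastpath_check(struct task *t)
{
        struct signal_state *sig;

        if (t->exit_state)
                return 0;       /* exiting: don't touch ->signal at all */

        sig = t->signal;
        return sig->pending;
}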
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 19122cf6d827..b8f7ce9473e8 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -174,7 +174,7 @@ static void suspend_test_finish(const char *label)
          * has some performance issues.  The stack dump of a WARN_ON
          * is more likely to get the right attention than a printk...
          */
-        WARN_ON(msec > (TEST_SUSPEND_SECONDS * 1000));
+        WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label);
 }
 
 #else
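WARN() is WARN_ON() plus a printk-style message, so the stack dump now also names the slow component. A userspace approximation of the macro's shape; it relies on GNU C statement expressions and ##__VA_ARGS__, and the real kernel macro additionally dumps a backtrace and taints the kernel:

#include <stdio.h>

#define WARN(cond, fmt, ...)                                            \
({                                                                      \
        int __w = !!(cond);                                             \
        if (__w)                                                        \
                fprintf(stderr, "WARNING at %s:%d: " fmt,               \
                        __FILE__, __LINE__, ##__VA_ARGS__);             \
        __w;                                                            \
})

int main(void)
{
        int msec = 6000;

        WARN(msec > 5000, "Component: %s\n", "suspend devices");
        return 0;
}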
diff --git a/kernel/relay.c b/kernel/relay.c
index 8d13a7855c08..32b0befdcb6a 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -400,7 +400,7 @@ void relay_reset(struct rchan *chan)
         }
 
         mutex_lock(&relay_channels_mutex);
-        for_each_online_cpu(i)
+        for_each_possible_cpu(i)
                 if (chan->buf[i])
                         __relay_reset(chan->buf[i], 0);
         mutex_unlock(&relay_channels_mutex);
@@ -611,10 +611,9 @@ struct rchan *relay_open(const char *base_filename,
         return chan;
 
 free_bufs:
-        for_each_online_cpu(i) {
-                if (!chan->buf[i])
-                        break;
-                relay_close_buf(chan->buf[i]);
+        for_each_possible_cpu(i) {
+                if (chan->buf[i])
+                        relay_close_buf(chan->buf[i]);
         }
 
         kref_put(&chan->kref, relay_destroy_channel);
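Both relay hunks switch from online to possible CPUs, but the error path also fixes a separate bug: breaking at the first NULL buffer leaks every allocation sitting after a hole in the array. The corrected cleanup shape, isolated with illustrative names:

#include <stdlib.h>

#define NR_SLOTS 8

static void free_all(char *bufs[NR_SLOTS])
{
        int i;

        for (i = 0; i < NR_SLOTS; i++) {
                if (bufs[i])            /* skip holes, don't stop at them */
                        free(bufs[i]);
        }
}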
diff --git a/kernel/sched.c b/kernel/sched.c
index 50a21f964679..9b1e79371c20 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1456,6 +1456,8 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
         if (rq->nr_running)
                 rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+        else
+                rq->avg_load_per_task = 0;
 
         return rq->avg_load_per_task;
 }
@@ -5868,6 +5870,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         struct rq *rq = cpu_rq(cpu);
         unsigned long flags;
 
+        spin_lock_irqsave(&rq->lock, flags);
+
         __sched_fork(idle);
         idle->se.exec_start = sched_clock();
 
@@ -5875,7 +5879,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         idle->cpus_allowed = cpumask_of_cpu(cpu);
         __set_task_cpu(idle, cpu);
 
-        spin_lock_irqsave(&rq->lock, flags);
         rq->curr = rq->idle = idle;
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
         idle->oncpu = 1;
@@ -7786,13 +7789,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * The passed in 'doms_new' should be kmalloc'd. This routine takes
  * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL,
- * and partition_sched_domains() will fallback to the single partition
- * 'fallback_doms', it also forces the domains to be rebuilt.
+ * failed the kmalloc call, then it can pass in doms_new == NULL &&
+ * ndoms_new == 1, and partition_sched_domains() will fallback to
+ * the single partition 'fallback_doms', it also forces the domains
+ * to be rebuilt.
  *
- * If doms_new==NULL it will be replaced with cpu_online_map.
- * ndoms_new==0 is a special case for destroying existing domains.
- * It will not create the default domain.
+ * If doms_new == NULL it will be replaced with cpu_online_map.
+ * ndoms_new == 0 is a special case for destroying existing domains,
+ * and it will not create the default domain.
  *
  * Call with hotplug lock held
  */
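Of the three sched.c changes, cpu_avg_load_per_task() is the subtle one: the function caches its result in the runqueue, so the zero-tasks case must overwrite the cache rather than return a stale average to load balancing. (The init_idle() hunks simply widen rq->lock to cover __sched_fork(), and the comment hunk documents the doms_new == NULL && ndoms_new == 1 fallback convention used by cpuset.c above.) The cache fix isolated, with simplified types:

struct runqueue {
        unsigned long load_weight;
        unsigned long nr_running;
        unsigned long avg_load_per_task;        /* cached result */
};

static unsigned long avg_load_per_task(struct runqueue *rq)
{
        if (rq->nr_running)
                rq->avg_load_per_task = rq->load_weight / rq->nr_running;
        else
                rq->avg_load_per_task = 0;      /* was: stale cached value */

        return rq->avg_load_per_task;
}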
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 48ecc51e7701..26ed8e3d1c15 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -423,10 +423,11 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 #undef __P
 
         {
+                unsigned int this_cpu = raw_smp_processor_id();
                 u64 t0, t1;
 
-                t0 = sched_clock();
-                t1 = sched_clock();
+                t0 = cpu_clock(this_cpu);
+                t1 = cpu_clock(this_cpu);
                 SEQ_printf(m, "%-35s:%21Ld\n",
                            "clock-delta", (long long)(t1-t0));
         }
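The clock-delta statistic times two back-to-back clock reads; reading one CPU's clock via cpu_clock(this_cpu) makes the delta measure read overhead instead of cross-CPU skew. A userspace analogue using CLOCK_MONOTONIC:

#include <stdio.h>
#include <time.h>

static long long now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
        long long t0 = now_ns();
        long long t1 = now_ns();

        printf("%-35s:%21lld\n", "clock-delta", t1 - t0);
        return 0;
}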
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index ee71bec1da66..7dbf72a2b02c 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -298,9 +298,11 @@ static inline void account_group_user_time(struct task_struct *tsk,
 {
         struct signal_struct *sig;
 
-        sig = tsk->signal;
-        if (unlikely(!sig))
+        /* tsk == current, ensure it is safe to use ->signal */
+        if (unlikely(tsk->exit_state))
                 return;
+
+        sig = tsk->signal;
         if (sig->cputime.totals) {
                 struct task_cputime *times;
 
@@ -325,9 +327,11 @@ static inline void account_group_system_time(struct task_struct *tsk,
 {
         struct signal_struct *sig;
 
-        sig = tsk->signal;
-        if (unlikely(!sig))
+        /* tsk == current, ensure it is safe to use ->signal */
+        if (unlikely(tsk->exit_state))
                 return;
+
+        sig = tsk->signal;
         if (sig->cputime.totals) {
                 struct task_cputime *times;
 
@@ -353,8 +357,11 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
         struct signal_struct *sig;
 
         sig = tsk->signal;
+        /* see __exit_signal()->task_rq_unlock_wait() */
+        barrier();
         if (unlikely(!sig))
                 return;
+
         if (sig->cputime.totals) {
                 struct task_cputime *times;
 
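The first two helpers get the same exit_state test as fastpath_timer_check() in posix-cpu-timers.c; account_group_exec_runtime() instead keeps its NULL test but adds barrier() between loading ->signal and testing it, so the compiler cannot reload the pointer after the check (the comment points at __exit_signal() for the pairing). The kernel's barrier() is a compiler-only fence; the idiom sketched with illustrative types:

/* Compiler-only barrier, as the kernel defines it. */
#define barrier() __asm__ __volatile__("" : : : "memory")

struct sig { int totals; };
struct task { struct sig *signal; };    /* cleared during task exit */

static void account(struct task *t)
{
        struct sig *s = t->signal;      /* snapshot the pointer once */

        barrier();      /* no reload of t->signal past this point */
        if (!s)
                return;
        s->totals++;    /* use only the snapshot from here on */
}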
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 9bc4c00872c9..24e8ceacc388 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -112,7 +112,7 @@ static int chill(void *unused)
 int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
 {
         struct work_struct *sm_work;
-        int i;
+        int i, ret;
 
         /* Set up initial state. */
         mutex_lock(&lock);
@@ -137,8 +137,9 @@ int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
         /* This will release the thread on our CPU. */
         put_cpu();
         flush_workqueue(stop_machine_wq);
+        ret = active.fnret;
         mutex_unlock(&lock);
-        return active.fnret;
+        return ret;
 }
 
 int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
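The __stop_machine() change is the standard "snapshot the shared result before releasing the lock that protects it" pattern: after mutex_unlock(), the next caller may overwrite active.fnret before the previous caller has returned it. In userspace form, with illustrative names:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_result;               /* analogue of active.fnret */

static int run_and_fetch(void)
{
        int ret;

        pthread_mutex_lock(&lock);
        /* ... run the work that fills shared_result ... */
        ret = shared_result;    /* snapshot while still protected */
        pthread_mutex_unlock(&lock);
        return ret;             /* safe: local copy */
}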