| author | Ingo Molnar <mingo@elte.hu> | 2008-11-18 10:48:49 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-11-18 10:48:49 -0500 |
| commit | 73f56c0d35e6427081a4eabd620d8b8d8a35bd09 (patch) | |
| tree | 9fe1a2577baea03f3a6ec4a5e79f3ff26c4ee0ae /kernel | |
| parent | 0af40a4b1050c050e62eb1dc30b82d5ab22bf221 (diff) | |
| parent | 8501c45cc32c311ae755a2d5ac8c4a5f04908d42 (diff) | |
Merge branch 'iommu-fixes-2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into x86/urgent
Diffstat (limited to 'kernel')
| mode | path | lines changed |
|------------|----------------------------|----:|
| -rw-r--r-- | kernel/Makefile            |   4 |
| -rw-r--r-- | kernel/audit_tree.c        |  91 |
| -rw-r--r-- | kernel/auditfilter.c       |  14 |
| -rw-r--r-- | kernel/cgroup_freezer.c    |  19 |
| -rw-r--r-- | kernel/cpu.c               |   3 |
| -rw-r--r-- | kernel/exit.c              |  14 |
| -rw-r--r-- | kernel/fork.c              |  11 |
| -rw-r--r-- | kernel/hrtimer.c           |  26 |
| -rw-r--r-- | kernel/kprobes.c           |  23 |
| -rw-r--r-- | kernel/sched.c             |  15 |
| -rw-r--r-- | kernel/sched_debug.c       |  41 |
| -rw-r--r-- | kernel/sched_fair.c        |  17 |
| -rw-r--r-- | kernel/softirq.c           |   7 |
| -rw-r--r-- | kernel/stop_machine.c      |   5 |
| -rw-r--r-- | kernel/time/tick-sched.c   |   4 |
| -rw-r--r-- | kernel/trace/ring_buffer.c |   2 |
| -rw-r--r-- | kernel/trace/trace.c       |  19 |
| -rw-r--r-- | kernel/workqueue.c         |  45 |

18 files changed, 252 insertions(+), 108 deletions(-)

```diff
diff --git a/kernel/Makefile b/kernel/Makefile
index 9a3ec66a9d84..19fad003b19d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -11,8 +11,6 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o pm_qos_params.o sched_clock.o
 
-CFLAGS_REMOVE_sched.o = -mno-spe
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
 CFLAGS_REMOVE_lockdep.o = -pg
@@ -21,7 +19,7 @@ CFLAGS_REMOVE_mutex-debug.o = -pg
 CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
-CFLAGS_REMOVE_sched.o = -mno-spe -pg
+CFLAGS_REMOVE_sched.o = -pg
 endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
```

```diff
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 8ba0e0d934f2..8b509441f49a 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -24,6 +24,7 @@ struct audit_chunk {
 	struct list_head trees;		/* with root here */
 	int dead;
 	int count;
+	atomic_long_t refs;
 	struct rcu_head head;
 	struct node {
 		struct list_head list;
@@ -56,7 +57,8 @@ static LIST_HEAD(prune_list);
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
- * chunk is refcounted by embedded inotify_watch.
+ * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
+ * of watch contributes 1 to .refs).
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
@@ -121,6 +123,7 @@ static struct audit_chunk *alloc_chunk(int count)
 	INIT_LIST_HEAD(&chunk->hash);
 	INIT_LIST_HEAD(&chunk->trees);
 	chunk->count = count;
+	atomic_long_set(&chunk->refs, 1);
 	for (i = 0; i < count; i++) {
 		INIT_LIST_HEAD(&chunk->owners[i].list);
 		chunk->owners[i].index = i;
@@ -129,9 +132,8 @@ static struct audit_chunk *alloc_chunk(int count)
 	return chunk;
 }
 
-static void __free_chunk(struct rcu_head *rcu)
+static void free_chunk(struct audit_chunk *chunk)
 {
-	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
 	int i;
 
 	for (i = 0; i < chunk->count; i++) {
@@ -141,14 +143,16 @@ static void __free_chunk(struct rcu_head *rcu)
 	kfree(chunk);
 }
 
-static inline void free_chunk(struct audit_chunk *chunk)
+void audit_put_chunk(struct audit_chunk *chunk)
 {
-	call_rcu(&chunk->head, __free_chunk);
+	if (atomic_long_dec_and_test(&chunk->refs))
+		free_chunk(chunk);
 }
 
-void audit_put_chunk(struct audit_chunk *chunk)
+static void __put_chunk(struct rcu_head *rcu)
 {
-	put_inotify_watch(&chunk->watch);
+	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
+	audit_put_chunk(chunk);
 }
 
 enum {HASH_SIZE = 128};
@@ -176,7 +180,7 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 
 	list_for_each_entry_rcu(p, list, hash) {
 		if (p->watch.inode == inode) {
-			get_inotify_watch(&p->watch);
+			atomic_long_inc(&p->refs);
 			return p;
 		}
 	}
@@ -194,17 +198,49 @@ int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
 
 /* tagging and untagging inodes with trees */
 
-static void untag_chunk(struct audit_chunk *chunk, struct node *p)
+static struct audit_chunk *find_chunk(struct node *p)
+{
+	int index = p->index & ~(1U<<31);
+	p -= index;
+	return container_of(p, struct audit_chunk, owners[0]);
+}
+
+static void untag_chunk(struct node *p)
 {
+	struct audit_chunk *chunk = find_chunk(p);
 	struct audit_chunk *new;
 	struct audit_tree *owner;
 	int size = chunk->count - 1;
 	int i, j;
 
+	if (!pin_inotify_watch(&chunk->watch)) {
+		/*
+		 * Filesystem is shutting down; all watches are getting
+		 * evicted, just take it off the node list for this
+		 * tree and let the eviction logics take care of the
+		 * rest.
+		 */
+		owner = p->owner;
+		if (owner->root == chunk) {
+			list_del_init(&owner->same_root);
+			owner->root = NULL;
+		}
+		list_del_init(&p->list);
+		p->owner = NULL;
+		put_tree(owner);
+		return;
+	}
+
+	spin_unlock(&hash_lock);
+
+	/*
+	 * pin_inotify_watch() succeeded, so the watch won't go away
+	 * from under us.
+	 */
 	mutex_lock(&chunk->watch.inode->inotify_mutex);
 	if (chunk->dead) {
 		mutex_unlock(&chunk->watch.inode->inotify_mutex);
-		return;
+		goto out;
 	}
 
 	owner = p->owner;
@@ -221,7 +257,7 @@ static void untag_chunk(struct audit_chunk *chunk, struct node *p)
 		inotify_evict_watch(&chunk->watch);
 		mutex_unlock(&chunk->watch.inode->inotify_mutex);
 		put_inotify_watch(&chunk->watch);
-		return;
+		goto out;
 	}
 
 	new = alloc_chunk(size);
@@ -263,7 +299,7 @@ static void untag_chunk(struct audit_chunk *chunk, struct node *p)
 	inotify_evict_watch(&chunk->watch);
 	mutex_unlock(&chunk->watch.inode->inotify_mutex);
 	put_inotify_watch(&chunk->watch);
-	return;
+	goto out;
 
 Fallback:
 	// do the best we can
@@ -277,6 +313,9 @@ Fallback:
 	put_tree(owner);
 	spin_unlock(&hash_lock);
 	mutex_unlock(&chunk->watch.inode->inotify_mutex);
+out:
+	unpin_inotify_watch(&chunk->watch);
+	spin_lock(&hash_lock);
 }
 
 static int create_chunk(struct inode *inode, struct audit_tree *tree)
@@ -387,13 +426,6 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	return 0;
 }
 
-static struct audit_chunk *find_chunk(struct node *p)
-{
-	int index = p->index & ~(1U<<31);
-	p -= index;
-	return container_of(p, struct audit_chunk, owners[0]);
-}
-
 static void kill_rules(struct audit_tree *tree)
 {
 	struct audit_krule *rule, *next;
@@ -431,17 +463,10 @@ static void prune_one(struct audit_tree *victim)
 	spin_lock(&hash_lock);
 	while (!list_empty(&victim->chunks)) {
 		struct node *p;
-		struct audit_chunk *chunk;
 
 		p = list_entry(victim->chunks.next, struct node, list);
-		chunk = find_chunk(p);
-		get_inotify_watch(&chunk->watch);
-		spin_unlock(&hash_lock);
-
-		untag_chunk(chunk, p);
 
-		put_inotify_watch(&chunk->watch);
-		spin_lock(&hash_lock);
+		untag_chunk(p);
 	}
 	spin_unlock(&hash_lock);
 	put_tree(victim);
@@ -469,7 +494,6 @@ static void trim_marked(struct audit_tree *tree)
 
 	while (!list_empty(&tree->chunks)) {
 		struct node *node;
-		struct audit_chunk *chunk;
 
 		node = list_entry(tree->chunks.next, struct node, list);
 
@@ -477,14 +501,7 @@ static void trim_marked(struct audit_tree *tree)
 		if (!(node->index & (1U<<31)))
 			break;
 
-		chunk = find_chunk(node);
-		get_inotify_watch(&chunk->watch);
-		spin_unlock(&hash_lock);
-
-		untag_chunk(chunk, node);
-
-		put_inotify_watch(&chunk->watch);
-		spin_lock(&hash_lock);
+		untag_chunk(node);
 	}
 	if (!tree->root && !tree->goner) {
 		tree->goner = 1;
@@ -878,7 +895,7 @@ static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
 static void destroy_watch(struct inotify_watch *watch)
 {
 	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
-	free_chunk(chunk);
+	call_rcu(&chunk->head, __put_chunk);
 }
 
 static const struct inotify_operations rtree_inotify_ops = {
```
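
The audit_tree change decouples chunk lifetime from the inotify watch refcount: lookups under RCU now bump a dedicated `refs` counter, and the watch's contribution to that counter is dropped only after an RCU grace period, so a reader can never race the final free. A minimal sketch of the general idiom, with a hypothetical `obj` type standing in for `audit_chunk`:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	atomic_long_t refs;	/* one reference held by the publisher */
	struct rcu_head head;
};

/* Reader side: find the object under RCU and take a counted reference.
 * The publisher's reference keeps refs >= 1 while the object is still
 * reachable, so this increment can never resurrect a freed object. */
static struct obj *obj_lookup(struct obj __rcu **slot)
{
	struct obj *o;

	rcu_read_lock();
	o = rcu_dereference(*slot);
	if (o)
		atomic_long_inc(&o->refs);
	rcu_read_unlock();
	return o;
}

static void obj_put(struct obj *o)
{
	if (atomic_long_dec_and_test(&o->refs))
		kfree(o);
}

static void __obj_put_rcu(struct rcu_head *rcu)
{
	obj_put(container_of(rcu, struct obj, head));
}

/* Publisher side: unpublish, then drop the long-lived reference only
 * after a grace period, so in-flight obj_lookup() calls finish first. */
static void obj_unpublish(struct obj __rcu **slot, struct obj *o)
{
	rcu_assign_pointer(*slot, NULL);
	call_rcu(&o->head, __obj_put_rcu);
}
```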

```diff
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index b7d354e2b0ef..9fd85a4640a0 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1094,8 +1094,8 @@ static void audit_inotify_unregister(struct list_head *in_list)
 	list_for_each_entry_safe(p, n, in_list, ilist) {
 		list_del(&p->ilist);
 		inotify_rm_watch(audit_ih, &p->wdata);
-		/* the put matching the get in audit_do_del_rule() */
-		put_inotify_watch(&p->wdata);
+		/* the unpin matching the pin in audit_do_del_rule() */
+		unpin_inotify_watch(&p->wdata);
 	}
 }
 
@@ -1389,9 +1389,13 @@ static inline int audit_del_rule(struct audit_entry *entry,
 				/* Put parent on the inotify un-registration
 				 * list.  Grab a reference before releasing
 				 * audit_filter_mutex, to be released in
-				 * audit_inotify_unregister(). */
-				list_add(&parent->ilist, &inotify_list);
-				get_inotify_watch(&parent->wdata);
+				 * audit_inotify_unregister().
+				 * If filesystem is going away, just leave
+				 * the sucker alone, eviction will take
+				 * care of it.
+				 */
+				if (pin_inotify_watch(&parent->wdata))
+					list_add(&parent->ilist, &inotify_list);
 			}
 		}
 	}
```
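
Both audit call sites now rely on the `pin_inotify_watch()`/`unpin_inotify_watch()` pair introduced alongside these fixes: unlike an unconditional `get_inotify_watch()`, the pin fails if the watch is already being evicted, and the caller must handle that. A hedged sketch of the calling convention (not a real call site):

```c
static void example_pinned_access(struct inotify_watch *watch)
{
	if (!pin_inotify_watch(watch)) {
		/*
		 * Watch is already on its way out (e.g. the filesystem
		 * is being unmounted); eviction will do the cleanup,
		 * so just back off.
		 */
		return;
	}

	/* The watch cannot be freed until the matching unpin. */
	/* ... use the watch ... */

	unpin_inotify_watch(watch);
}
```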

```diff
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 7fa476f01d05..fb249e2bcada 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -184,9 +184,20 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 {
 	struct freezer *freezer;
 
-	task_lock(task);
+	/*
+	 * No lock is needed, since the task isn't on tasklist yet,
+	 * so it can't be moved to another cgroup, which means the
+	 * freezer won't be removed and will be valid during this
+	 * function call.
+	 */
 	freezer = task_freezer(task);
-	task_unlock(task);
+
+	/*
+	 * The root cgroup is non-freezable, so we can skip the
+	 * following check.
+	 */
+	if (!freezer->css.cgroup->parent)
+		return;
 
 	spin_lock_irq(&freezer->lock);
 	BUG_ON(freezer->state == CGROUP_FROZEN);
@@ -331,7 +342,7 @@ static int freezer_write(struct cgroup *cgroup,
 	else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
 		goal_state = CGROUP_FROZEN;
 	else
-		return -EIO;
+		return -EINVAL;
 
 	if (!cgroup_lock_live_group(cgroup))
 		return -ENODEV;
@@ -350,6 +361,8 @@ static struct cftype files[] = {
 
 static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
 {
+	if (!cgroup->parent)
+		return 0;
 	return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
 }
 
```

```diff
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 86d49045daed..5a732c5ef08b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -499,3 +499,6 @@ const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
 #endif
 };
 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
+
+const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
+EXPORT_SYMBOL(cpu_all_bits);
```

```diff
diff --git a/kernel/exit.c b/kernel/exit.c
index 80137a5d9467..2d8be7ebb0f7 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -40,7 +40,6 @@
 #include <linux/cn_proc.h>
 #include <linux/mutex.h>
 #include <linux/futex.h>
-#include <linux/compat.h>
 #include <linux/pipe_fs_i.h>
 #include <linux/audit.h> /* for audit_free() */
 #include <linux/resource.h>
@@ -141,6 +140,11 @@ static void __exit_signal(struct task_struct *tsk)
 	if (sig) {
 		flush_sigqueue(&sig->shared_pending);
 		taskstats_tgid_free(sig);
+		/*
+		 * Make sure ->signal can't go away under rq->lock,
+		 * see account_group_exec_runtime().
+		 */
+		task_rq_unlock_wait(tsk);
 		__cleanup_signal(sig);
 	}
 }
@@ -1054,14 +1058,6 @@ NORET_TYPE void do_exit(long code)
 		exit_itimers(tsk->signal);
 	}
 	acct_collect(code, group_dead);
-#ifdef CONFIG_FUTEX
-	if (unlikely(tsk->robust_list))
-		exit_robust_list(tsk);
-#ifdef CONFIG_COMPAT
-	if (unlikely(tsk->compat_robust_list))
-		compat_exit_robust_list(tsk);
-#endif
-#endif
 	if (group_dead)
 		tty_audit_exit();
 	if (unlikely(tsk->audit_context))
```

```diff
diff --git a/kernel/fork.c b/kernel/fork.c
index f6083561dfe0..2a372a0e206f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -40,6 +40,7 @@
 #include <linux/jiffies.h>
 #include <linux/tracehook.h>
 #include <linux/futex.h>
+#include <linux/compat.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/rcupdate.h>
 #include <linux/ptrace.h>
@@ -519,6 +520,16 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 {
 	struct completion *vfork_done = tsk->vfork_done;
 
+	/* Get rid of any futexes when releasing the mm */
+#ifdef CONFIG_FUTEX
+	if (unlikely(tsk->robust_list))
+		exit_robust_list(tsk);
+#ifdef CONFIG_COMPAT
+	if (unlikely(tsk->compat_robust_list))
+		compat_exit_robust_list(tsk);
+#endif
+#endif
+
 	/* Get rid of any cached register state */
 	deactivate_mm(tsk, mm);
 
```

```diff
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 2b465dfde426..47e63349d1b2 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -664,14 +664,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 
 	/* Timer is expired, act upon the callback mode */
 	switch(timer->cb_mode) {
-	case HRTIMER_CB_IRQSAFE_NO_RESTART:
-		debug_hrtimer_deactivate(timer);
-		/*
-		 * We can call the callback from here. No restart
-		 * happens, so no danger of recursion
-		 */
-		BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
-		return 1;
 	case HRTIMER_CB_IRQSAFE_PERCPU:
 	case HRTIMER_CB_IRQSAFE_UNLOCKED:
 		/*
@@ -683,7 +675,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 		 */
 		debug_hrtimer_deactivate(timer);
 		return 1;
-	case HRTIMER_CB_IRQSAFE:
 	case HRTIMER_CB_SOFTIRQ:
 		/*
 		 * Move everything else into the softirq pending list !
@@ -1209,6 +1200,7 @@ static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
 		enum hrtimer_restart (*fn)(struct hrtimer *);
 		struct hrtimer *timer;
 		int restart;
+		int emulate_hardirq_ctx = 0;
 
 		timer = list_entry(cpu_base->cb_pending.next,
 				   struct hrtimer, cb_entry);
@@ -1217,10 +1209,24 @@ static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
 		timer_stats_account_hrtimer(timer);
 
 		fn = timer->function;
+		/*
+		 * A timer might have been added to the cb_pending list
+		 * when it was migrated during a cpu-offline operation.
+		 * Emulate hardirq context for such timers.
+		 */
+		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
+		    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
+			emulate_hardirq_ctx = 1;
+
 		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
 		spin_unlock_irq(&cpu_base->lock);
 
-		restart = fn(timer);
+		if (unlikely(emulate_hardirq_ctx)) {
+			local_irq_disable();
+			restart = fn(timer);
+			local_irq_enable();
+		} else
+			restart = fn(timer);
 
 		spin_lock_irq(&cpu_base->lock);
 
```

```diff
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 8b57a2597f21..9f8a3f25259a 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -72,7 +72,7 @@ static bool kprobe_enabled;
 DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
-	spinlock_t lock ____cacheline_aligned;
+	spinlock_t lock ____cacheline_aligned_in_smp;
 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
 
 static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
@@ -613,30 +613,37 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 		return -EINVAL;
 	p->addr = addr;
 
-	if (!kernel_text_address((unsigned long) p->addr) ||
-	    in_kprobes_functions((unsigned long) p->addr))
+	preempt_disable();
+	if (!__kernel_text_address((unsigned long) p->addr) ||
+	    in_kprobes_functions((unsigned long) p->addr)) {
+		preempt_enable();
 		return -EINVAL;
+	}
 
 	p->mod_refcounted = 0;
 
 	/*
 	 * Check if are we probing a module.
 	 */
-	probed_mod = module_text_address((unsigned long) p->addr);
+	probed_mod = __module_text_address((unsigned long) p->addr);
 	if (probed_mod) {
-		struct module *calling_mod = module_text_address(called_from);
+		struct module *calling_mod;
+		calling_mod = __module_text_address(called_from);
 		/*
 		 * We must allow modules to probe themself and in this case
 		 * avoid incrementing the module refcount, so as to allow
 		 * unloading of self probing modules.
 		 */
 		if (calling_mod && calling_mod != probed_mod) {
-			if (unlikely(!try_module_get(probed_mod)))
+			if (unlikely(!try_module_get(probed_mod))) {
+				preempt_enable();
 				return -EINVAL;
+			}
 			p->mod_refcounted = 1;
 		} else
 			probed_mod = NULL;
 	}
+	preempt_enable();
 
 	p->nmissed = 0;
 	INIT_LIST_HEAD(&p->list);
@@ -718,6 +725,10 @@ static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
 	struct kprobe *old_p;
 
 	if (p->mod_refcounted) {
+		/*
+		 * Since we've already incremented refcount,
+		 * we don't need to disable preemption.
+		 */
 		mod = module_text_address((unsigned long)p->addr);
 		if (mod)
 			module_put(mod);
```
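
The kprobes fix illustrates a general rule: `__kernel_text_address()` and `__module_text_address()` return a module mapping without taking any reference, so the result is only stable while preemption is off; otherwise the module could be unloaded between the lookup and `try_module_get()`. Roughly, the pattern is (hypothetical caller):

```c
static struct module *pin_module_for(unsigned long addr)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address(addr);
	/* try_module_get() can still fail if the module is going away */
	if (mod && !try_module_get(mod))
		mod = NULL;
	preempt_enable();

	return mod;	/* caller owns a reference; module_put() when done */
}
```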

```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index 57c933ffbee1..c94baf2969e7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -399,7 +399,7 @@ struct cfs_rq {
 	 */
 	struct sched_entity *curr, *next, *last;
 
-	unsigned long nr_spread_over;
+	unsigned int nr_spread_over;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
@@ -969,6 +969,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void task_rq_unlock_wait(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+
+	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+	spin_unlock_wait(&rq->lock);
+}
+
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
@@ -1448,6 +1456,8 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 	if (rq->nr_running)
 		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+	else
+		rq->avg_load_per_task = 0;
 
 	return rq->avg_load_per_task;
 }
@@ -5860,6 +5870,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	spin_lock_irqsave(&rq->lock, flags);
+
 	__sched_fork(idle);
 	idle->se.exec_start = sched_clock();
 
@@ -5867,7 +5879,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->cpus_allowed = cpumask_of_cpu(cpu);
 	__set_task_cpu(idle, cpu);
 
-	spin_lock_irqsave(&rq->lock, flags);
 	rq->curr = rq->idle = idle;
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	idle->oncpu = 1;
```
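
The new `task_rq_unlock_wait()` pairs with the `__exit_signal()` hunk in kernel/exit.c above: rather than taking `rq->lock`, the exit path waits for any in-flight `rq->lock` section that might still be touching `tsk->signal` (e.g. `account_group_exec_runtime()`) to finish. The explicit `smp_mb()` is needed because `spin_unlock_wait()` by itself is not a full barrier. A simplified schematic of the ordering, not the literal kernel code:

```c
/* Schematic only — the real ordering lives in __exit_signal(),
 * where ->signal is cleared under the sighand lock. */
static void release_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;

	tsk->signal = NULL;			/* unpublish */
	smp_mb();				/* order the store above ... */
	spin_unlock_wait(&task_rq(tsk)->lock);	/* ... before draining holders */
	__cleanup_signal(sig);			/* no rq->lock holder can still see it */
}
```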

```diff
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 5ae17762ec32..48ecc51e7701 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -144,7 +144,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	last = __pick_last_entity(cfs_rq);
 	if (last)
 		max_vruntime = last->vruntime;
-	min_vruntime = rq->cfs.min_vruntime;
+	min_vruntime = cfs_rq->min_vruntime;
 	rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime;
 	spin_unlock_irqrestore(&rq->lock, flags);
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
@@ -161,26 +161,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			SPLIT_NS(spread0));
 	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
-#ifdef CONFIG_SCHEDSTATS
-#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
-
-	P(yld_exp_empty);
-	P(yld_act_empty);
-	P(yld_both_empty);
-	P(yld_count);
 
-	P(sched_switch);
-	P(sched_count);
-	P(sched_goidle);
-
-	P(ttwu_count);
-	P(ttwu_local);
-
-	P(bkl_count);
-
-#undef P
-#endif
-	SEQ_printf(m, "  .%-30s: %ld\n", "nr_spread_over",
+	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
 			cfs_rq->nr_spread_over);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_SMP
@@ -260,6 +242,25 @@ static void print_cpu(struct seq_file *m, int cpu)
 #undef P
 #undef PN
 
+#ifdef CONFIG_SCHEDSTATS
+#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
+
+	P(yld_exp_empty);
+	P(yld_act_empty);
+	P(yld_both_empty);
+	P(yld_count);
+
+	P(sched_switch);
+	P(sched_count);
+	P(sched_goidle);
+
+	P(ttwu_count);
+	P(ttwu_local);
+
+	P(bkl_count);
+
+#undef P
+#endif
 	print_cfs_stats(m, cpu);
 	print_rt_stats(m, cpu);
 
```

```diff
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 51aa3e102acb..98345e45b059 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -716,6 +716,15 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	__enqueue_entity(cfs_rq, se);
 }
 
+static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	if (cfs_rq->last == se)
+		cfs_rq->last = NULL;
+
+	if (cfs_rq->next == se)
+		cfs_rq->next = NULL;
+}
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -738,11 +747,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 #endif
 	}
 
-	if (cfs_rq->last == se)
-		cfs_rq->last = NULL;
-
-	if (cfs_rq->next == se)
-		cfs_rq->next = NULL;
+	clear_buddies(cfs_rq, se);
 
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
@@ -977,6 +982,8 @@ static void yield_task_fair(struct rq *rq)
 	if (unlikely(cfs_rq->nr_running == 1))
 		return;
 
+	clear_buddies(cfs_rq, se);
+
 	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
 		update_rq_clock(rq);
 		/*
```

```diff
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 7110daeb9a90..e7c69a720d69 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -269,10 +269,11 @@ void irq_enter(void)
 {
 	int cpu = smp_processor_id();
 
-	if (idle_cpu(cpu) && !in_interrupt())
+	if (idle_cpu(cpu) && !in_interrupt()) {
+		__irq_enter();
 		tick_check_idle(cpu);
-
-	__irq_enter();
+	} else
+		__irq_enter();
 }
 
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
```

```diff
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 9bc4c00872c9..24e8ceacc388 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -112,7 +112,7 @@ static int chill(void *unused)
 int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
 {
 	struct work_struct *sm_work;
-	int i;
+	int i, ret;
 
 	/* Set up initial state. */
 	mutex_lock(&lock);
@@ -137,8 +137,9 @@ int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
 	/* This will release the thread on our CPU. */
 	put_cpu();
 	flush_workqueue(stop_machine_wq);
+	ret = active.fnret;
 	mutex_unlock(&lock);
-	return active.fnret;
+	return ret;
 }
 
 int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
```
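
The stop_machine fix is a classic unlock-order bug: `active.fnret` is protected by `lock`, and the old code read it after `mutex_unlock()`, by which point the next caller may already be rewriting `active`. The fix is the standard snapshot-before-unlock pattern; a minimal sketch:

```c
/* Copy lock-protected state to a local before dropping the lock,
 * then use only the local copy. */
static int read_result(void)
{
	int ret;

	mutex_lock(&lock);
	/* ... machinery that fills in active.fnret ... */
	ret = active.fnret;	/* read while still holding the lock */
	mutex_unlock(&lock);

	return ret;		/* 'active' may be reused by now; 'ret' is safe */
}
```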

```diff
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 5bbb1044f847..342fc9ccab46 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -568,6 +568,9 @@ static void tick_nohz_switch_to_nohz(void)
  */
 static void tick_nohz_kick_tick(int cpu)
 {
+#if 0
+	/* Switch back to 2.6.27 behaviour */
+
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	ktime_t delta, now;
 
@@ -584,6 +587,7 @@ static void tick_nohz_kick_tick(int cpu)
 		return;
 
 	tick_nohz_restart(ts, now);
+#endif
 }
 
 #else
```

```diff
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3f3380638646..2f76193c3489 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1060,7 +1060,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 
 		/* Did the write stamp get updated already? */
 		if (unlikely(ts < cpu_buffer->write_stamp))
-			goto again;
+			delta = 0;
 
 		if (test_time_stamp(delta)) {
 
```

```diff
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9f3b478f9171..697eda36b86a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1755,7 +1755,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 		return TRACE_TYPE_HANDLED;
 
 	SEQ_PUT_FIELD_RET(s, entry->pid);
-	SEQ_PUT_FIELD_RET(s, iter->cpu);
+	SEQ_PUT_FIELD_RET(s, entry->cpu);
 	SEQ_PUT_FIELD_RET(s, iter->ts);
 
 	switch (entry->type) {
@@ -2676,7 +2676,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 {
 	unsigned long val;
 	char buf[64];
-	int ret;
+	int ret, cpu;
 	struct trace_array *tr = filp->private_data;
 
 	if (cnt >= sizeof(buf))
@@ -2704,6 +2704,14 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 		goto out;
 	}
 
+	/* disable all cpu buffers */
+	for_each_tracing_cpu(cpu) {
+		if (global_trace.data[cpu])
+			atomic_inc(&global_trace.data[cpu]->disabled);
+		if (max_tr.data[cpu])
+			atomic_inc(&max_tr.data[cpu]->disabled);
+	}
+
 	if (val != global_trace.entries) {
 		ret = ring_buffer_resize(global_trace.buffer, val);
 		if (ret < 0) {
@@ -2735,6 +2743,13 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	if (tracing_disabled)
 		cnt = -ENOMEM;
 out:
+	for_each_tracing_cpu(cpu) {
+		if (global_trace.data[cpu])
+			atomic_dec(&global_trace.data[cpu]->disabled);
+		if (max_tr.data[cpu])
+			atomic_dec(&max_tr.data[cpu]->disabled);
+	}
+
 	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
 
```

```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f928f2a87b9b..d4dc69ddebd7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -970,6 +970,51 @@ undo:
 	return ret;
 }
 
+#ifdef CONFIG_SMP
+struct work_for_cpu {
+	struct work_struct work;
+	long (*fn)(void *);
+	void *arg;
+	long ret;
+};
+
+static void do_work_for_cpu(struct work_struct *w)
+{
+	struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);
+
+	wfc->ret = wfc->fn(wfc->arg);
+}
+
+/**
+ * work_on_cpu - run a function in user context on a particular cpu
+ * @cpu: the cpu to run on
+ * @fn: the function to run
+ * @arg: the function arg
+ *
+ * This will return -EINVAL in the cpu is not online, or the return value
+ * of @fn otherwise.
+ */
+long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+{
+	struct work_for_cpu wfc;
+
+	INIT_WORK(&wfc.work, do_work_for_cpu);
+	wfc.fn = fn;
+	wfc.arg = arg;
+	get_online_cpus();
+	if (unlikely(!cpu_online(cpu)))
+		wfc.ret = -EINVAL;
+	else {
+		schedule_work_on(cpu, &wfc.work);
+		flush_work(&wfc.work);
+	}
+	put_online_cpus();
+
+	return wfc.ret;
+}
+EXPORT_SYMBOL_GPL(work_on_cpu);
+#endif /* CONFIG_SMP */
+
 void __init init_workqueues(void)
 {
 	cpu_populated_map = cpu_online_map;
```
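
The new `work_on_cpu()` gives callers a process-context way to run a function on a specific CPU without juggling `set_cpus_allowed()` themselves: it queues the work on that CPU's workqueue and waits for completion. A hedged usage sketch (the callback and helper names are made up for illustration):

```c
/* Runs on the requested CPU, in workqueue (process) context. */
static long read_local_counter(void *arg)
{
	unsigned long *sum = arg;

	*sum = this_cpu_counter_read();	/* hypothetical per-cpu helper */
	return 0;
}

static long example(void)
{
	unsigned long sum = 0;
	long ret;

	ret = work_on_cpu(2, read_local_counter, &sum);
	if (ret == -EINVAL)
		return ret;	/* CPU 2 was not online */

	return sum;
}
```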
