Diffstat (limited to 'kernel')
 -rw-r--r--   kernel/cgroup.c                    6
 -rw-r--r--   kernel/compat.c                   21
 -rw-r--r--   kernel/debug/kdb/kdb_bp.c          2
 -rw-r--r--   kernel/fork.c                      2
 -rw-r--r--   kernel/gcov/fs.c                 244
 -rw-r--r--   kernel/groups.c                    5
 -rw-r--r--   kernel/hrtimer.c                   3
 -rw-r--r--   kernel/hw_breakpoint.c             3
 -rw-r--r--   kernel/mutex.c                    23
 -rw-r--r--   kernel/perf_event.c               32
 -rw-r--r--   kernel/pm_qos_params.c             4
 -rw-r--r--   kernel/power/hibernate.c           1
 -rw-r--r--   kernel/power/snapshot.c           86
 -rw-r--r--   kernel/power/swap.c                6
 -rw-r--r--   kernel/sched.c                    14
 -rw-r--r--   kernel/sched_fair.c               13
 -rw-r--r--   kernel/sys.c                       2
 -rw-r--r--   kernel/sysctl.c                    5
 -rw-r--r--   kernel/trace/ftrace.c             19
 -rw-r--r--   kernel/trace/ring_buffer.c         2
 -rw-r--r--   kernel/trace/trace_event_perf.c    3
 -rw-r--r--   kernel/trace/trace_kprobe.c       43
 -rw-r--r--   kernel/watchdog.c                 17
 -rw-r--r--   kernel/workqueue.c                27
24 files changed, 402 insertions, 181 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ed19afd9e3fe..c9483d8f6140 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
| @@ -1798,13 +1798,13 @@ out: | |||
| 1798 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) | 1798 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) |
| 1799 | { | 1799 | { |
| 1800 | struct cgroupfs_root *root; | 1800 | struct cgroupfs_root *root; |
| 1801 | struct cgroup *cur_cg; | ||
| 1802 | int retval = 0; | 1801 | int retval = 0; |
| 1803 | 1802 | ||
| 1804 | cgroup_lock(); | 1803 | cgroup_lock(); |
| 1805 | for_each_active_root(root) { | 1804 | for_each_active_root(root) { |
| 1806 | cur_cg = task_cgroup_from_root(from, root); | 1805 | struct cgroup *from_cg = task_cgroup_from_root(from, root); |
| 1807 | retval = cgroup_attach_task(cur_cg, tsk); | 1806 | |
| 1807 | retval = cgroup_attach_task(from_cg, tsk); | ||
| 1808 | if (retval) | 1808 | if (retval) |
| 1809 | break; | 1809 | break; |
| 1810 | } | 1810 | } |
diff --git a/kernel/compat.c b/kernel/compat.c
index e167efce8423..c9e2ec0b34a8 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
| @@ -1126,3 +1126,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info) | |||
| 1126 | 1126 | ||
| 1127 | return 0; | 1127 | return 0; |
| 1128 | } | 1128 | } |
| 1129 | |||
| 1130 | /* | ||
| 1131 | * Allocate user-space memory for the duration of a single system call, | ||
| 1132 | * in order to marshall parameters inside a compat thunk. | ||
| 1133 | */ | ||
| 1134 | void __user *compat_alloc_user_space(unsigned long len) | ||
| 1135 | { | ||
| 1136 | void __user *ptr; | ||
| 1137 | |||
| 1138 | /* If len would occupy more than half of the entire compat space... */ | ||
| 1139 | if (unlikely(len > (((compat_uptr_t)~0) >> 1))) | ||
| 1140 | return NULL; | ||
| 1141 | |||
| 1142 | ptr = arch_compat_alloc_user_space(len); | ||
| 1143 | |||
| 1144 | if (unlikely(!access_ok(VERIFY_WRITE, ptr, len))) | ||
| 1145 | return NULL; | ||
| 1146 | |||
| 1147 | return ptr; | ||
| 1148 | } | ||
| 1149 | EXPORT_SYMBOL_GPL(compat_alloc_user_space); | ||
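The new compat_alloc_user_space() above wraps arch_compat_alloc_user_space() with a length sanity check and an access_ok() check. A compat syscall thunk typically uses it to build a native-width argument block on the user stack before calling the regular 64-bit implementation. The sketch below is illustrative only and is not part of this patch: struct compat_example, struct example, compat_sys_example() and sys_example() are invented names.

        /*
         * Hypothetical compat thunk (sketch): widen a 32-bit user structure
         * into a native one allocated with compat_alloc_user_space(), then
         * hand it to an assumed native syscall implementation.
         */
        struct compat_example {
                compat_uptr_t buf;      /* 32-bit user pointer */
                compat_size_t len;
        };

        struct example {
                void __user *buf;
                size_t len;
        };

        asmlinkage long compat_sys_example(struct compat_example __user *uarg32)
        {
                struct example __user *uarg;
                compat_uptr_t buf;
                compat_size_t len;

                if (get_user(buf, &uarg32->buf) || get_user(len, &uarg32->len))
                        return -EFAULT;

                /* Valid only for the duration of this system call. */
                uarg = compat_alloc_user_space(sizeof(*uarg));
                if (!uarg)
                        return -EFAULT;

                if (put_user(compat_ptr(buf), &uarg->buf) ||
                    put_user((size_t)len, &uarg->len))
                        return -EFAULT;

                return sys_example(uarg);       /* assumed native entry point */
        }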
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index 75bd9b3ebbb7..20059ef4459a 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
| @@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv) | |||
| 274 | int i, bpno; | 274 | int i, bpno; |
| 275 | kdb_bp_t *bp, *bp_check; | 275 | kdb_bp_t *bp, *bp_check; |
| 276 | int diag; | 276 | int diag; |
| 277 | int free; | ||
| 278 | char *symname = NULL; | 277 | char *symname = NULL; |
| 279 | long offset = 0ul; | 278 | long offset = 0ul; |
| 280 | int nextarg; | 279 | int nextarg; |
| @@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv) | |||
| 305 | /* | 304 | /* |
| 306 | * Find an empty bp structure to allocate | 305 | * Find an empty bp structure to allocate |
| 307 | */ | 306 | */ |
| 308 | free = KDB_MAXBPT; | ||
| 309 | for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { | 307 | for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { |
| 310 | if (bp->bp_free) | 308 | if (bp->bp_free) |
| 311 | break; | 309 | break; |
diff --git a/kernel/fork.c b/kernel/fork.c
index b7e9d60a675d..c445f8cc408d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
| @@ -356,10 +356,10 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
| 356 | if (IS_ERR(pol)) | 356 | if (IS_ERR(pol)) |
| 357 | goto fail_nomem_policy; | 357 | goto fail_nomem_policy; |
| 358 | vma_set_policy(tmp, pol); | 358 | vma_set_policy(tmp, pol); |
| 359 | tmp->vm_mm = mm; | ||
| 359 | if (anon_vma_fork(tmp, mpnt)) | 360 | if (anon_vma_fork(tmp, mpnt)) |
| 360 | goto fail_nomem_anon_vma_fork; | 361 | goto fail_nomem_anon_vma_fork; |
| 361 | tmp->vm_flags &= ~VM_LOCKED; | 362 | tmp->vm_flags &= ~VM_LOCKED; |
| 362 | tmp->vm_mm = mm; | ||
| 363 | tmp->vm_next = tmp->vm_prev = NULL; | 363 | tmp->vm_next = tmp->vm_prev = NULL; |
| 364 | file = tmp->vm_file; | 364 | file = tmp->vm_file; |
| 365 | if (file) { | 365 | if (file) { |
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index ef3c3f88a7a3..f83972b16564 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
| @@ -33,10 +33,11 @@ | |||
| 33 | * @children: child nodes | 33 | * @children: child nodes |
| 34 | * @all: list head for list of all nodes | 34 | * @all: list head for list of all nodes |
| 35 | * @parent: parent node | 35 | * @parent: parent node |
| 36 | * @info: associated profiling data structure if not a directory | 36 | * @loaded_info: array of pointers to profiling data sets for loaded object |
| 37 | * @ghost: when an object file containing profiling data is unloaded we keep a | 37 | * files. |
| 38 | * copy of the profiling data here to allow collecting coverage data | 38 | * @num_loaded: number of profiling data sets for loaded object files. |
| 39 | * for cleanup code. Such a node is called a "ghost". | 39 | * @unloaded_info: accumulated copy of profiling data sets for unloaded |
| 40 | * object files. Used only when gcov_persist=1. | ||
| 40 | * @dentry: main debugfs entry, either a directory or data file | 41 | * @dentry: main debugfs entry, either a directory or data file |
| 41 | * @links: associated symbolic links | 42 | * @links: associated symbolic links |
| 42 | * @name: data file basename | 43 | * @name: data file basename |
| @@ -51,10 +52,11 @@ struct gcov_node { | |||
| 51 | struct list_head children; | 52 | struct list_head children; |
| 52 | struct list_head all; | 53 | struct list_head all; |
| 53 | struct gcov_node *parent; | 54 | struct gcov_node *parent; |
| 54 | struct gcov_info *info; | 55 | struct gcov_info **loaded_info; |
| 55 | struct gcov_info *ghost; | 56 | struct gcov_info *unloaded_info; |
| 56 | struct dentry *dentry; | 57 | struct dentry *dentry; |
| 57 | struct dentry **links; | 58 | struct dentry **links; |
| 59 | int num_loaded; | ||
| 58 | char name[0]; | 60 | char name[0]; |
| 59 | }; | 61 | }; |
| 60 | 62 | ||
| @@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = { | |||
| 136 | }; | 138 | }; |
| 137 | 139 | ||
| 138 | /* | 140 | /* |
| 139 | * Return the profiling data set for a given node. This can either be the | 141 | * Return a profiling data set associated with the given node. This is |
| 140 | * original profiling data structure or a duplicate (also called "ghost") | 142 | * either a data set for a loaded object file or a data set copy in case |
| 141 | * in case the associated object file has been unloaded. | 143 | * all associated object files have been unloaded. |
| 142 | */ | 144 | */ |
| 143 | static struct gcov_info *get_node_info(struct gcov_node *node) | 145 | static struct gcov_info *get_node_info(struct gcov_node *node) |
| 144 | { | 146 | { |
| 145 | if (node->info) | 147 | if (node->num_loaded > 0) |
| 146 | return node->info; | 148 | return node->loaded_info[0]; |
| 147 | 149 | ||
| 148 | return node->ghost; | 150 | return node->unloaded_info; |
| 151 | } | ||
| 152 | |||
| 153 | /* | ||
| 154 | * Return a newly allocated profiling data set which contains the sum of | ||
| 155 | * all profiling data associated with the given node. | ||
| 156 | */ | ||
| 157 | static struct gcov_info *get_accumulated_info(struct gcov_node *node) | ||
| 158 | { | ||
| 159 | struct gcov_info *info; | ||
| 160 | int i = 0; | ||
| 161 | |||
| 162 | if (node->unloaded_info) | ||
| 163 | info = gcov_info_dup(node->unloaded_info); | ||
| 164 | else | ||
| 165 | info = gcov_info_dup(node->loaded_info[i++]); | ||
| 166 | if (!info) | ||
| 167 | return NULL; | ||
| 168 | for (; i < node->num_loaded; i++) | ||
| 169 | gcov_info_add(info, node->loaded_info[i]); | ||
| 170 | |||
| 171 | return info; | ||
| 149 | } | 172 | } |
| 150 | 173 | ||
| 151 | /* | 174 | /* |
| @@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file) | |||
| 163 | mutex_lock(&node_lock); | 186 | mutex_lock(&node_lock); |
| 164 | /* | 187 | /* |
| 165 | * Read from a profiling data copy to minimize reference tracking | 188 | * Read from a profiling data copy to minimize reference tracking |
| 166 | * complexity and concurrent access. | 189 | * complexity and concurrent access and to keep accumulating multiple |
| 190 | * profiling data sets associated with one node simple. | ||
| 167 | */ | 191 | */ |
| 168 | info = gcov_info_dup(get_node_info(node)); | 192 | info = get_accumulated_info(node); |
| 169 | if (!info) | 193 | if (!info) |
| 170 | goto out_unlock; | 194 | goto out_unlock; |
| 171 | iter = gcov_iter_new(info); | 195 | iter = gcov_iter_new(info); |
| @@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name) | |||
| 225 | return NULL; | 249 | return NULL; |
| 226 | } | 250 | } |
| 227 | 251 | ||
| 252 | /* | ||
| 253 | * Reset all profiling data associated with the specified node. | ||
| 254 | */ | ||
| 255 | static void reset_node(struct gcov_node *node) | ||
| 256 | { | ||
| 257 | int i; | ||
| 258 | |||
| 259 | if (node->unloaded_info) | ||
| 260 | gcov_info_reset(node->unloaded_info); | ||
| 261 | for (i = 0; i < node->num_loaded; i++) | ||
| 262 | gcov_info_reset(node->loaded_info[i]); | ||
| 263 | } | ||
| 264 | |||
| 228 | static void remove_node(struct gcov_node *node); | 265 | static void remove_node(struct gcov_node *node); |
| 229 | 266 | ||
| 230 | /* | 267 | /* |
| 231 | * write() implementation for gcov data files. Reset profiling data for the | 268 | * write() implementation for gcov data files. Reset profiling data for the |
| 232 | * associated file. If the object file has been unloaded (i.e. this is | 269 | * corresponding file. If all associated object files have been unloaded, |
| 233 | * a "ghost" node), remove the debug fs node as well. | 270 | * remove the debug fs node as well. |
| 234 | */ | 271 | */ |
| 235 | static ssize_t gcov_seq_write(struct file *file, const char __user *addr, | 272 | static ssize_t gcov_seq_write(struct file *file, const char __user *addr, |
| 236 | size_t len, loff_t *pos) | 273 | size_t len, loff_t *pos) |
| @@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr, | |||
| 245 | node = get_node_by_name(info->filename); | 282 | node = get_node_by_name(info->filename); |
| 246 | if (node) { | 283 | if (node) { |
| 247 | /* Reset counts or remove node for unloaded modules. */ | 284 | /* Reset counts or remove node for unloaded modules. */ |
| 248 | if (node->ghost) | 285 | if (node->num_loaded == 0) |
| 249 | remove_node(node); | 286 | remove_node(node); |
| 250 | else | 287 | else |
| 251 | gcov_info_reset(node->info); | 288 | reset_node(node); |
| 252 | } | 289 | } |
| 253 | /* Reset counts for open file. */ | 290 | /* Reset counts for open file. */ |
| 254 | gcov_info_reset(info); | 291 | gcov_info_reset(info); |
| @@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info, | |||
| 378 | INIT_LIST_HEAD(&node->list); | 415 | INIT_LIST_HEAD(&node->list); |
| 379 | INIT_LIST_HEAD(&node->children); | 416 | INIT_LIST_HEAD(&node->children); |
| 380 | INIT_LIST_HEAD(&node->all); | 417 | INIT_LIST_HEAD(&node->all); |
| 381 | node->info = info; | 418 | if (node->loaded_info) { |
| 419 | node->loaded_info[0] = info; | ||
| 420 | node->num_loaded = 1; | ||
| 421 | } | ||
| 382 | node->parent = parent; | 422 | node->parent = parent; |
| 383 | if (name) | 423 | if (name) |
| 384 | strcpy(node->name, name); | 424 | strcpy(node->name, name); |
| @@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent, | |||
| 394 | struct gcov_node *node; | 434 | struct gcov_node *node; |
| 395 | 435 | ||
| 396 | node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); | 436 | node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); |
| 397 | if (!node) { | 437 | if (!node) |
| 398 | pr_warning("out of memory\n"); | 438 | goto err_nomem; |
| 399 | return NULL; | 439 | if (info) { |
| 440 | node->loaded_info = kcalloc(1, sizeof(struct gcov_info *), | ||
| 441 | GFP_KERNEL); | ||
| 442 | if (!node->loaded_info) | ||
| 443 | goto err_nomem; | ||
| 400 | } | 444 | } |
| 401 | init_node(node, info, name, parent); | 445 | init_node(node, info, name, parent); |
| 402 | /* Differentiate between gcov data file nodes and directory nodes. */ | 446 | /* Differentiate between gcov data file nodes and directory nodes. */ |
| @@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent, | |||
| 416 | list_add(&node->all, &all_head); | 460 | list_add(&node->all, &all_head); |
| 417 | 461 | ||
| 418 | return node; | 462 | return node; |
| 463 | |||
| 464 | err_nomem: | ||
| 465 | kfree(node); | ||
| 466 | pr_warning("out of memory\n"); | ||
| 467 | return NULL; | ||
| 419 | } | 468 | } |
| 420 | 469 | ||
| 421 | /* Remove symbolic links associated with node. */ | 470 | /* Remove symbolic links associated with node. */ |
| @@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node) | |||
| 441 | list_del(&node->all); | 490 | list_del(&node->all); |
| 442 | debugfs_remove(node->dentry); | 491 | debugfs_remove(node->dentry); |
| 443 | remove_links(node); | 492 | remove_links(node); |
| 444 | if (node->ghost) | 493 | kfree(node->loaded_info); |
| 445 | gcov_info_free(node->ghost); | 494 | if (node->unloaded_info) |
| 495 | gcov_info_free(node->unloaded_info); | ||
| 446 | kfree(node); | 496 | kfree(node); |
| 447 | } | 497 | } |
| 448 | 498 | ||
| @@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent, | |||
| 477 | 527 | ||
| 478 | /* | 528 | /* |
| 479 | * write() implementation for reset file. Reset all profiling data to zero | 529 | * write() implementation for reset file. Reset all profiling data to zero |
| 480 | * and remove ghost nodes. | 530 | * and remove nodes for which all associated object files are unloaded. |
| 481 | */ | 531 | */ |
| 482 | static ssize_t reset_write(struct file *file, const char __user *addr, | 532 | static ssize_t reset_write(struct file *file, const char __user *addr, |
| 483 | size_t len, loff_t *pos) | 533 | size_t len, loff_t *pos) |
| @@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr, | |||
| 487 | mutex_lock(&node_lock); | 537 | mutex_lock(&node_lock); |
| 488 | restart: | 538 | restart: |
| 489 | list_for_each_entry(node, &all_head, all) { | 539 | list_for_each_entry(node, &all_head, all) { |
| 490 | if (node->info) | 540 | if (node->num_loaded > 0) |
| 491 | gcov_info_reset(node->info); | 541 | reset_node(node); |
| 492 | else if (list_empty(&node->children)) { | 542 | else if (list_empty(&node->children)) { |
| 493 | remove_node(node); | 543 | remove_node(node); |
| 494 | /* Several nodes may have gone - restart loop. */ | 544 | /* Several nodes may have gone - restart loop. */ |
| @@ -564,37 +614,115 @@ err_remove: | |||
| 564 | } | 614 | } |
| 565 | 615 | ||
| 566 | /* | 616 | /* |
| 567 | * The profiling data set associated with this node is being unloaded. Store a | 617 | * Associate a profiling data set with an existing node. Needs to be called |
| 568 | * copy of the profiling data and turn this node into a "ghost". | 618 | * with node_lock held. |
| 569 | */ | 619 | */ |
| 570 | static int ghost_node(struct gcov_node *node) | 620 | static void add_info(struct gcov_node *node, struct gcov_info *info) |
| 571 | { | 621 | { |
| 572 | node->ghost = gcov_info_dup(node->info); | 622 | struct gcov_info **loaded_info; |
| 573 | if (!node->ghost) { | 623 | int num = node->num_loaded; |
| 574 | pr_warning("could not save data for '%s' (out of memory)\n", | 624 | |
| 575 | node->info->filename); | 625 | /* |
| 576 | return -ENOMEM; | 626 | * Prepare new array. This is done first to simplify cleanup in |
| 627 | * case the new data set is incompatible, the node only contains | ||
| 628 | * unloaded data sets and there's not enough memory for the array. | ||
| 629 | */ | ||
| 630 | loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL); | ||
| 631 | if (!loaded_info) { | ||
| 632 | pr_warning("could not add '%s' (out of memory)\n", | ||
| 633 | info->filename); | ||
| 634 | return; | ||
| 635 | } | ||
| 636 | memcpy(loaded_info, node->loaded_info, | ||
| 637 | num * sizeof(struct gcov_info *)); | ||
| 638 | loaded_info[num] = info; | ||
| 639 | /* Check if the new data set is compatible. */ | ||
| 640 | if (num == 0) { | ||
| 641 | /* | ||
| 642 | * A module was unloaded, modified and reloaded. The new | ||
| 643 | * data set replaces the copy of the last one. | ||
| 644 | */ | ||
| 645 | if (!gcov_info_is_compatible(node->unloaded_info, info)) { | ||
| 646 | pr_warning("discarding saved data for %s " | ||
| 647 | "(incompatible version)\n", info->filename); | ||
| 648 | gcov_info_free(node->unloaded_info); | ||
| 649 | node->unloaded_info = NULL; | ||
| 650 | } | ||
| 651 | } else { | ||
| 652 | /* | ||
| 653 | * Two different versions of the same object file are loaded. | ||
| 654 | * The initial one takes precedence. | ||
| 655 | */ | ||
| 656 | if (!gcov_info_is_compatible(node->loaded_info[0], info)) { | ||
| 657 | pr_warning("could not add '%s' (incompatible " | ||
| 658 | "version)\n", info->filename); | ||
| 659 | kfree(loaded_info); | ||
| 660 | return; | ||
| 661 | } | ||
| 577 | } | 662 | } |
| 578 | node->info = NULL; | 663 | /* Overwrite previous array. */ |
| 664 | kfree(node->loaded_info); | ||
| 665 | node->loaded_info = loaded_info; | ||
| 666 | node->num_loaded = num + 1; | ||
| 667 | } | ||
| 579 | 668 | ||
| 580 | return 0; | 669 | /* |
| 670 | * Return the index of a profiling data set associated with a node. | ||
| 671 | */ | ||
| 672 | static int get_info_index(struct gcov_node *node, struct gcov_info *info) | ||
| 673 | { | ||
| 674 | int i; | ||
| 675 | |||
| 676 | for (i = 0; i < node->num_loaded; i++) { | ||
| 677 | if (node->loaded_info[i] == info) | ||
| 678 | return i; | ||
| 679 | } | ||
| 680 | return -ENOENT; | ||
| 581 | } | 681 | } |
| 582 | 682 | ||
| 583 | /* | 683 | /* |
| 584 | * Profiling data for this node has been loaded again. Add profiling data | 684 | * Save the data of a profiling data set which is being unloaded. |
| 585 | * from previous instantiation and turn this node into a regular node. | ||
| 586 | */ | 685 | */ |
| 587 | static void revive_node(struct gcov_node *node, struct gcov_info *info) | 686 | static void save_info(struct gcov_node *node, struct gcov_info *info) |
| 588 | { | 687 | { |
| 589 | if (gcov_info_is_compatible(node->ghost, info)) | 688 | if (node->unloaded_info) |
| 590 | gcov_info_add(info, node->ghost); | 689 | gcov_info_add(node->unloaded_info, info); |
| 591 | else { | 690 | else { |
| 592 | pr_warning("discarding saved data for '%s' (version changed)\n", | 691 | node->unloaded_info = gcov_info_dup(info); |
| 692 | if (!node->unloaded_info) { | ||
| 693 | pr_warning("could not save data for '%s' " | ||
| 694 | "(out of memory)\n", info->filename); | ||
| 695 | } | ||
| 696 | } | ||
| 697 | } | ||
| 698 | |||
| 699 | /* | ||
| 700 | * Disassociate a profiling data set from a node. Needs to be called with | ||
| 701 | * node_lock held. | ||
| 702 | */ | ||
| 703 | static void remove_info(struct gcov_node *node, struct gcov_info *info) | ||
| 704 | { | ||
| 705 | int i; | ||
| 706 | |||
| 707 | i = get_info_index(node, info); | ||
| 708 | if (i < 0) { | ||
| 709 | pr_warning("could not remove '%s' (not found)\n", | ||
| 593 | info->filename); | 710 | info->filename); |
| 711 | return; | ||
| 594 | } | 712 | } |
| 595 | gcov_info_free(node->ghost); | 713 | if (gcov_persist) |
| 596 | node->ghost = NULL; | 714 | save_info(node, info); |
| 597 | node->info = info; | 715 | /* Shrink array. */ |
| 716 | node->loaded_info[i] = node->loaded_info[node->num_loaded - 1]; | ||
| 717 | node->num_loaded--; | ||
| 718 | if (node->num_loaded > 0) | ||
| 719 | return; | ||
| 720 | /* Last loaded data set was removed. */ | ||
| 721 | kfree(node->loaded_info); | ||
| 722 | node->loaded_info = NULL; | ||
| 723 | node->num_loaded = 0; | ||
| 724 | if (!node->unloaded_info) | ||
| 725 | remove_node(node); | ||
| 598 | } | 726 | } |
| 599 | 727 | ||
| 600 | /* | 728 | /* |
| @@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info) | |||
| 609 | node = get_node_by_name(info->filename); | 737 | node = get_node_by_name(info->filename); |
| 610 | switch (action) { | 738 | switch (action) { |
| 611 | case GCOV_ADD: | 739 | case GCOV_ADD: |
| 612 | /* Add new node or revive ghost. */ | 740 | if (node) |
| 613 | if (!node) { | 741 | add_info(node, info); |
| 742 | else | ||
| 614 | add_node(info); | 743 | add_node(info); |
| 615 | break; | ||
| 616 | } | ||
| 617 | if (gcov_persist) | ||
| 618 | revive_node(node, info); | ||
| 619 | else { | ||
| 620 | pr_warning("could not add '%s' (already exists)\n", | ||
| 621 | info->filename); | ||
| 622 | } | ||
| 623 | break; | 744 | break; |
| 624 | case GCOV_REMOVE: | 745 | case GCOV_REMOVE: |
| 625 | /* Remove node or turn into ghost. */ | 746 | if (node) |
| 626 | if (!node) { | 747 | remove_info(node, info); |
| 748 | else { | ||
| 627 | pr_warning("could not remove '%s' (not found)\n", | 749 | pr_warning("could not remove '%s' (not found)\n", |
| 628 | info->filename); | 750 | info->filename); |
| 629 | break; | ||
| 630 | } | 751 | } |
| 631 | if (gcov_persist) { | ||
| 632 | if (!ghost_node(node)) | ||
| 633 | break; | ||
| 634 | } | ||
| 635 | remove_node(node); | ||
| 636 | break; | 752 | break; |
| 637 | } | 753 | } |
| 638 | mutex_unlock(&node_lock); | 754 | mutex_unlock(&node_lock); |
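With the ghost concept gone, each gcov node now tracks an array of loaded_info pointers: add_info() grows the array by building the larger copy first (so a failure leaves the node untouched) and remove_info() shrinks it by moving the last entry into the freed slot. A minimal user-space sketch of the same grow/shrink pattern, with invented names and the C library allocator standing in for the kernel one:

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        struct node {
                void **loaded;          /* array of data-set pointers */
                int num_loaded;
        };

        /* Grow by one: build the new array first so failure leaves 'n' untouched. */
        static int node_add(struct node *n, void *info)
        {
                void **grown = calloc(n->num_loaded + 1, sizeof(*grown));

                if (!grown)
                        return -1;
                if (n->num_loaded)
                        memcpy(grown, n->loaded, n->num_loaded * sizeof(*grown));
                grown[n->num_loaded] = info;
                free(n->loaded);
                n->loaded = grown;
                n->num_loaded++;
                return 0;
        }

        /* Shrink by one: overwrite the removed slot with the last entry. */
        static void node_remove(struct node *n, void *info)
        {
                int i;

                for (i = 0; i < n->num_loaded; i++) {
                        if (n->loaded[i] != info)
                                continue;
                        n->loaded[i] = n->loaded[n->num_loaded - 1];
                        if (--n->num_loaded == 0) {
                                free(n->loaded);
                                n->loaded = NULL;
                        }
                        return;
                }
        }

        int main(void)
        {
                struct node n = { NULL, 0 };
                int a, b;

                node_add(&n, &a);
                node_add(&n, &b);
                node_remove(&n, &a);
                printf("loaded sets: %d\n", n.num_loaded);      /* prints 1 */
                node_remove(&n, &b);
                return 0;
        }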
diff --git a/kernel/groups.c b/kernel/groups.c
index 53b1916c9492..253dc0f35cf4 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
| @@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp) | |||
| 143 | right = group_info->ngroups; | 143 | right = group_info->ngroups; |
| 144 | while (left < right) { | 144 | while (left < right) { |
| 145 | unsigned int mid = (left+right)/2; | 145 | unsigned int mid = (left+right)/2; |
| 146 | int cmp = grp - GROUP_AT(group_info, mid); | 146 | if (grp > GROUP_AT(group_info, mid)) |
| 147 | if (cmp > 0) | ||
| 148 | left = mid + 1; | 147 | left = mid + 1; |
| 149 | else if (cmp < 0) | 148 | else if (grp < GROUP_AT(group_info, mid)) |
| 150 | right = mid; | 149 | right = mid; |
| 151 | else | 150 | else |
| 152 | return 1; | 151 | return 1; |
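The rewritten groups_search() avoids computing `grp - GROUP_AT(group_info, mid)` in a signed int: gid_t is unsigned, so the subtraction wraps and can report the wrong ordering once the two GIDs differ by more than INT_MAX. A small stand-alone illustration of the failure mode and of the direct comparison the patch switches to:

        #include <stdio.h>

        typedef unsigned int gid_demo_t;        /* stand-in for gid_t */

        int main(void)
        {
                gid_demo_t grp = 0;
                gid_demo_t mid = 0x80000001u;   /* differs by more than INT_MAX */

                /* Old style: unsigned subtraction wraps, then converts to int
                 * (positive on typical two's-complement targets). */
                int cmp = grp - mid;

                printf("cmp > 0:   %d (wrongly suggests grp > mid)\n", cmp > 0);
                printf("grp < mid: %d (direct compare is correct)\n", grp < mid);
                return 0;
        }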
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ce669174f355..1decafbb6b1a 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
| @@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel); | |||
| 1091 | */ | 1091 | */ |
| 1092 | ktime_t hrtimer_get_remaining(const struct hrtimer *timer) | 1092 | ktime_t hrtimer_get_remaining(const struct hrtimer *timer) |
| 1093 | { | 1093 | { |
| 1094 | struct hrtimer_clock_base *base; | ||
| 1095 | unsigned long flags; | 1094 | unsigned long flags; |
| 1096 | ktime_t rem; | 1095 | ktime_t rem; |
| 1097 | 1096 | ||
| 1098 | base = lock_hrtimer_base(timer, &flags); | 1097 | lock_hrtimer_base(timer, &flags); |
| 1099 | rem = hrtimer_expires_remaining(timer); | 1098 | rem = hrtimer_expires_remaining(timer); |
| 1100 | unlock_hrtimer_base(timer, &flags); | 1099 | unlock_hrtimer_base(timer, &flags); |
| 1101 | 1100 | ||
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index d71a987fd2bf..c7c2aed9e2dc 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
| @@ -433,7 +433,8 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, | |||
| 433 | perf_overflow_handler_t triggered, | 433 | perf_overflow_handler_t triggered, |
| 434 | struct task_struct *tsk) | 434 | struct task_struct *tsk) |
| 435 | { | 435 | { |
| 436 | return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); | 436 | return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk), |
| 437 | triggered); | ||
| 437 | } | 438 | } |
| 438 | EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); | 439 | EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); |
| 439 | 440 | ||
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4c0b7b3e6d2e..200407c1502f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
| @@ -36,15 +36,6 @@ | |||
| 36 | # include <asm/mutex.h> | 36 | # include <asm/mutex.h> |
| 37 | #endif | 37 | #endif |
| 38 | 38 | ||
| 39 | /*** | ||
| 40 | * mutex_init - initialize the mutex | ||
| 41 | * @lock: the mutex to be initialized | ||
| 42 | * @key: the lock_class_key for the class; used by mutex lock debugging | ||
| 43 | * | ||
| 44 | * Initialize the mutex to unlocked state. | ||
| 45 | * | ||
| 46 | * It is not allowed to initialize an already locked mutex. | ||
| 47 | */ | ||
| 48 | void | 39 | void |
| 49 | __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) | 40 | __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) |
| 50 | { | 41 | { |
| @@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init); | |||
| 68 | static __used noinline void __sched | 59 | static __used noinline void __sched |
| 69 | __mutex_lock_slowpath(atomic_t *lock_count); | 60 | __mutex_lock_slowpath(atomic_t *lock_count); |
| 70 | 61 | ||
| 71 | /*** | 62 | /** |
| 72 | * mutex_lock - acquire the mutex | 63 | * mutex_lock - acquire the mutex |
| 73 | * @lock: the mutex to be acquired | 64 | * @lock: the mutex to be acquired |
| 74 | * | 65 | * |
| @@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock); | |||
| 105 | 96 | ||
| 106 | static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); | 97 | static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); |
| 107 | 98 | ||
| 108 | /*** | 99 | /** |
| 109 | * mutex_unlock - release the mutex | 100 | * mutex_unlock - release the mutex |
| 110 | * @lock: the mutex to be released | 101 | * @lock: the mutex to be released |
| 111 | * | 102 | * |
| @@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count); | |||
| 364 | static noinline int __sched | 355 | static noinline int __sched |
| 365 | __mutex_lock_interruptible_slowpath(atomic_t *lock_count); | 356 | __mutex_lock_interruptible_slowpath(atomic_t *lock_count); |
| 366 | 357 | ||
| 367 | /*** | 358 | /** |
| 368 | * mutex_lock_interruptible - acquire the mutex, interruptable | 359 | * mutex_lock_interruptible - acquire the mutex, interruptible |
| 369 | * @lock: the mutex to be acquired | 360 | * @lock: the mutex to be acquired |
| 370 | * | 361 | * |
| 371 | * Lock the mutex like mutex_lock(), and return 0 if the mutex has | 362 | * Lock the mutex like mutex_lock(), and return 0 if the mutex has |
| @@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count) | |||
| 456 | return prev == 1; | 447 | return prev == 1; |
| 457 | } | 448 | } |
| 458 | 449 | ||
| 459 | /*** | 450 | /** |
| 460 | * mutex_trylock - try acquire the mutex, without waiting | 451 | * mutex_trylock - try to acquire the mutex, without waiting |
| 461 | * @lock: the mutex to be acquired | 452 | * @lock: the mutex to be acquired |
| 462 | * | 453 | * |
| 463 | * Try to acquire the mutex atomically. Returns 1 if the mutex | 454 | * Try to acquire the mutex atomically. Returns 1 if the mutex |
| 464 | * has been acquired successfully, and 0 on contention. | 455 | * has been acquired successfully, and 0 on contention. |
| 465 | * | 456 | * |
| 466 | * NOTE: this function follows the spin_trylock() convention, so | 457 | * NOTE: this function follows the spin_trylock() convention, so |
| 467 | * it is negated to the down_trylock() return values! Be careful | 458 | * it is negated from the down_trylock() return values! Be careful |
| 468 | * about this when converting semaphore users to mutexes. | 459 | * about this when converting semaphore users to mutexes. |
| 469 | * | 460 | * |
| 470 | * This function must not be used in interrupt context. The | 461 | * This function must not be used in interrupt context. The |
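The comment-marker changes above are not cosmetic: kernel-doc only recognizes comments opened with exactly `/**`, so the old `/*** ` blocks were invisible to the documentation tooling. For reference, a kernel-doc block has this shape (the function and struct below are hypothetical and only show the format):

        struct example_lock;

        /**
         * example_lock_timeout - acquire the lock, giving up after a timeout
         * @lock:    the lock to be acquired
         * @timeout: how long to wait, in jiffies
         *
         * Returns 0 if the lock was acquired and -ETIMEDOUT otherwise.
         * (Hypothetical function, shown only to illustrate the marker style.)
         */
        int example_lock_timeout(struct example_lock *lock, unsigned long timeout);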
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 403d1804b198..db5b56064687 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
| @@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event) | |||
| 402 | } | 402 | } |
| 403 | } | 403 | } |
| 404 | 404 | ||
| 405 | static inline int | ||
| 406 | event_filter_match(struct perf_event *event) | ||
| 407 | { | ||
| 408 | return event->cpu == -1 || event->cpu == smp_processor_id(); | ||
| 409 | } | ||
| 410 | |||
| 405 | static void | 411 | static void |
| 406 | event_sched_out(struct perf_event *event, | 412 | event_sched_out(struct perf_event *event, |
| 407 | struct perf_cpu_context *cpuctx, | 413 | struct perf_cpu_context *cpuctx, |
| 408 | struct perf_event_context *ctx) | 414 | struct perf_event_context *ctx) |
| 409 | { | 415 | { |
| 416 | u64 delta; | ||
| 417 | /* | ||
| 418 | * An event which could not be activated because of | ||
| 419 | * filter mismatch still needs to have its timings | ||
| 420 | * maintained, otherwise bogus information is return | ||
| 421 | * via read() for time_enabled, time_running: | ||
| 422 | */ | ||
| 423 | if (event->state == PERF_EVENT_STATE_INACTIVE | ||
| 424 | && !event_filter_match(event)) { | ||
| 425 | delta = ctx->time - event->tstamp_stopped; | ||
| 426 | event->tstamp_running += delta; | ||
| 427 | event->tstamp_stopped = ctx->time; | ||
| 428 | } | ||
| 429 | |||
| 410 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 430 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 411 | return; | 431 | return; |
| 412 | 432 | ||
| @@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event, | |||
| 432 | struct perf_event_context *ctx) | 452 | struct perf_event_context *ctx) |
| 433 | { | 453 | { |
| 434 | struct perf_event *event; | 454 | struct perf_event *event; |
| 435 | 455 | int state = group_event->state; | |
| 436 | if (group_event->state != PERF_EVENT_STATE_ACTIVE) | ||
| 437 | return; | ||
| 438 | 456 | ||
| 439 | event_sched_out(group_event, cpuctx, ctx); | 457 | event_sched_out(group_event, cpuctx, ctx); |
| 440 | 458 | ||
| @@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event, | |||
| 444 | list_for_each_entry(event, &group_event->sibling_list, group_entry) | 462 | list_for_each_entry(event, &group_event->sibling_list, group_entry) |
| 445 | event_sched_out(event, cpuctx, ctx); | 463 | event_sched_out(event, cpuctx, ctx); |
| 446 | 464 | ||
| 447 | if (group_event->attr.exclusive) | 465 | if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive) |
| 448 | cpuctx->exclusive = 0; | 466 | cpuctx->exclusive = 0; |
| 449 | } | 467 | } |
| 450 | 468 | ||
| @@ -5743,15 +5761,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | |||
| 5743 | { | 5761 | { |
| 5744 | unsigned int cpu = (long)hcpu; | 5762 | unsigned int cpu = (long)hcpu; |
| 5745 | 5763 | ||
| 5746 | switch (action) { | 5764 | switch (action & ~CPU_TASKS_FROZEN) { |
| 5747 | 5765 | ||
| 5748 | case CPU_UP_PREPARE: | 5766 | case CPU_UP_PREPARE: |
| 5749 | case CPU_UP_PREPARE_FROZEN: | 5767 | case CPU_DOWN_FAILED: |
| 5750 | perf_event_init_cpu(cpu); | 5768 | perf_event_init_cpu(cpu); |
| 5751 | break; | 5769 | break; |
| 5752 | 5770 | ||
| 5771 | case CPU_UP_CANCELED: | ||
| 5753 | case CPU_DOWN_PREPARE: | 5772 | case CPU_DOWN_PREPARE: |
| 5754 | case CPU_DOWN_PREPARE_FROZEN: | ||
| 5755 | perf_event_exit_cpu(cpu); | 5773 | perf_event_exit_cpu(cpu); |
| 5756 | break; | 5774 | break; |
| 5757 | 5775 | ||
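In the hotplug notifier, masking out CPU_TASKS_FROZEN before the switch lets one case label cover both the normal and the _FROZEN variant of each action, and the rewritten cases also initialize on CPU_DOWN_FAILED and tear down on CPU_UP_CANCELED. A stand-alone sketch of the masking idea; the constant values below are illustrative, the real ones live in the kernel's CPU hotplug header:

        #include <stdio.h>

        /* Illustrative values only; not copied from kernel headers. */
        #define DEMO_CPU_UP_PREPARE     0x0003
        #define DEMO_CPU_DOWN_PREPARE   0x0005
        #define DEMO_CPU_TASKS_FROZEN   0x0010

        static const char *decode(unsigned long action)
        {
                /* One case label covers the normal and the _FROZEN variant. */
                switch (action & ~DEMO_CPU_TASKS_FROZEN) {
                case DEMO_CPU_UP_PREPARE:
                        return "init cpu";
                case DEMO_CPU_DOWN_PREPARE:
                        return "exit cpu";
                default:
                        return "ignored";
                }
        }

        int main(void)
        {
                printf("%s\n", decode(DEMO_CPU_UP_PREPARE));
                printf("%s\n", decode(DEMO_CPU_UP_PREPARE | DEMO_CPU_TASKS_FROZEN));
                return 0;
        }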
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index b7e4c362361b..645e541a45f6 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
| @@ -389,10 +389,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | |||
| 389 | } else if (count == 11) { /* len('0x12345678/0') */ | 389 | } else if (count == 11) { /* len('0x12345678/0') */ |
| 390 | if (copy_from_user(ascii_value, buf, 11)) | 390 | if (copy_from_user(ascii_value, buf, 11)) |
| 391 | return -EFAULT; | 391 | return -EFAULT; |
| 392 | if (strlen(ascii_value) != 10) | ||
| 393 | return -EINVAL; | ||
| 392 | x = sscanf(ascii_value, "%x", &value); | 394 | x = sscanf(ascii_value, "%x", &value); |
| 393 | if (x != 1) | 395 | if (x != 1) |
| 394 | return -EINVAL; | 396 | return -EINVAL; |
| 395 | pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value); | 397 | pr_debug("%s, %d, 0x%x\n", ascii_value, x, value); |
| 396 | } else | 398 | } else |
| 397 | return -EINVAL; | 399 | return -EINVAL; |
| 398 | 400 | ||
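The added strlen() check makes the 11-byte write path accept only the full "0x12345678" form; previously sscanf() would happily parse a shorter hex prefix out of whatever the user wrote. A user-space illustration of the difference (parse_qos_value() is an invented helper, not kernel code):

        #include <stdio.h>
        #include <string.h>

        /* Invented helper mirroring the fixed pm_qos write path. */
        static int parse_qos_value(const char *ascii_value, unsigned int *value)
        {
                if (strlen(ascii_value) != 10)  /* expect exactly "0x12345678" */
                        return -1;
                if (sscanf(ascii_value, "%x", value) != 1)
                        return -1;
                return 0;
        }

        int main(void)
        {
                unsigned int v;

                printf("full value:  %d\n", parse_qos_value("0x12345678", &v)); /* 0  */
                printf("short value: %d\n", parse_qos_value("0x12\n", &v));     /* -1 */
                return 0;
        }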
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c77963938bca..8dc31e02ae12 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
| @@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode) | |||
| 338 | goto Close; | 338 | goto Close; |
| 339 | 339 | ||
| 340 | suspend_console(); | 340 | suspend_console(); |
| 341 | hibernation_freeze_swap(); | ||
| 342 | saved_mask = clear_gfp_allowed_mask(GFP_IOFS); | 341 | saved_mask = clear_gfp_allowed_mask(GFP_IOFS); |
| 343 | error = dpm_suspend_start(PMSG_FREEZE); | 342 | error = dpm_suspend_start(PMSG_FREEZE); |
| 344 | if (error) | 343 | if (error) |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5e7edfb05e66..d3f795f01bbc 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
| @@ -1086,7 +1086,6 @@ void swsusp_free(void) | |||
| 1086 | buffer = NULL; | 1086 | buffer = NULL; |
| 1087 | alloc_normal = 0; | 1087 | alloc_normal = 0; |
| 1088 | alloc_highmem = 0; | 1088 | alloc_highmem = 0; |
| 1089 | hibernation_thaw_swap(); | ||
| 1090 | } | 1089 | } |
| 1091 | 1090 | ||
| 1092 | /* Helper functions used for the shrinking of memory. */ | 1091 | /* Helper functions used for the shrinking of memory. */ |
| @@ -1122,9 +1121,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask) | |||
| 1122 | return nr_alloc; | 1121 | return nr_alloc; |
| 1123 | } | 1122 | } |
| 1124 | 1123 | ||
| 1125 | static unsigned long preallocate_image_memory(unsigned long nr_pages) | 1124 | static unsigned long preallocate_image_memory(unsigned long nr_pages, |
| 1125 | unsigned long avail_normal) | ||
| 1126 | { | 1126 | { |
| 1127 | return preallocate_image_pages(nr_pages, GFP_IMAGE); | 1127 | unsigned long alloc; |
| 1128 | |||
| 1129 | if (avail_normal <= alloc_normal) | ||
| 1130 | return 0; | ||
| 1131 | |||
| 1132 | alloc = avail_normal - alloc_normal; | ||
| 1133 | if (nr_pages < alloc) | ||
| 1134 | alloc = nr_pages; | ||
| 1135 | |||
| 1136 | return preallocate_image_pages(alloc, GFP_IMAGE); | ||
| 1128 | } | 1137 | } |
| 1129 | 1138 | ||
| 1130 | #ifdef CONFIG_HIGHMEM | 1139 | #ifdef CONFIG_HIGHMEM |
| @@ -1170,15 +1179,22 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages, | |||
| 1170 | */ | 1179 | */ |
| 1171 | static void free_unnecessary_pages(void) | 1180 | static void free_unnecessary_pages(void) |
| 1172 | { | 1181 | { |
| 1173 | unsigned long save_highmem, to_free_normal, to_free_highmem; | 1182 | unsigned long save, to_free_normal, to_free_highmem; |
| 1174 | 1183 | ||
| 1175 | to_free_normal = alloc_normal - count_data_pages(); | 1184 | save = count_data_pages(); |
| 1176 | save_highmem = count_highmem_pages(); | 1185 | if (alloc_normal >= save) { |
| 1177 | if (alloc_highmem > save_highmem) { | 1186 | to_free_normal = alloc_normal - save; |
| 1178 | to_free_highmem = alloc_highmem - save_highmem; | 1187 | save = 0; |
| 1188 | } else { | ||
| 1189 | to_free_normal = 0; | ||
| 1190 | save -= alloc_normal; | ||
| 1191 | } | ||
| 1192 | save += count_highmem_pages(); | ||
| 1193 | if (alloc_highmem >= save) { | ||
| 1194 | to_free_highmem = alloc_highmem - save; | ||
| 1179 | } else { | 1195 | } else { |
| 1180 | to_free_highmem = 0; | 1196 | to_free_highmem = 0; |
| 1181 | to_free_normal -= save_highmem - alloc_highmem; | 1197 | to_free_normal -= save - alloc_highmem; |
| 1182 | } | 1198 | } |
| 1183 | 1199 | ||
| 1184 | memory_bm_position_reset(©_bm); | 1200 | memory_bm_position_reset(©_bm); |
| @@ -1259,7 +1275,7 @@ int hibernate_preallocate_memory(void) | |||
| 1259 | { | 1275 | { |
| 1260 | struct zone *zone; | 1276 | struct zone *zone; |
| 1261 | unsigned long saveable, size, max_size, count, highmem, pages = 0; | 1277 | unsigned long saveable, size, max_size, count, highmem, pages = 0; |
| 1262 | unsigned long alloc, save_highmem, pages_highmem; | 1278 | unsigned long alloc, save_highmem, pages_highmem, avail_normal; |
| 1263 | struct timeval start, stop; | 1279 | struct timeval start, stop; |
| 1264 | int error; | 1280 | int error; |
| 1265 | 1281 | ||
| @@ -1296,6 +1312,7 @@ int hibernate_preallocate_memory(void) | |||
| 1296 | else | 1312 | else |
| 1297 | count += zone_page_state(zone, NR_FREE_PAGES); | 1313 | count += zone_page_state(zone, NR_FREE_PAGES); |
| 1298 | } | 1314 | } |
| 1315 | avail_normal = count; | ||
| 1299 | count += highmem; | 1316 | count += highmem; |
| 1300 | count -= totalreserve_pages; | 1317 | count -= totalreserve_pages; |
| 1301 | 1318 | ||
| @@ -1310,12 +1327,21 @@ int hibernate_preallocate_memory(void) | |||
| 1310 | */ | 1327 | */ |
| 1311 | if (size >= saveable) { | 1328 | if (size >= saveable) { |
| 1312 | pages = preallocate_image_highmem(save_highmem); | 1329 | pages = preallocate_image_highmem(save_highmem); |
| 1313 | pages += preallocate_image_memory(saveable - pages); | 1330 | pages += preallocate_image_memory(saveable - pages, avail_normal); |
| 1314 | goto out; | 1331 | goto out; |
| 1315 | } | 1332 | } |
| 1316 | 1333 | ||
| 1317 | /* Estimate the minimum size of the image. */ | 1334 | /* Estimate the minimum size of the image. */ |
| 1318 | pages = minimum_image_size(saveable); | 1335 | pages = minimum_image_size(saveable); |
| 1336 | /* | ||
| 1337 | * To avoid excessive pressure on the normal zone, leave room in it to | ||
| 1338 | * accommodate an image of the minimum size (unless it's already too | ||
| 1339 | * small, in which case don't preallocate pages from it at all). | ||
| 1340 | */ | ||
| 1341 | if (avail_normal > pages) | ||
| 1342 | avail_normal -= pages; | ||
| 1343 | else | ||
| 1344 | avail_normal = 0; | ||
| 1319 | if (size < pages) | 1345 | if (size < pages) |
| 1320 | size = min_t(unsigned long, pages, max_size); | 1346 | size = min_t(unsigned long, pages, max_size); |
| 1321 | 1347 | ||
| @@ -1336,16 +1362,34 @@ int hibernate_preallocate_memory(void) | |||
| 1336 | */ | 1362 | */ |
| 1337 | pages_highmem = preallocate_image_highmem(highmem / 2); | 1363 | pages_highmem = preallocate_image_highmem(highmem / 2); |
| 1338 | alloc = (count - max_size) - pages_highmem; | 1364 | alloc = (count - max_size) - pages_highmem; |
| 1339 | pages = preallocate_image_memory(alloc); | 1365 | pages = preallocate_image_memory(alloc, avail_normal); |
| 1340 | if (pages < alloc) | 1366 | if (pages < alloc) { |
| 1341 | goto err_out; | 1367 | /* We have exhausted non-highmem pages, try highmem. */ |
| 1342 | size = max_size - size; | 1368 | alloc -= pages; |
| 1343 | alloc = size; | 1369 | pages += pages_highmem; |
| 1344 | size = preallocate_highmem_fraction(size, highmem, count); | 1370 | pages_highmem = preallocate_image_highmem(alloc); |
| 1345 | pages_highmem += size; | 1371 | if (pages_highmem < alloc) |
| 1346 | alloc -= size; | 1372 | goto err_out; |
| 1347 | pages += preallocate_image_memory(alloc); | 1373 | pages += pages_highmem; |
| 1348 | pages += pages_highmem; | 1374 | /* |
| 1375 | * size is the desired number of saveable pages to leave in | ||
| 1376 | * memory, so try to preallocate (all memory - size) pages. | ||
| 1377 | */ | ||
| 1378 | alloc = (count - pages) - size; | ||
| 1379 | pages += preallocate_image_highmem(alloc); | ||
| 1380 | } else { | ||
| 1381 | /* | ||
| 1382 | * There are approximately max_size saveable pages at this point | ||
| 1383 | * and we want to reduce this number down to size. | ||
| 1384 | */ | ||
| 1385 | alloc = max_size - size; | ||
| 1386 | size = preallocate_highmem_fraction(alloc, highmem, count); | ||
| 1387 | pages_highmem += size; | ||
| 1388 | alloc -= size; | ||
| 1389 | size = preallocate_image_memory(alloc, avail_normal); | ||
| 1390 | pages_highmem += preallocate_image_highmem(alloc - size); | ||
| 1391 | pages += pages_highmem + size; | ||
| 1392 | } | ||
| 1349 | 1393 | ||
| 1350 | /* | 1394 | /* |
| 1351 | * We only need as many page frames for the image as there are saveable | 1395 | * We only need as many page frames for the image as there are saveable |
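preallocate_image_memory() now receives avail_normal, the number of pages the normal zone can still contribute, and clamps its request against what has already been allocated so the remainder is taken from highmem instead. The clamping logic on its own, in a stand-alone form with invented names:

        #include <stdio.h>

        static unsigned long alloc_normal;      /* pages already taken from the zone */

        /* Clamp a request so at most 'avail_normal' normal pages are ever used. */
        static unsigned long clamp_normal_request(unsigned long nr_pages,
                                                  unsigned long avail_normal)
        {
                unsigned long alloc;

                if (avail_normal <= alloc_normal)
                        return 0;               /* budget already exhausted */

                alloc = avail_normal - alloc_normal;
                if (nr_pages < alloc)
                        alloc = nr_pages;
                return alloc;
        }

        int main(void)
        {
                alloc_normal = 900;
                printf("%lu\n", clamp_normal_request(500, 1000));       /* 100 */
                printf("%lu\n", clamp_normal_request(50, 1000));        /* 50  */
                alloc_normal = 1000;
                printf("%lu\n", clamp_normal_request(500, 1000));       /* 0   */
                return 0;
        }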
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 5d0059eed3e4..e6a5bdf61a37 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
| @@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap) | |||
| 136 | { | 136 | { |
| 137 | unsigned long offset; | 137 | unsigned long offset; |
| 138 | 138 | ||
| 139 | offset = swp_offset(get_swap_for_hibernation(swap)); | 139 | offset = swp_offset(get_swap_page_of_type(swap)); |
| 140 | if (offset) { | 140 | if (offset) { |
| 141 | if (swsusp_extents_insert(offset)) | 141 | if (swsusp_extents_insert(offset)) |
| 142 | swap_free_for_hibernation(swp_entry(swap, offset)); | 142 | swap_free(swp_entry(swap, offset)); |
| 143 | else | 143 | else |
| 144 | return swapdev_block(swap, offset); | 144 | return swapdev_block(swap, offset); |
| 145 | } | 145 | } |
| @@ -163,7 +163,7 @@ void free_all_swap_pages(int swap) | |||
| 163 | ext = container_of(node, struct swsusp_extent, node); | 163 | ext = container_of(node, struct swsusp_extent, node); |
| 164 | rb_erase(node, &swsusp_extents); | 164 | rb_erase(node, &swsusp_extents); |
| 165 | for (offset = ext->start; offset <= ext->end; offset++) | 165 | for (offset = ext->start; offset <= ext->end; offset++) |
| 166 | swap_free_for_hibernation(swp_entry(swap, offset)); | 166 | swap_free(swp_entry(swap, offset)); |
| 167 | 167 | ||
| 168 | kfree(ext); | 168 | kfree(ext); |
| 169 | } | 169 | } |
diff --git a/kernel/sched.c b/kernel/sched.c
index 09b574e7f4df..dc85ceb90832 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
| @@ -1294,6 +1294,10 @@ static void resched_task(struct task_struct *p) | |||
| 1294 | static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) | 1294 | static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
| 1295 | { | 1295 | { |
| 1296 | } | 1296 | } |
| 1297 | |||
| 1298 | static void sched_avg_update(struct rq *rq) | ||
| 1299 | { | ||
| 1300 | } | ||
| 1297 | #endif /* CONFIG_SMP */ | 1301 | #endif /* CONFIG_SMP */ |
| 1298 | 1302 | ||
| 1299 | #if BITS_PER_LONG == 32 | 1303 | #if BITS_PER_LONG == 32 |
| @@ -3182,6 +3186,8 @@ static void update_cpu_load(struct rq *this_rq) | |||
| 3182 | 3186 | ||
| 3183 | this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; | 3187 | this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; |
| 3184 | } | 3188 | } |
| 3189 | |||
| 3190 | sched_avg_update(this_rq); | ||
| 3185 | } | 3191 | } |
| 3186 | 3192 | ||
| 3187 | static void update_cpu_load_active(struct rq *this_rq) | 3193 | static void update_cpu_load_active(struct rq *this_rq) |
| @@ -3507,9 +3513,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | |||
| 3507 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); | 3513 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); |
| 3508 | 3514 | ||
| 3509 | if (total) { | 3515 | if (total) { |
| 3510 | u64 temp; | 3516 | u64 temp = rtime; |
| 3511 | 3517 | ||
| 3512 | temp = (u64)(rtime * utime); | 3518 | temp *= utime; |
| 3513 | do_div(temp, total); | 3519 | do_div(temp, total); |
| 3514 | utime = (cputime_t)temp; | 3520 | utime = (cputime_t)temp; |
| 3515 | } else | 3521 | } else |
| @@ -3540,9 +3546,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | |||
| 3540 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); | 3546 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); |
| 3541 | 3547 | ||
| 3542 | if (total) { | 3548 | if (total) { |
| 3543 | u64 temp; | 3549 | u64 temp = rtime; |
| 3544 | 3550 | ||
| 3545 | temp = (u64)(rtime * cputime.utime); | 3551 | temp *= cputime.utime; |
| 3546 | do_div(temp, total); | 3552 | do_div(temp, total); |
| 3547 | utime = (cputime_t)temp; | 3553 | utime = (cputime_t)temp; |
| 3548 | } else | 3554 | } else |
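The cputime scaling fix is a classic widening bug: `temp = (u64)(rtime * utime)` performs the multiplication in cputime_t (typically an unsigned long, so 32 bits on 32-bit kernels) and only then casts, truncating large products before do_div() ever sees them; initializing the u64 first and multiplying afterwards keeps the full result. A stand-alone demonstration:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t rtime = 100000;        /* plausible 32-bit cputime values */
                uint32_t utime = 90000;

                /* Old pattern: the product wraps in 32 bits before the cast. */
                uint64_t wrong = (uint64_t)(rtime * utime);

                /* Fixed pattern: widen first, then multiply. */
                uint64_t right = rtime;
                right *= utime;

                printf("truncated: %llu\n", (unsigned long long)wrong);
                printf("correct:   %llu\n", (unsigned long long)right);
                return 0;
        }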
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ab661ebc4895..db3f674ca49d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
| @@ -54,13 +54,13 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling | |||
| 54 | * Minimal preemption granularity for CPU-bound tasks: | 54 | * Minimal preemption granularity for CPU-bound tasks: |
| 55 | * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds) | 55 | * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds) |
| 56 | */ | 56 | */ |
| 57 | unsigned int sysctl_sched_min_granularity = 2000000ULL; | 57 | unsigned int sysctl_sched_min_granularity = 750000ULL; |
| 58 | unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL; | 58 | unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; |
| 59 | 59 | ||
| 60 | /* | 60 | /* |
| 61 | * is kept at sysctl_sched_latency / sysctl_sched_min_granularity | 61 | * is kept at sysctl_sched_latency / sysctl_sched_min_granularity |
| 62 | */ | 62 | */ |
| 63 | static unsigned int sched_nr_latency = 3; | 63 | static unsigned int sched_nr_latency = 8; |
| 64 | 64 | ||
| 65 | /* | 65 | /* |
| 66 | * After fork, child runs first. If set to 0 (default) then | 66 | * After fork, child runs first. If set to 0 (default) then |
| @@ -1313,7 +1313,7 @@ static struct sched_group * | |||
| 1313 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, | 1313 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, |
| 1314 | int this_cpu, int load_idx) | 1314 | int this_cpu, int load_idx) |
| 1315 | { | 1315 | { |
| 1316 | struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups; | 1316 | struct sched_group *idlest = NULL, *group = sd->groups; |
| 1317 | unsigned long min_load = ULONG_MAX, this_load = 0; | 1317 | unsigned long min_load = ULONG_MAX, this_load = 0; |
| 1318 | int imbalance = 100 + (sd->imbalance_pct-100)/2; | 1318 | int imbalance = 100 + (sd->imbalance_pct-100)/2; |
| 1319 | 1319 | ||
| @@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, | |||
| 1348 | 1348 | ||
| 1349 | if (local_group) { | 1349 | if (local_group) { |
| 1350 | this_load = avg_load; | 1350 | this_load = avg_load; |
| 1351 | this = group; | ||
| 1352 | } else if (avg_load < min_load) { | 1351 | } else if (avg_load < min_load) { |
| 1353 | min_load = avg_load; | 1352 | min_load = avg_load; |
| 1354 | idlest = group; | 1353 | idlest = group; |
| @@ -2268,8 +2267,6 @@ unsigned long scale_rt_power(int cpu) | |||
| 2268 | struct rq *rq = cpu_rq(cpu); | 2267 | struct rq *rq = cpu_rq(cpu); |
| 2269 | u64 total, available; | 2268 | u64 total, available; |
| 2270 | 2269 | ||
| 2271 | sched_avg_update(rq); | ||
| 2272 | |||
| 2273 | total = sched_avg_period() + (rq->clock - rq->age_stamp); | 2270 | total = sched_avg_period() + (rq->clock - rq->age_stamp); |
| 2274 | available = total - rq->rt_avg; | 2271 | available = total - rq->rt_avg; |
| 2275 | 2272 | ||
| @@ -3633,7 +3630,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu) | |||
| 3633 | if (time_before(now, nohz.next_balance)) | 3630 | if (time_before(now, nohz.next_balance)) |
| 3634 | return 0; | 3631 | return 0; |
| 3635 | 3632 | ||
| 3636 | if (!rq->nr_running) | 3633 | if (rq->idle_at_tick) |
| 3637 | return 0; | 3634 | return 0; |
| 3638 | 3635 | ||
| 3639 | first_pick_cpu = atomic_read(&nohz.first_pick_cpu); | 3636 | first_pick_cpu = atomic_read(&nohz.first_pick_cpu); |
diff --git a/kernel/sys.c b/kernel/sys.c
index e9ad44489828..7f5a0cd296a9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
| @@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) | |||
| 931 | pgid = pid; | 931 | pgid = pid; |
| 932 | if (pgid < 0) | 932 | if (pgid < 0) |
| 933 | return -EINVAL; | 933 | return -EINVAL; |
| 934 | rcu_read_lock(); | ||
| 934 | 935 | ||
| 935 | /* From this point forward we keep holding onto the tasklist lock | 936 | /* From this point forward we keep holding onto the tasklist lock |
| 936 | * so that our parent does not change from under us. -DaveM | 937 | * so that our parent does not change from under us. -DaveM |
| @@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) | |||
| 984 | out: | 985 | out: |
| 985 | /* All paths lead to here, thus we are safe. -DaveM */ | 986 | /* All paths lead to here, thus we are safe. -DaveM */ |
| 986 | write_unlock_irq(&tasklist_lock); | 987 | write_unlock_irq(&tasklist_lock); |
| 988 | rcu_read_unlock(); | ||
| 987 | return err; | 989 | return err; |
| 988 | } | 990 | } |
| 989 | 991 | ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ca38e8e3e907..f88552c6d227 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
| @@ -1713,10 +1713,7 @@ static __init int sysctl_init(void) | |||
| 1713 | { | 1713 | { |
| 1714 | sysctl_set_parent(NULL, root_table); | 1714 | sysctl_set_parent(NULL, root_table); |
| 1715 | #ifdef CONFIG_SYSCTL_SYSCALL_CHECK | 1715 | #ifdef CONFIG_SYSCTL_SYSCALL_CHECK |
| 1716 | { | 1716 | sysctl_check_table(current->nsproxy, root_table); |
| 1717 | int err; | ||
| 1718 | err = sysctl_check_table(current->nsproxy, root_table); | ||
| 1719 | } | ||
| 1720 | #endif | 1717 | #endif |
| 1721 | return 0; | 1718 | return 0; |
| 1722 | } | 1719 | } |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0d88ce9b9fb8..fa7ece649fe1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
| @@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
| 381 | { | 381 | { |
| 382 | struct ftrace_profile *rec = v; | 382 | struct ftrace_profile *rec = v; |
| 383 | char str[KSYM_SYMBOL_LEN]; | 383 | char str[KSYM_SYMBOL_LEN]; |
| 384 | int ret = 0; | ||
| 384 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 385 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 385 | static DEFINE_MUTEX(mutex); | ||
| 386 | static struct trace_seq s; | 386 | static struct trace_seq s; |
| 387 | unsigned long long avg; | 387 | unsigned long long avg; |
| 388 | unsigned long long stddev; | 388 | unsigned long long stddev; |
| 389 | #endif | 389 | #endif |
| 390 | mutex_lock(&ftrace_profile_lock); | ||
| 391 | |||
| 392 | /* we raced with function_profile_reset() */ | ||
| 393 | if (unlikely(rec->counter == 0)) { | ||
| 394 | ret = -EBUSY; | ||
| 395 | goto out; | ||
| 396 | } | ||
| 390 | 397 | ||
| 391 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | 398 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); |
| 392 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); | 399 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); |
| @@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
| 408 | do_div(stddev, (rec->counter - 1) * 1000); | 415 | do_div(stddev, (rec->counter - 1) * 1000); |
| 409 | } | 416 | } |
| 410 | 417 | ||
| 411 | mutex_lock(&mutex); | ||
| 412 | trace_seq_init(&s); | 418 | trace_seq_init(&s); |
| 413 | trace_print_graph_duration(rec->time, &s); | 419 | trace_print_graph_duration(rec->time, &s); |
| 414 | trace_seq_puts(&s, " "); | 420 | trace_seq_puts(&s, " "); |
| @@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
| 416 | trace_seq_puts(&s, " "); | 422 | trace_seq_puts(&s, " "); |
| 417 | trace_print_graph_duration(stddev, &s); | 423 | trace_print_graph_duration(stddev, &s); |
| 418 | trace_print_seq(m, &s); | 424 | trace_print_seq(m, &s); |
| 419 | mutex_unlock(&mutex); | ||
| 420 | #endif | 425 | #endif |
| 421 | seq_putc(m, '\n'); | 426 | seq_putc(m, '\n'); |
| 427 | out: | ||
| 428 | mutex_unlock(&ftrace_profile_lock); | ||
| 422 | 429 | ||
| 423 | return 0; | 430 | return ret; |
| 424 | } | 431 | } |
| 425 | 432 | ||
| 426 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) | 433 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) |
| @@ -1503,6 +1510,8 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
| 1503 | if (*pos > 0) | 1510 | if (*pos > 0) |
| 1504 | return t_hash_start(m, pos); | 1511 | return t_hash_start(m, pos); |
| 1505 | iter->flags |= FTRACE_ITER_PRINTALL; | 1512 | iter->flags |= FTRACE_ITER_PRINTALL; |
| 1513 | /* reset in case of seek/pread */ | ||
| 1514 | iter->flags &= ~FTRACE_ITER_HASH; | ||
| 1506 | return iter; | 1515 | return iter; |
| 1507 | } | 1516 | } |
| 1508 | 1517 | ||
| @@ -2409,7 +2418,7 @@ static const struct file_operations ftrace_filter_fops = { | |||
| 2409 | .open = ftrace_filter_open, | 2418 | .open = ftrace_filter_open, |
| 2410 | .read = seq_read, | 2419 | .read = seq_read, |
| 2411 | .write = ftrace_filter_write, | 2420 | .write = ftrace_filter_write, |
| 2412 | .llseek = ftrace_regex_lseek, | 2421 | .llseek = no_llseek, |
| 2413 | .release = ftrace_filter_release, | 2422 | .release = ftrace_filter_release, |
| 2414 | }; | 2423 | }; |
| 2415 | 2424 | ||
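function_stat_show() previously used its own static mutex, which did nothing to stop ftrace_profile_reset() from zeroing a record mid-formatting; it now takes ftrace_profile_lock and treats counter == 0 as "raced with a reset", returning -EBUSY. A minimal pthread sketch of the same check-under-the-writer's-lock idea (all names invented):

        #include <pthread.h>
        #include <stdio.h>

        static pthread_mutex_t profile_lock = PTHREAD_MUTEX_INITIALIZER;
        static unsigned long counter = 42;

        /* Writer: zeroes the record, as a profile reset would. */
        static void *reset_thread(void *arg)
        {
                (void)arg;
                pthread_mutex_lock(&profile_lock);
                counter = 0;
                pthread_mutex_unlock(&profile_lock);
                return NULL;
        }

        /* Reader: only trusts the record while holding the same lock. */
        static int show_record(void)
        {
                int ret = 0;

                pthread_mutex_lock(&profile_lock);
                if (counter == 0) {
                        ret = -1;       /* raced with a reset; EBUSY-style */
                        goto out;
                }
                printf("counter = %lu\n", counter);
        out:
                pthread_mutex_unlock(&profile_lock);
                return ret;
        }

        int main(void)
        {
                pthread_t t;

                show_record();
                pthread_create(&t, NULL, reset_thread, NULL);
                pthread_join(&t, NULL);
                printf("after reset: %d\n", show_record());     /* prints -1 */
                return 0;
        }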
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 19cccc3c3028..492197e2f86c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
| @@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) | |||
| 2985 | 2985 | ||
| 2986 | static void rb_advance_iter(struct ring_buffer_iter *iter) | 2986 | static void rb_advance_iter(struct ring_buffer_iter *iter) |
| 2987 | { | 2987 | { |
| 2988 | struct ring_buffer *buffer; | ||
| 2989 | struct ring_buffer_per_cpu *cpu_buffer; | 2988 | struct ring_buffer_per_cpu *cpu_buffer; |
| 2990 | struct ring_buffer_event *event; | 2989 | struct ring_buffer_event *event; |
| 2991 | unsigned length; | 2990 | unsigned length; |
| 2992 | 2991 | ||
| 2993 | cpu_buffer = iter->cpu_buffer; | 2992 | cpu_buffer = iter->cpu_buffer; |
| 2994 | buffer = cpu_buffer->buffer; | ||
| 2995 | 2993 | ||
| 2996 | /* | 2994 | /* |
| 2997 | * Check if we are at the end of the buffer. | 2995 | * Check if we are at the end of the buffer. |
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 000e6e85b445..31cc4cb0dbf2 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
| @@ -91,6 +91,8 @@ int perf_trace_init(struct perf_event *p_event) | |||
| 91 | tp_event->class && tp_event->class->reg && | 91 | tp_event->class && tp_event->class->reg && |
| 92 | try_module_get(tp_event->mod)) { | 92 | try_module_get(tp_event->mod)) { |
| 93 | ret = perf_trace_event_init(tp_event, p_event); | 93 | ret = perf_trace_event_init(tp_event, p_event); |
| 94 | if (ret) | ||
| 95 | module_put(tp_event->mod); | ||
| 94 | break; | 96 | break; |
| 95 | } | 97 | } |
| 96 | } | 98 | } |
| @@ -146,6 +148,7 @@ void perf_trace_destroy(struct perf_event *p_event) | |||
| 146 | } | 148 | } |
| 147 | } | 149 | } |
| 148 | out: | 150 | out: |
| 151 | module_put(tp_event->mod); | ||
| 149 | mutex_unlock(&event_mutex); | 152 | mutex_unlock(&event_mutex); |
| 150 | } | 153 | } |
| 151 | 154 | ||
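The two added module_put() calls balance the try_module_get() taken in perf_trace_init(): one on the init-failure path, one in perf_trace_destroy(), so a perf event no longer pins the traced module's refcount forever. A toy model of that get/put discipline (all names invented):

        #include <stdio.h>

        static int module_refcount;     /* stands in for the module's refcount */

        static int module_get(void) { module_refcount++; return 1; }
        static void module_put(void) { module_refcount--; }

        /* Mirrors perf_trace_init(): drop the reference if init itself fails. */
        static int event_init(int should_fail)
        {
                if (!module_get())
                        return -1;
                if (should_fail) {
                        module_put();   /* the added error-path put */
                        return -1;
                }
                return 0;
        }

        /* Mirrors perf_trace_destroy(): release the reference taken at init. */
        static void event_destroy(void)
        {
                module_put();
        }

        int main(void)
        {
                event_init(1);                          /* failed init */
                printf("after failed init:  %d\n", module_refcount);   /* 0 */
                event_init(0);
                event_destroy();
                printf("after init+destroy: %d\n", module_refcount);   /* 0 */
                return 0;
        }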
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8b27c9849b42..544301d29dee 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
| @@ -514,8 +514,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs); | |||
| 514 | static int kretprobe_dispatcher(struct kretprobe_instance *ri, | 514 | static int kretprobe_dispatcher(struct kretprobe_instance *ri, |
| 515 | struct pt_regs *regs); | 515 | struct pt_regs *regs); |
| 516 | 516 | ||
| 517 | /* Check the name is good for event/group */ | 517 | /* Check the name is good for event/group/fields */ |
| 518 | static int check_event_name(const char *name) | 518 | static int is_good_name(const char *name) |
| 519 | { | 519 | { |
| 520 | if (!isalpha(*name) && *name != '_') | 520 | if (!isalpha(*name) && *name != '_') |
| 521 | return 0; | 521 | return 0; |
| @@ -557,7 +557,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, | |||
| 557 | else | 557 | else |
| 558 | tp->rp.kp.pre_handler = kprobe_dispatcher; | 558 | tp->rp.kp.pre_handler = kprobe_dispatcher; |
| 559 | 559 | ||
| 560 | if (!event || !check_event_name(event)) { | 560 | if (!event || !is_good_name(event)) { |
| 561 | ret = -EINVAL; | 561 | ret = -EINVAL; |
| 562 | goto error; | 562 | goto error; |
| 563 | } | 563 | } |
| @@ -567,7 +567,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, | |||
| 567 | if (!tp->call.name) | 567 | if (!tp->call.name) |
| 568 | goto error; | 568 | goto error; |
| 569 | 569 | ||
| 570 | if (!group || !check_event_name(group)) { | 570 | if (!group || !is_good_name(group)) { |
| 571 | ret = -EINVAL; | 571 | ret = -EINVAL; |
| 572 | goto error; | 572 | goto error; |
| 573 | } | 573 | } |
| @@ -883,7 +883,7 @@ static int create_trace_probe(int argc, char **argv) | |||
| 883 | int i, ret = 0; | 883 | int i, ret = 0; |
| 884 | int is_return = 0, is_delete = 0; | 884 | int is_return = 0, is_delete = 0; |
| 885 | char *symbol = NULL, *event = NULL, *group = NULL; | 885 | char *symbol = NULL, *event = NULL, *group = NULL; |
| 886 | char *arg, *tmp; | 886 | char *arg; |
| 887 | unsigned long offset = 0; | 887 | unsigned long offset = 0; |
| 888 | void *addr = NULL; | 888 | void *addr = NULL; |
| 889 | char buf[MAX_EVENT_NAME_LEN]; | 889 | char buf[MAX_EVENT_NAME_LEN]; |
| @@ -992,26 +992,36 @@ static int create_trace_probe(int argc, char **argv) | |||
| 992 | /* parse arguments */ | 992 | /* parse arguments */ |
| 993 | ret = 0; | 993 | ret = 0; |
| 994 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { | 994 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { |
| 995 | /* Increment count for freeing args in error case */ | ||
| 996 | tp->nr_args++; | ||
| 997 | |||
| 995 | /* Parse argument name */ | 998 | /* Parse argument name */ |
| 996 | arg = strchr(argv[i], '='); | 999 | arg = strchr(argv[i], '='); |
| 997 | if (arg) | 1000 | if (arg) { |
| 998 | *arg++ = '\0'; | 1001 | *arg++ = '\0'; |
| 999 | else | 1002 | tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); |
| 1003 | } else { | ||
| 1000 | arg = argv[i]; | 1004 | arg = argv[i]; |
| 1005 | /* If argument name is omitted, set "argN" */ | ||
| 1006 | snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); | ||
| 1007 | tp->args[i].name = kstrdup(buf, GFP_KERNEL); | ||
| 1008 | } | ||
| 1001 | 1009 | ||
| 1002 | tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); | ||
| 1003 | if (!tp->args[i].name) { | 1010 | if (!tp->args[i].name) { |
| 1004 | pr_info("Failed to allocate argument%d name '%s'.\n", | 1011 | pr_info("Failed to allocate argument[%d] name.\n", i); |
| 1005 | i, argv[i]); | ||
| 1006 | ret = -ENOMEM; | 1012 | ret = -ENOMEM; |
| 1007 | goto error; | 1013 | goto error; |
| 1008 | } | 1014 | } |
| 1009 | tmp = strchr(tp->args[i].name, ':'); | 1015 | |
| 1010 | if (tmp) | 1016 | if (!is_good_name(tp->args[i].name)) { |
| 1011 | *tmp = '_'; /* convert : to _ */ | 1017 | pr_info("Invalid argument[%d] name: %s\n", |
| 1018 | i, tp->args[i].name); | ||
| 1019 | ret = -EINVAL; | ||
| 1020 | goto error; | ||
| 1021 | } | ||
| 1012 | 1022 | ||
| 1013 | if (conflict_field_name(tp->args[i].name, tp->args, i)) { | 1023 | if (conflict_field_name(tp->args[i].name, tp->args, i)) { |
| 1014 | pr_info("Argument%d name '%s' conflicts with " | 1024 | pr_info("Argument[%d] name '%s' conflicts with " |
| 1015 | "another field.\n", i, argv[i]); | 1025 | "another field.\n", i, argv[i]); |
| 1016 | ret = -EINVAL; | 1026 | ret = -EINVAL; |
| 1017 | goto error; | 1027 | goto error; |
| @@ -1020,12 +1030,9 @@ static int create_trace_probe(int argc, char **argv) | |||
| 1020 | /* Parse fetch argument */ | 1030 | /* Parse fetch argument */ |
| 1021 | ret = parse_probe_arg(arg, tp, &tp->args[i], is_return); | 1031 | ret = parse_probe_arg(arg, tp, &tp->args[i], is_return); |
| 1022 | if (ret) { | 1032 | if (ret) { |
| 1023 | pr_info("Parse error at argument%d. (%d)\n", i, ret); | 1033 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); |
| 1024 | kfree(tp->args[i].name); | ||
| 1025 | goto error; | 1034 | goto error; |
| 1026 | } | 1035 | } |
| 1027 | |||
| 1028 | tp->nr_args++; | ||
| 1029 | } | 1036 | } |
| 1030 | 1037 | ||
| 1031 | ret = register_trace_probe(tp); | 1038 | ret = register_trace_probe(tp); |
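
Editor's note (not part of the patch): the trace_kprobe.c hunks rename the identifier check to is_good_name(), apply it to argument names as well, and bump tp->nr_args before parsing so the error path frees partially set-up arguments. Only the first two lines of is_good_name() are visible in the hunk; the userspace sketch below fills in a plausible loop body as an assumption, not the verbatim kernel code.

    #include <ctype.h>
    #include <stdio.h>

    /* event/group/field names must look like C identifiers */
    static int is_good_name(const char *name)
    {
        if (!isalpha((unsigned char)*name) && *name != '_')
            return 0;
        while (*++name != '\0') {
            if (!isalnum((unsigned char)*name) && *name != '_')
                return 0;
        }
        return 1;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               is_good_name("myprobe"),    /* 1 */
               is_good_name("_arg1"),      /* 1 */
               is_good_name("bad:name"));  /* 0 */
        return 0;
    }
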
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 0d53c8e853b1..7f9c3c52ecc1 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
| @@ -122,7 +122,7 @@ static void __touch_watchdog(void) | |||
| 122 | 122 | ||
| 123 | void touch_softlockup_watchdog(void) | 123 | void touch_softlockup_watchdog(void) |
| 124 | { | 124 | { |
| 125 | __get_cpu_var(watchdog_touch_ts) = 0; | 125 | __raw_get_cpu_var(watchdog_touch_ts) = 0; |
| 126 | } | 126 | } |
| 127 | EXPORT_SYMBOL(touch_softlockup_watchdog); | 127 | EXPORT_SYMBOL(touch_softlockup_watchdog); |
| 128 | 128 | ||
| @@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void) | |||
| 142 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 142 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
| 143 | void touch_nmi_watchdog(void) | 143 | void touch_nmi_watchdog(void) |
| 144 | { | 144 | { |
| 145 | __get_cpu_var(watchdog_nmi_touch) = true; | 145 | if (watchdog_enabled) { |
| 146 | unsigned cpu; | ||
| 147 | |||
| 148 | for_each_present_cpu(cpu) { | ||
| 149 | if (per_cpu(watchdog_nmi_touch, cpu) != true) | ||
| 150 | per_cpu(watchdog_nmi_touch, cpu) = true; | ||
| 151 | } | ||
| 152 | } | ||
| 146 | touch_softlockup_watchdog(); | 153 | touch_softlockup_watchdog(); |
| 147 | } | 154 | } |
| 148 | EXPORT_SYMBOL(touch_nmi_watchdog); | 155 | EXPORT_SYMBOL(touch_nmi_watchdog); |
| @@ -433,6 +440,9 @@ static int watchdog_enable(int cpu) | |||
| 433 | wake_up_process(p); | 440 | wake_up_process(p); |
| 434 | } | 441 | } |
| 435 | 442 | ||
| 443 | /* if any cpu succeeds, watchdog is considered enabled for the system */ | ||
| 444 | watchdog_enabled = 1; | ||
| 445 | |||
| 436 | return 0; | 446 | return 0; |
| 437 | } | 447 | } |
| 438 | 448 | ||
| @@ -455,9 +465,6 @@ static void watchdog_disable(int cpu) | |||
| 455 | per_cpu(softlockup_watchdog, cpu) = NULL; | 465 | per_cpu(softlockup_watchdog, cpu) = NULL; |
| 456 | kthread_stop(p); | 466 | kthread_stop(p); |
| 457 | } | 467 | } |
| 458 | |||
| 459 | /* if any cpu succeeds, watchdog is considered enabled for the system */ | ||
| 460 | watchdog_enabled = 1; | ||
| 461 | } | 468 | } |
| 462 | 469 | ||
| 463 | static void watchdog_enable_all_cpus(void) | 470 | static void watchdog_enable_all_cpus(void) |
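
Editor's note (not part of the patch): the watchdog.c hunks make touch_nmi_watchdog() touch the per-CPU flag on every present CPU, and only when the detector is enabled, and move the watchdog_enabled = 1 assignment from the disable path to the enable path. A hedged userspace analogue follows; NR_TOY_CPUS and the flat arrays stand in for the kernel's per-CPU variables.

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_TOY_CPUS 4

    static bool watchdog_enabled = true;
    static bool nmi_touch[NR_TOY_CPUS];

    static void touch_nmi_watchdog(void)
    {
        int cpu;

        if (!watchdog_enabled)
            return;                 /* per-CPU state may not be set up yet */

        for (cpu = 0; cpu < NR_TOY_CPUS; cpu++) {
            if (!nmi_touch[cpu])    /* skip redundant, cache-dirtying writes */
                nmi_touch[cpu] = true;
        }
    }

    int main(void)
    {
        int cpu;

        touch_nmi_watchdog();
        for (cpu = 0; cpu < NR_TOY_CPUS; cpu++)
            printf("cpu%d touched: %d\n", cpu, nmi_touch[cpu]);
        return 0;
    }
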
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 727f24e563ae..f77afd939229 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -1,19 +1,26 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * linux/kernel/workqueue.c | 2 | * kernel/workqueue.c - generic async execution with shared worker pool |
| 3 | * | 3 | * |
| 4 | * Generic mechanism for defining kernel helper threads for running | 4 | * Copyright (C) 2002 Ingo Molnar |
| 5 | * arbitrary tasks in process context. | ||
| 6 | * | 5 | * |
| 7 | * Started by Ingo Molnar, Copyright (C) 2002 | 6 | * Derived from the taskqueue/keventd code by: |
| 7 | * David Woodhouse <dwmw2@infradead.org> | ||
| 8 | * Andrew Morton | ||
| 9 | * Kai Petzke <wpp@marie.physik.tu-berlin.de> | ||
| 10 | * Theodore Ts'o <tytso@mit.edu> | ||
| 8 | * | 11 | * |
| 9 | * Derived from the taskqueue/keventd code by: | 12 | * Made to use alloc_percpu by Christoph Lameter. |
| 10 | * | 13 | * |
| 11 | * David Woodhouse <dwmw2@infradead.org> | 14 | * Copyright (C) 2010 SUSE Linux Products GmbH |
| 12 | * Andrew Morton | 15 | * Copyright (C) 2010 Tejun Heo <tj@kernel.org> |
| 13 | * Kai Petzke <wpp@marie.physik.tu-berlin.de> | ||
| 14 | * Theodore Ts'o <tytso@mit.edu> | ||
| 15 | * | 16 | * |
| 16 | * Made to use alloc_percpu by Christoph Lameter. | 17 | * This is the generic async execution mechanism. Work items are |
| 18 | * executed in process context. The worker pool is shared and | ||
| 19 | * automatically managed. There is one worker pool for each CPU and | ||
| 20 | * one extra for works which are better served by workers which are | ||
| 21 | * not bound to any specific CPU. | ||
| 22 | * | ||
| 23 | * Please read Documentation/workqueue.txt for details. | ||
| 17 | */ | 24 | */ |
| 18 | 25 | ||
| 19 | #include <linux/module.h> | 26 | #include <linux/module.h> |
