author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-10-13 01:18:36 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-10-13 01:18:36 -0400
commit     4783f393de3077211c14675a0e57c8a02e9190b0 (patch)
tree       6c37d8664eb072fd026db3706481d771da4495ca /kernel
parent     9f5f9ffe50e90ed73040d2100db8bfc341cee352 (diff)
parent     5b8544c38e6fde6968645afd46ff681492192b86 (diff)
Merge remote branch 'kumar/merge' into next
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c                   13
-rw-r--r--  kernel/compat.c                   21
-rw-r--r--  kernel/debug/kdb/kdb_bp.c          2
-rw-r--r--  kernel/fork.c                      2
-rw-r--r--  kernel/gcov/fs.c                 244
-rw-r--r--  kernel/groups.c                    5
-rw-r--r--  kernel/hrtimer.c                   3
-rw-r--r--  kernel/hw_breakpoint.c             3
-rw-r--r--  kernel/kfifo.c                     2
-rw-r--r--  kernel/module.c                    4
-rw-r--r--  kernel/mutex.c                    23
-rw-r--r--  kernel/perf_event.c               32
-rw-r--r--  kernel/pm_qos_params.c             4
-rw-r--r--  kernel/power/hibernate.c           1
-rw-r--r--  kernel/power/snapshot.c           86
-rw-r--r--  kernel/power/swap.c                6
-rw-r--r--  kernel/sched.c                    14
-rw-r--r--  kernel/sched_fair.c               13
-rw-r--r--  kernel/signal.c                    8
-rw-r--r--  kernel/smp.c                      17
-rw-r--r--  kernel/sys.c                       2
-rw-r--r--  kernel/sysctl.c                    7
-rw-r--r--  kernel/trace/ftrace.c             19
-rw-r--r--  kernel/trace/ring_buffer.c         2
-rw-r--r--  kernel/trace/trace_event_perf.c    3
-rw-r--r--  kernel/trace/trace_kprobe.c       43
-rw-r--r--  kernel/watchdog.c                 17
-rw-r--r--  kernel/workqueue.c                80
28 files changed, 471 insertions(+), 205 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 192f88c5b0f..c9483d8f614 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1791,19 +1791,20 @@ out:
 }
 
 /**
- * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: attach to all cgroups of a given task
  * @tsk: the task to be attached
  */
-int cgroup_attach_task_current_cg(struct task_struct *tsk)
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 {
         struct cgroupfs_root *root;
-        struct cgroup *cur_cg;
         int retval = 0;
 
         cgroup_lock();
         for_each_active_root(root) {
-                cur_cg = task_cgroup_from_root(current, root);
-                retval = cgroup_attach_task(cur_cg, tsk);
+                struct cgroup *from_cg = task_cgroup_from_root(from, root);
+
+                retval = cgroup_attach_task(from_cg, tsk);
                 if (retval)
                         break;
         }
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 
         return retval;
 }
-EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
 /*
  * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
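The renamed helper takes the source task explicitly instead of assuming current. A minimal sketch of a caller built on the new signature (the owner/worker split and the attach_worker name are illustrative, not part of this patch):

    /* Hypothetical caller: place a kernel worker thread into every cgroup
     * of the task that owns it, rather than the cgroups of 'current'. */
    #include <linux/cgroup.h>
    #include <linux/sched.h>

    static int attach_worker(struct task_struct *owner, struct task_struct *worker)
    {
            /* Old API would have been: cgroup_attach_task_current_cg(worker); */
            return cgroup_attach_task_all(owner, worker);
    }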
diff --git a/kernel/compat.c b/kernel/compat.c
index e167efce842..c9e2ec0b34a 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -1126,3 +1126,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
 
         return 0;
 }
+
+/*
+ * Allocate user-space memory for the duration of a single system call,
+ * in order to marshall parameters inside a compat thunk.
+ */
+void __user *compat_alloc_user_space(unsigned long len)
+{
+        void __user *ptr;
+
+        /* If len would occupy more than half of the entire compat space... */
+        if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
+                return NULL;
+
+        ptr = arch_compat_alloc_user_space(len);
+
+        if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
+                return NULL;
+
+        return ptr;
+}
+EXPORT_SYMBOL_GPL(compat_alloc_user_space);
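compat_alloc_user_space() is intended for 32-bit compatibility syscall wrappers that need a scratch area in user space to rebuild native argument layouts. A hedged sketch of such a thunk, assuming hypothetical my_args structures and a hypothetical do_my_syscall() common entry point:

    /* Hypothetical compat wrapper: widen a 32-bit argument block into the
     * native layout before calling the shared implementation. */
    struct compat_my_args { u32 buf; u32 len; };
    struct my_args { void __user *buf; unsigned long len; };

    asmlinkage long compat_sys_my_call(struct compat_my_args __user *cargs)
    {
            struct my_args __user *args;
            struct compat_my_args tmp;

            if (copy_from_user(&tmp, cargs, sizeof(tmp)))
                    return -EFAULT;
            args = compat_alloc_user_space(sizeof(*args));
            if (!args)
                    return -EFAULT;
            if (put_user(compat_ptr(tmp.buf), &args->buf) ||
                put_user((unsigned long)tmp.len, &args->len))
                    return -EFAULT;
            return do_my_syscall(args);     /* hypothetical common entry point */
    }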
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index 75bd9b3ebbb..20059ef4459 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv)
         int i, bpno;
         kdb_bp_t *bp, *bp_check;
         int diag;
-        int free;
         char *symname = NULL;
         long offset = 0ul;
         int nextarg;
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv)
         /*
          * Find an empty bp structure to allocate
          */
-        free = KDB_MAXBPT;
         for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
                 if (bp->bp_free)
                         break;
diff --git a/kernel/fork.c b/kernel/fork.c
index b7e9d60a675..c445f8cc408 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -356,10 +356,10 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                 if (IS_ERR(pol))
                         goto fail_nomem_policy;
                 vma_set_policy(tmp, pol);
+                tmp->vm_mm = mm;
                 if (anon_vma_fork(tmp, mpnt))
                         goto fail_nomem_anon_vma_fork;
                 tmp->vm_flags &= ~VM_LOCKED;
-                tmp->vm_mm = mm;
                 tmp->vm_next = tmp->vm_prev = NULL;
                 file = tmp->vm_file;
                 if (file) {
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index ef3c3f88a7a..f83972b1656 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
@@ -33,10 +33,11 @@
  * @children: child nodes
  * @all: list head for list of all nodes
  * @parent: parent node
- * @info: associated profiling data structure if not a directory
- * @ghost: when an object file containing profiling data is unloaded we keep a
- *         copy of the profiling data here to allow collecting coverage data
- *         for cleanup code. Such a node is called a "ghost".
+ * @loaded_info: array of pointers to profiling data sets for loaded object
+ *               files.
+ * @num_loaded: number of profiling data sets for loaded object files.
+ * @unloaded_info: accumulated copy of profiling data sets for unloaded
+ *                 object files. Used only when gcov_persist=1.
  * @dentry: main debugfs entry, either a directory or data file
  * @links: associated symbolic links
  * @name: data file basename
@@ -51,10 +52,11 @@ struct gcov_node {
         struct list_head children;
         struct list_head all;
         struct gcov_node *parent;
-        struct gcov_info *info;
-        struct gcov_info *ghost;
+        struct gcov_info **loaded_info;
+        struct gcov_info *unloaded_info;
         struct dentry *dentry;
         struct dentry **links;
+        int num_loaded;
         char name[0];
 };
 
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = {
 };
 
 /*
- * Return the profiling data set for a given node. This can either be the
- * original profiling data structure or a duplicate (also called "ghost")
- * in case the associated object file has been unloaded.
+ * Return a profiling data set associated with the given node. This is
+ * either a data set for a loaded object file or a data set copy in case
+ * all associated object files have been unloaded.
  */
 static struct gcov_info *get_node_info(struct gcov_node *node)
 {
-        if (node->info)
-                return node->info;
+        if (node->num_loaded > 0)
+                return node->loaded_info[0];
 
-        return node->ghost;
+        return node->unloaded_info;
+}
+
+/*
+ * Return a newly allocated profiling data set which contains the sum of
+ * all profiling data associated with the given node.
+ */
+static struct gcov_info *get_accumulated_info(struct gcov_node *node)
+{
+        struct gcov_info *info;
+        int i = 0;
+
+        if (node->unloaded_info)
+                info = gcov_info_dup(node->unloaded_info);
+        else
+                info = gcov_info_dup(node->loaded_info[i++]);
+        if (!info)
+                return NULL;
+        for (; i < node->num_loaded; i++)
+                gcov_info_add(info, node->loaded_info[i]);
+
+        return info;
 }
 
 /*
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file)
         mutex_lock(&node_lock);
         /*
          * Read from a profiling data copy to minimize reference tracking
-         * complexity and concurrent access.
+         * complexity and concurrent access and to keep accumulating multiple
+         * profiling data sets associated with one node simple.
          */
-        info = gcov_info_dup(get_node_info(node));
+        info = get_accumulated_info(node);
         if (!info)
                 goto out_unlock;
         iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name)
         return NULL;
 }
 
+/*
+ * Reset all profiling data associated with the specified node.
+ */
+static void reset_node(struct gcov_node *node)
+{
+        int i;
+
+        if (node->unloaded_info)
+                gcov_info_reset(node->unloaded_info);
+        for (i = 0; i < node->num_loaded; i++)
+                gcov_info_reset(node->loaded_info[i]);
+}
+
 static void remove_node(struct gcov_node *node);
 
 /*
  * write() implementation for gcov data files. Reset profiling data for the
- * associated file. If the object file has been unloaded (i.e. this is
- * a "ghost" node), remove the debug fs node as well.
+ * corresponding file. If all associated object files have been unloaded,
+ * remove the debug fs node as well.
  */
 static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
                               size_t len, loff_t *pos)
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
         node = get_node_by_name(info->filename);
         if (node) {
                 /* Reset counts or remove node for unloaded modules. */
-                if (node->ghost)
+                if (node->num_loaded == 0)
                         remove_node(node);
                 else
-                        gcov_info_reset(node->info);
+                        reset_node(node);
         }
         /* Reset counts for open file. */
         gcov_info_reset(info);
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info,
         INIT_LIST_HEAD(&node->list);
         INIT_LIST_HEAD(&node->children);
         INIT_LIST_HEAD(&node->all);
-        node->info = info;
+        if (node->loaded_info) {
+                node->loaded_info[0] = info;
+                node->num_loaded = 1;
+        }
         node->parent = parent;
         if (name)
                 strcpy(node->name, name);
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent,
         struct gcov_node *node;
 
         node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
-        if (!node) {
-                pr_warning("out of memory\n");
-                return NULL;
+        if (!node)
+                goto err_nomem;
+        if (info) {
+                node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
+                                            GFP_KERNEL);
+                if (!node->loaded_info)
+                        goto err_nomem;
         }
         init_node(node, info, name, parent);
         /* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent,
         list_add(&node->all, &all_head);
 
         return node;
+
+err_nomem:
+        kfree(node);
+        pr_warning("out of memory\n");
+        return NULL;
 }
 
 /* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node)
         list_del(&node->all);
         debugfs_remove(node->dentry);
         remove_links(node);
-        if (node->ghost)
-                gcov_info_free(node->ghost);
+        kfree(node->loaded_info);
+        if (node->unloaded_info)
+                gcov_info_free(node->unloaded_info);
         kfree(node);
 }
 
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent,
 
 /*
  * write() implementation for reset file. Reset all profiling data to zero
- * and remove ghost nodes.
+ * and remove nodes for which all associated object files are unloaded.
  */
 static ssize_t reset_write(struct file *file, const char __user *addr,
                            size_t len, loff_t *pos)
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr,
         mutex_lock(&node_lock);
 restart:
         list_for_each_entry(node, &all_head, all) {
-                if (node->info)
-                        gcov_info_reset(node->info);
+                if (node->num_loaded > 0)
+                        reset_node(node);
                 else if (list_empty(&node->children)) {
                         remove_node(node);
                         /* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@ err_remove:
 }
 
 /*
- * The profiling data set associated with this node is being unloaded. Store a
- * copy of the profiling data and turn this node into a "ghost".
+ * Associate a profiling data set with an existing node. Needs to be called
+ * with node_lock held.
  */
-static int ghost_node(struct gcov_node *node)
+static void add_info(struct gcov_node *node, struct gcov_info *info)
 {
-        node->ghost = gcov_info_dup(node->info);
-        if (!node->ghost) {
-                pr_warning("could not save data for '%s' (out of memory)\n",
-                           node->info->filename);
-                return -ENOMEM;
+        struct gcov_info **loaded_info;
+        int num = node->num_loaded;
+
+        /*
+         * Prepare new array. This is done first to simplify cleanup in
+         * case the new data set is incompatible, the node only contains
+         * unloaded data sets and there's not enough memory for the array.
+         */
+        loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
+        if (!loaded_info) {
+                pr_warning("could not add '%s' (out of memory)\n",
+                           info->filename);
+                return;
+        }
+        memcpy(loaded_info, node->loaded_info,
+               num * sizeof(struct gcov_info *));
+        loaded_info[num] = info;
+        /* Check if the new data set is compatible. */
+        if (num == 0) {
+                /*
+                 * A module was unloaded, modified and reloaded. The new
+                 * data set replaces the copy of the last one.
+                 */
+                if (!gcov_info_is_compatible(node->unloaded_info, info)) {
+                        pr_warning("discarding saved data for %s "
+                                   "(incompatible version)\n", info->filename);
+                        gcov_info_free(node->unloaded_info);
+                        node->unloaded_info = NULL;
+                }
+        } else {
+                /*
+                 * Two different versions of the same object file are loaded.
+                 * The initial one takes precedence.
+                 */
+                if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
+                        pr_warning("could not add '%s' (incompatible "
+                                   "version)\n", info->filename);
+                        kfree(loaded_info);
+                        return;
+                }
         }
-        node->info = NULL;
+        /* Overwrite previous array. */
+        kfree(node->loaded_info);
+        node->loaded_info = loaded_info;
+        node->num_loaded = num + 1;
+}
 
-        return 0;
+/*
+ * Return the index of a profiling data set associated with a node.
+ */
+static int get_info_index(struct gcov_node *node, struct gcov_info *info)
+{
+        int i;
+
+        for (i = 0; i < node->num_loaded; i++) {
+                if (node->loaded_info[i] == info)
+                        return i;
+        }
+        return -ENOENT;
 }
 
 /*
- * Profiling data for this node has been loaded again. Add profiling data
- * from previous instantiation and turn this node into a regular node.
+ * Save the data of a profiling data set which is being unloaded.
  */
-static void revive_node(struct gcov_node *node, struct gcov_info *info)
+static void save_info(struct gcov_node *node, struct gcov_info *info)
 {
-        if (gcov_info_is_compatible(node->ghost, info))
-                gcov_info_add(info, node->ghost);
+        if (node->unloaded_info)
+                gcov_info_add(node->unloaded_info, info);
         else {
-                pr_warning("discarding saved data for '%s' (version changed)\n",
+                node->unloaded_info = gcov_info_dup(info);
+                if (!node->unloaded_info) {
+                        pr_warning("could not save data for '%s' "
+                                   "(out of memory)\n", info->filename);
+                }
+        }
+}
+
+/*
+ * Disassociate a profiling data set from a node. Needs to be called with
+ * node_lock held.
+ */
+static void remove_info(struct gcov_node *node, struct gcov_info *info)
+{
+        int i;
+
+        i = get_info_index(node, info);
+        if (i < 0) {
+                pr_warning("could not remove '%s' (not found)\n",
                            info->filename);
+                return;
         }
-        gcov_info_free(node->ghost);
-        node->ghost = NULL;
-        node->info = info;
+        if (gcov_persist)
+                save_info(node, info);
+        /* Shrink array. */
+        node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
+        node->num_loaded--;
+        if (node->num_loaded > 0)
+                return;
+        /* Last loaded data set was removed. */
+        kfree(node->loaded_info);
+        node->loaded_info = NULL;
+        node->num_loaded = 0;
+        if (!node->unloaded_info)
+                remove_node(node);
 }
 
 /*
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info)
         node = get_node_by_name(info->filename);
         switch (action) {
         case GCOV_ADD:
-                /* Add new node or revive ghost. */
-                if (!node) {
+                if (node)
+                        add_info(node, info);
+                else
                         add_node(info);
-                        break;
-                }
-                if (gcov_persist)
-                        revive_node(node, info);
-                else {
-                        pr_warning("could not add '%s' (already exists)\n",
-                                   info->filename);
-                }
                 break;
         case GCOV_REMOVE:
-                /* Remove node or turn into ghost. */
-                if (!node) {
+                if (node)
+                        remove_info(node, info);
+                else {
                         pr_warning("could not remove '%s' (not found)\n",
                                    info->filename);
-                        break;
                 }
-                if (gcov_persist) {
-                        if (!ghost_node(node))
-                                break;
-                }
-                remove_node(node);
                 break;
         }
         mutex_unlock(&node_lock);
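With this rework, the data shown for a node is the sum of the saved copy for unloaded instances (when gcov_persist=1) plus every still-loaded instance. A conceptual, self-contained sketch of that accumulation, with plain counter arrays standing in for struct gcov_info (this is not kernel code):

    /* Conceptual model of get_accumulated_info(): start from the unloaded
     * copy, if any, then add each loaded data set on top. */
    static void accumulate_counters(unsigned long *out,
                                    const unsigned long *unloaded,
                                    unsigned long *const *loaded,
                                    int num_loaded, int num_counters)
    {
            int i, j;

            for (j = 0; j < num_counters; j++)
                    out[j] = unloaded ? unloaded[j] : 0;
            for (i = 0; i < num_loaded; i++)
                    for (j = 0; j < num_counters; j++)
                            out[j] += loaded[i][j];
    }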
diff --git a/kernel/groups.c b/kernel/groups.c
index 53b1916c949..253dc0f35cf 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp)
         right = group_info->ngroups;
         while (left < right) {
                 unsigned int mid = (left+right)/2;
-                int cmp = grp - GROUP_AT(group_info, mid);
-                if (cmp > 0)
+                if (grp > GROUP_AT(group_info, mid))
                         left = mid + 1;
-                else if (cmp < 0)
+                else if (grp < GROUP_AT(group_info, mid))
                         right = mid;
                 else
                         return 1;
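The dropped subtraction is not just a style change: gid_t is unsigned, so grp - GROUP_AT(group_info, mid) can wrap and the sign test then steers the binary search the wrong way. A standalone illustration in plain C, with values chosen to trigger the wrap:

    #include <stdio.h>

    int main(void)
    {
            unsigned int grp = 0, mid_gid = 0x80000001u;    /* gid_t stand-ins */
            int cmp = grp - mid_gid;        /* wraps to 0x7fffffff, i.e. > 0 */

            printf("subtraction:    %s\n", cmp > 0 ? "grp > mid" : "grp <= mid");
            printf("direct compare: %s\n", grp > mid_gid ? "grp > mid" : "grp <= mid");
            return 0;
    }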
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ce669174f35..1decafbb6b1 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
  */
 ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 {
-        struct hrtimer_clock_base *base;
         unsigned long flags;
         ktime_t rem;
 
-        base = lock_hrtimer_base(timer, &flags);
+        lock_hrtimer_base(timer, &flags);
         rem = hrtimer_expires_remaining(timer);
         unlock_hrtimer_base(timer, &flags);
 
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index d71a987fd2b..c7c2aed9e2d 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -433,7 +433,8 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
                             perf_overflow_handler_t triggered,
                             struct task_struct *tsk)
 {
-        return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
+        return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk),
+                                                triggered);
 }
 EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 6b5580c5764..01a0700e873 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -365,8 +365,6 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
         n  = setup_sgl_buf(sgl, fifo->data + off, nents, l);
         n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
 
-        if (n)
-                sg_mark_end(sgl + n - 1);
         return n;
 }
 
diff --git a/kernel/module.c b/kernel/module.c
index d0b5f8db11b..ccd64199184 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1537,6 +1537,7 @@ static int __unlink_module(void *_mod)
 {
         struct module *mod = _mod;
         list_del(&mod->list);
+        module_bug_cleanup(mod);
         return 0;
 }
 
@@ -2625,6 +2626,7 @@ static struct module *load_module(void __user *umod,
         if (err < 0)
                 goto ddebug;
 
+        module_bug_finalize(info.hdr, info.sechdrs, mod);
         list_add_rcu(&mod->list, &modules);
         mutex_unlock(&module_mutex);
 
@@ -2650,6 +2652,8 @@ static struct module *load_module(void __user *umod,
         mutex_lock(&module_mutex);
         /* Unlink carefully: kallsyms could be walking list. */
         list_del_rcu(&mod->list);
+        module_bug_cleanup(mod);
+
 ddebug:
         if (!mod->taints)
                 dynamic_debug_remove(info.debug);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4c0b7b3e6d2..200407c1502 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -36,15 +36,6 @@
 # include <asm/mutex.h>
 #endif
 
-/***
- * mutex_init - initialize the mutex
- * @lock: the mutex to be initialized
- * @key: the lock_class_key for the class; used by mutex lock debugging
- *
- * Initialize the mutex to unlocked state.
- *
- * It is not allowed to initialize an already locked mutex.
- */
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
 static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_lock - acquire the mutex
  * @lock: the mutex to be acquired
  *
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);
 
 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_unlock - release the mutex
  * @lock: the mutex to be released
  *
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
-/***
- * mutex_lock_interruptible - acquire the mutex, interruptable
+/**
+ * mutex_lock_interruptible - acquire the mutex, interruptible
  * @lock: the mutex to be acquired
  *
  * Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
         return prev == 1;
 }
 
-/***
- * mutex_trylock - try acquire the mutex, without waiting
+/**
+ * mutex_trylock - try to acquire the mutex, without waiting
  * @lock: the mutex to be acquired
  *
  * Try to acquire the mutex atomically. Returns 1 if the mutex
  * has been acquired successfully, and 0 on contention.
  *
  * NOTE: this function follows the spin_trylock() convention, so
- * it is negated to the down_trylock() return values! Be careful
+ * it is negated from the down_trylock() return values! Be careful
  * about this when converting semaphore users to mutexes.
  *
  * This function must not be used in interrupt context. The
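The convention spelled out in the mutex_trylock() comment is easy to get backwards when converting code from semaphores: a non-zero return means the lock is now held by the caller. A short illustration of the intended usage:

    /* mutex_trylock() returns 1 on success (lock acquired) and 0 on
     * contention, which is the opposite of down_trylock(). */
    static int try_bump(struct mutex *lock, int *counter)
    {
            if (!mutex_trylock(lock))
                    return 0;       /* contended; caller retries later */
            (*counter)++;
            mutex_unlock(lock);
            return 1;
    }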
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 403d1804b19..db5b5606468 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event)
         }
 }
 
+static inline int
+event_filter_match(struct perf_event *event)
+{
+        return event->cpu == -1 || event->cpu == smp_processor_id();
+}
+
 static void
 event_sched_out(struct perf_event *event,
                   struct perf_cpu_context *cpuctx,
                   struct perf_event_context *ctx)
 {
+        u64 delta;
+        /*
+         * An event which could not be activated because of
+         * filter mismatch still needs to have its timings
+         * maintained, otherwise bogus information is return
+         * via read() for time_enabled, time_running:
+         */
+        if (event->state == PERF_EVENT_STATE_INACTIVE
+            && !event_filter_match(event)) {
+                delta = ctx->time - event->tstamp_stopped;
+                event->tstamp_running += delta;
+                event->tstamp_stopped = ctx->time;
+        }
+
         if (event->state != PERF_EVENT_STATE_ACTIVE)
                 return;
 
@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event,
                 struct perf_event_context *ctx)
 {
         struct perf_event *event;
-
-        if (group_event->state != PERF_EVENT_STATE_ACTIVE)
-                return;
+        int state = group_event->state;
 
         event_sched_out(group_event, cpuctx, ctx);
 
@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event,
         list_for_each_entry(event, &group_event->sibling_list, group_entry)
                 event_sched_out(event, cpuctx, ctx);
 
-        if (group_event->attr.exclusive)
+        if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
                 cpuctx->exclusive = 0;
 }
 
@@ -5743,15 +5761,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
         unsigned int cpu = (long)hcpu;
 
-        switch (action) {
+        switch (action & ~CPU_TASKS_FROZEN) {
 
         case CPU_UP_PREPARE:
-        case CPU_UP_PREPARE_FROZEN:
+        case CPU_DOWN_FAILED:
                 perf_event_init_cpu(cpu);
                 break;
 
+        case CPU_UP_CANCELED:
         case CPU_DOWN_PREPARE:
-        case CPU_DOWN_PREPARE_FROZEN:
                 perf_event_exit_cpu(cpu);
                 break;
 
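The last hunk masks off CPU_TASKS_FROZEN so the suspend/resume variants of each hotplug event fall through to the normal cases. A sketch of the same pattern in a standalone notifier, assuming the 2.6.36-era CPU hotplug notifier API (my_cpu_notify is a hypothetical name):

    static int my_cpu_notify(struct notifier_block *nb, unsigned long action,
                             void *hcpu)
    {
            unsigned int cpu = (long)hcpu;

            switch (action & ~CPU_TASKS_FROZEN) {
            case CPU_UP_PREPARE:
            case CPU_DOWN_FAILED:
                    /* (re)initialize per-CPU state for 'cpu' */
                    break;
            case CPU_UP_CANCELED:
            case CPU_DOWN_PREPARE:
                    /* tear down per-CPU state for 'cpu' */
                    break;
            }
            return NOTIFY_OK;
    }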
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index b7e4c362361..645e541a45f 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -389,10 +389,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
         } else if (count == 11) { /* len('0x12345678/0') */
                 if (copy_from_user(ascii_value, buf, 11))
                         return -EFAULT;
+                if (strlen(ascii_value) != 10)
+                        return -EINVAL;
                 x = sscanf(ascii_value, "%x", &value);
                 if (x != 1)
                         return -EINVAL;
-                pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value);
+                pr_debug("%s, %d, 0x%x\n", ascii_value, x, value);
         } else
                 return -EINVAL;
 
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c77963938bc..8dc31e02ae1 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
                 goto Close;
 
         suspend_console();
-        hibernation_freeze_swap();
         saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
         error = dpm_suspend_start(PMSG_FREEZE);
         if (error)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5e7edfb05e6..d3f795f01bb 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1086,7 +1086,6 @@ void swsusp_free(void)
         buffer = NULL;
         alloc_normal = 0;
         alloc_highmem = 0;
-        hibernation_thaw_swap();
 }
 
 /* Helper functions used for the shrinking of memory. */
@@ -1122,9 +1121,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
         return nr_alloc;
 }
 
-static unsigned long preallocate_image_memory(unsigned long nr_pages)
+static unsigned long preallocate_image_memory(unsigned long nr_pages,
+                                              unsigned long avail_normal)
 {
-        return preallocate_image_pages(nr_pages, GFP_IMAGE);
+        unsigned long alloc;
+
+        if (avail_normal <= alloc_normal)
+                return 0;
+
+        alloc = avail_normal - alloc_normal;
+        if (nr_pages < alloc)
+                alloc = nr_pages;
+
+        return preallocate_image_pages(alloc, GFP_IMAGE);
 }
 
 #ifdef CONFIG_HIGHMEM
@@ -1170,15 +1179,22 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
  */
 static void free_unnecessary_pages(void)
 {
-        unsigned long save_highmem, to_free_normal, to_free_highmem;
+        unsigned long save, to_free_normal, to_free_highmem;
 
-        to_free_normal = alloc_normal - count_data_pages();
-        save_highmem = count_highmem_pages();
-        if (alloc_highmem > save_highmem) {
-                to_free_highmem = alloc_highmem - save_highmem;
+        save = count_data_pages();
+        if (alloc_normal >= save) {
+                to_free_normal = alloc_normal - save;
+                save = 0;
+        } else {
+                to_free_normal = 0;
+                save -= alloc_normal;
+        }
+        save += count_highmem_pages();
+        if (alloc_highmem >= save) {
+                to_free_highmem = alloc_highmem - save;
         } else {
                 to_free_highmem = 0;
-                to_free_normal -= save_highmem - alloc_highmem;
+                to_free_normal -= save - alloc_highmem;
         }
 
         memory_bm_position_reset(&copy_bm);
@@ -1259,7 +1275,7 @@ int hibernate_preallocate_memory(void)
 {
         struct zone *zone;
         unsigned long saveable, size, max_size, count, highmem, pages = 0;
-        unsigned long alloc, save_highmem, pages_highmem;
+        unsigned long alloc, save_highmem, pages_highmem, avail_normal;
         struct timeval start, stop;
         int error;
 
@@ -1296,6 +1312,7 @@ int hibernate_preallocate_memory(void)
                 else
                         count += zone_page_state(zone, NR_FREE_PAGES);
         }
+        avail_normal = count;
         count += highmem;
         count -= totalreserve_pages;
 
@@ -1310,12 +1327,21 @@ int hibernate_preallocate_memory(void)
          */
         if (size >= saveable) {
                 pages = preallocate_image_highmem(save_highmem);
-                pages += preallocate_image_memory(saveable - pages);
+                pages += preallocate_image_memory(saveable - pages, avail_normal);
                 goto out;
         }
 
         /* Estimate the minimum size of the image. */
         pages = minimum_image_size(saveable);
+        /*
+         * To avoid excessive pressure on the normal zone, leave room in it to
+         * accommodate an image of the minimum size (unless it's already too
+         * small, in which case don't preallocate pages from it at all).
+         */
+        if (avail_normal > pages)
+                avail_normal -= pages;
+        else
+                avail_normal = 0;
         if (size < pages)
                 size = min_t(unsigned long, pages, max_size);
 
@@ -1336,16 +1362,34 @@ int hibernate_preallocate_memory(void)
          */
         pages_highmem = preallocate_image_highmem(highmem / 2);
         alloc = (count - max_size) - pages_highmem;
-        pages = preallocate_image_memory(alloc);
-        if (pages < alloc)
-                goto err_out;
-        size = max_size - size;
-        alloc = size;
-        size = preallocate_highmem_fraction(size, highmem, count);
-        pages_highmem += size;
-        alloc -= size;
-        pages += preallocate_image_memory(alloc);
-        pages += pages_highmem;
+        pages = preallocate_image_memory(alloc, avail_normal);
+        if (pages < alloc) {
+                /* We have exhausted non-highmem pages, try highmem. */
+                alloc -= pages;
+                pages += pages_highmem;
+                pages_highmem = preallocate_image_highmem(alloc);
+                if (pages_highmem < alloc)
+                        goto err_out;
+                pages += pages_highmem;
+                /*
+                 * size is the desired number of saveable pages to leave in
+                 * memory, so try to preallocate (all memory - size) pages.
+                 */
+                alloc = (count - pages) - size;
+                pages += preallocate_image_highmem(alloc);
+        } else {
+                /*
+                 * There are approximately max_size saveable pages at this point
+                 * and we want to reduce this number down to size.
+                 */
+                alloc = max_size - size;
+                size = preallocate_highmem_fraction(alloc, highmem, count);
+                pages_highmem += size;
+                alloc -= size;
+                size = preallocate_image_memory(alloc, avail_normal);
+                pages_highmem += preallocate_image_highmem(alloc - size);
+                pages += pages_highmem + size;
+        }
 
         /*
          * We only need as many page frames for the image as there are saveable
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 5d0059eed3e..e6a5bdf61a3 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap)
 {
         unsigned long offset;
 
-        offset = swp_offset(get_swap_for_hibernation(swap));
+        offset = swp_offset(get_swap_page_of_type(swap));
         if (offset) {
                 if (swsusp_extents_insert(offset))
-                        swap_free_for_hibernation(swp_entry(swap, offset));
+                        swap_free(swp_entry(swap, offset));
                 else
                         return swapdev_block(swap, offset);
         }
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap)
                 ext = container_of(node, struct swsusp_extent, node);
                 rb_erase(node, &swsusp_extents);
                 for (offset = ext->start; offset <= ext->end; offset++)
-                        swap_free_for_hibernation(swp_entry(swap, offset));
+                        swap_free(swp_entry(swap, offset));
 
                 kfree(ext);
         }
diff --git a/kernel/sched.c b/kernel/sched.c
index 09b574e7f4d..dc85ceb9083 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1294,6 +1294,10 @@ static void resched_task(struct task_struct *p)
 static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
 }
+
+static void sched_avg_update(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 #if BITS_PER_LONG == 32
@@ -3182,6 +3186,8 @@ static void update_cpu_load(struct rq *this_rq)
 
                 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
         }
+
+        sched_avg_update(this_rq);
 }
 
 static void update_cpu_load_active(struct rq *this_rq)
@@ -3507,9 +3513,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
         rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
         if (total) {
-                u64 temp;
+                u64 temp = rtime;
 
-                temp = (u64)(rtime * utime);
+                temp *= utime;
                 do_div(temp, total);
                 utime = (cputime_t)temp;
         } else
@@ -3540,9 +3546,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
         rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
         if (total) {
-                u64 temp;
+                u64 temp = rtime;
 
-                temp = (u64)(rtime * cputime.utime);
+                temp *= cputime.utime;
                 do_div(temp, total);
                 utime = (cputime_t)temp;
         } else
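The two task_times()/thread_group_times() hunks fix an overflow: with a 32-bit cputime_t, rtime * utime was evaluated in 32 bits and only then widened to u64, so the cast came too late. A standalone illustration with stand-in values:

    #include <stdio.h>

    int main(void)
    {
            unsigned int rtime = 100000, utime = 90000;  /* 32-bit cputime_t stand-ins */
            unsigned long long bad = (unsigned long long)(rtime * utime);
            unsigned long long good = rtime;

            good *= utime;                  /* widened before the multiply, as in the fix */
            printf("bad  = %llu\n", bad);   /* wrapped 32-bit product */
            printf("good = %llu\n", good);  /* 9000000000 */
            return 0;
    }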
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ab661ebc489..db3f674ca49 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -54,13 +54,13 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
  * Minimal preemption granularity for CPU-bound tasks:
  * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 2000000ULL;
-unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
+unsigned int sysctl_sched_min_granularity = 750000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-static unsigned int sched_nr_latency = 3;
+static unsigned int sched_nr_latency = 8;
 
 /*
  * After fork, child runs first. If set to 0 (default) then
@@ -1313,7 +1313,7 @@ static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                   int this_cpu, int load_idx)
 {
-        struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+        struct sched_group *idlest = NULL, *group = sd->groups;
         unsigned long min_load = ULONG_MAX, this_load = 0;
         int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                 if (local_group) {
                         this_load = avg_load;
-                        this = group;
                 } else if (avg_load < min_load) {
                         min_load = avg_load;
                         idlest = group;
@@ -2268,8 +2267,6 @@ unsigned long scale_rt_power(int cpu)
         struct rq *rq = cpu_rq(cpu);
         u64 total, available;
 
-        sched_avg_update(rq);
-
         total = sched_avg_period() + (rq->clock - rq->age_stamp);
         available = total - rq->rt_avg;
 
@@ -3633,7 +3630,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
         if (time_before(now, nohz.next_balance))
                 return 0;
 
-        if (!rq->nr_running)
+        if (rq->idle_at_tick)
                 return 0;
 
         first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
diff --git a/kernel/signal.c b/kernel/signal.c
index bded6518778..919562c3d6b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2215,6 +2215,14 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
 #ifdef __ARCH_SI_TRAPNO
                 err |= __put_user(from->si_trapno, &to->si_trapno);
 #endif
+#ifdef BUS_MCEERR_AO
+                /*
+                 * Other callers might not initialize the si_lsb field,
+                 * so check explicitely for the right codes here.
+                 */
+                if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
+#endif
                 break;
         case __SI_CHLD:
                 err |= __put_user(from->si_pid, &to->si_pid);
diff --git a/kernel/smp.c b/kernel/smp.c
index 75c970c715d..ed6aacfcb7e 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -365,9 +365,10 @@ call:
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
 /**
- * __smp_call_function_single(): Run a function on another CPU
+ * __smp_call_function_single(): Run a function on a specific CPU
  * @cpu: The CPU to run on.
  * @data: Pre-allocated and setup data structure
+ * @wait: If true, wait until function has completed on specified CPU.
  *
  * Like smp_call_function_single(), but allow caller to pass in a
  * pre-allocated data structure. Useful for embedding @data inside
@@ -376,8 +377,10 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
 void __smp_call_function_single(int cpu, struct call_single_data *data,
                                 int wait)
 {
-        csd_lock(data);
+        unsigned int this_cpu;
+        unsigned long flags;
 
+        this_cpu = get_cpu();
         /*
          * Can deadlock when called with interrupts disabled.
          * We allow cpu's that are not yet online though, as no one else can
@@ -387,7 +390,15 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
         WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
                      && !oops_in_progress);
 
-        generic_exec_single(cpu, data, wait);
+        if (cpu == this_cpu) {
+                local_irq_save(flags);
+                data->func(data->info);
+                local_irq_restore(flags);
+        } else {
+                csd_lock(data);
+                generic_exec_single(cpu, data, wait);
+        }
+        put_cpu();
 }
 
 /**
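__smp_call_function_single() now handles being asked to target the calling CPU by invoking the function directly with interrupts disabled, instead of queueing an IPI to itself. A hedged sketch of a caller that embeds the pre-allocated call_single_data (the my_ipi_func and kick_cpu names are illustrative):

    static void my_ipi_func(void *info)
    {
            /* runs on the target CPU; inline with IRQs off if target == caller */
    }

    static struct call_single_data my_csd = {
            .func = my_ipi_func,
            .info = NULL,
    };

    static void kick_cpu(int cpu)
    {
            __smp_call_function_single(cpu, &my_csd, 0);
    }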
diff --git a/kernel/sys.c b/kernel/sys.c
index e9ad4448982..7f5a0cd296a 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
                 pgid = pid;
         if (pgid < 0)
                 return -EINVAL;
+        rcu_read_lock();
 
         /* From this point forward we keep holding onto the tasklist lock
          * so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 out:
         /* All paths lead to here, thus we are safe. -DaveM */
         write_unlock_irq(&tasklist_lock);
+        rcu_read_unlock();
         return err;
 }
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ca38e8e3e90..3a45c224770 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void)
 {
         sysctl_set_parent(NULL, root_table);
 #ifdef CONFIG_SYSCTL_SYSCALL_CHECK
-        {
-                int err;
-                err = sysctl_check_table(current->nsproxy, root_table);
-        }
+        sysctl_check_table(current->nsproxy, root_table);
 #endif
         return 0;
 }
@@ -2488,7 +2485,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
                 kbuf[left] = 0;
         }
 
-        for (; left && vleft--; i++, min++, max++, first=0) {
+        for (; left && vleft--; i++, first = 0) {
                 unsigned long val;
 
                 if (write) {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0d88ce9b9fb..fa7ece649fe 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
381 | { | 381 | { |
382 | struct ftrace_profile *rec = v; | 382 | struct ftrace_profile *rec = v; |
383 | char str[KSYM_SYMBOL_LEN]; | 383 | char str[KSYM_SYMBOL_LEN]; |
384 | int ret = 0; | ||
384 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 385 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
385 | static DEFINE_MUTEX(mutex); | ||
386 | static struct trace_seq s; | 386 | static struct trace_seq s; |
387 | unsigned long long avg; | 387 | unsigned long long avg; |
388 | unsigned long long stddev; | 388 | unsigned long long stddev; |
389 | #endif | 389 | #endif |
390 | mutex_lock(&ftrace_profile_lock); | ||
391 | |||
392 | /* we raced with function_profile_reset() */ | ||
393 | if (unlikely(rec->counter == 0)) { | ||
394 | ret = -EBUSY; | ||
395 | goto out; | ||
396 | } | ||
390 | 397 | ||
391 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | 398 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); |
392 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); | 399 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); |
@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
408 | do_div(stddev, (rec->counter - 1) * 1000); | 415 | do_div(stddev, (rec->counter - 1) * 1000); |
409 | } | 416 | } |
410 | 417 | ||
411 | mutex_lock(&mutex); | ||
412 | trace_seq_init(&s); | 418 | trace_seq_init(&s); |
413 | trace_print_graph_duration(rec->time, &s); | 419 | trace_print_graph_duration(rec->time, &s); |
414 | trace_seq_puts(&s, " "); | 420 | trace_seq_puts(&s, " "); |
@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
416 | trace_seq_puts(&s, " "); | 422 | trace_seq_puts(&s, " "); |
417 | trace_print_graph_duration(stddev, &s); | 423 | trace_print_graph_duration(stddev, &s); |
418 | trace_print_seq(m, &s); | 424 | trace_print_seq(m, &s); |
419 | mutex_unlock(&mutex); | ||
420 | #endif | 425 | #endif |
421 | seq_putc(m, '\n'); | 426 | seq_putc(m, '\n'); |
427 | out: | ||
428 | mutex_unlock(&ftrace_profile_lock); | ||
422 | 429 | ||
423 | return 0; | 430 | return ret; |
424 | } | 431 | } |
425 | 432 | ||
426 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) | 433 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) |
@@ -1503,6 +1510,8 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
1503 | if (*pos > 0) | 1510 | if (*pos > 0) |
1504 | return t_hash_start(m, pos); | 1511 | return t_hash_start(m, pos); |
1505 | iter->flags |= FTRACE_ITER_PRINTALL; | 1512 | iter->flags |= FTRACE_ITER_PRINTALL; |
1513 | /* reset in case of seek/pread */ | ||
1514 | iter->flags &= ~FTRACE_ITER_HASH; | ||
1506 | return iter; | 1515 | return iter; |
1507 | } | 1516 | } |
1508 | 1517 | ||
@@ -2409,7 +2418,7 @@ static const struct file_operations ftrace_filter_fops = { | |||
2409 | .open = ftrace_filter_open, | 2418 | .open = ftrace_filter_open, |
2410 | .read = seq_read, | 2419 | .read = seq_read, |
2411 | .write = ftrace_filter_write, | 2420 | .write = ftrace_filter_write, |
2412 | .llseek = ftrace_regex_lseek, | 2421 | .llseek = no_llseek, |
2413 | .release = ftrace_filter_release, | 2422 | .release = ftrace_filter_release, |
2414 | }; | 2423 | }; |
2415 | 2424 | ||
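function_stat_show() above now takes ftrace_profile_lock up front and returns -EBUSY when the record's counter is zero, i.e. when it raced with ftrace_profile_reset(); the local static mutex around the trace_seq is dropped since the profile lock now covers it. A hedged sketch of that lock-then-check pattern, using illustrative names (profile_lock, struct profile_rec, show_profile_record):

#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/errno.h>

static DEFINE_MUTEX(profile_lock);

struct profile_rec {
        unsigned long counter;
        unsigned long long time;
};

static int show_profile_record(struct seq_file *m, struct profile_rec *rec)
{
        int ret = 0;

        mutex_lock(&profile_lock);
        if (rec->counter == 0) {        /* raced with a reset */
                ret = -EBUSY;
                goto out;
        }
        seq_printf(m, "%10lu %llu\n", rec->counter, rec->time);
out:
        mutex_unlock(&profile_lock);
        return ret;
}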
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 19cccc3c302..492197e2f86 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) | |||
2985 | 2985 | ||
2986 | static void rb_advance_iter(struct ring_buffer_iter *iter) | 2986 | static void rb_advance_iter(struct ring_buffer_iter *iter) |
2987 | { | 2987 | { |
2988 | struct ring_buffer *buffer; | ||
2989 | struct ring_buffer_per_cpu *cpu_buffer; | 2988 | struct ring_buffer_per_cpu *cpu_buffer; |
2990 | struct ring_buffer_event *event; | 2989 | struct ring_buffer_event *event; |
2991 | unsigned length; | 2990 | unsigned length; |
2992 | 2991 | ||
2993 | cpu_buffer = iter->cpu_buffer; | 2992 | cpu_buffer = iter->cpu_buffer; |
2994 | buffer = cpu_buffer->buffer; | ||
2995 | 2993 | ||
2996 | /* | 2994 | /* |
2997 | * Check if we are at the end of the buffer. | 2995 | * Check if we are at the end of the buffer. |
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 000e6e85b44..31cc4cb0dbf 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
@@ -91,6 +91,8 @@ int perf_trace_init(struct perf_event *p_event) | |||
91 | tp_event->class && tp_event->class->reg && | 91 | tp_event->class && tp_event->class->reg && |
92 | try_module_get(tp_event->mod)) { | 92 | try_module_get(tp_event->mod)) { |
93 | ret = perf_trace_event_init(tp_event, p_event); | 93 | ret = perf_trace_event_init(tp_event, p_event); |
94 | if (ret) | ||
95 | module_put(tp_event->mod); | ||
94 | break; | 96 | break; |
95 | } | 97 | } |
96 | } | 98 | } |
@@ -146,6 +148,7 @@ void perf_trace_destroy(struct perf_event *p_event) | |||
146 | } | 148 | } |
147 | } | 149 | } |
148 | out: | 150 | out: |
151 | module_put(tp_event->mod); | ||
149 | mutex_unlock(&event_mutex); | 152 | mutex_unlock(&event_mutex); |
150 | } | 153 | } |
151 | 154 | ||
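The two trace_event_perf.c hunks above balance the module reference: the try_module_get() taken in perf_trace_init() is dropped both when perf_trace_event_init() fails and later in perf_trace_destroy(). A minimal sketch of that get/put pairing, with illustrative attach_event()/detach_event() names and struct:

#include <linux/module.h>
#include <linux/errno.h>

struct my_event {
        struct module *mod;
};

static int attach_event(struct my_event *ev, int (*init)(struct my_event *))
{
        int ret;

        if (!try_module_get(ev->mod))   /* pin the module providing the event */
                return -ENODEV;

        ret = init(ev);
        if (ret)
                module_put(ev->mod);    /* drop the reference on failure */
        return ret;
}

static void detach_event(struct my_event *ev)
{
        /* ... event teardown ... */
        module_put(ev->mod);            /* balance the get from attach_event() */
}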
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 8b27c9849b4..544301d29de 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -514,8 +514,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs); | |||
514 | static int kretprobe_dispatcher(struct kretprobe_instance *ri, | 514 | static int kretprobe_dispatcher(struct kretprobe_instance *ri, |
515 | struct pt_regs *regs); | 515 | struct pt_regs *regs); |
516 | 516 | ||
517 | /* Check the name is good for event/group */ | 517 | /* Check the name is good for event/group/fields */ |
518 | static int check_event_name(const char *name) | 518 | static int is_good_name(const char *name) |
519 | { | 519 | { |
520 | if (!isalpha(*name) && *name != '_') | 520 | if (!isalpha(*name) && *name != '_') |
521 | return 0; | 521 | return 0; |
@@ -557,7 +557,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, | |||
557 | else | 557 | else |
558 | tp->rp.kp.pre_handler = kprobe_dispatcher; | 558 | tp->rp.kp.pre_handler = kprobe_dispatcher; |
559 | 559 | ||
560 | if (!event || !check_event_name(event)) { | 560 | if (!event || !is_good_name(event)) { |
561 | ret = -EINVAL; | 561 | ret = -EINVAL; |
562 | goto error; | 562 | goto error; |
563 | } | 563 | } |
@@ -567,7 +567,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, | |||
567 | if (!tp->call.name) | 567 | if (!tp->call.name) |
568 | goto error; | 568 | goto error; |
569 | 569 | ||
570 | if (!group || !check_event_name(group)) { | 570 | if (!group || !is_good_name(group)) { |
571 | ret = -EINVAL; | 571 | ret = -EINVAL; |
572 | goto error; | 572 | goto error; |
573 | } | 573 | } |
@@ -883,7 +883,7 @@ static int create_trace_probe(int argc, char **argv) | |||
883 | int i, ret = 0; | 883 | int i, ret = 0; |
884 | int is_return = 0, is_delete = 0; | 884 | int is_return = 0, is_delete = 0; |
885 | char *symbol = NULL, *event = NULL, *group = NULL; | 885 | char *symbol = NULL, *event = NULL, *group = NULL; |
886 | char *arg, *tmp; | 886 | char *arg; |
887 | unsigned long offset = 0; | 887 | unsigned long offset = 0; |
888 | void *addr = NULL; | 888 | void *addr = NULL; |
889 | char buf[MAX_EVENT_NAME_LEN]; | 889 | char buf[MAX_EVENT_NAME_LEN]; |
@@ -992,26 +992,36 @@ static int create_trace_probe(int argc, char **argv) | |||
992 | /* parse arguments */ | 992 | /* parse arguments */ |
993 | ret = 0; | 993 | ret = 0; |
994 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { | 994 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { |
995 | /* Increment count for freeing args in error case */ | ||
996 | tp->nr_args++; | ||
997 | |||
995 | /* Parse argument name */ | 998 | /* Parse argument name */ |
996 | arg = strchr(argv[i], '='); | 999 | arg = strchr(argv[i], '='); |
997 | if (arg) | 1000 | if (arg) { |
998 | *arg++ = '\0'; | 1001 | *arg++ = '\0'; |
999 | else | 1002 | tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); |
1003 | } else { | ||
1000 | arg = argv[i]; | 1004 | arg = argv[i]; |
1005 | /* If argument name is omitted, set "argN" */ | ||
1006 | snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); | ||
1007 | tp->args[i].name = kstrdup(buf, GFP_KERNEL); | ||
1008 | } | ||
1001 | 1009 | ||
1002 | tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); | ||
1003 | if (!tp->args[i].name) { | 1010 | if (!tp->args[i].name) { |
1004 | pr_info("Failed to allocate argument%d name '%s'.\n", | 1011 | pr_info("Failed to allocate argument[%d] name.\n", i); |
1005 | i, argv[i]); | ||
1006 | ret = -ENOMEM; | 1012 | ret = -ENOMEM; |
1007 | goto error; | 1013 | goto error; |
1008 | } | 1014 | } |
1009 | tmp = strchr(tp->args[i].name, ':'); | 1015 | |
1010 | if (tmp) | 1016 | if (!is_good_name(tp->args[i].name)) { |
1011 | *tmp = '_'; /* convert : to _ */ | 1017 | pr_info("Invalid argument[%d] name: %s\n", |
1018 | i, tp->args[i].name); | ||
1019 | ret = -EINVAL; | ||
1020 | goto error; | ||
1021 | } | ||
1012 | 1022 | ||
1013 | if (conflict_field_name(tp->args[i].name, tp->args, i)) { | 1023 | if (conflict_field_name(tp->args[i].name, tp->args, i)) { |
1014 | pr_info("Argument%d name '%s' conflicts with " | 1024 | pr_info("Argument[%d] name '%s' conflicts with " |
1015 | "another field.\n", i, argv[i]); | 1025 | "another field.\n", i, argv[i]); |
1016 | ret = -EINVAL; | 1026 | ret = -EINVAL; |
1017 | goto error; | 1027 | goto error; |
@@ -1020,12 +1030,9 @@ static int create_trace_probe(int argc, char **argv) | |||
1020 | /* Parse fetch argument */ | 1030 | /* Parse fetch argument */ |
1021 | ret = parse_probe_arg(arg, tp, &tp->args[i], is_return); | 1031 | ret = parse_probe_arg(arg, tp, &tp->args[i], is_return); |
1022 | if (ret) { | 1032 | if (ret) { |
1023 | pr_info("Parse error at argument%d. (%d)\n", i, ret); | 1033 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); |
1024 | kfree(tp->args[i].name); | ||
1025 | goto error; | 1034 | goto error; |
1026 | } | 1035 | } |
1027 | |||
1028 | tp->nr_args++; | ||
1029 | } | 1036 | } |
1030 | 1037 | ||
1031 | ret = register_trace_probe(tp); | 1038 | ret = register_trace_probe(tp); |
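create_trace_probe() above now bumps tp->nr_args before allocating each argument so the common error path can free every slot it touched, defaults omitted names to "argN", and validates every name with is_good_name(). A hedged sketch of the count-first cleanup pattern only, with illustrative struct arg / parse_args() / free_args() names:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

struct arg {
        char *name;
};

static void free_args(struct arg *args, int nr)
{
        while (nr--)
                kfree(args[nr].name);   /* kfree(NULL) is a no-op */
}

static int parse_args(struct arg *args, char **argv, int argc)
{
        int i, nr = 0, ret = 0;

        for (i = 0; i < argc; i++) {
                nr++;                   /* count first: the error path frees this slot too */
                args[i].name = kstrdup(argv[i], GFP_KERNEL);
                if (!args[i].name) {
                        ret = -ENOMEM;
                        goto error;
                }
        }
        return 0;
error:
        free_args(args, nr);
        return ret;
}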
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 0d53c8e853b..7f9c3c52ecc 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -122,7 +122,7 @@ static void __touch_watchdog(void) | |||
122 | 122 | ||
123 | void touch_softlockup_watchdog(void) | 123 | void touch_softlockup_watchdog(void) |
124 | { | 124 | { |
125 | __get_cpu_var(watchdog_touch_ts) = 0; | 125 | __raw_get_cpu_var(watchdog_touch_ts) = 0; |
126 | } | 126 | } |
127 | EXPORT_SYMBOL(touch_softlockup_watchdog); | 127 | EXPORT_SYMBOL(touch_softlockup_watchdog); |
128 | 128 | ||
@@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void) | |||
142 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 142 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
143 | void touch_nmi_watchdog(void) | 143 | void touch_nmi_watchdog(void) |
144 | { | 144 | { |
145 | __get_cpu_var(watchdog_nmi_touch) = true; | 145 | if (watchdog_enabled) { |
146 | unsigned cpu; | ||
147 | |||
148 | for_each_present_cpu(cpu) { | ||
149 | if (per_cpu(watchdog_nmi_touch, cpu) != true) | ||
150 | per_cpu(watchdog_nmi_touch, cpu) = true; | ||
151 | } | ||
152 | } | ||
146 | touch_softlockup_watchdog(); | 153 | touch_softlockup_watchdog(); |
147 | } | 154 | } |
148 | EXPORT_SYMBOL(touch_nmi_watchdog); | 155 | EXPORT_SYMBOL(touch_nmi_watchdog); |
@@ -433,6 +440,9 @@ static int watchdog_enable(int cpu) | |||
433 | wake_up_process(p); | 440 | wake_up_process(p); |
434 | } | 441 | } |
435 | 442 | ||
443 | /* if any cpu succeeds, watchdog is considered enabled for the system */ | ||
444 | watchdog_enabled = 1; | ||
445 | |||
436 | return 0; | 446 | return 0; |
437 | } | 447 | } |
438 | 448 | ||
@@ -455,9 +465,6 @@ static void watchdog_disable(int cpu) | |||
455 | per_cpu(softlockup_watchdog, cpu) = NULL; | 465 | per_cpu(softlockup_watchdog, cpu) = NULL; |
456 | kthread_stop(p); | 466 | kthread_stop(p); |
457 | } | 467 | } |
458 | |||
459 | /* if any cpu succeeds, watchdog is considered enabled for the system */ | ||
460 | watchdog_enabled = 1; | ||
461 | } | 468 | } |
462 | 469 | ||
463 | static void watchdog_enable_all_cpus(void) | 470 | static void watchdog_enable_all_cpus(void) |
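touch_nmi_watchdog() above now marks watchdog_nmi_touch on every present CPU (and only when the detector is enabled) rather than just the local one, and watchdog_enabled is set on the enable path instead of the disable path. A minimal sketch of the all-CPU touch loop, with illustrative nmi_touch/detector_enabled names:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/types.h>

static DEFINE_PER_CPU(bool, nmi_touch);
static int detector_enabled;

static void touch_all_cpus(void)
{
        unsigned int cpu;

        if (!detector_enabled)
                return;

        for_each_present_cpu(cpu) {
                /* Write only when needed to avoid dirtying remote cachelines. */
                if (!per_cpu(nmi_touch, cpu))
                        per_cpu(nmi_touch, cpu) = true;
        }
}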
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8bd600c020e..f77afd93922 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -1,19 +1,26 @@ | |||
1 | /* | 1 | /* |
2 | * linux/kernel/workqueue.c | 2 | * kernel/workqueue.c - generic async execution with shared worker pool |
3 | * | 3 | * |
4 | * Generic mechanism for defining kernel helper threads for running | 4 | * Copyright (C) 2002 Ingo Molnar |
5 | * arbitrary tasks in process context. | ||
6 | * | 5 | * |
7 | * Started by Ingo Molnar, Copyright (C) 2002 | 6 | * Derived from the taskqueue/keventd code by: |
7 | * David Woodhouse <dwmw2@infradead.org> | ||
8 | * Andrew Morton | ||
9 | * Kai Petzke <wpp@marie.physik.tu-berlin.de> | ||
10 | * Theodore Ts'o <tytso@mit.edu> | ||
8 | * | 11 | * |
9 | * Derived from the taskqueue/keventd code by: | 12 | * Made to use alloc_percpu by Christoph Lameter. |
10 | * | 13 | * |
11 | * David Woodhouse <dwmw2@infradead.org> | 14 | * Copyright (C) 2010 SUSE Linux Products GmbH |
12 | * Andrew Morton | 15 | * Copyright (C) 2010 Tejun Heo <tj@kernel.org> |
13 | * Kai Petzke <wpp@marie.physik.tu-berlin.de> | ||
14 | * Theodore Ts'o <tytso@mit.edu> | ||
15 | * | 16 | * |
16 | * Made to use alloc_percpu by Christoph Lameter. | 17 | * This is the generic async execution mechanism. Work items as are |
18 | * executed in process context. The worker pool is shared and | ||
19 | * automatically managed. There is one worker pool for each CPU and | ||
20 | * one extra for works which are better served by workers which are | ||
21 | * not bound to any specific CPU. | ||
22 | * | ||
23 | * Please read Documentation/workqueue.txt for details. | ||
17 | */ | 24 | */ |
18 | 25 | ||
19 | #include <linux/module.h> | 26 | #include <linux/module.h> |
@@ -90,7 +97,8 @@ enum { | |||
90 | /* | 97 | /* |
91 | * Structure fields follow one of the following exclusion rules. | 98 | * Structure fields follow one of the following exclusion rules. |
92 | * | 99 | * |
93 | * I: Set during initialization and read-only afterwards. | 100 | * I: Modifiable by initialization/destruction paths and read-only for |
101 | * everyone else. | ||
94 | * | 102 | * |
95 | * P: Preemption protected. Disabling preemption is enough and should | 103 | * P: Preemption protected. Disabling preemption is enough and should |
96 | * only be modified and accessed from the local cpu. | 104 | * only be modified and accessed from the local cpu. |
@@ -198,7 +206,7 @@ typedef cpumask_var_t mayday_mask_t; | |||
198 | cpumask_test_and_set_cpu((cpu), (mask)) | 206 | cpumask_test_and_set_cpu((cpu), (mask)) |
199 | #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) | 207 | #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) |
200 | #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) | 208 | #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) |
201 | #define alloc_mayday_mask(maskp, gfp) alloc_cpumask_var((maskp), (gfp)) | 209 | #define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp)) |
202 | #define free_mayday_mask(mask) free_cpumask_var((mask)) | 210 | #define free_mayday_mask(mask) free_cpumask_var((mask)) |
203 | #else | 211 | #else |
204 | typedef unsigned long mayday_mask_t; | 212 | typedef unsigned long mayday_mask_t; |
@@ -943,10 +951,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
943 | struct global_cwq *gcwq; | 951 | struct global_cwq *gcwq; |
944 | struct cpu_workqueue_struct *cwq; | 952 | struct cpu_workqueue_struct *cwq; |
945 | struct list_head *worklist; | 953 | struct list_head *worklist; |
954 | unsigned int work_flags; | ||
946 | unsigned long flags; | 955 | unsigned long flags; |
947 | 956 | ||
948 | debug_work_activate(work); | 957 | debug_work_activate(work); |
949 | 958 | ||
959 | if (WARN_ON_ONCE(wq->flags & WQ_DYING)) | ||
960 | return; | ||
961 | |||
950 | /* determine gcwq to use */ | 962 | /* determine gcwq to use */ |
951 | if (!(wq->flags & WQ_UNBOUND)) { | 963 | if (!(wq->flags & WQ_UNBOUND)) { |
952 | struct global_cwq *last_gcwq; | 964 | struct global_cwq *last_gcwq; |
@@ -989,14 +1001,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
989 | BUG_ON(!list_empty(&work->entry)); | 1001 | BUG_ON(!list_empty(&work->entry)); |
990 | 1002 | ||
991 | cwq->nr_in_flight[cwq->work_color]++; | 1003 | cwq->nr_in_flight[cwq->work_color]++; |
1004 | work_flags = work_color_to_flags(cwq->work_color); | ||
992 | 1005 | ||
993 | if (likely(cwq->nr_active < cwq->max_active)) { | 1006 | if (likely(cwq->nr_active < cwq->max_active)) { |
994 | cwq->nr_active++; | 1007 | cwq->nr_active++; |
995 | worklist = gcwq_determine_ins_pos(gcwq, cwq); | 1008 | worklist = gcwq_determine_ins_pos(gcwq, cwq); |
996 | } else | 1009 | } else { |
1010 | work_flags |= WORK_STRUCT_DELAYED; | ||
997 | worklist = &cwq->delayed_works; | 1011 | worklist = &cwq->delayed_works; |
1012 | } | ||
998 | 1013 | ||
999 | insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color)); | 1014 | insert_work(cwq, work, worklist, work_flags); |
1000 | 1015 | ||
1001 | spin_unlock_irqrestore(&gcwq->lock, flags); | 1016 | spin_unlock_irqrestore(&gcwq->lock, flags); |
1002 | } | 1017 | } |
@@ -1215,6 +1230,7 @@ static void worker_leave_idle(struct worker *worker) | |||
1215 | * bound), %false if offline. | 1230 | * bound), %false if offline. |
1216 | */ | 1231 | */ |
1217 | static bool worker_maybe_bind_and_lock(struct worker *worker) | 1232 | static bool worker_maybe_bind_and_lock(struct worker *worker) |
1233 | __acquires(&gcwq->lock) | ||
1218 | { | 1234 | { |
1219 | struct global_cwq *gcwq = worker->gcwq; | 1235 | struct global_cwq *gcwq = worker->gcwq; |
1220 | struct task_struct *task = worker->task; | 1236 | struct task_struct *task = worker->task; |
@@ -1488,6 +1504,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq) | |||
1488 | * otherwise. | 1504 | * otherwise. |
1489 | */ | 1505 | */ |
1490 | static bool maybe_create_worker(struct global_cwq *gcwq) | 1506 | static bool maybe_create_worker(struct global_cwq *gcwq) |
1507 | __releases(&gcwq->lock) | ||
1508 | __acquires(&gcwq->lock) | ||
1491 | { | 1509 | { |
1492 | if (!need_to_create_worker(gcwq)) | 1510 | if (!need_to_create_worker(gcwq)) |
1493 | return false; | 1511 | return false; |
@@ -1662,6 +1680,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
1662 | struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); | 1680 | struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); |
1663 | 1681 | ||
1664 | move_linked_works(work, pos, NULL); | 1682 | move_linked_works(work, pos, NULL); |
1683 | __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); | ||
1665 | cwq->nr_active++; | 1684 | cwq->nr_active++; |
1666 | } | 1685 | } |
1667 | 1686 | ||
@@ -1669,6 +1688,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
1669 | * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight | 1688 | * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight |
1670 | * @cwq: cwq of interest | 1689 | * @cwq: cwq of interest |
1671 | * @color: color of work which left the queue | 1690 | * @color: color of work which left the queue |
1691 | * @delayed: for a delayed work | ||
1672 | * | 1692 | * |
1673 | * A work either has completed or is removed from pending queue, | 1693 | * A work either has completed or is removed from pending queue, |
1674 | * decrement nr_in_flight of its cwq and handle workqueue flushing. | 1694 | * decrement nr_in_flight of its cwq and handle workqueue flushing. |
@@ -1676,19 +1696,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
1676 | * CONTEXT: | 1696 | * CONTEXT: |
1677 | * spin_lock_irq(gcwq->lock). | 1697 | * spin_lock_irq(gcwq->lock). |
1678 | */ | 1698 | */ |
1679 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) | 1699 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color, |
1700 | bool delayed) | ||
1680 | { | 1701 | { |
1681 | /* ignore uncolored works */ | 1702 | /* ignore uncolored works */ |
1682 | if (color == WORK_NO_COLOR) | 1703 | if (color == WORK_NO_COLOR) |
1683 | return; | 1704 | return; |
1684 | 1705 | ||
1685 | cwq->nr_in_flight[color]--; | 1706 | cwq->nr_in_flight[color]--; |
1686 | cwq->nr_active--; | ||
1687 | 1707 | ||
1688 | if (!list_empty(&cwq->delayed_works)) { | 1708 | if (!delayed) { |
1689 | /* one down, submit a delayed one */ | 1709 | cwq->nr_active--; |
1690 | if (cwq->nr_active < cwq->max_active) | 1710 | if (!list_empty(&cwq->delayed_works)) { |
1691 | cwq_activate_first_delayed(cwq); | 1711 | /* one down, submit a delayed one */ |
1712 | if (cwq->nr_active < cwq->max_active) | ||
1713 | cwq_activate_first_delayed(cwq); | ||
1714 | } | ||
1692 | } | 1715 | } |
1693 | 1716 | ||
1694 | /* is flush in progress and are we at the flushing tip? */ | 1717 | /* is flush in progress and are we at the flushing tip? */ |
@@ -1725,6 +1748,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) | |||
1725 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. | 1748 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. |
1726 | */ | 1749 | */ |
1727 | static void process_one_work(struct worker *worker, struct work_struct *work) | 1750 | static void process_one_work(struct worker *worker, struct work_struct *work) |
1751 | __releases(&gcwq->lock) | ||
1752 | __acquires(&gcwq->lock) | ||
1728 | { | 1753 | { |
1729 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); | 1754 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); |
1730 | struct global_cwq *gcwq = cwq->gcwq; | 1755 | struct global_cwq *gcwq = cwq->gcwq; |
@@ -1823,7 +1848,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) | |||
1823 | hlist_del_init(&worker->hentry); | 1848 | hlist_del_init(&worker->hentry); |
1824 | worker->current_work = NULL; | 1849 | worker->current_work = NULL; |
1825 | worker->current_cwq = NULL; | 1850 | worker->current_cwq = NULL; |
1826 | cwq_dec_nr_in_flight(cwq, work_color); | 1851 | cwq_dec_nr_in_flight(cwq, work_color, false); |
1827 | } | 1852 | } |
1828 | 1853 | ||
1829 | /** | 1854 | /** |
@@ -2388,7 +2413,8 @@ static int try_to_grab_pending(struct work_struct *work) | |||
2388 | debug_work_deactivate(work); | 2413 | debug_work_deactivate(work); |
2389 | list_del_init(&work->entry); | 2414 | list_del_init(&work->entry); |
2390 | cwq_dec_nr_in_flight(get_work_cwq(work), | 2415 | cwq_dec_nr_in_flight(get_work_cwq(work), |
2391 | get_work_color(work)); | 2416 | get_work_color(work), |
2417 | *work_data_bits(work) & WORK_STRUCT_DELAYED); | ||
2392 | ret = 1; | 2418 | ret = 1; |
2393 | } | 2419 | } |
2394 | } | 2420 | } |
@@ -2791,7 +2817,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, | |||
2791 | if (IS_ERR(rescuer->task)) | 2817 | if (IS_ERR(rescuer->task)) |
2792 | goto err; | 2818 | goto err; |
2793 | 2819 | ||
2794 | wq->rescuer = rescuer; | ||
2795 | rescuer->task->flags |= PF_THREAD_BOUND; | 2820 | rescuer->task->flags |= PF_THREAD_BOUND; |
2796 | wake_up_process(rescuer->task); | 2821 | wake_up_process(rescuer->task); |
2797 | } | 2822 | } |
@@ -2833,6 +2858,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
2833 | { | 2858 | { |
2834 | unsigned int cpu; | 2859 | unsigned int cpu; |
2835 | 2860 | ||
2861 | wq->flags |= WQ_DYING; | ||
2836 | flush_workqueue(wq); | 2862 | flush_workqueue(wq); |
2837 | 2863 | ||
2838 | /* | 2864 | /* |
@@ -2857,6 +2883,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
2857 | if (wq->flags & WQ_RESCUER) { | 2883 | if (wq->flags & WQ_RESCUER) { |
2858 | kthread_stop(wq->rescuer->task); | 2884 | kthread_stop(wq->rescuer->task); |
2859 | free_mayday_mask(wq->mayday_mask); | 2885 | free_mayday_mask(wq->mayday_mask); |
2886 | kfree(wq->rescuer); | ||
2860 | } | 2887 | } |
2861 | 2888 | ||
2862 | free_cwqs(wq); | 2889 | free_cwqs(wq); |
@@ -3239,6 +3266,8 @@ static int __cpuinit trustee_thread(void *__gcwq) | |||
3239 | * multiple times. To be used by cpu_callback. | 3266 | * multiple times. To be used by cpu_callback. |
3240 | */ | 3267 | */ |
3241 | static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) | 3268 | static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) |
3269 | __releases(&gcwq->lock) | ||
3270 | __acquires(&gcwq->lock) | ||
3242 | { | 3271 | { |
3243 | if (!(gcwq->trustee_state == state || | 3272 | if (!(gcwq->trustee_state == state || |
3244 | gcwq->trustee_state == TRUSTEE_DONE)) { | 3273 | gcwq->trustee_state == TRUSTEE_DONE)) { |
@@ -3545,8 +3574,7 @@ static int __init init_workqueues(void) | |||
3545 | spin_lock_init(&gcwq->lock); | 3574 | spin_lock_init(&gcwq->lock); |
3546 | INIT_LIST_HEAD(&gcwq->worklist); | 3575 | INIT_LIST_HEAD(&gcwq->worklist); |
3547 | gcwq->cpu = cpu; | 3576 | gcwq->cpu = cpu; |
3548 | if (cpu == WORK_CPU_UNBOUND) | 3577 | gcwq->flags |= GCWQ_DISASSOCIATED; |
3549 | gcwq->flags |= GCWQ_DISASSOCIATED; | ||
3550 | 3578 | ||
3551 | INIT_LIST_HEAD(&gcwq->idle_list); | 3579 | INIT_LIST_HEAD(&gcwq->idle_list); |
3552 | for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) | 3580 | for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) |
@@ -3570,6 +3598,8 @@ static int __init init_workqueues(void) | |||
3570 | struct global_cwq *gcwq = get_gcwq(cpu); | 3598 | struct global_cwq *gcwq = get_gcwq(cpu); |
3571 | struct worker *worker; | 3599 | struct worker *worker; |
3572 | 3600 | ||
3601 | if (cpu != WORK_CPU_UNBOUND) | ||
3602 | gcwq->flags &= ~GCWQ_DISASSOCIATED; | ||
3573 | worker = create_worker(gcwq, true); | 3603 | worker = create_worker(gcwq, true); |
3574 | BUG_ON(!worker); | 3604 | BUG_ON(!worker); |
3575 | spin_lock_irq(&gcwq->lock); | 3605 | spin_lock_irq(&gcwq->lock); |
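The workqueue.c changes above tag over-limit work items with WORK_STRUCT_DELAYED in __queue_work(), teach cwq_dec_nr_in_flight() to skip the nr_active accounting for such items, add a WQ_DYING guard against queueing on a workqueue being destroyed, and annotate the lock-dropping helpers for sparse. A hedged, much-simplified sketch of the delayed/active bookkeeping only; the struct and helpers below are illustrative, not the workqueue API:

#include <linux/types.h>

struct simple_cwq {
        int nr_active;          /* items currently counted against max_active */
        int max_active;
        int nr_delayed;         /* items parked because the limit was hit */
};

/* Returns true if the caller must tag the item as delayed. */
static bool simple_queue(struct simple_cwq *cwq)
{
        if (cwq->nr_active < cwq->max_active) {
                cwq->nr_active++;
                return false;
        }
        cwq->nr_delayed++;
        return true;
}

static void simple_dec_in_flight(struct simple_cwq *cwq, bool delayed)
{
        if (delayed) {
                /* Never contributed to nr_active; only drop the parked count. */
                cwq->nr_delayed--;
                return;
        }
        cwq->nr_active--;
        if (cwq->nr_delayed && cwq->nr_active < cwq->max_active) {
                /* One down: promote a parked item, as cwq_activate_first_delayed() does. */
                cwq->nr_delayed--;
                cwq->nr_active++;
        }
}

Keeping the delayed state as an explicit flag is what prevents nr_active from being decremented for items that never counted toward it, e.g. work cancelled straight off the delayed list via try_to_grab_pending().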