author     Ingo Molnar <mingo@elte.hu>  2010-09-15 04:27:31 -0400
committer  Ingo Molnar <mingo@elte.hu>  2010-09-15 04:27:31 -0400
commit     3aabae7d9dfaed60effe93662f02c19bafc18537 (patch)
tree       af94cdd69add07601d9f3f5988dfc1dc255e3886 /kernel
parent     79e406d7b00ab2b261ae32a59f266fd3b7af6f29 (diff)
parent     57c072c7113f54f9512624d6c665db6184448782 (diff)
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c             |  13
-rw-r--r--  kernel/debug/debug_core.c   |   2
-rw-r--r--  kernel/debug/kdb/kdb_bp.c   |   2
-rw-r--r--  kernel/debug/kdb/kdb_main.c |   2
-rw-r--r--  kernel/gcov/fs.c            | 244
-rw-r--r--  kernel/groups.c             |   5
-rw-r--r--  kernel/hrtimer.c            |   3
-rw-r--r--  kernel/mutex.c              |  23
-rw-r--r--  kernel/pm_qos_params.c      |  16
-rw-r--r--  kernel/power/hibernate.c    |   1
-rw-r--r--  kernel/power/poweroff.c     |   2
-rw-r--r--  kernel/power/snapshot.c     |  86
-rw-r--r--  kernel/power/swap.c         |   6
-rw-r--r--  kernel/sched.c              |   6
-rw-r--r--  kernel/sched_fair.c         |  13
-rw-r--r--  kernel/sys.c                |   2
-rw-r--r--  kernel/sysctl.c             |   5
-rw-r--r--  kernel/trace/ftrace.c       | 107
-rw-r--r--  kernel/trace/ring_buffer.c  |   2
-rw-r--r--  kernel/workqueue.c          |  53
20 files changed, 409 insertions(+), 184 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 192f88c5b0f9..c9483d8f6140 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1791,19 +1791,20 @@ out: | |||
1791 | } | 1791 | } |
1792 | 1792 | ||
1793 | /** | 1793 | /** |
1794 | * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup | 1794 | * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from' |
1795 | * @from: attach to all cgroups of a given task | ||
1795 | * @tsk: the task to be attached | 1796 | * @tsk: the task to be attached |
1796 | */ | 1797 | */ |
1797 | int cgroup_attach_task_current_cg(struct task_struct *tsk) | 1798 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) |
1798 | { | 1799 | { |
1799 | struct cgroupfs_root *root; | 1800 | struct cgroupfs_root *root; |
1800 | struct cgroup *cur_cg; | ||
1801 | int retval = 0; | 1801 | int retval = 0; |
1802 | 1802 | ||
1803 | cgroup_lock(); | 1803 | cgroup_lock(); |
1804 | for_each_active_root(root) { | 1804 | for_each_active_root(root) { |
1805 | cur_cg = task_cgroup_from_root(current, root); | 1805 | struct cgroup *from_cg = task_cgroup_from_root(from, root); |
1806 | retval = cgroup_attach_task(cur_cg, tsk); | 1806 | |
1807 | retval = cgroup_attach_task(from_cg, tsk); | ||
1807 | if (retval) | 1808 | if (retval) |
1808 | break; | 1809 | break; |
1809 | } | 1810 | } |
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk) | |||
1811 | 1812 | ||
1812 | return retval; | 1813 | return retval; |
1813 | } | 1814 | } |
1814 | EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg); | 1815 | EXPORT_SYMBOL_GPL(cgroup_attach_task_all); |
1815 | 1816 | ||
1816 | /* | 1817 | /* |
1817 | * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex | 1818 | * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex |
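As an illustration (not part of this commit), a caller of the renamed helper might look as follows; the old single-argument behaviour of cgroup_attach_task_current_cg() corresponds to passing current as the source task. The function name below is hypothetical.

	#include <linux/cgroup.h>
	#include <linux/sched.h>

	/* Hypothetical caller: attach a worker task to every cgroup of the
	 * calling task, which is what cgroup_attach_task_current_cg() used
	 * to do implicitly. */
	static int move_worker_to_my_cgroups(struct task_struct *worker)
	{
		return cgroup_attach_task_all(current, worker);
	}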
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 3c2d4972d235..de407c78178d 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -741,7 +741,7 @@ static struct console kgdbcons = { | |||
741 | }; | 741 | }; |
742 | 742 | ||
743 | #ifdef CONFIG_MAGIC_SYSRQ | 743 | #ifdef CONFIG_MAGIC_SYSRQ |
744 | static void sysrq_handle_dbg(int key, struct tty_struct *tty) | 744 | static void sysrq_handle_dbg(int key) |
745 | { | 745 | { |
746 | if (!dbg_io_ops) { | 746 | if (!dbg_io_ops) { |
747 | printk(KERN_CRIT "ERROR: No KGDB I/O module available\n"); | 747 | printk(KERN_CRIT "ERROR: No KGDB I/O module available\n"); |
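For context, the tty_struct parameter was dropped from sysrq handlers tree-wide; a minimal sketch of a handler and key op using the new single-argument signature (the handler and op names here are hypothetical):

	#include <linux/sysrq.h>
	#include <linux/kernel.h>

	static void sysrq_handle_example(int key)
	{
		printk(KERN_INFO "sysrq: example handler, key '%c'\n", key);
	}

	static struct sysrq_key_op sysrq_example_op = {
		.handler	= sysrq_handle_example,
		.help_msg	= "example(x)",
		.action_msg	= "Example sysrq action",
	};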
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index 75bd9b3ebbb7..20059ef4459a 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv) | |||
274 | int i, bpno; | 274 | int i, bpno; |
275 | kdb_bp_t *bp, *bp_check; | 275 | kdb_bp_t *bp, *bp_check; |
276 | int diag; | 276 | int diag; |
277 | int free; | ||
278 | char *symname = NULL; | 277 | char *symname = NULL; |
279 | long offset = 0ul; | 278 | long offset = 0ul; |
280 | int nextarg; | 279 | int nextarg; |
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv) | |||
305 | /* | 304 | /* |
306 | * Find an empty bp structure to allocate | 305 | * Find an empty bp structure to allocate |
307 | */ | 306 | */ |
308 | free = KDB_MAXBPT; | ||
309 | for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { | 307 | for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { |
310 | if (bp->bp_free) | 308 | if (bp->bp_free) |
311 | break; | 309 | break; |
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 28b844118bbd..caf057a3de0e 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1929,7 +1929,7 @@ static int kdb_sr(int argc, const char **argv) | |||
1929 | if (argc != 1) | 1929 | if (argc != 1) |
1930 | return KDB_ARGCOUNT; | 1930 | return KDB_ARGCOUNT; |
1931 | kdb_trap_printk++; | 1931 | kdb_trap_printk++; |
1932 | __handle_sysrq(*argv[1], NULL, 0); | 1932 | __handle_sysrq(*argv[1], false); |
1933 | kdb_trap_printk--; | 1933 | kdb_trap_printk--; |
1934 | 1934 | ||
1935 | return 0; | 1935 | return 0; |
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index ef3c3f88a7a3..f83972b16564 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
@@ -33,10 +33,11 @@ | |||
33 | * @children: child nodes | 33 | * @children: child nodes |
34 | * @all: list head for list of all nodes | 34 | * @all: list head for list of all nodes |
35 | * @parent: parent node | 35 | * @parent: parent node |
36 | * @info: associated profiling data structure if not a directory | 36 | * @loaded_info: array of pointers to profiling data sets for loaded object |
37 | * @ghost: when an object file containing profiling data is unloaded we keep a | 37 | * files. |
38 | * copy of the profiling data here to allow collecting coverage data | 38 | * @num_loaded: number of profiling data sets for loaded object files. |
39 | * for cleanup code. Such a node is called a "ghost". | 39 | * @unloaded_info: accumulated copy of profiling data sets for unloaded |
40 | * object files. Used only when gcov_persist=1. | ||
40 | * @dentry: main debugfs entry, either a directory or data file | 41 | * @dentry: main debugfs entry, either a directory or data file |
41 | * @links: associated symbolic links | 42 | * @links: associated symbolic links |
42 | * @name: data file basename | 43 | * @name: data file basename |
@@ -51,10 +52,11 @@ struct gcov_node { | |||
51 | struct list_head children; | 52 | struct list_head children; |
52 | struct list_head all; | 53 | struct list_head all; |
53 | struct gcov_node *parent; | 54 | struct gcov_node *parent; |
54 | struct gcov_info *info; | 55 | struct gcov_info **loaded_info; |
55 | struct gcov_info *ghost; | 56 | struct gcov_info *unloaded_info; |
56 | struct dentry *dentry; | 57 | struct dentry *dentry; |
57 | struct dentry **links; | 58 | struct dentry **links; |
59 | int num_loaded; | ||
58 | char name[0]; | 60 | char name[0]; |
59 | }; | 61 | }; |
60 | 62 | ||
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = { | |||
136 | }; | 138 | }; |
137 | 139 | ||
138 | /* | 140 | /* |
139 | * Return the profiling data set for a given node. This can either be the | 141 | * Return a profiling data set associated with the given node. This is |
140 | * original profiling data structure or a duplicate (also called "ghost") | 142 | * either a data set for a loaded object file or a data set copy in case |
141 | * in case the associated object file has been unloaded. | 143 | * all associated object files have been unloaded. |
142 | */ | 144 | */ |
143 | static struct gcov_info *get_node_info(struct gcov_node *node) | 145 | static struct gcov_info *get_node_info(struct gcov_node *node) |
144 | { | 146 | { |
145 | if (node->info) | 147 | if (node->num_loaded > 0) |
146 | return node->info; | 148 | return node->loaded_info[0]; |
147 | 149 | ||
148 | return node->ghost; | 150 | return node->unloaded_info; |
151 | } | ||
152 | |||
153 | /* | ||
154 | * Return a newly allocated profiling data set which contains the sum of | ||
155 | * all profiling data associated with the given node. | ||
156 | */ | ||
157 | static struct gcov_info *get_accumulated_info(struct gcov_node *node) | ||
158 | { | ||
159 | struct gcov_info *info; | ||
160 | int i = 0; | ||
161 | |||
162 | if (node->unloaded_info) | ||
163 | info = gcov_info_dup(node->unloaded_info); | ||
164 | else | ||
165 | info = gcov_info_dup(node->loaded_info[i++]); | ||
166 | if (!info) | ||
167 | return NULL; | ||
168 | for (; i < node->num_loaded; i++) | ||
169 | gcov_info_add(info, node->loaded_info[i]); | ||
170 | |||
171 | return info; | ||
149 | } | 172 | } |
150 | 173 | ||
151 | /* | 174 | /* |
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file) | |||
163 | mutex_lock(&node_lock); | 186 | mutex_lock(&node_lock); |
164 | /* | 187 | /* |
165 | * Read from a profiling data copy to minimize reference tracking | 188 | * Read from a profiling data copy to minimize reference tracking |
166 | * complexity and concurrent access. | 189 | * complexity and concurrent access and to keep accumulating multiple |
190 | * profiling data sets associated with one node simple. | ||
167 | */ | 191 | */ |
168 | info = gcov_info_dup(get_node_info(node)); | 192 | info = get_accumulated_info(node); |
169 | if (!info) | 193 | if (!info) |
170 | goto out_unlock; | 194 | goto out_unlock; |
171 | iter = gcov_iter_new(info); | 195 | iter = gcov_iter_new(info); |
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name) | |||
225 | return NULL; | 249 | return NULL; |
226 | } | 250 | } |
227 | 251 | ||
252 | /* | ||
253 | * Reset all profiling data associated with the specified node. | ||
254 | */ | ||
255 | static void reset_node(struct gcov_node *node) | ||
256 | { | ||
257 | int i; | ||
258 | |||
259 | if (node->unloaded_info) | ||
260 | gcov_info_reset(node->unloaded_info); | ||
261 | for (i = 0; i < node->num_loaded; i++) | ||
262 | gcov_info_reset(node->loaded_info[i]); | ||
263 | } | ||
264 | |||
228 | static void remove_node(struct gcov_node *node); | 265 | static void remove_node(struct gcov_node *node); |
229 | 266 | ||
230 | /* | 267 | /* |
231 | * write() implementation for gcov data files. Reset profiling data for the | 268 | * write() implementation for gcov data files. Reset profiling data for the |
232 | * associated file. If the object file has been unloaded (i.e. this is | 269 | * corresponding file. If all associated object files have been unloaded, |
233 | * a "ghost" node), remove the debug fs node as well. | 270 | * remove the debug fs node as well. |
234 | */ | 271 | */ |
235 | static ssize_t gcov_seq_write(struct file *file, const char __user *addr, | 272 | static ssize_t gcov_seq_write(struct file *file, const char __user *addr, |
236 | size_t len, loff_t *pos) | 273 | size_t len, loff_t *pos) |
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr, | |||
245 | node = get_node_by_name(info->filename); | 282 | node = get_node_by_name(info->filename); |
246 | if (node) { | 283 | if (node) { |
247 | /* Reset counts or remove node for unloaded modules. */ | 284 | /* Reset counts or remove node for unloaded modules. */ |
248 | if (node->ghost) | 285 | if (node->num_loaded == 0) |
249 | remove_node(node); | 286 | remove_node(node); |
250 | else | 287 | else |
251 | gcov_info_reset(node->info); | 288 | reset_node(node); |
252 | } | 289 | } |
253 | /* Reset counts for open file. */ | 290 | /* Reset counts for open file. */ |
254 | gcov_info_reset(info); | 291 | gcov_info_reset(info); |
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info, | |||
378 | INIT_LIST_HEAD(&node->list); | 415 | INIT_LIST_HEAD(&node->list); |
379 | INIT_LIST_HEAD(&node->children); | 416 | INIT_LIST_HEAD(&node->children); |
380 | INIT_LIST_HEAD(&node->all); | 417 | INIT_LIST_HEAD(&node->all); |
381 | node->info = info; | 418 | if (node->loaded_info) { |
419 | node->loaded_info[0] = info; | ||
420 | node->num_loaded = 1; | ||
421 | } | ||
382 | node->parent = parent; | 422 | node->parent = parent; |
383 | if (name) | 423 | if (name) |
384 | strcpy(node->name, name); | 424 | strcpy(node->name, name); |
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent, | |||
394 | struct gcov_node *node; | 434 | struct gcov_node *node; |
395 | 435 | ||
396 | node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); | 436 | node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); |
397 | if (!node) { | 437 | if (!node) |
398 | pr_warning("out of memory\n"); | 438 | goto err_nomem; |
399 | return NULL; | 439 | if (info) { |
440 | node->loaded_info = kcalloc(1, sizeof(struct gcov_info *), | ||
441 | GFP_KERNEL); | ||
442 | if (!node->loaded_info) | ||
443 | goto err_nomem; | ||
400 | } | 444 | } |
401 | init_node(node, info, name, parent); | 445 | init_node(node, info, name, parent); |
402 | /* Differentiate between gcov data file nodes and directory nodes. */ | 446 | /* Differentiate between gcov data file nodes and directory nodes. */ |
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent, | |||
416 | list_add(&node->all, &all_head); | 460 | list_add(&node->all, &all_head); |
417 | 461 | ||
418 | return node; | 462 | return node; |
463 | |||
464 | err_nomem: | ||
465 | kfree(node); | ||
466 | pr_warning("out of memory\n"); | ||
467 | return NULL; | ||
419 | } | 468 | } |
420 | 469 | ||
421 | /* Remove symbolic links associated with node. */ | 470 | /* Remove symbolic links associated with node. */ |
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node) | |||
441 | list_del(&node->all); | 490 | list_del(&node->all); |
442 | debugfs_remove(node->dentry); | 491 | debugfs_remove(node->dentry); |
443 | remove_links(node); | 492 | remove_links(node); |
444 | if (node->ghost) | 493 | kfree(node->loaded_info); |
445 | gcov_info_free(node->ghost); | 494 | if (node->unloaded_info) |
495 | gcov_info_free(node->unloaded_info); | ||
446 | kfree(node); | 496 | kfree(node); |
447 | } | 497 | } |
448 | 498 | ||
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent, | |||
477 | 527 | ||
478 | /* | 528 | /* |
479 | * write() implementation for reset file. Reset all profiling data to zero | 529 | * write() implementation for reset file. Reset all profiling data to zero |
480 | * and remove ghost nodes. | 530 | * and remove nodes for which all associated object files are unloaded. |
481 | */ | 531 | */ |
482 | static ssize_t reset_write(struct file *file, const char __user *addr, | 532 | static ssize_t reset_write(struct file *file, const char __user *addr, |
483 | size_t len, loff_t *pos) | 533 | size_t len, loff_t *pos) |
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr, | |||
487 | mutex_lock(&node_lock); | 537 | mutex_lock(&node_lock); |
488 | restart: | 538 | restart: |
489 | list_for_each_entry(node, &all_head, all) { | 539 | list_for_each_entry(node, &all_head, all) { |
490 | if (node->info) | 540 | if (node->num_loaded > 0) |
491 | gcov_info_reset(node->info); | 541 | reset_node(node); |
492 | else if (list_empty(&node->children)) { | 542 | else if (list_empty(&node->children)) { |
493 | remove_node(node); | 543 | remove_node(node); |
494 | /* Several nodes may have gone - restart loop. */ | 544 | /* Several nodes may have gone - restart loop. */ |
@@ -564,37 +614,115 @@ err_remove: | |||
564 | } | 614 | } |
565 | 615 | ||
566 | /* | 616 | /* |
567 | * The profiling data set associated with this node is being unloaded. Store a | 617 | * Associate a profiling data set with an existing node. Needs to be called |
568 | * copy of the profiling data and turn this node into a "ghost". | 618 | * with node_lock held. |
569 | */ | 619 | */ |
570 | static int ghost_node(struct gcov_node *node) | 620 | static void add_info(struct gcov_node *node, struct gcov_info *info) |
571 | { | 621 | { |
572 | node->ghost = gcov_info_dup(node->info); | 622 | struct gcov_info **loaded_info; |
573 | if (!node->ghost) { | 623 | int num = node->num_loaded; |
574 | pr_warning("could not save data for '%s' (out of memory)\n", | 624 | |
575 | node->info->filename); | 625 | /* |
576 | return -ENOMEM; | 626 | * Prepare new array. This is done first to simplify cleanup in |
627 | * case the new data set is incompatible, the node only contains | ||
628 | * unloaded data sets and there's not enough memory for the array. | ||
629 | */ | ||
630 | loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL); | ||
631 | if (!loaded_info) { | ||
632 | pr_warning("could not add '%s' (out of memory)\n", | ||
633 | info->filename); | ||
634 | return; | ||
635 | } | ||
636 | memcpy(loaded_info, node->loaded_info, | ||
637 | num * sizeof(struct gcov_info *)); | ||
638 | loaded_info[num] = info; | ||
639 | /* Check if the new data set is compatible. */ | ||
640 | if (num == 0) { | ||
641 | /* | ||
642 | * A module was unloaded, modified and reloaded. The new | ||
643 | * data set replaces the copy of the last one. | ||
644 | */ | ||
645 | if (!gcov_info_is_compatible(node->unloaded_info, info)) { | ||
646 | pr_warning("discarding saved data for %s " | ||
647 | "(incompatible version)\n", info->filename); | ||
648 | gcov_info_free(node->unloaded_info); | ||
649 | node->unloaded_info = NULL; | ||
650 | } | ||
651 | } else { | ||
652 | /* | ||
653 | * Two different versions of the same object file are loaded. | ||
654 | * The initial one takes precedence. | ||
655 | */ | ||
656 | if (!gcov_info_is_compatible(node->loaded_info[0], info)) { | ||
657 | pr_warning("could not add '%s' (incompatible " | ||
658 | "version)\n", info->filename); | ||
659 | kfree(loaded_info); | ||
660 | return; | ||
661 | } | ||
577 | } | 662 | } |
578 | node->info = NULL; | 663 | /* Overwrite previous array. */ |
664 | kfree(node->loaded_info); | ||
665 | node->loaded_info = loaded_info; | ||
666 | node->num_loaded = num + 1; | ||
667 | } | ||
579 | 668 | ||
580 | return 0; | 669 | /* |
670 | * Return the index of a profiling data set associated with a node. | ||
671 | */ | ||
672 | static int get_info_index(struct gcov_node *node, struct gcov_info *info) | ||
673 | { | ||
674 | int i; | ||
675 | |||
676 | for (i = 0; i < node->num_loaded; i++) { | ||
677 | if (node->loaded_info[i] == info) | ||
678 | return i; | ||
679 | } | ||
680 | return -ENOENT; | ||
581 | } | 681 | } |
582 | 682 | ||
583 | /* | 683 | /* |
584 | * Profiling data for this node has been loaded again. Add profiling data | 684 | * Save the data of a profiling data set which is being unloaded. |
585 | * from previous instantiation and turn this node into a regular node. | ||
586 | */ | 685 | */ |
587 | static void revive_node(struct gcov_node *node, struct gcov_info *info) | 686 | static void save_info(struct gcov_node *node, struct gcov_info *info) |
588 | { | 687 | { |
589 | if (gcov_info_is_compatible(node->ghost, info)) | 688 | if (node->unloaded_info) |
590 | gcov_info_add(info, node->ghost); | 689 | gcov_info_add(node->unloaded_info, info); |
591 | else { | 690 | else { |
592 | pr_warning("discarding saved data for '%s' (version changed)\n", | 691 | node->unloaded_info = gcov_info_dup(info); |
692 | if (!node->unloaded_info) { | ||
693 | pr_warning("could not save data for '%s' " | ||
694 | "(out of memory)\n", info->filename); | ||
695 | } | ||
696 | } | ||
697 | } | ||
698 | |||
699 | /* | ||
700 | * Disassociate a profiling data set from a node. Needs to be called with | ||
701 | * node_lock held. | ||
702 | */ | ||
703 | static void remove_info(struct gcov_node *node, struct gcov_info *info) | ||
704 | { | ||
705 | int i; | ||
706 | |||
707 | i = get_info_index(node, info); | ||
708 | if (i < 0) { | ||
709 | pr_warning("could not remove '%s' (not found)\n", | ||
593 | info->filename); | 710 | info->filename); |
711 | return; | ||
594 | } | 712 | } |
595 | gcov_info_free(node->ghost); | 713 | if (gcov_persist) |
596 | node->ghost = NULL; | 714 | save_info(node, info); |
597 | node->info = info; | 715 | /* Shrink array. */ |
716 | node->loaded_info[i] = node->loaded_info[node->num_loaded - 1]; | ||
717 | node->num_loaded--; | ||
718 | if (node->num_loaded > 0) | ||
719 | return; | ||
720 | /* Last loaded data set was removed. */ | ||
721 | kfree(node->loaded_info); | ||
722 | node->loaded_info = NULL; | ||
723 | node->num_loaded = 0; | ||
724 | if (!node->unloaded_info) | ||
725 | remove_node(node); | ||
598 | } | 726 | } |
599 | 727 | ||
600 | /* | 728 | /* |
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info) | |||
609 | node = get_node_by_name(info->filename); | 737 | node = get_node_by_name(info->filename); |
610 | switch (action) { | 738 | switch (action) { |
611 | case GCOV_ADD: | 739 | case GCOV_ADD: |
612 | /* Add new node or revive ghost. */ | 740 | if (node) |
613 | if (!node) { | 741 | add_info(node, info); |
742 | else | ||
614 | add_node(info); | 743 | add_node(info); |
615 | break; | ||
616 | } | ||
617 | if (gcov_persist) | ||
618 | revive_node(node, info); | ||
619 | else { | ||
620 | pr_warning("could not add '%s' (already exists)\n", | ||
621 | info->filename); | ||
622 | } | ||
623 | break; | 744 | break; |
624 | case GCOV_REMOVE: | 745 | case GCOV_REMOVE: |
625 | /* Remove node or turn into ghost. */ | 746 | if (node) |
626 | if (!node) { | 747 | remove_info(node, info); |
748 | else { | ||
627 | pr_warning("could not remove '%s' (not found)\n", | 749 | pr_warning("could not remove '%s' (not found)\n", |
628 | info->filename); | 750 | info->filename); |
629 | break; | ||
630 | } | 751 | } |
631 | if (gcov_persist) { | ||
632 | if (!ghost_node(node)) | ||
633 | break; | ||
634 | } | ||
635 | remove_node(node); | ||
636 | break; | 752 | break; |
637 | } | 753 | } |
638 | mutex_unlock(&node_lock); | 754 | mutex_unlock(&node_lock); |
diff --git a/kernel/groups.c b/kernel/groups.c
index 53b1916c9492..253dc0f35cf4 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp) | |||
143 | right = group_info->ngroups; | 143 | right = group_info->ngroups; |
144 | while (left < right) { | 144 | while (left < right) { |
145 | unsigned int mid = (left+right)/2; | 145 | unsigned int mid = (left+right)/2; |
146 | int cmp = grp - GROUP_AT(group_info, mid); | 146 | if (grp > GROUP_AT(group_info, mid)) |
147 | if (cmp > 0) | ||
148 | left = mid + 1; | 147 | left = mid + 1; |
149 | else if (cmp < 0) | 148 | else if (grp < GROUP_AT(group_info, mid)) |
150 | right = mid; | 149 | right = mid; |
151 | else | 150 | else |
152 | return 1; | 151 | return 1; |
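The groups.c change avoids computing grp - GROUP_AT(...) because gid_t is unsigned, so the difference wraps around before the signed comparison is made. A standalone illustration of the failure mode (plain C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int grp = 0;           /* gid being searched for */
		unsigned int mid = 0x80000001u; /* gid stored at the midpoint */
		int cmp = grp - mid;            /* wraps to 0x7fffffff, i.e. positive */

		printf("cmp = %d\n", cmp);            /* 2147483647: search would wrongly move right */
		printf("grp < mid: %d\n", grp < mid); /* 1: the direct compare stays correct */
		return 0;
	}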
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ce669174f355..1decafbb6b1a 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel); | |||
1091 | */ | 1091 | */ |
1092 | ktime_t hrtimer_get_remaining(const struct hrtimer *timer) | 1092 | ktime_t hrtimer_get_remaining(const struct hrtimer *timer) |
1093 | { | 1093 | { |
1094 | struct hrtimer_clock_base *base; | ||
1095 | unsigned long flags; | 1094 | unsigned long flags; |
1096 | ktime_t rem; | 1095 | ktime_t rem; |
1097 | 1096 | ||
1098 | base = lock_hrtimer_base(timer, &flags); | 1097 | lock_hrtimer_base(timer, &flags); |
1099 | rem = hrtimer_expires_remaining(timer); | 1098 | rem = hrtimer_expires_remaining(timer); |
1100 | unlock_hrtimer_base(timer, &flags); | 1099 | unlock_hrtimer_base(timer, &flags); |
1101 | 1100 | ||
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4c0b7b3e6d2e..200407c1502f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -36,15 +36,6 @@ | |||
36 | # include <asm/mutex.h> | 36 | # include <asm/mutex.h> |
37 | #endif | 37 | #endif |
38 | 38 | ||
39 | /*** | ||
40 | * mutex_init - initialize the mutex | ||
41 | * @lock: the mutex to be initialized | ||
42 | * @key: the lock_class_key for the class; used by mutex lock debugging | ||
43 | * | ||
44 | * Initialize the mutex to unlocked state. | ||
45 | * | ||
46 | * It is not allowed to initialize an already locked mutex. | ||
47 | */ | ||
48 | void | 39 | void |
49 | __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) | 40 | __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) |
50 | { | 41 | { |
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init); | |||
68 | static __used noinline void __sched | 59 | static __used noinline void __sched |
69 | __mutex_lock_slowpath(atomic_t *lock_count); | 60 | __mutex_lock_slowpath(atomic_t *lock_count); |
70 | 61 | ||
71 | /*** | 62 | /** |
72 | * mutex_lock - acquire the mutex | 63 | * mutex_lock - acquire the mutex |
73 | * @lock: the mutex to be acquired | 64 | * @lock: the mutex to be acquired |
74 | * | 65 | * |
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock); | |||
105 | 96 | ||
106 | static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); | 97 | static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); |
107 | 98 | ||
108 | /*** | 99 | /** |
109 | * mutex_unlock - release the mutex | 100 | * mutex_unlock - release the mutex |
110 | * @lock: the mutex to be released | 101 | * @lock: the mutex to be released |
111 | * | 102 | * |
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count); | |||
364 | static noinline int __sched | 355 | static noinline int __sched |
365 | __mutex_lock_interruptible_slowpath(atomic_t *lock_count); | 356 | __mutex_lock_interruptible_slowpath(atomic_t *lock_count); |
366 | 357 | ||
367 | /*** | 358 | /** |
368 | * mutex_lock_interruptible - acquire the mutex, interruptable | 359 | * mutex_lock_interruptible - acquire the mutex, interruptible |
369 | * @lock: the mutex to be acquired | 360 | * @lock: the mutex to be acquired |
370 | * | 361 | * |
371 | * Lock the mutex like mutex_lock(), and return 0 if the mutex has | 362 | * Lock the mutex like mutex_lock(), and return 0 if the mutex has |
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count) | |||
456 | return prev == 1; | 447 | return prev == 1; |
457 | } | 448 | } |
458 | 449 | ||
459 | /*** | 450 | /** |
460 | * mutex_trylock - try acquire the mutex, without waiting | 451 | * mutex_trylock - try to acquire the mutex, without waiting |
461 | * @lock: the mutex to be acquired | 452 | * @lock: the mutex to be acquired |
462 | * | 453 | * |
463 | * Try to acquire the mutex atomically. Returns 1 if the mutex | 454 | * Try to acquire the mutex atomically. Returns 1 if the mutex |
464 | * has been acquired successfully, and 0 on contention. | 455 | * has been acquired successfully, and 0 on contention. |
465 | * | 456 | * |
466 | * NOTE: this function follows the spin_trylock() convention, so | 457 | * NOTE: this function follows the spin_trylock() convention, so |
467 | * it is negated to the down_trylock() return values! Be careful | 458 | * it is negated from the down_trylock() return values! Be careful |
468 | * about this when converting semaphore users to mutexes. | 459 | * about this when converting semaphore users to mutexes. |
469 | * | 460 | * |
470 | * This function must not be used in interrupt context. The | 461 | * This function must not be used in interrupt context. The |
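A short usage sketch of the convention the corrected comment describes: mutex_trylock() returns 1 on success like spin_trylock(), whereas down_trylock() returns 0 on success, so semaphore-to-mutex conversions must invert the test. The names my_lock, my_sem and do_work() are hypothetical.

	if (mutex_trylock(&my_lock)) {
		/* mutex acquired without sleeping */
		do_work();
		mutex_unlock(&my_lock);
	}

	if (down_trylock(&my_sem) == 0) {
		/* semaphore acquired: note the inverted sense of the return value */
		do_work();
		up(&my_sem);
	}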
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 996a4dec5f96..645e541a45f6 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -212,15 +212,17 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active); | |||
212 | 212 | ||
213 | /** | 213 | /** |
214 | * pm_qos_add_request - inserts new qos request into the list | 214 | * pm_qos_add_request - inserts new qos request into the list |
215 | * @pm_qos_class: identifies which list of qos request to us | 215 | * @dep: pointer to a preallocated handle |
216 | * @pm_qos_class: identifies which list of qos request to use | ||
216 | * @value: defines the qos request | 217 | * @value: defines the qos request |
217 | * | 218 | * |
218 | * This function inserts a new entry in the pm_qos_class list of requested qos | 219 | * This function inserts a new entry in the pm_qos_class list of requested qos |
219 | * performance characteristics. It recomputes the aggregate QoS expectations | 220 | * performance characteristics. It recomputes the aggregate QoS expectations |
220 | * for the pm_qos_class of parameters, and returns the pm_qos_request list | 221 | * for the pm_qos_class of parameters and initializes the pm_qos_request_list |
221 | * element as a handle for use in updating and removal. Call needs to save | 222 | * handle. Caller needs to save this handle for later use in updates and |
222 | * this handle for later use. | 223 | * removal. |
223 | */ | 224 | */ |
225 | |||
224 | void pm_qos_add_request(struct pm_qos_request_list *dep, | 226 | void pm_qos_add_request(struct pm_qos_request_list *dep, |
225 | int pm_qos_class, s32 value) | 227 | int pm_qos_class, s32 value) |
226 | { | 228 | { |
@@ -348,7 +350,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp) | |||
348 | 350 | ||
349 | pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); | 351 | pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); |
350 | if (pm_qos_class >= 0) { | 352 | if (pm_qos_class >= 0) { |
351 | struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req)); | 353 | struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL); |
352 | if (!req) | 354 | if (!req) |
353 | return -ENOMEM; | 355 | return -ENOMEM; |
354 | 356 | ||
@@ -387,10 +389,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | |||
387 | } else if (count == 11) { /* len('0x12345678/0') */ | 389 | } else if (count == 11) { /* len('0x12345678/0') */ |
388 | if (copy_from_user(ascii_value, buf, 11)) | 390 | if (copy_from_user(ascii_value, buf, 11)) |
389 | return -EFAULT; | 391 | return -EFAULT; |
392 | if (strlen(ascii_value) != 10) | ||
393 | return -EINVAL; | ||
390 | x = sscanf(ascii_value, "%x", &value); | 394 | x = sscanf(ascii_value, "%x", &value); |
391 | if (x != 1) | 395 | if (x != 1) |
392 | return -EINVAL; | 396 | return -EINVAL; |
393 | pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value); | 397 | pr_debug("%s, %d, 0x%x\n", ascii_value, x, value); |
394 | } else | 398 | } else |
395 | return -EINVAL; | 399 | return -EINVAL; |
396 | 400 | ||
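Besides the kerneldoc fix, note that the kzalloc() call is corrected to the (size, gfp_flags) argument order. A minimal sketch of the calling convention the updated kerneldoc describes, with the caller supplying preallocated handle storage and keeping it for later update and removal; the class, value and function names below are arbitrary examples:

	static struct pm_qos_request_list my_qos_req;

	static void example_start(void)
	{
		/* request a CPU/DMA latency bound of 100 usec */
		pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 100);
	}

	static void example_stop(void)
	{
		pm_qos_remove_request(&my_qos_req);
	}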
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c77963938bca..8dc31e02ae12 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode) | |||
338 | goto Close; | 338 | goto Close; |
339 | 339 | ||
340 | suspend_console(); | 340 | suspend_console(); |
341 | hibernation_freeze_swap(); | ||
342 | saved_mask = clear_gfp_allowed_mask(GFP_IOFS); | 341 | saved_mask = clear_gfp_allowed_mask(GFP_IOFS); |
343 | error = dpm_suspend_start(PMSG_FREEZE); | 342 | error = dpm_suspend_start(PMSG_FREEZE); |
344 | if (error) | 343 | if (error) |
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index e8b337006276..d52359374e85 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -24,7 +24,7 @@ static void do_poweroff(struct work_struct *dummy) | |||
24 | 24 | ||
25 | static DECLARE_WORK(poweroff_work, do_poweroff); | 25 | static DECLARE_WORK(poweroff_work, do_poweroff); |
26 | 26 | ||
27 | static void handle_poweroff(int key, struct tty_struct *tty) | 27 | static void handle_poweroff(int key) |
28 | { | 28 | { |
29 | /* run sysrq poweroff on boot cpu */ | 29 | /* run sysrq poweroff on boot cpu */ |
30 | schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work); | 30 | schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work); |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5e7edfb05e66..d3f795f01bbc 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1086,7 +1086,6 @@ void swsusp_free(void) | |||
1086 | buffer = NULL; | 1086 | buffer = NULL; |
1087 | alloc_normal = 0; | 1087 | alloc_normal = 0; |
1088 | alloc_highmem = 0; | 1088 | alloc_highmem = 0; |
1089 | hibernation_thaw_swap(); | ||
1090 | } | 1089 | } |
1091 | 1090 | ||
1092 | /* Helper functions used for the shrinking of memory. */ | 1091 | /* Helper functions used for the shrinking of memory. */ |
@@ -1122,9 +1121,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask) | |||
1122 | return nr_alloc; | 1121 | return nr_alloc; |
1123 | } | 1122 | } |
1124 | 1123 | ||
1125 | static unsigned long preallocate_image_memory(unsigned long nr_pages) | 1124 | static unsigned long preallocate_image_memory(unsigned long nr_pages, |
1125 | unsigned long avail_normal) | ||
1126 | { | 1126 | { |
1127 | return preallocate_image_pages(nr_pages, GFP_IMAGE); | 1127 | unsigned long alloc; |
1128 | |||
1129 | if (avail_normal <= alloc_normal) | ||
1130 | return 0; | ||
1131 | |||
1132 | alloc = avail_normal - alloc_normal; | ||
1133 | if (nr_pages < alloc) | ||
1134 | alloc = nr_pages; | ||
1135 | |||
1136 | return preallocate_image_pages(alloc, GFP_IMAGE); | ||
1128 | } | 1137 | } |
1129 | 1138 | ||
1130 | #ifdef CONFIG_HIGHMEM | 1139 | #ifdef CONFIG_HIGHMEM |
@@ -1170,15 +1179,22 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages, | |||
1170 | */ | 1179 | */ |
1171 | static void free_unnecessary_pages(void) | 1180 | static void free_unnecessary_pages(void) |
1172 | { | 1181 | { |
1173 | unsigned long save_highmem, to_free_normal, to_free_highmem; | 1182 | unsigned long save, to_free_normal, to_free_highmem; |
1174 | 1183 | ||
1175 | to_free_normal = alloc_normal - count_data_pages(); | 1184 | save = count_data_pages(); |
1176 | save_highmem = count_highmem_pages(); | 1185 | if (alloc_normal >= save) { |
1177 | if (alloc_highmem > save_highmem) { | 1186 | to_free_normal = alloc_normal - save; |
1178 | to_free_highmem = alloc_highmem - save_highmem; | 1187 | save = 0; |
1188 | } else { | ||
1189 | to_free_normal = 0; | ||
1190 | save -= alloc_normal; | ||
1191 | } | ||
1192 | save += count_highmem_pages(); | ||
1193 | if (alloc_highmem >= save) { | ||
1194 | to_free_highmem = alloc_highmem - save; | ||
1179 | } else { | 1195 | } else { |
1180 | to_free_highmem = 0; | 1196 | to_free_highmem = 0; |
1181 | to_free_normal -= save_highmem - alloc_highmem; | 1197 | to_free_normal -= save - alloc_highmem; |
1182 | } | 1198 | } |
1183 | 1199 | ||
1184 | memory_bm_position_reset(©_bm); | 1200 | memory_bm_position_reset(©_bm); |
@@ -1259,7 +1275,7 @@ int hibernate_preallocate_memory(void) | |||
1259 | { | 1275 | { |
1260 | struct zone *zone; | 1276 | struct zone *zone; |
1261 | unsigned long saveable, size, max_size, count, highmem, pages = 0; | 1277 | unsigned long saveable, size, max_size, count, highmem, pages = 0; |
1262 | unsigned long alloc, save_highmem, pages_highmem; | 1278 | unsigned long alloc, save_highmem, pages_highmem, avail_normal; |
1263 | struct timeval start, stop; | 1279 | struct timeval start, stop; |
1264 | int error; | 1280 | int error; |
1265 | 1281 | ||
@@ -1296,6 +1312,7 @@ int hibernate_preallocate_memory(void) | |||
1296 | else | 1312 | else |
1297 | count += zone_page_state(zone, NR_FREE_PAGES); | 1313 | count += zone_page_state(zone, NR_FREE_PAGES); |
1298 | } | 1314 | } |
1315 | avail_normal = count; | ||
1299 | count += highmem; | 1316 | count += highmem; |
1300 | count -= totalreserve_pages; | 1317 | count -= totalreserve_pages; |
1301 | 1318 | ||
@@ -1310,12 +1327,21 @@ int hibernate_preallocate_memory(void) | |||
1310 | */ | 1327 | */ |
1311 | if (size >= saveable) { | 1328 | if (size >= saveable) { |
1312 | pages = preallocate_image_highmem(save_highmem); | 1329 | pages = preallocate_image_highmem(save_highmem); |
1313 | pages += preallocate_image_memory(saveable - pages); | 1330 | pages += preallocate_image_memory(saveable - pages, avail_normal); |
1314 | goto out; | 1331 | goto out; |
1315 | } | 1332 | } |
1316 | 1333 | ||
1317 | /* Estimate the minimum size of the image. */ | 1334 | /* Estimate the minimum size of the image. */ |
1318 | pages = minimum_image_size(saveable); | 1335 | pages = minimum_image_size(saveable); |
1336 | /* | ||
1337 | * To avoid excessive pressure on the normal zone, leave room in it to | ||
1338 | * accommodate an image of the minimum size (unless it's already too | ||
1339 | * small, in which case don't preallocate pages from it at all). | ||
1340 | */ | ||
1341 | if (avail_normal > pages) | ||
1342 | avail_normal -= pages; | ||
1343 | else | ||
1344 | avail_normal = 0; | ||
1319 | if (size < pages) | 1345 | if (size < pages) |
1320 | size = min_t(unsigned long, pages, max_size); | 1346 | size = min_t(unsigned long, pages, max_size); |
1321 | 1347 | ||
@@ -1336,16 +1362,34 @@ int hibernate_preallocate_memory(void) | |||
1336 | */ | 1362 | */ |
1337 | pages_highmem = preallocate_image_highmem(highmem / 2); | 1363 | pages_highmem = preallocate_image_highmem(highmem / 2); |
1338 | alloc = (count - max_size) - pages_highmem; | 1364 | alloc = (count - max_size) - pages_highmem; |
1339 | pages = preallocate_image_memory(alloc); | 1365 | pages = preallocate_image_memory(alloc, avail_normal); |
1340 | if (pages < alloc) | 1366 | if (pages < alloc) { |
1341 | goto err_out; | 1367 | /* We have exhausted non-highmem pages, try highmem. */ |
1342 | size = max_size - size; | 1368 | alloc -= pages; |
1343 | alloc = size; | 1369 | pages += pages_highmem; |
1344 | size = preallocate_highmem_fraction(size, highmem, count); | 1370 | pages_highmem = preallocate_image_highmem(alloc); |
1345 | pages_highmem += size; | 1371 | if (pages_highmem < alloc) |
1346 | alloc -= size; | 1372 | goto err_out; |
1347 | pages += preallocate_image_memory(alloc); | 1373 | pages += pages_highmem; |
1348 | pages += pages_highmem; | 1374 | /* |
1375 | * size is the desired number of saveable pages to leave in | ||
1376 | * memory, so try to preallocate (all memory - size) pages. | ||
1377 | */ | ||
1378 | alloc = (count - pages) - size; | ||
1379 | pages += preallocate_image_highmem(alloc); | ||
1380 | } else { | ||
1381 | /* | ||
1382 | * There are approximately max_size saveable pages at this point | ||
1383 | * and we want to reduce this number down to size. | ||
1384 | */ | ||
1385 | alloc = max_size - size; | ||
1386 | size = preallocate_highmem_fraction(alloc, highmem, count); | ||
1387 | pages_highmem += size; | ||
1388 | alloc -= size; | ||
1389 | size = preallocate_image_memory(alloc, avail_normal); | ||
1390 | pages_highmem += preallocate_image_highmem(alloc - size); | ||
1391 | pages += pages_highmem + size; | ||
1392 | } | ||
1349 | 1393 | ||
1350 | /* | 1394 | /* |
1351 | * We only need as many page frames for the image as there are saveable | 1395 | * We only need as many page frames for the image as there are saveable |
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 5d0059eed3e4..e6a5bdf61a37 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap) | |||
136 | { | 136 | { |
137 | unsigned long offset; | 137 | unsigned long offset; |
138 | 138 | ||
139 | offset = swp_offset(get_swap_for_hibernation(swap)); | 139 | offset = swp_offset(get_swap_page_of_type(swap)); |
140 | if (offset) { | 140 | if (offset) { |
141 | if (swsusp_extents_insert(offset)) | 141 | if (swsusp_extents_insert(offset)) |
142 | swap_free_for_hibernation(swp_entry(swap, offset)); | 142 | swap_free(swp_entry(swap, offset)); |
143 | else | 143 | else |
144 | return swapdev_block(swap, offset); | 144 | return swapdev_block(swap, offset); |
145 | } | 145 | } |
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap) | |||
163 | ext = container_of(node, struct swsusp_extent, node); | 163 | ext = container_of(node, struct swsusp_extent, node); |
164 | rb_erase(node, &swsusp_extents); | 164 | rb_erase(node, &swsusp_extents); |
165 | for (offset = ext->start; offset <= ext->end; offset++) | 165 | for (offset = ext->start; offset <= ext->end; offset++) |
166 | swap_free_for_hibernation(swp_entry(swap, offset)); | 166 | swap_free(swp_entry(swap, offset)); |
167 | 167 | ||
168 | kfree(ext); | 168 | kfree(ext); |
169 | } | 169 | } |
diff --git a/kernel/sched.c b/kernel/sched.c
index 66a02ba83c01..1c3ea7a55b7b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1294,6 +1294,10 @@ static void resched_task(struct task_struct *p) | |||
1294 | static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) | 1294 | static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
1295 | { | 1295 | { |
1296 | } | 1296 | } |
1297 | |||
1298 | static void sched_avg_update(struct rq *rq) | ||
1299 | { | ||
1300 | } | ||
1297 | #endif /* CONFIG_SMP */ | 1301 | #endif /* CONFIG_SMP */ |
1298 | 1302 | ||
1299 | #if BITS_PER_LONG == 32 | 1303 | #if BITS_PER_LONG == 32 |
@@ -3182,6 +3186,8 @@ static void update_cpu_load(struct rq *this_rq) | |||
3182 | 3186 | ||
3183 | this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; | 3187 | this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; |
3184 | } | 3188 | } |
3189 | |||
3190 | sched_avg_update(this_rq); | ||
3185 | } | 3191 | } |
3186 | 3192 | ||
3187 | static void update_cpu_load_active(struct rq *this_rq) | 3193 | static void update_cpu_load_active(struct rq *this_rq) |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 806d1b227a21..a171138a9402 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -54,13 +54,13 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling | |||
54 | * Minimal preemption granularity for CPU-bound tasks: | 54 | * Minimal preemption granularity for CPU-bound tasks: |
55 | * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds) | 55 | * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds) |
56 | */ | 56 | */ |
57 | unsigned int sysctl_sched_min_granularity = 2000000ULL; | 57 | unsigned int sysctl_sched_min_granularity = 750000ULL; |
58 | unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL; | 58 | unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; |
59 | 59 | ||
60 | /* | 60 | /* |
61 | * is kept at sysctl_sched_latency / sysctl_sched_min_granularity | 61 | * is kept at sysctl_sched_latency / sysctl_sched_min_granularity |
62 | */ | 62 | */ |
63 | static unsigned int sched_nr_latency = 3; | 63 | static unsigned int sched_nr_latency = 8; |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * After fork, child runs first. If set to 0 (default) then | 66 | * After fork, child runs first. If set to 0 (default) then |
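Assuming sysctl_sched_latency keeps its default of 6 ms, the new values preserve the relation stated in the comment above: sched_nr_latency = sysctl_sched_latency / sysctl_sched_min_granularity = 6 ms / 0.75 ms = 8, where the old pair gave 6 ms / 2 ms = 3.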
@@ -1313,7 +1313,7 @@ static struct sched_group * | |||
1313 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, | 1313 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, |
1314 | int this_cpu, int load_idx) | 1314 | int this_cpu, int load_idx) |
1315 | { | 1315 | { |
1316 | struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups; | 1316 | struct sched_group *idlest = NULL, *group = sd->groups; |
1317 | unsigned long min_load = ULONG_MAX, this_load = 0; | 1317 | unsigned long min_load = ULONG_MAX, this_load = 0; |
1318 | int imbalance = 100 + (sd->imbalance_pct-100)/2; | 1318 | int imbalance = 100 + (sd->imbalance_pct-100)/2; |
1319 | 1319 | ||
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, | |||
1348 | 1348 | ||
1349 | if (local_group) { | 1349 | if (local_group) { |
1350 | this_load = avg_load; | 1350 | this_load = avg_load; |
1351 | this = group; | ||
1352 | } else if (avg_load < min_load) { | 1351 | } else if (avg_load < min_load) { |
1353 | min_load = avg_load; | 1352 | min_load = avg_load; |
1354 | idlest = group; | 1353 | idlest = group; |
@@ -2268,8 +2267,6 @@ unsigned long scale_rt_power(int cpu) | |||
2268 | struct rq *rq = cpu_rq(cpu); | 2267 | struct rq *rq = cpu_rq(cpu); |
2269 | u64 total, available; | 2268 | u64 total, available; |
2270 | 2269 | ||
2271 | sched_avg_update(rq); | ||
2272 | |||
2273 | total = sched_avg_period() + (rq->clock - rq->age_stamp); | 2270 | total = sched_avg_period() + (rq->clock - rq->age_stamp); |
2274 | available = total - rq->rt_avg; | 2271 | available = total - rq->rt_avg; |
2275 | 2272 | ||
@@ -3752,6 +3749,8 @@ static void task_fork_fair(struct task_struct *p) | |||
3752 | 3749 | ||
3753 | raw_spin_lock_irqsave(&rq->lock, flags); | 3750 | raw_spin_lock_irqsave(&rq->lock, flags); |
3754 | 3751 | ||
3752 | update_rq_clock(rq); | ||
3753 | |||
3755 | if (unlikely(task_cpu(p) != this_cpu)) | 3754 | if (unlikely(task_cpu(p) != this_cpu)) |
3756 | __set_task_cpu(p, this_cpu); | 3755 | __set_task_cpu(p, this_cpu); |
3757 | 3756 | ||
diff --git a/kernel/sys.c b/kernel/sys.c
index e9ad44489828..7f5a0cd296a9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) | |||
931 | pgid = pid; | 931 | pgid = pid; |
932 | if (pgid < 0) | 932 | if (pgid < 0) |
933 | return -EINVAL; | 933 | return -EINVAL; |
934 | rcu_read_lock(); | ||
934 | 935 | ||
935 | /* From this point forward we keep holding onto the tasklist lock | 936 | /* From this point forward we keep holding onto the tasklist lock |
936 | * so that our parent does not change from under us. -DaveM | 937 | * so that our parent does not change from under us. -DaveM |
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) | |||
984 | out: | 985 | out: |
985 | /* All paths lead to here, thus we are safe. -DaveM */ | 986 | /* All paths lead to here, thus we are safe. -DaveM */ |
986 | write_unlock_irq(&tasklist_lock); | 987 | write_unlock_irq(&tasklist_lock); |
988 | rcu_read_unlock(); | ||
987 | return err; | 989 | return err; |
988 | } | 990 | } |
989 | 991 | ||
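A condensed sketch (not the full syscall) of the locking order the sys.c hunks establish: the RCU read-side critical section now brackets the tasklist_lock section so that the PID-based task lookups performed inside it run under RCU protection, and the exit path unlocks in the reverse order.

	rcu_read_lock();
	write_lock_irq(&tasklist_lock);

	p = find_task_by_vpid(pid);	/* requires an RCU read-side critical section */
	/* ... validate and apply the new pgid ... */

	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();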
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ca38e8e3e907..f88552c6d227 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void) | |||
1713 | { | 1713 | { |
1714 | sysctl_set_parent(NULL, root_table); | 1714 | sysctl_set_parent(NULL, root_table); |
1715 | #ifdef CONFIG_SYSCTL_SYSCALL_CHECK | 1715 | #ifdef CONFIG_SYSCTL_SYSCALL_CHECK |
1716 | { | 1716 | sysctl_check_table(current->nsproxy, root_table); |
1717 | int err; | ||
1718 | err = sysctl_check_table(current->nsproxy, root_table); | ||
1719 | } | ||
1720 | #endif | 1717 | #endif |
1721 | return 0; | 1718 | return 0; |
1722 | } | 1719 | } |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 20aff3f1c719..65fb077ea79c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1358,24 +1358,29 @@ enum { | |||
1358 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ | 1358 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
1359 | 1359 | ||
1360 | struct ftrace_iterator { | 1360 | struct ftrace_iterator { |
1361 | struct ftrace_page *pg; | 1361 | loff_t pos; |
1362 | int hidx; | 1362 | loff_t func_pos; |
1363 | int idx; | 1363 | struct ftrace_page *pg; |
1364 | unsigned flags; | 1364 | struct dyn_ftrace *func; |
1365 | struct trace_parser parser; | 1365 | struct ftrace_func_probe *probe; |
1366 | struct trace_parser parser; | ||
1367 | int hidx; | ||
1368 | int idx; | ||
1369 | unsigned flags; | ||
1366 | }; | 1370 | }; |
1367 | 1371 | ||
1368 | static void * | 1372 | static void * |
1369 | t_hash_next(struct seq_file *m, void *v, loff_t *pos) | 1373 | t_hash_next(struct seq_file *m, loff_t *pos) |
1370 | { | 1374 | { |
1371 | struct ftrace_iterator *iter = m->private; | 1375 | struct ftrace_iterator *iter = m->private; |
1372 | struct hlist_node *hnd = v; | 1376 | struct hlist_node *hnd = NULL; |
1373 | struct hlist_head *hhd; | 1377 | struct hlist_head *hhd; |
1374 | 1378 | ||
1375 | WARN_ON(!(iter->flags & FTRACE_ITER_HASH)); | ||
1376 | |||
1377 | (*pos)++; | 1379 | (*pos)++; |
1380 | iter->pos = *pos; | ||
1378 | 1381 | ||
1382 | if (iter->probe) | ||
1383 | hnd = &iter->probe->node; | ||
1379 | retry: | 1384 | retry: |
1380 | if (iter->hidx >= FTRACE_FUNC_HASHSIZE) | 1385 | if (iter->hidx >= FTRACE_FUNC_HASHSIZE) |
1381 | return NULL; | 1386 | return NULL; |
@@ -1398,7 +1403,12 @@ t_hash_next(struct seq_file *m, void *v, loff_t *pos) | |||
1398 | } | 1403 | } |
1399 | } | 1404 | } |
1400 | 1405 | ||
1401 | return hnd; | 1406 | if (WARN_ON_ONCE(!hnd)) |
1407 | return NULL; | ||
1408 | |||
1409 | iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node); | ||
1410 | |||
1411 | return iter; | ||
1402 | } | 1412 | } |
1403 | 1413 | ||
1404 | static void *t_hash_start(struct seq_file *m, loff_t *pos) | 1414 | static void *t_hash_start(struct seq_file *m, loff_t *pos) |
@@ -1407,26 +1417,32 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos) | |||
1407 | void *p = NULL; | 1417 | void *p = NULL; |
1408 | loff_t l; | 1418 | loff_t l; |
1409 | 1419 | ||
1410 | if (!(iter->flags & FTRACE_ITER_HASH)) | 1420 | if (iter->func_pos > *pos) |
1411 | *pos = 0; | 1421 | return NULL; |
1412 | |||
1413 | iter->flags |= FTRACE_ITER_HASH; | ||
1414 | 1422 | ||
1415 | iter->hidx = 0; | 1423 | iter->hidx = 0; |
1416 | for (l = 0; l <= *pos; ) { | 1424 | for (l = 0; l <= (*pos - iter->func_pos); ) { |
1417 | p = t_hash_next(m, p, &l); | 1425 | p = t_hash_next(m, &l); |
1418 | if (!p) | 1426 | if (!p) |
1419 | break; | 1427 | break; |
1420 | } | 1428 | } |
1421 | return p; | 1429 | if (!p) |
1430 | return NULL; | ||
1431 | |||
1432 | /* Only set this if we have an item */ | ||
1433 | iter->flags |= FTRACE_ITER_HASH; | ||
1434 | |||
1435 | return iter; | ||
1422 | } | 1436 | } |
1423 | 1437 | ||
1424 | static int t_hash_show(struct seq_file *m, void *v) | 1438 | static int |
1439 | t_hash_show(struct seq_file *m, struct ftrace_iterator *iter) | ||
1425 | { | 1440 | { |
1426 | struct ftrace_func_probe *rec; | 1441 | struct ftrace_func_probe *rec; |
1427 | struct hlist_node *hnd = v; | ||
1428 | 1442 | ||
1429 | rec = hlist_entry(hnd, struct ftrace_func_probe, node); | 1443 | rec = iter->probe; |
1444 | if (WARN_ON_ONCE(!rec)) | ||
1445 | return -EIO; | ||
1430 | 1446 | ||
1431 | if (rec->ops->print) | 1447 | if (rec->ops->print) |
1432 | return rec->ops->print(m, rec->ip, rec->ops, rec->data); | 1448 | return rec->ops->print(m, rec->ip, rec->ops, rec->data); |
@@ -1447,12 +1463,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
1447 | struct dyn_ftrace *rec = NULL; | 1463 | struct dyn_ftrace *rec = NULL; |
1448 | 1464 | ||
1449 | if (iter->flags & FTRACE_ITER_HASH) | 1465 | if (iter->flags & FTRACE_ITER_HASH) |
1450 | return t_hash_next(m, v, pos); | 1466 | return t_hash_next(m, pos); |
1451 | 1467 | ||
1452 | (*pos)++; | 1468 | (*pos)++; |
1469 | iter->pos = *pos; | ||
1453 | 1470 | ||
1454 | if (iter->flags & FTRACE_ITER_PRINTALL) | 1471 | if (iter->flags & FTRACE_ITER_PRINTALL) |
1455 | return NULL; | 1472 | return t_hash_start(m, pos); |
1456 | 1473 | ||
1457 | retry: | 1474 | retry: |
1458 | if (iter->idx >= iter->pg->index) { | 1475 | if (iter->idx >= iter->pg->index) { |
@@ -1481,7 +1498,20 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
1481 | } | 1498 | } |
1482 | } | 1499 | } |
1483 | 1500 | ||
1484 | return rec; | 1501 | if (!rec) |
1502 | return t_hash_start(m, pos); | ||
1503 | |||
1504 | iter->func_pos = *pos; | ||
1505 | iter->func = rec; | ||
1506 | |||
1507 | return iter; | ||
1508 | } | ||
1509 | |||
1510 | static void reset_iter_read(struct ftrace_iterator *iter) | ||
1511 | { | ||
1512 | iter->pos = 0; | ||
1513 | iter->func_pos = 0; | ||
1514 | iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH); | ||
1485 | } | 1515 | } |
1486 | 1516 | ||
1487 | static void *t_start(struct seq_file *m, loff_t *pos) | 1517 | static void *t_start(struct seq_file *m, loff_t *pos) |
@@ -1492,6 +1522,12 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
1492 | 1522 | ||
1493 | mutex_lock(&ftrace_lock); | 1523 | mutex_lock(&ftrace_lock); |
1494 | /* | 1524 | /* |
1525 | * If an lseek was done, then reset and start from beginning. | ||
1526 | */ | ||
1527 | if (*pos < iter->pos) | ||
1528 | reset_iter_read(iter); | ||
1529 | |||
1530 | /* | ||
1495 | * For set_ftrace_filter reading, if we have the filter | 1531 | * For set_ftrace_filter reading, if we have the filter |
1496 | * off, we can short cut and just print out that all | 1532 | * off, we can short cut and just print out that all |
1497 | * functions are enabled. | 1533 | * functions are enabled. |
@@ -1500,12 +1536,19 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
1500 | if (*pos > 0) | 1536 | if (*pos > 0) |
1501 | return t_hash_start(m, pos); | 1537 | return t_hash_start(m, pos); |
1502 | iter->flags |= FTRACE_ITER_PRINTALL; | 1538 | iter->flags |= FTRACE_ITER_PRINTALL; |
1539 | /* reset in case of seek/pread */ | ||
1540 | iter->flags &= ~FTRACE_ITER_HASH; | ||
1503 | return iter; | 1541 | return iter; |
1504 | } | 1542 | } |
1505 | 1543 | ||
1506 | if (iter->flags & FTRACE_ITER_HASH) | 1544 | if (iter->flags & FTRACE_ITER_HASH) |
1507 | return t_hash_start(m, pos); | 1545 | return t_hash_start(m, pos); |
1508 | 1546 | ||
1547 | /* | ||
1548 | * Unfortunately, we need to restart at ftrace_pages_start | ||
1549 | * every time we let go of the ftrace_mutex. This is because | ||
1550 | * those pointers can change without the lock. | ||
1551 | */ | ||
1509 | iter->pg = ftrace_pages_start; | 1552 | iter->pg = ftrace_pages_start; |
1510 | iter->idx = 0; | 1553 | iter->idx = 0; |
1511 | for (l = 0; l <= *pos; ) { | 1554 | for (l = 0; l <= *pos; ) { |
@@ -1514,10 +1557,14 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
1514 | break; | 1557 | break; |
1515 | } | 1558 | } |
1516 | 1559 | ||
1517 | if (!p && iter->flags & FTRACE_ITER_FILTER) | 1560 | if (!p) { |
1518 | return t_hash_start(m, pos); | 1561 | if (iter->flags & FTRACE_ITER_FILTER) |
1562 | return t_hash_start(m, pos); | ||
1519 | 1563 | ||
1520 | return p; | 1564 | return NULL; |
1565 | } | ||
1566 | |||
1567 | return iter; | ||
1521 | } | 1568 | } |
1522 | 1569 | ||
1523 | static void t_stop(struct seq_file *m, void *p) | 1570 | static void t_stop(struct seq_file *m, void *p) |
@@ -1528,16 +1575,18 @@ static void t_stop(struct seq_file *m, void *p) | |||
1528 | static int t_show(struct seq_file *m, void *v) | 1575 | static int t_show(struct seq_file *m, void *v) |
1529 | { | 1576 | { |
1530 | struct ftrace_iterator *iter = m->private; | 1577 | struct ftrace_iterator *iter = m->private; |
1531 | struct dyn_ftrace *rec = v; | 1578 | struct dyn_ftrace *rec; |
1532 | 1579 | ||
1533 | if (iter->flags & FTRACE_ITER_HASH) | 1580 | if (iter->flags & FTRACE_ITER_HASH) |
1534 | return t_hash_show(m, v); | 1581 | return t_hash_show(m, iter); |
1535 | 1582 | ||
1536 | if (iter->flags & FTRACE_ITER_PRINTALL) { | 1583 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
1537 | seq_printf(m, "#### all functions enabled ####\n"); | 1584 | seq_printf(m, "#### all functions enabled ####\n"); |
1538 | return 0; | 1585 | return 0; |
1539 | } | 1586 | } |
1540 | 1587 | ||
1588 | rec = iter->func; | ||
1589 | |||
1541 | if (!rec) | 1590 | if (!rec) |
1542 | return 0; | 1591 | return 0; |
1543 | 1592 | ||
@@ -2406,7 +2455,7 @@ static const struct file_operations ftrace_filter_fops = { | |||
2406 | .open = ftrace_filter_open, | 2455 | .open = ftrace_filter_open, |
2407 | .read = seq_read, | 2456 | .read = seq_read, |
2408 | .write = ftrace_filter_write, | 2457 | .write = ftrace_filter_write, |
2409 | .llseek = no_llseek, | 2458 | .llseek = ftrace_regex_lseek, |
2410 | .release = ftrace_filter_release, | 2459 | .release = ftrace_filter_release, |
2411 | }; | 2460 | }; |
2412 | 2461 | ||
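An illustrative user-space consequence of switching .llseek from no_llseek to ftrace_regex_lseek, which the reset_iter_read() handling above supports: seeking back and re-reading set_ftrace_filter now restarts the listing instead of failing. This assumes debugfs is mounted at /sys/kernel/debug and omits error handling.

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[256];
		int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter", O_RDONLY);

		read(fd, buf, sizeof(buf));
		lseek(fd, 0, SEEK_SET);		/* previously rejected by no_llseek */
		read(fd, buf, sizeof(buf));	/* listing starts over from the top */
		close(fd);
		return 0;
	}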
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index ef27017caa56..4e2f03410377 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2994,13 +2994,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) | |||
2994 | 2994 | ||
2995 | static void rb_advance_iter(struct ring_buffer_iter *iter) | 2995 | static void rb_advance_iter(struct ring_buffer_iter *iter) |
2996 | { | 2996 | { |
2997 | struct ring_buffer *buffer; | ||
2998 | struct ring_buffer_per_cpu *cpu_buffer; | 2997 | struct ring_buffer_per_cpu *cpu_buffer; |
2999 | struct ring_buffer_event *event; | 2998 | struct ring_buffer_event *event; |
3000 | unsigned length; | 2999 | unsigned length; |
3001 | 3000 | ||
3002 | cpu_buffer = iter->cpu_buffer; | 3001 | cpu_buffer = iter->cpu_buffer; |
3003 | buffer = cpu_buffer->buffer; | ||
3004 | 3002 | ||
3005 | /* | 3003 | /* |
3006 | * Check if we are at the end of the buffer. | 3004 | * Check if we are at the end of the buffer. |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8bd600c020e5..727f24e563ae 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -90,7 +90,8 @@ enum { | |||
90 | /* | 90 | /* |
91 | * Structure fields follow one of the following exclusion rules. | 91 | * Structure fields follow one of the following exclusion rules. |
92 | * | 92 | * |
93 | * I: Set during initialization and read-only afterwards. | 93 | * I: Modifiable by initialization/destruction paths and read-only for |
94 | * everyone else. | ||
94 | * | 95 | * |
95 | * P: Preemption protected. Disabling preemption is enough and should | 96 | * P: Preemption protected. Disabling preemption is enough and should |
96 | * only be modified and accessed from the local cpu. | 97 | * only be modified and accessed from the local cpu. |
@@ -198,7 +199,7 @@ typedef cpumask_var_t mayday_mask_t; | |||
198 | cpumask_test_and_set_cpu((cpu), (mask)) | 199 | cpumask_test_and_set_cpu((cpu), (mask)) |
199 | #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) | 200 | #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) |
200 | #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) | 201 | #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) |
201 | #define alloc_mayday_mask(maskp, gfp) alloc_cpumask_var((maskp), (gfp)) | 202 | #define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp)) |
202 | #define free_mayday_mask(mask) free_cpumask_var((mask)) | 203 | #define free_mayday_mask(mask) free_cpumask_var((mask)) |
203 | #else | 204 | #else |
204 | typedef unsigned long mayday_mask_t; | 205 | typedef unsigned long mayday_mask_t; |
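The alloc_cpumask_var() to zalloc_cpumask_var() switch above matters because the mayday mask is only ever tested and set bit-by-bit, so it must start out all-zero. A userspace analogue, with calloc() standing in for the zeroing allocator and NR_CPUS plus the helper names purely illustrative:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 64
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long *alloc_mayday_mask(void)
{
	/* zero-initialised, like zalloc_cpumask_var(); plain malloc() would
	 * leave stale heap contents that read as CPUs already in mayday */
	return calloc((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG,
		      sizeof(unsigned long));
}

static int mayday_test_and_set(unsigned long *mask, int cpu)
{
	unsigned long bit = 1UL << (cpu % BITS_PER_LONG);
	unsigned long old = mask[cpu / BITS_PER_LONG];

	mask[cpu / BITS_PER_LONG] |= bit;
	return !!(old & bit);
}

int main(void)
{
	unsigned long *mask = alloc_mayday_mask();

	if (!mask)
		return 1;
	/* first request on a fresh mask must report "not yet set" */
	printf("cpu3 already set? %d\n", mayday_test_and_set(mask, 3));
	free(mask);
	return 0;
}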
@@ -943,10 +944,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
943 | struct global_cwq *gcwq; | 944 | struct global_cwq *gcwq; |
944 | struct cpu_workqueue_struct *cwq; | 945 | struct cpu_workqueue_struct *cwq; |
945 | struct list_head *worklist; | 946 | struct list_head *worklist; |
947 | unsigned int work_flags; | ||
946 | unsigned long flags; | 948 | unsigned long flags; |
947 | 949 | ||
948 | debug_work_activate(work); | 950 | debug_work_activate(work); |
949 | 951 | ||
952 | if (WARN_ON_ONCE(wq->flags & WQ_DYING)) | ||
953 | return; | ||
954 | |||
950 | /* determine gcwq to use */ | 955 | /* determine gcwq to use */ |
951 | if (!(wq->flags & WQ_UNBOUND)) { | 956 | if (!(wq->flags & WQ_UNBOUND)) { |
952 | struct global_cwq *last_gcwq; | 957 | struct global_cwq *last_gcwq; |
@@ -989,14 +994,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
989 | BUG_ON(!list_empty(&work->entry)); | 994 | BUG_ON(!list_empty(&work->entry)); |
990 | 995 | ||
991 | cwq->nr_in_flight[cwq->work_color]++; | 996 | cwq->nr_in_flight[cwq->work_color]++; |
997 | work_flags = work_color_to_flags(cwq->work_color); | ||
992 | 998 | ||
993 | if (likely(cwq->nr_active < cwq->max_active)) { | 999 | if (likely(cwq->nr_active < cwq->max_active)) { |
994 | cwq->nr_active++; | 1000 | cwq->nr_active++; |
995 | worklist = gcwq_determine_ins_pos(gcwq, cwq); | 1001 | worklist = gcwq_determine_ins_pos(gcwq, cwq); |
996 | } else | 1002 | } else { |
1003 | work_flags |= WORK_STRUCT_DELAYED; | ||
997 | worklist = &cwq->delayed_works; | 1004 | worklist = &cwq->delayed_works; |
1005 | } | ||
998 | 1006 | ||
999 | insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color)); | 1007 | insert_work(cwq, work, worklist, work_flags); |
1000 | 1008 | ||
1001 | spin_unlock_irqrestore(&gcwq->lock, flags); | 1009 | spin_unlock_irqrestore(&gcwq->lock, flags); |
1002 | } | 1010 | } |
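The WARN_ON_ONCE(wq->flags & WQ_DYING) guard added to __queue_work() pairs with destroy_workqueue() setting WQ_DYING further down: queueing on a workqueue that is being torn down is refused and flagged exactly once. A small userspace sketch of that warn-once pattern, with illustrative names and a simplified macro:

#include <stdbool.h>
#include <stdio.h>

#define WARN_ON_ONCE(cond) ({					\
	static bool __warned;					\
	bool __c = (cond);					\
	if (__c && !__warned) {					\
		__warned = true;				\
		fprintf(stderr, "warning: %s\n", #cond);	\
	}							\
	__c;							\
})

enum { WQ_DYING = 1 << 0 };
struct wq { unsigned flags; };

static void queue_work(struct wq *wq)
{
	/* refuse (and complain once) if someone queues on a dying queue */
	if (WARN_ON_ONCE(wq->flags & WQ_DYING))
		return;
	/* ... actually queue the work ... */
}

static void destroy(struct wq *wq)
{
	wq->flags |= WQ_DYING;	/* set before draining, as in the patch */
	/* ... flush and tear down ... */
}

int main(void)
{
	struct wq wq = { 0 };

	destroy(&wq);
	queue_work(&wq);	/* triggers the one-shot warning */
	queue_work(&wq);	/* silent */
	return 0;
}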
@@ -1215,6 +1223,7 @@ static void worker_leave_idle(struct worker *worker) | |||
1215 | * bound), %false if offline. | 1223 | * bound), %false if offline. |
1216 | */ | 1224 | */ |
1217 | static bool worker_maybe_bind_and_lock(struct worker *worker) | 1225 | static bool worker_maybe_bind_and_lock(struct worker *worker) |
1226 | __acquires(&gcwq->lock) | ||
1218 | { | 1227 | { |
1219 | struct global_cwq *gcwq = worker->gcwq; | 1228 | struct global_cwq *gcwq = worker->gcwq; |
1220 | struct task_struct *task = worker->task; | 1229 | struct task_struct *task = worker->task; |
@@ -1488,6 +1497,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq) | |||
1488 | * otherwise. | 1497 | * otherwise. |
1489 | */ | 1498 | */ |
1490 | static bool maybe_create_worker(struct global_cwq *gcwq) | 1499 | static bool maybe_create_worker(struct global_cwq *gcwq) |
1500 | __releases(&gcwq->lock) | ||
1501 | __acquires(&gcwq->lock) | ||
1491 | { | 1502 | { |
1492 | if (!need_to_create_worker(gcwq)) | 1503 | if (!need_to_create_worker(gcwq)) |
1493 | return false; | 1504 | return false; |
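The __releases()/__acquires() lines added here (and to process_one_work() and wait_trustee_state() below) are sparse lock-context annotations: they document that the function is entered with gcwq->lock held, drops it, and re-takes it before returning. A standalone sketch of how such annotations are defined and used, checked only when built under sparse (__CHECKER__), otherwise compiled away:

#ifdef __CHECKER__
# define __releases(x)	__attribute__((context(x, 1, 0)))
# define __acquires(x)	__attribute__((context(x, 0, 1)))
#else
# define __releases(x)
# define __acquires(x)
#endif

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* called with `lock` held; drops it around the slow part and re-takes it
 * before returning -- exactly the shape the annotations describe */
static void do_slow_work_locked(void)
	__releases(&lock)
	__acquires(&lock)
{
	pthread_mutex_unlock(&lock);
	/* ... slow, possibly sleeping work ... */
	pthread_mutex_lock(&lock);
}

int main(void)
{
	pthread_mutex_lock(&lock);
	do_slow_work_locked();
	pthread_mutex_unlock(&lock);
	return 0;
}

Without the annotations, sparse reports a context imbalance for functions whose lock count differs between entry and exit; with them, the imbalance is declared intentional.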
@@ -1662,6 +1673,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
1662 | struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); | 1673 | struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); |
1663 | 1674 | ||
1664 | move_linked_works(work, pos, NULL); | 1675 | move_linked_works(work, pos, NULL); |
1676 | __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); | ||
1665 | cwq->nr_active++; | 1677 | cwq->nr_active++; |
1666 | } | 1678 | } |
1667 | 1679 | ||
@@ -1669,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
1669 | * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight | 1681 | * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight |
1670 | * @cwq: cwq of interest | 1682 | * @cwq: cwq of interest |
1671 | * @color: color of work which left the queue | 1683 | * @color: color of work which left the queue |
1684 | * @delayed: for a delayed work | ||
1672 | * | 1685 | * |
1673 | * A work either has completed or is removed from pending queue, | 1686 | * A work either has completed or is removed from pending queue, |
1674 | * decrement nr_in_flight of its cwq and handle workqueue flushing. | 1687 | * decrement nr_in_flight of its cwq and handle workqueue flushing. |
@@ -1676,19 +1689,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
1676 | * CONTEXT: | 1689 | * CONTEXT: |
1677 | * spin_lock_irq(gcwq->lock). | 1690 | * spin_lock_irq(gcwq->lock). |
1678 | */ | 1691 | */ |
1679 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) | 1692 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color, |
1693 | bool delayed) | ||
1680 | { | 1694 | { |
1681 | /* ignore uncolored works */ | 1695 | /* ignore uncolored works */ |
1682 | if (color == WORK_NO_COLOR) | 1696 | if (color == WORK_NO_COLOR) |
1683 | return; | 1697 | return; |
1684 | 1698 | ||
1685 | cwq->nr_in_flight[color]--; | 1699 | cwq->nr_in_flight[color]--; |
1686 | cwq->nr_active--; | ||
1687 | 1700 | ||
1688 | if (!list_empty(&cwq->delayed_works)) { | 1701 | if (!delayed) { |
1689 | /* one down, submit a delayed one */ | 1702 | cwq->nr_active--; |
1690 | if (cwq->nr_active < cwq->max_active) | 1703 | if (!list_empty(&cwq->delayed_works)) { |
1691 | cwq_activate_first_delayed(cwq); | 1704 | /* one down, submit a delayed one */ |
1705 | if (cwq->nr_active < cwq->max_active) | ||
1706 | cwq_activate_first_delayed(cwq); | ||
1707 | } | ||
1692 | } | 1708 | } |
1693 | 1709 | ||
1694 | /* is flush in progress and are we at the flushing tip? */ | 1710 | /* is flush in progress and are we at the flushing tip? */ |
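The hunks above are the core of the max_active accounting fix: works parked beyond max_active are tagged WORK_STRUCT_DELAYED when queued, and cwq_dec_nr_in_flight() now only decrements nr_active (and promotes a delayed work) for works that actually held an active slot. Previously, cancelling a still-delayed work also decremented nr_active, corrupting the count. A minimal userspace model of the corrected accounting, with illustrative names and a counter standing in for the delayed_works list:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ACTIVE 2

struct cwq {
	int nr_active;
	int nr_delayed;		/* stand-in for the delayed_works list */
};

/* returns true if the work had to be parked as delayed */
static bool queue_work(struct cwq *cwq)
{
	if (cwq->nr_active < MAX_ACTIVE) {
		cwq->nr_active++;
		return false;
	}
	cwq->nr_delayed++;
	return true;		/* caller records the DELAYED flag on the work */
}

static void dec_nr_in_flight(struct cwq *cwq, bool delayed)
{
	if (delayed) {
		/* never occupied an active slot; just drop it from the
		 * delayed count (the kernel unlinks it from the list) */
		cwq->nr_delayed--;
		return;
	}
	cwq->nr_active--;
	if (cwq->nr_delayed && cwq->nr_active < MAX_ACTIVE) {
		/* one down, submit a delayed one */
		cwq->nr_delayed--;
		cwq->nr_active++;
	}
}

int main(void)
{
	struct cwq cwq = { 0 };
	bool d1 = queue_work(&cwq);	/* active */
	bool d2 = queue_work(&cwq);	/* active */
	bool d3 = queue_work(&cwq);	/* parked as delayed */

	dec_nr_in_flight(&cwq, d3);	/* cancel the delayed one: no underflow */
	dec_nr_in_flight(&cwq, d1);	/* retire the active ones */
	dec_nr_in_flight(&cwq, d2);
	printf("active=%d delayed=%d\n", cwq.nr_active, cwq.nr_delayed);
	return 0;
}

Run against the pre-fix behaviour (unconditional nr_active--), the same sequence would leave nr_active negative, which is what try_to_grab_pending() now avoids by passing the work's DELAYED bit into the decrement.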
@@ -1725,6 +1741,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) | |||
1725 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. | 1741 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. |
1726 | */ | 1742 | */ |
1727 | static void process_one_work(struct worker *worker, struct work_struct *work) | 1743 | static void process_one_work(struct worker *worker, struct work_struct *work) |
1744 | __releases(&gcwq->lock) | ||
1745 | __acquires(&gcwq->lock) | ||
1728 | { | 1746 | { |
1729 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); | 1747 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); |
1730 | struct global_cwq *gcwq = cwq->gcwq; | 1748 | struct global_cwq *gcwq = cwq->gcwq; |
@@ -1823,7 +1841,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) | |||
1823 | hlist_del_init(&worker->hentry); | 1841 | hlist_del_init(&worker->hentry); |
1824 | worker->current_work = NULL; | 1842 | worker->current_work = NULL; |
1825 | worker->current_cwq = NULL; | 1843 | worker->current_cwq = NULL; |
1826 | cwq_dec_nr_in_flight(cwq, work_color); | 1844 | cwq_dec_nr_in_flight(cwq, work_color, false); |
1827 | } | 1845 | } |
1828 | 1846 | ||
1829 | /** | 1847 | /** |
@@ -2388,7 +2406,8 @@ static int try_to_grab_pending(struct work_struct *work) | |||
2388 | debug_work_deactivate(work); | 2406 | debug_work_deactivate(work); |
2389 | list_del_init(&work->entry); | 2407 | list_del_init(&work->entry); |
2390 | cwq_dec_nr_in_flight(get_work_cwq(work), | 2408 | cwq_dec_nr_in_flight(get_work_cwq(work), |
2391 | get_work_color(work)); | 2409 | get_work_color(work), |
2410 | *work_data_bits(work) & WORK_STRUCT_DELAYED); | ||
2392 | ret = 1; | 2411 | ret = 1; |
2393 | } | 2412 | } |
2394 | } | 2413 | } |
@@ -2791,7 +2810,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, | |||
2791 | if (IS_ERR(rescuer->task)) | 2810 | if (IS_ERR(rescuer->task)) |
2792 | goto err; | 2811 | goto err; |
2793 | 2812 | ||
2794 | wq->rescuer = rescuer; | ||
2795 | rescuer->task->flags |= PF_THREAD_BOUND; | 2813 | rescuer->task->flags |= PF_THREAD_BOUND; |
2796 | wake_up_process(rescuer->task); | 2814 | wake_up_process(rescuer->task); |
2797 | } | 2815 | } |
@@ -2833,6 +2851,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
2833 | { | 2851 | { |
2834 | unsigned int cpu; | 2852 | unsigned int cpu; |
2835 | 2853 | ||
2854 | wq->flags |= WQ_DYING; | ||
2836 | flush_workqueue(wq); | 2855 | flush_workqueue(wq); |
2837 | 2856 | ||
2838 | /* | 2857 | /* |
@@ -2857,6 +2876,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
2857 | if (wq->flags & WQ_RESCUER) { | 2876 | if (wq->flags & WQ_RESCUER) { |
2858 | kthread_stop(wq->rescuer->task); | 2877 | kthread_stop(wq->rescuer->task); |
2859 | free_mayday_mask(wq->mayday_mask); | 2878 | free_mayday_mask(wq->mayday_mask); |
2879 | kfree(wq->rescuer); | ||
2860 | } | 2880 | } |
2861 | 2881 | ||
2862 | free_cwqs(wq); | 2882 | free_cwqs(wq); |
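The kfree(wq->rescuer) added to destroy_workqueue() closes a leak: the rescuer's thread was stopped and its mayday mask freed, but the rescuer structure itself was never released, so every create/destroy cycle of a WQ_RESCUER workqueue leaked one allocation. A userspace model of the symmetric teardown, with pthreads standing in for kthreads and all names illustrative:

#include <pthread.h>
#include <stdlib.h>

struct rescuer {
	pthread_t task;
	unsigned long *mayday_mask;
};

static void *rescuer_fn(void *arg) { (void)arg; return NULL; }

static struct rescuer *rescuer_create(void)
{
	struct rescuer *r = calloc(1, sizeof(*r));

	if (!r)
		return NULL;
	r->mayday_mask = calloc(1, sizeof(unsigned long));
	if (pthread_create(&r->task, NULL, rescuer_fn, r) != 0) {
		free(r->mayday_mask);
		free(r);
		return NULL;
	}
	return r;
}

static void rescuer_destroy(struct rescuer *r)
{
	pthread_join(r->task, NULL);	/* kthread_stop() analogue */
	free(r->mayday_mask);		/* free_mayday_mask() analogue */
	free(r);			/* the piece the patch adds */
}

int main(void)
{
	struct rescuer *r = rescuer_create();

	if (r)
		rescuer_destroy(r);
	return 0;
}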
@@ -3239,6 +3259,8 @@ static int __cpuinit trustee_thread(void *__gcwq) | |||
3239 | * multiple times. To be used by cpu_callback. | 3259 | * multiple times. To be used by cpu_callback. |
3240 | */ | 3260 | */ |
3241 | static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) | 3261 | static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) |
3262 | __releases(&gcwq->lock) | ||
3263 | __acquires(&gcwq->lock) | ||
3242 | { | 3264 | { |
3243 | if (!(gcwq->trustee_state == state || | 3265 | if (!(gcwq->trustee_state == state || |
3244 | gcwq->trustee_state == TRUSTEE_DONE)) { | 3266 | gcwq->trustee_state == TRUSTEE_DONE)) { |
@@ -3545,8 +3567,7 @@ static int __init init_workqueues(void) | |||
3545 | spin_lock_init(&gcwq->lock); | 3567 | spin_lock_init(&gcwq->lock); |
3546 | INIT_LIST_HEAD(&gcwq->worklist); | 3568 | INIT_LIST_HEAD(&gcwq->worklist); |
3547 | gcwq->cpu = cpu; | 3569 | gcwq->cpu = cpu; |
3548 | if (cpu == WORK_CPU_UNBOUND) | 3570 | gcwq->flags |= GCWQ_DISASSOCIATED; |
3549 | gcwq->flags |= GCWQ_DISASSOCIATED; | ||
3550 | 3571 | ||
3551 | INIT_LIST_HEAD(&gcwq->idle_list); | 3572 | INIT_LIST_HEAD(&gcwq->idle_list); |
3552 | for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) | 3573 | for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) |
@@ -3570,6 +3591,8 @@ static int __init init_workqueues(void) | |||
3570 | struct global_cwq *gcwq = get_gcwq(cpu); | 3591 | struct global_cwq *gcwq = get_gcwq(cpu); |
3571 | struct worker *worker; | 3592 | struct worker *worker; |
3572 | 3593 | ||
3594 | if (cpu != WORK_CPU_UNBOUND) | ||
3595 | gcwq->flags &= ~GCWQ_DISASSOCIATED; | ||
3573 | worker = create_worker(gcwq, true); | 3596 | worker = create_worker(gcwq, true); |
3574 | BUG_ON(!worker); | 3597 | BUG_ON(!worker); |
3575 | spin_lock_irq(&gcwq->lock); | 3598 | spin_lock_irq(&gcwq->lock); |
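The last two hunks change the boot-time ordering: every gcwq now starts out GCWQ_DISASSOCIATED, and the flag is cleared only for real, online CPUs immediately before their initial worker is created, while the unbound context keeps it. A small sketch of that two-phase setup, with the names and the online test purely illustrative:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS		4
#define CPU_UNBOUND	NR_CPUS		/* extra pseudo-CPU slot */

enum { DISASSOCIATED = 1 << 0 };
struct gcwq { unsigned flags; };

static bool cpu_online(int cpu)
{
	return cpu < 2;			/* pretend CPUs 0 and 1 are up */
}

int main(void)
{
	struct gcwq gcwq[NR_CPUS + 1];

	/* phase 1: everything starts life disassociated */
	for (int cpu = 0; cpu <= CPU_UNBOUND; cpu++)
		gcwq[cpu].flags = DISASSOCIATED;

	/* phase 2: walk the online CPUs plus the unbound slot; only real
	 * CPUs get re-associated before their first worker is created */
	for (int cpu = 0; cpu <= CPU_UNBOUND; cpu++) {
		if (cpu != CPU_UNBOUND && !cpu_online(cpu))
			continue;
		if (cpu != CPU_UNBOUND)
			gcwq[cpu].flags &= ~DISASSOCIATED;
		/* create the initial worker for this context here */
	}

	for (int cpu = 0; cpu <= CPU_UNBOUND; cpu++)
		printf("gcwq %d: %s\n", cpu,
		       gcwq[cpu].flags & DISASSOCIATED ?
		       "disassociated" : "associated");
	return 0;
}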