author	Ingo Molnar <mingo@elte.hu>	2010-10-07 03:43:11 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-10-07 03:43:11 -0400
commit	d4f8f217b8a5d5bd02af979650418dca4caec472 (patch)
tree	af047bfa9729c975e24cb7624107574e884d3a57 /kernel
parent	2dfbf4dfbe47a484bae20456c12b40763b9b6af7 (diff)
parent	773e3f93577ffb493fb7c39b1a6ecf39b5748e87 (diff)
Merge branch 'rcu/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu into core/rcu
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup.c	13
-rw-r--r--	kernel/debug/debug_core.c	2
-rw-r--r--	kernel/debug/kdb/kdb_bp.c	2
-rw-r--r--	kernel/debug/kdb/kdb_main.c	2
-rw-r--r--	kernel/gcov/fs.c	244
-rw-r--r--	kernel/groups.c	5
-rw-r--r--	kernel/hrtimer.c	3
-rw-r--r--	kernel/mutex.c	23
-rw-r--r--	kernel/perf_event.c	26
-rw-r--r--	kernel/pm_qos_params.c	12
-rw-r--r--	kernel/power/hibernate.c	1
-rw-r--r--	kernel/power/poweroff.c	2
-rw-r--r--	kernel/power/snapshot.c	1
-rw-r--r--	kernel/power/swap.c	6
-rw-r--r--	kernel/rcupdate.c	2
-rw-r--r--	kernel/sched.c	10
-rw-r--r--	kernel/sched_fair.c	5
-rw-r--r--	kernel/smp.c	17
-rw-r--r--	kernel/sys.c	2
-rw-r--r--	kernel/sysctl.c	5
-rw-r--r--	kernel/trace/ftrace.c	15
-rw-r--r--	kernel/trace/ring_buffer.c	2
-rw-r--r--	kernel/trace/trace_stack.c	2
-rw-r--r--	kernel/watchdog.c	20
-rw-r--r--	kernel/workqueue.c	53
25 files changed, 327 insertions, 148 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e5c5497a7dca..291ba3d04bea 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1791,19 +1791,20 @@ out:
1791} 1791}
1792 1792
1793/** 1793/**
1794 * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup 1794 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
1795 * @from: attach to all cgroups of a given task
1795 * @tsk: the task to be attached 1796 * @tsk: the task to be attached
1796 */ 1797 */
1797int cgroup_attach_task_current_cg(struct task_struct *tsk) 1798int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
1798{ 1799{
1799 struct cgroupfs_root *root; 1800 struct cgroupfs_root *root;
1800 struct cgroup *cur_cg;
1801 int retval = 0; 1801 int retval = 0;
1802 1802
1803 cgroup_lock(); 1803 cgroup_lock();
1804 for_each_active_root(root) { 1804 for_each_active_root(root) {
1805 cur_cg = task_cgroup_from_root(current, root); 1805 struct cgroup *from_cg = task_cgroup_from_root(from, root);
1806 retval = cgroup_attach_task(cur_cg, tsk); 1806
1807 retval = cgroup_attach_task(from_cg, tsk);
1807 if (retval) 1808 if (retval)
1808 break; 1809 break;
1809 } 1810 }
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
1811 1812
1812 return retval; 1813 return retval;
1813} 1814}
1814EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg); 1815EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
1815 1816
1816/* 1817/*
1817 * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex 1818 * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
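The cgroup.c hunk renames cgroup_attach_task_current_cg() to cgroup_attach_task_all() and adds an explicit source task instead of hard-wiring "current". A minimal sketch of a hypothetical caller; the function name and the helper-thread scenario are illustrative, not from this patch:

#include <linux/cgroup.h>
#include <linux/sched.h>

/*
 * Illustrative only: attach a freshly created helper task to every
 * cgroup that the originating task belongs to.  Before this change the
 * source was always "current"; now any task can be named as the source.
 */
static int example_adopt_cgroups(struct task_struct *from,
				 struct task_struct *worker)
{
	return cgroup_attach_task_all(from, worker);
}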
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 3c2d4972d235..de407c78178d 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -741,7 +741,7 @@ static struct console kgdbcons = {
741}; 741};
742 742
743#ifdef CONFIG_MAGIC_SYSRQ 743#ifdef CONFIG_MAGIC_SYSRQ
744static void sysrq_handle_dbg(int key, struct tty_struct *tty) 744static void sysrq_handle_dbg(int key)
745{ 745{
746 if (!dbg_io_ops) { 746 if (!dbg_io_ops) {
747 printk(KERN_CRIT "ERROR: No KGDB I/O module available\n"); 747 printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
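sysrq_handle_dbg() drops its tty argument here because the sysrq handler prototype was reduced to take only the key (the same conversion shows up below in kdb_main.c and power/poweroff.c). A hedged sketch of a handler written against the new prototype; the key choice, names and messages are illustrative:

#include <linux/sysrq.h>
#include <linux/kernel.h>

/* New-style handler: only the triggering key is passed, no tty pointer. */
static void example_sysrq_handler(int key)
{
	printk(KERN_INFO "example sysrq handler fired for key '%c'\n", key);
}

static struct sysrq_key_op example_sysrq_op = {
	.handler	= example_sysrq_handler,
	.help_msg	= "example(x)",
	.action_msg	= "Example action",
};

/* Registration (illustrative): register_sysrq_key('x', &example_sysrq_op); */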
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index 75bd9b3ebbb7..20059ef4459a 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv)
274 int i, bpno; 274 int i, bpno;
275 kdb_bp_t *bp, *bp_check; 275 kdb_bp_t *bp, *bp_check;
276 int diag; 276 int diag;
277 int free;
278 char *symname = NULL; 277 char *symname = NULL;
279 long offset = 0ul; 278 long offset = 0ul;
280 int nextarg; 279 int nextarg;
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv)
305 /* 304 /*
306 * Find an empty bp structure to allocate 305 * Find an empty bp structure to allocate
307 */ 306 */
308 free = KDB_MAXBPT;
309 for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { 307 for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
310 if (bp->bp_free) 308 if (bp->bp_free)
311 break; 309 break;
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 28b844118bbd..caf057a3de0e 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1929,7 +1929,7 @@ static int kdb_sr(int argc, const char **argv)
1929 if (argc != 1) 1929 if (argc != 1)
1930 return KDB_ARGCOUNT; 1930 return KDB_ARGCOUNT;
1931 kdb_trap_printk++; 1931 kdb_trap_printk++;
1932 __handle_sysrq(*argv[1], NULL, 0); 1932 __handle_sysrq(*argv[1], false);
1933 kdb_trap_printk--; 1933 kdb_trap_printk--;
1934 1934
1935 return 0; 1935 return 0;
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index ef3c3f88a7a3..f83972b16564 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
@@ -33,10 +33,11 @@
33 * @children: child nodes 33 * @children: child nodes
34 * @all: list head for list of all nodes 34 * @all: list head for list of all nodes
35 * @parent: parent node 35 * @parent: parent node
36 * @info: associated profiling data structure if not a directory 36 * @loaded_info: array of pointers to profiling data sets for loaded object
37 * @ghost: when an object file containing profiling data is unloaded we keep a 37 * files.
38 * copy of the profiling data here to allow collecting coverage data 38 * @num_loaded: number of profiling data sets for loaded object files.
39 * for cleanup code. Such a node is called a "ghost". 39 * @unloaded_info: accumulated copy of profiling data sets for unloaded
40 * object files. Used only when gcov_persist=1.
40 * @dentry: main debugfs entry, either a directory or data file 41 * @dentry: main debugfs entry, either a directory or data file
41 * @links: associated symbolic links 42 * @links: associated symbolic links
42 * @name: data file basename 43 * @name: data file basename
@@ -51,10 +52,11 @@ struct gcov_node {
51 struct list_head children; 52 struct list_head children;
52 struct list_head all; 53 struct list_head all;
53 struct gcov_node *parent; 54 struct gcov_node *parent;
54 struct gcov_info *info; 55 struct gcov_info **loaded_info;
55 struct gcov_info *ghost; 56 struct gcov_info *unloaded_info;
56 struct dentry *dentry; 57 struct dentry *dentry;
57 struct dentry **links; 58 struct dentry **links;
59 int num_loaded;
58 char name[0]; 60 char name[0];
59}; 61};
60 62
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = {
136}; 138};
137 139
138/* 140/*
139 * Return the profiling data set for a given node. This can either be the 141 * Return a profiling data set associated with the given node. This is
140 * original profiling data structure or a duplicate (also called "ghost") 142 * either a data set for a loaded object file or a data set copy in case
141 * in case the associated object file has been unloaded. 143 * all associated object files have been unloaded.
142 */ 144 */
143static struct gcov_info *get_node_info(struct gcov_node *node) 145static struct gcov_info *get_node_info(struct gcov_node *node)
144{ 146{
145 if (node->info) 147 if (node->num_loaded > 0)
146 return node->info; 148 return node->loaded_info[0];
147 149
148 return node->ghost; 150 return node->unloaded_info;
151}
152
153/*
154 * Return a newly allocated profiling data set which contains the sum of
155 * all profiling data associated with the given node.
156 */
157static struct gcov_info *get_accumulated_info(struct gcov_node *node)
158{
159 struct gcov_info *info;
160 int i = 0;
161
162 if (node->unloaded_info)
163 info = gcov_info_dup(node->unloaded_info);
164 else
165 info = gcov_info_dup(node->loaded_info[i++]);
166 if (!info)
167 return NULL;
168 for (; i < node->num_loaded; i++)
169 gcov_info_add(info, node->loaded_info[i]);
170
171 return info;
149} 172}
150 173
151/* 174/*
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file)
163 mutex_lock(&node_lock); 186 mutex_lock(&node_lock);
164 /* 187 /*
165 * Read from a profiling data copy to minimize reference tracking 188 * Read from a profiling data copy to minimize reference tracking
166 * complexity and concurrent access. 189 * complexity and concurrent access and to keep accumulating multiple
190 * profiling data sets associated with one node simple.
167 */ 191 */
168 info = gcov_info_dup(get_node_info(node)); 192 info = get_accumulated_info(node);
169 if (!info) 193 if (!info)
170 goto out_unlock; 194 goto out_unlock;
171 iter = gcov_iter_new(info); 195 iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name)
225 return NULL; 249 return NULL;
226} 250}
227 251
252/*
253 * Reset all profiling data associated with the specified node.
254 */
255static void reset_node(struct gcov_node *node)
256{
257 int i;
258
259 if (node->unloaded_info)
260 gcov_info_reset(node->unloaded_info);
261 for (i = 0; i < node->num_loaded; i++)
262 gcov_info_reset(node->loaded_info[i]);
263}
264
228static void remove_node(struct gcov_node *node); 265static void remove_node(struct gcov_node *node);
229 266
230/* 267/*
231 * write() implementation for gcov data files. Reset profiling data for the 268 * write() implementation for gcov data files. Reset profiling data for the
232 * associated file. If the object file has been unloaded (i.e. this is 269 * corresponding file. If all associated object files have been unloaded,
233 * a "ghost" node), remove the debug fs node as well. 270 * remove the debug fs node as well.
234 */ 271 */
235static ssize_t gcov_seq_write(struct file *file, const char __user *addr, 272static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
236 size_t len, loff_t *pos) 273 size_t len, loff_t *pos)
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
245 node = get_node_by_name(info->filename); 282 node = get_node_by_name(info->filename);
246 if (node) { 283 if (node) {
247 /* Reset counts or remove node for unloaded modules. */ 284 /* Reset counts or remove node for unloaded modules. */
248 if (node->ghost) 285 if (node->num_loaded == 0)
249 remove_node(node); 286 remove_node(node);
250 else 287 else
251 gcov_info_reset(node->info); 288 reset_node(node);
252 } 289 }
253 /* Reset counts for open file. */ 290 /* Reset counts for open file. */
254 gcov_info_reset(info); 291 gcov_info_reset(info);
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info,
378 INIT_LIST_HEAD(&node->list); 415 INIT_LIST_HEAD(&node->list);
379 INIT_LIST_HEAD(&node->children); 416 INIT_LIST_HEAD(&node->children);
380 INIT_LIST_HEAD(&node->all); 417 INIT_LIST_HEAD(&node->all);
381 node->info = info; 418 if (node->loaded_info) {
419 node->loaded_info[0] = info;
420 node->num_loaded = 1;
421 }
382 node->parent = parent; 422 node->parent = parent;
383 if (name) 423 if (name)
384 strcpy(node->name, name); 424 strcpy(node->name, name);
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent,
394 struct gcov_node *node; 434 struct gcov_node *node;
395 435
396 node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); 436 node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
397 if (!node) { 437 if (!node)
398 pr_warning("out of memory\n"); 438 goto err_nomem;
399 return NULL; 439 if (info) {
440 node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
441 GFP_KERNEL);
442 if (!node->loaded_info)
443 goto err_nomem;
400 } 444 }
401 init_node(node, info, name, parent); 445 init_node(node, info, name, parent);
402 /* Differentiate between gcov data file nodes and directory nodes. */ 446 /* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent,
416 list_add(&node->all, &all_head); 460 list_add(&node->all, &all_head);
417 461
418 return node; 462 return node;
463
464err_nomem:
465 kfree(node);
466 pr_warning("out of memory\n");
467 return NULL;
419} 468}
420 469
421/* Remove symbolic links associated with node. */ 470/* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node)
441 list_del(&node->all); 490 list_del(&node->all);
442 debugfs_remove(node->dentry); 491 debugfs_remove(node->dentry);
443 remove_links(node); 492 remove_links(node);
444 if (node->ghost) 493 kfree(node->loaded_info);
445 gcov_info_free(node->ghost); 494 if (node->unloaded_info)
495 gcov_info_free(node->unloaded_info);
446 kfree(node); 496 kfree(node);
447} 497}
448 498
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent,
477 527
478/* 528/*
479 * write() implementation for reset file. Reset all profiling data to zero 529 * write() implementation for reset file. Reset all profiling data to zero
480 * and remove ghost nodes. 530 * and remove nodes for which all associated object files are unloaded.
481 */ 531 */
482static ssize_t reset_write(struct file *file, const char __user *addr, 532static ssize_t reset_write(struct file *file, const char __user *addr,
483 size_t len, loff_t *pos) 533 size_t len, loff_t *pos)
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr,
487 mutex_lock(&node_lock); 537 mutex_lock(&node_lock);
488restart: 538restart:
489 list_for_each_entry(node, &all_head, all) { 539 list_for_each_entry(node, &all_head, all) {
490 if (node->info) 540 if (node->num_loaded > 0)
491 gcov_info_reset(node->info); 541 reset_node(node);
492 else if (list_empty(&node->children)) { 542 else if (list_empty(&node->children)) {
493 remove_node(node); 543 remove_node(node);
494 /* Several nodes may have gone - restart loop. */ 544 /* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@ err_remove:
564} 614}
565 615
566/* 616/*
567 * The profiling data set associated with this node is being unloaded. Store a 617 * Associate a profiling data set with an existing node. Needs to be called
568 * copy of the profiling data and turn this node into a "ghost". 618 * with node_lock held.
569 */ 619 */
570static int ghost_node(struct gcov_node *node) 620static void add_info(struct gcov_node *node, struct gcov_info *info)
571{ 621{
572 node->ghost = gcov_info_dup(node->info); 622 struct gcov_info **loaded_info;
573 if (!node->ghost) { 623 int num = node->num_loaded;
574 pr_warning("could not save data for '%s' (out of memory)\n", 624
575 node->info->filename); 625 /*
576 return -ENOMEM; 626 * Prepare new array. This is done first to simplify cleanup in
627 * case the new data set is incompatible, the node only contains
628 * unloaded data sets and there's not enough memory for the array.
629 */
630 loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
631 if (!loaded_info) {
632 pr_warning("could not add '%s' (out of memory)\n",
633 info->filename);
634 return;
635 }
636 memcpy(loaded_info, node->loaded_info,
637 num * sizeof(struct gcov_info *));
638 loaded_info[num] = info;
639 /* Check if the new data set is compatible. */
640 if (num == 0) {
641 /*
642 * A module was unloaded, modified and reloaded. The new
643 * data set replaces the copy of the last one.
644 */
645 if (!gcov_info_is_compatible(node->unloaded_info, info)) {
646 pr_warning("discarding saved data for %s "
647 "(incompatible version)\n", info->filename);
648 gcov_info_free(node->unloaded_info);
649 node->unloaded_info = NULL;
650 }
651 } else {
652 /*
653 * Two different versions of the same object file are loaded.
654 * The initial one takes precedence.
655 */
656 if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
657 pr_warning("could not add '%s' (incompatible "
658 "version)\n", info->filename);
659 kfree(loaded_info);
660 return;
661 }
577 } 662 }
578 node->info = NULL; 663 /* Overwrite previous array. */
664 kfree(node->loaded_info);
665 node->loaded_info = loaded_info;
666 node->num_loaded = num + 1;
667}
579 668
580 return 0; 669/*
670 * Return the index of a profiling data set associated with a node.
671 */
672static int get_info_index(struct gcov_node *node, struct gcov_info *info)
673{
674 int i;
675
676 for (i = 0; i < node->num_loaded; i++) {
677 if (node->loaded_info[i] == info)
678 return i;
679 }
680 return -ENOENT;
581} 681}
582 682
583/* 683/*
584 * Profiling data for this node has been loaded again. Add profiling data 684 * Save the data of a profiling data set which is being unloaded.
585 * from previous instantiation and turn this node into a regular node.
586 */ 685 */
587static void revive_node(struct gcov_node *node, struct gcov_info *info) 686static void save_info(struct gcov_node *node, struct gcov_info *info)
588{ 687{
589 if (gcov_info_is_compatible(node->ghost, info)) 688 if (node->unloaded_info)
590 gcov_info_add(info, node->ghost); 689 gcov_info_add(node->unloaded_info, info);
591 else { 690 else {
592 pr_warning("discarding saved data for '%s' (version changed)\n", 691 node->unloaded_info = gcov_info_dup(info);
692 if (!node->unloaded_info) {
693 pr_warning("could not save data for '%s' "
694 "(out of memory)\n", info->filename);
695 }
696 }
697}
698
699/*
700 * Disassociate a profiling data set from a node. Needs to be called with
701 * node_lock held.
702 */
703static void remove_info(struct gcov_node *node, struct gcov_info *info)
704{
705 int i;
706
707 i = get_info_index(node, info);
708 if (i < 0) {
709 pr_warning("could not remove '%s' (not found)\n",
593 info->filename); 710 info->filename);
711 return;
594 } 712 }
595 gcov_info_free(node->ghost); 713 if (gcov_persist)
596 node->ghost = NULL; 714 save_info(node, info);
597 node->info = info; 715 /* Shrink array. */
716 node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
717 node->num_loaded--;
718 if (node->num_loaded > 0)
719 return;
720 /* Last loaded data set was removed. */
721 kfree(node->loaded_info);
722 node->loaded_info = NULL;
723 node->num_loaded = 0;
724 if (!node->unloaded_info)
725 remove_node(node);
598} 726}
599 727
600/* 728/*
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info)
609 node = get_node_by_name(info->filename); 737 node = get_node_by_name(info->filename);
610 switch (action) { 738 switch (action) {
611 case GCOV_ADD: 739 case GCOV_ADD:
612 /* Add new node or revive ghost. */ 740 if (node)
613 if (!node) { 741 add_info(node, info);
742 else
614 add_node(info); 743 add_node(info);
615 break;
616 }
617 if (gcov_persist)
618 revive_node(node, info);
619 else {
620 pr_warning("could not add '%s' (already exists)\n",
621 info->filename);
622 }
623 break; 744 break;
624 case GCOV_REMOVE: 745 case GCOV_REMOVE:
625 /* Remove node or turn into ghost. */ 746 if (node)
626 if (!node) { 747 remove_info(node, info);
748 else {
627 pr_warning("could not remove '%s' (not found)\n", 749 pr_warning("could not remove '%s' (not found)\n",
628 info->filename); 750 info->filename);
629 break;
630 } 751 }
631 if (gcov_persist) {
632 if (!ghost_node(node))
633 break;
634 }
635 remove_node(node);
636 break; 752 break;
637 } 753 }
638 mutex_unlock(&node_lock); 754 mutex_unlock(&node_lock);
diff --git a/kernel/groups.c b/kernel/groups.c
index 53b1916c9492..253dc0f35cf4 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp)
143 right = group_info->ngroups; 143 right = group_info->ngroups;
144 while (left < right) { 144 while (left < right) {
145 unsigned int mid = (left+right)/2; 145 unsigned int mid = (left+right)/2;
146 int cmp = grp - GROUP_AT(group_info, mid); 146 if (grp > GROUP_AT(group_info, mid))
147 if (cmp > 0)
148 left = mid + 1; 147 left = mid + 1;
149 else if (cmp < 0) 148 else if (grp < GROUP_AT(group_info, mid))
150 right = mid; 149 right = mid;
151 else 150 else
152 return 1; 151 return 1;
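The groups_search() hunk replaces the "int cmp = grp - GROUP_AT(...)" idiom with direct comparisons: gid_t is unsigned, so the subtraction wraps for large gids and the truncated signed result can send the binary search the wrong way. A small stand-alone userspace demo of the failure mode, assuming a 32-bit unsigned gid; the values are illustrative:

#include <stdio.h>

typedef unsigned int demo_gid_t;	/* mimic an unsigned 32-bit gid_t */

int main(void)
{
	demo_gid_t grp = 0x80000001u;	/* large gid being searched for */
	demo_gid_t mid = 1u;		/* gid stored at the midpoint   */

	/* Old code: unsigned difference squeezed into a signed int.
	 * 0x80000000 becomes negative on common two's-complement ABIs. */
	int cmp = grp - mid;
	printf("cmp = %d -> old code walks the wrong half\n", cmp);

	/* New code: compare directly, no wraparound, correct direction. */
	if (grp > mid)
		printf("direct compare: grp > mid, search the right half\n");
	return 0;
}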
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ce669174f355..1decafbb6b1a 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
1091 */ 1091 */
1092ktime_t hrtimer_get_remaining(const struct hrtimer *timer) 1092ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
1093{ 1093{
1094 struct hrtimer_clock_base *base;
1095 unsigned long flags; 1094 unsigned long flags;
1096 ktime_t rem; 1095 ktime_t rem;
1097 1096
1098 base = lock_hrtimer_base(timer, &flags); 1097 lock_hrtimer_base(timer, &flags);
1099 rem = hrtimer_expires_remaining(timer); 1098 rem = hrtimer_expires_remaining(timer);
1100 unlock_hrtimer_base(timer, &flags); 1099 unlock_hrtimer_base(timer, &flags);
1101 1100
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4c0b7b3e6d2e..200407c1502f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -36,15 +36,6 @@
36# include <asm/mutex.h> 36# include <asm/mutex.h>
37#endif 37#endif
38 38
39/***
40 * mutex_init - initialize the mutex
41 * @lock: the mutex to be initialized
42 * @key: the lock_class_key for the class; used by mutex lock debugging
43 *
44 * Initialize the mutex to unlocked state.
45 *
46 * It is not allowed to initialize an already locked mutex.
47 */
48void 39void
49__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) 40__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
50{ 41{
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
68static __used noinline void __sched 59static __used noinline void __sched
69__mutex_lock_slowpath(atomic_t *lock_count); 60__mutex_lock_slowpath(atomic_t *lock_count);
70 61
71/*** 62/**
72 * mutex_lock - acquire the mutex 63 * mutex_lock - acquire the mutex
73 * @lock: the mutex to be acquired 64 * @lock: the mutex to be acquired
74 * 65 *
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);
105 96
106static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); 97static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
107 98
108/*** 99/**
109 * mutex_unlock - release the mutex 100 * mutex_unlock - release the mutex
110 * @lock: the mutex to be released 101 * @lock: the mutex to be released
111 * 102 *
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
364static noinline int __sched 355static noinline int __sched
365__mutex_lock_interruptible_slowpath(atomic_t *lock_count); 356__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
366 357
367/*** 358/**
368 * mutex_lock_interruptible - acquire the mutex, interruptable 359 * mutex_lock_interruptible - acquire the mutex, interruptible
369 * @lock: the mutex to be acquired 360 * @lock: the mutex to be acquired
370 * 361 *
371 * Lock the mutex like mutex_lock(), and return 0 if the mutex has 362 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
456 return prev == 1; 447 return prev == 1;
457} 448}
458 449
459/*** 450/**
460 * mutex_trylock - try acquire the mutex, without waiting 451 * mutex_trylock - try to acquire the mutex, without waiting
461 * @lock: the mutex to be acquired 452 * @lock: the mutex to be acquired
462 * 453 *
463 * Try to acquire the mutex atomically. Returns 1 if the mutex 454 * Try to acquire the mutex atomically. Returns 1 if the mutex
464 * has been acquired successfully, and 0 on contention. 455 * has been acquired successfully, and 0 on contention.
465 * 456 *
466 * NOTE: this function follows the spin_trylock() convention, so 457 * NOTE: this function follows the spin_trylock() convention, so
467 * it is negated to the down_trylock() return values! Be careful 458 * it is negated from the down_trylock() return values! Be careful
468 * about this when converting semaphore users to mutexes. 459 * about this when converting semaphore users to mutexes.
469 * 460 *
470 * This function must not be used in interrupt context. The 461 * This function must not be used in interrupt context. The
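The mutex.c changes are documentation-only: the stray "/***" kernel-doc openers become "/**", "interruptable" is spelled correctly, and the mutex_trylock() comment now says its return value is negated from the down_trylock() convention. A short kernel-style sketch of the two conventions side by side; the lock and semaphore names are illustrative:

#include <linux/mutex.h>
#include <linux/semaphore.h>

static DEFINE_MUTEX(example_mutex);
static struct semaphore example_sem;

static void example_trylock_conventions(void)
{
	sema_init(&example_sem, 1);

	/* mutex_trylock(): 1 means acquired, 0 means contention. */
	if (mutex_trylock(&example_mutex)) {
		/* ... critical section ... */
		mutex_unlock(&example_mutex);
	}

	/* down_trylock(): 0 means acquired, non-zero means contention. */
	if (down_trylock(&example_sem) == 0) {
		/* ... critical section ... */
		up(&example_sem);
	}
}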
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 403d1804b198..657555a5f30f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event)
402 } 402 }
403} 403}
404 404
405static inline int
406event_filter_match(struct perf_event *event)
407{
408 return event->cpu == -1 || event->cpu == smp_processor_id();
409}
410
405static void 411static void
406event_sched_out(struct perf_event *event, 412event_sched_out(struct perf_event *event,
407 struct perf_cpu_context *cpuctx, 413 struct perf_cpu_context *cpuctx,
408 struct perf_event_context *ctx) 414 struct perf_event_context *ctx)
409{ 415{
416 u64 delta;
417 /*
418 * An event which could not be activated because of
419 * filter mismatch still needs to have its timings
420 * maintained, otherwise bogus information is return
421 * via read() for time_enabled, time_running:
422 */
423 if (event->state == PERF_EVENT_STATE_INACTIVE
424 && !event_filter_match(event)) {
425 delta = ctx->time - event->tstamp_stopped;
426 event->tstamp_running += delta;
427 event->tstamp_stopped = ctx->time;
428 }
429
410 if (event->state != PERF_EVENT_STATE_ACTIVE) 430 if (event->state != PERF_EVENT_STATE_ACTIVE)
411 return; 431 return;
412 432
@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event,
432 struct perf_event_context *ctx) 452 struct perf_event_context *ctx)
433{ 453{
434 struct perf_event *event; 454 struct perf_event *event;
435 455 int state = group_event->state;
436 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
437 return;
438 456
439 event_sched_out(group_event, cpuctx, ctx); 457 event_sched_out(group_event, cpuctx, ctx);
440 458
@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event,
444 list_for_each_entry(event, &group_event->sibling_list, group_entry) 462 list_for_each_entry(event, &group_event->sibling_list, group_entry)
445 event_sched_out(event, cpuctx, ctx); 463 event_sched_out(event, cpuctx, ctx);
446 464
447 if (group_event->attr.exclusive) 465 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
448 cpuctx->exclusive = 0; 466 cpuctx->exclusive = 0;
449} 467}
450 468
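The event_sched_out() hunk keeps tstamp_running/tstamp_stopped up to date for events that stayed INACTIVE only because their CPU filter never matched, so that the time_enabled/time_running values reported by read() stay sane. A userspace sketch (not part of the patch) showing where those two fields surface, assuming a hardware cycles counter opened with the TOTAL_TIME read formats:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* read() layout when both TOTAL_TIME formats are requested. */
struct read_format {
	uint64_t value;
	uint64_t time_enabled;
	uint64_t time_running;
};

int main(void)
{
	struct perf_event_attr attr;
	struct read_format rf;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* ... run the workload of interest here ... */

	if (read(fd, &rf, sizeof(rf)) == sizeof(rf))
		printf("value=%llu enabled=%llu running=%llu\n",
		       (unsigned long long)rf.value,
		       (unsigned long long)rf.time_enabled,
		       (unsigned long long)rf.time_running);
	close(fd);
	return 0;
}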
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 996a4dec5f96..b7e4c362361b 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -212,15 +212,17 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active);
212 212
213/** 213/**
214 * pm_qos_add_request - inserts new qos request into the list 214 * pm_qos_add_request - inserts new qos request into the list
215 * @pm_qos_class: identifies which list of qos request to us 215 * @dep: pointer to a preallocated handle
216 * @pm_qos_class: identifies which list of qos request to use
216 * @value: defines the qos request 217 * @value: defines the qos request
217 * 218 *
218 * This function inserts a new entry in the pm_qos_class list of requested qos 219 * This function inserts a new entry in the pm_qos_class list of requested qos
219 * performance characteristics. It recomputes the aggregate QoS expectations 220 * performance characteristics. It recomputes the aggregate QoS expectations
220 * for the pm_qos_class of parameters, and returns the pm_qos_request list 221 * for the pm_qos_class of parameters and initializes the pm_qos_request_list
221 * element as a handle for use in updating and removal. Call needs to save 222 * handle. Caller needs to save this handle for later use in updates and
222 * this handle for later use. 223 * removal.
223 */ 224 */
225
224void pm_qos_add_request(struct pm_qos_request_list *dep, 226void pm_qos_add_request(struct pm_qos_request_list *dep,
225 int pm_qos_class, s32 value) 227 int pm_qos_class, s32 value)
226{ 228{
@@ -348,7 +350,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
348 350
349 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); 351 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
350 if (pm_qos_class >= 0) { 352 if (pm_qos_class >= 0) {
351 struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req)); 353 struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
352 if (!req) 354 if (!req)
353 return -ENOMEM; 355 return -ENOMEM;
354 356
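Two fixes land in pm_qos_params.c: the kernel-doc gains the missing @dep parameter, and pm_qos_power_open() had kzalloc()'s arguments swapped (size must come first, then the GFP flags). A hedged sketch of a driver-side user of the same API; the latency value and function names are illustrative:

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pm_qos_params.h>

static struct pm_qos_request_list *example_req;

static int example_add_latency_request(void)
{
	/* Correct kzalloc() argument order: size first, GFP flags second. */
	example_req = kzalloc(sizeof(*example_req), GFP_KERNEL);
	if (!example_req)
		return -ENOMEM;

	/* Request at most 20 usec of CPU/DMA latency (value illustrative). */
	pm_qos_add_request(example_req, PM_QOS_CPU_DMA_LATENCY, 20);
	return 0;
}

static void example_drop_latency_request(void)
{
	pm_qos_remove_request(example_req);
	kfree(example_req);
}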
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c77963938bca..8dc31e02ae12 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
338 goto Close; 338 goto Close;
339 339
340 suspend_console(); 340 suspend_console();
341 hibernation_freeze_swap();
342 saved_mask = clear_gfp_allowed_mask(GFP_IOFS); 341 saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
343 error = dpm_suspend_start(PMSG_FREEZE); 342 error = dpm_suspend_start(PMSG_FREEZE);
344 if (error) 343 if (error)
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index e8b337006276..d52359374e85 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -24,7 +24,7 @@ static void do_poweroff(struct work_struct *dummy)
24 24
25static DECLARE_WORK(poweroff_work, do_poweroff); 25static DECLARE_WORK(poweroff_work, do_poweroff);
26 26
27static void handle_poweroff(int key, struct tty_struct *tty) 27static void handle_poweroff(int key)
28{ 28{
29 /* run sysrq poweroff on boot cpu */ 29 /* run sysrq poweroff on boot cpu */
30 schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work); 30 schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5e7edfb05e66..f6cd6faf84fd 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1086,7 +1086,6 @@ void swsusp_free(void)
1086 buffer = NULL; 1086 buffer = NULL;
1087 alloc_normal = 0; 1087 alloc_normal = 0;
1088 alloc_highmem = 0; 1088 alloc_highmem = 0;
1089 hibernation_thaw_swap();
1090} 1089}
1091 1090
1092/* Helper functions used for the shrinking of memory. */ 1091/* Helper functions used for the shrinking of memory. */
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 5d0059eed3e4..e6a5bdf61a37 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap)
136{ 136{
137 unsigned long offset; 137 unsigned long offset;
138 138
139 offset = swp_offset(get_swap_for_hibernation(swap)); 139 offset = swp_offset(get_swap_page_of_type(swap));
140 if (offset) { 140 if (offset) {
141 if (swsusp_extents_insert(offset)) 141 if (swsusp_extents_insert(offset))
142 swap_free_for_hibernation(swp_entry(swap, offset)); 142 swap_free(swp_entry(swap, offset));
143 else 143 else
144 return swapdev_block(swap, offset); 144 return swapdev_block(swap, offset);
145 } 145 }
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap)
163 ext = container_of(node, struct swsusp_extent, node); 163 ext = container_of(node, struct swsusp_extent, node);
164 rb_erase(node, &swsusp_extents); 164 rb_erase(node, &swsusp_extents);
165 for (offset = ext->start; offset <= ext->end; offset++) 165 for (offset = ext->start; offset <= ext->end; offset++)
166 swap_free_for_hibernation(swp_entry(swap, offset)); 166 swap_free(swp_entry(swap, offset));
167 167
168 kfree(ext); 168 kfree(ext);
169 } 169 }
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 6c79e851521c..a23a57a976d1 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -88,7 +88,7 @@ int rcu_read_lock_bh_held(void)
88{ 88{
89 if (!debug_lockdep_rcu_enabled()) 89 if (!debug_lockdep_rcu_enabled())
90 return 1; 90 return 1;
91 return in_softirq(); 91 return in_softirq() || irqs_disabled();
92} 92}
93EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); 93EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
94 94
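rcu_read_lock_bh_held() now also returns true when interrupts are disabled, because irqs-off keeps softirqs from running on this CPU and is therefore a legitimate RCU-bh read-side context. A hedged sketch of the pattern this legalizes for lockdep; the structure and pointer names are illustrative, not from the patch:

#include <linux/rcupdate.h>
#include <linux/irqflags.h>

struct example {
	int val;
};
static struct example *example_ptr;	/* published elsewhere via rcu_assign_pointer() */

static int example_read_with_irqs_off(void)
{
	struct example *p;
	unsigned long flags;
	int val = 0;

	/*
	 * Interrupts off also keeps softirqs off on this CPU, so this is
	 * a valid RCU-bh read side even without rcu_read_lock_bh(); with
	 * this fix the lockdep check in rcu_read_lock_bh_held() agrees.
	 */
	local_irq_save(flags);
	p = rcu_dereference_bh(example_ptr);
	if (p)
		val = p->val;
	local_irq_restore(flags);

	return val;
}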
diff --git a/kernel/sched.c b/kernel/sched.c
index 41541d79e3c8..09b574e7f4df 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3865,8 +3865,16 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
3865 /* 3865 /*
3866 * Owner changed, break to re-assess state. 3866 * Owner changed, break to re-assess state.
3867 */ 3867 */
3868 if (lock->owner != owner) 3868 if (lock->owner != owner) {
3869 /*
3870 * If the lock has switched to a different owner,
3871 * we likely have heavy contention. Return 0 to quit
3872 * optimistic spinning and not contend further:
3873 */
3874 if (lock->owner)
3875 return 0;
3869 break; 3876 break;
3877 }
3870 3878
3871 /* 3879 /*
3872 * Is that owner really running on that cpu? 3880 * Is that owner really running on that cpu?
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 806d1b227a21..134f7edb30c6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1313,7 +1313,7 @@ static struct sched_group *
1313find_idlest_group(struct sched_domain *sd, struct task_struct *p, 1313find_idlest_group(struct sched_domain *sd, struct task_struct *p,
1314 int this_cpu, int load_idx) 1314 int this_cpu, int load_idx)
1315{ 1315{
1316 struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups; 1316 struct sched_group *idlest = NULL, *group = sd->groups;
1317 unsigned long min_load = ULONG_MAX, this_load = 0; 1317 unsigned long min_load = ULONG_MAX, this_load = 0;
1318 int imbalance = 100 + (sd->imbalance_pct-100)/2; 1318 int imbalance = 100 + (sd->imbalance_pct-100)/2;
1319 1319
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
1348 1348
1349 if (local_group) { 1349 if (local_group) {
1350 this_load = avg_load; 1350 this_load = avg_load;
1351 this = group;
1352 } else if (avg_load < min_load) { 1351 } else if (avg_load < min_load) {
1353 min_load = avg_load; 1352 min_load = avg_load;
1354 idlest = group; 1353 idlest = group;
@@ -3752,6 +3751,8 @@ static void task_fork_fair(struct task_struct *p)
3752 3751
3753 raw_spin_lock_irqsave(&rq->lock, flags); 3752 raw_spin_lock_irqsave(&rq->lock, flags);
3754 3753
3754 update_rq_clock(rq);
3755
3755 if (unlikely(task_cpu(p) != this_cpu)) 3756 if (unlikely(task_cpu(p) != this_cpu))
3756 __set_task_cpu(p, this_cpu); 3757 __set_task_cpu(p, this_cpu);
3757 3758
diff --git a/kernel/smp.c b/kernel/smp.c
index 75c970c715d3..ed6aacfcb7ef 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -365,9 +365,10 @@ call:
365EXPORT_SYMBOL_GPL(smp_call_function_any); 365EXPORT_SYMBOL_GPL(smp_call_function_any);
366 366
367/** 367/**
368 * __smp_call_function_single(): Run a function on another CPU 368 * __smp_call_function_single(): Run a function on a specific CPU
369 * @cpu: The CPU to run on. 369 * @cpu: The CPU to run on.
370 * @data: Pre-allocated and setup data structure 370 * @data: Pre-allocated and setup data structure
371 * @wait: If true, wait until function has completed on specified CPU.
371 * 372 *
372 * Like smp_call_function_single(), but allow caller to pass in a 373 * Like smp_call_function_single(), but allow caller to pass in a
373 * pre-allocated data structure. Useful for embedding @data inside 374 * pre-allocated data structure. Useful for embedding @data inside
@@ -376,8 +377,10 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
376void __smp_call_function_single(int cpu, struct call_single_data *data, 377void __smp_call_function_single(int cpu, struct call_single_data *data,
377 int wait) 378 int wait)
378{ 379{
379 csd_lock(data); 380 unsigned int this_cpu;
381 unsigned long flags;
380 382
383 this_cpu = get_cpu();
381 /* 384 /*
382 * Can deadlock when called with interrupts disabled. 385 * Can deadlock when called with interrupts disabled.
383 * We allow cpu's that are not yet online though, as no one else can 386 * We allow cpu's that are not yet online though, as no one else can
@@ -387,7 +390,15 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
387 WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() 390 WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
388 && !oops_in_progress); 391 && !oops_in_progress);
389 392
390 generic_exec_single(cpu, data, wait); 393 if (cpu == this_cpu) {
394 local_irq_save(flags);
395 data->func(data->info);
396 local_irq_restore(flags);
397 } else {
398 csd_lock(data);
399 generic_exec_single(cpu, data, wait);
400 }
401 put_cpu();
391} 402}
392 403
393/** 404/**
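__smp_call_function_single() now special-cases the local CPU: the function is invoked directly with interrupts disabled instead of going through csd_lock()/generic_exec_single(), so cpu == smp_processor_id() is no longer a hazard. A hedged sketch of a caller that embeds its own call_single_data; the callback and payload are illustrative:

#include <linux/smp.h>
#include <linux/kernel.h>

struct example_info {
	int value;
};

static void example_csd_func(void *info)
{
	struct example_info *ei = info;

	printk(KERN_INFO "csd ran on cpu %d, value %d\n",
	       smp_processor_id(), ei->value);
}

static struct example_info example_info = { .value = 42 };
static struct call_single_data example_csd = {
	.func	= example_csd_func,
	.info	= &example_info,
};

static void example_kick_cpu(int cpu)
{
	/*
	 * wait == 0: fire and forget.  After this patch, cpu may be the
	 * current CPU; the callback then runs inline with irqs disabled
	 * rather than via the IPI path.
	 */
	__smp_call_function_single(cpu, &example_csd, 0);
}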
diff --git a/kernel/sys.c b/kernel/sys.c
index e9ad44489828..7f5a0cd296a9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
931 pgid = pid; 931 pgid = pid;
932 if (pgid < 0) 932 if (pgid < 0)
933 return -EINVAL; 933 return -EINVAL;
934 rcu_read_lock();
934 935
935 /* From this point forward we keep holding onto the tasklist lock 936 /* From this point forward we keep holding onto the tasklist lock
936 * so that our parent does not change from under us. -DaveM 937 * so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
984out: 985out:
985 /* All paths lead to here, thus we are safe. -DaveM */ 986 /* All paths lead to here, thus we are safe. -DaveM */
986 write_unlock_irq(&tasklist_lock); 987 write_unlock_irq(&tasklist_lock);
988 rcu_read_unlock();
987 return err; 989 return err;
988} 990}
989 991
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ca38e8e3e907..f88552c6d227 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void)
1713{ 1713{
1714 sysctl_set_parent(NULL, root_table); 1714 sysctl_set_parent(NULL, root_table);
1715#ifdef CONFIG_SYSCTL_SYSCALL_CHECK 1715#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
1716 { 1716 sysctl_check_table(current->nsproxy, root_table);
1717 int err;
1718 err = sysctl_check_table(current->nsproxy, root_table);
1719 }
1720#endif 1717#endif
1721 return 0; 1718 return 0;
1722} 1719}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0d88ce9b9fb8..7cb1f45a1de1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v)
381{ 381{
382 struct ftrace_profile *rec = v; 382 struct ftrace_profile *rec = v;
383 char str[KSYM_SYMBOL_LEN]; 383 char str[KSYM_SYMBOL_LEN];
384 int ret = 0;
384#ifdef CONFIG_FUNCTION_GRAPH_TRACER 385#ifdef CONFIG_FUNCTION_GRAPH_TRACER
385 static DEFINE_MUTEX(mutex);
386 static struct trace_seq s; 386 static struct trace_seq s;
387 unsigned long long avg; 387 unsigned long long avg;
388 unsigned long long stddev; 388 unsigned long long stddev;
389#endif 389#endif
390 mutex_lock(&ftrace_profile_lock);
391
392 /* we raced with function_profile_reset() */
393 if (unlikely(rec->counter == 0)) {
394 ret = -EBUSY;
395 goto out;
396 }
390 397
391 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 398 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
392 seq_printf(m, " %-30.30s %10lu", str, rec->counter); 399 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v)
408 do_div(stddev, (rec->counter - 1) * 1000); 415 do_div(stddev, (rec->counter - 1) * 1000);
409 } 416 }
410 417
411 mutex_lock(&mutex);
412 trace_seq_init(&s); 418 trace_seq_init(&s);
413 trace_print_graph_duration(rec->time, &s); 419 trace_print_graph_duration(rec->time, &s);
414 trace_seq_puts(&s, " "); 420 trace_seq_puts(&s, " ");
@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v)
416 trace_seq_puts(&s, " "); 422 trace_seq_puts(&s, " ");
417 trace_print_graph_duration(stddev, &s); 423 trace_print_graph_duration(stddev, &s);
418 trace_print_seq(m, &s); 424 trace_print_seq(m, &s);
419 mutex_unlock(&mutex);
420#endif 425#endif
421 seq_putc(m, '\n'); 426 seq_putc(m, '\n');
427out:
428 mutex_unlock(&ftrace_profile_lock);
422 429
423 return 0; 430 return ret;
424} 431}
425 432
426static void ftrace_profile_reset(struct ftrace_profile_stat *stat) 433static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 19cccc3c3028..492197e2f86c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2985 2985
2986static void rb_advance_iter(struct ring_buffer_iter *iter) 2986static void rb_advance_iter(struct ring_buffer_iter *iter)
2987{ 2987{
2988 struct ring_buffer *buffer;
2989 struct ring_buffer_per_cpu *cpu_buffer; 2988 struct ring_buffer_per_cpu *cpu_buffer;
2990 struct ring_buffer_event *event; 2989 struct ring_buffer_event *event;
2991 unsigned length; 2990 unsigned length;
2992 2991
2993 cpu_buffer = iter->cpu_buffer; 2992 cpu_buffer = iter->cpu_buffer;
2994 buffer = cpu_buffer->buffer;
2995 2993
2996 /* 2994 /*
2997 * Check if we are at the end of the buffer. 2995 * Check if we are at the end of the buffer.
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 056468eae7cf..a6b7e0e0f3eb 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -249,7 +249,7 @@ static int trace_lookup_stack(struct seq_file *m, long i)
249{ 249{
250 unsigned long addr = stack_dump_trace[i]; 250 unsigned long addr = stack_dump_trace[i];
251 251
252 return seq_printf(m, "%pF\n", (void *)addr); 252 return seq_printf(m, "%pS\n", (void *)addr);
253} 253}
254 254
255static void print_disabled(struct seq_file *m) 255static void print_disabled(struct seq_file *m)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 613bc1f04610..7f9c3c52ecc1 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -122,7 +122,7 @@ static void __touch_watchdog(void)
122 122
123void touch_softlockup_watchdog(void) 123void touch_softlockup_watchdog(void)
124{ 124{
125 __get_cpu_var(watchdog_touch_ts) = 0; 125 __raw_get_cpu_var(watchdog_touch_ts) = 0;
126} 126}
127EXPORT_SYMBOL(touch_softlockup_watchdog); 127EXPORT_SYMBOL(touch_softlockup_watchdog);
128 128
@@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void)
142#ifdef CONFIG_HARDLOCKUP_DETECTOR 142#ifdef CONFIG_HARDLOCKUP_DETECTOR
143void touch_nmi_watchdog(void) 143void touch_nmi_watchdog(void)
144{ 144{
145 __get_cpu_var(watchdog_nmi_touch) = true; 145 if (watchdog_enabled) {
146 unsigned cpu;
147
148 for_each_present_cpu(cpu) {
149 if (per_cpu(watchdog_nmi_touch, cpu) != true)
150 per_cpu(watchdog_nmi_touch, cpu) = true;
151 }
152 }
146 touch_softlockup_watchdog(); 153 touch_softlockup_watchdog();
147} 154}
148EXPORT_SYMBOL(touch_nmi_watchdog); 155EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -206,6 +213,9 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi,
206 struct perf_sample_data *data, 213 struct perf_sample_data *data,
207 struct pt_regs *regs) 214 struct pt_regs *regs)
208{ 215{
216 /* Ensure the watchdog never gets throttled */
217 event->hw.interrupts = 0;
218
209 if (__get_cpu_var(watchdog_nmi_touch) == true) { 219 if (__get_cpu_var(watchdog_nmi_touch) == true) {
210 __get_cpu_var(watchdog_nmi_touch) = false; 220 __get_cpu_var(watchdog_nmi_touch) = false;
211 return; 221 return;
@@ -430,6 +440,9 @@ static int watchdog_enable(int cpu)
430 wake_up_process(p); 440 wake_up_process(p);
431 } 441 }
432 442
443 /* if any cpu succeeds, watchdog is considered enabled for the system */
444 watchdog_enabled = 1;
445
433 return 0; 446 return 0;
434} 447}
435 448
@@ -452,9 +465,6 @@ static void watchdog_disable(int cpu)
452 per_cpu(softlockup_watchdog, cpu) = NULL; 465 per_cpu(softlockup_watchdog, cpu) = NULL;
453 kthread_stop(p); 466 kthread_stop(p);
454 } 467 }
455
456 /* if any cpu succeeds, watchdog is considered enabled for the system */
457 watchdog_enabled = 1;
458} 468}
459 469
460static void watchdog_enable_all_cpus(void) 470static void watchdog_enable_all_cpus(void)
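The watchdog changes make touch_nmi_watchdog() mark watchdog_nmi_touch on every present CPU (when the watchdog is enabled), keep the hardlockup perf event unthrottled, and move the watchdog_enabled = 1 update from the disable path to the enable path. A hedged sketch of the classic consumer of this interface; the polling callback is hypothetical:

#include <linux/nmi.h>

/*
 * Illustrative: a long, interrupt-unfriendly polling loop that must not
 * trip the soft- or hardlockup detectors.  touch_nmi_watchdog() also
 * touches the softlockup watchdog, and after this patch the poke reaches
 * the NMI-touch state of all present CPUs.
 */
static void example_long_poll(void (*poll_once)(void), int iterations)
{
	int i;

	for (i = 0; i < iterations; i++) {
		poll_once();		/* hypothetical slow hardware poll */
		touch_nmi_watchdog();
	}
}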
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8bd600c020e5..727f24e563ae 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -90,7 +90,8 @@ enum {
90/* 90/*
91 * Structure fields follow one of the following exclusion rules. 91 * Structure fields follow one of the following exclusion rules.
92 * 92 *
93 * I: Set during initialization and read-only afterwards. 93 * I: Modifiable by initialization/destruction paths and read-only for
94 * everyone else.
94 * 95 *
95 * P: Preemption protected. Disabling preemption is enough and should 96 * P: Preemption protected. Disabling preemption is enough and should
96 * only be modified and accessed from the local cpu. 97 * only be modified and accessed from the local cpu.
@@ -198,7 +199,7 @@ typedef cpumask_var_t mayday_mask_t;
198 cpumask_test_and_set_cpu((cpu), (mask)) 199 cpumask_test_and_set_cpu((cpu), (mask))
199#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) 200#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
200#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) 201#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
201#define alloc_mayday_mask(maskp, gfp) alloc_cpumask_var((maskp), (gfp)) 202#define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp))
202#define free_mayday_mask(mask) free_cpumask_var((mask)) 203#define free_mayday_mask(mask) free_cpumask_var((mask))
203#else 204#else
204typedef unsigned long mayday_mask_t; 205typedef unsigned long mayday_mask_t;
@@ -943,10 +944,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
943 struct global_cwq *gcwq; 944 struct global_cwq *gcwq;
944 struct cpu_workqueue_struct *cwq; 945 struct cpu_workqueue_struct *cwq;
945 struct list_head *worklist; 946 struct list_head *worklist;
947 unsigned int work_flags;
946 unsigned long flags; 948 unsigned long flags;
947 949
948 debug_work_activate(work); 950 debug_work_activate(work);
949 951
952 if (WARN_ON_ONCE(wq->flags & WQ_DYING))
953 return;
954
950 /* determine gcwq to use */ 955 /* determine gcwq to use */
951 if (!(wq->flags & WQ_UNBOUND)) { 956 if (!(wq->flags & WQ_UNBOUND)) {
952 struct global_cwq *last_gcwq; 957 struct global_cwq *last_gcwq;
@@ -989,14 +994,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
989 BUG_ON(!list_empty(&work->entry)); 994 BUG_ON(!list_empty(&work->entry));
990 995
991 cwq->nr_in_flight[cwq->work_color]++; 996 cwq->nr_in_flight[cwq->work_color]++;
997 work_flags = work_color_to_flags(cwq->work_color);
992 998
993 if (likely(cwq->nr_active < cwq->max_active)) { 999 if (likely(cwq->nr_active < cwq->max_active)) {
994 cwq->nr_active++; 1000 cwq->nr_active++;
995 worklist = gcwq_determine_ins_pos(gcwq, cwq); 1001 worklist = gcwq_determine_ins_pos(gcwq, cwq);
996 } else 1002 } else {
1003 work_flags |= WORK_STRUCT_DELAYED;
997 worklist = &cwq->delayed_works; 1004 worklist = &cwq->delayed_works;
1005 }
998 1006
999 insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color)); 1007 insert_work(cwq, work, worklist, work_flags);
1000 1008
1001 spin_unlock_irqrestore(&gcwq->lock, flags); 1009 spin_unlock_irqrestore(&gcwq->lock, flags);
1002} 1010}
@@ -1215,6 +1223,7 @@ static void worker_leave_idle(struct worker *worker)
1215 * bound), %false if offline. 1223 * bound), %false if offline.
1216 */ 1224 */
1217static bool worker_maybe_bind_and_lock(struct worker *worker) 1225static bool worker_maybe_bind_and_lock(struct worker *worker)
1226__acquires(&gcwq->lock)
1218{ 1227{
1219 struct global_cwq *gcwq = worker->gcwq; 1228 struct global_cwq *gcwq = worker->gcwq;
1220 struct task_struct *task = worker->task; 1229 struct task_struct *task = worker->task;
@@ -1488,6 +1497,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
1488 * otherwise. 1497 * otherwise.
1489 */ 1498 */
1490static bool maybe_create_worker(struct global_cwq *gcwq) 1499static bool maybe_create_worker(struct global_cwq *gcwq)
1500__releases(&gcwq->lock)
1501__acquires(&gcwq->lock)
1491{ 1502{
1492 if (!need_to_create_worker(gcwq)) 1503 if (!need_to_create_worker(gcwq))
1493 return false; 1504 return false;
@@ -1662,6 +1673,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1662 struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); 1673 struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
1663 1674
1664 move_linked_works(work, pos, NULL); 1675 move_linked_works(work, pos, NULL);
1676 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1665 cwq->nr_active++; 1677 cwq->nr_active++;
1666} 1678}
1667 1679
@@ -1669,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1669 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight 1681 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1670 * @cwq: cwq of interest 1682 * @cwq: cwq of interest
1671 * @color: color of work which left the queue 1683 * @color: color of work which left the queue
1684 * @delayed: for a delayed work
1672 * 1685 *
1673 * A work either has completed or is removed from pending queue, 1686 * A work either has completed or is removed from pending queue,
1674 * decrement nr_in_flight of its cwq and handle workqueue flushing. 1687 * decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1676,19 +1689,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1676 * CONTEXT: 1689 * CONTEXT:
1677 * spin_lock_irq(gcwq->lock). 1690 * spin_lock_irq(gcwq->lock).
1678 */ 1691 */
1679static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) 1692static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1693 bool delayed)
1680{ 1694{
1681 /* ignore uncolored works */ 1695 /* ignore uncolored works */
1682 if (color == WORK_NO_COLOR) 1696 if (color == WORK_NO_COLOR)
1683 return; 1697 return;
1684 1698
1685 cwq->nr_in_flight[color]--; 1699 cwq->nr_in_flight[color]--;
1686 cwq->nr_active--;
1687 1700
1688 if (!list_empty(&cwq->delayed_works)) { 1701 if (!delayed) {
1689 /* one down, submit a delayed one */ 1702 cwq->nr_active--;
1690 if (cwq->nr_active < cwq->max_active) 1703 if (!list_empty(&cwq->delayed_works)) {
1691 cwq_activate_first_delayed(cwq); 1704 /* one down, submit a delayed one */
1705 if (cwq->nr_active < cwq->max_active)
1706 cwq_activate_first_delayed(cwq);
1707 }
1692 } 1708 }
1693 1709
1694 /* is flush in progress and are we at the flushing tip? */ 1710 /* is flush in progress and are we at the flushing tip? */
@@ -1725,6 +1741,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
1725 * spin_lock_irq(gcwq->lock) which is released and regrabbed. 1741 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1726 */ 1742 */
1727static void process_one_work(struct worker *worker, struct work_struct *work) 1743static void process_one_work(struct worker *worker, struct work_struct *work)
1744__releases(&gcwq->lock)
1745__acquires(&gcwq->lock)
1728{ 1746{
1729 struct cpu_workqueue_struct *cwq = get_work_cwq(work); 1747 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1730 struct global_cwq *gcwq = cwq->gcwq; 1748 struct global_cwq *gcwq = cwq->gcwq;
@@ -1823,7 +1841,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
1823 hlist_del_init(&worker->hentry); 1841 hlist_del_init(&worker->hentry);
1824 worker->current_work = NULL; 1842 worker->current_work = NULL;
1825 worker->current_cwq = NULL; 1843 worker->current_cwq = NULL;
1826 cwq_dec_nr_in_flight(cwq, work_color); 1844 cwq_dec_nr_in_flight(cwq, work_color, false);
1827} 1845}
1828 1846
1829/** 1847/**
@@ -2388,7 +2406,8 @@ static int try_to_grab_pending(struct work_struct *work)
2388 debug_work_deactivate(work); 2406 debug_work_deactivate(work);
2389 list_del_init(&work->entry); 2407 list_del_init(&work->entry);
2390 cwq_dec_nr_in_flight(get_work_cwq(work), 2408 cwq_dec_nr_in_flight(get_work_cwq(work),
2391 get_work_color(work)); 2409 get_work_color(work),
2410 *work_data_bits(work) & WORK_STRUCT_DELAYED);
2392 ret = 1; 2411 ret = 1;
2393 } 2412 }
2394 } 2413 }
@@ -2791,7 +2810,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
2791 if (IS_ERR(rescuer->task)) 2810 if (IS_ERR(rescuer->task))
2792 goto err; 2811 goto err;
2793 2812
2794 wq->rescuer = rescuer;
2795 rescuer->task->flags |= PF_THREAD_BOUND; 2813 rescuer->task->flags |= PF_THREAD_BOUND;
2796 wake_up_process(rescuer->task); 2814 wake_up_process(rescuer->task);
2797 } 2815 }
@@ -2833,6 +2851,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
2833{ 2851{
2834 unsigned int cpu; 2852 unsigned int cpu;
2835 2853
2854 wq->flags |= WQ_DYING;
2836 flush_workqueue(wq); 2855 flush_workqueue(wq);
2837 2856
2838 /* 2857 /*
@@ -2857,6 +2876,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
2857 if (wq->flags & WQ_RESCUER) { 2876 if (wq->flags & WQ_RESCUER) {
2858 kthread_stop(wq->rescuer->task); 2877 kthread_stop(wq->rescuer->task);
2859 free_mayday_mask(wq->mayday_mask); 2878 free_mayday_mask(wq->mayday_mask);
2879 kfree(wq->rescuer);
2860 } 2880 }
2861 2881
2862 free_cwqs(wq); 2882 free_cwqs(wq);
@@ -3239,6 +3259,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
3239 * multiple times. To be used by cpu_callback. 3259 * multiple times. To be used by cpu_callback.
3240 */ 3260 */
3241static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) 3261static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3262__releases(&gcwq->lock)
3263__acquires(&gcwq->lock)
3242{ 3264{
3243 if (!(gcwq->trustee_state == state || 3265 if (!(gcwq->trustee_state == state ||
3244 gcwq->trustee_state == TRUSTEE_DONE)) { 3266 gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3545,8 +3567,7 @@ static int __init init_workqueues(void)
3545 spin_lock_init(&gcwq->lock); 3567 spin_lock_init(&gcwq->lock);
3546 INIT_LIST_HEAD(&gcwq->worklist); 3568 INIT_LIST_HEAD(&gcwq->worklist);
3547 gcwq->cpu = cpu; 3569 gcwq->cpu = cpu;
3548 if (cpu == WORK_CPU_UNBOUND) 3570 gcwq->flags |= GCWQ_DISASSOCIATED;
3549 gcwq->flags |= GCWQ_DISASSOCIATED;
3550 3571
3551 INIT_LIST_HEAD(&gcwq->idle_list); 3572 INIT_LIST_HEAD(&gcwq->idle_list);
3552 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) 3573 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3570,6 +3591,8 @@ static int __init init_workqueues(void)
3570 struct global_cwq *gcwq = get_gcwq(cpu); 3591 struct global_cwq *gcwq = get_gcwq(cpu);
3571 struct worker *worker; 3592 struct worker *worker;
3572 3593
3594 if (cpu != WORK_CPU_UNBOUND)
3595 gcwq->flags &= ~GCWQ_DISASSOCIATED;
3573 worker = create_worker(gcwq, true); 3596 worker = create_worker(gcwq, true);
3574 BUG_ON(!worker); 3597 BUG_ON(!worker);
3575 spin_lock_irq(&gcwq->lock); 3598 spin_lock_irq(&gcwq->lock);
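The workqueue hunks add a WORK_STRUCT_DELAYED flag so items parked on the delayed list are accounted separately in cwq_dec_nr_in_flight(), warn when work is queued on a workqueue that is being destroyed (WQ_DYING), free the rescuer in destroy_workqueue(), and initialize every gcwq as GCWQ_DISASSOCIATED, clearing the flag only for CPUs that come online. A hedged sketch of the caller-visible lifecycle; the queue name, callback and max_active value are illustrative:

#include <linux/workqueue.h>
#include <linux/kernel.h>

static void example_work_fn(struct work_struct *work)
{
	printk(KERN_INFO "example work ran\n");
}

static DECLARE_WORK(example_work, example_work_fn);

static void example_wq_lifecycle(void)
{
	/*
	 * max_active = 1: any further work items wait on the delayed list,
	 * which is what the new WORK_STRUCT_DELAYED flag tracks.
	 */
	struct workqueue_struct *wq = alloc_workqueue("example_wq", 0, 1);

	if (!wq)
		return;

	queue_work(wq, &example_work);
	destroy_workqueue(wq);	/* sets WQ_DYING, then flushes and frees */

	/*
	 * Queueing to wq from here on is a bug; with this patch
	 * __queue_work() catches it early via
	 * WARN_ON_ONCE(wq->flags & WQ_DYING) instead of proceeding.
	 */
}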