Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditsc.c             5
-rw-r--r--  kernel/cgroup.c            153
-rw-r--r--  kernel/futex.c              50
-rw-r--r--  kernel/futex_compat.c        9
-rw-r--r--  kernel/irq/chip.c           20
-rw-r--r--  kernel/irq/spurious.c        3
-rw-r--r--  kernel/lockdep.c             8
-rw-r--r--  kernel/marker.c              4
-rw-r--r--  kernel/module.c              7
-rw-r--r--  kernel/power/disk.c          4
-rw-r--r--  kernel/power/snapshot.c     42
-rw-r--r--  kernel/printk.c              2
-rw-r--r--  kernel/rcupreempt.c        233
-rw-r--r--  kernel/sched.c              21
-rw-r--r--  kernel/sched_fair.c         13
-rw-r--r--  kernel/softirq.c             1
-rw-r--r--  kernel/softlockup.c         13
-rw-r--r--  kernel/time/tick-sched.c     3
-rw-r--r--  kernel/time/timer_list.c     4
19 files changed, 457 insertions, 138 deletions
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index ac6d9b23b018..2087d6de67ea 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1000,9 +1000,10 @@ static int audit_log_single_execve_arg(struct audit_context *context,
1000 * for strings that are too long, we should not have created 1000 * for strings that are too long, we should not have created
1001 * any. 1001 * any.
1002 */ 1002 */
1003 if (unlikely((len = -1) || len > MAX_ARG_STRLEN - 1)) { 1003 if (unlikely((len == -1) || len > MAX_ARG_STRLEN - 1)) {
1004 WARN_ON(1); 1004 WARN_ON(1);
1005 send_sig(SIGKILL, current, 0); 1005 send_sig(SIGKILL, current, 0);
1006 return -1;
1006 } 1007 }
1007 1008
1008 /* walk the whole argument looking for non-ascii chars */ 1009 /* walk the whole argument looking for non-ascii chars */
@@ -1020,6 +1021,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
1020 if (ret) { 1021 if (ret) {
1021 WARN_ON(1); 1022 WARN_ON(1);
1022 send_sig(SIGKILL, current, 0); 1023 send_sig(SIGKILL, current, 0);
1024 return -1;
1023 } 1025 }
1024 buf[to_send] = '\0'; 1026 buf[to_send] = '\0';
1025 has_cntl = audit_string_contains_control(buf, to_send); 1027 has_cntl = audit_string_contains_control(buf, to_send);
@@ -1083,6 +1085,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
1083 if (ret) { 1085 if (ret) {
1084 WARN_ON(1); 1086 WARN_ON(1);
1085 send_sig(SIGKILL, current, 0); 1087 send_sig(SIGKILL, current, 0);
1088 return -1;
1086 } 1089 }
1087 buf[to_send] = '\0'; 1090 buf[to_send] = '\0';
1088 1091
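
Two independent fixes are folded into these auditsc.c hunks: the condition now uses '==' instead of '=' (the assignment made the test unconditionally true and clobbered len), and every error path returns -1 instead of falling through after WARN_ON()/send_sig(). A minimal userspace sketch of the corrected shape, with log_arg() as a made-up stand-in rather than the real audit helper:

#include <stdio.h>

#define MAX_ARG_STRLEN 4096

/* Hypothetical stand-in for the audit argument-length check. */
static int log_arg(long len)
{
	/* '==' matters: 'len = -1' would overwrite len and make the
	 * whole condition evaluate to true on every call. */
	if (len == -1 || len > MAX_ARG_STRLEN - 1) {
		fprintf(stderr, "bad argument length %ld\n", len);
		return -1;	/* bail out instead of using the bad length */
	}
	printf("argument of %ld bytes accepted\n", len);
	return 0;
}

int main(void)
{
	log_arg(16);
	log_arg(-1);
	return 0;
}
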
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 4766bb65e4d9..d8abe996e009 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -113,9 +113,9 @@ static int root_count;
113#define dummytop (&rootnode.top_cgroup) 113#define dummytop (&rootnode.top_cgroup)
114 114
115/* This flag indicates whether tasks in the fork and exit paths should 115/* This flag indicates whether tasks in the fork and exit paths should
116 * take callback_mutex and check for fork/exit handlers to call. This 116 * check for fork/exit handlers to call. This avoids us having to do
117 * avoids us having to do extra work in the fork/exit path if none of the 117 * extra work in the fork/exit path if none of the subsystems need to
118 * subsystems need to be called. 118 * be called.
119 */ 119 */
120static int need_forkexit_callback; 120static int need_forkexit_callback;
121 121
@@ -307,7 +307,6 @@ static inline void put_css_set_taskexit(struct css_set *cg)
307 * template: location in which to build the desired set of subsystem 307 * template: location in which to build the desired set of subsystem
308 * state objects for the new cgroup group 308 * state objects for the new cgroup group
309 */ 309 */
310
311static struct css_set *find_existing_css_set( 310static struct css_set *find_existing_css_set(
312 struct css_set *oldcg, 311 struct css_set *oldcg,
313 struct cgroup *cgrp, 312 struct cgroup *cgrp,
@@ -320,7 +319,7 @@ static struct css_set *find_existing_css_set(
320 /* Built the set of subsystem state objects that we want to 319 /* Built the set of subsystem state objects that we want to
321 * see in the new css_set */ 320 * see in the new css_set */
322 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { 321 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
323 if (root->subsys_bits & (1ull << i)) { 322 if (root->subsys_bits & (1UL << i)) {
324 /* Subsystem is in this hierarchy. So we want 323 /* Subsystem is in this hierarchy. So we want
325 * the subsystem state from the new 324 * the subsystem state from the new
326 * cgroup */ 325 * cgroup */
@@ -354,7 +353,6 @@ static struct css_set *find_existing_css_set(
354 * and chains them on tmp through their cgrp_link_list fields. Returns 0 on 353 * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
355 * success or a negative error 354 * success or a negative error
356 */ 355 */
357
358static int allocate_cg_links(int count, struct list_head *tmp) 356static int allocate_cg_links(int count, struct list_head *tmp)
359{ 357{
360 struct cg_cgroup_link *link; 358 struct cg_cgroup_link *link;
@@ -396,7 +394,6 @@ static void free_cg_links(struct list_head *tmp)
396 * substituted into the appropriate hierarchy. Must be called with 394 * substituted into the appropriate hierarchy. Must be called with
397 * cgroup_mutex held 395 * cgroup_mutex held
398 */ 396 */
399
400static struct css_set *find_css_set( 397static struct css_set *find_css_set(
401 struct css_set *oldcg, struct cgroup *cgrp) 398 struct css_set *oldcg, struct cgroup *cgrp)
402{ 399{
@@ -473,7 +470,6 @@ static struct css_set *find_css_set(
473 /* Link this cgroup group into the list */ 470 /* Link this cgroup group into the list */
474 list_add(&res->list, &init_css_set.list); 471 list_add(&res->list, &init_css_set.list);
475 css_set_count++; 472 css_set_count++;
476 INIT_LIST_HEAD(&res->tasks);
477 write_unlock(&css_set_lock); 473 write_unlock(&css_set_lock);
478 474
479 return res; 475 return res;
@@ -507,8 +503,8 @@ static struct css_set *find_css_set(
507 * critical pieces of code here. The exception occurs on cgroup_exit(), 503 * critical pieces of code here. The exception occurs on cgroup_exit(),
508 * when a task in a notify_on_release cgroup exits. Then cgroup_mutex 504 * when a task in a notify_on_release cgroup exits. Then cgroup_mutex
509 * is taken, and if the cgroup count is zero, a usermode call made 505 * is taken, and if the cgroup count is zero, a usermode call made
510 * to /sbin/cgroup_release_agent with the name of the cgroup (path 506 * to the release agent with the name of the cgroup (path relative to
511 * relative to the root of cgroup file system) as the argument. 507 * the root of cgroup file system) as the argument.
512 * 508 *
513 * A cgroup can only be deleted if both its 'count' of using tasks 509 * A cgroup can only be deleted if both its 'count' of using tasks
514 * is zero, and its list of 'children' cgroups is empty. Since all 510 * is zero, and its list of 'children' cgroups is empty. Since all
@@ -521,7 +517,7 @@ static struct css_set *find_css_set(
521 * 517 *
522 * The need for this exception arises from the action of 518 * The need for this exception arises from the action of
523 * cgroup_attach_task(), which overwrites one tasks cgroup pointer with 519 * cgroup_attach_task(), which overwrites one tasks cgroup pointer with
524 * another. It does so using cgroup_mutexe, however there are 520 * another. It does so using cgroup_mutex, however there are
525 * several performance critical places that need to reference 521 * several performance critical places that need to reference
526 * task->cgroup without the expense of grabbing a system global 522 * task->cgroup without the expense of grabbing a system global
527 * mutex. Therefore except as noted below, when dereferencing or, as 523 * mutex. Therefore except as noted below, when dereferencing or, as
@@ -537,7 +533,6 @@ static struct css_set *find_css_set(
537 * cgroup_lock - lock out any changes to cgroup structures 533 * cgroup_lock - lock out any changes to cgroup structures
538 * 534 *
539 */ 535 */
540
541void cgroup_lock(void) 536void cgroup_lock(void)
542{ 537{
543 mutex_lock(&cgroup_mutex); 538 mutex_lock(&cgroup_mutex);
@@ -548,7 +543,6 @@ void cgroup_lock(void)
548 * 543 *
549 * Undo the lock taken in a previous cgroup_lock() call. 544 * Undo the lock taken in a previous cgroup_lock() call.
550 */ 545 */
551
552void cgroup_unlock(void) 546void cgroup_unlock(void)
553{ 547{
554 mutex_unlock(&cgroup_mutex); 548 mutex_unlock(&cgroup_mutex);
@@ -590,7 +584,6 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
590 * Call subsys's pre_destroy handler. 584 * Call subsys's pre_destroy handler.
591 * This is called before css refcnt check. 585 * This is called before css refcnt check.
592 */ 586 */
593
594static void cgroup_call_pre_destroy(struct cgroup *cgrp) 587static void cgroup_call_pre_destroy(struct cgroup *cgrp)
595{ 588{
596 struct cgroup_subsys *ss; 589 struct cgroup_subsys *ss;
@@ -600,7 +593,6 @@ static void cgroup_call_pre_destroy(struct cgroup *cgrp)
600 return; 593 return;
601} 594}
602 595
603
604static void cgroup_diput(struct dentry *dentry, struct inode *inode) 596static void cgroup_diput(struct dentry *dentry, struct inode *inode)
605{ 597{
606 /* is dentry a directory ? if so, kfree() associated cgroup */ 598 /* is dentry a directory ? if so, kfree() associated cgroup */
@@ -696,7 +688,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
696 added_bits = final_bits & ~root->actual_subsys_bits; 688 added_bits = final_bits & ~root->actual_subsys_bits;
697 /* Check that any added subsystems are currently free */ 689 /* Check that any added subsystems are currently free */
698 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { 690 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
699 unsigned long long bit = 1ull << i; 691 unsigned long bit = 1UL << i;
700 struct cgroup_subsys *ss = subsys[i]; 692 struct cgroup_subsys *ss = subsys[i];
701 if (!(bit & added_bits)) 693 if (!(bit & added_bits))
702 continue; 694 continue;
@@ -927,7 +919,6 @@ static int cgroup_get_rootdir(struct super_block *sb)
927 if (!inode) 919 if (!inode)
928 return -ENOMEM; 920 return -ENOMEM;
929 921
930 inode->i_op = &simple_dir_inode_operations;
931 inode->i_fop = &simple_dir_operations; 922 inode->i_fop = &simple_dir_operations;
932 inode->i_op = &cgroup_dir_inode_operations; 923 inode->i_op = &cgroup_dir_inode_operations;
933 /* directories start off with i_nlink == 2 (for "." entry) */ 924 /* directories start off with i_nlink == 2 (for "." entry) */
@@ -961,8 +952,11 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
961 } 952 }
962 953
963 root = kzalloc(sizeof(*root), GFP_KERNEL); 954 root = kzalloc(sizeof(*root), GFP_KERNEL);
964 if (!root) 955 if (!root) {
956 if (opts.release_agent)
957 kfree(opts.release_agent);
965 return -ENOMEM; 958 return -ENOMEM;
959 }
966 960
967 init_cgroup_root(root); 961 init_cgroup_root(root);
968 root->subsys_bits = opts.subsys_bits; 962 root->subsys_bits = opts.subsys_bits;
@@ -1129,8 +1123,13 @@ static inline struct cftype *__d_cft(struct dentry *dentry)
1129 return dentry->d_fsdata; 1123 return dentry->d_fsdata;
1130} 1124}
1131 1125
1132/* 1126/**
1133 * Called with cgroup_mutex held. Writes path of cgroup into buf. 1127 * cgroup_path - generate the path of a cgroup
1128 * @cgrp: the cgroup in question
1129 * @buf: the buffer to write the path into
1130 * @buflen: the length of the buffer
1131 *
1132 * Called with cgroup_mutex held. Writes path of cgroup into buf.
1134 * Returns 0 on success, -errno on error. 1133 * Returns 0 on success, -errno on error.
1135 */ 1134 */
1136int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) 1135int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
@@ -1188,11 +1187,13 @@ static void get_first_subsys(const struct cgroup *cgrp,
1188 *subsys_id = test_ss->subsys_id; 1187 *subsys_id = test_ss->subsys_id;
1189} 1188}
1190 1189
1191/* 1190/**
1192 * Attach task 'tsk' to cgroup 'cgrp' 1191 * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
1192 * @cgrp: the cgroup the task is attaching to
1193 * @tsk: the task to be attached
1193 * 1194 *
1194 * Call holding cgroup_mutex. May take task_lock of 1195 * Call holding cgroup_mutex. May take task_lock of
1195 * the task 'pid' during call. 1196 * the task 'tsk' during call.
1196 */ 1197 */
1197int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) 1198int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1198{ 1199{
@@ -1293,7 +1294,6 @@ static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf)
1293} 1294}
1294 1295
1295/* The various types of files and directories in a cgroup file system */ 1296/* The various types of files and directories in a cgroup file system */
1296
1297enum cgroup_filetype { 1297enum cgroup_filetype {
1298 FILE_ROOT, 1298 FILE_ROOT,
1299 FILE_DIR, 1299 FILE_DIR,
@@ -1584,12 +1584,11 @@ static int cgroup_create_file(struct dentry *dentry, int mode,
1584} 1584}
1585 1585
1586/* 1586/*
1587 * cgroup_create_dir - create a directory for an object. 1587 * cgroup_create_dir - create a directory for an object.
1588 * cgrp: the cgroup we create the directory for. 1588 * @cgrp: the cgroup we create the directory for. It must have a valid
1589 * It must have a valid ->parent field 1589 * ->parent field. And we are going to fill its ->dentry field.
1590 * And we are going to fill its ->dentry field. 1590 * @dentry: dentry of the new cgroup
1591 * dentry: dentry of the new cgroup 1591 * @mode: mode to set on new directory.
1592 * mode: mode to set on new directory.
1593 */ 1592 */
1594static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry, 1593static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
1595 int mode) 1594 int mode)
@@ -1651,8 +1650,12 @@ int cgroup_add_files(struct cgroup *cgrp,
1651 return 0; 1650 return 0;
1652} 1651}
1653 1652
1654/* Count the number of tasks in a cgroup. */ 1653/**
1655 1654 * cgroup_task_count - count the number of tasks in a cgroup.
1655 * @cgrp: the cgroup in question
1656 *
1657 * Return the number of tasks in the cgroup.
1658 */
1656int cgroup_task_count(const struct cgroup *cgrp) 1659int cgroup_task_count(const struct cgroup *cgrp)
1657{ 1660{
1658 int count = 0; 1661 int count = 0;
@@ -1962,12 +1965,13 @@ static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp)
1962} 1965}
1963 1966
1964/** 1967/**
1965 * Build and fill cgroupstats so that taskstats can export it to user 1968 * cgroupstats_build - build and fill cgroupstats
1966 * space.
1967 *
1968 * @stats: cgroupstats to fill information into 1969 * @stats: cgroupstats to fill information into
1969 * @dentry: A dentry entry belonging to the cgroup for which stats have 1970 * @dentry: A dentry entry belonging to the cgroup for which stats have
1970 * been requested. 1971 * been requested.
1972 *
1973 * Build and fill cgroupstats so that taskstats can export it to user
1974 * space.
1971 */ 1975 */
1972int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) 1976int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
1973{ 1977{
@@ -2199,14 +2203,13 @@ static void init_cgroup_css(struct cgroup_subsys_state *css,
2199} 2203}
2200 2204
2201/* 2205/*
2202 * cgroup_create - create a cgroup 2206 * cgroup_create - create a cgroup
2203 * parent: cgroup that will be parent of the new cgroup. 2207 * @parent: cgroup that will be parent of the new cgroup
2204 * name: name of the new cgroup. Will be strcpy'ed. 2208 * @dentry: dentry of the new cgroup
2205 * mode: mode to set on new inode 2209 * @mode: mode to set on new inode
2206 * 2210 *
2207 * Must be called with the mutex on the parent inode held 2211 * Must be called with the mutex on the parent inode held
2208 */ 2212 */
2209
2210static long cgroup_create(struct cgroup *parent, struct dentry *dentry, 2213static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
2211 int mode) 2214 int mode)
2212{ 2215{
@@ -2349,13 +2352,12 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
2349 parent = cgrp->parent; 2352 parent = cgrp->parent;
2350 root = cgrp->root; 2353 root = cgrp->root;
2351 sb = root->sb; 2354 sb = root->sb;
2355
2352 /* 2356 /*
2353 * Call pre_destroy handlers of subsys 2357 * Call pre_destroy handlers of subsys. Notify subsystems
2358 * that rmdir() request comes.
2354 */ 2359 */
2355 cgroup_call_pre_destroy(cgrp); 2360 cgroup_call_pre_destroy(cgrp);
2356 /*
2357 * Notify subsyses that rmdir() request comes.
2358 */
2359 2361
2360 if (cgroup_has_css_refs(cgrp)) { 2362 if (cgroup_has_css_refs(cgrp)) {
2361 mutex_unlock(&cgroup_mutex); 2363 mutex_unlock(&cgroup_mutex);
@@ -2431,8 +2433,10 @@ static void cgroup_init_subsys(struct cgroup_subsys *ss)
2431} 2433}
2432 2434
2433/** 2435/**
2434 * cgroup_init_early - initialize cgroups at system boot, and 2436 * cgroup_init_early - cgroup initialization at system boot
2435 * initialize any subsystems that request early init. 2437 *
2438 * Initialize cgroups at system boot, and initialize any
2439 * subsystems that request early init.
2436 */ 2440 */
2437int __init cgroup_init_early(void) 2441int __init cgroup_init_early(void)
2438{ 2442{
@@ -2474,8 +2478,10 @@ int __init cgroup_init_early(void)
2474} 2478}
2475 2479
2476/** 2480/**
2477 * cgroup_init - register cgroup filesystem and /proc file, and 2481 * cgroup_init - cgroup initialization
2478 * initialize any subsystems that didn't request early init. 2482 *
2483 * Register cgroup filesystem and /proc file, and initialize
2484 * any subsystems that didn't request early init.
2479 */ 2485 */
2480int __init cgroup_init(void) 2486int __init cgroup_init(void)
2481{ 2487{
@@ -2618,7 +2624,7 @@ static struct file_operations proc_cgroupstats_operations = {
2618 2624
2619/** 2625/**
2620 * cgroup_fork - attach newly forked task to its parents cgroup. 2626 * cgroup_fork - attach newly forked task to its parents cgroup.
2621 * @tsk: pointer to task_struct of forking parent process. 2627 * @child: pointer to task_struct of forking parent process.
2622 * 2628 *
2623 * Description: A task inherits its parent's cgroup at fork(). 2629 * Description: A task inherits its parent's cgroup at fork().
2624 * 2630 *
@@ -2642,9 +2648,12 @@ void cgroup_fork(struct task_struct *child)
2642} 2648}
2643 2649
2644/** 2650/**
2645 * cgroup_fork_callbacks - called on a new task very soon before 2651 * cgroup_fork_callbacks - run fork callbacks
2646 * adding it to the tasklist. No need to take any locks since no-one 2652 * @child: the new task
2647 * can be operating on this task 2653 *
2654 * Called on a new task very soon before adding it to the
2655 * tasklist. No need to take any locks since no-one can
2656 * be operating on this task.
2648 */ 2657 */
2649void cgroup_fork_callbacks(struct task_struct *child) 2658void cgroup_fork_callbacks(struct task_struct *child)
2650{ 2659{
@@ -2659,11 +2668,14 @@ void cgroup_fork_callbacks(struct task_struct *child)
2659} 2668}
2660 2669
2661/** 2670/**
2662 * cgroup_post_fork - called on a new task after adding it to the 2671 * cgroup_post_fork - called on a new task after adding it to the task list
2663 * task list. Adds the task to the list running through its css_set 2672 * @child: the task in question
2664 * if necessary. Has to be after the task is visible on the task list 2673 *
2665 * in case we race with the first call to cgroup_iter_start() - to 2674 * Adds the task to the list running through its css_set if necessary.
2666 * guarantee that the new task ends up on its list. */ 2675 * Has to be after the task is visible on the task list in case we race
2676 * with the first call to cgroup_iter_start() - to guarantee that the
2677 * new task ends up on its list.
2678 */
2667void cgroup_post_fork(struct task_struct *child) 2679void cgroup_post_fork(struct task_struct *child)
2668{ 2680{
2669 if (use_task_css_set_links) { 2681 if (use_task_css_set_links) {
@@ -2676,6 +2688,7 @@ void cgroup_post_fork(struct task_struct *child)
2676/** 2688/**
2677 * cgroup_exit - detach cgroup from exiting task 2689 * cgroup_exit - detach cgroup from exiting task
2678 * @tsk: pointer to task_struct of exiting process 2690 * @tsk: pointer to task_struct of exiting process
2691 * @run_callback: run exit callbacks?
2679 * 2692 *
2680 * Description: Detach cgroup from @tsk and release it. 2693 * Description: Detach cgroup from @tsk and release it.
2681 * 2694 *
@@ -2706,7 +2719,6 @@ void cgroup_post_fork(struct task_struct *child)
2706 * top_cgroup isn't going away, and either task has PF_EXITING set, 2719 * top_cgroup isn't going away, and either task has PF_EXITING set,
2707 * which wards off any cgroup_attach_task() attempts, or task is a failed 2720 * which wards off any cgroup_attach_task() attempts, or task is a failed
2708 * fork, never visible to cgroup_attach_task. 2721 * fork, never visible to cgroup_attach_task.
2709 *
2710 */ 2722 */
2711void cgroup_exit(struct task_struct *tsk, int run_callbacks) 2723void cgroup_exit(struct task_struct *tsk, int run_callbacks)
2712{ 2724{
@@ -2743,9 +2755,13 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
2743} 2755}
2744 2756
2745/** 2757/**
2746 * cgroup_clone - duplicate the current cgroup in the hierarchy 2758 * cgroup_clone - clone the cgroup the given subsystem is attached to
2747 * that the given subsystem is attached to, and move this task into 2759 * @tsk: the task to be moved
2748 * the new child 2760 * @subsys: the given subsystem
2761 *
2762 * Duplicate the current cgroup in the hierarchy that the given
2763 * subsystem is attached to, and move this task into the new
2764 * child.
2749 */ 2765 */
2750int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) 2766int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
2751{ 2767{
@@ -2858,9 +2874,12 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
2858 return ret; 2874 return ret;
2859} 2875}
2860 2876
2861/* 2877/**
2862 * See if "cgrp" is a descendant of the current task's cgroup in 2878 * cgroup_is_descendant - see if @cgrp is a descendant of current task's cgrp
2863 * the appropriate hierarchy 2879 * @cgrp: the cgroup in question
2880 *
2881 * See if @cgrp is a descendant of the current task's cgroup in
2882 * the appropriate hierarchy.
2864 * 2883 *
2865 * If we are sending in dummytop, then presumably we are creating 2884 * If we are sending in dummytop, then presumably we are creating
2866 * the top cgroup in the subsystem. 2885 * the top cgroup in the subsystem.
@@ -2939,9 +2958,7 @@ void __css_put(struct cgroup_subsys_state *css)
2939 * release agent task. We don't bother to wait because the caller of 2958 * release agent task. We don't bother to wait because the caller of
2940 * this routine has no use for the exit status of the release agent 2959 * this routine has no use for the exit status of the release agent
2941 * task, so no sense holding our caller up for that. 2960 * task, so no sense holding our caller up for that.
2942 *
2943 */ 2961 */
2944
2945static void cgroup_release_agent(struct work_struct *work) 2962static void cgroup_release_agent(struct work_struct *work)
2946{ 2963{
2947 BUG_ON(work != &release_agent_work); 2964 BUG_ON(work != &release_agent_work);
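
Most of the cgroup.c hunks convert block comments to kernel-doc and drop stray blank lines; the functional changes are the kfree(opts.release_agent) on the kzalloc() failure path, the dropped duplicate i_op assignment and INIT_LIST_HEAD(&res->tasks), and the switch from 1ull to 1UL so the per-subsystem mask has the same type as the unsigned long subsys_bits field it is tested against. A small userspace illustration of that last point, with subsys_bits as a local variable standing in for root->subsys_bits:

#include <stdio.h>

int main(void)
{
	unsigned long subsys_bits = 0;	/* same type as root->subsys_bits */

	/* 1UL << i has the width of unsigned long, matching subsys_bits;
	 * 1ull << i is always 64 bits wide, which is wider than the field
	 * on 32-bit builds. */
	printf("sizeof(1UL << 3)    = %zu\n", sizeof(1UL << 3));
	printf("sizeof(1ull << 3)   = %zu\n", sizeof(1ull << 3));
	printf("sizeof(subsys_bits) = %zu\n", sizeof(subsys_bits));
	return 0;
}
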
diff --git a/kernel/futex.c b/kernel/futex.c
index 221f2128a437..06968cd79200 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -60,6 +60,8 @@
60 60
61#include "rtmutex_common.h" 61#include "rtmutex_common.h"
62 62
63int __read_mostly futex_cmpxchg_enabled;
64
63#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) 65#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
64 66
65/* 67/*
@@ -469,6 +471,8 @@ void exit_pi_state_list(struct task_struct *curr)
469 struct futex_hash_bucket *hb; 471 struct futex_hash_bucket *hb;
470 union futex_key key; 472 union futex_key key;
471 473
474 if (!futex_cmpxchg_enabled)
475 return;
472 /* 476 /*
473 * We are a ZOMBIE and nobody can enqueue itself on 477 * We are a ZOMBIE and nobody can enqueue itself on
474 * pi_state_list anymore, but we have to be careful 478 * pi_state_list anymore, but we have to be careful
@@ -1870,6 +1874,8 @@ asmlinkage long
1870sys_set_robust_list(struct robust_list_head __user *head, 1874sys_set_robust_list(struct robust_list_head __user *head,
1871 size_t len) 1875 size_t len)
1872{ 1876{
1877 if (!futex_cmpxchg_enabled)
1878 return -ENOSYS;
1873 /* 1879 /*
1874 * The kernel knows only one size for now: 1880 * The kernel knows only one size for now:
1875 */ 1881 */
@@ -1894,6 +1900,9 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
1894 struct robust_list_head __user *head; 1900 struct robust_list_head __user *head;
1895 unsigned long ret; 1901 unsigned long ret;
1896 1902
1903 if (!futex_cmpxchg_enabled)
1904 return -ENOSYS;
1905
1897 if (!pid) 1906 if (!pid)
1898 head = current->robust_list; 1907 head = current->robust_list;
1899 else { 1908 else {
@@ -1997,6 +2006,9 @@ void exit_robust_list(struct task_struct *curr)
1997 unsigned long futex_offset; 2006 unsigned long futex_offset;
1998 int rc; 2007 int rc;
1999 2008
2009 if (!futex_cmpxchg_enabled)
2010 return;
2011
2000 /* 2012 /*
2001 * Fetch the list head (which was registered earlier, via 2013 * Fetch the list head (which was registered earlier, via
2002 * sys_set_robust_list()): 2014 * sys_set_robust_list()):
@@ -2051,7 +2063,7 @@ void exit_robust_list(struct task_struct *curr)
2051long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, 2063long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2052 u32 __user *uaddr2, u32 val2, u32 val3) 2064 u32 __user *uaddr2, u32 val2, u32 val3)
2053{ 2065{
2054 int ret; 2066 int ret = -ENOSYS;
2055 int cmd = op & FUTEX_CMD_MASK; 2067 int cmd = op & FUTEX_CMD_MASK;
2056 struct rw_semaphore *fshared = NULL; 2068 struct rw_semaphore *fshared = NULL;
2057 2069
@@ -2083,13 +2095,16 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2083 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3); 2095 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
2084 break; 2096 break;
2085 case FUTEX_LOCK_PI: 2097 case FUTEX_LOCK_PI:
2086 ret = futex_lock_pi(uaddr, fshared, val, timeout, 0); 2098 if (futex_cmpxchg_enabled)
2099 ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
2087 break; 2100 break;
2088 case FUTEX_UNLOCK_PI: 2101 case FUTEX_UNLOCK_PI:
2089 ret = futex_unlock_pi(uaddr, fshared); 2102 if (futex_cmpxchg_enabled)
2103 ret = futex_unlock_pi(uaddr, fshared);
2090 break; 2104 break;
2091 case FUTEX_TRYLOCK_PI: 2105 case FUTEX_TRYLOCK_PI:
2092 ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1); 2106 if (futex_cmpxchg_enabled)
2107 ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
2093 break; 2108 break;
2094 default: 2109 default:
2095 ret = -ENOSYS; 2110 ret = -ENOSYS;
@@ -2145,8 +2160,29 @@ static struct file_system_type futex_fs_type = {
2145 2160
2146static int __init init(void) 2161static int __init init(void)
2147{ 2162{
2148 int i = register_filesystem(&futex_fs_type); 2163 u32 curval;
2164 int i;
2165
2166 /*
2167 * This will fail and we want it. Some arch implementations do
2168 * runtime detection of the futex_atomic_cmpxchg_inatomic()
2169 * functionality. We want to know that before we call in any
2170 * of the complex code paths. Also we want to prevent
2171 * registration of robust lists in that case. NULL is
2172 * guaranteed to fault and we get -EFAULT on functional
2173 * implementation, the non functional ones will return
2174 * -ENOSYS.
2175 */
2176 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
2177 if (curval == -EFAULT)
2178 futex_cmpxchg_enabled = 1;
2149 2179
2180 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2181 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2182 spin_lock_init(&futex_queues[i].lock);
2183 }
2184
2185 i = register_filesystem(&futex_fs_type);
2150 if (i) 2186 if (i)
2151 return i; 2187 return i;
2152 2188
@@ -2156,10 +2192,6 @@ static int __init init(void)
2156 return PTR_ERR(futex_mnt); 2192 return PTR_ERR(futex_mnt);
2157 } 2193 }
2158 2194
2159 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2160 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2161 spin_lock_init(&futex_queues[i].lock);
2162 }
2163 return 0; 2195 return 0;
2164} 2196}
2165__initcall(init); 2197__initcall(init);
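
The init() change above probes for a working futex_atomic_cmpxchg_inatomic() exactly once: a functional implementation faults on the NULL user address and returns -EFAULT, a stub returns -ENOSYS, and the result is cached in futex_cmpxchg_enabled so the PI and robust-list paths can test a flag instead of repeating the detection. A userspace sketch of the same probe-once-at-init pattern; probe_cmpxchg() and feature_enabled are made-up names, not kernel interfaces:

#include <errno.h>
#include <stdio.h>

static int feature_enabled;

/* Stand-in for cmpxchg_futex_value_locked(): a real implementation
 * faults on the NULL address (-EFAULT), a stub reports -ENOSYS. */
static int probe_cmpxchg(void *uaddr)
{
	if (uaddr == NULL)
		return -EFAULT;		/* pretend the user access faulted */
	return -ENOSYS;
}

static void subsys_init(void)
{
	/* Detect once at init time and cache the result. */
	if (probe_cmpxchg(NULL) == -EFAULT)
		feature_enabled = 1;
}

static int set_robust_list(void)
{
	if (!feature_enabled)
		return -ENOSYS;		/* same gating as the patched syscalls */
	return 0;
}

int main(void)
{
	subsys_init();
	printf("set_robust_list() -> %d\n", set_robust_list());
	return 0;
}
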
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 7d5e4b016f39..ff90f049f8f6 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -54,6 +54,9 @@ void compat_exit_robust_list(struct task_struct *curr)
54 compat_long_t futex_offset; 54 compat_long_t futex_offset;
55 int rc; 55 int rc;
56 56
57 if (!futex_cmpxchg_enabled)
58 return;
59
57 /* 60 /*
58 * Fetch the list head (which was registered earlier, via 61 * Fetch the list head (which was registered earlier, via
59 * sys_set_robust_list()): 62 * sys_set_robust_list()):
@@ -115,6 +118,9 @@ asmlinkage long
115compat_sys_set_robust_list(struct compat_robust_list_head __user *head, 118compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
116 compat_size_t len) 119 compat_size_t len)
117{ 120{
121 if (!futex_cmpxchg_enabled)
122 return -ENOSYS;
123
118 if (unlikely(len != sizeof(*head))) 124 if (unlikely(len != sizeof(*head)))
119 return -EINVAL; 125 return -EINVAL;
120 126
@@ -130,6 +136,9 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
130 struct compat_robust_list_head __user *head; 136 struct compat_robust_list_head __user *head;
131 unsigned long ret; 137 unsigned long ret;
132 138
139 if (!futex_cmpxchg_enabled)
140 return -ENOSYS;
141
133 if (!pid) 142 if (!pid)
134 head = current->compat_robust_list; 143 head = current->compat_robust_list;
135 else { 144 else {
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index cc54c6276356..fdb3fbe2b0c4 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -246,6 +246,17 @@ static unsigned int default_startup(unsigned int irq)
246} 246}
247 247
248/* 248/*
249 * default shutdown function
250 */
251static void default_shutdown(unsigned int irq)
252{
253 struct irq_desc *desc = irq_desc + irq;
254
255 desc->chip->mask(irq);
256 desc->status |= IRQ_MASKED;
257}
258
259/*
249 * Fixup enable/disable function pointers 260 * Fixup enable/disable function pointers
250 */ 261 */
251void irq_chip_set_defaults(struct irq_chip *chip) 262void irq_chip_set_defaults(struct irq_chip *chip)
@@ -256,8 +267,15 @@ void irq_chip_set_defaults(struct irq_chip *chip)
256 chip->disable = default_disable; 267 chip->disable = default_disable;
257 if (!chip->startup) 268 if (!chip->startup)
258 chip->startup = default_startup; 269 chip->startup = default_startup;
270 /*
271 * We use chip->disable, when the user provided its own. When
272 * we have default_disable set for chip->disable, then we need
273 * to use default_shutdown, otherwise the irq line is not
274 * disabled on free_irq():
275 */
259 if (!chip->shutdown) 276 if (!chip->shutdown)
260 chip->shutdown = chip->disable; 277 chip->shutdown = chip->disable != default_disable ?
278 chip->disable : default_shutdown;
261 if (!chip->name) 279 if (!chip->name)
262 chip->name = chip->typename; 280 chip->name = chip->typename;
263 if (!chip->end) 281 if (!chip->end)
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index a6b2bc831dd0..088dabbf2d6a 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -6,6 +6,7 @@
6 * This file contains spurious interrupt handling. 6 * This file contains spurious interrupt handling.
7 */ 7 */
8 8
9#include <linux/jiffies.h>
9#include <linux/irq.h> 10#include <linux/irq.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/kallsyms.h> 12#include <linux/kallsyms.h>
@@ -179,7 +180,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
179 * otherwise the couter becomes a doomsday timer for otherwise 180 * otherwise the couter becomes a doomsday timer for otherwise
180 * working systems 181 * working systems
181 */ 182 */
182 if (jiffies - desc->last_unhandled > HZ/10) 183 if (time_after(jiffies, desc->last_unhandled + HZ/10))
183 desc->irqs_unhandled = 1; 184 desc->irqs_unhandled = 1;
184 else 185 else
185 desc->irqs_unhandled++; 186 desc->irqs_unhandled++;
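
The spurious.c hunk swaps open-coded jiffies arithmetic for time_after(), which is why <linux/jiffies.h> is now included. time_after(a, b) is defined in terms of a signed difference, so it stays correct across counter wraparound and, unlike the raw unsigned subtraction, does not misfire when the stored timestamp happens to lie ahead of the current jiffies value. A userspace sketch reproducing the kernel definition, with arbitrary numbers:

#include <stdio.h>

/* Same shape as the kernel's time_after(): a signed-difference test. */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long hz = 100, jiffies = 1000;
	unsigned long last_unhandled = jiffies + 5;	/* timestamp ahead of 'now' */

	/* Open-coded form: the unsigned difference wraps to a huge value,
	 * so the test is wrongly true. */
	printf("open-coded: %d\n", jiffies - last_unhandled > hz / 10);

	/* time_after() form used by the patched code. */
	printf("time_after: %d\n", time_after(jiffies, last_unhandled + hz / 10));
	return 0;
}
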
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3574379f4d62..81a4e4a3f087 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -779,6 +779,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
779 * parallel walking of the hash-list safe: 779 * parallel walking of the hash-list safe:
780 */ 780 */
781 list_add_tail_rcu(&class->hash_entry, hash_head); 781 list_add_tail_rcu(&class->hash_entry, hash_head);
782 /*
783 * Add it to the global list of classes:
784 */
785 list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
782 786
783 if (verbose(class)) { 787 if (verbose(class)) {
784 graph_unlock(); 788 graph_unlock();
@@ -2282,10 +2286,6 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
2282 return 0; 2286 return 0;
2283 break; 2287 break;
2284 case LOCK_USED: 2288 case LOCK_USED:
2285 /*
2286 * Add it to the global list of classes:
2287 */
2288 list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
2289 debug_atomic_dec(&nr_unused_locks); 2289 debug_atomic_dec(&nr_unused_locks);
2290 break; 2290 break;
2291 default: 2291 default:
diff --git a/kernel/marker.c b/kernel/marker.c
index c4c2cd8b61f5..50effc01d9a2 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -61,8 +61,8 @@ struct marker_entry {
61 int refcount; /* Number of times armed. 0 if disarmed. */ 61 int refcount; /* Number of times armed. 0 if disarmed. */
62 struct rcu_head rcu; 62 struct rcu_head rcu;
63 void *oldptr; 63 void *oldptr;
64 char rcu_pending:1; 64 unsigned char rcu_pending:1;
65 char ptype:1; 65 unsigned char ptype:1;
66 char name[0]; /* Contains name'\0'format'\0' */ 66 char name[0]; /* Contains name'\0'format'\0' */
67}; 67};
68 68
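
The marker.c change is about bit-field signedness: whether a plain char bit-field is signed is implementation-defined, and a signed 1-bit field can only represent 0 and -1, so storing 1 in ptype and later comparing it against 1 goes wrong. Switching to unsigned char makes the fields hold 0 and 1 as intended. A userspace demonstration, forcing signed char so the effect shows regardless of the compiler's default for plain char:

#include <stdio.h>

struct flags_signed {
	signed char rcu_pending:1;	/* 1-bit signed field: only 0 and -1 */
	signed char ptype:1;
};

struct flags_unsigned {
	unsigned char rcu_pending:1;	/* 1-bit unsigned field: 0 and 1 */
	unsigned char ptype:1;
};

int main(void)
{
	struct flags_signed s = { 0, 0 };
	struct flags_unsigned u = { 0, 0 };
	int one = 1;

	s.ptype = one;	/* stored value is implementation-defined, typically -1 */
	u.ptype = one;

	printf("signed   ptype = %d, (ptype == 1) = %d\n", s.ptype, s.ptype == 1);
	printf("unsigned ptype = %d, (ptype == 1) = %d\n", u.ptype, u.ptype == 1);
	return 0;
}
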
diff --git a/kernel/module.c b/kernel/module.c
index 92595bad3812..901cd6ac2f11 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -987,12 +987,11 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
987 return ret; 987 return ret;
988} 988}
989 989
990
991/* 990/*
992 * /sys/module/foo/sections stuff 991 * /sys/module/foo/sections stuff
993 * J. Corbet <corbet@lwn.net> 992 * J. Corbet <corbet@lwn.net>
994 */ 993 */
995#ifdef CONFIG_KALLSYMS 994#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
996static ssize_t module_sect_show(struct module_attribute *mattr, 995static ssize_t module_sect_show(struct module_attribute *mattr,
997 struct module *mod, char *buf) 996 struct module *mod, char *buf)
998{ 997{
@@ -1188,7 +1187,7 @@ static inline void add_notes_attrs(struct module *mod, unsigned int nsect,
1188static inline void remove_notes_attrs(struct module *mod) 1187static inline void remove_notes_attrs(struct module *mod)
1189{ 1188{
1190} 1189}
1191#endif /* CONFIG_KALLSYMS */ 1190#endif
1192 1191
1193#ifdef CONFIG_SYSFS 1192#ifdef CONFIG_SYSFS
1194int module_add_modinfo_attrs(struct module *mod) 1193int module_add_modinfo_attrs(struct module *mod)
@@ -1231,9 +1230,7 @@ void module_remove_modinfo_attrs(struct module *mod)
1231 } 1230 }
1232 kfree(mod->modinfo_attrs); 1231 kfree(mod->modinfo_attrs);
1233} 1232}
1234#endif
1235 1233
1236#ifdef CONFIG_SYSFS
1237int mod_sysfs_init(struct module *mod) 1234int mod_sysfs_init(struct module *mod)
1238{ 1235{
1239 int err; 1236 int err;
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 859a8e59773a..14a656cdc652 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -391,7 +391,7 @@ int hibernation_platform_enter(void)
391 goto Close; 391 goto Close;
392 392
393 suspend_console(); 393 suspend_console();
394 error = device_suspend(PMSG_SUSPEND); 394 error = device_suspend(PMSG_HIBERNATE);
395 if (error) 395 if (error)
396 goto Resume_console; 396 goto Resume_console;
397 397
@@ -404,7 +404,7 @@ int hibernation_platform_enter(void)
404 goto Finish; 404 goto Finish;
405 405
406 local_irq_disable(); 406 local_irq_disable();
407 error = device_power_down(PMSG_SUSPEND); 407 error = device_power_down(PMSG_HIBERNATE);
408 if (!error) { 408 if (!error) {
409 hibernation_ops->enter(); 409 hibernation_ops->enter();
410 /* We should never get here */ 410 /* We should never get here */
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 95250d7c8d91..72a020cabb4c 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -875,8 +875,8 @@ static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
875#endif /* CONFIG_HIGHMEM */ 875#endif /* CONFIG_HIGHMEM */
876 876
877/** 877/**
878 * saveable - Determine whether a non-highmem page should be included in 878 * saveable_page - Determine whether a non-highmem page should be included
879 * the suspend image. 879 * in the suspend image.
880 * 880 *
881 * We should save the page if it isn't Nosave, and is not in the range 881 * We should save the page if it isn't Nosave, and is not in the range
882 * of pages statically defined as 'unsaveable', and it isn't a part of 882 * of pages statically defined as 'unsaveable', and it isn't a part of
@@ -897,7 +897,8 @@ static struct page *saveable_page(unsigned long pfn)
897 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page)) 897 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
898 return NULL; 898 return NULL;
899 899
900 if (PageReserved(page) && pfn_is_nosave(pfn)) 900 if (PageReserved(page)
901 && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
901 return NULL; 902 return NULL;
902 903
903 return page; 904 return page;
@@ -938,6 +939,25 @@ static inline void do_copy_page(long *dst, long *src)
938 *dst++ = *src++; 939 *dst++ = *src++;
939} 940}
940 941
942
943/**
944 * safe_copy_page - check if the page we are going to copy is marked as
945 * present in the kernel page tables (this always is the case if
946 * CONFIG_DEBUG_PAGEALLOC is not set and in that case
947 * kernel_page_present() always returns 'true').
948 */
949static void safe_copy_page(void *dst, struct page *s_page)
950{
951 if (kernel_page_present(s_page)) {
952 do_copy_page(dst, page_address(s_page));
953 } else {
954 kernel_map_pages(s_page, 1, 1);
955 do_copy_page(dst, page_address(s_page));
956 kernel_map_pages(s_page, 1, 0);
957 }
958}
959
960
941#ifdef CONFIG_HIGHMEM 961#ifdef CONFIG_HIGHMEM
942static inline struct page * 962static inline struct page *
943page_is_saveable(struct zone *zone, unsigned long pfn) 963page_is_saveable(struct zone *zone, unsigned long pfn)
@@ -946,8 +966,7 @@ page_is_saveable(struct zone *zone, unsigned long pfn)
946 saveable_highmem_page(pfn) : saveable_page(pfn); 966 saveable_highmem_page(pfn) : saveable_page(pfn);
947} 967}
948 968
949static inline void 969static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
950copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
951{ 970{
952 struct page *s_page, *d_page; 971 struct page *s_page, *d_page;
953 void *src, *dst; 972 void *src, *dst;
@@ -961,29 +980,26 @@ copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
961 kunmap_atomic(src, KM_USER0); 980 kunmap_atomic(src, KM_USER0);
962 kunmap_atomic(dst, KM_USER1); 981 kunmap_atomic(dst, KM_USER1);
963 } else { 982 } else {
964 src = page_address(s_page);
965 if (PageHighMem(d_page)) { 983 if (PageHighMem(d_page)) {
966 /* Page pointed to by src may contain some kernel 984 /* Page pointed to by src may contain some kernel
967 * data modified by kmap_atomic() 985 * data modified by kmap_atomic()
968 */ 986 */
969 do_copy_page(buffer, src); 987 safe_copy_page(buffer, s_page);
970 dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0); 988 dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
971 memcpy(dst, buffer, PAGE_SIZE); 989 memcpy(dst, buffer, PAGE_SIZE);
972 kunmap_atomic(dst, KM_USER0); 990 kunmap_atomic(dst, KM_USER0);
973 } else { 991 } else {
974 dst = page_address(d_page); 992 safe_copy_page(page_address(d_page), s_page);
975 do_copy_page(dst, src);
976 } 993 }
977 } 994 }
978} 995}
979#else 996#else
980#define page_is_saveable(zone, pfn) saveable_page(pfn) 997#define page_is_saveable(zone, pfn) saveable_page(pfn)
981 998
982static inline void 999static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
983copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
984{ 1000{
985 do_copy_page(page_address(pfn_to_page(dst_pfn)), 1001 safe_copy_page(page_address(pfn_to_page(dst_pfn)),
986 page_address(pfn_to_page(src_pfn))); 1002 pfn_to_page(src_pfn));
987} 1003}
988#endif /* CONFIG_HIGHMEM */ 1004#endif /* CONFIG_HIGHMEM */
989 1005
diff --git a/kernel/printk.c b/kernel/printk.c
index bee36100f110..9adc2a473e6e 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -666,7 +666,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
666 } 666 }
667 /* Emit the output into the temporary buffer */ 667 /* Emit the output into the temporary buffer */
668 printed_len += vscnprintf(printk_buf + printed_len, 668 printed_len += vscnprintf(printk_buf + printed_len,
669 sizeof(printk_buf), fmt, args); 669 sizeof(printk_buf) - printed_len, fmt, args);
670 670
671 /* 671 /*
672 * Copy the output into log_buf. If the caller didn't provide 672 * Copy the output into log_buf. If the caller didn't provide
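
The one-line printk.c fix is the usual rule for appending with the sn*printf family: once printed_len bytes are already in the buffer, the size passed to the next call must be the space that remains, not sizeof() of the whole buffer, or the second write is allowed to run past the end. A userspace sketch of the corrected pattern, with snprintf standing in for the kernel's vscnprintf:

#include <stdio.h>

int main(void)
{
	char buf[32];
	int printed_len = 0;

	/* First chunk, e.g. the "<N>" loglevel prefix built in vprintk(). */
	printed_len += snprintf(buf + printed_len, sizeof(buf) - printed_len,
				"<%d>", 4);

	/* Second chunk: passing sizeof(buf) here instead of the remaining
	 * space would let this call write up to 32 bytes starting at
	 * buf + printed_len, i.e. beyond the end of buf. */
	printed_len += snprintf(buf + printed_len, sizeof(buf) - printed_len,
				"%s", "message text");

	printf("%s (%d bytes)\n", buf, printed_len);
	return 0;
}
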
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 987cfb7ade89..e9517014b57c 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -23,6 +23,10 @@
23 * to Suparna Bhattacharya for pushing me completely away 23 * to Suparna Bhattacharya for pushing me completely away
24 * from atomic instructions on the read side. 24 * from atomic instructions on the read side.
25 * 25 *
26 * - Added handling of Dynamic Ticks
27 * Copyright 2007 - Paul E. Mckenney <paulmck@us.ibm.com>
28 * - Steven Rostedt <srostedt@redhat.com>
29 *
26 * Papers: http://www.rdrop.com/users/paulmck/RCU 30 * Papers: http://www.rdrop.com/users/paulmck/RCU
27 * 31 *
28 * Design Document: http://lwn.net/Articles/253651/ 32 * Design Document: http://lwn.net/Articles/253651/
@@ -409,6 +413,212 @@ static void __rcu_advance_callbacks(struct rcu_data *rdp)
409 } 413 }
410} 414}
411 415
416#ifdef CONFIG_NO_HZ
417
418DEFINE_PER_CPU(long, dynticks_progress_counter) = 1;
419static DEFINE_PER_CPU(long, rcu_dyntick_snapshot);
420static DEFINE_PER_CPU(int, rcu_update_flag);
421
422/**
423 * rcu_irq_enter - Called from Hard irq handlers and NMI/SMI.
424 *
425 * If the CPU was idle with dynamic ticks active, this updates the
426 * dynticks_progress_counter to let the RCU handling know that the
427 * CPU is active.
428 */
429void rcu_irq_enter(void)
430{
431 int cpu = smp_processor_id();
432
433 if (per_cpu(rcu_update_flag, cpu))
434 per_cpu(rcu_update_flag, cpu)++;
435
436 /*
437 * Only update if we are coming from a stopped ticks mode
438 * (dynticks_progress_counter is even).
439 */
440 if (!in_interrupt() &&
441 (per_cpu(dynticks_progress_counter, cpu) & 0x1) == 0) {
442 /*
443 * The following might seem like we could have a race
444 * with NMI/SMIs. But this really isn't a problem.
445 * Here we do a read/modify/write, and the race happens
446 * when an NMI/SMI comes in after the read and before
447 * the write. But NMI/SMIs will increment this counter
448 * twice before returning, so the zero bit will not
449 * be corrupted by the NMI/SMI which is the most important
450 * part.
451 *
452 * The only thing is that we would bring back the counter
453 * to a postion that it was in during the NMI/SMI.
454 * But the zero bit would be set, so the rest of the
455 * counter would again be ignored.
456 *
457 * On return from the IRQ, the counter may have the zero
458 * bit be 0 and the counter the same as the return from
459 * the NMI/SMI. If the state machine was so unlucky to
460 * see that, it still doesn't matter, since all
461 * RCU read-side critical sections on this CPU would
462 * have already completed.
463 */
464 per_cpu(dynticks_progress_counter, cpu)++;
465 /*
466 * The following memory barrier ensures that any
467 * rcu_read_lock() primitives in the irq handler
468 * are seen by other CPUs to follow the above
469 * increment to dynticks_progress_counter. This is
470 * required in order for other CPUs to correctly
471 * determine when it is safe to advance the RCU
472 * grace-period state machine.
473 */
474 smp_mb(); /* see above block comment. */
475 /*
476 * Since we can't determine the dynamic tick mode from
477 * the dynticks_progress_counter after this routine,
478 * we use a second flag to acknowledge that we came
479 * from an idle state with ticks stopped.
480 */
481 per_cpu(rcu_update_flag, cpu)++;
482 /*
483 * If we take an NMI/SMI now, they will also increment
484 * the rcu_update_flag, and will not update the
485 * dynticks_progress_counter on exit. That is for
486 * this IRQ to do.
487 */
488 }
489}
490
491/**
492 * rcu_irq_exit - Called from exiting Hard irq context.
493 *
494 * If the CPU was idle with dynamic ticks active, update the
495 * dynticks_progress_counter to put let the RCU handling be
496 * aware that the CPU is going back to idle with no ticks.
497 */
498void rcu_irq_exit(void)
499{
500 int cpu = smp_processor_id();
501
502 /*
503 * rcu_update_flag is set if we interrupted the CPU
504 * when it was idle with ticks stopped.
505 * Once this occurs, we keep track of interrupt nesting
506 * because a NMI/SMI could also come in, and we still
507 * only want the IRQ that started the increment of the
508 * dynticks_progress_counter to be the one that modifies
509 * it on exit.
510 */
511 if (per_cpu(rcu_update_flag, cpu)) {
512 if (--per_cpu(rcu_update_flag, cpu))
513 return;
514
515 /* This must match the interrupt nesting */
516 WARN_ON(in_interrupt());
517
518 /*
519 * If an NMI/SMI happens now we are still
520 * protected by the dynticks_progress_counter being odd.
521 */
522
523 /*
524 * The following memory barrier ensures that any
525 * rcu_read_unlock() primitives in the irq handler
526 * are seen by other CPUs to preceed the following
527 * increment to dynticks_progress_counter. This
528 * is required in order for other CPUs to determine
529 * when it is safe to advance the RCU grace-period
530 * state machine.
531 */
532 smp_mb(); /* see above block comment. */
533 per_cpu(dynticks_progress_counter, cpu)++;
534 WARN_ON(per_cpu(dynticks_progress_counter, cpu) & 0x1);
535 }
536}
537
538static void dyntick_save_progress_counter(int cpu)
539{
540 per_cpu(rcu_dyntick_snapshot, cpu) =
541 per_cpu(dynticks_progress_counter, cpu);
542}
543
544static inline int
545rcu_try_flip_waitack_needed(int cpu)
546{
547 long curr;
548 long snap;
549
550 curr = per_cpu(dynticks_progress_counter, cpu);
551 snap = per_cpu(rcu_dyntick_snapshot, cpu);
552 smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
553
554 /*
555 * If the CPU remained in dynticks mode for the entire time
556 * and didn't take any interrupts, NMIs, SMIs, or whatever,
557 * then it cannot be in the middle of an rcu_read_lock(), so
558 * the next rcu_read_lock() it executes must use the new value
559 * of the counter. So we can safely pretend that this CPU
560 * already acknowledged the counter.
561 */
562
563 if ((curr == snap) && ((curr & 0x1) == 0))
564 return 0;
565
566 /*
567 * If the CPU passed through or entered a dynticks idle phase with
568 * no active irq handlers, then, as above, we can safely pretend
569 * that this CPU already acknowledged the counter.
570 */
571
572 if ((curr - snap) > 2 || (snap & 0x1) == 0)
573 return 0;
574
575 /* We need this CPU to explicitly acknowledge the counter flip. */
576
577 return 1;
578}
579
580static inline int
581rcu_try_flip_waitmb_needed(int cpu)
582{
583 long curr;
584 long snap;
585
586 curr = per_cpu(dynticks_progress_counter, cpu);
587 snap = per_cpu(rcu_dyntick_snapshot, cpu);
588 smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
589
590 /*
591 * If the CPU remained in dynticks mode for the entire time
592 * and didn't take any interrupts, NMIs, SMIs, or whatever,
593 * then it cannot have executed an RCU read-side critical section
594 * during that time, so there is no need for it to execute a
595 * memory barrier.
596 */
597
598 if ((curr == snap) && ((curr & 0x1) == 0))
599 return 0;
600
601 /*
602 * If the CPU either entered or exited an outermost interrupt,
603 * SMI, NMI, or whatever handler, then we know that it executed
604 * a memory barrier when doing so. So we don't need another one.
605 */
606 if (curr != snap)
607 return 0;
608
609 /* We need the CPU to execute a memory barrier. */
610
611 return 1;
612}
613
614#else /* !CONFIG_NO_HZ */
615
616# define dyntick_save_progress_counter(cpu) do { } while (0)
617# define rcu_try_flip_waitack_needed(cpu) (1)
618# define rcu_try_flip_waitmb_needed(cpu) (1)
619
620#endif /* CONFIG_NO_HZ */
621
412/* 622/*
413 * Get here when RCU is idle. Decide whether we need to 623 * Get here when RCU is idle. Decide whether we need to
414 * move out of idle state, and return non-zero if so. 624 * move out of idle state, and return non-zero if so.
@@ -447,8 +657,10 @@ rcu_try_flip_idle(void)
447 657
448 /* Now ask each CPU for acknowledgement of the flip. */ 658 /* Now ask each CPU for acknowledgement of the flip. */
449 659
450 for_each_cpu_mask(cpu, rcu_cpu_online_map) 660 for_each_cpu_mask(cpu, rcu_cpu_online_map) {
451 per_cpu(rcu_flip_flag, cpu) = rcu_flipped; 661 per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
662 dyntick_save_progress_counter(cpu);
663 }
452 664
453 return 1; 665 return 1;
454} 666}
@@ -464,7 +676,8 @@ rcu_try_flip_waitack(void)
464 676
465 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); 677 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
466 for_each_cpu_mask(cpu, rcu_cpu_online_map) 678 for_each_cpu_mask(cpu, rcu_cpu_online_map)
467 if (per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { 679 if (rcu_try_flip_waitack_needed(cpu) &&
680 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
468 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); 681 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
469 return 0; 682 return 0;
470 } 683 }
@@ -509,8 +722,10 @@ rcu_try_flip_waitzero(void)
509 smp_mb(); /* ^^^^^^^^^^^^ */ 722 smp_mb(); /* ^^^^^^^^^^^^ */
510 723
511 /* Call for a memory barrier from each CPU. */ 724 /* Call for a memory barrier from each CPU. */
512 for_each_cpu_mask(cpu, rcu_cpu_online_map) 725 for_each_cpu_mask(cpu, rcu_cpu_online_map) {
513 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; 726 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
727 dyntick_save_progress_counter(cpu);
728 }
514 729
515 RCU_TRACE_ME(rcupreempt_trace_try_flip_z2); 730 RCU_TRACE_ME(rcupreempt_trace_try_flip_z2);
516 return 1; 731 return 1;
@@ -528,7 +743,8 @@ rcu_try_flip_waitmb(void)
528 743
529 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); 744 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
530 for_each_cpu_mask(cpu, rcu_cpu_online_map) 745 for_each_cpu_mask(cpu, rcu_cpu_online_map)
531 if (per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { 746 if (rcu_try_flip_waitmb_needed(cpu) &&
747 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
532 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); 748 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
533 return 0; 749 return 0;
534 } 750 }
@@ -702,8 +918,9 @@ void rcu_offline_cpu(int cpu)
702 * fix. 918 * fix.
703 */ 919 */
704 920
921 local_irq_save(flags);
705 rdp = RCU_DATA_ME(); 922 rdp = RCU_DATA_ME();
706 spin_lock_irqsave(&rdp->lock, flags); 923 spin_lock(&rdp->lock);
707 *rdp->nexttail = list; 924 *rdp->nexttail = list;
708 if (list) 925 if (list)
709 rdp->nexttail = tail; 926 rdp->nexttail = tail;
@@ -735,9 +952,11 @@ static void rcu_process_callbacks(struct softirq_action *unused)
735{ 952{
736 unsigned long flags; 953 unsigned long flags;
737 struct rcu_head *next, *list; 954 struct rcu_head *next, *list;
738 struct rcu_data *rdp = RCU_DATA_ME(); 955 struct rcu_data *rdp;
739 956
740 spin_lock_irqsave(&rdp->lock, flags); 957 local_irq_save(flags);
958 rdp = RCU_DATA_ME();
959 spin_lock(&rdp->lock);
741 list = rdp->donelist; 960 list = rdp->donelist;
742 if (list == NULL) { 961 if (list == NULL) {
743 spin_unlock_irqrestore(&rdp->lock, flags); 962 spin_unlock_irqrestore(&rdp->lock, flags);
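
The core of the dynticks support added above is a parity convention: a CPU's dynticks_progress_counter is even while it idles with the tick stopped and odd while it is inside an interrupt handler, and the grace-period state machine compares the current value against a snapshot to decide whether the CPU can be skipped. The model below is a deliberately simplified userspace restatement of that bookkeeping (single CPU, no memory barriers, no NMI or nesting handling via rcu_update_flag), just to make the waitack test concrete:

#include <stdio.h>

static long counter;	/* even: dynticks idle; odd: irq in progress */
static long snapshot;

static void model_irq_enter(void)
{
	if ((counter & 1) == 0)
		counter++;		/* leave idle: counter becomes odd */
}

static void model_irq_exit(void)
{
	if (counter & 1)
		counter++;		/* back to idle: counter becomes even */
}

/* Same two tests as rcu_try_flip_waitack_needed() above. */
static int ack_needed(long curr, long snap)
{
	if (curr == snap && (curr & 1) == 0)
		return 0;		/* idle the whole time */
	if (curr - snap > 2 || (snap & 1) == 0)
		return 0;		/* passed through, or started in, idle */
	return 1;			/* must explicitly acknowledge the flip */
}

int main(void)
{
	model_irq_enter();
	snapshot = counter;		/* counter flip happens mid-irq */
	printf("still in the irq:    ack needed? %d\n",
	       ack_needed(counter, snapshot));

	model_irq_exit();
	model_irq_enter();
	model_irq_exit();
	printf("went idle meanwhile: ack needed? %d\n",
	       ack_needed(counter, snapshot));
	return 0;
}
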
diff --git a/kernel/sched.c b/kernel/sched.c
index f28f19e65b59..f06950c8a6ce 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -668,6 +668,8 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
668 */ 668 */
669unsigned int sysctl_sched_rt_period = 1000000; 669unsigned int sysctl_sched_rt_period = 1000000;
670 670
671static __read_mostly int scheduler_running;
672
671/* 673/*
672 * part of the period that we allow rt tasks to run in us. 674 * part of the period that we allow rt tasks to run in us.
673 * default: 0.95s 675 * default: 0.95s
@@ -689,14 +691,16 @@ unsigned long long cpu_clock(int cpu)
689 unsigned long flags; 691 unsigned long flags;
690 struct rq *rq; 692 struct rq *rq;
691 693
692 local_irq_save(flags);
693 rq = cpu_rq(cpu);
694 /* 694 /*
695 * Only call sched_clock() if the scheduler has already been 695 * Only call sched_clock() if the scheduler has already been
696 * initialized (some code might call cpu_clock() very early): 696 * initialized (some code might call cpu_clock() very early):
697 */ 697 */
698 if (rq->idle) 698 if (unlikely(!scheduler_running))
699 update_rq_clock(rq); 699 return 0;
700
701 local_irq_save(flags);
702 rq = cpu_rq(cpu);
703 update_rq_clock(rq);
700 now = rq->clock; 704 now = rq->clock;
701 local_irq_restore(flags); 705 local_irq_restore(flags);
702 706
@@ -1831,6 +1835,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
1831 long old_state; 1835 long old_state;
1832 struct rq *rq; 1836 struct rq *rq;
1833 1837
1838 smp_wmb();
1834 rq = task_rq_lock(p, &flags); 1839 rq = task_rq_lock(p, &flags);
1835 old_state = p->state; 1840 old_state = p->state;
1836 if (!(old_state & state)) 1841 if (!(old_state & state))
@@ -3766,7 +3771,7 @@ void scheduler_tick(void)
3766 3771
3767#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT) 3772#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
3768 3773
3769void add_preempt_count(int val) 3774void __kprobes add_preempt_count(int val)
3770{ 3775{
3771 /* 3776 /*
3772 * Underflow? 3777 * Underflow?
@@ -3782,7 +3787,7 @@ void add_preempt_count(int val)
3782} 3787}
3783EXPORT_SYMBOL(add_preempt_count); 3788EXPORT_SYMBOL(add_preempt_count);
3784 3789
3785void sub_preempt_count(int val) 3790void __kprobes sub_preempt_count(int val)
3786{ 3791{
3787 /* 3792 /*
3788 * Underflow? 3793 * Underflow?
@@ -3884,7 +3889,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
3884asmlinkage void __sched schedule(void) 3889asmlinkage void __sched schedule(void)
3885{ 3890{
3886 struct task_struct *prev, *next; 3891 struct task_struct *prev, *next;
3887 long *switch_count; 3892 unsigned long *switch_count;
3888 struct rq *rq; 3893 struct rq *rq;
3889 int cpu; 3894 int cpu;
3890 3895
@@ -7283,6 +7288,8 @@ void __init sched_init(void)
7283 * During early bootup we pretend to be a normal task: 7288 * During early bootup we pretend to be a normal task:
7284 */ 7289 */
7285 current->sched_class = &fair_sched_class; 7290 current->sched_class = &fair_sched_class;
7291
7292 scheduler_running = 1;
7286} 7293}
7287 7294
7288#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 7295#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6c091d6e159d..c8e6492c5925 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -202,17 +202,12 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
202 202
203static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) 203static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
204{ 204{
205 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node; 205 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
206 struct sched_entity *se = NULL;
207 struct rb_node *parent;
208 206
209 while (*link) { 207 if (!last)
210 parent = *link; 208 return NULL;
211 se = rb_entry(parent, struct sched_entity, run_node);
212 link = &parent->rb_right;
213 }
214 209
215 return se; 210 return rb_entry(last, struct sched_entity, run_node);
216} 211}
217 212
218/************************************************************** 213/**************************************************************
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 5b3aea5f471e..31e9f2a47928 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -313,6 +313,7 @@ void irq_exit(void)
313 /* Make sure that timer wheel updates are propagated */ 313 /* Make sure that timer wheel updates are propagated */
314 if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched()) 314 if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
315 tick_nohz_stop_sched_tick(); 315 tick_nohz_stop_sched_tick();
316 rcu_irq_exit();
316#endif 317#endif
317 preempt_enable_no_resched(); 318 preempt_enable_no_resched();
318} 319}
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 7c2da88db4ed..01b6522fd92b 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -216,26 +216,27 @@ static int watchdog(void *__bind_cpu)
216 /* initialize timestamp */ 216 /* initialize timestamp */
217 touch_softlockup_watchdog(); 217 touch_softlockup_watchdog();
218 218
219 set_current_state(TASK_INTERRUPTIBLE);
219 /* 220 /*
220 * Run briefly once per second to reset the softlockup timestamp. 221 * Run briefly once per second to reset the softlockup timestamp.
221 * If this gets delayed for more than 60 seconds then the 222 * If this gets delayed for more than 60 seconds then the
222 * debug-printout triggers in softlockup_tick(). 223 * debug-printout triggers in softlockup_tick().
223 */ 224 */
224 while (!kthread_should_stop()) { 225 while (!kthread_should_stop()) {
225 set_current_state(TASK_INTERRUPTIBLE);
226 touch_softlockup_watchdog(); 226 touch_softlockup_watchdog();
227 schedule(); 227 schedule();
228 228
229 if (kthread_should_stop()) 229 if (kthread_should_stop())
230 break; 230 break;
231 231
232 if (this_cpu != check_cpu) 232 if (this_cpu == check_cpu) {
233 continue; 233 if (sysctl_hung_task_timeout_secs)
234 234 check_hung_uninterruptible_tasks(this_cpu);
235 if (sysctl_hung_task_timeout_secs) 235 }
236 check_hung_uninterruptible_tasks(this_cpu);
237 236
237 set_current_state(TASK_INTERRUPTIBLE);
238 } 238 }
239 __set_current_state(TASK_RUNNING);
239 240
240 return 0; 241 return 0;
241} 242}
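
The reshuffled watchdog loop follows the standard kthread sleep pattern: mark the task TASK_INTERRUPTIBLE before checking kthread_should_stop() and calling schedule(), so a wakeup that arrives between the check and the sleep cannot be lost, and put the task back to TASK_RUNNING on the way out. A rough userspace analogue of the same race-avoidance idea, using a condition variable in place of set_current_state()/schedule(); build with -pthread:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int should_stop;

static void *watchdog(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&lock);
	/* The stop flag is tested while "prepared to sleep" (mutex held),
	 * and pthread_cond_wait() releases the mutex and sleeps atomically,
	 * so a stop request can never fall into the gap and be missed. */
	while (!should_stop)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, watchdog, NULL);
	sleep(1);

	pthread_mutex_lock(&lock);
	should_stop = 1;		/* the analogue of kthread_stop() */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);

	pthread_join(tid, NULL);
	puts("watchdog stopped cleanly");
	return 0;
}
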
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index fa9bb73dbdb4..2968298f8f36 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -282,6 +282,7 @@ void tick_nohz_stop_sched_tick(void)
282 ts->idle_tick = ts->sched_timer.expires; 282 ts->idle_tick = ts->sched_timer.expires;
283 ts->tick_stopped = 1; 283 ts->tick_stopped = 1;
284 ts->idle_jiffies = last_jiffies; 284 ts->idle_jiffies = last_jiffies;
285 rcu_enter_nohz();
285 } 286 }
286 287
287 /* 288 /*
@@ -375,6 +376,8 @@ void tick_nohz_restart_sched_tick(void)
375 return; 376 return;
376 } 377 }
377 378
379 rcu_exit_nohz();
380
378 /* Update jiffies first */ 381 /* Update jiffies first */
379 select_nohz_load_balancer(0); 382 select_nohz_load_balancer(0);
380 now = ktime_get(); 383 now = ktime_get();
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index d3d94c1a0fd2..67fe8fc21fb1 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -65,9 +65,9 @@ print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now)
65 SEQ_printf(m, ", %s/%d", tmp, timer->start_pid); 65 SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
66#endif 66#endif
67 SEQ_printf(m, "\n"); 67 SEQ_printf(m, "\n");
68 SEQ_printf(m, " # expires at %Lu nsecs [in %Lu nsecs]\n", 68 SEQ_printf(m, " # expires at %Lu nsecs [in %Ld nsecs]\n",
69 (unsigned long long)ktime_to_ns(timer->expires), 69 (unsigned long long)ktime_to_ns(timer->expires),
70 (unsigned long long)(ktime_to_ns(timer->expires) - now)); 70 (long long)(ktime_to_ns(timer->expires) - now));
71} 71}
72 72
73static void 73static void
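
The timer_list.c format change only shows up for timers that have already fired: expires - now is then negative, and printing it through an unsigned conversion turns a small negative remainder into a huge bogus value. A two-printf illustration, with userspace %llu/%lld standing in for the kernel's %Lu/%Ld:

#include <stdio.h>

int main(void)
{
	long long delta = -5000;	/* the timer expired 5000 nsecs ago */

	printf("unsigned: in %llu nsecs\n", (unsigned long long)delta);
	printf("signed:   in %lld nsecs\n", delta);
	return 0;
}
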