author    Ingo Molnar <mingo@elte.hu>    2008-12-12 07:48:57 -0500
committer Ingo Molnar <mingo@elte.hu>    2008-12-12 07:48:57 -0500
commit    45ab6b0c76d0e4cce5bd608ccf97b0f6b20f18df (patch)
tree      4d51c73533c386aee16fde1e74b5e3bc22eedc53 /kernel
parent    81444a799550214f549caf579cf65a0ca55e70b7 (diff)
parent    d65bd5ecb2bd166cea4952a59b7e16cc3ad6ef6c (diff)
Merge branch 'sched/core' into cpus4096
Conflicts:
	include/linux/ftrace.h
	kernel/sched.c
Diffstat (limited to 'kernel')
 kernel/audit.c            | 32
 kernel/auditsc.c          | 24
 kernel/fork.c             | 16
 kernel/latencytop.c       |  2
 kernel/posix-cpu-timers.c |  2
 kernel/power/swap.c       |  2
 kernel/relay.c            |  7
 kernel/sched.c            | 99
 kernel/sched_debug.c      |  6
 kernel/sched_rt.c         |  4
 kernel/softlockup.c       |  2
 kernel/time/timekeeping.c | 22
 kernel/user.c             |  2
 13 files changed, 149 insertions(+), 71 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 4414e93d8750..ce6d8ea3131e 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -61,8 +61,11 @@
 
 #include "audit.h"
 
-/* No auditing will take place until audit_initialized != 0.
+/* No auditing will take place until audit_initialized == AUDIT_INITIALIZED.
  * (Initialization happens after skb_init is called.) */
+#define AUDIT_DISABLED		-1
+#define AUDIT_UNINITIALIZED	0
+#define AUDIT_INITIALIZED	1
 static int audit_initialized;
 
 #define AUDIT_OFF	0
@@ -965,6 +968,9 @@ static int __init audit_init(void)
 {
 	int i;
 
+	if (audit_initialized == AUDIT_DISABLED)
+		return 0;
+
 	printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
 	       audit_default ? "enabled" : "disabled");
 	audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0,
@@ -976,7 +982,7 @@ static int __init audit_init(void)
976 982
977 skb_queue_head_init(&audit_skb_queue); 983 skb_queue_head_init(&audit_skb_queue);
978 skb_queue_head_init(&audit_skb_hold_queue); 984 skb_queue_head_init(&audit_skb_hold_queue);
979 audit_initialized = 1; 985 audit_initialized = AUDIT_INITIALIZED;
980 audit_enabled = audit_default; 986 audit_enabled = audit_default;
981 audit_ever_enabled |= !!audit_default; 987 audit_ever_enabled |= !!audit_default;
982 988
@@ -999,13 +1005,21 @@ __initcall(audit_init);
 static int __init audit_enable(char *str)
 {
 	audit_default = !!simple_strtol(str, NULL, 0);
-	printk(KERN_INFO "audit: %s%s\n",
-	       audit_default ? "enabled" : "disabled",
-	       audit_initialized ? "" : " (after initialization)");
-	if (audit_initialized) {
+	if (!audit_default)
+		audit_initialized = AUDIT_DISABLED;
+
+	printk(KERN_INFO "audit: %s", audit_default ? "enabled" : "disabled");
+
+	if (audit_initialized == AUDIT_INITIALIZED) {
 		audit_enabled = audit_default;
 		audit_ever_enabled |= !!audit_default;
+	} else if (audit_initialized == AUDIT_UNINITIALIZED) {
+		printk(" (after initialization)");
+	} else {
+		printk(" (until reboot)");
 	}
+	printk("\n");
+
 	return 1;
 }
 
@@ -1107,9 +1121,7 @@ unsigned int audit_serial(void)
 static inline void audit_get_stamp(struct audit_context *ctx,
 				   struct timespec *t, unsigned int *serial)
 {
-	if (ctx)
-		auditsc_get_stamp(ctx, t, serial);
-	else {
+	if (!ctx || !auditsc_get_stamp(ctx, t, serial)) {
 		*t = CURRENT_TIME;
 		*serial = audit_serial();
 	}
@@ -1146,7 +1158,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 	int reserve;
 	unsigned long timeout_start = jiffies;
 
-	if (!audit_initialized)
+	if (audit_initialized != AUDIT_INITIALIZED)
 		return NULL;
 
 	if (unlikely(audit_filter_type(type)))
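
The audit.c hunks above turn audit_initialized from a boolean into a three-state
value, so that booting with audit=0 disables auditing for the whole boot
("until reboot") rather than only until audit_init() runs. A minimal userspace
sketch of the resulting state machine follows; the helpers parse_audit_param,
do_audit_init and try_log are illustrative stand-ins, not kernel functions:

    #include <stdio.h>
    #include <string.h>

    #define AUDIT_DISABLED      -1
    #define AUDIT_UNINITIALIZED  0
    #define AUDIT_INITIALIZED    1

    static int audit_initialized = AUDIT_UNINITIALIZED;

    /* models audit_enable(): "audit=0" pins the state to AUDIT_DISABLED */
    static void parse_audit_param(const char *str)
    {
        if (strcmp(str, "0") == 0)
            audit_initialized = AUDIT_DISABLED;
    }

    /* models audit_init(): a disabled state is never promoted */
    static void do_audit_init(void)
    {
        if (audit_initialized == AUDIT_DISABLED)
            return;
        audit_initialized = AUDIT_INITIALIZED;
    }

    /* models audit_log_start(): only a fully initialized subsystem logs */
    static void try_log(const char *msg)
    {
        if (audit_initialized != AUDIT_INITIALIZED) {
            printf("dropped: %s\n", msg);
            return;
        }
        printf("logged:  %s\n", msg);
    }

    int main(void)
    {
        parse_audit_param("0");    /* kernel booted with audit=0 */
        do_audit_init();
        try_log("syscall record"); /* dropped, and stays dropped until reboot */
        return 0;
    }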
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index cf5bc2f5f9c3..2a3f0afc4d2a 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1459,7 +1459,6 @@ void audit_free(struct task_struct *tsk)
 
 /**
  * audit_syscall_entry - fill in an audit record at syscall entry
- * @tsk: task being audited
  * @arch: architecture type
  * @major: major syscall type (function)
  * @a1: additional syscall register 1
@@ -1548,9 +1547,25 @@ void audit_syscall_entry(int arch, int major,
 	context->ppid = 0;
 }
 
+void audit_finish_fork(struct task_struct *child)
+{
+	struct audit_context *ctx = current->audit_context;
+	struct audit_context *p = child->audit_context;
+	if (!p || !ctx || !ctx->auditable)
+		return;
+	p->arch = ctx->arch;
+	p->major = ctx->major;
+	memcpy(p->argv, ctx->argv, sizeof(ctx->argv));
+	p->ctime = ctx->ctime;
+	p->dummy = ctx->dummy;
+	p->auditable = ctx->auditable;
+	p->in_syscall = ctx->in_syscall;
+	p->filterkey = kstrdup(ctx->filterkey, GFP_KERNEL);
+	p->ppid = current->pid;
+}
+
 /**
  * audit_syscall_exit - deallocate audit context after a system call
- * @tsk: task being audited
  * @valid: success/failure flag
  * @return_code: syscall return value
  *
@@ -1942,15 +1957,18 @@ EXPORT_SYMBOL_GPL(__audit_inode_child);
  *
  * Also sets the context as auditable.
  */
-void auditsc_get_stamp(struct audit_context *ctx,
-		       struct timespec *t, unsigned int *serial)
+int auditsc_get_stamp(struct audit_context *ctx,
+		      struct timespec *t, unsigned int *serial)
 {
+	if (!ctx->in_syscall)
+		return 0;
 	if (!ctx->serial)
 		ctx->serial = audit_serial();
 	t->tv_sec  = ctx->ctime.tv_sec;
 	t->tv_nsec = ctx->ctime.tv_nsec;
 	*serial    = ctx->serial;
 	ctx->auditable = 1;
+	return 1;
 }
 
 /* global counter which is incremented every time something logs in */
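
auditsc_get_stamp() now reports through its return value whether the context
was actually inside a syscall, which is what lets audit_get_stamp() in the
audit.c hunk above collapse the NULL-context and not-in-syscall cases into a
single fallback branch. A hedged userspace sketch of that caller pattern,
using a stand-in context type rather than the real struct audit_context:

    #include <stdio.h>
    #include <time.h>

    /* stand-in for struct audit_context; illustrative only */
    struct ctx { int in_syscall; time_t ctime; unsigned serial; };

    static unsigned next_serial(void)
    {
        static unsigned s;
        return ++s;
    }

    /* mirrors the new auditsc_get_stamp(): success via return value */
    static int ctx_get_stamp(struct ctx *c, time_t *t, unsigned *serial)
    {
        if (!c->in_syscall)
            return 0;
        if (!c->serial)
            c->serial = next_serial();
        *t = c->ctime;
        *serial = c->serial;
        return 1;
    }

    /* mirrors audit_get_stamp(): one fallback for both failure modes */
    static void get_stamp(struct ctx *c, time_t *t, unsigned *serial)
    {
        if (!c || !ctx_get_stamp(c, t, serial)) {
            *t = time(NULL);
            *serial = next_serial();
        }
    }

    int main(void)
    {
        time_t t;
        unsigned s;

        get_stamp(NULL, &t, &s);  /* NULL context: falls back cleanly */
        printf("serial %u\n", s);
        return 0;
    }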
diff --git a/kernel/fork.c b/kernel/fork.c
index 7407ab319875..7b93da72d4a2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -319,17 +319,20 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		file = tmp->vm_file;
 		if (file) {
 			struct inode *inode = file->f_path.dentry->d_inode;
+			struct address_space *mapping = file->f_mapping;
+
 			get_file(file);
 			if (tmp->vm_flags & VM_DENYWRITE)
 				atomic_dec(&inode->i_writecount);
-
-			/* insert tmp into the share list, just after mpnt */
-			spin_lock(&file->f_mapping->i_mmap_lock);
+			spin_lock(&mapping->i_mmap_lock);
+			if (tmp->vm_flags & VM_SHARED)
+				mapping->i_mmap_writable++;
 			tmp->vm_truncate_count = mpnt->vm_truncate_count;
-			flush_dcache_mmap_lock(file->f_mapping);
+			flush_dcache_mmap_lock(mapping);
+			/* insert tmp into the share list, just after mpnt */
 			vma_prio_tree_add(tmp, mpnt);
-			flush_dcache_mmap_unlock(file->f_mapping);
-			spin_unlock(&file->f_mapping->i_mmap_lock);
+			flush_dcache_mmap_unlock(mapping);
+			spin_unlock(&mapping->i_mmap_lock);
 		}
 
 		/*
@@ -1406,6 +1409,7 @@ long do_fork(unsigned long clone_flags,
 		init_completion(&vfork);
 	}
 
+	audit_finish_fork(p);
 	tracehook_report_clone(trace, regs, clone_flags, nr, p);
 
 	/*
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 5e7b45c56923..449db466bdbc 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -191,7 +191,7 @@ static int lstats_show(struct seq_file *m, void *v)
 				   latency_record[i].time,
 				   latency_record[i].max);
 			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
-				char sym[KSYM_NAME_LEN];
+				char sym[KSYM_SYMBOL_LEN];
 				char *c;
 				if (!latency_record[i].backtrace[q])
 					break;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 3f4377e0aa04..157de3a47832 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -311,7 +311,7 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 	struct task_cputime cputime;
 
 	thread_group_cputime(p, &cputime);
-	switch (which_clock) {
+	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
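
The posix-cpu-timers fix matters because a CPU clock id is a packed value: the
PID lives in the upper bits and the clock type in the low bits, so switching on
the raw id never matched CPUCLOCK_PROF for a nonzero PID and always fell
through to -EINVAL. A small standalone illustration of the decoding; the mask
and encoding macros below are reproduced from include/linux/posix-timers.h of
this era and should be treated as assumptions:

    #include <stdio.h>

    /* layout assumed from posix-timers.h: bits 0-1 clock type,
     * bit 2 per-thread flag, bits 3 and up hold ~pid */
    #define CPUCLOCK_PROF        0
    #define CPUCLOCK_VIRT        1
    #define CPUCLOCK_SCHED       2
    #define CPUCLOCK_CLOCK_MASK  3
    #define CPUCLOCK_WHICH(clock) ((clock) & CPUCLOCK_CLOCK_MASK)
    #define MAKE_PROCESS_CPUCLOCK(pid, clock) (((~(pid)) << 3) | (clock))

    int main(void)
    {
        int id = MAKE_PROCESS_CPUCLOCK(1234, CPUCLOCK_PROF);

        /* the old switch (which_clock) compared this raw value... */
        printf("raw id     = %d\n", id);
        /* ...while only the masked value matches a CPUCLOCK_* case */
        printf("clock type = %d\n", CPUCLOCK_WHICH(id));
        return 0;
    }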
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b7713b53d07a..6da14358537c 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -633,7 +633,7 @@ void swsusp_close(fmode_t mode)
 		return;
 	}
 
-	blkdev_put(resume_bdev, mode);	/* move up */
+	blkdev_put(resume_bdev, mode);
 }
 
 static int swsusp_header_init(void)
diff --git a/kernel/relay.c b/kernel/relay.c
index 32b0befdcb6a..09ac2008f77b 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1317,12 +1317,9 @@ static ssize_t relay_file_splice_read(struct file *in,
 		if (ret < 0)
 			break;
 		else if (!ret) {
-			if (spliced)
-				break;
-			if (flags & SPLICE_F_NONBLOCK) {
+			if (flags & SPLICE_F_NONBLOCK)
 				ret = -EAGAIN;
-				break;
-			}
+			break;
 		}
 
 		*ppos += ret;
diff --git a/kernel/sched.c b/kernel/sched.c
index 4ed9f588faa6..e00c92d22655 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -267,6 +267,10 @@ struct task_group {
 	struct cgroup_subsys_state css;
 #endif
 
+#ifdef CONFIG_USER_SCHED
+	uid_t uid;
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* schedulable entities of this group on each cpu */
 	struct sched_entity **se;
@@ -292,6 +296,12 @@ struct task_group {
 
 #ifdef CONFIG_USER_SCHED
 
+/* Helper function to pass uid information to create_sched_user() */
+void set_tg_uid(struct user_struct *user)
+{
+	user->tg->uid = user->uid;
+}
+
 /*
  * Root task group.
  *	Every UID task group (including init_task_group aka UID-0) will
@@ -1587,6 +1597,39 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #endif
 
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
+	int ret = 0;
+
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work good under rq->lock */
+		spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
+	if (unlikely(!spin_trylock(&busiest->lock))) {
+		if (busiest < this_rq) {
+			spin_unlock(&this_rq->lock);
+			spin_lock(&busiest->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+			ret = 1;
+		} else
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+	}
+	return ret;
+}
+
+static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -2784,40 +2827,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 }
 
 /*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
- */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(this_rq->lock)
-	__acquires(busiest->lock)
-	__acquires(this_rq->lock)
-{
-	int ret = 0;
-
-	if (unlikely(!irqs_disabled())) {
-		/* printk() doesn't work good under rq->lock */
-		spin_unlock(&this_rq->lock);
-		BUG_ON(1);
-	}
-	if (unlikely(!spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
-			spin_unlock(&this_rq->lock);
-			spin_lock(&busiest->lock);
-			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
-			ret = 1;
-		} else
-			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
-	}
-	return ret;
-}
-
-static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(busiest->lock)
-{
-	spin_unlock(&busiest->lock);
-	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
-}
-
-/*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
  * allow dest_cpu, which will force the cpu onto dest_cpu. Then
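
double_lock_balance() is the scheduler's ABBA-deadlock avoidance: with
this_rq->lock already held, it trylocks the busiest runqueue, and on failure
falls back to taking both locks in a fixed global order (lowest struct address
first), returning 1 so the caller knows this_rq->lock was dropped and anything
derived under it must be revalidated. The two sched.c hunks above only hoist
the function ahead of the group-scheduling code; sched_rt.c can then drop its
forward declarations (see the sched_rt.c hunk below). A userspace sketch of
the same discipline with pthreads, where double_lock() is an illustrative
stand-in rather than kernel API:

    #include <pthread.h>
    #include <stdio.h>

    /* Address-ordered double locking, as double_lock_balance() does for
     * runqueues. Returns 1 when the held lock was dropped and retaken,
     * so the caller must revalidate state derived under it. */
    static int double_lock(pthread_mutex_t *held, pthread_mutex_t *second)
    {
        int dropped = 0;

        if (pthread_mutex_trylock(second) != 0) {
            if ((void *)second < (void *)held) {
                /* wrong order: back off and reacquire lowest-first */
                pthread_mutex_unlock(held);
                pthread_mutex_lock(second);
                pthread_mutex_lock(held);
                dropped = 1;
            } else {
                pthread_mutex_lock(second);
            }
        }
        return dropped;
    }

    int main(void)
    {
        pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;
        int dropped;

        pthread_mutex_lock(&a);
        dropped = double_lock(&a, &b);
        printf("dropped and retook first lock: %d\n", dropped);
        pthread_mutex_unlock(&b);
        pthread_mutex_unlock(&a);
        return 0;
    }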
@@ -3676,7 +3685,7 @@ out_balanced:
 static void idle_balance(int this_cpu, struct rq *this_rq)
 {
 	struct sched_domain *sd;
-	int pulled_task = -1;
+	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
 	cpumask_var_t tmpmask;
 
@@ -6577,7 +6586,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 			req = list_entry(rq->migration_queue.next,
 					 struct migration_req, list);
 			list_del_init(&req->list);
+			spin_unlock_irq(&rq->lock);
 			complete(&req->done);
+			spin_lock_irq(&rq->lock);
 		}
 		spin_unlock_irq(&rq->lock);
 		break;
@@ -6781,6 +6792,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 				SD_BALANCE_EXEC |
 				SD_SHARE_CPUPOWER |
 				SD_SHARE_PKG_RESOURCES);
+		if (nr_node_ids == 1)
+			pflags &= ~SD_SERIALIZE;
 	}
 	if (~cflags & pflags)
 		return 0;
@@ -7716,8 +7729,14 @@ static struct sched_domain_attr *dattr_cur;
  */
 static cpumask_var_t fallback_doms;
 
-void __attribute__((weak)) arch_update_cpu_topology(void)
+/*
+ * arch_update_cpu_topology lets virtualized architectures update the
+ * cpu core maps. It is supposed to return 1 if the topology changed
+ * or 0 if it stayed the same.
+ */
+int __attribute__((weak)) arch_update_cpu_topology(void)
 {
+	return 0;
 }
 
 /*
@@ -7811,17 +7830,21 @@ void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 			     struct sched_domain_attr *dattr_new)
 {
 	int i, j, n;
+	int new_topology;
 
 	mutex_lock(&sched_domains_mutex);
 
 	/* always unregister in case we don't destroy any domains */
 	unregister_sched_domain_sysctl();
 
+	/* Let architecture update cpu core mappings. */
+	new_topology = arch_update_cpu_topology();
+
 	n = doms_new ? ndoms_new : 0;
 
 	/* Destroy deleted domains */
 	for (i = 0; i < ndoms_cur; i++) {
-		for (j = 0; j < n; j++) {
+		for (j = 0; j < n && !new_topology; j++) {
 			if (cpumask_equal(&doms_cur[i], &doms_new[j])
 			    && dattrs_equal(dattr_cur, i, dattr_new, j))
 				goto match1;
@@ -7841,7 +7864,7 @@ match1:
 
 	/* Build new domains */
 	for (i = 0; i < ndoms_new; i++) {
-		for (j = 0; j < ndoms_cur; j++) {
+		for (j = 0; j < ndoms_cur && !new_topology; j++) {
 			if (cpumask_equal(&doms_new[i], &doms_cur[j])
 			    && dattrs_equal(dattr_new, i, dattr_cur, j))
 				goto match2;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index baf2f17af462..4293cfa9681d 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -160,10 +160,14 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 		cgroup_path(tg->css.cgroup, path, sizeof(path));
 
 		SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
+#elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
+	{
+		uid_t uid = cfs_rq->tg->uid;
+		SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid);
+	}
 #else
 	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
 #endif
-
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
 			SPLIT_NS(cfs_rq->exec_clock));
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 94aab72f6a02..1bbd99014011 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -914,10 +914,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 /* Only try algorithms three times */
 #define RT_MAX_TRIES 3
 
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
-static inline void double_unlock_balance(struct rq *this_rq,
-					 struct rq *busiest);
-
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 884e6cd2769c..1ab790c67b17 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -188,7 +188,7 @@ static void check_hung_task(struct task_struct *t, unsigned long now)
 	if ((long)(now - t->last_switch_timestamp) <
 					sysctl_hung_task_timeout_secs)
 		return;
-	if (sysctl_hung_task_warnings < 0)
+	if (!sysctl_hung_task_warnings)
 		return;
 	sysctl_hung_task_warnings--;
 
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e7acfb482a68..fa05e88aa76f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -518,6 +518,28 @@ void update_wall_time(void)
 	/* correct the clock when NTP error is too big */
 	clocksource_adjust(offset);
 
+	/*
+	 * Since in the loop above, we accumulate any amount of time
+	 * in xtime_nsec over a second into xtime.tv_sec, its possible for
+	 * xtime_nsec to be fairly small after the loop. Further, if we're
+	 * slightly speeding the clocksource up in clocksource_adjust(),
+	 * its possible the required corrective factor to xtime_nsec could
+	 * cause it to underflow.
+	 *
+	 * Now, we cannot simply roll the accumulated second back, since
+	 * the NTP subsystem has been notified via second_overflow. So
+	 * instead we push xtime_nsec forward by the amount we underflowed,
+	 * and add that amount into the error.
+	 *
+	 * We'll correct this error next time through this function, when
+	 * xtime_nsec is not as small.
+	 */
+	if (unlikely((s64)clock->xtime_nsec < 0)) {
+		s64 neg = -(s64)clock->xtime_nsec;
+		clock->xtime_nsec = 0;
+		clock->error += neg << (NTP_SCALE_SHIFT - clock->shift);
+	}
+
 	/* store full nanoseconds into xtime after rounding it up and
 	 * add the remainder to the error difference.
 	 */
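
In the timekeeping hunk, xtime_nsec is kept in shifted nanoseconds
(ns << clock->shift) while clock->error is kept in NTP_SCALE_SHIFT units,
which is why the underflowed amount is booked into the error as
neg << (NTP_SCALE_SHIFT - clock->shift). A toy calculation of the
clamp-and-compensate step; NTP_SCALE_SHIFT = 32 and the clocksource shift
below are assumed values, not taken from this diff:

    #include <stdio.h>
    #include <stdint.h>

    #define NTP_SCALE_SHIFT 32	/* assumed, as in 2.6.28-era ntp code */

    int main(void)
    {
        int shift = 10;                           /* assumed clocksource shift */
        int64_t xtime_nsec = -(3LL << shift);     /* 3 ns underflow, shifted */
        int64_t error = 0;

        /* mirror of the patch: clamp to zero, book the deficit as error */
        if (xtime_nsec < 0) {
            int64_t neg = -xtime_nsec;
            xtime_nsec = 0;
            error += neg << (NTP_SCALE_SHIFT - shift);
        }

        printf("xtime_nsec = %lld\n", (long long)xtime_nsec);
        printf("error      = %lld\n", (long long)error);
        return 0;
    }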
diff --git a/kernel/user.c b/kernel/user.c
index 39d6159fae43..cec2224bc9f5 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -101,6 +101,8 @@ static int sched_create_user(struct user_struct *up)
 	if (IS_ERR(up->tg))
 		rc = -ENOMEM;
 
+	set_tg_uid(up);
+
 	return rc;
 }
 
106 108