path: root/kernel
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c            |   3
-rw-r--r--  kernel/exit.c              |   8
-rw-r--r--  kernel/fork.c              |  10
-rw-r--r--  kernel/irq/chip.c          |   5
-rw-r--r--  kernel/irq/handle.c        |  57
-rw-r--r--  kernel/irq/internals.h     |   7
-rw-r--r--  kernel/irq/manage.c        |  12
-rw-r--r--  kernel/irq/migration.c     |  12
-rw-r--r--  kernel/irq/numa_migrate.c  |  19
-rw-r--r--  kernel/irq/proc.c          |   4
-rw-r--r--  kernel/itimer.c            |   4
-rw-r--r--  kernel/kexec.c             |   2
-rw-r--r--  kernel/panic.c             |   8
-rw-r--r--  kernel/posix-cpu-timers.c  | 117
-rw-r--r--  kernel/profile.c           |   3
-rw-r--r--  kernel/sched.c             |  34
-rw-r--r--  kernel/sched_fair.c        |  11
-rw-r--r--  kernel/sched_rt.c          |  32
-rw-r--r--  kernel/sched_stats.h       |  45
-rw-r--r--  kernel/signal.c            |   8
-rw-r--r--  kernel/softirq.c           |   5
-rw-r--r--  kernel/sysctl.c            |   5
-rw-r--r--  kernel/user.c              |   3
23 files changed, 295 insertions(+), 119 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 5a54ff42874e..e14db9c089b9 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2351,7 +2351,7 @@ static void cgroup_lock_hierarchy(struct cgroupfs_root *root)
 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
 		struct cgroup_subsys *ss = subsys[i];
 		if (ss->root == root)
-			mutex_lock_nested(&ss->hierarchy_mutex, i);
+			mutex_lock(&ss->hierarchy_mutex);
 	}
 }
 
@@ -2637,6 +2637,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
 	BUG_ON(!list_empty(&init_task.tasks));
 
 	mutex_init(&ss->hierarchy_mutex);
+	lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
 	ss->active = 1;
 }
 
diff --git a/kernel/exit.c b/kernel/exit.c
index f80dec3f1875..167e1e3ad7c6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -118,6 +118,8 @@ static void __exit_signal(struct task_struct *tsk)
 		 * We won't ever get here for the group leader, since it
 		 * will have been the last reference on the signal_struct.
 		 */
+		sig->utime = cputime_add(sig->utime, task_utime(tsk));
+		sig->stime = cputime_add(sig->stime, task_stime(tsk));
 		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
 		sig->min_flt += tsk->min_flt;
 		sig->maj_flt += tsk->maj_flt;
@@ -126,6 +128,7 @@ static void __exit_signal(struct task_struct *tsk)
 		sig->inblock += task_io_get_inblock(tsk);
 		sig->oublock += task_io_get_oublock(tsk);
 		task_io_accounting_add(&sig->ioac, &tsk->ioac);
+		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
 		sig = NULL; /* Marker for below. */
 	}
 
@@ -977,12 +980,9 @@ static void check_stack_usage(void)
 {
 	static DEFINE_SPINLOCK(low_water_lock);
 	static int lowest_to_date = THREAD_SIZE;
-	unsigned long *n = end_of_stack(current);
 	unsigned long free;
 
-	while (*n == 0)
-		n++;
-	free = (unsigned long)n - (unsigned long)end_of_stack(current);
+	free = stack_not_used(current);
 
 	if (free >= lowest_to_date)
 		return;
diff --git a/kernel/fork.c b/kernel/fork.c
index 6d5dbb7a13e2..8de303bdd4e5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -61,6 +61,7 @@
 #include <linux/proc_fs.h>
 #include <linux/blkdev.h>
 #include <trace/sched.h>
+#include <linux/magic.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -212,6 +213,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
 	struct task_struct *tsk;
 	struct thread_info *ti;
+	unsigned long *stackend;
+
 	int err;
 
 	prepare_to_copy(orig);
@@ -237,6 +240,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 		goto out;
 
 	setup_thread_stack(tsk, orig);
+	stackend = end_of_stack(tsk);
+	*stackend = STACK_END_MAGIC;	/* for overflow detection */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 	tsk->stack_canary = get_random_int();
@@ -851,13 +856,14 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	sig->tty_old_pgrp = NULL;
 	sig->tty = NULL;
 
-	sig->cutime = sig->cstime = cputime_zero;
+	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
 	sig->gtime = cputime_zero;
 	sig->cgtime = cputime_zero;
 	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
 	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
 	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
 	task_io_accounting_init(&sig->ioac);
+	sig->sum_sched_runtime = 0;
 	taskstats_tgid_init(sig);
 
 	task_lock(current->group_leader);
@@ -1094,7 +1100,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
-	if (unlikely(ptrace_reparented(current)))
+	if (unlikely(current->ptrace))
 		ptrace_fork(p, clone_flags);
 
 	/* Perform scheduler related setup. Assign this task to a CPU. */
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 7de11bd64dfe..122fef4b0bd3 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-	cpumask_setall(&desc->affinity);
+	cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_clear(desc->pending_mask);
+#endif
 #endif
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 3aba8d12f328..f51eaee921b6 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <linux/bootmem.h>
 
 #include "internals.h"
 
@@ -69,6 +70,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 #ifdef CONFIG_SPARSE_IRQ
+
 static struct irq_desc irq_desc_init = {
 	.irq = -1,
 	.status = IRQ_DISABLED,
@@ -76,9 +78,6 @@ static struct irq_desc irq_desc_init = {
 	.handle_irq = handle_bad_irq,
 	.depth = 1,
 	.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-	.affinity = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
@@ -113,6 +112,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
 		printk(KERN_ERR "can not alloc kstat_irqs\n");
 		BUG_ON(1);
 	}
+	if (!init_alloc_desc_masks(desc, cpu, false)) {
+		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+		BUG_ON(1);
+	}
 	arch_init_chip_data(desc, cpu);
 }
 
@@ -121,7 +124,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
  */
 DEFINE_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS_LEGACY-1] = {
@@ -131,14 +134,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
 		.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-		.affinity = CPU_MASK_ALL
-#endif
 	}
 };
 
-/* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
 
 int __init early_irq_init(void)
 {
@@ -148,18 +147,30 @@ int __init early_irq_init(void)
 
 	init_irq_default_affinity();
 
+	/* initialize nr_irqs based on nr_cpu_ids */
+	arch_probe_nr_irqs();
+	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
 	desc = irq_desc_legacy;
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
+	/* allocate irq_desc_ptrs array based on nr_irqs */
+	irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+	/* allocate based on nr_cpu_ids */
+	/* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
+	kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+					  sizeof(int));
+
 	for (i = 0; i < legacy_count; i++) {
 		desc[i].irq = i;
-		desc[i].kstat_irqs = kstat_irqs_legacy[i];
+		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-
+		init_alloc_desc_masks(&desc[i], 0, true);
 		irq_desc_ptrs[i] = desc + i;
 	}
 
-	for (i = legacy_count; i < NR_IRQS; i++)
+	for (i = legacy_count; i < nr_irqs; i++)
 		irq_desc_ptrs[i] = NULL;
 
 	return arch_early_irq_init();
@@ -167,7 +178,10 @@ int __init early_irq_init(void)
 
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+	if (irq_desc_ptrs && irq < nr_irqs)
+		return irq_desc_ptrs[irq];
+
+	return NULL;
 }
 
 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -176,10 +190,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 	unsigned long flags;
 	int node;
 
-	if (irq >= NR_IRQS) {
-		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-			irq, NR_IRQS);
-		WARN_ON(1);
+	if (irq >= nr_irqs) {
+		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+			irq, nr_irqs);
 		return NULL;
 	}
 
@@ -221,9 +234,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
 		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-		.affinity = CPU_MASK_ALL
-#endif
 	}
 };
 
@@ -235,12 +245,15 @@ int __init early_irq_init(void)
 
 	init_irq_default_affinity();
 
+	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
-	for (i = 0; i < count; i++)
+	for (i = 0; i < count; i++) {
 		desc[i].irq = i;
-
+		init_alloc_desc_masks(&desc[i], 0, true);
+	}
 	return arch_early_irq_init();
 }
 
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index e6d0a43cc125..40416a81a0f5 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -16,7 +16,14 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
 extern spinlock_t sparse_irq_lock;
+
+#ifdef CONFIG_SPARSE_IRQ
+/* irq_desc_ptrs allocated at boot time */
+extern struct irq_desc **irq_desc_ptrs;
+#else
+/* irq_desc_ptrs is a fixed size array */
 extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+#endif
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 291f03664552..a3a5dc9ef346 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -90,14 +90,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(&desc->affinity, cpumask);
+		cpumask_copy(desc->affinity, cpumask);
 		desc->chip->set_affinity(irq, cpumask);
 	} else {
 		desc->status |= IRQ_MOVE_PENDING;
-		cpumask_copy(&desc->pending_mask, cpumask);
+		cpumask_copy(desc->pending_mask, cpumask);
 	}
 #else
-	cpumask_copy(&desc->affinity, cpumask);
+	cpumask_copy(desc->affinity, cpumask);
 	desc->chip->set_affinity(irq, cpumask);
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
@@ -119,16 +119,16 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 	 * one of the targets is online.
 	 */
 	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-		if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+		if (cpumask_any_and(desc->affinity, cpu_online_mask)
 		    < nr_cpu_ids)
 			goto set_affinity;
 		else
 			desc->status &= ~IRQ_AFFINITY_SET;
 	}
 
-	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-	desc->chip->set_affinity(irq, &desc->affinity);
+	desc->chip->set_affinity(irq, desc->affinity);
 
 	return 0;
 }
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index bd72329e630c..e05ad9be43b7 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
 
 	desc->status &= ~IRQ_MOVE_PENDING;
 
-	if (unlikely(cpumask_empty(&desc->pending_mask)))
+	if (unlikely(cpumask_empty(desc->pending_mask)))
 		return;
 
 	if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
 		   < nr_cpu_ids)) {
-		cpumask_and(&desc->affinity,
-			    &desc->pending_mask, cpu_online_mask);
-		desc->chip->set_affinity(irq, &desc->affinity);
+		cpumask_and(desc->affinity,
+			    desc->pending_mask, cpu_online_mask);
+		desc->chip->set_affinity(irq, desc->affinity);
 	}
-	cpumask_clear(&desc->pending_mask);
+	cpumask_clear(desc->pending_mask);
 }
 
 void move_native_irq(int irq)
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index acd88356ac76..7f9b80434e32 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -38,15 +38,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
 	old_desc->kstat_irqs = NULL;
 }
 
-static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
 		 struct irq_desc *desc, int cpu)
 {
 	memcpy(desc, old_desc, sizeof(struct irq_desc));
+	if (!init_alloc_desc_masks(desc, cpu, false)) {
+		printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+				"for migration.\n", irq);
+		return false;
+	}
 	spin_lock_init(&desc->lock);
 	desc->cpu = cpu;
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+	init_copy_desc_masks(old_desc, desc);
 	arch_init_copy_chip_data(old_desc, desc, cpu);
+	return true;
 }
 
 static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -76,12 +83,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	node = cpu_to_node(cpu);
 	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
 	if (!desc) {
-		printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+		printk(KERN_ERR "irq %d: can not get new irq_desc "
+				"for migration.\n", irq);
+		/* still use old one */
+		desc = old_desc;
+		goto out_unlock;
+	}
+	if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
 		/* still use old one */
+		kfree(desc);
 		desc = old_desc;
 		goto out_unlock;
 	}
-	init_copy_one_irq_desc(irq, old_desc, desc, cpu);
 
 	irq_desc_ptrs[irq] = desc;
 	spin_unlock_irqrestore(&sparse_irq_lock, flags);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index aae3f742bcec..692363dd591f 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	const struct cpumask *mask = &desc->affinity;
+	const struct cpumask *mask = desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
-		mask = &desc->pending_mask;
+		mask = desc->pending_mask;
 #endif
 	seq_cpumask(m, mask);
 	seq_putc(m, '\n');
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 6a5fe93dd8bd..58762f7077ec 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -62,7 +62,7 @@ int do_getitimer(int which, struct itimerval *value)
 		struct task_cputime cputime;
 		cputime_t utime;
 
-		thread_group_cputime(tsk, &cputime);
+		thread_group_cputimer(tsk, &cputime);
 		utime = cputime.utime;
 		if (cputime_le(cval, utime)) { /* about to fire */
 			cval = jiffies_to_cputime(1);
@@ -82,7 +82,7 @@ int do_getitimer(int which, struct itimerval *value)
 		struct task_cputime times;
 		cputime_t ptime;
 
-		thread_group_cputime(tsk, &times);
+		thread_group_cputimer(tsk, &times);
 		ptime = cputime_add(times.utime, times.stime);
 		if (cputime_le(cval, ptime)) { /* about to fire */
 			cval = jiffies_to_cputime(1);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 8a6d7b08864e..795e7b67a228 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1130,7 +1130,7 @@ void crash_save_cpu(struct pt_regs *regs, int cpu)
 		return;
 	memset(&prstatus, 0, sizeof(prstatus));
 	prstatus.pr_pid = current->pid;
-	elf_core_copy_regs(&prstatus.pr_reg, regs);
+	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
 	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
 			      &prstatus, sizeof(prstatus));
 	final_note(buf);
diff --git a/kernel/panic.c b/kernel/panic.c
index 2a2ff36ff44d..32fe4eff1b89 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -74,6 +74,9 @@ NORET_TYPE void panic(const char * fmt, ...)
 	vsnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);
 	printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+	dump_stack();
+#endif
 	bust_spinlocks(0);
 
 	/*
@@ -355,15 +358,18 @@ EXPORT_SYMBOL(warn_slowpath);
 #endif
 
 #ifdef CONFIG_CC_STACKPROTECTOR
+
 /*
  * Called when gcc's -fstack-protector feature is used, and
  * gcc detects corruption of the on-stack canary value
  */
 void __stack_chk_fail(void)
 {
-	panic("stack-protector: Kernel stack is corrupted");
+	panic("stack-protector: Kernel stack is corrupted in: %p\n",
+		__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
+
 #endif
 
 core_param(panic, panic_timeout, int, 0644);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index fa07da94d7be..2313a4cc14ea 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -230,6 +230,71 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
 	return 0;
 }
 
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
+{
+	struct sighand_struct *sighand;
+	struct signal_struct *sig;
+	struct task_struct *t;
+
+	*times = INIT_CPUTIME;
+
+	rcu_read_lock();
+	sighand = rcu_dereference(tsk->sighand);
+	if (!sighand)
+		goto out;
+
+	sig = tsk->signal;
+
+	t = tsk;
+	do {
+		times->utime = cputime_add(times->utime, t->utime);
+		times->stime = cputime_add(times->stime, t->stime);
+		times->sum_exec_runtime += t->se.sum_exec_runtime;
+
+		t = next_thread(t);
+	} while (t != tsk);
+
+	times->utime = cputime_add(times->utime, sig->utime);
+	times->stime = cputime_add(times->stime, sig->stime);
+	times->sum_exec_runtime += sig->sum_sched_runtime;
+out:
+	rcu_read_unlock();
+}
+
+static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
+{
+	if (cputime_gt(b->utime, a->utime))
+		a->utime = b->utime;
+
+	if (cputime_gt(b->stime, a->stime))
+		a->stime = b->stime;
+
+	if (b->sum_exec_runtime > a->sum_exec_runtime)
+		a->sum_exec_runtime = b->sum_exec_runtime;
+}
+
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
+{
+	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+	struct task_cputime sum;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cputimer->lock, flags);
+	if (!cputimer->running) {
+		cputimer->running = 1;
+		/*
+		 * The POSIX timer interface allows for absolute time expiry
+		 * values through the TIMER_ABSTIME flag, therefore we have
+		 * to synchronize the timer to the clock every time we start
+		 * it.
+		 */
+		thread_group_cputime(tsk, &sum);
+		update_gt_cputime(&cputimer->cputime, &sum);
+	}
+	*times = cputimer->cputime;
+	spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
 /*
  * Sample a process (thread group) clock for the given group_leader task.
  * Must be called with tasklist_lock held for reading.
@@ -457,7 +522,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
 	struct task_cputime cputime;
 
-	thread_group_cputime(tsk, &cputime);
+	thread_group_cputimer(tsk, &cputime);
 	cleanup_timers(tsk->signal->cpu_timers,
 		       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
 }
@@ -964,6 +1029,19 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 }
 
+static void stop_process_timers(struct task_struct *tsk)
+{
+	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+	unsigned long flags;
+
+	if (!cputimer->running)
+		return;
+
+	spin_lock_irqsave(&cputimer->lock, flags);
+	cputimer->running = 0;
+	spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list. Per-thread timers
@@ -987,13 +1065,15 @@ static void check_process_timers(struct task_struct *tsk,
 	    sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
 	    list_empty(&timers[CPUCLOCK_VIRT]) &&
 	    cputime_eq(sig->it_virt_expires, cputime_zero) &&
-	    list_empty(&timers[CPUCLOCK_SCHED]))
+	    list_empty(&timers[CPUCLOCK_SCHED])) {
+		stop_process_timers(tsk);
 		return;
+	}
 
 	/*
 	 * Collect the current process totals.
 	 */
-	thread_group_cputime(tsk, &cputime);
+	thread_group_cputimer(tsk, &cputime);
 	utime = cputime.utime;
 	ptime = cputime_add(utime, cputime.stime);
 	sum_sched_runtime = cputime.sum_exec_runtime;
@@ -1259,7 +1339,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	if (!task_cputime_zero(&sig->cputime_expires)) {
 		struct task_cputime group_sample;
 
-		thread_group_cputime(tsk, &group_sample);
+		thread_group_cputimer(tsk, &group_sample);
 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
 			return 1;
 	}
@@ -1329,6 +1409,33 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 }
 
 /*
+ * Sample a process (thread group) timer for the given group_leader task.
+ * Must be called with tasklist_lock held for reading.
+ */
+static int cpu_timer_sample_group(const clockid_t which_clock,
+				  struct task_struct *p,
+				  union cpu_time_count *cpu)
+{
+	struct task_cputime cputime;
+
+	thread_group_cputimer(p, &cputime);
+	switch (CPUCLOCK_WHICH(which_clock)) {
+	default:
+		return -EINVAL;
+	case CPUCLOCK_PROF:
+		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+		break;
+	case CPUCLOCK_VIRT:
+		cpu->cpu = cputime.utime;
+		break;
+	case CPUCLOCK_SCHED:
+		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+		break;
+	}
+	return 0;
+}
+
+/*
  * Set one of the process-wide special case CPU timers.
  * The tsk->sighand->siglock must be held by the caller.
  * The *newval argument is relative and we update it to be absolute, *oldval
@@ -1341,7 +1448,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 	struct list_head *head;
 
 	BUG_ON(clock_idx == CPUCLOCK_SCHED);
-	cpu_clock_sample_group(clock_idx, tsk, &now);
+	cpu_timer_sample_group(clock_idx, tsk, &now);
 
 	if (oldval) {
 		if (!cputime_eq(*oldval, cputime_zero)) {
diff --git a/kernel/profile.c b/kernel/profile.c
index 784933acf5b8..7724e0409bae 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -114,12 +114,15 @@ int __ref profile_init(void)
 	if (!slab_is_available()) {
 		prof_buffer = alloc_bootmem(buffer_bytes);
 		alloc_bootmem_cpumask_var(&prof_cpu_mask);
+		cpumask_copy(prof_cpu_mask, cpu_possible_mask);
 		return 0;
 	}
 
 	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
 		return -ENOMEM;
 
+	cpumask_copy(prof_cpu_mask, cpu_possible_mask);
+
 	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
 	if (prof_buffer)
 		return 0;
diff --git a/kernel/sched.c b/kernel/sched.c
index 8ee437a5ec1d..61245b8d0f16 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2266,16 +2266,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	if (!sched_feat(SYNC_WAKEUPS))
 		sync = 0;
 
-	if (!sync) {
-		if (current->se.avg_overlap < sysctl_sched_migration_cost &&
-			  p->se.avg_overlap < sysctl_sched_migration_cost)
-			sync = 1;
-	} else {
-		if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
-			  p->se.avg_overlap >= sysctl_sched_migration_cost)
-			sync = 0;
-	}
-
 #ifdef CONFIG_SMP
 	if (sched_feat(LB_WAKEUP_UPDATE)) {
 		struct sched_domain *sd;
@@ -3890,19 +3880,24 @@ int select_nohz_load_balancer(int stop_tick)
 	int cpu = smp_processor_id();
 
 	if (stop_tick) {
-		cpumask_set_cpu(cpu, nohz.cpu_mask);
 		cpu_rq(cpu)->in_nohz_recently = 1;
 
-		/*
-		 * If we are going offline and still the leader, give up!
-		 */
-		if (!cpu_active(cpu) &&
-		    atomic_read(&nohz.load_balancer) == cpu) {
+		if (!cpu_active(cpu)) {
+			if (atomic_read(&nohz.load_balancer) != cpu)
+				return 0;
+
+			/*
+			 * If we are going offline and still the leader,
+			 * give up!
+			 */
 			if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
 				BUG();
+
 			return 0;
 		}
 
+		cpumask_set_cpu(cpu, nohz.cpu_mask);
+
 		/* time for ilb owner also to sleep */
 		if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
 			if (atomic_read(&nohz.load_balancer) == cpu)
@@ -5949,12 +5944,7 @@ void sched_show_task(struct task_struct *p)
 	printk(KERN_CONT " %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
-	{
-		unsigned long *n = end_of_stack(p);
-		while (!*n)
-			n++;
-		free = (unsigned long)n - (unsigned long)end_of_stack(p);
-	}
+	free = stack_not_used(p);
 #endif
 	printk(KERN_CONT "%5lu %5d %6d\n", free,
 		task_pid_nr(p), task_pid_nr(p->real_parent));
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a7e50ba185ac..0566f2a03c42 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1191,15 +1191,20 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	    int idx, unsigned long load, unsigned long this_load,
 	    unsigned int imbalance)
 {
+	struct task_struct *curr = this_rq->curr;
+	struct task_group *tg;
 	unsigned long tl = this_load;
 	unsigned long tl_per_task;
-	struct task_group *tg;
 	unsigned long weight;
 	int balanced;
 
 	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
+	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
+			p->se.avg_overlap > sysctl_sched_migration_cost))
+		sync = 0;
+
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
 	 * effect of the currently running task from the load
@@ -1426,7 +1431,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
-	if (sched_feat(WAKEUP_OVERLAP) && sync) {
+	if (sched_feat(WAKEUP_OVERLAP) && (sync ||
+			(se->avg_overlap < sysctl_sched_migration_cost &&
+			 pse->avg_overlap < sysctl_sched_migration_cost))) {
 		resched_task(curr);
 		return;
 	}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index bac1061cea2f..da932f4c8524 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -960,12 +960,13 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+				   const struct cpumask *mask)
 {
 	int first;
 
 	/* "this_cpu" is cheaper to preempt than a remote processor */
-	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
 		return this_cpu;
 
 	first = cpumask_first(mask);
@@ -981,6 +982,7 @@ static int find_lowest_rq(struct task_struct *task)
 	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
+	cpumask_var_t domain_mask;
 
 	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
@@ -1013,19 +1015,25 @@ static int find_lowest_rq(struct task_struct *task)
 	if (this_cpu == cpu)
 		this_cpu = -1; /* Skip this_cpu opt if the same */
 
-	for_each_domain(cpu, sd) {
-		if (sd->flags & SD_WAKE_AFFINE) {
-			cpumask_t domain_mask;
-			int best_cpu;
+	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+		for_each_domain(cpu, sd) {
+			if (sd->flags & SD_WAKE_AFFINE) {
+				int best_cpu;
 
-			cpumask_and(&domain_mask, sched_domain_span(sd),
-				    lowest_mask);
+				cpumask_and(domain_mask,
+					    sched_domain_span(sd),
+					    lowest_mask);
 
-			best_cpu = pick_optimal_cpu(this_cpu,
-						    &domain_mask);
-			if (best_cpu != -1)
-				return best_cpu;
+				best_cpu = pick_optimal_cpu(this_cpu,
+							    domain_mask);
+
+				if (best_cpu != -1) {
+					free_cpumask_var(domain_mask);
+					return best_cpu;
+				}
+			}
 		}
+		free_cpumask_var(domain_mask);
 	}
 
 	/*
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 8ab0cef8ecab..a8f93dd374e1 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -296,19 +296,21 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 static inline void account_group_user_time(struct task_struct *tsk,
 					   cputime_t cputime)
 {
-	struct task_cputime *times;
-	struct signal_struct *sig;
+	struct thread_group_cputimer *cputimer;
 
 	/* tsk == current, ensure it is safe to use ->signal */
 	if (unlikely(tsk->exit_state))
 		return;
 
-	sig = tsk->signal;
-	times = &sig->cputime.totals;
+	cputimer = &tsk->signal->cputimer;
 
-	spin_lock(&times->lock);
-	times->utime = cputime_add(times->utime, cputime);
-	spin_unlock(&times->lock);
+	if (!cputimer->running)
+		return;
+
+	spin_lock(&cputimer->lock);
+	cputimer->cputime.utime =
+		cputime_add(cputimer->cputime.utime, cputime);
+	spin_unlock(&cputimer->lock);
 }
 
 /**
@@ -324,19 +326,21 @@ static inline void account_group_user_time(struct task_struct *tsk,
 static inline void account_group_system_time(struct task_struct *tsk,
 					     cputime_t cputime)
 {
-	struct task_cputime *times;
-	struct signal_struct *sig;
+	struct thread_group_cputimer *cputimer;
 
 	/* tsk == current, ensure it is safe to use ->signal */
 	if (unlikely(tsk->exit_state))
 		return;
 
-	sig = tsk->signal;
-	times = &sig->cputime.totals;
+	cputimer = &tsk->signal->cputimer;
+
+	if (!cputimer->running)
+		return;
 
-	spin_lock(&times->lock);
-	times->stime = cputime_add(times->stime, cputime);
-	spin_unlock(&times->lock);
+	spin_lock(&cputimer->lock);
+	cputimer->cputime.stime =
+		cputime_add(cputimer->cputime.stime, cputime);
+	spin_unlock(&cputimer->lock);
 }
 
 /**
@@ -352,7 +356,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
 static inline void account_group_exec_runtime(struct task_struct *tsk,
 					      unsigned long long ns)
 {
-	struct task_cputime *times;
+	struct thread_group_cputimer *cputimer;
 	struct signal_struct *sig;
 
 	sig = tsk->signal;
@@ -361,9 +365,12 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
 	if (unlikely(!sig))
 		return;
 
-	times = &sig->cputime.totals;
+	cputimer = &sig->cputimer;
+
+	if (!cputimer->running)
+		return;
 
-	spin_lock(&times->lock);
-	times->sum_exec_runtime += ns;
-	spin_unlock(&times->lock);
+	spin_lock(&cputimer->lock);
+	cputimer->cputime.sum_exec_runtime += ns;
+	spin_unlock(&cputimer->lock);
 }
diff --git a/kernel/signal.c b/kernel/signal.c
index b6b36768b758..2a74fe87c0dd 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1367,7 +1367,6 @@ int do_notify_parent(struct task_struct *tsk, int sig)
 	struct siginfo info;
 	unsigned long flags;
 	struct sighand_struct *psig;
-	struct task_cputime cputime;
 	int ret = sig;
 
 	BUG_ON(sig == -1);
@@ -1397,9 +1396,10 @@ int do_notify_parent(struct task_struct *tsk, int sig)
 	info.si_uid = __task_cred(tsk)->uid;
 	rcu_read_unlock();
 
-	thread_group_cputime(tsk, &cputime);
-	info.si_utime = cputime_to_jiffies(cputime.utime);
-	info.si_stime = cputime_to_jiffies(cputime.stime);
+	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
+				tsk->signal->utime));
+	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
+				tsk->signal->stime));
 
 	info.si_status = tsk->exit_code & 0x7f;
 	if (tsk->exit_code & 0x80)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index bdbe9de9cd8d..0365b4899a3d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -795,6 +795,11 @@ int __init __weak early_irq_init(void)
 	return 0;
 }
 
+int __init __weak arch_probe_nr_irqs(void)
+{
+	return 0;
+}
+
 int __init __weak arch_early_irq_init(void)
 {
 	return 0;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 790f9d785663..c5ef44ff850f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -101,6 +101,7 @@ static int two = 2;
 
 static int zero;
 static int one = 1;
+static unsigned long one_ul = 1;
 static int one_hundred = 100;
 
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
@@ -974,7 +975,7 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &dirty_background_bytes_handler,
 		.strategy	= &sysctl_intvec,
-		.extra1		= &one,
+		.extra1		= &one_ul,
 	},
 	{
 		.ctl_name	= VM_DIRTY_RATIO,
@@ -995,7 +996,7 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &dirty_bytes_handler,
 		.strategy	= &sysctl_intvec,
-		.extra1		= &one,
+		.extra1		= &one_ul,
 	},
 	{
 		.procname	= "dirty_writeback_centisecs",
diff --git a/kernel/user.c b/kernel/user.c
index 477b6660f447..3551ac742395 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -72,6 +72,7 @@ static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
 static void uid_hash_remove(struct user_struct *up)
 {
 	hlist_del_init(&up->uidhash_node);
+	put_user_ns(up->user_ns);
 }
 
 static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
@@ -334,7 +335,6 @@ static void free_user(struct user_struct *up, unsigned long flags)
 	atomic_inc(&up->__count);
 	spin_unlock_irqrestore(&uidhash_lock, flags);
 
-	put_user_ns(up->user_ns);
 	INIT_WORK(&up->work, remove_user_sysfs_dir);
 	schedule_work(&up->work);
 }
@@ -357,7 +357,6 @@ static void free_user(struct user_struct *up, unsigned long flags)
 	sched_destroy_user(up);
 	key_put(up->uid_keyring);
 	key_put(up->session_keyring);
-	put_user_ns(up->user_ns);
 	kmem_cache_free(uid_cachep, up);
 }
 