Diffstat (limited to 'kernel')
-rw-r--r--  kernel/hrtimer.c            1
-rw-r--r--  kernel/irq/autoprobe.c     15
-rw-r--r--  kernel/irq/handle.c        48
-rw-r--r--  kernel/irq/numa_migrate.c   7
-rw-r--r--  kernel/irq/spurious.c       5
-rw-r--r--  kernel/sched.c            115
-rw-r--r--  kernel/softirq.c           20
-rw-r--r--  kernel/time/tick-sched.c   12
-rw-r--r--  kernel/timer.c             15
9 files changed, 136 insertions(+), 102 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index bda9cb924276..eb2bfefa6dcc 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -32,7 +32,6 @@
  */
 
 #include <linux/cpu.h>
-#include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 650ce4102a63..cc0f7321b8ce 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -40,9 +40,6 @@ unsigned long probe_irq_on(void)
 	 * flush such a longstanding irq before considering it as spurious.
 	 */
 	for_each_irq_desc_reverse(i, desc) {
-		if (!desc)
-			continue;
-
 		spin_lock_irq(&desc->lock);
 		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
 			/*
@@ -71,9 +68,6 @@ unsigned long probe_irq_on(void)
 	 * happened in the previous stage, it may have masked itself)
 	 */
 	for_each_irq_desc_reverse(i, desc) {
-		if (!desc)
-			continue;
-
 		spin_lock_irq(&desc->lock);
 		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
 			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
@@ -92,9 +86,6 @@ unsigned long probe_irq_on(void)
 	 * Now filter out any obviously spurious interrupts
 	 */
 	for_each_irq_desc(i, desc) {
-		if (!desc)
-			continue;
-
 		spin_lock_irq(&desc->lock);
 		status = desc->status;
 
@@ -133,9 +124,6 @@ unsigned int probe_irq_mask(unsigned long val)
 	int i;
 
 	for_each_irq_desc(i, desc) {
-		if (!desc)
-			continue;
-
 		spin_lock_irq(&desc->lock);
 		status = desc->status;
 
@@ -178,9 +166,6 @@ int probe_irq_off(unsigned long val)
 	unsigned int status;
 
 	for_each_irq_desc(i, desc) {
-		if (!desc)
-			continue;
-
 		spin_lock_irq(&desc->lock);
 		status = desc->status;
 
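
All five hunks above drop the same open-coded NULL check. That is only safe because the descriptor iterators themselves now guarantee a non-NULL desc on every iteration. As a hedged sketch (the real definition lives in include/linux/irqnr.h and may differ in detail), such an iterator can hide the holes of a sparse descriptor table internally:

/*
 * Sketch only: an iterator that skips NULL slots itself, so callers
 * no longer need "if (!desc) continue;".  The dangling-else binds
 * the caller's loop body to the else branch.
 */
#define for_each_irq_desc(irq, desc)					\
	for ((irq) = 0, (desc) = irq_to_desc(irq);			\
	     (irq) < nr_irqs;						\
	     (irq)++, (desc) = irq_to_desc(irq))			\
		if (!(desc))						\
			;	/* hole in the sparse table: skip */	\
		else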
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 6492400cb50d..c20db0be9173 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -56,10 +56,6 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
-void __init __attribute__((weak)) arch_early_irq_init(void)
-{
-}
-
 #ifdef CONFIG_SPARSE_IRQ
 static struct irq_desc irq_desc_init = {
 	.irq = -1,
@@ -90,13 +86,11 @@ void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
 	desc->kstat_irqs = (unsigned int *)ptr;
 }
 
-void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu)
-{
-}
-
 static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
 {
 	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
+
+	spin_lock_init(&desc->lock);
 	desc->irq = irq;
 #ifdef CONFIG_SMP
 	desc->cpu = cpu;
@@ -134,7 +128,7 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
 /* FIXME: use bootmem alloc ...*/
 static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
 
-void __init early_irq_init(void)
+int __init early_irq_init(void)
 {
 	struct irq_desc *desc;
 	int legacy_count;
@@ -146,6 +140,7 @@ void __init early_irq_init(void)
 	for (i = 0; i < legacy_count; i++) {
 		desc[i].irq = i;
 		desc[i].kstat_irqs = kstat_irqs_legacy[i];
+		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
 
 		irq_desc_ptrs[i] = desc + i;
 	}
@@ -153,7 +148,7 @@ void __init early_irq_init(void)
 	for (i = legacy_count; i < NR_IRQS; i++)
 		irq_desc_ptrs[i] = NULL;
 
-	arch_early_irq_init();
+	return arch_early_irq_init();
 }
 
 struct irq_desc *irq_to_desc(unsigned int irq)
@@ -203,7 +198,7 @@ out_unlock:
 	return desc;
 }
 
-#else
+#else /* !CONFIG_SPARSE_IRQ */
 
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS-1] = {
@@ -218,7 +213,31 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	}
 };
 
-#endif
+int __init early_irq_init(void)
+{
+	struct irq_desc *desc;
+	int count;
+	int i;
+
+	desc = irq_desc;
+	count = ARRAY_SIZE(irq_desc);
+
+	for (i = 0; i < count; i++)
+		desc[i].irq = i;
+
+	return arch_early_irq_init();
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
+}
+
+struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+	return irq_to_desc(irq);
+}
+#endif /* !CONFIG_SPARSE_IRQ */
 
 /*
  * What should we do if we get a hw irq event on an illegal vector?
@@ -428,9 +447,6 @@ void early_init_irq_lock_class(void)
 	int i;
 
 	for_each_irq_desc(i, desc) {
-		if (!desc)
-			continue;
-
 		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	}
 }
@@ -439,7 +455,7 @@ void early_init_irq_lock_class(void)
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	return desc->kstat_irqs[cpu];
+	return desc ? desc->kstat_irqs[cpu] : 0;
 }
 #endif
 EXPORT_SYMBOL(kstat_irqs_cpu);
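
early_irq_init() now returns int and propagates the result of arch_early_irq_init(), whose weak default (moved to kernel/softirq.c, below) simply returns 0. A hedged sketch of how an architecture could supply a strong override that reports failure -- the body here is hypothetical and not taken from any real arch:

/*
 * Hypothetical arch override: a strong definition wins over the
 * __weak default at link time, and its error code now propagates
 * up through early_irq_init().
 */
int __init arch_early_irq_init(void)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < NR_IRQS_LEGACY; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			return -ENOMEM;	/* caller now sees the failure */
		desc->chip_data = NULL;	/* arch-private per-IRQ state */
	}
	return 0;
}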
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 089c3746358a..ecf765c6a77a 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -42,6 +42,7 @@ static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
 				 struct irq_desc *desc, int cpu)
 {
 	memcpy(desc, old_desc, sizeof(struct irq_desc));
+	spin_lock_init(&desc->lock);
 	desc->cpu = cpu;
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
@@ -74,10 +75,8 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 
 	node = cpu_to_node(cpu);
 	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-	printk(KERN_DEBUG " move irq_desc for %d to cpu %d node %d\n",
-		 irq, cpu, node);
 	if (!desc) {
-		printk(KERN_ERR "can not get new irq_desc for moving\n");
+		printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
 		/* still use old one */
 		desc = old_desc;
 		goto out_unlock;
@@ -106,8 +105,6 @@ struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu)
 		return desc;
 
 	old_cpu = desc->cpu;
-	printk(KERN_DEBUG
-		"try to move irq_desc from cpu %d to %d\n", old_cpu, cpu);
 	if (old_cpu != cpu) {
 		node = cpu_to_node(cpu);
 		old_node = cpu_to_node(old_cpu);
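
The spin_lock_init() added right after the memcpy() is the point of the first hunk: memcpy() duplicates the source descriptor byte for byte, including the current image of its spinlock, so the copy must get a pristine lock of its own before anyone can take it. The pattern in miniature, as a sketch:

/* Sketch of the pattern: never trust a lock that arrived via memcpy(). */
struct obj {
	spinlock_t lock;
	int data;
};

static void copy_obj(struct obj *dst, const struct obj *src)
{
	memcpy(dst, src, sizeof(*dst));	/* copies a possibly-held lock image */
	spin_lock_init(&dst->lock);	/* give dst a pristine, unlocked lock */
}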
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 3738107531fd..dd364c11e56e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -91,9 +91,6 @@ static int misrouted_irq(int irq)
 	int i, ok = 0;
 
 	for_each_irq_desc(i, desc) {
-		if (!desc)
-			continue;
-
 		if (!i)
 			continue;
 
@@ -115,8 +112,6 @@ static void poll_spurious_irqs(unsigned long dummy)
 	for_each_irq_desc(i, desc) {
 		unsigned int status;
 
-		if (!desc)
-			continue;
 		if (!i)
 			continue;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index dd862d70e715..545c6fccd1dc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4150,13 +4150,17 @@ unsigned long long task_delta_exec(struct task_struct *p)
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in user space since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
  */
-void account_user_time(struct task_struct *p, cputime_t cputime)
+void account_user_time(struct task_struct *p, cputime_t cputime,
+		       cputime_t cputime_scaled)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
 	cputime64_t tmp;
 
+	/* Add user time to process. */
 	p->utime = cputime_add(p->utime, cputime);
+	p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
 	account_group_user_time(p, cputime);
 
 	/* Add user time to cpustat. */
@@ -4173,51 +4177,48 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
  * Account guest cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in virtual machine since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
  */
-static void account_guest_time(struct task_struct *p, cputime_t cputime)
+static void account_guest_time(struct task_struct *p, cputime_t cputime,
+			       cputime_t cputime_scaled)
 {
 	cputime64_t tmp;
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
 
 	tmp = cputime_to_cputime64(cputime);
 
+	/* Add guest time to process. */
 	p->utime = cputime_add(p->utime, cputime);
+	p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
 	account_group_user_time(p, cputime);
 	p->gtime = cputime_add(p->gtime, cputime);
 
+	/* Add guest time to cpustat. */
 	cpustat->user = cputime64_add(cpustat->user, tmp);
 	cpustat->guest = cputime64_add(cpustat->guest, tmp);
 }
 
 /*
- * Account scaled user cpu time to a process.
- * @p: the process that the cpu time gets accounted to
- * @cputime: the cpu time spent in user space since the last update
- */
-void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
-{
-	p->utimescaled = cputime_add(p->utimescaled, cputime);
-}
-
-/*
  * Account system cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @hardirq_offset: the offset to subtract from hardirq_count()
  * @cputime: the cpu time spent in kernel space since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
  */
 void account_system_time(struct task_struct *p, int hardirq_offset,
-			 cputime_t cputime)
+			 cputime_t cputime, cputime_t cputime_scaled)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	struct rq *rq = this_rq();
 	cputime64_t tmp;
 
 	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
-		account_guest_time(p, cputime);
+		account_guest_time(p, cputime, cputime_scaled);
 		return;
 	}
 
+	/* Add system time to process. */
 	p->stime = cputime_add(p->stime, cputime);
+	p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
 	account_group_system_time(p, cputime);
 
 	/* Add system time to cpustat. */
@@ -4226,48 +4227,84 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 		cpustat->irq = cputime64_add(cpustat->irq, tmp);
 	else if (softirq_count())
 		cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
-	else if (p != rq->idle)
-		cpustat->system = cputime64_add(cpustat->system, tmp);
-	else if (atomic_read(&rq->nr_iowait) > 0)
-		cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
 	else
-		cpustat->idle = cputime64_add(cpustat->idle, tmp);
+		cpustat->system = cputime64_add(cpustat->system, tmp);
+
 	/* Account for system time used */
 	acct_update_integrals(p);
 }
 
 /*
- * Account scaled system cpu time to a process.
- * @p: the process that the cpu time gets accounted to
- * @hardirq_offset: the offset to subtract from hardirq_count()
- * @cputime: the cpu time spent in kernel space since the last update
+ * Account for involuntary wait time.
+ * @steal: the cpu time spent in involuntary wait
  */
-void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
+void account_steal_time(cputime_t cputime)
 {
-	p->stimescaled = cputime_add(p->stimescaled, cputime);
+	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	cputime64_t cputime64 = cputime_to_cputime64(cputime);
+
+	cpustat->steal = cputime64_add(cpustat->steal, cputime64);
 }
 
 /*
- * Account for involuntary wait time.
- * @p: the process from which the cpu time has been stolen
- * @steal: the cpu time spent in involuntary wait
+ * Account for idle time.
+ * @cputime: the cpu time spent in idle wait
  */
-void account_steal_time(struct task_struct *p, cputime_t steal)
+void account_idle_time(cputime_t cputime)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	cputime64_t tmp = cputime_to_cputime64(steal);
+	cputime64_t cputime64 = cputime_to_cputime64(cputime);
 	struct rq *rq = this_rq();
 
-	if (p == rq->idle) {
-		p->stime = cputime_add(p->stime, steal);
-		if (atomic_read(&rq->nr_iowait) > 0)
-			cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
-		else
-			cpustat->idle = cputime64_add(cpustat->idle, tmp);
-	} else
-		cpustat->steal = cputime64_add(cpustat->steal, tmp);
+	if (atomic_read(&rq->nr_iowait) > 0)
+		cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
+	else
+		cpustat->idle = cputime64_add(cpustat->idle, cputime64);
 }
 
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+
+/*
+ * Account a single tick of cpu time.
+ * @p: the process that the cpu time gets accounted to
+ * @user_tick: indicates if the tick is a user or a system tick
+ */
+void account_process_tick(struct task_struct *p, int user_tick)
+{
+	cputime_t one_jiffy = jiffies_to_cputime(1);
+	cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+	struct rq *rq = this_rq();
+
+	if (user_tick)
+		account_user_time(p, one_jiffy, one_jiffy_scaled);
+	else if (p != rq->idle)
+		account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+				    one_jiffy_scaled);
+	else
+		account_idle_time(one_jiffy);
+}
+
+/*
+ * Account multiple ticks of steal time.
+ * @p: the process from which the cpu time has been stolen
+ * @ticks: number of stolen ticks
+ */
+void account_steal_ticks(unsigned long ticks)
+{
+	account_steal_time(jiffies_to_cputime(ticks));
+}
+
+/*
+ * Account multiple ticks of idle time.
+ * @ticks: number of stolen ticks
+ */
+void account_idle_ticks(unsigned long ticks)
+{
+	account_idle_time(jiffies_to_cputime(ticks));
+}
+
+#endif
+
 /*
  * Use precise platform statistics if available:
  */
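
The net effect in sched.c: the classification into system/steal/idle/iowait now lives inside dedicated helpers, account_steal_time() and account_idle_time() no longer take a task argument, and the per-tick dispatch is centralized in account_process_tick(). A hedged sketch of a caller on the new API -- the hypervisor query below is hypothetical; only the account_steal_time() call and its signature come from this patch:

/*
 * Sketch: an arch path feeding stolen time through the new helper.
 * hypervisor_stolen_time() is a made-up stand-in for however the
 * platform learns about cycles taken by the hypervisor.
 */
static void flush_stolen_time(void)
{
	cputime_t stolen = hypervisor_stolen_time();	/* hypothetical */

	if (stolen)
		account_steal_time(stolen);	/* new: no task argument */
}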
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b7568d7def23..bdbe9de9cd8d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -784,3 +784,23 @@ int on_each_cpu(void (*func) (void *info), void *info, int wait)
 }
 EXPORT_SYMBOL(on_each_cpu);
 #endif
+
+/*
+ * [ These __weak aliases are kept in a separate compilation unit, so that
+ *   GCC does not inline them incorrectly. ]
+ */
+
+int __init __weak early_irq_init(void)
+{
+	return 0;
+}
+
+int __init __weak arch_early_irq_init(void)
+{
+	return 0;
+}
+
+int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
+{
+	return 0;
+}
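
These __weak definitions give every architecture a do-nothing default; an arch that needs real setup defines a strong symbol with the same signature, and the linker prefers it. The bracketed comment explains why they sit in their own compilation unit: GCC may otherwise inline a weak function within the translation unit that defines it. The mechanism in isolation, as a small compilable demo outside the kernel:

/* weak_demo.c -- build with: gcc weak_demo.c -o weak_demo
 * Linking in another object that defines a strong arch_early_irq_init()
 * would silently replace the default below. */
#include <stdio.h>

int __attribute__((weak)) arch_early_irq_init(void)
{
	return 0;	/* weak default: nothing to do */
}

int main(void)
{
	printf("arch_early_irq_init() = %d\n", arch_early_irq_init());
	return 0;
}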
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 76a574bbef97..1b6c05bd0d0a 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -419,7 +419,9 @@ void tick_nohz_restart_sched_tick(void)
 {
 	int cpu = smp_processor_id();
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	unsigned long ticks;
+#endif
 	ktime_t now;
 
 	local_irq_disable();
@@ -441,6 +443,7 @@ void tick_nohz_restart_sched_tick(void)
 	tick_do_update_jiffies64(now);
 	cpumask_clear_cpu(cpu, nohz_cpu_mask);
 
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	/*
 	 * We stopped the tick in idle. Update process times would miss the
 	 * time we slept as update_process_times does only a 1 tick
@@ -450,12 +453,9 @@ void tick_nohz_restart_sched_tick(void)
 	/*
 	 * We might be one off. Do not randomly account a huge number of ticks!
 	 */
-	if (ticks && ticks < LONG_MAX) {
-		add_preempt_count(HARDIRQ_OFFSET);
-		account_system_time(current, HARDIRQ_OFFSET,
-				    jiffies_to_cputime(ticks));
-		sub_preempt_count(HARDIRQ_OFFSET);
-	}
+	if (ticks && ticks < LONG_MAX)
+		account_idle_ticks(ticks);
+#endif
 
 	touch_softlockup_watchdog();
 	/*
diff --git a/kernel/timer.c b/kernel/timer.c
index 566257d1dc10..dee3f641a7a7 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1018,21 +1018,6 @@ unsigned long get_next_timer_interrupt(unsigned long now)
 }
 #endif
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-void account_process_tick(struct task_struct *p, int user_tick)
-{
-	cputime_t one_jiffy = jiffies_to_cputime(1);
-
-	if (user_tick) {
-		account_user_time(p, one_jiffy);
-		account_user_time_scaled(p, cputime_to_scaled(one_jiffy));
-	} else {
-		account_system_time(p, HARDIRQ_OFFSET, one_jiffy);
-		account_system_time_scaled(p, cputime_to_scaled(one_jiffy));
-	}
-}
-#endif
-
 /*
  * Called from the timer interrupt handler to charge one tick to the current
  * process. user_tick is 1 if the tick is user time, 0 for system.