 arch/powerpc/Kconfig              |  15
 arch/powerpc/kernel/asm-offsets.c |   3
 arch/powerpc/kernel/entry_64.S    |   7
 arch/powerpc/kernel/head_64.S     |   9
 arch/powerpc/kernel/irq.c         |  30
 arch/powerpc/kernel/process.c     |   7
 arch/powerpc/kernel/smp.c         |   4
 arch/powerpc/kernel/time.c        | 236
 include/asm-powerpc/cputable.h    |   4
 include/asm-powerpc/cputime.h     | 202
 include/asm-powerpc/irq.h         |   6
 include/asm-powerpc/paca.h        |   5
 include/asm-powerpc/ppc_asm.h     |  42
 include/asm-powerpc/system.h      |   4
 include/asm-powerpc/time.h        |  15
 include/asm-ppc/time.h            |   5
 16 files changed, 577 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index fb0dcb994b84..d112aed2999b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -250,6 +250,21 @@ config PPC_STD_MMU_32
 	def_bool y
 	depends on PPC_STD_MMU && PPC32
 
+config VIRT_CPU_ACCOUNTING
+	bool "Deterministic task and CPU time accounting"
+	depends on PPC64
+	default y
+	help
+	  Select this option to enable more accurate task and CPU time
+	  accounting.  This is done by reading a CPU counter on each
+	  kernel entry and exit and on transitions within the kernel
+	  between system, softirq and hardirq state, so there is a
+	  small performance impact.  This also enables accounting of
+	  stolen time on logically-partitioned systems running on
+	  IBM POWER5-based machines.
+
+	  If in doubt, say Y here.
+
 config SMP
 	depends on PPC_STD_MMU
 	bool "Symmetric multi-processing support"
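
The help text's model (read a CPU counter on each kernel entry and exit and on state transitions) reduces to: sample the counter at every transition and bill the elapsed delta to the state being left. A toy userspace sketch of that idea follows; the names are illustrative only, since the kernel's real state lives in the paca and in the ACCOUNT_CPU_USER_* macros further down.

	/* Toy model of transition-based time accounting; illustrative only. */
	#include <stdint.h>
	#include <stdio.h>

	enum ctx { USER, SYSTEM, SOFTIRQ, HARDIRQ, NCTX };

	static const char *names[NCTX] = { "user", "system", "softirq", "hardirq" };
	static uint64_t spent[NCTX];	/* ticks accumulated per state */
	static uint64_t last_sample;	/* counter value at the last transition */
	static enum ctx cur = USER;

	/* on every transition, bill the elapsed ticks to the state we leave */
	static void transition(enum ctx next, uint64_t now)
	{
		spent[cur] += now - last_sample;
		last_sample = now;
		cur = next;
	}

	int main(void)
	{
		int i;

		transition(SYSTEM, 100);	/* syscall entry after 100 user ticks */
		transition(HARDIRQ, 130);	/* interrupt during the syscall */
		transition(SYSTEM, 150);	/* back to syscall processing */
		transition(USER, 180);		/* syscall returns */
		for (i = 0; i < NCTX; i++)	/* user=100 system=60 hardirq=20 */
			printf("%s=%llu\n", names[i], (unsigned long long)spent[i]);
		return 0;
	}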
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 840aad43a98b..18810ac55bcc 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -137,6 +137,9 @@ int main(void)
 	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
 	DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
 	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+	DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
+	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
+	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
 
 	DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
 	DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 79a0c910f0d8..8f606c1889fa 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -61,6 +61,7 @@ system_call_common:
 	std	r12,_MSR(r1)
 	std	r0,GPR0(r1)
 	std	r10,GPR1(r1)
+	ACCOUNT_CPU_USER_ENTRY(r10, r11)
 	std	r2,GPR2(r1)
 	std	r3,GPR3(r1)
 	std	r4,GPR4(r1)
@@ -168,8 +169,9 @@ syscall_error_cont:
 	stdcx.	r0,0,r1			/* to clear the reservation */
 	andi.	r6,r8,MSR_PR
 	ld	r4,_LINK(r1)
-	beq-	1f			/* only restore r13 if */
-	ld	r13,GPR13(r1)		/* returning to usermode */
+	beq-	1f
+	ACCOUNT_CPU_USER_EXIT(r11, r12)
+	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
 1:	ld	r2,GPR2(r1)
 	li	r12,MSR_RI
 	andc	r11,r10,r12
@@ -536,6 +538,7 @@ restore:
 	 * userspace
 	 */
 	beq	1f
+	ACCOUNT_CPU_USER_EXIT(r3, r4)
 	REST_GPR(13, r1)
 1:
 	ld	r3,_CTR(r1)
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 2b21ec499285..be3ae7733577 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -277,6 +277,7 @@ exception_marker:
 	std	r10,0(r1);		/* make stack chain pointer	*/ \
 	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
 	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \
+	ACCOUNT_CPU_USER_ENTRY(r9, r10);				   \
 	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
 	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \
 	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
@@ -844,6 +845,14 @@ fast_exception_return:
 	ld	r11,_NIP(r1)
 	andi.	r3,r12,MSR_RI		/* check if RI is set */
 	beq-	unrecov_fer
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	andi.	r3,r12,MSR_PR
+	beq	2f
+	ACCOUNT_CPU_USER_EXIT(r3, r4)
+2:
+#endif
+
 	ld	r3,_CCR(r1)
 	ld	r4,_LINK(r1)
 	ld	r5,_CTR(r1)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index edb2b00edbd2..24dc8117b822 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -369,6 +369,7 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
 	return NO_IRQ;
 
 }
+#endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_IRQSTACKS
 struct thread_info *softirq_ctx[NR_CPUS];
@@ -392,10 +393,24 @@ void irq_ctx_init(void)
 	}
 }
 
+static inline void do_softirq_onstack(void)
+{
+	struct thread_info *curtp, *irqtp;
+
+	curtp = current_thread_info();
+	irqtp = softirq_ctx[smp_processor_id()];
+	irqtp->task = curtp->task;
+	call_do_softirq(irqtp);
+	irqtp->task = NULL;
+}
+
+#else
+#define do_softirq_onstack()	__do_softirq()
+#endif /* CONFIG_IRQSTACKS */
+
 void do_softirq(void)
 {
 	unsigned long flags;
-	struct thread_info *curtp, *irqtp;
 
 	if (in_interrupt())
 		return;
@@ -403,19 +418,18 @@ void do_softirq(void)
 	local_irq_save(flags);
 
 	if (local_softirq_pending()) {
-		curtp = current_thread_info();
-		irqtp = softirq_ctx[smp_processor_id()];
-		irqtp->task = curtp->task;
-		call_do_softirq(irqtp);
-		irqtp->task = NULL;
+		account_system_vtime(current);
+		local_bh_disable();
+		do_softirq_onstack();
+		account_system_vtime(current);
+		__local_bh_enable();
 	}
 
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(do_softirq);
 
-#endif /* CONFIG_IRQSTACKS */
-
+#ifdef CONFIG_PPC64
 static int __init setup_noirqdistrib(char *str)
 {
 	distribute_irqs = 0;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index dd774c3c9302..1770a066c217 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -45,9 +45,9 @@
 #include <asm/mmu.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
+#include <asm/time.h>
 #ifdef CONFIG_PPC64
 #include <asm/firmware.h>
-#include <asm/time.h>
 #endif
 
 extern unsigned long _get_SP(void);
@@ -328,6 +328,11 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif
 
 	local_irq_save(flags);
+
+	account_system_vtime(current);
+	account_process_vtime(current);
+	calculate_steal_time();
+
 	last = _switch(old_thread, new_thread);
 
 	local_irq_restore(flags);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 13595a64f013..805eaedbc308 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -541,7 +541,7 @@ int __devinit start_secondary(void *unused)
 		smp_ops->take_timebase();
 
 	if (system_state > SYSTEM_BOOTING)
-		per_cpu(last_jiffy, cpu) = get_tb();
+		snapshot_timebase();
 
 	spin_lock(&call_lock);
 	cpu_set(cpu, cpu_online_map);
@@ -573,6 +573,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 	set_cpus_allowed(current, old_mask);
 
+	snapshot_timebases();
+
 	dump_numa_cpu_topology();
 }
 
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 2a7ddc579379..0b34db28916f 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -51,6 +51,7 @@
 #include <linux/percpu.h>
 #include <linux/rtc.h>
 #include <linux/jiffies.h>
+#include <linux/posix-timers.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -135,6 +136,220 @@ unsigned long tb_last_stamp;
  */
 DEFINE_PER_CPU(unsigned long, last_jiffy);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/*
+ * Factors for converting from cputime_t (timebase ticks) to
+ * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
+ * These are all stored as 0.64 fixed-point binary fractions.
+ */
+u64 __cputime_jiffies_factor;
+u64 __cputime_msec_factor;
+u64 __cputime_sec_factor;
+u64 __cputime_clockt_factor;
+
+static void calc_cputime_factors(void)
+{
+	struct div_result res;
+
+	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
+	__cputime_jiffies_factor = res.result_low;
+	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
+	__cputime_msec_factor = res.result_low;
+	div128_by_32(1, 0, tb_ticks_per_sec, &res);
+	__cputime_sec_factor = res.result_low;
+	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
+	__cputime_clockt_factor = res.result_low;
+}
+
+/*
+ * Read the PURR on systems that have it, otherwise the timebase.
+ */
+static u64 read_purr(void)
+{
+	if (cpu_has_feature(CPU_FTR_PURR))
+		return mfspr(SPRN_PURR);
+	return mftb();
+}
+
+/*
+ * Account time for a transition between system, hard irq
+ * or soft irq state.
+ */
+void account_system_vtime(struct task_struct *tsk)
+{
+	u64 now, delta;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	now = read_purr();
+	delta = now - get_paca()->startpurr;
+	get_paca()->startpurr = now;
+	if (!in_interrupt()) {
+		delta += get_paca()->system_time;
+		get_paca()->system_time = 0;
+	}
+	account_system_time(tsk, 0, delta);
+	local_irq_restore(flags);
+}
+
+/*
+ * Transfer the user and system times accumulated in the paca
+ * by the exception entry and exit code to the generic process
+ * user and system time records.
+ * Must be called with interrupts disabled.
+ */
+void account_process_vtime(struct task_struct *tsk)
+{
+	cputime_t utime;
+
+	utime = get_paca()->user_time;
+	get_paca()->user_time = 0;
+	account_user_time(tsk, utime);
+}
+
+static void account_process_time(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+
+	account_process_vtime(current);
+	run_local_timers();
+	if (rcu_pending(cpu))
+		rcu_check_callbacks(cpu, user_mode(regs));
+	scheduler_tick();
+	run_posix_cpu_timers(current);
+}
+
+#ifdef CONFIG_PPC_SPLPAR
+/*
+ * Stuff for accounting stolen time.
+ */
+struct cpu_purr_data {
+	int	initialized;		/* thread is running */
+	u64	tb0;			/* timebase at origin time */
+	u64	purr0;			/* PURR at origin time */
+	u64	tb;			/* last TB value read */
+	u64	purr;			/* last PURR value read */
+	u64	stolen;			/* stolen time so far */
+	spinlock_t lock;
+};
+
+static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
+
+static void snapshot_tb_and_purr(void *data)
+{
+	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
+
+	p->tb0 = mftb();
+	p->purr0 = mfspr(SPRN_PURR);
+	p->tb = p->tb0;
+	p->purr = 0;
+	wmb();
+	p->initialized = 1;
+}
+
+/*
+ * Called during boot when all cpus have come up.
+ */
+void snapshot_timebases(void)
+{
+	int cpu;
+
+	if (!cpu_has_feature(CPU_FTR_PURR))
+		return;
+	for_each_cpu(cpu)
+		spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
+	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
+}
+
+void calculate_steal_time(void)
+{
+	u64 tb, purr, t0;
+	s64 stolen;
+	struct cpu_purr_data *p0, *pme, *phim;
+	int cpu;
+
+	if (!cpu_has_feature(CPU_FTR_PURR))
+		return;
+	cpu = smp_processor_id();
+	pme = &per_cpu(cpu_purr_data, cpu);
+	if (!pme->initialized)
+		return;		/* this can happen in early boot */
+	p0 = &per_cpu(cpu_purr_data, cpu & ~1);
+	phim = &per_cpu(cpu_purr_data, cpu ^ 1);
+	spin_lock(&p0->lock);
+	tb = mftb();
+	purr = mfspr(SPRN_PURR) - pme->purr0;
+	if (!phim->initialized || !cpu_online(cpu ^ 1)) {
+		stolen = (tb - pme->tb) - (purr - pme->purr);
+	} else {
+		t0 = pme->tb0;
+		if (phim->tb0 < t0)
+			t0 = phim->tb0;
+		stolen = phim->tb - t0 - phim->purr - purr - p0->stolen;
+	}
+	if (stolen > 0) {
+		account_steal_time(current, stolen);
+		p0->stolen += stolen;
+	}
+	pme->tb = tb;
+	pme->purr = purr;
+	spin_unlock(&p0->lock);
+}
+
+/*
+ * Must be called before the cpu is added to the online map when
+ * a cpu is being brought up at runtime.
+ */
+static void snapshot_purr(void)
+{
+	int cpu;
+	u64 purr;
+	struct cpu_purr_data *p0, *pme, *phim;
+	unsigned long flags;
+
+	if (!cpu_has_feature(CPU_FTR_PURR))
+		return;
+	cpu = smp_processor_id();
+	pme = &per_cpu(cpu_purr_data, cpu);
+	p0 = &per_cpu(cpu_purr_data, cpu & ~1);
+	phim = &per_cpu(cpu_purr_data, cpu ^ 1);
+	spin_lock_irqsave(&p0->lock, flags);
+	pme->tb = pme->tb0 = mftb();
+	purr = mfspr(SPRN_PURR);
+	if (!phim->initialized) {
+		pme->purr = 0;
+		pme->purr0 = purr;
+	} else {
+		/* set p->purr and p->purr0 for no change in p0->stolen */
+		pme->purr = phim->tb - phim->tb0 - phim->purr - p0->stolen;
+		pme->purr0 = purr - pme->purr;
+	}
+	pme->initialized = 1;
+	spin_unlock_irqrestore(&p0->lock, flags);
+}
+
+#endif /* CONFIG_PPC_SPLPAR */
+
+#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
+#define calc_cputime_factors()
+#define account_process_time(regs)	update_process_times(user_mode(regs))
+#define calculate_steal_time()		do { } while (0)
+#endif
+
+#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
+#define snapshot_purr()			do { } while (0)
+#endif
+
+/*
+ * Called when a cpu comes up after the system has finished booting,
+ * i.e. as a result of a hotplug cpu action.
+ */
+void snapshot_timebase(void)
+{
+	__get_cpu_var(last_jiffy) = get_tb();
+	snapshot_purr();
+}
+
 void __delay(unsigned long loops)
 {
 	unsigned long start;
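
The conversion factors computed by calc_cputime_factors() above are 0.64 fixed-point fractions: each is floor(units_per_sec * 2^64 / tb_ticks_per_sec), so that mulhdu(ticks, factor), the high 64 bits of a 64x64-bit product, yields ticks * units_per_sec / tb_ticks_per_sec without a division on the hot path. A userspace sketch of the trick; the 512 MHz timebase frequency is an assumption for the example, and __uint128_t stands in for div128_by_32()/mulhdu():

	#include <stdint.h>
	#include <stdio.h>

	#define HZ		1000ULL		/* assumed tick rate */
	#define TB_PER_SEC	512000000ULL	/* assumed timebase frequency */

	/* factor = HZ * 2^64 / TB_PER_SEC, as div128_by_32() computes it */
	static uint64_t jiffies_factor(void)
	{
		return (uint64_t)(((__uint128_t)HZ << 64) / TB_PER_SEC);
	}

	/* mulhdu(a, b): high 64 bits of the 128-bit product, as on PowerPC */
	static uint64_t mulhdu(uint64_t a, uint64_t b)
	{
		return (uint64_t)(((__uint128_t)a * b) >> 64);
	}

	int main(void)
	{
		uint64_t ticks = 1536000123ULL;		/* just over 3 s of ticks */
		uint64_t fast = mulhdu(ticks, jiffies_factor());
		uint64_t slow = ticks * HZ / TB_PER_SEC;

		/* both print 3000: 3 s worth of jiffies at HZ=1000 */
		printf("fast=%llu slow=%llu\n",
		       (unsigned long long)fast, (unsigned long long)slow);
		return 0;
	}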
@@ -382,6 +597,7 @@ static void iSeries_tb_recal(void)
 				new_tb_ticks_per_jiffy, sign, tick_diff );
 		tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
 		tb_ticks_per_sec   = new_tb_ticks_per_sec;
+		calc_cputime_factors();
 		div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
 		do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
 		tb_to_xs = divres.result_low;
@@ -430,6 +646,7 @@ void timer_interrupt(struct pt_regs * regs)
 	irq_enter();
 
 	profile_tick(CPU_PROFILING, regs);
+	calculate_steal_time();
 
 #ifdef CONFIG_PPC_ISERIES
 	get_lppaca()->int_dword.fields.decr_int = 0;
@@ -451,7 +668,7 @@ void timer_interrupt(struct pt_regs * regs)
 		 * is the case.
 		 */
 		if (!cpu_is_offline(cpu))
-			update_process_times(user_mode(regs));
+			account_process_time(regs);
 
 		/*
 		 * No need to check whether cpu is offline here; boot_cpuid
@@ -508,13 +725,27 @@ void wakeup_decrementer(void)
 void __init smp_space_timers(unsigned int max_cpus)
 {
 	int i;
+	unsigned long half = tb_ticks_per_jiffy / 2;
 	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
 	unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
 
 	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
 	previous_tb -= tb_ticks_per_jiffy;
+	/*
+	 * The stolen time calculation for POWER5 shared-processor LPAR
+	 * systems works better if the two threads' timebase interrupts
+	 * are staggered by half a jiffy with respect to each other.
+	 */
 	for_each_cpu(i) {
-		if (i != boot_cpuid) {
+		if (i == boot_cpuid)
+			continue;
+		if (i == (boot_cpuid ^ 1))
+			per_cpu(last_jiffy, i) =
+				per_cpu(last_jiffy, boot_cpuid) - half;
+		else if (i & 1)
+			per_cpu(last_jiffy, i) =
+				per_cpu(last_jiffy, i ^ 1) + half;
+		else {
 			previous_tb += offset;
 			per_cpu(last_jiffy, i) = previous_tb;
 		}
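
A toy run of the staggering logic above makes the layout concrete, assuming four CPUs (two 2-thread cores); boot_cpuid == 0 and tb_ticks_per_jiffy == 1000 are arbitrary values for the example:

	#include <stdio.h>

	int main(void)
	{
		unsigned long tb_ticks_per_jiffy = 1000;
		unsigned long half = tb_ticks_per_jiffy / 2;
		unsigned long offset = tb_ticks_per_jiffy / 4;
		unsigned long last_jiffy[4];
		unsigned long previous_tb;
		int boot_cpuid = 0, i;

		last_jiffy[boot_cpuid] = 100000;	/* arbitrary boot-cpu value */
		previous_tb = last_jiffy[boot_cpuid] - tb_ticks_per_jiffy;

		for (i = 0; i < 4; i++) {
			if (i == boot_cpuid)
				continue;
			if (i == (boot_cpuid ^ 1))
				last_jiffy[i] = last_jiffy[boot_cpuid] - half;
			else if (i & 1)
				last_jiffy[i] = last_jiffy[i ^ 1] + half;
			else {
				previous_tb += offset;
				last_jiffy[i] = previous_tb;
			}
		}
		/* prints 100000 99500 99250 99750: each core's second
		 * thread sits half a jiffy away from its sibling */
		for (i = 0; i < 4; i++)
			printf("%lu ", last_jiffy[i]);
		printf("\n");
		return 0;
	}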
@@ -706,6 +937,7 @@ void __init time_init(void)
 	tb_ticks_per_sec = ppc_tb_freq;
 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
 	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
+	calc_cputime_factors();
 
 	/*
 	 * Calculate the length of each tick in ns.  It will not be
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 90d005bb4d1c..99d12ff6346c 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -117,6 +117,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTR_MMCRA_SIHV		ASM_CONST(0x0000080000000000)
 #define CPU_FTR_CI_LARGE_PAGE		ASM_CONST(0x0000100000000000)
 #define CPU_FTR_PAUSE_ZERO		ASM_CONST(0x0000200000000000)
+#define CPU_FTR_PURR			ASM_CONST(0x0000400000000000)
 #else
 /* ensure on 32b processors the flags are available for compiling but
  * don't do anything */
@@ -132,6 +133,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTR_LOCKLESS_TLBIE		ASM_CONST(0x0)
 #define CPU_FTR_MMCRA_SIHV		ASM_CONST(0x0)
 #define CPU_FTR_CI_LARGE_PAGE		ASM_CONST(0x0)
+#define CPU_FTR_PURR			ASM_CONST(0x0)
 #endif
 
 #ifndef __ASSEMBLY__
@@ -316,7 +318,7 @@ enum {
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
 	    CPU_FTR_MMCRA | CPU_FTR_SMT |
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
-	    CPU_FTR_MMCRA_SIHV,
+	    CPU_FTR_MMCRA_SIHV | CPU_FTR_PURR,
 	CPU_FTRS_CELL = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT |
diff --git a/include/asm-powerpc/cputime.h b/include/asm-powerpc/cputime.h
index 6d68ad7e0ea3..a21185d47883 100644
--- a/include/asm-powerpc/cputime.h
+++ b/include/asm-powerpc/cputime.h
@@ -1 +1,203 @@
+/*
+ * Definitions for measuring cputime on powerpc machines.
+ *
+ * Copyright (C) 2006 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in
+ * the same units as the timebase.  Otherwise we measure cpu time
+ * in jiffies using the generic definitions.
+ */
+
+#ifndef __POWERPC_CPUTIME_H
+#define __POWERPC_CPUTIME_H
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
 #include <asm-generic/cputime.h>
+#else
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <asm/div64.h>
+#include <asm/time.h>
+#include <asm/param.h>
+
+typedef u64 cputime_t;
+typedef u64 cputime64_t;
+
+#define cputime_zero			((cputime_t)0)
+#define cputime_max			((~((cputime_t)0) >> 1) - 1)
+#define cputime_add(__a, __b)		((__a) + (__b))
+#define cputime_sub(__a, __b)		((__a) - (__b))
+#define cputime_div(__a, __n)		((__a) / (__n))
+#define cputime_halve(__a)		((__a) >> 1)
+#define cputime_eq(__a, __b)		((__a) == (__b))
+#define cputime_gt(__a, __b)		((__a) > (__b))
+#define cputime_ge(__a, __b)		((__a) >= (__b))
+#define cputime_lt(__a, __b)		((__a) < (__b))
+#define cputime_le(__a, __b)		((__a) <= (__b))
+
+#define cputime64_zero			((cputime64_t)0)
+#define cputime64_add(__a, __b)		((__a) + (__b))
+#define cputime_to_cputime64(__ct)	(__ct)
+
+#ifdef __KERNEL__
+
+/*
+ * Convert cputime <-> jiffies
+ */
+extern u64 __cputime_jiffies_factor;
+
+static inline unsigned long cputime_to_jiffies(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_jiffies_factor);
+}
+
+static inline cputime_t jiffies_to_cputime(const unsigned long jif)
+{
+	cputime_t ct;
+	unsigned long sec;
+
+	/* have to be a little careful about overflow */
+	ct = jif % HZ;
+	sec = jif / HZ;
+	if (ct) {
+		ct *= tb_ticks_per_sec;
+		do_div(ct, HZ);
+	}
+	if (sec)
+		ct += (cputime_t) sec * tb_ticks_per_sec;
+	return ct;
+}
+
+static inline u64 cputime64_to_jiffies64(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_jiffies_factor);
+}
+
+/*
+ * Convert cputime <-> milliseconds
+ */
+extern u64 __cputime_msec_factor;
+
+static inline unsigned long cputime_to_msecs(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_msec_factor);
+}
+
+static inline cputime_t msecs_to_cputime(const unsigned long ms)
+{
+	cputime_t ct;
+	unsigned long sec;
+
+	/* have to be a little careful about overflow */
+	ct = ms % 1000;
+	sec = ms / 1000;
+	if (ct) {
+		ct *= tb_ticks_per_sec;
+		do_div(ct, 1000);
+	}
+	if (sec)
+		ct += (cputime_t) sec * tb_ticks_per_sec;
+	return ct;
+}
+
+/*
+ * Convert cputime <-> seconds
+ */
+extern u64 __cputime_sec_factor;
+
+static inline unsigned long cputime_to_secs(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_sec_factor);
+}
+
+static inline cputime_t secs_to_cputime(const unsigned long sec)
+{
+	return (cputime_t) sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> timespec
+ */
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
+{
+	u64 x = ct;
+	unsigned int frac;
+
+	frac = do_div(x, tb_ticks_per_sec);
+	p->tv_sec = x;
+	x = (u64) frac * 1000000000;
+	do_div(x, tb_ticks_per_sec);
+	p->tv_nsec = x;
+}
+
+static inline cputime_t timespec_to_cputime(const struct timespec *p)
+{
+	cputime_t ct;
+
+	ct = (u64) p->tv_nsec * tb_ticks_per_sec;
+	do_div(ct, 1000000000);
+	return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> timeval
+ */
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
+{
+	u64 x = ct;
+	unsigned int frac;
+
+	frac = do_div(x, tb_ticks_per_sec);
+	p->tv_sec = x;
+	x = (u64) frac * 1000000;
+	do_div(x, tb_ticks_per_sec);
+	p->tv_usec = x;
+}
+
+static inline cputime_t timeval_to_cputime(const struct timeval *p)
+{
+	cputime_t ct;
+
+	ct = (u64) p->tv_usec * tb_ticks_per_sec;
+	do_div(ct, 1000000);
+	return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> clock_t (units of 1/USER_HZ seconds)
+ */
+extern u64 __cputime_clockt_factor;
+
+static inline unsigned long cputime_to_clock_t(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_clockt_factor);
+}
+
+static inline cputime_t clock_t_to_cputime(const unsigned long clk)
+{
+	cputime_t ct;
+	unsigned long sec;
+
+	/* have to be a little careful about overflow */
+	ct = clk % USER_HZ;
+	sec = clk / USER_HZ;
+	if (ct) {
+		ct *= tb_ticks_per_sec;
+		do_div(ct, USER_HZ);
+	}
+	if (sec)
+		ct += (cputime_t) sec * tb_ticks_per_sec;
+	return ct;
+}
+
+#define cputime64_to_clock_t(ct)	cputime_to_clock_t((cputime_t)(ct))
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* __POWERPC_CPUTIME_H */
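
The "have to be a little careful about overflow" comments are about the multiply-before-divide trap: a large jiffies (or msec, or clock_t) count times tb_ticks_per_sec can wrap 64 bits, which is why the conversions split out whole seconds first. A userspace sketch, assuming HZ of 1000 and a 512 MHz timebase (both values are assumptions for the example):

	#include <stdint.h>
	#include <stdio.h>

	#define HZ		1000ULL
	#define TB_PER_SEC	512000000ULL	/* assumed timebase frequency */

	/* careful version, structured like jiffies_to_cputime() above */
	static uint64_t j2ct(uint64_t jif)
	{
		uint64_t ct = jif % HZ, sec = jif / HZ;

		if (ct)
			ct = ct * TB_PER_SEC / HZ;	/* small: cannot wrap */
		if (sec)
			ct += sec * TB_PER_SEC;
		return ct;
	}

	int main(void)
	{
		uint64_t jif = 36500000000ULL;	/* ~1.1 years of jiffies */
		uint64_t naive = jif * TB_PER_SEC / HZ;	/* jif*TB wraps 64 bits */
		uint64_t exact = (uint64_t)(((__uint128_t)jif * TB_PER_SEC) / HZ);

		/* careful matches exact; naive is garbage after the wrap */
		printf("careful=%llu naive=%llu exact=%llu\n",
		       (unsigned long long)j2ct(jif),
		       (unsigned long long)naive,
		       (unsigned long long)exact);
		return 0;
	}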
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index 8eb7e857ec4c..51f87d9993b6 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -479,6 +479,10 @@ extern int distribute_irqs;
 struct irqaction;
 struct pt_regs;
 
+#define __ARCH_HAS_DO_SOFTIRQ
+
+extern void __do_softirq(void);
+
 #ifdef CONFIG_IRQSTACKS
 /*
  * Per-cpu stacks for handling hard and soft interrupts.
@@ -491,8 +495,6 @@ extern void call_do_softirq(struct thread_info *tp);
 extern int call___do_IRQ(int irq, struct pt_regs *regs,
 			 struct thread_info *tp);
 
-#define __ARCH_HAS_DO_SOFTIRQ
-
 #else
 #define irq_ctx_init()
 
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index ec94b51074fc..4465b95ebef0 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -96,6 +96,11 @@ struct paca_struct {
 	u64 saved_r1;			/* r1 save for RTAS calls */
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
 	u8 proc_enabled;		/* irq soft-enable flag */
+
+	/* Stuff for accurate time accounting */
+	u64 user_time;			/* accumulated usermode TB ticks */
+	u64 system_time;		/* accumulated system TB ticks */
+	u64 startpurr;			/* PURR/TB value snapshot */
 };
 
 extern struct paca_struct paca[];
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index ab8688d39024..dd1c0a913d5f 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -15,6 +15,48 @@
 #define SZL			(BITS_PER_LONG/8)
 
 /*
+ * Stuff for accurate CPU time accounting.
+ * These macros handle transitions between user and system state
+ * in exception entry and exit and accumulate time to the
+ * user_time and system_time fields in the paca.
+ */
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
+#define ACCOUNT_CPU_USER_EXIT(ra, rb)
+#else
+#define ACCOUNT_CPU_USER_ENTRY(ra, rb)					\
+	beq	2f;			/* if from kernel mode */	\
+BEGIN_FTR_SECTION;							\
+	mfspr	ra,SPRN_PURR;		/* get processor util. reg */	\
+END_FTR_SECTION_IFSET(CPU_FTR_PURR);					\
+BEGIN_FTR_SECTION;							\
+	mftb	ra;			/* or get TB if no PURR */	\
+END_FTR_SECTION_IFCLR(CPU_FTR_PURR);					\
+	ld	rb,PACA_STARTPURR(r13);					\
+	std	ra,PACA_STARTPURR(r13);					\
+	subf	rb,rb,ra;		/* subtract start value */	\
+	ld	ra,PACA_USER_TIME(r13);					\
+	add	ra,ra,rb;		/* add on to user time */	\
+	std	ra,PACA_USER_TIME(r13);					\
+2:
+
+#define ACCOUNT_CPU_USER_EXIT(ra, rb)					\
+BEGIN_FTR_SECTION;							\
+	mfspr	ra,SPRN_PURR;		/* get processor util. reg */	\
+END_FTR_SECTION_IFSET(CPU_FTR_PURR);					\
+BEGIN_FTR_SECTION;							\
+	mftb	ra;			/* or get TB if no PURR */	\
+END_FTR_SECTION_IFCLR(CPU_FTR_PURR);					\
+	ld	rb,PACA_STARTPURR(r13);					\
+	std	ra,PACA_STARTPURR(r13);					\
+	subf	rb,rb,ra;		/* subtract start value */	\
+	ld	ra,PACA_SYSTEM_TIME(r13);				\
+	add	ra,ra,rb;		/* add on to system time */	\
+	std	ra,PACA_SYSTEM_TIME(r13);
+#endif
+
+/*
  * Macros for storing registers into and loading registers from
  * exception frames.
  */
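
In C terms, the two macros above amount to the following sketch (for readability only, not kernel code; read_counter() stands for the PURR-or-timebase read that the feature sections select, and the struct stands for the paca fields):

	#include <stdint.h>
	#include <stdio.h>

	struct paca_sketch {
		uint64_t startpurr;	/* PACA_STARTPURR */
		uint64_t user_time;	/* PACA_USER_TIME */
		uint64_t system_time;	/* PACA_SYSTEM_TIME */
	};

	/* stand-in for "mfspr SPRN_PURR if CPU_FTR_PURR, else mftb" */
	static uint64_t fake_counter;
	static uint64_t read_counter(void) { return fake_counter; }

	/* ACCOUNT_CPU_USER_ENTRY: charge ticks since the last transition
	 * to user time */
	static void account_cpu_user_entry(struct paca_sketch *p, int from_user)
	{
		uint64_t now;

		if (!from_user)		/* the "beq 2f" kernel-mode bailout */
			return;
		now = read_counter();
		p->user_time += now - p->startpurr;
		p->startpurr = now;
	}

	/* ACCOUNT_CPU_USER_EXIT: charge ticks since the last transition
	 * to system time */
	static void account_cpu_user_exit(struct paca_sketch *p)
	{
		uint64_t now = read_counter();

		p->system_time += now - p->startpurr;
		p->startpurr = now;
	}

	int main(void)
	{
		struct paca_sketch p = { 0, 0, 0 };

		fake_counter = 100;		/* 100 ticks in user mode... */
		account_cpu_user_entry(&p, 1);	/* ...then a syscall arrives */
		fake_counter = 140;		/* 40 ticks of kernel work */
		account_cpu_user_exit(&p);	/* return to user mode */
		printf("user=%llu system=%llu\n",	/* user=100 system=40 */
		       (unsigned long long)p.user_time,
		       (unsigned long long)p.system_time);
		return 0;
	}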
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index d9bf53653b10..41b7a5b3d701 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -424,5 +424,9 @@ static inline void create_function_call(unsigned long addr, void * func)
 	create_branch(addr, func_addr, BRANCH_SET_LINK);
 }
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SYSTEM_H */
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index baddc9ab57ad..912118db13ae 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -41,6 +41,7 @@ extern time_t last_rtc_update;
 
 extern void generic_calibrate_decr(void);
 extern void wakeup_decrementer(void);
+extern void snapshot_timebase(void);
 
 /* Some sane defaults: 125 MHz timebase, 1GHz processor */
 extern unsigned long ppc_proc_freq;
@@ -221,5 +222,19 @@ struct cpu_usage {
 
 DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_process_vtime(struct task_struct *tsk);
+#else
+#define account_process_vtime(tsk)		do { } while (0)
+#endif
+
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
+extern void calculate_steal_time(void);
+extern void snapshot_timebases(void);
+#else
+#define calculate_steal_time()			do { } while (0)
+#define snapshot_timebases()			do { } while (0)
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __PPC64_TIME_H */
diff --git a/include/asm-ppc/time.h b/include/asm-ppc/time.h
index 321fb75b5f22..c86112323c9f 100644
--- a/include/asm-ppc/time.h
+++ b/include/asm-ppc/time.h
@@ -153,5 +153,10 @@ extern __inline__ unsigned binary_tbl(void) {
 ({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
 
 unsigned mulhwu_scale_factor(unsigned, unsigned);
+
+#define account_process_vtime(tsk)		do { } while (0)
+#define calculate_steal_time()			do { } while (0)
+#define snapshot_timebases()			do { } while (0)
+
 #endif /* __ASM_TIME_H__ */
 #endif /* __KERNEL__ */