| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-22 00:19:54 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-22 00:19:54 -0400 |
| commit | d4429f608abde89e8bc1e24b43cd503feb95c496 | |
| tree | 4c11afa193593a5e3949391bf35022b4f87ba375 /arch/powerpc/kernel/time.c | |
| parent | e10117d36ef758da0690c95ecffc09d5dd7da479 | |
| parent | 6a1c9dfe4186f18fed38421b35b40fb9260cbfe1 | |
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (71 commits)
powerpc/44x: Update ppc44x_defconfig
powerpc/watchdog: Make default timeout for Book-E watchdog a Kconfig option
fsl_rio: Add comments for sRIO registers.
powerpc/fsl-booke: Add e55xx (64-bit) smp defconfig
powerpc/fsl-booke: Add p5020 DS board support
powerpc/fsl-booke64: Use TLB CAMs to cover linear mapping on FSL 64-bit chips
powerpc/fsl-booke: Add support for FSL Arch v1.0 MMU in setup_page_sizes
powerpc/fsl-booke: Add support for FSL 64-bit e5500 core
powerpc/85xx: add cache-sram support
powerpc/85xx: add ngPIXIS FPGA device tree node to the P1022DS board
powerpc: Fix compile error with paca code on ppc64e
powerpc/fsl-booke: Add p3041 DS board support
oprofile/fsl emb: Don't set MSR[PMM] until after clearing the interrupt.
powerpc/fsl-booke: Add PCI device ids for P2040/P3041/P5010/P5020 QoirQ chips
powerpc/mpc8xxx_gpio: Add support for 'qoriq-gpio' controllers
powerpc/fsl_booke: Add support to boot from core other than 0
powerpc/p1022: Add probing for individual DMA channels
powerpc/fsl_soc: Search all global-utilities nodes for rstccr
powerpc: Fix invalid page flags in create TLB CAM path for PTE_64BIT
powerpc/mpc83xx: Support for MPC8308 P1M board
...
Fix up conflict with the generic irq_work changes in arch/powerpc/kernel/time.c
Diffstat (limited to 'arch/powerpc/kernel/time.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/powerpc/kernel/time.c | 275 |
1 files changed, 133 insertions, 142 deletions
```diff
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 54888eb10c3b..010406958d97 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -161,10 +161,9 @@ extern struct timezone sys_tz;
 static long timezone_offset;
 
 unsigned long ppc_proc_freq;
-EXPORT_SYMBOL(ppc_proc_freq);
+EXPORT_SYMBOL_GPL(ppc_proc_freq);
 unsigned long ppc_tb_freq;
-
-static DEFINE_PER_CPU(u64, last_jiffy);
+EXPORT_SYMBOL_GPL(ppc_tb_freq);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 /*
@@ -185,6 +184,8 @@ DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
 
 cputime_t cputime_one_jiffy;
 
+void (*dtl_consumer)(struct dtl_entry *, u64);
+
 static void calc_cputime_factors(void)
 {
 	struct div_result res;
@@ -200,62 +201,153 @@ static void calc_cputime_factors(void)
 }
 
 /*
- * Read the PURR on systems that have it, otherwise the timebase.
+ * Read the SPURR on systems that have it, otherwise the PURR,
+ * or if that doesn't exist return the timebase value passed in.
  */
-static u64 read_purr(void)
+static u64 read_spurr(u64 tb)
 {
+	if (cpu_has_feature(CPU_FTR_SPURR))
+		return mfspr(SPRN_SPURR);
 	if (cpu_has_feature(CPU_FTR_PURR))
 		return mfspr(SPRN_PURR);
-	return mftb();
+	return tb;
 }
 
+#ifdef CONFIG_PPC_SPLPAR
+
 /*
- * Read the SPURR on systems that have it, otherwise the purr
+ * Scan the dispatch trace log and count up the stolen time.
+ * Should be called with interrupts disabled.
  */
-static u64 read_spurr(u64 purr)
+static u64 scan_dispatch_log(u64 stop_tb)
 {
-	/*
-	 * cpus without PURR won't have a SPURR
-	 * We already know the former when we use this, so tell gcc
-	 */
-	if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR))
-		return mfspr(SPRN_SPURR);
-	return purr;
+	u64 i = local_paca->dtl_ridx;
+	struct dtl_entry *dtl = local_paca->dtl_curr;
+	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
+	struct lppaca *vpa = local_paca->lppaca_ptr;
+	u64 tb_delta;
+	u64 stolen = 0;
+	u64 dtb;
+
+	if (i == vpa->dtl_idx)
+		return 0;
+	while (i < vpa->dtl_idx) {
+		if (dtl_consumer)
+			dtl_consumer(dtl, i);
+		dtb = dtl->timebase;
+		tb_delta = dtl->enqueue_to_dispatch_time +
+			dtl->ready_to_enqueue_time;
+		barrier();
+		if (i + N_DISPATCH_LOG < vpa->dtl_idx) {
+			/* buffer has overflowed */
+			i = vpa->dtl_idx - N_DISPATCH_LOG;
+			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
+			continue;
+		}
+		if (dtb > stop_tb)
+			break;
+		stolen += tb_delta;
+		++i;
+		++dtl;
+		if (dtl == dtl_end)
+			dtl = local_paca->dispatch_log;
+	}
+	local_paca->dtl_ridx = i;
+	local_paca->dtl_curr = dtl;
+	return stolen;
 }
 
 /*
+ * Accumulate stolen time by scanning the dispatch trace log.
+ * Called on entry from user mode.
+ */
+void accumulate_stolen_time(void)
+{
+	u64 sst, ust;
+
+	sst = scan_dispatch_log(get_paca()->starttime_user);
+	ust = scan_dispatch_log(get_paca()->starttime);
+	get_paca()->system_time -= sst;
+	get_paca()->user_time -= ust;
+	get_paca()->stolen_time += ust + sst;
+}
+
+static inline u64 calculate_stolen_time(u64 stop_tb)
+{
+	u64 stolen = 0;
+
+	if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) {
+		stolen = scan_dispatch_log(stop_tb);
+		get_paca()->system_time -= stolen;
+	}
+
+	stolen += get_paca()->stolen_time;
+	get_paca()->stolen_time = 0;
+	return stolen;
+}
+
+#else /* CONFIG_PPC_SPLPAR */
+static inline u64 calculate_stolen_time(u64 stop_tb)
+{
+	return 0;
+}
+
+#endif /* CONFIG_PPC_SPLPAR */
+
+/*
  * Account time for a transition between system, hard irq
  * or soft irq state.
  */
 void account_system_vtime(struct task_struct *tsk)
 {
-	u64 now, nowscaled, delta, deltascaled, sys_time;
+	u64 now, nowscaled, delta, deltascaled;
 	unsigned long flags;
+	u64 stolen, udelta, sys_scaled, user_scaled;
 
 	local_irq_save(flags);
-	now = read_purr();
+	now = mftb();
 	nowscaled = read_spurr(now);
-	delta = now - get_paca()->startpurr;
+	get_paca()->system_time += now - get_paca()->starttime;
+	get_paca()->starttime = now;
 	deltascaled = nowscaled - get_paca()->startspurr;
-	get_paca()->startpurr = now;
 	get_paca()->startspurr = nowscaled;
-	if (!in_interrupt()) {
-		/* deltascaled includes both user and system time.
-		 * Hence scale it based on the purr ratio to estimate
-		 * the system time */
-		sys_time = get_paca()->system_time;
-		if (get_paca()->user_time)
-			deltascaled = deltascaled * sys_time /
-				(sys_time + get_paca()->user_time);
-		delta += sys_time;
-		get_paca()->system_time = 0;
+
+	stolen = calculate_stolen_time(now);
+
+	delta = get_paca()->system_time;
+	get_paca()->system_time = 0;
+	udelta = get_paca()->user_time - get_paca()->utime_sspurr;
+	get_paca()->utime_sspurr = get_paca()->user_time;
+
+	/*
+	 * Because we don't read the SPURR on every kernel entry/exit,
+	 * deltascaled includes both user and system SPURR ticks.
+	 * Apportion these ticks to system SPURR ticks and user
+	 * SPURR ticks in the same ratio as the system time (delta)
+	 * and user time (udelta) values obtained from the timebase
+	 * over the same interval. The system ticks get accounted here;
+	 * the user ticks get saved up in paca->user_time_scaled to be
+	 * used by account_process_tick.
+	 */
+	sys_scaled = delta;
+	user_scaled = udelta;
+	if (deltascaled != delta + udelta) {
+		if (udelta) {
+			sys_scaled = deltascaled * delta / (delta + udelta);
+			user_scaled = deltascaled - sys_scaled;
+		} else {
+			sys_scaled = deltascaled;
+		}
+	}
+	get_paca()->user_time_scaled += user_scaled;
+
+	if (in_irq() || idle_task(smp_processor_id()) != tsk) {
+		account_system_time(tsk, 0, delta, sys_scaled);
+		if (stolen)
+			account_steal_time(stolen);
+	} else {
+		account_idle_time(delta + stolen);
 	}
-	if (in_irq() || idle_task(smp_processor_id()) != tsk)
-		account_system_time(tsk, 0, delta, deltascaled);
-	else
-		account_idle_time(delta);
-	__get_cpu_var(cputime_last_delta) = delta;
-	__get_cpu_var(cputime_scaled_last_delta) = deltascaled;
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
@@ -265,125 +357,26 @@ EXPORT_SYMBOL_GPL(account_system_vtime);
  * by the exception entry and exit code to the generic process
  * user and system time records.
  * Must be called with interrupts disabled.
+ * Assumes that account_system_vtime() has been called recently
+ * (i.e. since the last entry from usermode) so that
+ * get_paca()->user_time_scaled is up to date.
  */
 void account_process_tick(struct task_struct *tsk, int user_tick)
 {
 	cputime_t utime, utimescaled;
 
 	utime = get_paca()->user_time;
+	utimescaled = get_paca()->user_time_scaled;
 	get_paca()->user_time = 0;
-	utimescaled = cputime_to_scaled(utime);
+	get_paca()->user_time_scaled = 0;
+	get_paca()->utime_sspurr = 0;
 	account_user_time(tsk, utime, utimescaled);
 }
 
-/*
- * Stuff for accounting stolen time.
- */
-struct cpu_purr_data {
-	int initialized;	/* thread is running */
-	u64 tb;			/* last TB value read */
-	u64 purr;		/* last PURR value read */
-	u64 spurr;		/* last SPURR value read */
-};
-
-/*
- * Each entry in the cpu_purr_data array is manipulated only by its
- * "owner" cpu -- usually in the timer interrupt but also occasionally
- * in process context for cpu online.  As long as cpus do not touch
- * each others' cpu_purr_data, disabling local interrupts is
- * sufficient to serialize accesses.
- */
-static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
-
-static void snapshot_tb_and_purr(void *data)
-{
-	unsigned long flags;
-	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
-
-	local_irq_save(flags);
-	p->tb = get_tb_or_rtc();
-	p->purr = mfspr(SPRN_PURR);
-	wmb();
-	p->initialized = 1;
-	local_irq_restore(flags);
-}
-
-/*
- * Called during boot when all cpus have come up.
- */
-void snapshot_timebases(void)
-{
-	if (!cpu_has_feature(CPU_FTR_PURR))
-		return;
-	on_each_cpu(snapshot_tb_and_purr, NULL, 1);
-}
-
-/*
- * Must be called with interrupts disabled.
- */
-void calculate_steal_time(void)
-{
-	u64 tb, purr;
-	s64 stolen;
-	struct cpu_purr_data *pme;
-
-	pme = &__get_cpu_var(cpu_purr_data);
-	if (!pme->initialized)
-		return;		/* !CPU_FTR_PURR or early in early boot */
-	tb = mftb();
-	purr = mfspr(SPRN_PURR);
-	stolen = (tb - pme->tb) - (purr - pme->purr);
-	if (stolen > 0) {
-		if (idle_task(smp_processor_id()) != current)
-			account_steal_time(stolen);
-		else
-			account_idle_time(stolen);
-	}
-	pme->tb = tb;
-	pme->purr = purr;
-}
-
-#ifdef CONFIG_PPC_SPLPAR
-/*
- * Must be called before the cpu is added to the online map when
- * a cpu is being brought up at runtime.
- */
-static void snapshot_purr(void)
-{
-	struct cpu_purr_data *pme;
-	unsigned long flags;
-
-	if (!cpu_has_feature(CPU_FTR_PURR))
-		return;
-	local_irq_save(flags);
-	pme = &__get_cpu_var(cpu_purr_data);
-	pme->tb = mftb();
-	pme->purr = mfspr(SPRN_PURR);
-	pme->initialized = 1;
-	local_irq_restore(flags);
-}
-
-#endif /* CONFIG_PPC_SPLPAR */
-
 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
 #define calc_cputime_factors()
-#define calculate_steal_time()	do { } while (0)
 #endif
 
-#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
-#define snapshot_purr()		do { } while (0)
-#endif
-
-/*
- * Called when a cpu comes up after the system has finished booting,
- * i.e. as a result of a hotplug cpu action.
- */
-void snapshot_timebase(void)
-{
-	__get_cpu_var(last_jiffy) = get_tb_or_rtc();
-	snapshot_purr();
-}
-
 void __delay(unsigned long loops)
 {
 	unsigned long start;
@@ -585,8 +578,6 @@ void timer_interrupt(struct pt_regs * regs)
 	old_regs = set_irq_regs(regs);
 	irq_enter();
 
-	calculate_steal_time();
-
 	if (test_irq_work_pending()) {
 		clear_irq_work_pending();
 		irq_work_run();
```
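A note on the apportioning step added to account_system_vtime() above: the SPURR delta for the interval is split between system and user time in the same ratio as the timebase-derived `delta` and `udelta` values. Below is a minimal standalone sketch of just that arithmetic, with made-up sample values and none of the paca state, stolen-time handling, or locking from the kernel code:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only: split "deltascaled" (SPURR ticks) into system and
 * user portions in the same ratio as the timebase-derived system time
 * (delta) and user time (udelta), as the new account_system_vtime()
 * does.  The input values below are invented for the example.
 */
int main(void)
{
	uint64_t delta = 3000;        /* system time, timebase ticks */
	uint64_t udelta = 1000;       /* user time, timebase ticks */
	uint64_t deltascaled = 3600;  /* SPURR ticks over the same interval */

	uint64_t sys_scaled = delta;
	uint64_t user_scaled = udelta;

	if (deltascaled != delta + udelta) {
		if (udelta) {
			sys_scaled = deltascaled * delta / (delta + udelta);
			user_scaled = deltascaled - sys_scaled;
		} else {
			/* no user time: all SPURR ticks belong to the kernel */
			sys_scaled = deltascaled;
		}
	}

	/* prints sys_scaled=2700 user_scaled=900 */
	printf("sys_scaled=%llu user_scaled=%llu\n",
	       (unsigned long long)sys_scaled,
	       (unsigned long long)user_scaled);
	return 0;
}
```

With deltascaled = 3600 apportioned across delta = 3000 and udelta = 1000, the system side is credited 2700 scaled ticks and the user side the remaining 900; because the division is integer, any rounding remainder lands on the user side, which the kernel code accumulates in paca->user_time_scaled for account_process_tick() to consume.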
