Diffstat (limited to 'arch/powerpc/kernel/time.c')
-rw-r--r--   arch/powerpc/kernel/time.c   241
1 file changed, 239 insertions, 2 deletions
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 86f7e3d154d8..4a27218a086c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -51,6 +51,7 @@
 #include <linux/percpu.h>
 #include <linux/rtc.h>
 #include <linux/jiffies.h>
+#include <linux/posix-timers.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -98,6 +99,7 @@ unsigned long tb_ticks_per_jiffy;
 unsigned long tb_ticks_per_usec = 100;  /* sane default */
 EXPORT_SYMBOL(tb_ticks_per_usec);
 unsigned long tb_ticks_per_sec;
+EXPORT_SYMBOL(tb_ticks_per_sec);        /* for cputime_t conversions */
 u64 tb_to_xs;
 unsigned tb_to_us;
 
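Exporting tb_ticks_per_sec matters because, with CONFIG_VIRT_CPU_ACCOUNTING enabled below, a powerpc cputime_t counts raw timebase ticks rather than jiffies, so modules that convert between wall-clock units and cputime_t need the tick rate. A minimal sketch of such a conversion (the helper name is illustrative, not a kernel API):

    /* Illustrative only: whole seconds to timebase ticks (cputime_t units). */
    static inline u64 secs_to_tb_ticks(unsigned long secs)
    {
            return (u64)secs * tb_ticks_per_sec;
    }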
@@ -135,6 +137,224 @@ unsigned long tb_last_stamp;
  */
 DEFINE_PER_CPU(unsigned long, last_jiffy);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/*
+ * Factors for converting from cputime_t (timebase ticks) to
+ * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
+ * These are all stored as 0.64 fixed-point binary fractions.
+ */
+u64 __cputime_jiffies_factor;
+EXPORT_SYMBOL(__cputime_jiffies_factor);
+u64 __cputime_msec_factor;
+EXPORT_SYMBOL(__cputime_msec_factor);
+u64 __cputime_sec_factor;
+EXPORT_SYMBOL(__cputime_sec_factor);
+u64 __cputime_clockt_factor;
+EXPORT_SYMBOL(__cputime_clockt_factor);
+
+static void calc_cputime_factors(void)
+{
+        struct div_result res;
+
+        div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
+        __cputime_jiffies_factor = res.result_low;
+        div128_by_32(1000, 0, tb_ticks_per_sec, &res);
+        __cputime_msec_factor = res.result_low;
+        div128_by_32(1, 0, tb_ticks_per_sec, &res);
+        __cputime_sec_factor = res.result_low;
+        div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
+        __cputime_clockt_factor = res.result_low;
+}
+
+/*
+ * Read the PURR on systems that have it, otherwise the timebase.
+ */
+static u64 read_purr(void)
+{
+        if (cpu_has_feature(CPU_FTR_PURR))
+                return mfspr(SPRN_PURR);
+        return mftb();
+}
+
+/*
+ * Account time for a transition between system, hard irq
+ * or soft irq state.
+ */
+void account_system_vtime(struct task_struct *tsk)
+{
+        u64 now, delta;
+        unsigned long flags;
+
+        local_irq_save(flags);
+        now = read_purr();
+        delta = now - get_paca()->startpurr;
+        get_paca()->startpurr = now;
+        if (!in_interrupt()) {
+                delta += get_paca()->system_time;
+                get_paca()->system_time = 0;
+        }
+        account_system_time(tsk, 0, delta);
+        local_irq_restore(flags);
+}
+
+/*
+ * Transfer the user and system times accumulated in the paca
+ * by the exception entry and exit code to the generic process
+ * user and system time records.
+ * Must be called with interrupts disabled.
+ */
+void account_process_vtime(struct task_struct *tsk)
+{
+        cputime_t utime;
+
+        utime = get_paca()->user_time;
+        get_paca()->user_time = 0;
+        account_user_time(tsk, utime);
+}
+
+static void account_process_time(struct pt_regs *regs)
+{
+        int cpu = smp_processor_id();
+
+        account_process_vtime(current);
+        run_local_timers();
+        if (rcu_pending(cpu))
+                rcu_check_callbacks(cpu, user_mode(regs));
+        scheduler_tick();
+        run_posix_cpu_timers(current);
+}
+
+#ifdef CONFIG_PPC_SPLPAR
+/*
+ * Stuff for accounting stolen time.
+ */
+struct cpu_purr_data {
+        int     initialized;            /* thread is running */
+        u64     tb0;                    /* timebase at origin time */
+        u64     purr0;                  /* PURR at origin time */
+        u64     tb;                     /* last TB value read */
+        u64     purr;                   /* last PURR value read */
+        u64     stolen;                 /* stolen time so far */
+        spinlock_t lock;
+};
+
+static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
+
+static void snapshot_tb_and_purr(void *data)
+{
+        struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
+
+        p->tb0 = mftb();
+        p->purr0 = mfspr(SPRN_PURR);
+        p->tb = p->tb0;
+        p->purr = 0;
+        wmb();
+        p->initialized = 1;
+}
+
+/*
+ * Called during boot when all cpus have come up.
+ */
+void snapshot_timebases(void)
+{
+        int cpu;
+
+        if (!cpu_has_feature(CPU_FTR_PURR))
+                return;
+        for_each_cpu(cpu)
+                spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
+        on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
+}
+
+void calculate_steal_time(void)
+{
+        u64 tb, purr, t0;
+        s64 stolen;
+        struct cpu_purr_data *p0, *pme, *phim;
+        int cpu;
+
+        if (!cpu_has_feature(CPU_FTR_PURR))
+                return;
+        cpu = smp_processor_id();
+        pme = &per_cpu(cpu_purr_data, cpu);
+        if (!pme->initialized)
+                return;         /* this can happen in early boot */
+        p0 = &per_cpu(cpu_purr_data, cpu & ~1);
+        phim = &per_cpu(cpu_purr_data, cpu ^ 1);
+        spin_lock(&p0->lock);
+        tb = mftb();
+        purr = mfspr(SPRN_PURR) - pme->purr0;
+        if (!phim->initialized || !cpu_online(cpu ^ 1)) {
+                stolen = (tb - pme->tb) - (purr - pme->purr);
+        } else {
+                t0 = pme->tb0;
+                if (phim->tb0 < t0)
+                        t0 = phim->tb0;
+                stolen = phim->tb - t0 - phim->purr - purr - p0->stolen;
+        }
+        if (stolen > 0) {
+                account_steal_time(current, stolen);
+                p0->stolen += stolen;
+        }
+        pme->tb = tb;
+        pme->purr = purr;
+        spin_unlock(&p0->lock);
+}
+
+/*
+ * Must be called before the cpu is added to the online map when
+ * a cpu is being brought up at runtime.
+ */
+static void snapshot_purr(void)
+{
+        int cpu;
+        u64 purr;
+        struct cpu_purr_data *p0, *pme, *phim;
+        unsigned long flags;
+
+        if (!cpu_has_feature(CPU_FTR_PURR))
+                return;
+        cpu = smp_processor_id();
+        pme = &per_cpu(cpu_purr_data, cpu);
+        p0 = &per_cpu(cpu_purr_data, cpu & ~1);
+        phim = &per_cpu(cpu_purr_data, cpu ^ 1);
+        spin_lock_irqsave(&p0->lock, flags);
+        pme->tb = pme->tb0 = mftb();
+        purr = mfspr(SPRN_PURR);
+        if (!phim->initialized) {
+                pme->purr = 0;
+                pme->purr0 = purr;
+        } else {
+                /* set p->purr and p->purr0 for no change in p0->stolen */
+                pme->purr = phim->tb - phim->tb0 - phim->purr - p0->stolen;
+                pme->purr0 = purr - pme->purr;
+        }
+        pme->initialized = 1;
+        spin_unlock_irqrestore(&p0->lock, flags);
+}
+
+#endif /* CONFIG_PPC_SPLPAR */
+
+#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
+#define calc_cputime_factors()
+#define account_process_time(regs)      update_process_times(user_mode(regs))
+#define calculate_steal_time()          do { } while (0)
+#endif
+
+#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
+#define snapshot_purr()                 do { } while (0)
+#endif
+
+/*
+ * Called when a cpu comes up after the system has finished booting,
+ * i.e. as a result of a hotplug cpu action.
+ */
+void snapshot_timebase(void)
+{
+        __get_cpu_var(last_jiffy) = get_tb();
+        snapshot_purr();
+}
+
 void __delay(unsigned long loops)
 {
         unsigned long start;
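The factors computed by calc_cputime_factors() are 0.64 fixed-point fractions: div128_by_32(HZ, 0, tb_ticks_per_sec, &res) forms (HZ << 64) / tb_ticks_per_sec, so multiplying a tick count by the factor and keeping the high 64 bits of the 128-bit product yields ticks * HZ / tb_ticks_per_sec with no runtime division. A minimal sketch of how such a factor is applied (the kernel's cputime helpers use the mulhdu() assembly routine for the high multiply; unsigned __int128 appears here only to keep the sketch self-contained, and tb_ticks_to_jiffies() is an illustrative name, not a kernel API):

    /* High 64 bits of a 64x64-bit product; stands in for mulhdu(). */
    static inline u64 mul_hi64(u64 a, u64 b)
    {
            return (u64)(((unsigned __int128)a * b) >> 64);
    }

    /* (ticks * ((HZ << 64) / tb_ticks_per_sec)) >> 64 == ticks * HZ / tb_ticks_per_sec */
    static inline unsigned long tb_ticks_to_jiffies(u64 ticks)
    {
            return mul_hi64(ticks, __cputime_jiffies_factor);
    }

For example, with tb_ticks_per_sec = 512000000 and HZ = 250, a run of 700000000 ticks converts to (700000000 * 250) / 512000000 = 341 jiffies (341.79..., truncated).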
@@ -392,6 +612,7 @@ static void iSeries_tb_recal(void)
                                new_tb_ticks_per_jiffy, sign, tick_diff );
                        tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
                        tb_ticks_per_sec = new_tb_ticks_per_sec;
+                       calc_cputime_factors();
                        div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
                        do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
                        tb_to_xs = divres.result_low;
@@ -440,6 +661,7 @@ void timer_interrupt(struct pt_regs * regs)
        irq_enter();
 
        profile_tick(CPU_PROFILING, regs);
+       calculate_steal_time();
 
 #ifdef CONFIG_PPC_ISERIES
        get_lppaca()->int_dword.fields.decr_int = 0;
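calculate_steal_time(), now run on every timer interrupt, relies on the defining property of the PURR on SMT hardware: the two threads of a core divide the core's timebase ticks between them, so over any interval the sum of the two threads' PURR deltas is the number of ticks the core spent dispatched to this partition. Spelled out per core (this reading is inferred from the code above, not stated in the patch):

    delta_tb                     ticks of wall-clock time elapsed
    delta_purr0 + delta_purr1    ticks in which the partition actually ran
    stolen = delta_tb - (delta_purr0 + delta_purr1)

The first branch in calculate_steal_time() is the degenerate single-thread form, stolen = delta_tb - delta_purr, used when the sibling thread is offline or not yet initialized.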
@@ -461,7 +683,7 @@ void timer_interrupt(struct pt_regs * regs)
         * is the case.
         */
        if (!cpu_is_offline(cpu))
-               update_process_times(user_mode(regs));
+               account_process_time(regs);
 
        /*
         * No need to check whether cpu is offline here; boot_cpuid
@@ -518,13 +740,27 @@ void wakeup_decrementer(void)
 void __init smp_space_timers(unsigned int max_cpus)
 {
        int i;
+       unsigned long half = tb_ticks_per_jiffy / 2;
        unsigned long offset = tb_ticks_per_jiffy / max_cpus;
        unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
 
        /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
        previous_tb -= tb_ticks_per_jiffy;
+       /*
+        * The stolen time calculation for POWER5 shared-processor LPAR
+        * systems works better if the two threads' timebase interrupts
+        * are staggered by half a jiffy with respect to each other.
+        */
        for_each_cpu(i) {
-               if (i != boot_cpuid) {
+               if (i == boot_cpuid)
+                       continue;
+               if (i == (boot_cpuid ^ 1))
+                       per_cpu(last_jiffy, i) =
+                               per_cpu(last_jiffy, boot_cpuid) - half;
+               else if (i & 1)
+                       per_cpu(last_jiffy, i) =
+                               per_cpu(last_jiffy, i ^ 1) + half;
+               else {
                        previous_tb += offset;
                        per_cpu(last_jiffy, i) = previous_tb;
                }
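The effect of the rewritten loop, worked through for a machine with SMT pairs (0,1) and (2,3), boot_cpuid == 0, and writing J for tb_ticks_per_jiffy (this walk-through is derived from the code above):

    cpu 0:  left alone (boot cpu)
    cpu 1:  last_jiffy(0) - J/2       the boot cpu's sibling, half a jiffy early
    cpu 2:  previous_tb + offset      even thread of the next core
    cpu 3:  last_jiffy(2) + J/2       its sibling, half a jiffy later

Since the loop visits cpus in ascending order, the even thread of each pair is always assigned before its odd sibling reads per_cpu(last_jiffy, i ^ 1).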
@@ -720,6 +956,7 @@ void __init time_init(void)
        tb_ticks_per_sec = ppc_tb_freq;
        tb_ticks_per_usec = ppc_tb_freq / 1000000;
        tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
+       calc_cputime_factors();
 
        /*
         * Calculate the length of each tick in ns. It will not be
