author	Helge Deller <deller@gmx.de>	2016-04-20 15:34:15 -0400
committer	Helge Deller <deller@gmx.de>	2016-05-22 15:39:25 -0400
commit	54b668009076caddbede8fde513ca2c982590bfe (patch)
tree	873f576cebe662cdb3c8a6626ba6be193a0a6ef4 /arch
parent	64e2a42bca12e408f0258c56adcf3595bcd116e7 (diff)
parisc: Add native high-resolution sched_clock() implementation
Add a native implementation for the sched_clock() function which utilizes the
processor-internal cycle counter (Control Register 16) as high-resolution time
source.

With this patch we now get much more fine-grained resolutions in various
in-kernel time measurements (e.g. when viewing the function tracing logs), and
probably more accurate scheduling on SMP systems.

There are a few specific implementation details in this patch:

1. On a 32-bit kernel we emulate the higher 32 bits of the required 64-bit
   resolution of sched_clock() by increasing a per-cpu counter at every
   wrap-around of the 32-bit cycle counter.

2. In an SMP system, the cycle counters of the various CPUs are not
   synchronized (similar to the TSC in an x86_64 system). To cope with this we
   define HAVE_UNSTABLE_SCHED_CLOCK and let the upper layers do the
   adjustment work.

3. Since we need HAVE_UNSTABLE_SCHED_CLOCK, we need to provide a cmpxchg64()
   function even on a 32-bit kernel.

4. A 64-bit SMP kernel which is started on a UP system will mark the
   sched_clock() implementation as "stable", which means that we don't expect
   any jumps in the returned counter. This is true because we then run only on
   one CPU.

Signed-off-by: Helge Deller <deller@gmx.de>
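To make point 1 concrete, here is a minimal user-space sketch of widening a
32-bit free-running counter to 64 bits with a software-maintained high word.
The helper name read_cycle_counter32() is a hypothetical stand-in for
mfctl(16), and unlike the patch (which bumps a per-cpu high word from
timer_interrupt()) this sketch folds the wrap check into the read path; it is
only correct if reads happen more often than the ~4 second wrap period, which
the kernel guarantees via the timer interrupt:

	#include <stdint.h>

	/* Hypothetical stand-in for mfctl(16) on a 32-bit kernel. */
	extern uint32_t read_cycle_counter32(void);

	static uint32_t hi_word;  /* emulated upper 32 bits (per-cpu in the patch) */
	static uint32_t last_lo;  /* previous reading, used to detect wrap-around */

	static uint64_t read_cycle_counter64(void)
	{
		uint32_t lo = read_cycle_counter32();

		if (lo < last_lo)	/* 32-bit counter wrapped around */
			hi_word++;
		last_lo = lo;

		return ((uint64_t)hi_word << 32) | lo;
	}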
Diffstat (limited to 'arch')
-rw-r--r--	arch/parisc/Kconfig	1
-rw-r--r--	arch/parisc/include/asm/cmpxchg.h	9
-rw-r--r--	arch/parisc/kernel/time.c	63
-rw-r--r--	arch/parisc/lib/bitops.c	6
4 files changed, 70 insertions(+), 9 deletions(-)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 6c68c23dd7c2..dc117385ce2e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -33,6 +33,7 @@ config PARISC
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT)
 	select ARCH_NO_COHERENT_DMA_MMAP
 	select CPU_NO_EFFICIENT_FFS
 
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index 0a90b965cccb..7ada30900807 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -52,8 +52,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
 /* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
 extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
 				   unsigned int new_);
-extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr,
-				   unsigned long old, unsigned long new_);
+extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
 
 /* don't worry...optimizer will get rid of most of this */
 static inline unsigned long
@@ -61,7 +60,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 {
 	switch (size) {
 #ifdef CONFIG_64BIT
-	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+	case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
 #endif
 	case 4: return __cmpxchg_u32((unsigned int *)ptr,
 				     (unsigned int)old, (unsigned int)new_);
@@ -86,7 +85,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 {
 	switch (size) {
 #ifdef CONFIG_64BIT
-	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+	case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
 #endif
 	case 4: return __cmpxchg_u32(ptr, old, new_);
 	default:
@@ -111,4 +110,6 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 #endif
 
+#define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
+
 #endif /* _ASM_PARISC_CMPXCHG_H_ */
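For reference, cmpxchg64() follows the usual compare-and-exchange contract:
atomically store n into *ptr only if *ptr still equals o, and return the value
that was found there. A non-atomic sketch of just the semantics (the real
__cmpxchg_u64() in arch/parisc/lib/bitops.c, shown further below, gets its
atomicity from _atomic_spin_lock_irqsave()):

	#include <stdint.h>

	typedef uint64_t u64;	/* stand-in for the kernel type */

	static u64 cmpxchg64_semantics(volatile u64 *ptr, u64 old, u64 new_)
	{
		u64 prev = *ptr;	/* value found at ptr */

		if (prev == old)
			*ptr = new_;	/* swap only on match */
		return prev;		/* caller succeeded iff prev == old */
	}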
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 400acac0a304..58dd6801f5be 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -38,6 +38,18 @@
 
 static unsigned long clocktick __read_mostly;	/* timer cycles per tick */
 
+#ifndef CONFIG_64BIT
+/*
+ * The processor-internal cycle counter (Control Register 16) is used as time
+ * source for the sched_clock() function.  This register is 64bit wide on a
+ * 64-bit kernel and 32bit on a 32-bit kernel.  Since sched_clock() always
+ * requires a 64bit counter we emulate on the 32-bit kernel the higher 32 bits
+ * with a per-cpu variable which we increase every time the counter
+ * wraps around (which happens every ~4 seconds).
+ */
+static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
+#endif
+
 /*
  * We keep time on PA-RISC Linux by using the Interval Timer which is
  * a pair of registers; one is read-only and one is write-only; both
@@ -108,6 +120,12 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 	 */
 	mtctl(next_tick, 16);
 
+#if !defined(CONFIG_64BIT)
+	/* check for overflow on a 32bit kernel (every ~4 seconds). */
+	if (unlikely(next_tick < now))
+		this_cpu_inc(cr16_high_32_bits);
+#endif
+
 	/* Skip one clocktick on purpose if we missed next_tick.
 	 * The new CR16 must be "later" than current CR16 otherwise
 	 * itimer would not fire until CR16 wrapped - e.g 4 seconds
@@ -219,6 +237,12 @@ void __init start_cpu_itimer(void)
 	unsigned int cpu = smp_processor_id();
 	unsigned long next_tick = mfctl(16) + clocktick;
 
+#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
+	/* With multiple 64bit CPUs online, the cr16's are not synchronized. */
+	if (cpu != 0)
+		clear_sched_clock_stable();
+#endif
+
 	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */
 
 	per_cpu(cpu_data, cpu).it_value = next_tick;
@@ -246,15 +270,52 @@ void read_persistent_clock(struct timespec *ts)
 	}
 }
 
+
+/*
+ * sched_clock() framework
+ */
+
+static u32 cyc2ns_mul __read_mostly;
+static u32 cyc2ns_shift __read_mostly;
+
+u64 sched_clock(void)
+{
+	u64 now;
+
+	/* Get current cycle counter (Control Register 16). */
+#ifdef CONFIG_64BIT
+	now = mfctl(16);
+#else
+	now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
+#endif
+
+	/* return the value in ns (cycles_2_ns) */
+	return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
+}
+
+
+/*
+ * timer interrupt and sched_clock() initialization
+ */
+
 void __init time_init(void)
 {
 	unsigned long current_cr16_khz;
 
+	current_cr16_khz = PAGE0->mem_10msec/10;	/* kHz */
 	clocktick = (100 * PAGE0->mem_10msec) / HZ;
 
+	/* calculate mult/shift values for cr16 */
+	clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
+				NSEC_PER_MSEC, 0);
+
+#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
+	/* At bootup only one 64bit CPU is online and cr16 is "stable" */
+	set_sched_clock_stable();
+#endif
+
 	start_cpu_itimer();	/* get CPU 0 started */
 
 	/* register at clocksource framework */
-	current_cr16_khz = PAGE0->mem_10msec/10;	/* kHz */
 	clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
 }
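The cyc2ns conversion above is the standard mult/shift trick:
ns = (cycles * mult) >> shift, where clocks_calc_mult_shift() picks mult and
shift so that mult / 2^shift approximates NSEC_PER_MSEC / cr16_khz, i.e. the
nanoseconds per cycle. A self-contained sketch with a hypothetical 250 MHz
cr16 (exactly 4 ns per cycle), hand-picked mult/shift values, and a
simplified stand-in for the kernel's mul_u64_u32_shr():

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for the kernel's mul_u64_u32_shr();
	 * needs a compiler with __uint128_t (e.g. gcc on a 64-bit host). */
	static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
	{
		return (uint64_t)(((__uint128_t)a * mul) >> shift);
	}

	int main(void)
	{
		/* 250 MHz -> 4 ns/cycle; encode as mult = 4 * 2^26, shift = 26. */
		uint32_t cyc2ns_mul = 4u << 26;
		unsigned int cyc2ns_shift = 26;
		uint64_t cycles = 250000000;	/* one second worth of cycles */

		/* prints "1000000000 ns": one second, as expected */
		printf("%llu ns\n", (unsigned long long)
		       mul_u64_u32_shr(cycles, cyc2ns_mul, cyc2ns_shift));
		return 0;
	}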
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index 187118841af1..8e45b0a97abf 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -55,11 +55,10 @@ unsigned long __xchg8(char x, char *ptr)
 }
 
 
-#ifdef CONFIG_64BIT
-unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new)
+u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
 {
 	unsigned long flags;
-	unsigned long prev;
+	u64 prev;
 
 	_atomic_spin_lock_irqsave(ptr, flags);
 	if ((prev = *ptr) == old)
@@ -67,7 +66,6 @@ unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsi
 	_atomic_spin_unlock_irqrestore(ptr, flags);
 	return prev;
 }
-#endif
 
 unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
 {