author		Helge Deller <deller@gmx.de>	2016-11-22 12:08:30 -0500
committer	Helge Deller <deller@gmx.de>	2016-11-25 06:31:58 -0500
commit		43b1f6abd59063a088416a0df042b36450f91f75 (patch)
tree		1b87f1a0711ecea21627aa25d9fe1a48398b7286
parent		741dc7bf1c7c7d93b853bb55efe77baa27e1b0a9 (diff)
parisc: Switch to generic sched_clock implementation
Drop the open-coded sched_clock() function and replace it with the
GENERIC_SCHED_CLOCK implementation provided by the kernel. We have seen
quite a few hung tasks in the past, which seem to be fixed by this patch.

Signed-off-by: Helge Deller <deller@gmx.de>
Cc: <stable@vger.kernel.org> # v4.7+
Signed-off-by: Helge Deller <deller@gmx.de>
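For reference, the generic sched_clock framework only needs a monotonic
counter-read callback plus the counter width and rate; a minimal sketch of
the registration pattern this patch adopts (names match the patch below,
and the registration call sits inside time_init()):

#include <linux/sched_clock.h>

/* clock-read callback: return the raw CR16 cycle counter */
static u64 notrace read_cr16_sched_clock(void)
{
	return get_cycles();
}

/* in time_init(): the counter is BITS_PER_LONG wide and runs at cr16_hz */
sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);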
-rw-r--r--	arch/parisc/Kconfig		 4
-rw-r--r--	arch/parisc/kernel/time.c	57
2 files changed, 14 insertions(+), 47 deletions(-)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 71c4a3aa3752..a14b86587013 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -34,7 +34,9 @@ config PARISC
 	select HAVE_ARCH_HASH
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT)
+	select GENERIC_SCHED_CLOCK
+	select HAVE_UNSTABLE_SCHED_CLOCK if SMP
+	select GENERIC_CLOCKEVENTS
 	select ARCH_NO_COHERENT_DMA_MMAP
 	select CPU_NO_EFFICIENT_FFS
 
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 9b63b876a13a..325f30d82b64 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/rtc.h>
 #include <linux/sched.h>
+#include <linux/sched_clock.h>
 #include <linux/kernel.h>
 #include <linux/param.h>
 #include <linux/string.h>
@@ -39,18 +40,6 @@
 
 static unsigned long clocktick __read_mostly;	/* timer cycles per tick */
 
-#ifndef CONFIG_64BIT
-/*
- * The processor-internal cycle counter (Control Register 16) is used as time
- * source for the sched_clock() function. This register is 64bit wide on a
- * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always
- * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits
- * with a per-cpu variable which we increase every time the counter
- * wraps-around (which happens every ~4 secounds).
- */
-static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
-#endif
-
 /*
  * We keep time on PA-RISC Linux by using the Interval Timer which is
  * a pair of registers; one is read-only and one is write-only; both
@@ -121,12 +110,6 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 	 */
 	mtctl(next_tick, 16);
 
-#if !defined(CONFIG_64BIT)
-	/* check for overflow on a 32bit kernel (every ~4 seconds). */
-	if (unlikely(next_tick < now))
-		this_cpu_inc(cr16_high_32_bits);
-#endif
-
 	/* Skip one clocktick on purpose if we missed next_tick.
 	 * The new CR16 must be "later" than current CR16 otherwise
 	 * itimer would not fire until CR16 wrapped - e.g 4 seconds
@@ -208,7 +191,7 @@ EXPORT_SYMBOL(profile_pc);
 
 /* clock source code */
 
-static cycle_t read_cr16(struct clocksource *cs)
+static cycle_t notrace read_cr16(struct clocksource *cs)
 {
 	return get_cycles();
 }
@@ -287,26 +270,9 @@ void read_persistent_clock(struct timespec *ts)
 }
 
 
-/*
- * sched_clock() framework
- */
-
-static u32 cyc2ns_mul __read_mostly;
-static u32 cyc2ns_shift __read_mostly;
-
-u64 sched_clock(void)
+static u64 notrace read_cr16_sched_clock(void)
 {
-	u64 now;
-
-	/* Get current cycle counter (Control Register 16). */
-#ifdef CONFIG_64BIT
-	now = mfctl(16);
-#else
-	now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
-#endif
-
-	/* return the value in ns (cycles_2_ns) */
-	return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
+	return get_cycles();
 }
 
 
@@ -316,17 +282,16 @@ u64 sched_clock(void)
 
 void __init time_init(void)
 {
-	unsigned long current_cr16_khz;
+	unsigned long cr16_hz;
 
-	current_cr16_khz = PAGE0->mem_10msec/10;	/* kHz */
 	clocktick = (100 * PAGE0->mem_10msec) / HZ;
-
-	/* calculate mult/shift values for cr16 */
-	clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
-				NSEC_PER_MSEC, 0);
-
 	start_cpu_itimer();	/* get CPU 0 started */
 
+	cr16_hz = 100 * PAGE0->mem_10msec;	/* Hz */
+
 	/* register at clocksource framework */
-	clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
+	clocksource_register_hz(&clocksource_cr16, cr16_hz);
+
+	/* register as sched_clock source */
+	sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
 }