author		Marc Zyngier <Marc.Zyngier@arm.com>		2011-12-15 06:19:23 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-12-18 18:00:26 -0500
commit		2f0778afac79bd8d226225556858a636931eeabc (patch)
tree		e00cea674f3d6cc8c5584aa5b75239f11f6d229d /arch/arm/kernel
parent		3bdc3484e8f2b1b219ad0397d81ce4601fbaf76d (diff)
ARM: 7205/2: sched_clock: allow sched_clock to be selected at runtime
sched_clock() is yet another blocker on the road to a single
kernel image. This patch implements an idea by Russell King:
http://www.spinics.net/lists/linux-omap/msg49561.html
Instead of asking the platform to implement both sched_clock()
itself and the rollover callback, simply register a read()
function and let the ARM core code take care of sched_clock()
itself, the conversion to ns, and the rollover. sched_clock()
uses this read() function as an indirection to the platform code.
If the platform doesn't provide a read(), the code falls back
to the jiffy counter (just like the default sched_clock()).
This allows some simplifications and possibly some footprint gain
when multiple platforms are compiled in. Among the drawbacks is
the removal of the *_fixed_sched_clock optimization, which could
negatively impact some platforms (sa1100, tegra, versatile
and omap).
Tested on 11MPCore, OMAP4 and Tegra.
Cc: Imre Kaloz <kaloz@openwrt.org>
Cc: Eric Miao <eric.y.miao@gmail.com>
Cc: Colin Cross <ccross@android.com>
Cc: Erik Gilling <konkers@android.com>
Cc: Olof Johansson <olof@lixom.net>
Cc: Sascha Hauer <kernel@pengutronix.de>
Cc: Alessandro Rubini <rubini@unipv.it>
Cc: STEricsson <STEricsson_nomadik_linux@list.st.com>
Cc: Lennert Buytenhek <kernel@wantstofly.org>
Cc: Ben Dooks <ben-linux@fluff.org>
Tested-by: Jamie Iles <jamie@jamieiles.com>
Tested-by: Tony Lindgren <tony@atomide.com>
Tested-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Krzysztof Halasa <khc@pm.waw.pl>
Acked-by: Kukjin Kim <kgene.kim@samsung.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
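
For illustration, a minimal sketch of what a platform conversion to the new
interface might look like. The counter name, physical address and rate below
are invented for this example; only setup_sched_clock() and its signature
come from this patch.

	#include <linux/init.h>
	#include <linux/io.h>
	#include <asm/sched_clock.h>

	static void __iomem *ctr_base;	/* hypothetical free-running up-counter */

	static u32 notrace plat_sched_clock_read(void)
	{
		/* Must be fast and lock-free: just return the raw counter. */
		return readl_relaxed(ctr_base);
	}

	static void __init plat_timer_init(void)
	{
		ctr_base = ioremap(0x10000000, 0x1000);	/* made-up address */

		/*
		 * 32-bit counter running at 24MHz; the core code now does
		 * the cyc->ns conversion and the rollover handling for us.
		 * Called early in the timer init path, with IRQs still off,
		 * as setup_sched_clock() expects.
		 */
		setup_sched_clock(plat_sched_clock_read, 32, 24000000);
	}

The read() hook replaces both the platform's sched_clock() implementation and
its rollover timer; everything else now lives in arch/arm/kernel/sched_clock.c.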
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/sched_clock.c	118
1 file changed, 105 insertions(+), 13 deletions(-)
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 9a46370fe9da..5416c7c12528 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -14,61 +14,153 @@
 
 #include <asm/sched_clock.h>
 
+struct clock_data {
+	u64 epoch_ns;
+	u32 epoch_cyc;
+	u32 epoch_cyc_copy;
+	u32 mult;
+	u32 shift;
+};
+
 static void sched_clock_poll(unsigned long wrap_ticks);
 static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
-static void (*sched_clock_update_fn)(void);
+
+static struct clock_data cd = {
+	.mult	= NSEC_PER_SEC / HZ,
+};
+
+static u32 __read_mostly sched_clock_mask = 0xffffffff;
+
+static u32 notrace jiffy_sched_clock_read(void)
+{
+	return (u32)(jiffies - INITIAL_JIFFIES);
+}
+
+static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
+
+static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+{
+	return (cyc * mult) >> shift;
+}
+
+static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+{
+	u64 epoch_ns;
+	u32 epoch_cyc;
+
+	/*
+	 * Load the epoch_cyc and epoch_ns atomically. We do this by
+	 * ensuring that we always write epoch_cyc, epoch_ns and
+	 * epoch_cyc_copy in strict order, and read them in strict order.
+	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
+	 * the middle of an update, and we should repeat the load.
+	 */
+	do {
+		epoch_cyc = cd.epoch_cyc;
+		smp_rmb();
+		epoch_ns = cd.epoch_ns;
+		smp_rmb();
+	} while (epoch_cyc != cd.epoch_cyc_copy);
+
+	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
+}
+
+/*
+ * Atomically update the sched_clock epoch.
+ */
+static void notrace update_sched_clock(void)
+{
+	unsigned long flags;
+	u32 cyc;
+	u64 ns;
+
+	cyc = read_sched_clock();
+	ns = cd.epoch_ns +
+		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
+			  cd.mult, cd.shift);
+	/*
+	 * Write epoch_cyc and epoch_ns in a way that the update is
+	 * detectable in cyc_to_sched_clock().
+	 */
+	raw_local_irq_save(flags);
+	cd.epoch_cyc = cyc;
+	smp_wmb();
+	cd.epoch_ns = ns;
+	smp_wmb();
+	cd.epoch_cyc_copy = cyc;
+	raw_local_irq_restore(flags);
+}
 
 static void sched_clock_poll(unsigned long wrap_ticks)
 {
 	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
-	sched_clock_update_fn();
+	update_sched_clock();
 }
 
-void __init init_sched_clock(struct clock_data *cd, void (*update)(void),
-	unsigned int clock_bits, unsigned long rate)
+void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 {
 	unsigned long r, w;
 	u64 res, wrap;
 	char r_unit;
 
-	sched_clock_update_fn = update;
+	BUG_ON(bits > 32);
+	WARN_ON(!irqs_disabled());
+	WARN_ON(read_sched_clock != jiffy_sched_clock_read);
+	read_sched_clock = read;
+	sched_clock_mask = (1 << bits) - 1;
 
 	/* calculate the mult/shift to convert counter ticks to ns. */
-	clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 0);
+	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
 
 	r = rate;
 	if (r >= 4000000) {
 		r /= 1000000;
 		r_unit = 'M';
-	} else {
+	} else if (r >= 1000) {
 		r /= 1000;
 		r_unit = 'k';
-	}
+	} else
+		r_unit = ' ';
 
 	/* calculate how many ns until we wrap */
-	wrap = cyc_to_ns((1ULL << clock_bits) - 1, cd->mult, cd->shift);
+	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
 	do_div(wrap, NSEC_PER_MSEC);
 	w = wrap;
 
 	/* calculate the ns resolution of this counter */
-	res = cyc_to_ns(1ULL, cd->mult, cd->shift);
+	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
 	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
-		clock_bits, r, r_unit, res, w);
+		bits, r, r_unit, res, w);
 
 	/*
 	 * Start the timer to keep sched_clock() properly updated and
 	 * sets the initial epoch.
 	 */
 	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
-	update();
+	update_sched_clock();
 
 	/*
 	 * Ensure that sched_clock() starts off at 0ns
 	 */
-	cd->epoch_ns = 0;
+	cd.epoch_ns = 0;
+
+	pr_debug("Registered %pF as sched_clock source\n", read);
+}
+
+unsigned long long notrace sched_clock(void)
+{
+	u32 cyc = read_sched_clock();
+	return cyc_to_sched_clock(cyc, sched_clock_mask);
 }
 
 void __init sched_clock_postinit(void)
 {
+	/*
+	 * If no sched_clock function has been provided at this point,
+	 * make it the final one.
+	 */
+	if (read_sched_clock == jiffy_sched_clock_read)
+		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
+
 	sched_clock_poll(sched_clock_timer.data);
 }
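
For a feel of the arithmetic, here is a stand-alone user-space model (not
kernel code; the 16-bit counter width, mult/shift pair and epoch values are
made up) showing how (cyc - epoch_cyc) & mask keeps sched_clock() monotonic
across a counter rollover:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
	{
		return (cyc * mult) >> shift;
	}

	int main(void)
	{
		uint32_t mask = (1u << 16) - 1;		/* pretend 16-bit counter */
		uint32_t mult = 1000, shift = 0;	/* 1 tick == 1000ns, i.e. 1MHz */
		uint32_t epoch_cyc = 0xfff0;		/* epoch taken just before the wrap */
		uint64_t epoch_ns = 5000000;		/* 5ms accumulated at the epoch */
		uint32_t cyc = 0x0010;			/* counter has since wrapped past zero */

		/* (0x0010 - 0xfff0) & 0xffff == 0x20: 32 ticks, not a huge negative */
		uint64_t ns = epoch_ns +
			cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);

		printf("sched_clock() ~= %lluns\n", (unsigned long long)ns);
		return 0;
	}

As long as the poll timer fires well within one wrap period (the code arms it
at 90% of the wrap time, via msecs_to_jiffies(w - (w / 10))), the masked delta
cannot alias and the epoch always advances monotonically.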