Diffstat (limited to 'arch/arm/include/asm')
 -rw-r--r--  arch/arm/include/asm/sched_clock.h | 118
 -rw-r--r--  arch/arm/include/asm/system.h      |   5
 -rw-r--r--  arch/arm/include/asm/traps.h       |  23
 3 files changed, 144 insertions(+), 2 deletions(-)
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
new file mode 100644
index 000000000000..a84628be1a7b
--- /dev/null
+++ b/arch/arm/include/asm/sched_clock.h
@@ -0,0 +1,118 @@
+/*
+ * sched_clock.h: support for extending counters to full 64-bit ns counter
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_SCHED_CLOCK
+#define ASM_SCHED_CLOCK
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+struct clock_data {
+	u64 epoch_ns;
+	u32 epoch_cyc;
+	u32 epoch_cyc_copy;
+	u32 mult;
+	u32 shift;
+};
+
+#define DEFINE_CLOCK_DATA(name)	struct clock_data name
+
+static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+{
+	return (cyc * mult) >> shift;
+}
+
+/*
+ * Atomically update the sched_clock epoch.  Your update callback will
+ * be called from a timer before the counter wraps - read the current
+ * counter value, and call this function to safely move the epochs
+ * forward.  Only use this from the update callback.
+ */
+static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask)
+{
+	unsigned long flags;
+	u64 ns = cd->epoch_ns +
+		cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift);
+
+	/*
+	 * Write epoch_cyc and epoch_ns in a way that the update is
+	 * detectable in cyc_to_fixed_sched_clock().
+	 */
+	raw_local_irq_save(flags);
+	cd->epoch_cyc = cyc;
+	smp_wmb();
+	cd->epoch_ns = ns;
+	smp_wmb();
+	cd->epoch_cyc_copy = cyc;
+	raw_local_irq_restore(flags);
+}
+
+/*
+ * If your clock rate is known at compile time, using this will allow
+ * you to optimize the mult/shift loads away.  This is paired with
+ * init_fixed_sched_clock() to ensure that your mult/shift are correct.
+ */
+static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd,
+	u32 cyc, u32 mask, u32 mult, u32 shift)
+{
+	u64 epoch_ns;
+	u32 epoch_cyc;
+
+	/*
+	 * Load the epoch_cyc and epoch_ns atomically.  We do this by
+	 * ensuring that we always write epoch_cyc, epoch_ns and
+	 * epoch_cyc_copy in strict order, and read them in strict order.
+	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
+	 * the middle of an update, and we should repeat the load.
+	 */
+	do {
+		epoch_cyc = cd->epoch_cyc;
+		smp_rmb();
+		epoch_ns = cd->epoch_ns;
+		smp_rmb();
+	} while (epoch_cyc != cd->epoch_cyc_copy);
+
+	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);
+}
+
+/*
+ * Otherwise, you need to use this, which will obtain the mult/shift
+ * from the clock_data structure.  Use init_sched_clock() with this.
+ */
+static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd,
+	u32 cyc, u32 mask)
+{
+	return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift);
+}
+
+/*
+ * Initialize the clock data - calculate the appropriate multiplier
+ * and shift.  Also setup a timer to ensure that the epoch is refreshed
+ * at the appropriate time interval, which will call your update
+ * handler.
+ */
+void init_sched_clock(struct clock_data *, void (*)(void),
+	unsigned int, unsigned long);
+
+/*
+ * Use this initialization function rather than init_sched_clock() if
+ * you're using cyc_to_fixed_sched_clock, which will warn if your
+ * constants are incorrect.
+ */
+static inline void init_fixed_sched_clock(struct clock_data *cd,
+	void (*update)(void), unsigned int bits, unsigned long rate,
+	u32 mult, u32 shift)
+{
+	init_sched_clock(cd, update, bits, rate);
+	if (cd->mult != mult || cd->shift != shift) {
+		pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n"
+			"sched_clock: fix multiply/shift to avoid scheduler hiccups\n",
+			mult, shift, cd->mult, cd->shift);
+	}
+}
+
+#endif
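As a usage sketch (not part of this patch): a platform with a free-running 32-bit
counter would typically wire this API up roughly as below. The register pointer
and the init/update function names are placeholders, and the mask assumes a full
32-bit counter.

#include <linux/init.h>
#include <linux/io.h>
#include <asm/sched_clock.h>

static DEFINE_CLOCK_DATA(cd);

/* Hypothetical free-running 32-bit upcounter register. */
static void __iomem *counter_base;

static inline u32 read_counter(void)
{
	return readl(counter_base);
}

/*
 * The platform's sched_clock() override: extend the current counter
 * value to nanoseconds relative to the last epoch.  notrace avoids
 * recursion when the function tracer is active.
 */
unsigned long long notrace sched_clock(void)
{
	u32 cyc = read_counter();

	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
}

/*
 * Called from the timer that init_sched_clock() sets up, before the
 * counter wraps, to move the epoch forward.
 */
static void notrace my_update_sched_clock(void)
{
	u32 cyc = read_counter();

	update_sched_clock(&cd, cyc, (u32)~0);
}

static void __init my_sched_clock_init(unsigned long rate)
{
	/* 32 counter bits, counting at 'rate' Hz. */
	init_sched_clock(&cd, my_update_sched_clock, 32, rate);
}

If the counter rate is known at compile time, the same platform would instead pass
constant mult/shift values to cyc_to_fixed_sched_clock() and register them with
init_fixed_sched_clock(), which warns when the constants disagree with the
calculated ones.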
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 80025948b8ad..3222ab8b3447 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -63,6 +63,11 @@
 #include <asm/outercache.h>
 
 #define __exception	__attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry	__irq_entry
+#else
+#define __exception_irq_entry	__exception
+#endif
 
 struct thread_info;
 struct task_struct;
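For illustration only (not part of this hunk): __exception_irq_entry is meant for
the C-level IRQ entry points, something in the vein of asm_do_IRQ(), so that with
CONFIG_FUNCTION_GRAPH_TRACER they are placed in .irqentry.text where both the
tracer and the backtrace code can recognise them. A simplified sketch of such an
annotated dispatcher:

#include <linux/irq.h>
#include <linux/hardirq.h>
#include <asm/irq_regs.h>
#include <asm/system.h>

/* Sketch only: dispatch logic reduced to the bare minimum. */
asmlinkage void __exception_irq_entry
asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	generic_handle_irq(irq);	/* simplified: no bad-IRQ handling */
	irq_exit();

	set_irq_regs(old_regs);
}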
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 491960bf4260..124475afb007 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -15,13 +15,32 @@ struct undef_hook {
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+	extern char __irqentry_text_start[];
+	extern char __irqentry_text_end[];
+
+	return ptr >= (unsigned long)&__irqentry_text_start &&
+	       ptr < (unsigned long)&__irqentry_text_end;
+}
+#else
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+	return 0;
+}
+#endif
+
 static inline int in_exception_text(unsigned long ptr)
 {
 	extern char __exception_text_start[];
 	extern char __exception_text_end[];
+	int in;
+
+	in = ptr >= (unsigned long)&__exception_text_start &&
+	     ptr < (unsigned long)&__exception_text_end;
 
-	return ptr >= (unsigned long)&__exception_text_start &&
-	       ptr < (unsigned long)&__exception_text_end;
+	return in ? : __in_irqentry_text(ptr);
 }
 
 extern void __init early_trap_init(void);
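For context (simplified from the arch/arm backtrace code, not part of this patch):
in_exception_text() decides whether a backtrace entry is followed by a saved
register frame, and the change above extends that to frames whose return address
lies in the IRQ-entry section. dump_mem() below stands for the helper in
arch/arm/kernel/traps.c.

/*
 * Simplified sketch of a caller: when the return address lies in
 * exception (or now IRQ-entry) text, the pt_regs block sitting just
 * above the frame is dumped as the "Exception stack".
 */
void dump_backtrace_entry(unsigned long where, unsigned long from,
			  unsigned long frame)
{
	printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n",
	       where, (void *)where, from, (void *)from);

	if (in_exception_text(where))
		dump_mem("", "Exception stack", frame + 4,
			 frame + 4 + sizeof(struct pt_regs));
}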