author     Thomas Gleixner <tglx@linutronix.de>      2011-05-14 06:06:36 -0400
committer  Thomas Gleixner <tglx@linutronix.de>      2011-05-14 06:06:36 -0400
commit     a18f22a968de17b29f2310cdb7ba69163e65ec15 (patch)
tree       a7d56d88fad5e444d7661484109758a2f436129e /arch/mips/kernel
parent     a1c57e0fec53defe745e64417eacdbd3618c3e66 (diff)
parent     798778b8653f64b7b2162ac70eca10367cff6ce8 (diff)
Merge branch 'consolidate-clksrc-i8253' of master.kernel.org:~rmk/linux-2.6-arm into timers/clocksource
Conflicts:
arch/ia64/kernel/cyclone.c
arch/mips/kernel/i8253.c
arch/x86/kernel/i8253.c
Reason: Resolve conflicts so further cleanups do not conflict further
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/mips/kernel')
28 files changed, 496 insertions, 583 deletions
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c index b8bb8ba60869..f305ca14351b 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/cpu-bugs64.c | |||
@@ -73,7 +73,7 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w, | |||
73 | : "0" (5), "1" (8), "2" (5)); | 73 | : "0" (5), "1" (8), "2" (5)); |
74 | align_mod(align, mod); | 74 | align_mod(align, mod); |
75 | /* | 75 | /* |
76 | * The trailing nop is needed to fullfill the two-instruction | 76 | * The trailing nop is needed to fulfill the two-instruction |
77 | * requirement between reading hi/lo and staring a mult/div. | 77 | * requirement between reading hi/lo and staring a mult/div. |
78 | * Leaving it out may cause gas insert a nop itself breaking | 78 | * Leaving it out may cause gas insert a nop itself breaking |
79 | * the desired alignment of the next chunk. | 79 | * the desired alignment of the next chunk. |
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 5a84a1f11231..94ca2b018af7 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c | |||
@@ -17,29 +17,13 @@ | |||
17 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
18 | #include <asm/uasm.h> | 18 | #include <asm/uasm.h> |
19 | 19 | ||
20 | /* | 20 | #include <asm-generic/sections.h> |
21 | * If the Instruction Pointer is in module space (0xc0000000), return true; | ||
22 | * otherwise, it is in kernel space (0x80000000), return false. | ||
23 | * | ||
24 | * FIXME: This will not work when the kernel space and module space are the | ||
25 | * same. If they are the same, we need to modify scripts/recordmcount.pl, | ||
26 | * ftrace_make_nop/call() and the other related parts to ensure the | ||
27 | * enabling/disabling of the calling site to _mcount is right for both kernel | ||
28 | * and module. | ||
29 | */ | ||
30 | |||
31 | static inline int in_module(unsigned long ip) | ||
32 | { | ||
33 | return ip & 0x40000000; | ||
34 | } | ||
35 | 21 | ||
36 | #ifdef CONFIG_DYNAMIC_FTRACE | 22 | #ifdef CONFIG_DYNAMIC_FTRACE |
37 | 23 | ||
38 | #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ | 24 | #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ |
39 | #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ | 25 | #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ |
40 | 26 | ||
41 | #define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */ | ||
42 | #define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */ | ||
43 | #define INSN_NOP 0x00000000 /* nop */ | 27 | #define INSN_NOP 0x00000000 /* nop */ |
44 | #define INSN_JAL(addr) \ | 28 | #define INSN_JAL(addr) \ |
45 | ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) | 29 | ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) |
@@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void) | |||
69 | #endif | 53 | #endif |
70 | } | 54 | } |
71 | 55 | ||
56 | /* | ||
57 | * Check if the address is in kernel space | ||
58 | * | ||
59 | * Clone core_kernel_text() from kernel/extable.c, but doesn't call | ||
60 | * init_kernel_text() for Ftrace doesn't trace functions in init sections. | ||
61 | */ | ||
62 | static inline int in_kernel_space(unsigned long ip) | ||
63 | { | ||
64 | if (ip >= (unsigned long)_stext && | ||
65 | ip <= (unsigned long)_etext) | ||
66 | return 1; | ||
67 | return 0; | ||
68 | } | ||
69 | |||
72 | static int ftrace_modify_code(unsigned long ip, unsigned int new_code) | 70 | static int ftrace_modify_code(unsigned long ip, unsigned int new_code) |
73 | { | 71 | { |
74 | int faulted; | 72 | int faulted; |
@@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) | |||
84 | return 0; | 82 | return 0; |
85 | } | 83 | } |
86 | 84 | ||
85 | /* | ||
86 | * The details about the calling site of mcount on MIPS | ||
87 | * | ||
88 | * 1. For kernel: | ||
89 | * | ||
90 | * move at, ra | ||
91 | * jal _mcount --> nop | ||
92 | * | ||
93 | * 2. For modules: | ||
94 | * | ||
95 | * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT | ||
96 | * | ||
97 | * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) | ||
98 | * addiu v1, v1, low_16bit_of_mcount | ||
99 | * move at, ra | ||
100 | * move $12, ra_address | ||
101 | * jalr v1 | ||
102 | * sub sp, sp, 8 | ||
103 | * 1: offset = 5 instructions | ||
104 | * 2.2 For the Other situations | ||
105 | * | ||
106 | * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) | ||
107 | * addiu v1, v1, low_16bit_of_mcount | ||
108 | * move at, ra | ||
109 | * jalr v1 | ||
110 | * nop | move $12, ra_address | sub sp, sp, 8 | ||
111 | * 1: offset = 4 instructions | ||
112 | */ | ||
113 | |||
114 | #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) | ||
115 | #define MCOUNT_OFFSET_INSNS 5 | ||
116 | #else | ||
117 | #define MCOUNT_OFFSET_INSNS 4 | ||
118 | #endif | ||
119 | #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS) | ||
120 | |||
87 | int ftrace_make_nop(struct module *mod, | 121 | int ftrace_make_nop(struct module *mod, |
88 | struct dyn_ftrace *rec, unsigned long addr) | 122 | struct dyn_ftrace *rec, unsigned long addr) |
89 | { | 123 | { |
@@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod, | |||
91 | unsigned long ip = rec->ip; | 125 | unsigned long ip = rec->ip; |
92 | 126 | ||
93 | /* | 127 | /* |
94 | * We have compiled module with -mlong-calls, but compiled the kernel | 128 | * If ip is in kernel space, no long call, otherwise, long call is |
95 | * without it, we need to cope with them respectively. | 129 | * needed. |
96 | */ | 130 | */ |
97 | if (in_module(ip)) { | 131 | new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; |
98 | #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) | 132 | |
99 | /* | ||
100 | * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) | ||
101 | * addiu v1, v1, low_16bit_of_mcount | ||
102 | * move at, ra | ||
103 | * move $12, ra_address | ||
104 | * jalr v1 | ||
105 | * sub sp, sp, 8 | ||
106 | * 1: offset = 5 instructions | ||
107 | */ | ||
108 | new = INSN_B_1F_5; | ||
109 | #else | ||
110 | /* | ||
111 | * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) | ||
112 | * addiu v1, v1, low_16bit_of_mcount | ||
113 | * move at, ra | ||
114 | * jalr v1 | ||
115 | * nop | move $12, ra_address | sub sp, sp, 8 | ||
116 | * 1: offset = 4 instructions | ||
117 | */ | ||
118 | new = INSN_B_1F_4; | ||
119 | #endif | ||
120 | } else { | ||
121 | /* | ||
122 | * move at, ra | ||
123 | * jal _mcount --> nop | ||
124 | */ | ||
125 | new = INSN_NOP; | ||
126 | } | ||
127 | return ftrace_modify_code(ip, new); | 133 | return ftrace_modify_code(ip, new); |
128 | } | 134 | } |
129 | 135 | ||
@@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
132 | unsigned int new; | 138 | unsigned int new; |
133 | unsigned long ip = rec->ip; | 139 | unsigned long ip = rec->ip; |
134 | 140 | ||
135 | /* ip, module: 0xc0000000, kernel: 0x80000000 */ | 141 | new = in_kernel_space(ip) ? insn_jal_ftrace_caller : |
136 | new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller; | 142 | insn_lui_v1_hi16_mcount; |
137 | 143 | ||
138 | return ftrace_modify_code(ip, new); | 144 | return ftrace_modify_code(ip, new); |
139 | } | 145 | } |
@@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void) | |||
190 | #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ | 196 | #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ |
191 | #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ | 197 | #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ |
192 | 198 | ||
193 | unsigned long ftrace_get_parent_addr(unsigned long self_addr, | 199 | unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long |
194 | unsigned long parent, | 200 | old_parent_ra, unsigned long parent_ra_addr, unsigned long fp) |
195 | unsigned long parent_addr, | ||
196 | unsigned long fp) | ||
197 | { | 201 | { |
198 | unsigned long sp, ip, ra; | 202 | unsigned long sp, ip, tmp; |
199 | unsigned int code; | 203 | unsigned int code; |
200 | int faulted; | 204 | int faulted; |
201 | 205 | ||
202 | /* | 206 | /* |
203 | * For module, move the ip from calling site of mcount to the | 207 | * For module, move the ip from the return address after the |
204 | * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for | 208 | * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for |
205 | * kernel, move to the instruction "move ra, at"(offset is 12) | 209 | * kernel, move after the instruction "move ra, at"(offset is 16) |
206 | */ | 210 | */ |
207 | ip = self_addr - (in_module(self_addr) ? 20 : 12); | 211 | ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24); |
208 | 212 | ||
209 | /* | 213 | /* |
210 | * search the text until finding the non-store instruction or "s{d,w} | 214 | * search the text until finding the non-store instruction or "s{d,w} |
211 | * ra, offset(sp)" instruction | 215 | * ra, offset(sp)" instruction |
212 | */ | 216 | */ |
213 | do { | 217 | do { |
214 | ip -= 4; | ||
215 | |||
216 | /* get the code at "ip": code = *(unsigned int *)ip; */ | 218 | /* get the code at "ip": code = *(unsigned int *)ip; */ |
217 | safe_load_code(code, ip, faulted); | 219 | safe_load_code(code, ip, faulted); |
218 | 220 | ||
@@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, | |||
224 | * store the ra on the stack | 226 | * store the ra on the stack |
225 | */ | 227 | */ |
226 | if ((code & S_R_SP) != S_R_SP) | 228 | if ((code & S_R_SP) != S_R_SP) |
227 | return parent_addr; | 229 | return parent_ra_addr; |
228 | 230 | ||
229 | } while (((code & S_RA_SP) != S_RA_SP)); | 231 | /* Move to the next instruction */ |
232 | ip -= 4; | ||
233 | } while ((code & S_RA_SP) != S_RA_SP); | ||
230 | 234 | ||
231 | sp = fp + (code & OFFSET_MASK); | 235 | sp = fp + (code & OFFSET_MASK); |
232 | 236 | ||
233 | /* ra = *(unsigned long *)sp; */ | 237 | /* tmp = *(unsigned long *)sp; */ |
234 | safe_load_stack(ra, sp, faulted); | 238 | safe_load_stack(tmp, sp, faulted); |
235 | if (unlikely(faulted)) | 239 | if (unlikely(faulted)) |
236 | return 0; | 240 | return 0; |
237 | 241 | ||
238 | if (ra == parent) | 242 | if (tmp == old_parent_ra) |
239 | return sp; | 243 | return sp; |
240 | return 0; | 244 | return 0; |
241 | } | 245 | } |
@@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, | |||
246 | * Hook the return address and push it in the stack of return addrs | 250 | * Hook the return address and push it in the stack of return addrs |
247 | * in current thread info. | 251 | * in current thread info. |
248 | */ | 252 | */ |
249 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | 253 | void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, |
250 | unsigned long fp) | 254 | unsigned long fp) |
251 | { | 255 | { |
252 | unsigned long old; | 256 | unsigned long old_parent_ra; |
253 | struct ftrace_graph_ent trace; | 257 | struct ftrace_graph_ent trace; |
254 | unsigned long return_hooker = (unsigned long) | 258 | unsigned long return_hooker = (unsigned long) |
255 | &return_to_handler; | 259 | &return_to_handler; |
256 | int faulted; | 260 | int faulted, insns; |
257 | 261 | ||
258 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 262 | if (unlikely(atomic_read(&current->tracing_graph_pause))) |
259 | return; | 263 | return; |
260 | 264 | ||
261 | /* | 265 | /* |
262 | * "parent" is the stack address saved the return address of the caller | 266 | * "parent_ra_addr" is the stack address saved the return address of |
263 | * of _mcount. | 267 | * the caller of _mcount. |
264 | * | 268 | * |
265 | * if the gcc < 4.5, a leaf function does not save the return address | 269 | * if the gcc < 4.5, a leaf function does not save the return address |
266 | * in the stack address, so, we "emulate" one in _mcount's stack space, | 270 | * in the stack address, so, we "emulate" one in _mcount's stack space, |
@@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
275 | * do it in ftrace_graph_caller of mcount.S. | 279 | * do it in ftrace_graph_caller of mcount.S. |
276 | */ | 280 | */ |
277 | 281 | ||
278 | /* old = *parent; */ | 282 | /* old_parent_ra = *parent_ra_addr; */ |
279 | safe_load_stack(old, parent, faulted); | 283 | safe_load_stack(old_parent_ra, parent_ra_addr, faulted); |
280 | if (unlikely(faulted)) | 284 | if (unlikely(faulted)) |
281 | goto out; | 285 | goto out; |
282 | #ifndef KBUILD_MCOUNT_RA_ADDRESS | 286 | #ifndef KBUILD_MCOUNT_RA_ADDRESS |
283 | parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old, | 287 | parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra, |
284 | (unsigned long)parent, fp); | 288 | old_parent_ra, (unsigned long)parent_ra_addr, fp); |
285 | /* | 289 | /* |
286 | * If fails when getting the stack address of the non-leaf function's | 290 | * If fails when getting the stack address of the non-leaf function's |
287 | * ra, stop function graph tracer and return | 291 | * ra, stop function graph tracer and return |
288 | */ | 292 | */ |
289 | if (parent == 0) | 293 | if (parent_ra_addr == 0) |
290 | goto out; | 294 | goto out; |
291 | #endif | 295 | #endif |
292 | /* *parent = return_hooker; */ | 296 | /* *parent_ra_addr = return_hooker; */ |
293 | safe_store_stack(return_hooker, parent, faulted); | 297 | safe_store_stack(return_hooker, parent_ra_addr, faulted); |
294 | if (unlikely(faulted)) | 298 | if (unlikely(faulted)) |
295 | goto out; | 299 | goto out; |
296 | 300 | ||
297 | if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) == | 301 | if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp) |
298 | -EBUSY) { | 302 | == -EBUSY) { |
299 | *parent = old; | 303 | *parent_ra_addr = old_parent_ra; |
300 | return; | 304 | return; |
301 | } | 305 | } |
302 | 306 | ||
303 | trace.func = self_addr; | 307 | /* |
308 | * Get the recorded ip of the current mcount calling site in the | ||
309 | * __mcount_loc section, which will be used to filter the function | ||
310 | * entries configured through the tracing/set_graph_function interface. | ||
311 | */ | ||
312 | |||
313 | insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; | ||
314 | trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); | ||
304 | 315 | ||
305 | /* Only trace if the calling function expects to */ | 316 | /* Only trace if the calling function expects to */ |
306 | if (!ftrace_graph_entry(&trace)) { | 317 | if (!ftrace_graph_entry(&trace)) { |
307 | current->curr_ret_stack--; | 318 | current->curr_ret_stack--; |
308 | *parent = old; | 319 | *parent_ra_addr = old_parent_ra; |
309 | } | 320 | } |
310 | return; | 321 | return; |
311 | out: | 322 | out: |
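
The ftrace.c hunks above replace the old in_module() heuristic with in_kernel_space() and collapse the two branch encodings (INSN_B_1F_4/INSN_B_1F_5) into a single INSN_B_1F derived from MCOUNT_OFFSET_INSNS; trace.func is then recovered by stepping back over the mcount call sequence. A minimal user-space sketch of that arithmetic, using only the constants shown in the hunks and assuming MCOUNT_INSN_SIZE is 4 bytes (one MIPS instruction):

```c
#include <stdio.h>

#define INSN_B           0x10000000u  /* MIPS "b" opcode; word offset in the low 16 bits */
#define MCOUNT_INSN_SIZE 4            /* assumed: one MIPS instruction is 4 bytes */

/* b 1f over N instructions: reproduces the removed INSN_B_1F_5 (0x10000005)
 * and INSN_B_1F_4 (0x10000004) macros */
static unsigned int encode_b_1f(int insns_to_skip)
{
    return INSN_B | (insns_to_skip & 0xffff);
}

/* trace.func: step back from the return address to the start of the mcount
 * call sequence - 2 insns for kernel, MCOUNT_OFFSET_INSNS + 1 for modules */
static unsigned long calling_site(unsigned long self_ra, int in_kernel,
                                  int mcount_offset_insns)
{
    int insns = in_kernel ? 2 : mcount_offset_insns + 1;
    return self_ra - (unsigned long)MCOUNT_INSN_SIZE * insns;
}

int main(void)
{
    printf("INSN_B_1F (32BIT + KBUILD_MCOUNT_RA_ADDRESS): 0x%08x\n", encode_b_1f(5));
    printf("INSN_B_1F (other configurations):             0x%08x\n", encode_b_1f(4));
    printf("trace.func for a kernel ra of 0x80001234:     0x%lx\n",
           calling_site(0x80001234UL, 1, 4));
    return 0;
}
```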
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index 9fadd17888d9..391221b6a6aa 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c | |||
@@ -125,84 +125,11 @@ void __init setup_pit_timer(void) | |||
125 | setup_irq(0, &irq0); | 125 | setup_irq(0, &irq0); |
126 | } | 126 | } |
127 | 127 | ||
128 | /* | ||
129 | * Since the PIT overflows every tick, its not very useful | ||
130 | * to just read by itself. So use jiffies to emulate a free | ||
131 | * running counter: | ||
132 | */ | ||
133 | static cycle_t pit_read(struct clocksource *cs) | ||
134 | { | ||
135 | unsigned long flags; | ||
136 | int count; | ||
137 | u32 jifs; | ||
138 | static int old_count; | ||
139 | static u32 old_jifs; | ||
140 | |||
141 | raw_spin_lock_irqsave(&i8253_lock, flags); | ||
142 | /* | ||
143 | * Although our caller may have the read side of xtime_lock, | ||
144 | * this is now a seqlock, and we are cheating in this routine | ||
145 | * by having side effects on state that we cannot undo if | ||
146 | * there is a collision on the seqlock and our caller has to | ||
147 | * retry. (Namely, old_jifs and old_count.) So we must treat | ||
148 | * jiffies as volatile despite the lock. We read jiffies | ||
149 | * before latching the timer count to guarantee that although | ||
150 | * the jiffies value might be older than the count (that is, | ||
151 | * the counter may underflow between the last point where | ||
152 | * jiffies was incremented and the point where we latch the | ||
153 | * count), it cannot be newer. | ||
154 | */ | ||
155 | jifs = jiffies; | ||
156 | outb_p(0x00, PIT_MODE); /* latch the count ASAP */ | ||
157 | count = inb_p(PIT_CH0); /* read the latched count */ | ||
158 | count |= inb_p(PIT_CH0) << 8; | ||
159 | |||
160 | /* VIA686a test code... reset the latch if count > max + 1 */ | ||
161 | if (count > LATCH) { | ||
162 | outb_p(0x34, PIT_MODE); | ||
163 | outb_p(LATCH & 0xff, PIT_CH0); | ||
164 | outb(LATCH >> 8, PIT_CH0); | ||
165 | count = LATCH - 1; | ||
166 | } | ||
167 | |||
168 | /* | ||
169 | * It's possible for count to appear to go the wrong way for a | ||
170 | * couple of reasons: | ||
171 | * | ||
172 | * 1. The timer counter underflows, but we haven't handled the | ||
173 | * resulting interrupt and incremented jiffies yet. | ||
174 | * 2. Hardware problem with the timer, not giving us continuous time, | ||
175 | * the counter does small "jumps" upwards on some Pentium systems, | ||
176 | * (see c't 95/10 page 335 for Neptun bug.) | ||
177 | * | ||
178 | * Previous attempts to handle these cases intelligently were | ||
179 | * buggy, so we just do the simple thing now. | ||
180 | */ | ||
181 | if (count > old_count && jifs == old_jifs) { | ||
182 | count = old_count; | ||
183 | } | ||
184 | old_count = count; | ||
185 | old_jifs = jifs; | ||
186 | |||
187 | raw_spin_unlock_irqrestore(&i8253_lock, flags); | ||
188 | |||
189 | count = (LATCH - 1) - count; | ||
190 | |||
191 | return (cycle_t)(jifs * LATCH) + count; | ||
192 | } | ||
193 | |||
194 | static struct clocksource clocksource_pit = { | ||
195 | .name = "pit", | ||
196 | .rating = 110, | ||
197 | .read = pit_read, | ||
198 | .mask = CLOCKSOURCE_MASK(32), | ||
199 | }; | ||
200 | |||
201 | static int __init init_pit_clocksource(void) | 128 | static int __init init_pit_clocksource(void) |
202 | { | 129 | { |
203 | if (num_possible_cpus() > 1) /* PIT does not scale! */ | 130 | if (num_possible_cpus() > 1) /* PIT does not scale! */ |
204 | return 0; | 131 | return 0; |
205 | 132 | ||
206 | return clocksource_register_hz(&clocksource_pit, CLOCK_TICK_RATE); | 133 | return clocksource_i8253_init(); |
207 | } | 134 | } |
208 | arch_initcall(init_pit_clocksource); | 135 | arch_initcall(init_pit_clocksource); |
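
The deleted pit_read() documented how a free-running counter is emulated from a down-counting PIT channel plus jiffies; after this consolidation that logic lives behind the shared clocksource_i8253_init(). A simplified sketch of the cycle computation the removed code performed (the LATCH value and the sample inputs are illustrative only):

```c
#include <stdint.h>
#include <stdio.h>

#define LATCH 11932   /* illustrative: ~1193182 Hz PIT clock / 100 Hz tick */

/* count is the latched channel-0 value, which counts DOWN from LATCH every
 * tick; (LATCH - 1 - count) is how far into the current tick we are, and
 * jiffies supplies the whole ticks already elapsed. */
static uint64_t pit_cycles(uint32_t jifs, int count)
{
    return (uint64_t)jifs * LATCH + (uint64_t)(LATCH - 1 - count);
}

int main(void)
{
    printf("%llu\n", (unsigned long long)pit_cycles(1000, LATCH - 1)); /* start of tick 1000 */
    printf("%llu\n", (unsigned long long)pit_cycles(1000, 0));         /* end of tick 1000   */
    return 0;
}
```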
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index c58176cc796b..c018696765d4 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c | |||
@@ -31,19 +31,19 @@ | |||
31 | 31 | ||
32 | static int i8259A_auto_eoi = -1; | 32 | static int i8259A_auto_eoi = -1; |
33 | DEFINE_RAW_SPINLOCK(i8259A_lock); | 33 | DEFINE_RAW_SPINLOCK(i8259A_lock); |
34 | static void disable_8259A_irq(unsigned int irq); | 34 | static void disable_8259A_irq(struct irq_data *d); |
35 | static void enable_8259A_irq(unsigned int irq); | 35 | static void enable_8259A_irq(struct irq_data *d); |
36 | static void mask_and_ack_8259A(unsigned int irq); | 36 | static void mask_and_ack_8259A(struct irq_data *d); |
37 | static void init_8259A(int auto_eoi); | 37 | static void init_8259A(int auto_eoi); |
38 | 38 | ||
39 | static struct irq_chip i8259A_chip = { | 39 | static struct irq_chip i8259A_chip = { |
40 | .name = "XT-PIC", | 40 | .name = "XT-PIC", |
41 | .mask = disable_8259A_irq, | 41 | .irq_mask = disable_8259A_irq, |
42 | .disable = disable_8259A_irq, | 42 | .irq_disable = disable_8259A_irq, |
43 | .unmask = enable_8259A_irq, | 43 | .irq_unmask = enable_8259A_irq, |
44 | .mask_ack = mask_and_ack_8259A, | 44 | .irq_mask_ack = mask_and_ack_8259A, |
45 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | 45 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF |
46 | .set_affinity = plat_set_irq_affinity, | 46 | .irq_set_affinity = plat_set_irq_affinity, |
47 | #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ | 47 | #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ |
48 | }; | 48 | }; |
49 | 49 | ||
@@ -59,12 +59,11 @@ static unsigned int cached_irq_mask = 0xffff; | |||
59 | #define cached_master_mask (cached_irq_mask) | 59 | #define cached_master_mask (cached_irq_mask) |
60 | #define cached_slave_mask (cached_irq_mask >> 8) | 60 | #define cached_slave_mask (cached_irq_mask >> 8) |
61 | 61 | ||
62 | static void disable_8259A_irq(unsigned int irq) | 62 | static void disable_8259A_irq(struct irq_data *d) |
63 | { | 63 | { |
64 | unsigned int mask; | 64 | unsigned int mask, irq = d->irq - I8259A_IRQ_BASE; |
65 | unsigned long flags; | 65 | unsigned long flags; |
66 | 66 | ||
67 | irq -= I8259A_IRQ_BASE; | ||
68 | mask = 1 << irq; | 67 | mask = 1 << irq; |
69 | raw_spin_lock_irqsave(&i8259A_lock, flags); | 68 | raw_spin_lock_irqsave(&i8259A_lock, flags); |
70 | cached_irq_mask |= mask; | 69 | cached_irq_mask |= mask; |
@@ -75,12 +74,11 @@ static void disable_8259A_irq(unsigned int irq) | |||
75 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 74 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
76 | } | 75 | } |
77 | 76 | ||
78 | static void enable_8259A_irq(unsigned int irq) | 77 | static void enable_8259A_irq(struct irq_data *d) |
79 | { | 78 | { |
80 | unsigned int mask; | 79 | unsigned int mask, irq = d->irq - I8259A_IRQ_BASE; |
81 | unsigned long flags; | 80 | unsigned long flags; |
82 | 81 | ||
83 | irq -= I8259A_IRQ_BASE; | ||
84 | mask = ~(1 << irq); | 82 | mask = ~(1 << irq); |
85 | raw_spin_lock_irqsave(&i8259A_lock, flags); | 83 | raw_spin_lock_irqsave(&i8259A_lock, flags); |
86 | cached_irq_mask &= mask; | 84 | cached_irq_mask &= mask; |
@@ -112,7 +110,7 @@ int i8259A_irq_pending(unsigned int irq) | |||
112 | void make_8259A_irq(unsigned int irq) | 110 | void make_8259A_irq(unsigned int irq) |
113 | { | 111 | { |
114 | disable_irq_nosync(irq); | 112 | disable_irq_nosync(irq); |
115 | set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq); | 113 | irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq); |
116 | enable_irq(irq); | 114 | enable_irq(irq); |
117 | } | 115 | } |
118 | 116 | ||
@@ -145,12 +143,11 @@ static inline int i8259A_irq_real(unsigned int irq) | |||
145 | * first, _then_ send the EOI, and the order of EOI | 143 | * first, _then_ send the EOI, and the order of EOI |
146 | * to the two 8259s is important! | 144 | * to the two 8259s is important! |
147 | */ | 145 | */ |
148 | static void mask_and_ack_8259A(unsigned int irq) | 146 | static void mask_and_ack_8259A(struct irq_data *d) |
149 | { | 147 | { |
150 | unsigned int irqmask; | 148 | unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE; |
151 | unsigned long flags; | 149 | unsigned long flags; |
152 | 150 | ||
153 | irq -= I8259A_IRQ_BASE; | ||
154 | irqmask = 1 << irq; | 151 | irqmask = 1 << irq; |
155 | raw_spin_lock_irqsave(&i8259A_lock, flags); | 152 | raw_spin_lock_irqsave(&i8259A_lock, flags); |
156 | /* | 153 | /* |
@@ -290,9 +287,9 @@ static void init_8259A(int auto_eoi) | |||
290 | * In AEOI mode we just have to mask the interrupt | 287 | * In AEOI mode we just have to mask the interrupt |
291 | * when acking. | 288 | * when acking. |
292 | */ | 289 | */ |
293 | i8259A_chip.mask_ack = disable_8259A_irq; | 290 | i8259A_chip.irq_mask_ack = disable_8259A_irq; |
294 | else | 291 | else |
295 | i8259A_chip.mask_ack = mask_and_ack_8259A; | 292 | i8259A_chip.irq_mask_ack = mask_and_ack_8259A; |
296 | 293 | ||
297 | udelay(100); /* wait for 8259A to initialize */ | 294 | udelay(100); /* wait for 8259A to initialize */ |
298 | 295 | ||
@@ -339,8 +336,8 @@ void __init init_i8259_irqs(void) | |||
339 | init_8259A(0); | 336 | init_8259A(0); |
340 | 337 | ||
341 | for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) { | 338 | for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) { |
342 | set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq); | 339 | irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq); |
343 | set_irq_probe(i); | 340 | irq_set_probe(i); |
344 | } | 341 | } |
345 | 342 | ||
346 | setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2); | 343 | setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2); |
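
The i8259.c hunks show the conversion pattern that repeats in the irq-gic, irq-gt641xx, irq-msc01, irq-rm7000, irq-rm9000, irq_cpu and irq_txx9 diffs below: irq_chip callbacks now take a struct irq_data pointer instead of a bare irq number, and the chip members gain an irq_ prefix. A stand-alone sketch of that pattern, using a stand-in irq_data definition rather than the real one from linux/irq.h and an illustrative base:

```c
#include <stdio.h>

/* stand-in for the kernel's struct irq_data; only the field used here */
struct irq_data {
    unsigned int irq;
};

#define MY_IRQ_BASE 16  /* illustrative controller base, like I8259A_IRQ_BASE */

/* old style was: static void mask_my_irq(unsigned int irq) { irq -= MY_IRQ_BASE; ... } */
static void mask_my_irq(struct irq_data *d)
{
    unsigned int hw = d->irq - MY_IRQ_BASE;   /* controller line number */
    printf("mask line %u\n", hw);             /* a real chip writes its mask register here */
}

int main(void)
{
    struct irq_data d = { .irq = MY_IRQ_BASE + 3 };
    mask_my_irq(&d);   /* the core now hands the callback irq_data, not a number */
    return 0;
}
```

In the struct initializers this shows up as .mask = ... becoming .irq_mask = ..., and set_irq_chip_and_handler() callers becoming irq_set_chip_and_handler(), exactly as in the hunks above and below.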
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index 1774271af848..0c527f652196 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c | |||
@@ -87,17 +87,10 @@ unsigned int gic_get_int(void) | |||
87 | return i; | 87 | return i; |
88 | } | 88 | } |
89 | 89 | ||
90 | static unsigned int gic_irq_startup(unsigned int irq) | 90 | static void gic_irq_ack(struct irq_data *d) |
91 | { | 91 | { |
92 | irq -= _irqbase; | 92 | unsigned int irq = d->irq - _irqbase; |
93 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); | ||
94 | GIC_SET_INTR_MASK(irq); | ||
95 | return 0; | ||
96 | } | ||
97 | 93 | ||
98 | static void gic_irq_ack(unsigned int irq) | ||
99 | { | ||
100 | irq -= _irqbase; | ||
101 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); | 94 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); |
102 | GIC_CLR_INTR_MASK(irq); | 95 | GIC_CLR_INTR_MASK(irq); |
103 | 96 | ||
@@ -105,16 +98,16 @@ static void gic_irq_ack(unsigned int irq) | |||
105 | GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq); | 98 | GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq); |
106 | } | 99 | } |
107 | 100 | ||
108 | static void gic_mask_irq(unsigned int irq) | 101 | static void gic_mask_irq(struct irq_data *d) |
109 | { | 102 | { |
110 | irq -= _irqbase; | 103 | unsigned int irq = d->irq - _irqbase; |
111 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); | 104 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); |
112 | GIC_CLR_INTR_MASK(irq); | 105 | GIC_CLR_INTR_MASK(irq); |
113 | } | 106 | } |
114 | 107 | ||
115 | static void gic_unmask_irq(unsigned int irq) | 108 | static void gic_unmask_irq(struct irq_data *d) |
116 | { | 109 | { |
117 | irq -= _irqbase; | 110 | unsigned int irq = d->irq - _irqbase; |
118 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); | 111 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); |
119 | GIC_SET_INTR_MASK(irq); | 112 | GIC_SET_INTR_MASK(irq); |
120 | } | 113 | } |
@@ -123,13 +116,14 @@ static void gic_unmask_irq(unsigned int irq) | |||
123 | 116 | ||
124 | static DEFINE_SPINLOCK(gic_lock); | 117 | static DEFINE_SPINLOCK(gic_lock); |
125 | 118 | ||
126 | static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) | 119 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, |
120 | bool force) | ||
127 | { | 121 | { |
122 | unsigned int irq = d->irq - _irqbase; | ||
128 | cpumask_t tmp = CPU_MASK_NONE; | 123 | cpumask_t tmp = CPU_MASK_NONE; |
129 | unsigned long flags; | 124 | unsigned long flags; |
130 | int i; | 125 | int i; |
131 | 126 | ||
132 | irq -= _irqbase; | ||
133 | pr_debug("%s(%d) called\n", __func__, irq); | 127 | pr_debug("%s(%d) called\n", __func__, irq); |
134 | cpumask_and(&tmp, cpumask, cpu_online_mask); | 128 | cpumask_and(&tmp, cpumask, cpu_online_mask); |
135 | if (cpus_empty(tmp)) | 129 | if (cpus_empty(tmp)) |
@@ -147,23 +141,22 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
147 | set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); | 141 | set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); |
148 | 142 | ||
149 | } | 143 | } |
150 | cpumask_copy(irq_desc[irq].affinity, cpumask); | 144 | cpumask_copy(d->affinity, cpumask); |
151 | spin_unlock_irqrestore(&gic_lock, flags); | 145 | spin_unlock_irqrestore(&gic_lock, flags); |
152 | 146 | ||
153 | return 0; | 147 | return IRQ_SET_MASK_OK_NOCOPY; |
154 | } | 148 | } |
155 | #endif | 149 | #endif |
156 | 150 | ||
157 | static struct irq_chip gic_irq_controller = { | 151 | static struct irq_chip gic_irq_controller = { |
158 | .name = "MIPS GIC", | 152 | .name = "MIPS GIC", |
159 | .startup = gic_irq_startup, | 153 | .irq_ack = gic_irq_ack, |
160 | .ack = gic_irq_ack, | 154 | .irq_mask = gic_mask_irq, |
161 | .mask = gic_mask_irq, | 155 | .irq_mask_ack = gic_mask_irq, |
162 | .mask_ack = gic_mask_irq, | 156 | .irq_unmask = gic_unmask_irq, |
163 | .unmask = gic_unmask_irq, | 157 | .irq_eoi = gic_unmask_irq, |
164 | .eoi = gic_unmask_irq, | ||
165 | #ifdef CONFIG_SMP | 158 | #ifdef CONFIG_SMP |
166 | .set_affinity = gic_set_affinity, | 159 | .irq_set_affinity = gic_set_affinity, |
167 | #endif | 160 | #endif |
168 | }; | 161 | }; |
169 | 162 | ||
@@ -236,7 +229,7 @@ static void __init gic_basic_init(int numintrs, int numvpes, | |||
236 | vpe_local_setup(numvpes); | 229 | vpe_local_setup(numvpes); |
237 | 230 | ||
238 | for (i = _irqbase; i < (_irqbase + numintrs); i++) | 231 | for (i = _irqbase; i < (_irqbase + numintrs); i++) |
239 | set_irq_chip(i, &gic_irq_controller); | 232 | irq_set_chip(i, &gic_irq_controller); |
240 | } | 233 | } |
241 | 234 | ||
242 | void __init gic_init(unsigned long gic_base_addr, | 235 | void __init gic_init(unsigned long gic_base_addr, |
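
Beyond the callback signature change, gic_set_affinity() now copies the new mask into d->affinity itself and returns IRQ_SET_MASK_OK_NOCOPY instead of 0, telling the generic layer not to copy the cpumask a second time. A tiny sketch of that convention, with stand-in enum values rather than the kernel's definitions:

```c
#include <stdio.h>

/* stand-ins for the return codes in linux/irq.h (values illustrative) */
enum {
    IRQ_SET_MASK_OK        = 0,  /* generic code copies the mask into d->affinity */
    IRQ_SET_MASK_OK_NOCOPY = 1,  /* chip already wrote d->affinity, skip the copy  */
};

int main(void)
{
    /* gic_set_affinity() does cpumask_copy(d->affinity, cpumask) itself,
     * so it reports NOCOPY to the core instead of 0 */
    printf("GIC returns %d\n", IRQ_SET_MASK_OK_NOCOPY);
    return 0;
}
```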
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c index 42ef81461bfc..883fc6cead36 100644 --- a/arch/mips/kernel/irq-gt641xx.c +++ b/arch/mips/kernel/irq-gt641xx.c | |||
@@ -29,64 +29,64 @@ | |||
29 | 29 | ||
30 | static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock); | 30 | static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock); |
31 | 31 | ||
32 | static void ack_gt641xx_irq(unsigned int irq) | 32 | static void ack_gt641xx_irq(struct irq_data *d) |
33 | { | 33 | { |
34 | unsigned long flags; | 34 | unsigned long flags; |
35 | u32 cause; | 35 | u32 cause; |
36 | 36 | ||
37 | raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); | 37 | raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); |
38 | cause = GT_READ(GT_INTRCAUSE_OFS); | 38 | cause = GT_READ(GT_INTRCAUSE_OFS); |
39 | cause &= ~GT641XX_IRQ_TO_BIT(irq); | 39 | cause &= ~GT641XX_IRQ_TO_BIT(d->irq); |
40 | GT_WRITE(GT_INTRCAUSE_OFS, cause); | 40 | GT_WRITE(GT_INTRCAUSE_OFS, cause); |
41 | raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); | 41 | raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); |
42 | } | 42 | } |
43 | 43 | ||
44 | static void mask_gt641xx_irq(unsigned int irq) | 44 | static void mask_gt641xx_irq(struct irq_data *d) |
45 | { | 45 | { |
46 | unsigned long flags; | 46 | unsigned long flags; |
47 | u32 mask; | 47 | u32 mask; |
48 | 48 | ||
49 | raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); | 49 | raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); |
50 | mask = GT_READ(GT_INTRMASK_OFS); | 50 | mask = GT_READ(GT_INTRMASK_OFS); |
51 | mask &= ~GT641XX_IRQ_TO_BIT(irq); | 51 | mask &= ~GT641XX_IRQ_TO_BIT(d->irq); |
52 | GT_WRITE(GT_INTRMASK_OFS, mask); | 52 | GT_WRITE(GT_INTRMASK_OFS, mask); |
53 | raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); | 53 | raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); |
54 | } | 54 | } |
55 | 55 | ||
56 | static void mask_ack_gt641xx_irq(unsigned int irq) | 56 | static void mask_ack_gt641xx_irq(struct irq_data *d) |
57 | { | 57 | { |
58 | unsigned long flags; | 58 | unsigned long flags; |
59 | u32 cause, mask; | 59 | u32 cause, mask; |
60 | 60 | ||
61 | raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); | 61 | raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); |
62 | mask = GT_READ(GT_INTRMASK_OFS); | 62 | mask = GT_READ(GT_INTRMASK_OFS); |
63 | mask &= ~GT641XX_IRQ_TO_BIT(irq); | 63 | mask &= ~GT641XX_IRQ_TO_BIT(d->irq); |
64 | GT_WRITE(GT_INTRMASK_OFS, mask); | 64 | GT_WRITE(GT_INTRMASK_OFS, mask); |
65 | 65 | ||
66 | cause = GT_READ(GT_INTRCAUSE_OFS); | 66 | cause = GT_READ(GT_INTRCAUSE_OFS); |
67 | cause &= ~GT641XX_IRQ_TO_BIT(irq); | 67 | cause &= ~GT641XX_IRQ_TO_BIT(d->irq); |
68 | GT_WRITE(GT_INTRCAUSE_OFS, cause); | 68 | GT_WRITE(GT_INTRCAUSE_OFS, cause); |
69 | raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); | 69 | raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); |
70 | } | 70 | } |
71 | 71 | ||
72 | static void unmask_gt641xx_irq(unsigned int irq) | 72 | static void unmask_gt641xx_irq(struct irq_data *d) |
73 | { | 73 | { |
74 | unsigned long flags; | 74 | unsigned long flags; |
75 | u32 mask; | 75 | u32 mask; |
76 | 76 | ||
77 | raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); | 77 | raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); |
78 | mask = GT_READ(GT_INTRMASK_OFS); | 78 | mask = GT_READ(GT_INTRMASK_OFS); |
79 | mask |= GT641XX_IRQ_TO_BIT(irq); | 79 | mask |= GT641XX_IRQ_TO_BIT(d->irq); |
80 | GT_WRITE(GT_INTRMASK_OFS, mask); | 80 | GT_WRITE(GT_INTRMASK_OFS, mask); |
81 | raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); | 81 | raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); |
82 | } | 82 | } |
83 | 83 | ||
84 | static struct irq_chip gt641xx_irq_chip = { | 84 | static struct irq_chip gt641xx_irq_chip = { |
85 | .name = "GT641xx", | 85 | .name = "GT641xx", |
86 | .ack = ack_gt641xx_irq, | 86 | .irq_ack = ack_gt641xx_irq, |
87 | .mask = mask_gt641xx_irq, | 87 | .irq_mask = mask_gt641xx_irq, |
88 | .mask_ack = mask_ack_gt641xx_irq, | 88 | .irq_mask_ack = mask_ack_gt641xx_irq, |
89 | .unmask = unmask_gt641xx_irq, | 89 | .irq_unmask = unmask_gt641xx_irq, |
90 | }; | 90 | }; |
91 | 91 | ||
92 | void gt641xx_irq_dispatch(void) | 92 | void gt641xx_irq_dispatch(void) |
@@ -126,6 +126,6 @@ void __init gt641xx_irq_init(void) | |||
126 | * bit31: logical or of bits[25:1]. | 126 | * bit31: logical or of bits[25:1]. |
127 | */ | 127 | */ |
128 | for (i = 1; i < 30; i++) | 128 | for (i = 1; i < 30; i++) |
129 | set_irq_chip_and_handler(GT641XX_IRQ_BASE + i, | 129 | irq_set_chip_and_handler(GT641XX_IRQ_BASE + i, |
130 | &gt641xx_irq_chip, handle_level_irq); | 130 | &gt641xx_irq_chip, handle_level_irq); |
131 | } | 131 | } |
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index 6a8cd28133d5..0c6afeed89d2 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c | |||
@@ -28,8 +28,10 @@ static unsigned long _icctrl_msc; | |||
28 | static unsigned int irq_base; | 28 | static unsigned int irq_base; |
29 | 29 | ||
30 | /* mask off an interrupt */ | 30 | /* mask off an interrupt */ |
31 | static inline void mask_msc_irq(unsigned int irq) | 31 | static inline void mask_msc_irq(struct irq_data *d) |
32 | { | 32 | { |
33 | unsigned int irq = d->irq; | ||
34 | |||
33 | if (irq < (irq_base + 32)) | 35 | if (irq < (irq_base + 32)) |
34 | MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base)); | 36 | MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base)); |
35 | else | 37 | else |
@@ -37,8 +39,10 @@ static inline void mask_msc_irq(unsigned int irq) | |||
37 | } | 39 | } |
38 | 40 | ||
39 | /* unmask an interrupt */ | 41 | /* unmask an interrupt */ |
40 | static inline void unmask_msc_irq(unsigned int irq) | 42 | static inline void unmask_msc_irq(struct irq_data *d) |
41 | { | 43 | { |
44 | unsigned int irq = d->irq; | ||
45 | |||
42 | if (irq < (irq_base + 32)) | 46 | if (irq < (irq_base + 32)) |
43 | MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base)); | 47 | MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base)); |
44 | else | 48 | else |
@@ -48,9 +52,11 @@ static inline void unmask_msc_irq(unsigned int irq) | |||
48 | /* | 52 | /* |
49 | * Masks and ACKs an IRQ | 53 | * Masks and ACKs an IRQ |
50 | */ | 54 | */ |
51 | static void level_mask_and_ack_msc_irq(unsigned int irq) | 55 | static void level_mask_and_ack_msc_irq(struct irq_data *d) |
52 | { | 56 | { |
53 | mask_msc_irq(irq); | 57 | unsigned int irq = d->irq; |
58 | |||
59 | mask_msc_irq(d); | ||
54 | if (!cpu_has_veic) | 60 | if (!cpu_has_veic) |
55 | MSCIC_WRITE(MSC01_IC_EOI, 0); | 61 | MSCIC_WRITE(MSC01_IC_EOI, 0); |
56 | /* This actually needs to be a call into platform code */ | 62 | /* This actually needs to be a call into platform code */ |
@@ -60,9 +66,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq) | |||
60 | /* | 66 | /* |
61 | * Masks and ACKs an IRQ | 67 | * Masks and ACKs an IRQ |
62 | */ | 68 | */ |
63 | static void edge_mask_and_ack_msc_irq(unsigned int irq) | 69 | static void edge_mask_and_ack_msc_irq(struct irq_data *d) |
64 | { | 70 | { |
65 | mask_msc_irq(irq); | 71 | unsigned int irq = d->irq; |
72 | |||
73 | mask_msc_irq(d); | ||
66 | if (!cpu_has_veic) | 74 | if (!cpu_has_veic) |
67 | MSCIC_WRITE(MSC01_IC_EOI, 0); | 75 | MSCIC_WRITE(MSC01_IC_EOI, 0); |
68 | else { | 76 | else { |
@@ -75,15 +83,6 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq) | |||
75 | } | 83 | } |
76 | 84 | ||
77 | /* | 85 | /* |
78 | * End IRQ processing | ||
79 | */ | ||
80 | static void end_msc_irq(unsigned int irq) | ||
81 | { | ||
82 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
83 | unmask_msc_irq(irq); | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | * Interrupt handler for interrupts coming from SOC-it. | 86 | * Interrupt handler for interrupts coming from SOC-it. |
88 | */ | 87 | */ |
89 | void ll_msc_irq(void) | 88 | void ll_msc_irq(void) |
@@ -107,22 +106,20 @@ static void msc_bind_eic_interrupt(int irq, int set) | |||
107 | 106 | ||
108 | static struct irq_chip msc_levelirq_type = { | 107 | static struct irq_chip msc_levelirq_type = { |
109 | .name = "SOC-it-Level", | 108 | .name = "SOC-it-Level", |
110 | .ack = level_mask_and_ack_msc_irq, | 109 | .irq_ack = level_mask_and_ack_msc_irq, |
111 | .mask = mask_msc_irq, | 110 | .irq_mask = mask_msc_irq, |
112 | .mask_ack = level_mask_and_ack_msc_irq, | 111 | .irq_mask_ack = level_mask_and_ack_msc_irq, |
113 | .unmask = unmask_msc_irq, | 112 | .irq_unmask = unmask_msc_irq, |
114 | .eoi = unmask_msc_irq, | 113 | .irq_eoi = unmask_msc_irq, |
115 | .end = end_msc_irq, | ||
116 | }; | 114 | }; |
117 | 115 | ||
118 | static struct irq_chip msc_edgeirq_type = { | 116 | static struct irq_chip msc_edgeirq_type = { |
119 | .name = "SOC-it-Edge", | 117 | .name = "SOC-it-Edge", |
120 | .ack = edge_mask_and_ack_msc_irq, | 118 | .irq_ack = edge_mask_and_ack_msc_irq, |
121 | .mask = mask_msc_irq, | 119 | .irq_mask = mask_msc_irq, |
122 | .mask_ack = edge_mask_and_ack_msc_irq, | 120 | .irq_mask_ack = edge_mask_and_ack_msc_irq, |
123 | .unmask = unmask_msc_irq, | 121 | .irq_unmask = unmask_msc_irq, |
124 | .eoi = unmask_msc_irq, | 122 | .irq_eoi = unmask_msc_irq, |
125 | .end = end_msc_irq, | ||
126 | }; | 123 | }; |
127 | 124 | ||
128 | 125 | ||
@@ -140,16 +137,20 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma | |||
140 | 137 | ||
141 | switch (imp->im_type) { | 138 | switch (imp->im_type) { |
142 | case MSC01_IRQ_EDGE: | 139 | case MSC01_IRQ_EDGE: |
143 | set_irq_chip_and_handler_name(irqbase + n, | 140 | irq_set_chip_and_handler_name(irqbase + n, |
144 | &msc_edgeirq_type, handle_edge_irq, "edge"); | 141 | &msc_edgeirq_type, |
142 | handle_edge_irq, | ||
143 | "edge"); | ||
145 | if (cpu_has_veic) | 144 | if (cpu_has_veic) |
146 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); | 145 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); |
147 | else | 146 | else |
148 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); | 147 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); |
149 | break; | 148 | break; |
150 | case MSC01_IRQ_LEVEL: | 149 | case MSC01_IRQ_LEVEL: |
151 | set_irq_chip_and_handler_name(irqbase+n, | 150 | irq_set_chip_and_handler_name(irqbase + n, |
152 | &msc_levelirq_type, handle_level_irq, "level"); | 151 | &msc_levelirq_type, |
152 | handle_level_irq, | ||
153 | "level"); | ||
153 | if (cpu_has_veic) | 154 | if (cpu_has_veic) |
154 | MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); | 155 | MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); |
155 | else | 156 | else |
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c index 9731e8b47862..a8a8977d5887 100644 --- a/arch/mips/kernel/irq-rm7000.c +++ b/arch/mips/kernel/irq-rm7000.c | |||
@@ -18,23 +18,23 @@ | |||
18 | #include <asm/mipsregs.h> | 18 | #include <asm/mipsregs.h> |
19 | #include <asm/system.h> | 19 | #include <asm/system.h> |
20 | 20 | ||
21 | static inline void unmask_rm7k_irq(unsigned int irq) | 21 | static inline void unmask_rm7k_irq(struct irq_data *d) |
22 | { | 22 | { |
23 | set_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE)); | 23 | set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE)); |
24 | } | 24 | } |
25 | 25 | ||
26 | static inline void mask_rm7k_irq(unsigned int irq) | 26 | static inline void mask_rm7k_irq(struct irq_data *d) |
27 | { | 27 | { |
28 | clear_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE)); | 28 | clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE)); |
29 | } | 29 | } |
30 | 30 | ||
31 | static struct irq_chip rm7k_irq_controller = { | 31 | static struct irq_chip rm7k_irq_controller = { |
32 | .name = "RM7000", | 32 | .name = "RM7000", |
33 | .ack = mask_rm7k_irq, | 33 | .irq_ack = mask_rm7k_irq, |
34 | .mask = mask_rm7k_irq, | 34 | .irq_mask = mask_rm7k_irq, |
35 | .mask_ack = mask_rm7k_irq, | 35 | .irq_mask_ack = mask_rm7k_irq, |
36 | .unmask = unmask_rm7k_irq, | 36 | .irq_unmask = unmask_rm7k_irq, |
37 | .eoi = unmask_rm7k_irq | 37 | .irq_eoi = unmask_rm7k_irq |
38 | }; | 38 | }; |
39 | 39 | ||
40 | void __init rm7k_cpu_irq_init(void) | 40 | void __init rm7k_cpu_irq_init(void) |
@@ -45,6 +45,6 @@ void __init rm7k_cpu_irq_init(void) | |||
45 | clear_c0_intcontrol(0x00000f00); /* Mask all */ | 45 | clear_c0_intcontrol(0x00000f00); /* Mask all */ |
46 | 46 | ||
47 | for (i = base; i < base + 4; i++) | 47 | for (i = base; i < base + 4; i++) |
48 | set_irq_chip_and_handler(i, &rm7k_irq_controller, | 48 | irq_set_chip_and_handler(i, &rm7k_irq_controller, |
49 | handle_percpu_irq); | 49 | handle_percpu_irq); |
50 | } | 50 | } |
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c index b7e4025b58a8..38874a4b9255 100644 --- a/arch/mips/kernel/irq-rm9000.c +++ b/arch/mips/kernel/irq-rm9000.c | |||
@@ -19,22 +19,22 @@ | |||
19 | #include <asm/mipsregs.h> | 19 | #include <asm/mipsregs.h> |
20 | #include <asm/system.h> | 20 | #include <asm/system.h> |
21 | 21 | ||
22 | static inline void unmask_rm9k_irq(unsigned int irq) | 22 | static inline void unmask_rm9k_irq(struct irq_data *d) |
23 | { | 23 | { |
24 | set_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE)); | 24 | set_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE)); |
25 | } | 25 | } |
26 | 26 | ||
27 | static inline void mask_rm9k_irq(unsigned int irq) | 27 | static inline void mask_rm9k_irq(struct irq_data *d) |
28 | { | 28 | { |
29 | clear_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE)); | 29 | clear_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE)); |
30 | } | 30 | } |
31 | 31 | ||
32 | static inline void rm9k_cpu_irq_enable(unsigned int irq) | 32 | static inline void rm9k_cpu_irq_enable(struct irq_data *d) |
33 | { | 33 | { |
34 | unsigned long flags; | 34 | unsigned long flags; |
35 | 35 | ||
36 | local_irq_save(flags); | 36 | local_irq_save(flags); |
37 | unmask_rm9k_irq(irq); | 37 | unmask_rm9k_irq(d); |
38 | local_irq_restore(flags); | 38 | local_irq_restore(flags); |
39 | } | 39 | } |
40 | 40 | ||
@@ -43,50 +43,47 @@ static inline void rm9k_cpu_irq_enable(unsigned int irq) | |||
43 | */ | 43 | */ |
44 | static void local_rm9k_perfcounter_irq_startup(void *args) | 44 | static void local_rm9k_perfcounter_irq_startup(void *args) |
45 | { | 45 | { |
46 | unsigned int irq = (unsigned int) args; | 46 | rm9k_cpu_irq_enable(args); |
47 | |||
48 | rm9k_cpu_irq_enable(irq); | ||
49 | } | 47 | } |
50 | 48 | ||
51 | static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq) | 49 | static unsigned int rm9k_perfcounter_irq_startup(struct irq_data *d) |
52 | { | 50 | { |
53 | on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1); | 51 | on_each_cpu(local_rm9k_perfcounter_irq_startup, d, 1); |
54 | 52 | ||
55 | return 0; | 53 | return 0; |
56 | } | 54 | } |
57 | 55 | ||
58 | static void local_rm9k_perfcounter_irq_shutdown(void *args) | 56 | static void local_rm9k_perfcounter_irq_shutdown(void *args) |
59 | { | 57 | { |
60 | unsigned int irq = (unsigned int) args; | ||
61 | unsigned long flags; | 58 | unsigned long flags; |
62 | 59 | ||
63 | local_irq_save(flags); | 60 | local_irq_save(flags); |
64 | mask_rm9k_irq(irq); | 61 | mask_rm9k_irq(args); |
65 | local_irq_restore(flags); | 62 | local_irq_restore(flags); |
66 | } | 63 | } |
67 | 64 | ||
68 | static void rm9k_perfcounter_irq_shutdown(unsigned int irq) | 65 | static void rm9k_perfcounter_irq_shutdown(struct irq_data *d) |
69 | { | 66 | { |
70 | on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1); | 67 | on_each_cpu(local_rm9k_perfcounter_irq_shutdown, d, 1); |
71 | } | 68 | } |
72 | 69 | ||
73 | static struct irq_chip rm9k_irq_controller = { | 70 | static struct irq_chip rm9k_irq_controller = { |
74 | .name = "RM9000", | 71 | .name = "RM9000", |
75 | .ack = mask_rm9k_irq, | 72 | .irq_ack = mask_rm9k_irq, |
76 | .mask = mask_rm9k_irq, | 73 | .irq_mask = mask_rm9k_irq, |
77 | .mask_ack = mask_rm9k_irq, | 74 | .irq_mask_ack = mask_rm9k_irq, |
78 | .unmask = unmask_rm9k_irq, | 75 | .irq_unmask = unmask_rm9k_irq, |
79 | .eoi = unmask_rm9k_irq | 76 | .irq_eoi = unmask_rm9k_irq |
80 | }; | 77 | }; |
81 | 78 | ||
82 | static struct irq_chip rm9k_perfcounter_irq = { | 79 | static struct irq_chip rm9k_perfcounter_irq = { |
83 | .name = "RM9000", | 80 | .name = "RM9000", |
84 | .startup = rm9k_perfcounter_irq_startup, | 81 | .irq_startup = rm9k_perfcounter_irq_startup, |
85 | .shutdown = rm9k_perfcounter_irq_shutdown, | 82 | .irq_shutdown = rm9k_perfcounter_irq_shutdown, |
86 | .ack = mask_rm9k_irq, | 83 | .irq_ack = mask_rm9k_irq, |
87 | .mask = mask_rm9k_irq, | 84 | .irq_mask = mask_rm9k_irq, |
88 | .mask_ack = mask_rm9k_irq, | 85 | .irq_mask_ack = mask_rm9k_irq, |
89 | .unmask = unmask_rm9k_irq, | 86 | .irq_unmask = unmask_rm9k_irq, |
90 | }; | 87 | }; |
91 | 88 | ||
92 | unsigned int rm9000_perfcount_irq; | 89 | unsigned int rm9000_perfcount_irq; |
@@ -101,10 +98,10 @@ void __init rm9k_cpu_irq_init(void) | |||
101 | clear_c0_intcontrol(0x0000f000); /* Mask all */ | 98 | clear_c0_intcontrol(0x0000f000); /* Mask all */ |
102 | 99 | ||
103 | for (i = base; i < base + 4; i++) | 100 | for (i = base; i < base + 4; i++) |
104 | set_irq_chip_and_handler(i, &rm9k_irq_controller, | 101 | irq_set_chip_and_handler(i, &rm9k_irq_controller, |
105 | handle_level_irq); | 102 | handle_level_irq); |
106 | 103 | ||
107 | rm9000_perfcount_irq = base + 1; | 104 | rm9000_perfcount_irq = base + 1; |
108 | set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, | 105 | irq_set_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, |
109 | handle_percpu_irq); | 106 | handle_percpu_irq); |
110 | } | 107 | } |
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 4f93db58a79e..9b734d74ae8e 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c | |||
@@ -81,48 +81,9 @@ void ack_bad_irq(unsigned int irq) | |||
81 | 81 | ||
82 | atomic_t irq_err_count; | 82 | atomic_t irq_err_count; |
83 | 83 | ||
84 | /* | 84 | int arch_show_interrupts(struct seq_file *p, int prec) |
85 | * Generic, controller-independent functions: | ||
86 | */ | ||
87 | |||
88 | int show_interrupts(struct seq_file *p, void *v) | ||
89 | { | 85 | { |
90 | int i = *(loff_t *) v, j; | 86 | seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); |
91 | struct irqaction * action; | ||
92 | unsigned long flags; | ||
93 | |||
94 | if (i == 0) { | ||
95 | seq_printf(p, " "); | ||
96 | for_each_online_cpu(j) | ||
97 | seq_printf(p, "CPU%d ", j); | ||
98 | seq_putc(p, '\n'); | ||
99 | } | ||
100 | |||
101 | if (i < NR_IRQS) { | ||
102 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
103 | action = irq_desc[i].action; | ||
104 | if (!action) | ||
105 | goto skip; | ||
106 | seq_printf(p, "%3d: ", i); | ||
107 | #ifndef CONFIG_SMP | ||
108 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
109 | #else | ||
110 | for_each_online_cpu(j) | ||
111 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
112 | #endif | ||
113 | seq_printf(p, " %14s", irq_desc[i].chip->name); | ||
114 | seq_printf(p, " %s", action->name); | ||
115 | |||
116 | for (action=action->next; action; action = action->next) | ||
117 | seq_printf(p, ", %s", action->name); | ||
118 | |||
119 | seq_putc(p, '\n'); | ||
120 | skip: | ||
121 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
122 | } else if (i == NR_IRQS) { | ||
123 | seq_putc(p, '\n'); | ||
124 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | ||
125 | } | ||
126 | return 0; | 87 | return 0; |
127 | } | 88 | } |
128 | 89 | ||
@@ -141,7 +102,7 @@ void __init init_IRQ(void) | |||
141 | #endif | 102 | #endif |
142 | 103 | ||
143 | for (i = 0; i < NR_IRQS; i++) | 104 | for (i = 0; i < NR_IRQS; i++) |
144 | set_irq_noprobe(i); | 105 | irq_set_noprobe(i); |
145 | 106 | ||
146 | arch_init_irq(); | 107 | arch_init_irq(); |
147 | 108 | ||
@@ -183,8 +144,8 @@ void __irq_entry do_IRQ(unsigned int irq) | |||
183 | { | 144 | { |
184 | irq_enter(); | 145 | irq_enter(); |
185 | check_stack_overflow(); | 146 | check_stack_overflow(); |
186 | __DO_IRQ_SMTC_HOOK(irq); | 147 | if (!smtc_handle_on_other_cpu(irq)) |
187 | generic_handle_irq(irq); | 148 | generic_handle_irq(irq); |
188 | irq_exit(); | 149 | irq_exit(); |
189 | } | 150 | } |
190 | 151 | ||
@@ -197,7 +158,7 @@ void __irq_entry do_IRQ(unsigned int irq) | |||
197 | void __irq_entry do_IRQ_no_affinity(unsigned int irq) | 158 | void __irq_entry do_IRQ_no_affinity(unsigned int irq) |
198 | { | 159 | { |
199 | irq_enter(); | 160 | irq_enter(); |
200 | __NO_AFFINITY_IRQ_SMTC_HOOK(irq); | 161 | smtc_im_backstop(irq); |
201 | generic_handle_irq(irq); | 162 | generic_handle_irq(irq); |
202 | irq_exit(); | 163 | irq_exit(); |
203 | } | 164 | } |
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index 0262abe09121..6e71b284f6c9 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c | |||
@@ -37,42 +37,38 @@ | |||
37 | #include <asm/mipsmtregs.h> | 37 | #include <asm/mipsmtregs.h> |
38 | #include <asm/system.h> | 38 | #include <asm/system.h> |
39 | 39 | ||
40 | static inline void unmask_mips_irq(unsigned int irq) | 40 | static inline void unmask_mips_irq(struct irq_data *d) |
41 | { | 41 | { |
42 | set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE)); | 42 | set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); |
43 | irq_enable_hazard(); | 43 | irq_enable_hazard(); |
44 | } | 44 | } |
45 | 45 | ||
46 | static inline void mask_mips_irq(unsigned int irq) | 46 | static inline void mask_mips_irq(struct irq_data *d) |
47 | { | 47 | { |
48 | clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE)); | 48 | clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); |
49 | irq_disable_hazard(); | 49 | irq_disable_hazard(); |
50 | } | 50 | } |
51 | 51 | ||
52 | static struct irq_chip mips_cpu_irq_controller = { | 52 | static struct irq_chip mips_cpu_irq_controller = { |
53 | .name = "MIPS", | 53 | .name = "MIPS", |
54 | .ack = mask_mips_irq, | 54 | .irq_ack = mask_mips_irq, |
55 | .mask = mask_mips_irq, | 55 | .irq_mask = mask_mips_irq, |
56 | .mask_ack = mask_mips_irq, | 56 | .irq_mask_ack = mask_mips_irq, |
57 | .unmask = unmask_mips_irq, | 57 | .irq_unmask = unmask_mips_irq, |
58 | .eoi = unmask_mips_irq, | 58 | .irq_eoi = unmask_mips_irq, |
59 | }; | 59 | }; |
60 | 60 | ||
61 | /* | 61 | /* |
62 | * Basically the same as above but taking care of all the MT stuff | 62 | * Basically the same as above but taking care of all the MT stuff |
63 | */ | 63 | */ |
64 | 64 | ||
65 | #define unmask_mips_mt_irq unmask_mips_irq | 65 | static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d) |
66 | #define mask_mips_mt_irq mask_mips_irq | ||
67 | |||
68 | static unsigned int mips_mt_cpu_irq_startup(unsigned int irq) | ||
69 | { | 66 | { |
70 | unsigned int vpflags = dvpe(); | 67 | unsigned int vpflags = dvpe(); |
71 | 68 | ||
72 | clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE)); | 69 | clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); |
73 | evpe(vpflags); | 70 | evpe(vpflags); |
74 | unmask_mips_mt_irq(irq); | 71 | unmask_mips_irq(d); |
75 | |||
76 | return 0; | 72 | return 0; |
77 | } | 73 | } |
78 | 74 | ||
@@ -80,22 +76,22 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq) | |||
80 | * While we ack the interrupt interrupts are disabled and thus we don't need | 76 | * While we ack the interrupt interrupts are disabled and thus we don't need |
81 | * to deal with concurrency issues. Same for mips_cpu_irq_end. | 77 | * to deal with concurrency issues. Same for mips_cpu_irq_end. |
82 | */ | 78 | */ |
83 | static void mips_mt_cpu_irq_ack(unsigned int irq) | 79 | static void mips_mt_cpu_irq_ack(struct irq_data *d) |
84 | { | 80 | { |
85 | unsigned int vpflags = dvpe(); | 81 | unsigned int vpflags = dvpe(); |
86 | clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE)); | 82 | clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); |
87 | evpe(vpflags); | 83 | evpe(vpflags); |
88 | mask_mips_mt_irq(irq); | 84 | mask_mips_irq(d); |
89 | } | 85 | } |
90 | 86 | ||
91 | static struct irq_chip mips_mt_cpu_irq_controller = { | 87 | static struct irq_chip mips_mt_cpu_irq_controller = { |
92 | .name = "MIPS", | 88 | .name = "MIPS", |
93 | .startup = mips_mt_cpu_irq_startup, | 89 | .irq_startup = mips_mt_cpu_irq_startup, |
94 | .ack = mips_mt_cpu_irq_ack, | 90 | .irq_ack = mips_mt_cpu_irq_ack, |
95 | .mask = mask_mips_mt_irq, | 91 | .irq_mask = mask_mips_irq, |
96 | .mask_ack = mips_mt_cpu_irq_ack, | 92 | .irq_mask_ack = mips_mt_cpu_irq_ack, |
97 | .unmask = unmask_mips_mt_irq, | 93 | .irq_unmask = unmask_mips_irq, |
98 | .eoi = unmask_mips_mt_irq, | 94 | .irq_eoi = unmask_mips_irq, |
99 | }; | 95 | }; |
100 | 96 | ||
101 | void __init mips_cpu_irq_init(void) | 97 | void __init mips_cpu_irq_init(void) |
@@ -113,10 +109,10 @@ void __init mips_cpu_irq_init(void) | |||
113 | */ | 109 | */ |
114 | if (cpu_has_mipsmt) | 110 | if (cpu_has_mipsmt) |
115 | for (i = irq_base; i < irq_base + 2; i++) | 111 | for (i = irq_base; i < irq_base + 2; i++) |
116 | set_irq_chip_and_handler(i, &mips_mt_cpu_irq_controller, | 112 | irq_set_chip_and_handler(i, &mips_mt_cpu_irq_controller, |
117 | handle_percpu_irq); | 113 | handle_percpu_irq); |
118 | 114 | ||
119 | for (i = irq_base + 2; i < irq_base + 8; i++) | 115 | for (i = irq_base + 2; i < irq_base + 8; i++) |
120 | set_irq_chip_and_handler(i, &mips_cpu_irq_controller, | 116 | irq_set_chip_and_handler(i, &mips_cpu_irq_controller, |
121 | handle_percpu_irq); | 117 | handle_percpu_irq); |
122 | } | 118 | } |
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c index 95a96f69172d..b0c55b50218e 100644 --- a/arch/mips/kernel/irq_txx9.c +++ b/arch/mips/kernel/irq_txx9.c | |||
@@ -63,9 +63,9 @@ static struct { | |||
63 | unsigned char mode; | 63 | unsigned char mode; |
64 | } txx9irq[TXx9_MAX_IR] __read_mostly; | 64 | } txx9irq[TXx9_MAX_IR] __read_mostly; |
65 | 65 | ||
66 | static void txx9_irq_unmask(unsigned int irq) | 66 | static void txx9_irq_unmask(struct irq_data *d) |
67 | { | 67 | { |
68 | unsigned int irq_nr = irq - TXX9_IRQ_BASE; | 68 | unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; |
69 | u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16 ) / 2]; | 69 | u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16 ) / 2]; |
70 | int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8; | 70 | int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8; |
71 | 71 | ||
@@ -79,9 +79,9 @@ static void txx9_irq_unmask(unsigned int irq) | |||
79 | #endif | 79 | #endif |
80 | } | 80 | } |
81 | 81 | ||
82 | static inline void txx9_irq_mask(unsigned int irq) | 82 | static inline void txx9_irq_mask(struct irq_data *d) |
83 | { | 83 | { |
84 | unsigned int irq_nr = irq - TXX9_IRQ_BASE; | 84 | unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; |
85 | u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2]; | 85 | u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2]; |
86 | int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8; | 86 | int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8; |
87 | 87 | ||
@@ -99,19 +99,19 @@ static inline void txx9_irq_mask(unsigned int irq) | |||
99 | #endif | 99 | #endif |
100 | } | 100 | } |
101 | 101 | ||
102 | static void txx9_irq_mask_ack(unsigned int irq) | 102 | static void txx9_irq_mask_ack(struct irq_data *d) |
103 | { | 103 | { |
104 | unsigned int irq_nr = irq - TXX9_IRQ_BASE; | 104 | unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; |
105 | 105 | ||
106 | txx9_irq_mask(irq); | 106 | txx9_irq_mask(d); |
107 | /* clear edge detection */ | 107 | /* clear edge detection */ |
108 | if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode))) | 108 | if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode))) |
109 | __raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr); | 109 | __raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr); |
110 | } | 110 | } |
111 | 111 | ||
112 | static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type) | 112 | static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type) |
113 | { | 113 | { |
114 | unsigned int irq_nr = irq - TXX9_IRQ_BASE; | 114 | unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; |
115 | u32 cr; | 115 | u32 cr; |
116 | u32 __iomem *crp; | 116 | u32 __iomem *crp; |
117 | int ofs; | 117 | int ofs; |
@@ -139,11 +139,11 @@ static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type) | |||
139 | 139 | ||
140 | static struct irq_chip txx9_irq_chip = { | 140 | static struct irq_chip txx9_irq_chip = { |
141 | .name = "TXX9", | 141 | .name = "TXX9", |
142 | .ack = txx9_irq_mask_ack, | 142 | .irq_ack = txx9_irq_mask_ack, |
143 | .mask = txx9_irq_mask, | 143 | .irq_mask = txx9_irq_mask, |
144 | .mask_ack = txx9_irq_mask_ack, | 144 | .irq_mask_ack = txx9_irq_mask_ack, |
145 | .unmask = txx9_irq_unmask, | 145 | .irq_unmask = txx9_irq_unmask, |
146 | .set_type = txx9_irq_set_type, | 146 | .irq_set_type = txx9_irq_set_type, |
147 | }; | 147 | }; |
148 | 148 | ||
149 | void __init txx9_irq_init(unsigned long baseaddr) | 149 | void __init txx9_irq_init(unsigned long baseaddr) |
@@ -154,8 +154,8 @@ void __init txx9_irq_init(unsigned long baseaddr) | |||
154 | for (i = 0; i < TXx9_MAX_IR; i++) { | 154 | for (i = 0; i < TXx9_MAX_IR; i++) { |
155 | txx9irq[i].level = 4; /* middle level */ | 155 | txx9irq[i].level = 4; /* middle level */ |
156 | txx9irq[i].mode = TXx9_IRCR_LOW; | 156 | txx9irq[i].mode = TXx9_IRCR_LOW; |
157 | set_irq_chip_and_handler(TXX9_IRQ_BASE + i, | 157 | irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip, |
158 | &txx9_irq_chip, handle_level_irq); | 158 | handle_level_irq); |
159 | } | 159 | } |
160 | 160 | ||
161 | /* mask all IRC interrupts */ | 161 | /* mask all IRC interrupts */ |
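Editor's note: with .set_type converted to .irq_set_type(struct irq_data *, ...), a TXx9 trigger mode is still requested through the generic layer, which resolves the descriptor and calls the chip method with the matching irq_data. A hedged sketch of how a board file might pick an edge trigger for one of these lines; the IRQ offset is illustrative and TXX9_IRQ_BASE is assumed to come from the platform headers:

        #include <linux/interrupt.h>
        #include <linux/irq.h>
        #include <asm/txx9irq.h>

        #define BOARD_ETH_IRQ   (TXX9_IRQ_BASE + 21)    /* illustrative pin */

        static int __init board_eth_irq_setup(void)
        {
                /*
                 * The genirq core ends up invoking txx9_irq_chip.irq_set_type()
                 * with the irq_data for this descriptor.
                 */
                return irq_set_irq_type(BOARD_ETH_IRQ, IRQ_TYPE_EDGE_RISING);
        }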
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index 2b7f3f703b83..a8244854d3dc 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c | |||
@@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event, | |||
161 | return ret; | 161 | return ret; |
162 | } | 162 | } |
163 | 163 | ||
164 | static int mipspmu_enable(struct perf_event *event) | ||
165 | { | ||
166 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
167 | struct hw_perf_event *hwc = &event->hw; | ||
168 | int idx; | ||
169 | int err = 0; | ||
170 | |||
171 | /* To look for a free counter for this event. */ | ||
172 | idx = mipspmu->alloc_counter(cpuc, hwc); | ||
173 | if (idx < 0) { | ||
174 | err = idx; | ||
175 | goto out; | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * If there is an event in the counter we are going to use then | ||
180 | * make sure it is disabled. | ||
181 | */ | ||
182 | event->hw.idx = idx; | ||
183 | mipspmu->disable_event(idx); | ||
184 | cpuc->events[idx] = event; | ||
185 | |||
186 | /* Set the period for the event. */ | ||
187 | mipspmu_event_set_period(event, hwc, idx); | ||
188 | |||
189 | /* Enable the event. */ | ||
190 | mipspmu->enable_event(hwc, idx); | ||
191 | |||
192 | /* Propagate our changes to the userspace mapping. */ | ||
193 | perf_event_update_userpage(event); | ||
194 | |||
195 | out: | ||
196 | return err; | ||
197 | } | ||
198 | |||
199 | static void mipspmu_event_update(struct perf_event *event, | 164 | static void mipspmu_event_update(struct perf_event *event, |
200 | struct hw_perf_event *hwc, | 165 | struct hw_perf_event *hwc, |
201 | int idx) | 166 | int idx) |
@@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event, | |||
204 | unsigned long flags; | 169 | unsigned long flags; |
205 | int shift = 64 - TOTAL_BITS; | 170 | int shift = 64 - TOTAL_BITS; |
206 | s64 prev_raw_count, new_raw_count; | 171 | s64 prev_raw_count, new_raw_count; |
207 | s64 delta; | 172 | u64 delta; |
208 | 173 | ||
209 | again: | 174 | again: |
210 | prev_raw_count = local64_read(&hwc->prev_count); | 175 | prev_raw_count = local64_read(&hwc->prev_count); |
@@ -231,32 +196,90 @@ again: | |||
231 | return; | 196 | return; |
232 | } | 197 | } |
233 | 198 | ||
234 | static void mipspmu_disable(struct perf_event *event) | 199 | static void mipspmu_start(struct perf_event *event, int flags) |
200 | { | ||
201 | struct hw_perf_event *hwc = &event->hw; | ||
202 | |||
203 | if (!mipspmu) | ||
204 | return; | ||
205 | |||
206 | if (flags & PERF_EF_RELOAD) | ||
207 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); | ||
208 | |||
209 | hwc->state = 0; | ||
210 | |||
211 | /* Set the period for the event. */ | ||
212 | mipspmu_event_set_period(event, hwc, hwc->idx); | ||
213 | |||
214 | /* Enable the event. */ | ||
215 | mipspmu->enable_event(hwc, hwc->idx); | ||
216 | } | ||
217 | |||
218 | static void mipspmu_stop(struct perf_event *event, int flags) | ||
219 | { | ||
220 | struct hw_perf_event *hwc = &event->hw; | ||
221 | |||
222 | if (!mipspmu) | ||
223 | return; | ||
224 | |||
225 | if (!(hwc->state & PERF_HES_STOPPED)) { | ||
226 | /* We are working on a local event. */ | ||
227 | mipspmu->disable_event(hwc->idx); | ||
228 | barrier(); | ||
229 | mipspmu_event_update(event, hwc, hwc->idx); | ||
230 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
231 | } | ||
232 | } | ||
233 | |||
234 | static int mipspmu_add(struct perf_event *event, int flags) | ||
235 | { | 235 | { |
236 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 236 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
237 | struct hw_perf_event *hwc = &event->hw; | 237 | struct hw_perf_event *hwc = &event->hw; |
238 | int idx = hwc->idx; | 238 | int idx; |
239 | int err = 0; | ||
239 | 240 | ||
241 | perf_pmu_disable(event->pmu); | ||
240 | 242 | ||
241 | WARN_ON(idx < 0 || idx >= mipspmu->num_counters); | 243 | /* To look for a free counter for this event. */ |
244 | idx = mipspmu->alloc_counter(cpuc, hwc); | ||
245 | if (idx < 0) { | ||
246 | err = idx; | ||
247 | goto out; | ||
248 | } | ||
242 | 249 | ||
243 | /* We are working on a local event. */ | 250 | /* |
251 | * If there is an event in the counter we are going to use then | ||
252 | * make sure it is disabled. | ||
253 | */ | ||
254 | event->hw.idx = idx; | ||
244 | mipspmu->disable_event(idx); | 255 | mipspmu->disable_event(idx); |
256 | cpuc->events[idx] = event; | ||
245 | 257 | ||
246 | barrier(); | 258 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; |
247 | 259 | if (flags & PERF_EF_START) | |
248 | mipspmu_event_update(event, hwc, idx); | 260 | mipspmu_start(event, PERF_EF_RELOAD); |
249 | cpuc->events[idx] = NULL; | ||
250 | clear_bit(idx, cpuc->used_mask); | ||
251 | 261 | ||
262 | /* Propagate our changes to the userspace mapping. */ | ||
252 | perf_event_update_userpage(event); | 263 | perf_event_update_userpage(event); |
264 | |||
265 | out: | ||
266 | perf_pmu_enable(event->pmu); | ||
267 | return err; | ||
253 | } | 268 | } |
254 | 269 | ||
255 | static void mipspmu_unthrottle(struct perf_event *event) | 270 | static void mipspmu_del(struct perf_event *event, int flags) |
256 | { | 271 | { |
272 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
257 | struct hw_perf_event *hwc = &event->hw; | 273 | struct hw_perf_event *hwc = &event->hw; |
274 | int idx = hwc->idx; | ||
258 | 275 | ||
259 | mipspmu->enable_event(hwc, hwc->idx); | 276 | WARN_ON(idx < 0 || idx >= mipspmu->num_counters); |
277 | |||
278 | mipspmu_stop(event, PERF_EF_UPDATE); | ||
279 | cpuc->events[idx] = NULL; | ||
280 | clear_bit(idx, cpuc->used_mask); | ||
281 | |||
282 | perf_event_update_userpage(event); | ||
260 | } | 283 | } |
261 | 284 | ||
262 | static void mipspmu_read(struct perf_event *event) | 285 | static void mipspmu_read(struct perf_event *event) |
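Editor's note: the hunk above replaces the old enable/disable/unthrottle hooks with the split add/del/start/stop model: add() claims a counter and leaves the event in PERF_HES_STOPPED | PERF_HES_UPTODATE unless PERF_EF_START is passed, start() programs the period and enables the counter, stop() drains the count back into the generic layer, and del() releases the counter. A simplified, illustrative view of the call order the perf core follows when scheduling an event on and off a CPU (not the actual core code):

        #include <linux/perf_event.h>

        /* Illustrative only: how the core drives the callbacks defined above. */
        static void sched_in(struct pmu *pmu, struct perf_event *event)
        {
                pmu->pmu_disable(pmu);            /* mipspmu_disable(): pause counters */
                pmu->add(event, PERF_EF_START);   /* mipspmu_add() -> mipspmu_start()  */
                pmu->pmu_enable(pmu);             /* mipspmu_enable(): resume counters */
        }

        static void sched_out(struct pmu *pmu, struct perf_event *event)
        {
                pmu->pmu_disable(pmu);
                pmu->del(event, 0);               /* mipspmu_del() -> mipspmu_stop()   */
                pmu->pmu_enable(pmu);
        }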
@@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event) | |||
270 | mipspmu_event_update(event, hwc, hwc->idx); | 293 | mipspmu_event_update(event, hwc, hwc->idx); |
271 | } | 294 | } |
272 | 295 | ||
273 | static struct pmu pmu = { | 296 | static void mipspmu_enable(struct pmu *pmu) |
274 | .enable = mipspmu_enable, | 297 | { |
275 | .disable = mipspmu_disable, | 298 | if (mipspmu) |
276 | .unthrottle = mipspmu_unthrottle, | 299 | mipspmu->start(); |
277 | .read = mipspmu_read, | 300 | } |
278 | }; | 301 | |
302 | static void mipspmu_disable(struct pmu *pmu) | ||
303 | { | ||
304 | if (mipspmu) | ||
305 | mipspmu->stop(); | ||
306 | } | ||
279 | 307 | ||
280 | static atomic_t active_events = ATOMIC_INIT(0); | 308 | static atomic_t active_events = ATOMIC_INIT(0); |
281 | static DEFINE_MUTEX(pmu_reserve_mutex); | 309 | static DEFINE_MUTEX(pmu_reserve_mutex); |
@@ -318,6 +346,82 @@ static void mipspmu_free_irq(void) | |||
318 | perf_irq = save_perf_irq; | 346 | perf_irq = save_perf_irq; |
319 | } | 347 | } |
320 | 348 | ||
349 | /* | ||
350 | * mipsxx/rm9000/loongson2 have different performance counters, they have | ||
351 | * specific low-level init routines. | ||
352 | */ | ||
353 | static void reset_counters(void *arg); | ||
354 | static int __hw_perf_event_init(struct perf_event *event); | ||
355 | |||
356 | static void hw_perf_event_destroy(struct perf_event *event) | ||
357 | { | ||
358 | if (atomic_dec_and_mutex_lock(&active_events, | ||
359 | &pmu_reserve_mutex)) { | ||
360 | /* | ||
361 | * We must not call the destroy function with interrupts | ||
362 | * disabled. | ||
363 | */ | ||
364 | on_each_cpu(reset_counters, | ||
365 | (void *)(long)mipspmu->num_counters, 1); | ||
366 | mipspmu_free_irq(); | ||
367 | mutex_unlock(&pmu_reserve_mutex); | ||
368 | } | ||
369 | } | ||
370 | |||
371 | static int mipspmu_event_init(struct perf_event *event) | ||
372 | { | ||
373 | int err = 0; | ||
374 | |||
375 | switch (event->attr.type) { | ||
376 | case PERF_TYPE_RAW: | ||
377 | case PERF_TYPE_HARDWARE: | ||
378 | case PERF_TYPE_HW_CACHE: | ||
379 | break; | ||
380 | |||
381 | default: | ||
382 | return -ENOENT; | ||
383 | } | ||
384 | |||
385 | if (!mipspmu || event->cpu >= nr_cpumask_bits || | ||
386 | (event->cpu >= 0 && !cpu_online(event->cpu))) | ||
387 | return -ENODEV; | ||
388 | |||
389 | if (!atomic_inc_not_zero(&active_events)) { | ||
390 | if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { | ||
391 | atomic_dec(&active_events); | ||
392 | return -ENOSPC; | ||
393 | } | ||
394 | |||
395 | mutex_lock(&pmu_reserve_mutex); | ||
396 | if (atomic_read(&active_events) == 0) | ||
397 | err = mipspmu_get_irq(); | ||
398 | |||
399 | if (!err) | ||
400 | atomic_inc(&active_events); | ||
401 | mutex_unlock(&pmu_reserve_mutex); | ||
402 | } | ||
403 | |||
404 | if (err) | ||
405 | return err; | ||
406 | |||
407 | err = __hw_perf_event_init(event); | ||
408 | if (err) | ||
409 | hw_perf_event_destroy(event); | ||
410 | |||
411 | return err; | ||
412 | } | ||
413 | |||
414 | static struct pmu pmu = { | ||
415 | .pmu_enable = mipspmu_enable, | ||
416 | .pmu_disable = mipspmu_disable, | ||
417 | .event_init = mipspmu_event_init, | ||
418 | .add = mipspmu_add, | ||
419 | .del = mipspmu_del, | ||
420 | .start = mipspmu_start, | ||
421 | .stop = mipspmu_stop, | ||
422 | .read = mipspmu_read, | ||
423 | }; | ||
424 | |||
321 | static inline unsigned int | 425 | static inline unsigned int |
322 | mipspmu_perf_event_encode(const struct mips_perf_event *pev) | 426 | mipspmu_perf_event_encode(const struct mips_perf_event *pev) |
323 | { | 427 | { |
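Editor's note: mipspmu_event_init() above only claims PERF_TYPE_RAW, PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE events and returns -ENOENT otherwise, so the core falls through to other PMUs for e.g. software events. A userspace sketch of opening a hardware cycle counter that this init path would accept; error handling is trimmed for brevity:

        /* Userspace sketch: count CPU cycles via perf_event_open(2). */
        #include <linux/perf_event.h>
        #include <sys/syscall.h>
        #include <sys/ioctl.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        int main(void)
        {
                struct perf_event_attr attr;
                uint64_t count;
                int fd;

                memset(&attr, 0, sizeof(attr));
                attr.size = sizeof(attr);
                attr.type = PERF_TYPE_HARDWARE;          /* accepted by mipspmu_event_init() */
                attr.config = PERF_COUNT_HW_CPU_CYCLES;
                attr.disabled = 1;

                fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
                ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
                /* ... workload under measurement ... */
                ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
                read(fd, &count, sizeof(count));
                printf("cycles: %llu\n", (unsigned long long)count);
                return 0;
        }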
@@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc, | |||
382 | { | 486 | { |
383 | struct hw_perf_event fake_hwc = event->hw; | 487 | struct hw_perf_event fake_hwc = event->hw; |
384 | 488 | ||
385 | if (event->pmu && event->pmu != &pmu) | 489 | /* Allow mixed event group. So return 1 to pass validation. */ |
386 | return 0; | 490 | if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) |
491 | return 1; | ||
387 | 492 | ||
388 | return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; | 493 | return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; |
389 | } | 494 | } |
@@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event) | |||
409 | return 0; | 514 | return 0; |
410 | } | 515 | } |
411 | 516 | ||
412 | /* | ||
413 | * mipsxx/rm9000/loongson2 have different performance counters, they have | ||
414 | * specific low-level init routines. | ||
415 | */ | ||
416 | static void reset_counters(void *arg); | ||
417 | static int __hw_perf_event_init(struct perf_event *event); | ||
418 | |||
419 | static void hw_perf_event_destroy(struct perf_event *event) | ||
420 | { | ||
421 | if (atomic_dec_and_mutex_lock(&active_events, | ||
422 | &pmu_reserve_mutex)) { | ||
423 | /* | ||
424 | * We must not call the destroy function with interrupts | ||
425 | * disabled. | ||
426 | */ | ||
427 | on_each_cpu(reset_counters, | ||
428 | (void *)(long)mipspmu->num_counters, 1); | ||
429 | mipspmu_free_irq(); | ||
430 | mutex_unlock(&pmu_reserve_mutex); | ||
431 | } | ||
432 | } | ||
433 | |||
434 | const struct pmu *hw_perf_event_init(struct perf_event *event) | ||
435 | { | ||
436 | int err = 0; | ||
437 | |||
438 | if (!mipspmu || event->cpu >= nr_cpumask_bits || | ||
439 | (event->cpu >= 0 && !cpu_online(event->cpu))) | ||
440 | return ERR_PTR(-ENODEV); | ||
441 | |||
442 | if (!atomic_inc_not_zero(&active_events)) { | ||
443 | if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { | ||
444 | atomic_dec(&active_events); | ||
445 | return ERR_PTR(-ENOSPC); | ||
446 | } | ||
447 | |||
448 | mutex_lock(&pmu_reserve_mutex); | ||
449 | if (atomic_read(&active_events) == 0) | ||
450 | err = mipspmu_get_irq(); | ||
451 | |||
452 | if (!err) | ||
453 | atomic_inc(&active_events); | ||
454 | mutex_unlock(&pmu_reserve_mutex); | ||
455 | } | ||
456 | |||
457 | if (err) | ||
458 | return ERR_PTR(err); | ||
459 | |||
460 | err = __hw_perf_event_init(event); | ||
461 | if (err) | ||
462 | hw_perf_event_destroy(event); | ||
463 | |||
464 | return err ? ERR_PTR(err) : &pmu; | ||
465 | } | ||
466 | |||
467 | void hw_perf_enable(void) | ||
468 | { | ||
469 | if (mipspmu) | ||
470 | mipspmu->start(); | ||
471 | } | ||
472 | |||
473 | void hw_perf_disable(void) | ||
474 | { | ||
475 | if (mipspmu) | ||
476 | mipspmu->stop(); | ||
477 | } | ||
478 | |||
479 | /* This is needed by specific irq handlers in perf_event_*.c */ | 517 | /* This is needed by specific irq handlers in perf_event_*.c */ |
480 | static void | 518 | static void |
481 | handle_associated_event(struct cpu_hw_events *cpuc, | 519 | handle_associated_event(struct cpu_hw_events *cpuc, |
@@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc, | |||
496 | #include "perf_event_mipsxx.c" | 534 | #include "perf_event_mipsxx.c" |
497 | 535 | ||
498 | /* Callchain handling code. */ | 536 | /* Callchain handling code. */ |
499 | static inline void | ||
500 | callchain_store(struct perf_callchain_entry *entry, | ||
501 | u64 ip) | ||
502 | { | ||
503 | if (entry->nr < PERF_MAX_STACK_DEPTH) | ||
504 | entry->ip[entry->nr++] = ip; | ||
505 | } | ||
506 | 537 | ||
507 | /* | 538 | /* |
508 | * Leave userspace callchain empty for now. When we find a way to trace | 539 | * Leave userspace callchain empty for now. When we find a way to trace |
509 | * the user stack callchains, we add here. | 540 | * the user stack callchains, we add here. |
510 | */ | 541 | */ |
511 | static void | 542 | void perf_callchain_user(struct perf_callchain_entry *entry, |
512 | perf_callchain_user(struct pt_regs *regs, | 543 | struct pt_regs *regs) |
513 | struct perf_callchain_entry *entry) | ||
514 | { | 544 | { |
515 | } | 545 | } |
516 | 546 | ||
@@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry, | |||
523 | while (!kstack_end(sp)) { | 553 | while (!kstack_end(sp)) { |
524 | addr = *sp++; | 554 | addr = *sp++; |
525 | if (__kernel_text_address(addr)) { | 555 | if (__kernel_text_address(addr)) { |
526 | callchain_store(entry, addr); | 556 | perf_callchain_store(entry, addr); |
527 | if (entry->nr >= PERF_MAX_STACK_DEPTH) | 557 | if (entry->nr >= PERF_MAX_STACK_DEPTH) |
528 | break; | 558 | break; |
529 | } | 559 | } |
530 | } | 560 | } |
531 | } | 561 | } |
532 | 562 | ||
533 | static void | 563 | void perf_callchain_kernel(struct perf_callchain_entry *entry, |
534 | perf_callchain_kernel(struct pt_regs *regs, | 564 | struct pt_regs *regs) |
535 | struct perf_callchain_entry *entry) | ||
536 | { | 565 | { |
537 | unsigned long sp = regs->regs[29]; | 566 | unsigned long sp = regs->regs[29]; |
538 | #ifdef CONFIG_KALLSYMS | 567 | #ifdef CONFIG_KALLSYMS |
539 | unsigned long ra = regs->regs[31]; | 568 | unsigned long ra = regs->regs[31]; |
540 | unsigned long pc = regs->cp0_epc; | 569 | unsigned long pc = regs->cp0_epc; |
541 | 570 | ||
542 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
543 | if (raw_show_trace || !__kernel_text_address(pc)) { | 571 | if (raw_show_trace || !__kernel_text_address(pc)) { |
544 | unsigned long stack_page = | 572 | unsigned long stack_page = |
545 | (unsigned long)task_stack_page(current); | 573 | (unsigned long)task_stack_page(current); |
@@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs, | |||
549 | return; | 577 | return; |
550 | } | 578 | } |
551 | do { | 579 | do { |
552 | callchain_store(entry, pc); | 580 | perf_callchain_store(entry, pc); |
553 | if (entry->nr >= PERF_MAX_STACK_DEPTH) | 581 | if (entry->nr >= PERF_MAX_STACK_DEPTH) |
554 | break; | 582 | break; |
555 | pc = unwind_stack(current, &sp, pc, &ra); | 583 | pc = unwind_stack(current, &sp, pc, &ra); |
556 | } while (pc); | 584 | } while (pc); |
557 | #else | 585 | #else |
558 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
559 | save_raw_perf_callchain(entry, sp); | 586 | save_raw_perf_callchain(entry, sp); |
560 | #endif | 587 | #endif |
561 | } | 588 | } |
562 | |||
563 | static void | ||
564 | perf_do_callchain(struct pt_regs *regs, | ||
565 | struct perf_callchain_entry *entry) | ||
566 | { | ||
567 | int is_user; | ||
568 | |||
569 | if (!regs) | ||
570 | return; | ||
571 | |||
572 | is_user = user_mode(regs); | ||
573 | |||
574 | if (!current || !current->pid) | ||
575 | return; | ||
576 | |||
577 | if (is_user && current->state != TASK_RUNNING) | ||
578 | return; | ||
579 | |||
580 | if (!is_user) { | ||
581 | perf_callchain_kernel(regs, entry); | ||
582 | if (current->mm) | ||
583 | regs = task_pt_regs(current); | ||
584 | else | ||
585 | regs = NULL; | ||
586 | } | ||
587 | if (regs) | ||
588 | perf_callchain_user(regs, entry); | ||
589 | } | ||
590 | |||
591 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); | ||
592 | |||
593 | struct perf_callchain_entry * | ||
594 | perf_callchain(struct pt_regs *regs) | ||
595 | { | ||
596 | struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry); | ||
597 | |||
598 | entry->nr = 0; | ||
599 | perf_do_callchain(regs, entry); | ||
600 | return entry; | ||
601 | } | ||
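Editor's note: the callchain rework drops the local callchain_store()/perf_do_callchain()/perf_callchain() plumbing and implements the perf_callchain_kernel()/perf_callchain_user() hooks directly, recording frames with the generic perf_callchain_store() helper; the core now supplies the PERF_CONTEXT_KERNEL marker and the per-CPU entry buffers itself. For reference, the generic helper is essentially equivalent to the removed local one; roughly:

        /* Rough equivalent of the generic helper (see include/linux/perf_event.h). */
        static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
        {
                if (entry->nr < PERF_MAX_STACK_DEPTH)
                        entry->ip[entry->nr++] = ip;
        }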
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 183e0d226669..75266ff4cc33 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c | |||
@@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void) | |||
696 | * interrupt, not NMI. | 696 | * interrupt, not NMI. |
697 | */ | 697 | */ |
698 | if (handled == IRQ_HANDLED) | 698 | if (handled == IRQ_HANDLED) |
699 | perf_event_do_pending(); | 699 | irq_work_run(); |
700 | 700 | ||
701 | #ifdef CONFIG_MIPS_MT_SMP | 701 | #ifdef CONFIG_MIPS_MT_SMP |
702 | read_unlock(&pmuint_rwlock); | 702 | read_unlock(&pmuint_rwlock); |
@@ -721,7 +721,7 @@ static void mipsxx_pmu_start(void) | |||
721 | 721 | ||
722 | /* | 722 | /* |
723 | * MIPS performance counters can be per-TC. The control registers can | 723 | * MIPS performance counters can be per-TC. The control registers can |
724 | * not be directly accessed accross CPUs. Hence if we want to do global | 724 | * not be directly accessed across CPUs. Hence if we want to do global |
725 | * control, we need cross CPU calls. on_each_cpu() can help us, but we | 725 | * control, we need cross CPU calls. on_each_cpu() can help us, but we |
726 | * can not make sure this function is called with interrupts enabled. So | 726 | * can not make sure this function is called with interrupts enabled. So |
727 | * here we pause local counters and then grab a rwlock and leave the | 727 | * here we pause local counters and then grab a rwlock and leave the |
@@ -1045,6 +1045,8 @@ init_hw_perf_events(void) | |||
1045 | "CPU, irq %d%s\n", mipspmu->name, counters, irq, | 1045 | "CPU, irq %d%s\n", mipspmu->name, counters, irq, |
1046 | irq < 0 ? " (share with timer interrupt)" : ""); | 1046 | irq < 0 ? " (share with timer interrupt)" : ""); |
1047 | 1047 | ||
1048 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); | ||
1049 | |||
1048 | return 0; | 1050 | return 0; |
1049 | } | 1051 | } |
1050 | early_initcall(init_hw_perf_events); | 1052 | early_initcall(init_hw_perf_events); |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index ae167df73ddd..d2112d3cf115 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -410,7 +410,7 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, | |||
410 | if (!kallsyms_lookup_size_offset(pc, &size, &ofs)) | 410 | if (!kallsyms_lookup_size_offset(pc, &size, &ofs)) |
411 | return 0; | 411 | return 0; |
412 | /* | 412 | /* |
413 | * Return ra if an exception occured at the first instruction | 413 | * Return ra if an exception occurred at the first instruction |
414 | */ | 414 | */ |
415 | if (unlikely(ofs == 0)) { | 415 | if (unlikely(ofs == 0)) { |
416 | pc = *ra; | 416 | pc = *ra; |
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index fbaabad0e6e2..7f5468b38d4c 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -586,6 +586,10 @@ einval: li v0, -ENOSYS | |||
586 | sys sys_fanotify_init 2 | 586 | sys sys_fanotify_init 2 |
587 | sys sys_fanotify_mark 6 | 587 | sys sys_fanotify_mark 6 |
588 | sys sys_prlimit64 4 | 588 | sys sys_prlimit64 4 |
589 | sys sys_name_to_handle_at 5 | ||
590 | sys sys_open_by_handle_at 3 /* 4340 */ | ||
591 | sys sys_clock_adjtime 2 | ||
592 | sys sys_syncfs 1 | ||
589 | .endm | 593 | .endm |
590 | 594 | ||
591 | /* We pre-compute the number of _instruction_ bytes needed to | 595 | /* We pre-compute the number of _instruction_ bytes needed to |
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 3f4179283207..a2e1fcbc41dc 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -425,4 +425,8 @@ sys_call_table: | |||
425 | PTR sys_fanotify_init /* 5295 */ | 425 | PTR sys_fanotify_init /* 5295 */ |
426 | PTR sys_fanotify_mark | 426 | PTR sys_fanotify_mark |
427 | PTR sys_prlimit64 | 427 | PTR sys_prlimit64 |
428 | PTR sys_name_to_handle_at | ||
429 | PTR sys_open_by_handle_at | ||
430 | PTR sys_clock_adjtime /* 5300 */ | ||
431 | PTR sys_syncfs | ||
428 | .size sys_call_table,.-sys_call_table | 432 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index f08ece6d8acc..b2c7624995b8 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -425,4 +425,8 @@ EXPORT(sysn32_call_table) | |||
425 | PTR sys_fanotify_init /* 6300 */ | 425 | PTR sys_fanotify_init /* 6300 */ |
426 | PTR sys_fanotify_mark | 426 | PTR sys_fanotify_mark |
427 | PTR sys_prlimit64 | 427 | PTR sys_prlimit64 |
428 | PTR sys_name_to_handle_at | ||
429 | PTR sys_open_by_handle_at | ||
430 | PTR compat_sys_clock_adjtime /* 6305 */ | ||
431 | PTR sys_syncfs | ||
428 | .size sysn32_call_table,.-sysn32_call_table | 432 | .size sysn32_call_table,.-sysn32_call_table |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 78d768a3e19d..049a9c8c49a0 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -543,4 +543,8 @@ sys_call_table: | |||
543 | PTR sys_fanotify_init | 543 | PTR sys_fanotify_init |
544 | PTR sys_32_fanotify_mark | 544 | PTR sys_32_fanotify_mark |
545 | PTR sys_prlimit64 | 545 | PTR sys_prlimit64 |
546 | PTR sys_name_to_handle_at | ||
547 | PTR compat_sys_open_by_handle_at /* 4340 */ | ||
548 | PTR compat_sys_clock_adjtime | ||
549 | PTR sys_syncfs | ||
546 | .size sys_call_table,.-sys_call_table | 550 | .size sys_call_table,.-sys_call_table |
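Editor's note: the four syscall-table hunks above wire up name_to_handle_at, open_by_handle_at, clock_adjtime and syncfs for each MIPS ABI, using compat_* wrappers where 32-bit userspace structures need translation on a 64-bit kernel. Entries are numbered by table position, so for o32 these land at 4339-4342 (the /* 4340 */ comment marks open_by_handle_at). A hedged userspace sketch invoking the new syncfs entry on o32, assuming a libc of the day without __NR_syncfs:

        /* Userspace sketch: calling the new o32 syncfs syscall by number. */
        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>
        #include <sys/syscall.h>

        #ifndef __NR_syncfs
        #define __NR_syncfs 4342        /* o32: 4000 base + table position (assumed) */
        #endif

        int main(void)
        {
                int fd = open("/", O_RDONLY);

                if (fd < 0 || syscall(__NR_syncfs, fd) < 0)
                        perror("syncfs");
                close(fd);
                return 0;
        }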
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 5922342bca39..dbbe0ce48d89 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc) | |||
84 | 84 | ||
85 | static int protected_restore_fp_context(struct sigcontext __user *sc) | 85 | static int protected_restore_fp_context(struct sigcontext __user *sc) |
86 | { | 86 | { |
87 | int err, tmp; | 87 | int err, tmp __maybe_unused; |
88 | while (1) { | 88 | while (1) { |
89 | lock_fpu_owner(); | 89 | lock_fpu_owner(); |
90 | own_fpu_inatomic(0); | 90 | own_fpu_inatomic(0); |
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index a0ed0e052b2e..aae986613795 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c | |||
@@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc) | |||
115 | 115 | ||
116 | static int protected_restore_fp_context32(struct sigcontext32 __user *sc) | 116 | static int protected_restore_fp_context32(struct sigcontext32 __user *sc) |
117 | { | 117 | { |
118 | int err, tmp; | 118 | int err, tmp __maybe_unused; |
119 | while (1) { | 119 | while (1) { |
120 | lock_fpu_owner(); | 120 | lock_fpu_owner(); |
121 | own_fpu_inatomic(0); | 121 | own_fpu_inatomic(0); |
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index c0e81418ba21..1ec56e635d04 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c | |||
@@ -120,7 +120,7 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action) | |||
120 | 120 | ||
121 | local_irq_save(flags); | 121 | local_irq_save(flags); |
122 | 122 | ||
123 | vpflags = dvpe(); /* cant access the other CPU's registers whilst MVPE enabled */ | 123 | vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */ |
124 | 124 | ||
125 | switch (action) { | 125 | switch (action) { |
126 | case SMP_CALL_FUNCTION: | 126 | case SMP_CALL_FUNCTION: |
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 383aeb95cb49..32a256101082 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void) | |||
193 | */ | 193 | */ |
194 | static struct task_struct *cpu_idle_thread[NR_CPUS]; | 194 | static struct task_struct *cpu_idle_thread[NR_CPUS]; |
195 | 195 | ||
196 | struct create_idle { | ||
197 | struct work_struct work; | ||
198 | struct task_struct *idle; | ||
199 | struct completion done; | ||
200 | int cpu; | ||
201 | }; | ||
202 | |||
203 | static void __cpuinit do_fork_idle(struct work_struct *work) | ||
204 | { | ||
205 | struct create_idle *c_idle = | ||
206 | container_of(work, struct create_idle, work); | ||
207 | |||
208 | c_idle->idle = fork_idle(c_idle->cpu); | ||
209 | complete(&c_idle->done); | ||
210 | } | ||
211 | |||
196 | int __cpuinit __cpu_up(unsigned int cpu) | 212 | int __cpuinit __cpu_up(unsigned int cpu) |
197 | { | 213 | { |
198 | struct task_struct *idle; | 214 | struct task_struct *idle; |
@@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
203 | * Linux can schedule processes on this slave. | 219 | * Linux can schedule processes on this slave. |
204 | */ | 220 | */ |
205 | if (!cpu_idle_thread[cpu]) { | 221 | if (!cpu_idle_thread[cpu]) { |
206 | idle = fork_idle(cpu); | 222 | /* |
207 | cpu_idle_thread[cpu] = idle; | 223 | * Schedule work item to avoid forking user task |
224 | * Ported from arch/x86/kernel/smpboot.c | ||
225 | */ | ||
226 | struct create_idle c_idle = { | ||
227 | .cpu = cpu, | ||
228 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | ||
229 | }; | ||
230 | |||
231 | INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); | ||
232 | schedule_work(&c_idle.work); | ||
233 | wait_for_completion(&c_idle.done); | ||
234 | idle = cpu_idle_thread[cpu] = c_idle.idle; | ||
208 | 235 | ||
209 | if (IS_ERR(idle)) | 236 | if (IS_ERR(idle)) |
210 | panic(KERN_ERR "Fork failed for CPU %d", cpu); | 237 | panic(KERN_ERR "Fork failed for CPU %d", cpu); |
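Editor's note: the __cpu_up() change above defers fork_idle() to a workqueue item so the idle task is created from a kernel worker rather than from whatever user task happens to call cpu_up(), mirroring arch/x86/kernel/smpboot.c; an on-stack work_struct plus a completion keeps the caller synchronous. A stripped-down sketch of that pattern with illustrative names (the arithmetic stands in for fork_idle()):

        #include <linux/workqueue.h>
        #include <linux/completion.h>
        #include <linux/kernel.h>

        struct deferred_ctx {
                struct work_struct work;
                struct completion done;
                int arg;
                int result;
        };

        static void deferred_fn(struct work_struct *work)
        {
                struct deferred_ctx *ctx = container_of(work, struct deferred_ctx, work);

                ctx->result = ctx->arg * 2;     /* stand-in for fork_idle(cpu) */
                complete(&ctx->done);
        }

        static int run_deferred(int arg)
        {
                struct deferred_ctx ctx = {
                        .arg  = arg,
                        .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
                };

                INIT_WORK_ONSTACK(&ctx.work, deferred_fn);
                schedule_work(&ctx.work);
                wait_for_completion(&ctx.done); /* block until the worker has run */
                return ctx.result;
        }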
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 39c08254b0f1..5a88cc4ccd5a 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -677,8 +677,9 @@ void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity) | |||
677 | */ | 677 | */ |
678 | } | 678 | } |
679 | 679 | ||
680 | void smtc_forward_irq(unsigned int irq) | 680 | void smtc_forward_irq(struct irq_data *d) |
681 | { | 681 | { |
682 | unsigned int irq = d->irq; | ||
682 | int target; | 683 | int target; |
683 | 684 | ||
684 | /* | 685 | /* |
@@ -692,7 +693,7 @@ void smtc_forward_irq(unsigned int irq) | |||
692 | * and efficiency, we just pick the easiest one to find. | 693 | * and efficiency, we just pick the easiest one to find. |
693 | */ | 694 | */ |
694 | 695 | ||
695 | target = cpumask_first(irq_desc[irq].affinity); | 696 | target = cpumask_first(d->affinity); |
696 | 697 | ||
697 | /* | 698 | /* |
698 | * We depend on the platform code to have correctly processed | 699 | * We depend on the platform code to have correctly processed |
@@ -707,12 +708,10 @@ void smtc_forward_irq(unsigned int irq) | |||
707 | */ | 708 | */ |
708 | 709 | ||
709 | /* If no one is eligible, service locally */ | 710 | /* If no one is eligible, service locally */ |
710 | if (target >= NR_CPUS) { | 711 | if (target >= NR_CPUS) |
711 | do_IRQ_no_affinity(irq); | 712 | do_IRQ_no_affinity(irq); |
712 | return; | 713 | else |
713 | } | 714 | smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq); |
714 | |||
715 | smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq); | ||
716 | } | 715 | } |
717 | 716 | ||
718 | #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ | 717 | #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ |
@@ -1147,7 +1146,7 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe) | |||
1147 | 1146 | ||
1148 | setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); | 1147 | setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); |
1149 | 1148 | ||
1150 | set_irq_handler(cpu_ipi_irq, handle_percpu_irq); | 1149 | irq_set_handler(cpu_ipi_irq, handle_percpu_irq); |
1151 | } | 1150 | } |
1152 | 1151 | ||
1153 | /* | 1152 | /* |
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 1dc6edff45e0..58beabf50b3c 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c | |||
@@ -383,12 +383,11 @@ save_static_function(sys_sysmips); | |||
383 | static int __used noinline | 383 | static int __used noinline |
384 | _sys_sysmips(nabi_no_regargs struct pt_regs regs) | 384 | _sys_sysmips(nabi_no_regargs struct pt_regs regs) |
385 | { | 385 | { |
386 | long cmd, arg1, arg2, arg3; | 386 | long cmd, arg1, arg2; |
387 | 387 | ||
388 | cmd = regs.regs[4]; | 388 | cmd = regs.regs[4]; |
389 | arg1 = regs.regs[5]; | 389 | arg1 = regs.regs[5]; |
390 | arg2 = regs.regs[6]; | 390 | arg2 = regs.regs[6]; |
391 | arg3 = regs.regs[7]; | ||
392 | 391 | ||
393 | switch (cmd) { | 392 | switch (cmd) { |
394 | case MIPS_ATOMIC_SET: | 393 | case MIPS_ATOMIC_SET: |
@@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs) | |||
405 | if (arg1 & 2) | 404 | if (arg1 & 2) |
406 | set_thread_flag(TIF_LOGADE); | 405 | set_thread_flag(TIF_LOGADE); |
407 | else | 406 | else |
408 | clear_thread_flag(TIF_FIXADE); | 407 | clear_thread_flag(TIF_LOGADE); |
409 | 408 | ||
410 | return 0; | 409 | return 0; |
411 | 410 | ||
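Editor's note: the sysmips() hunk above fixes the MIPS_FIXADE command: bit 1 of the argument controls TIF_LOGADE, but the else branch was clearing TIF_FIXADE instead, so unaligned-access logging could never be switched off once enabled (the unused arg3 is dropped at the same time). A condensed sketch of the intended flag handling, assuming the surrounding TIF_FIXADE branch follows the same set/clear pattern:

        /* Condensed, assumed view of MIPS_FIXADE flag handling. */
        if (arg1 & 1)
                set_thread_flag(TIF_FIXADE);    /* fix up unaligned accesses */
        else
                clear_thread_flag(TIF_FIXADE);

        if (arg1 & 2)
                set_thread_flag(TIF_LOGADE);    /* log unaligned accesses    */
        else
                clear_thread_flag(TIF_LOGADE);  /* was wrongly TIF_FIXADE    */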
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index fb7497405510..1083ad4e1017 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c | |||
@@ -102,7 +102,7 @@ static __init int cpu_has_mfc0_count_bug(void) | |||
102 | case CPU_R4400SC: | 102 | case CPU_R4400SC: |
103 | case CPU_R4400MC: | 103 | case CPU_R4400MC: |
104 | /* | 104 | /* |
105 | * The published errata for the R4400 upto 3.0 say the CPU | 105 | * The published errata for the R4400 up to 3.0 say the CPU |
106 | * has the mfc0 from count bug. | 106 | * has the mfc0 from count bug. |
107 | */ | 107 | */ |
108 | if ((current_cpu_data.processor_id & 0xff) <= 0x30) | 108 | if ((current_cpu_data.processor_id & 0xff) <= 0x30) |
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 570607b376b5..832afbb87588 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S | |||
@@ -115,7 +115,7 @@ SECTIONS | |||
115 | EXIT_DATA | 115 | EXIT_DATA |
116 | } | 116 | } |
117 | 117 | ||
118 | PERCPU(PAGE_SIZE) | 118 | PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE) |
119 | . = ALIGN(PAGE_SIZE); | 119 | . = ALIGN(PAGE_SIZE); |
120 | __init_end = .; | 120 | __init_end = .; |
121 | /* freed after init ends here */ | 121 | /* freed after init ends here */ |
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 6a1fdfef8fde..dbb6b408f001 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c | |||
@@ -19,7 +19,7 @@ | |||
19 | * VPE support module | 19 | * VPE support module |
20 | * | 20 | * |
21 | * Provides support for loading a MIPS SP program on VPE1. | 21 | * Provides support for loading a MIPS SP program on VPE1. |
22 | * The SP enviroment is rather simple, no tlb's. It needs to be relocatable | 22 | * The SP environment is rather simple, no tlb's. It needs to be relocatable |
23 | * (or partially linked). You should initialise your stack in the startup | 23 | * (or partially linked). You should initialise your stack in the startup |
24 | * code. This loader looks for the symbol __start and sets up | 24 | * code. This loader looks for the symbol __start and sets up |
25 | * execution to resume from there. The MIPS SDE kit contains suitable examples. | 25 | * execution to resume from there. The MIPS SDE kit contains suitable examples. |
@@ -148,9 +148,9 @@ struct { | |||
148 | spinlock_t tc_list_lock; | 148 | spinlock_t tc_list_lock; |
149 | struct list_head tc_list; /* Thread contexts */ | 149 | struct list_head tc_list; /* Thread contexts */ |
150 | } vpecontrol = { | 150 | } vpecontrol = { |
151 | .vpe_list_lock = SPIN_LOCK_UNLOCKED, | 151 | .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock), |
152 | .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), | 152 | .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), |
153 | .tc_list_lock = SPIN_LOCK_UNLOCKED, | 153 | .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock), |
154 | .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) | 154 | .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) |
155 | }; | 155 | }; |
156 | 156 | ||
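Editor's note: the vpe.c hunk above replaces SPIN_LOCK_UNLOCKED, which cannot carry the per-lock class key that lockdep needs, with __SPIN_LOCK_UNLOCKED(name) inside the aggregate initialiser. A minimal sketch of both static initialisation forms; the foo_* names are illustrative:

        #include <linux/spinlock.h>
        #include <linux/list.h>

        /* Standalone static lock. */
        static DEFINE_SPINLOCK(foo_lock);

        /* Lock embedded in a statically initialised structure, as in vpe.c. */
        static struct {
                spinlock_t list_lock;
                struct list_head list;
        } foo_control = {
                .list_lock = __SPIN_LOCK_UNLOCKED(foo_control.list_lock),
                .list      = LIST_HEAD_INIT(foo_control.list),
        };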