Diffstat (limited to 'arch/microblaze/kernel')
-rw-r--r--	arch/microblaze/kernel/Makefile       |   2
-rw-r--r--	arch/microblaze/kernel/asm-offsets.c  |   1
-rw-r--r--	arch/microblaze/kernel/cpu/cache.c    | 211
-rw-r--r--	arch/microblaze/kernel/dma.c          | 156
-rw-r--r--	arch/microblaze/kernel/entry.S        | 116
-rw-r--r--	arch/microblaze/kernel/head.S         |  13
-rw-r--r--	arch/microblaze/kernel/irq.c          |  15
-rw-r--r--	arch/microblaze/kernel/setup.c        |  45
8 files changed, 443 insertions, 116 deletions
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index b07594eccf9b..e51bc1520825 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -14,7 +14,7 @@ endif
 
 extra-y := head.o vmlinux.lds
 
-obj-y += exceptions.o \
+obj-y += dma.o exceptions.o \
 	hw_exception_handler.o init_task.o intc.o irq.o of_device.o \
 	of_platform.o process.o prom.o prom_parse.o ptrace.o \
 	setup.o signal.o sys_microblaze.o timer.o traps.o reset.o
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index 7bc7b68f97db..0071260a672c 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -90,6 +90,7 @@ int main(int argc, char *argv[])
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
 	DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
+	DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count));
 	BLANK();
 
 	/* struct cpu_context */
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index 2a56bccce4e0..f04d8a86dead 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -15,25 +15,6 @@
 #include <asm/cpuinfo.h>
 #include <asm/pvr.h>
 
-static inline void __invalidate_flush_icache(unsigned int addr)
-{
-	__asm__ __volatile__ ("wic %0, r0;" \
-			: : "r" (addr));
-}
-
-static inline void __flush_dcache(unsigned int addr)
-{
-	__asm__ __volatile__ ("wdc.flush %0, r0;" \
-			: : "r" (addr));
-}
-
-static inline void __invalidate_dcache(unsigned int baseaddr,
-				unsigned int offset)
-{
-	__asm__ __volatile__ ("wdc.clear %0, %1;" \
-			: : "r" (baseaddr), "r" (offset));
-}
-
 static inline void __enable_icache_msr(void)
 {
 	__asm__ __volatile__ (" msrset	r0, %0;		\
@@ -148,9 +129,9 @@ do {						\
 	int step = -line_length;				\
 	BUG_ON(step >= 0);					\
 								\
 	__asm__ __volatile__ (" 1: " #op " r0, %0;		\
				bgtid %0, 1b;			\
				addk %0, %0, %1;		\
				" : : "r" (len), "r" (step)	\
				: "memory");			\
 } while (0);
@@ -162,9 +143,9 @@ do {						\
 	int count = end - start;				\
 	BUG_ON(count <= 0);					\
 								\
 	__asm__ __volatile__ (" 1: " #op " %0, %1;		\
				bgtid %1, 1b;			\
				addk %1, %1, %2;		\
				" : : "r" (start), "r" (count),	\
				"r" (step) : "memory");		\
 } while (0);
@@ -175,7 +156,7 @@ do {						\
 	int volatile temp;					\
 	BUG_ON(end - start <= 0);				\
 								\
 	__asm__ __volatile__ (" 1: " #op " %1, r0;		\
				cmpu %0, %1, %2;		\
				bgtid %0, 1b;			\
				addk %1, %1, %3;		\
@@ -183,10 +164,14 @@ do {						\
				"r" (line_length) : "memory");	\
 } while (0);
 
+#define ASM_LOOP
+
 static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 		(unsigned int)start, (unsigned int) end);
 
@@ -196,8 +181,13 @@ static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
 	local_irq_save(flags);
 	__disable_icache_msr();
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
-
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic %0, r0;" \
+				: : "r" (i));
+#endif
 	__enable_icache_msr();
 	local_irq_restore(flags);
 }
@@ -206,7 +196,9 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
 		unsigned long end)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 		(unsigned int)start, (unsigned int) end);
 
@@ -216,7 +208,13 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
 	local_irq_save(flags);
 	__disable_icache_nomsr();
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic %0, r0;" \
+				: : "r" (i));
+#endif
 
 	__enable_icache_nomsr();
 	local_irq_restore(flags);
@@ -225,25 +223,41 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
 static void __flush_icache_range_noirq(unsigned long start,
 				unsigned long end)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 		(unsigned int)start, (unsigned int) end);
 
 	CACHE_LOOP_LIMITS(start, end,
 			cpuinfo.icache_line_length, cpuinfo.icache_size);
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 static void __flush_icache_all_msr_irq(void)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
 
 	local_irq_save(flags);
 	__disable_icache_msr();
-
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
-
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic %0, r0;" \
+				: : "r" (i));
+#endif
 	__enable_icache_msr();
 	local_irq_restore(flags);
 }
@@ -251,35 +265,59 @@ static void __flush_icache_all_msr_irq(void)
 static void __flush_icache_all_nomsr_irq(void)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
 
 	local_irq_save(flags);
 	__disable_icache_nomsr();
-
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
-
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic %0, r0;" \
+				: : "r" (i));
+#endif
 	__enable_icache_nomsr();
 	local_irq_restore(flags);
 }
 
 static void __flush_icache_all_noirq(void)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 static void __invalidate_dcache_all_msr_irq(void)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
 
 	local_irq_save(flags);
 	__disable_dcache_msr();
-
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
-
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc %0, r0;" \
+				: : "r" (i));
+#endif
 	__enable_dcache_msr();
 	local_irq_restore(flags);
 }
@@ -287,60 +325,107 @@ static void __invalidate_dcache_all_msr_irq(void)
 static void __invalidate_dcache_all_nomsr_irq(void)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
 
 	local_irq_save(flags);
 	__disable_dcache_nomsr();
-
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
-
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc %0, r0;" \
+				: : "r" (i));
+#endif
 	__enable_dcache_nomsr();
 	local_irq_restore(flags);
 }
 
 static void __invalidate_dcache_all_noirq_wt(void)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 /* FIXME this is weird - should be only wdc but not work
  * MS: I am getting bus errors and other weird things */
 static void __invalidate_dcache_all_wb(void)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
 					wdc.clear)
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc.clear %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 static void __invalidate_dcache_range_wb(unsigned long start,
 						unsigned long end)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 		(unsigned int)start, (unsigned int) end);
 
 	CACHE_LOOP_LIMITS(start, end,
 		cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wdc.clear %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
 						unsigned long end)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 		(unsigned int)start, (unsigned int) end);
 	CACHE_LOOP_LIMITS(start, end,
 			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wdc %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
 						unsigned long end)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 		(unsigned int)start, (unsigned int) end);
 	CACHE_LOOP_LIMITS(start, end,
@@ -349,7 +434,13 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
 	local_irq_save(flags);
 	__disable_dcache_msr();
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wdc %0, r0;" \
+				: : "r" (i));
+#endif
 
 	__enable_dcache_msr();
 	local_irq_restore(flags);
@@ -359,7 +450,9 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
 		unsigned long end)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 		(unsigned int)start, (unsigned int) end);
 
@@ -369,7 +462,13 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
 	local_irq_save(flags);
 	__disable_dcache_nomsr();
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wdc %0, r0;" \
+				: : "r" (i));
+#endif
 
 	__enable_dcache_nomsr();
 	local_irq_restore(flags);
@@ -377,19 +476,38 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
 
 static void __flush_dcache_all_wb(void)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
 				wdc.flush);
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc.flush %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 		(unsigned int)start, (unsigned int) end);
 
 	CACHE_LOOP_LIMITS(start, end,
 		cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wdc.flush %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 /* struct for wb caches and for wt caches */
@@ -493,7 +611,7 @@ const struct scache wt_nomsr_noirq = {
 #define CPUVER_7_20_A	0x0c
 #define CPUVER_7_20_D	0x0f
 
-#define INFO(s)	printk(KERN_INFO "cache: " s " \n");
+#define INFO(s)	printk(KERN_INFO "cache: " s "\n");
 
 void microblaze_cache_init(void)
 {
@@ -532,4 +650,9 @@ void microblaze_cache_init(void)
 			}
 		}
 	}
+	invalidate_dcache();
+	enable_dcache();
+
+	invalidate_icache();
+	enable_icache();
 }
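The pattern repeated through cache.c above pairs every assembly cache loop (CACHE_ALL_LOOP / CACHE_RANGE_LOOP_*) with a plain C fallback, selected at compile time by the ASM_LOOP define. Worth flagging in review: the C fallbacks in the dcache *range* functions step by cpuinfo.icache_line_length rather than dcache_line_length, which looks like a copy-paste slip on configurations where the two line lengths differ. A standalone C sketch of the compile-time switch, with the privileged wic/wdc instructions stubbed out so the loop shapes can be compiled and traced in userspace (all names and sizes below are illustrative, not kernel API):

#include <stdio.h>

#define ASM_LOOP	/* comment out to exercise the C fallback path */

struct cpuinfo_sketch {
	unsigned int icache_size;
	unsigned int icache_line_length;
};

static struct cpuinfo_sketch cpuinfo = { 1024, 16 };

static void cache_op(int addr)	/* stands in for "wic addr, r0" */
{
	printf("flush line at 0x%x\n", addr);
}

static void flush_icache_all_sketch(void)
{
#ifdef ASM_LOOP
	/* kernel: CACHE_ALL_LOOP expands to a three-instruction asm loop
	 * counting *down* from icache_size in line_length steps (note the
	 * negative step and BUG_ON(step >= 0) in the macro above);
	 * emulated here as a countdown C loop */
	int i;
	for (i = cpuinfo.icache_size - cpuinfo.icache_line_length;
			i >= 0; i -= cpuinfo.icache_line_length)
		cache_op(i);
#else
	/* the fallback added by this commit counts up, one op per line */
	int i;
	for (i = 0; i < cpuinfo.icache_size; i += cpuinfo.icache_line_length)
		cache_op(i);
#endif
}

int main(void)
{
	flush_icache_all_sketch();
	return 0;
}

Either branch touches every line exactly once; the asm version simply keeps the loop in the branch delay slot of bgtid, which is why the kernel prefers it when ASM_LOOP is defined.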
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
new file mode 100644
index 000000000000..b1084974fccd
--- /dev/null
+++ b/arch/microblaze/kernel/dma.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2009-2010 PetaLogix
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * Provide default implementations of the DMA mapping callbacks for
+ * directly mapped busses.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-debug.h>
+#include <asm/bug.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Generic direct DMA implementation
+ *
+ * This implementation supports a per-device offset that can be applied if
+ * the address at which memory is visible to devices is not 0. Platform code
+ * can set archdata.dma_data to an unsigned long holding the offset. By
+ * default the offset is PCI_DRAM_OFFSET.
+ */
+static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
+				size_t size, enum dma_data_direction direction)
+{
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		flush_dcache_range(paddr + offset, paddr + offset + size);
+		break;
+	case DMA_FROM_DEVICE:
+		invalidate_dcache_range(paddr + offset, paddr + offset + size);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static unsigned long get_dma_direct_offset(struct device *dev)
+{
+	if (dev)
+		return (unsigned long)dev->archdata.dma_data;
+
+	return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
+}
+
+#define NOT_COHERENT_CACHE
+
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag)
+{
+#ifdef NOT_COHERENT_CACHE
+	return consistent_alloc(flag, size, dma_handle);
+#else
+	void *ret;
+	struct page *page;
+	int node = dev_to_node(dev);
+
+	/* ignore region specifiers */
+	flag &= ~(__GFP_HIGHMEM);
+
+	page = alloc_pages_node(node, flag, get_order(size));
+	if (page == NULL)
+		return NULL;
+	ret = page_address(page);
+	memset(ret, 0, size);
+	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
+
+	return ret;
+#endif
+}
+
+static void dma_direct_free_coherent(struct device *dev, size_t size,
+			      void *vaddr, dma_addr_t dma_handle)
+{
+#ifdef NOT_COHERENT_CACHE
+	consistent_free(vaddr);
+#else
+	free_pages((unsigned long)vaddr, get_order(size));
+#endif
+}
+
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+			     int nents, enum dma_data_direction direction,
+			     struct dma_attrs *attrs)
+{
+	struct scatterlist *sg;
+	int i;
+
+	/* FIXME this part of code is untested */
+	for_each_sg(sgl, sg, nents, i) {
+		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
+		sg->dma_length = sg->length;
+		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
+							sg->length, direction);
+	}
+
+	return nents;
+}
+
+static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction,
+				struct dma_attrs *attrs)
+{
+}
+
+static int dma_direct_dma_supported(struct device *dev, u64 mask)
+{
+	return 1;
+}
+
+static inline dma_addr_t dma_direct_map_page(struct device *dev,
+					     struct page *page,
+					     unsigned long offset,
+					     size_t size,
+					     enum dma_data_direction direction,
+					     struct dma_attrs *attrs)
+{
+	__dma_sync_page(page_to_phys(page), offset, size, direction);
+	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+}
+
+static inline void dma_direct_unmap_page(struct device *dev,
+					 dma_addr_t dma_address,
+					 size_t size,
+					 enum dma_data_direction direction,
+					 struct dma_attrs *attrs)
+{
+/* There is not necessary to do cache cleanup
+ *
+ * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
+ * dma_address is physical address
+ */
+	__dma_sync_page(dma_address, 0 , size, direction);
+}
+
+struct dma_map_ops dma_direct_ops = {
+	.alloc_coherent	= dma_direct_alloc_coherent,
+	.free_coherent	= dma_direct_free_coherent,
+	.map_sg		= dma_direct_map_sg,
+	.unmap_sg	= dma_direct_unmap_sg,
+	.dma_supported	= dma_direct_dma_supported,
+	.map_page	= dma_direct_map_page,
+	.unmap_page	= dma_direct_unmap_page,
+};
+EXPORT_SYMBOL(dma_direct_ops);
+
+/* Number of entries preallocated for DMA-API debugging */
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init dma_init(void)
+{
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+	return 0;
+}
+fs_initcall(dma_init);
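The core of the new file is __dma_sync_page(): on a non-coherent MicroBlaze, a buffer headed to the device must be flushed (so the device sees what the CPU wrote back into RAM), and a buffer arriving from the device must be invalidated (so the CPU does not keep reading stale cached lines). A compilable userspace sketch of that direction dispatch, with the cache operations stubbed out (the names below are stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

enum dma_data_direction_sketch { DMA_TO_DEVICE, DMA_FROM_DEVICE };

static void flush_dcache_range_sketch(unsigned long s, unsigned long e)
{
	printf("flush      0x%lx..0x%lx (CPU wrote, device will read)\n", s, e);
}

static void invalidate_dcache_range_sketch(unsigned long s, unsigned long e)
{
	printf("invalidate 0x%lx..0x%lx (device wrote, CPU will read)\n", s, e);
}

static void dma_sync_sketch(unsigned long paddr, unsigned long offset,
			unsigned long size, enum dma_data_direction_sketch dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		flush_dcache_range_sketch(paddr + offset, paddr + offset + size);
		break;
	case DMA_FROM_DEVICE:
		invalidate_dcache_range_sketch(paddr + offset,
						paddr + offset + size);
		break;
	default:
		abort();	/* the kernel version calls BUG() here */
	}
}

int main(void)
{
	dma_sync_sketch(0x80000000UL, 0, 512, DMA_TO_DEVICE);
	dma_sync_sketch(0x80000000UL, 0, 512, DMA_FROM_DEVICE);
	return 0;
}

Coherent allocations sidestep this entirely: with NOT_COHERENT_CACHE defined, dma_direct_alloc_coherent() hands out an uncached mapping via consistent_alloc() instead of syncing per transfer.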
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 3bad4ff49471..c0ede25c5b99 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -305,7 +305,7 @@ C_ENTRY(_user_exception):
 	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
 	addi	r11, r0, 1;
 	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
-2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	/* get saved current */
+2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 	/* Save away the syscall number.  */
 	swi	r12, r1, PTO+PT_R0;
 	tovirt(r1,r1)
@@ -322,8 +322,7 @@ C_ENTRY(_user_exception):
 	rtid	r11, 0
 	nop
 3:
-	add	r11, r0, CURRENT_TASK	 /* Get current task ptr into r11 */
-	lwi	r11, r11, TS_THREAD_INFO /* get thread info */
+	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
 	lwi	r11, r11, TI_FLAGS	 /* get flags in thread info */
 	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
 	beqi	r11, 4f
@@ -382,60 +381,50 @@ C_ENTRY(ret_from_trap):
 	/* See if returning to kernel mode, if so, skip resched &c.  */
 	bnei	r11, 2f;
 
+	swi	r3, r1, PTO + PT_R3
+	swi	r4, r1, PTO + PT_R4
+
 	/* We're returning to user mode, so check for various conditions that
 	 * trigger rescheduling. */
-	# FIXME: Restructure all these flag checks.
-	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
-	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	/* FIXME: Restructure all these flag checks. */
+	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
 	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
 	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
 	beqi	r11, 1f
 
-	swi	r3, r1, PTO + PT_R3
-	swi	r4, r1, PTO + PT_R4
 	brlid	r15, do_syscall_trace_leave
 	addik	r5, r1, PTO + PT_R0
-	lwi	r3, r1, PTO + PT_R3
-	lwi	r4, r1, PTO + PT_R4
 1:
-
 	/* We're returning to user mode, so check for various conditions that
 	 * trigger rescheduling. */
-	/* Get current task ptr into r11 */
-	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
-	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	/* get thread info from current task */
+	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
 	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
 	andi	r11, r11, _TIF_NEED_RESCHED;
 	beqi	r11, 5f;
 
-	swi	r3, r1, PTO + PT_R3;	/* store syscall result */
-	swi	r4, r1, PTO + PT_R4;
 	bralid	r15, schedule;	/* Call scheduler */
 	nop;				/* delay slot */
-	lwi	r3, r1, PTO + PT_R3;	/* restore syscall result */
-	lwi	r4, r1, PTO + PT_R4;
 
 	/* Maybe handle a signal */
-5:	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
-	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+5:	/* get thread info from current task*/
+	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
 	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
 	andi	r11, r11, _TIF_SIGPENDING;
 	beqi	r11, 1f;		/* Signals to handle, handle them */
 
-	swi	r3, r1, PTO + PT_R3;	/* store syscall result */
-	swi	r4, r1, PTO + PT_R4;
 	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
-	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
 	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
 	bralid	r15, do_signal;	/* Handle any signals */
-	nop;
+	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
+
+/* Finally, return to user state.  */
+1:
 	lwi	r3, r1, PTO + PT_R3;	/* restore syscall result */
 	lwi	r4, r1, PTO + PT_R4;
 
-/* Finally, return to user state.  */
-1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
-	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
-	swi	r11, r0, PER_CPU(CURRENT_SAVE);	/* save current */
+	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
+	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);	/* save current */
 	VM_OFF;
 	tophys(r1,r1);
 	RESTORE_REGS;
@@ -565,7 +554,7 @@ C_ENTRY(sys_rt_sigreturn_wrapper):
 	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */	\
 	addi	r11, r0, 1;					\
 	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
-2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
+2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	\
 	/* Save away the syscall number.  */	\
 	swi	r0, r1, PTO+PT_R0;		\
 	tovirt(r1,r1)
@@ -673,9 +662,7 @@ C_ENTRY(ret_from_exc):
 
 	/* We're returning to user mode, so check for various conditions that
 	   trigger rescheduling. */
-	/* Get current task ptr into r11 */
-	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
-	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
 	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
 	andi	r11, r11, _TIF_NEED_RESCHED;
 	beqi	r11, 5f;
@@ -685,8 +672,7 @@ C_ENTRY(ret_from_exc):
 	nop;				/* delay slot */
 
 	/* Maybe handle a signal */
-5:	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
-	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
 	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
 	andi	r11, r11, _TIF_SIGPENDING;
 	beqi	r11, 1f;		/* Signals to handle, handle them */
@@ -705,15 +691,13 @@ C_ENTRY(ret_from_exc):
 	 * store return registers separately because this macros is use
 	 * for others exceptions */
 	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
-	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
 	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
 	bralid	r15, do_signal;	/* Handle any signals */
-	nop;
+	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
 
 /* Finally, return to user state.  */
 1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
-	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
-	swi	r11, r0, PER_CPU(CURRENT_SAVE);	/* save current */
+	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
 	VM_OFF;
 	tophys(r1,r1);
 
@@ -802,7 +786,7 @@ C_ENTRY(_interrupt):
 	swi	r11, r0, TOPHYS(PER_CPU(KM));
 
 2:
-	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 	swi	r0, r1, PTO + PT_R0;
 	tovirt(r1,r1)
 	la	r5, r1, PTO;
@@ -817,8 +801,7 @@ ret_from_irq:
 	lwi	r11, r1, PTO + PT_MODE;
 	bnei	r11, 2f;
 
-	add	r11, r0, CURRENT_TASK;
-	lwi	r11, r11, TS_THREAD_INFO;
+	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
 	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
 	andi	r11, r11, _TIF_NEED_RESCHED;
 	beqi	r11, 5f
@@ -826,8 +809,7 @@ ret_from_irq:
 	nop; /* delay slot */
 
 	/* Maybe handle a signal */
-5:	add	r11, r0, CURRENT_TASK;
-	lwi	r11, r11, TS_THREAD_INFO;	/* MS: get thread info */
+5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* MS: get thread info */
 	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
 	andi	r11, r11, _TIF_SIGPENDING;
 	beqid	r11, no_intr_resched
@@ -842,8 +824,7 @@ no_intr_resched:
 	/* Disable interrupts, we are now committed to the state restore */
 	disable_irq
 	swi	r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
-	add	r11, r0, CURRENT_TASK;
-	swi	r11, r0, PER_CPU(CURRENT_SAVE);
+	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
 	VM_OFF;
 	tophys(r1,r1);
 	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
@@ -853,7 +834,28 @@ no_intr_resched:
 	lwi	r1, r1, PT_R1 - PT_SIZE;
 	bri	6f;
 /* MS: Return to kernel state. */
-2:	VM_OFF /* MS: turn off MMU */
+2:
+#ifdef CONFIG_PREEMPT
+	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
+	/* MS: get preempt_count from thread info */
+	lwi	r5, r11, TI_PREEMPT_COUNT;
+	bgti	r5, restore;
+
+	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
+	andi	r5, r5, _TIF_NEED_RESCHED;
+	beqi	r5, restore /* if zero jump over */
+
+preempt:
+	/* interrupts are off that's why I am calling preempt_chedule_irq */
+	bralid	r15, preempt_schedule_irq
+	nop
+	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
+	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
+	andi	r5, r5, _TIF_NEED_RESCHED;
+	bnei	r5, preempt /* if non zero jump to resched */
+restore:
+#endif
+	VM_OFF /* MS: turn off MMU */
 	tophys(r1,r1)
 	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
 	lwi	r4, r1, PTO + PT_R4;
@@ -915,7 +917,7 @@ C_ENTRY(_debug_exception):
 	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */
 	addi	r11, r0, 1;
 	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
-2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 	/* Save away the syscall number.  */
 	swi	r0, r1, PTO+PT_R0;
 	tovirt(r1,r1)
@@ -935,8 +937,7 @@ dbtrap_call:	rtbd	r11, 0;
 	bnei	r11, 2f;
 
 	/* Get current task ptr into r11 */
-	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
-	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
 	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
 	andi	r11, r11, _TIF_NEED_RESCHED;
 	beqi	r11, 5f;
@@ -949,8 +950,7 @@ dbtrap_call:	rtbd	r11, 0;
 	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here.  */
 
 	/* Maybe handle a signal */
-5:	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
-	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
 	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
 	andi	r11, r11, _TIF_SIGPENDING;
 	beqi	r11, 1f;		/* Signals to handle, handle them */
@@ -966,16 +966,14 @@ dbtrap_call:	rtbd	r11, 0;
 	   (in a possibly modified form) after do_signal returns.  */
 
 	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
-	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
 	addi	r7, r0, 0;	/* Arg 3: int in_syscall */
 	bralid	r15, do_signal;	/* Handle any signals */
-	nop;
+	add	r6, r0, r0;	/* Arg 2: sigset_t *oldset */
 
 
 /* Finally, return to user state.  */
 1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
-	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
-	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
 	VM_OFF;
 	tophys(r1,r1);
 
@@ -1007,7 +1005,7 @@ DBTRAP_return:	/* Make global symbol for debugging */
 
 ENTRY(_switch_to)
 	/* prepare return value */
-	addk	r3, r0, r31
+	addk	r3, r0, CURRENT_TASK
 
 	/* save registers in cpu_context */
 	/* use r11 and r12, volatile registers, as temp register */
@@ -1051,10 +1049,10 @@ ENTRY(_switch_to)
 	nop
 	swi	r12, r11, CC_FSR
 
-	/* update r31, the current */
-	lwi	r31, r6, TI_TASK/* give me pointer to task which will be next */
+	/* update r31, the current-give me pointer to task which will be next */
+	lwi	CURRENT_TASK, r6, TI_TASK
 	/* stored it to current_save too */
-	swi	r31, r0, PER_CPU(CURRENT_SAVE)
+	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
 
 	/* get new process' cpu context and restore */
 	/* give me start where start context of next task */
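Two themes run through the entry.S changes: r31 is consistently referred to by its symbolic alias CURRENT_TASK, and the new CONFIG_PREEMPT block in ret_from_irq adds the standard kernel-preemption exit check — when returning to kernel context, preempt only if preempt_count is zero and TIF_NEED_RESCHED is set, then keep calling preempt_schedule_irq() until the resched flag stays clear. A C rendering of that control flow, useful for following the branch structure (the struct and helpers below are illustrative stand-ins for thread_info and preempt_schedule_irq, not kernel API):

#include <stdbool.h>
#include <stdio.h>

struct thread_info_sketch {
	int preempt_count;	/* TI_PREEMPT_COUNT */
	bool need_resched;	/* the _TIF_NEED_RESCHED bit of TI_FLAGS */
};

/* stand-in for preempt_schedule_irq(): runs another task; the real one may
 * return with need_resched set again, which is why the caller loops */
static void preempt_schedule_irq_sketch(struct thread_info_sketch *ti)
{
	printf("switching tasks with interrupts off\n");
	ti->need_resched = false;	/* assume the resched was absorbed */
}

static void irq_exit_to_kernel(struct thread_info_sketch *ti)
{
	if (ti->preempt_count > 0)	/* bgti r5, restore */
		goto restore;
	if (!ti->need_resched)		/* beqi r5, restore */
		goto restore;

	do {				/* the preempt: ... bnei loop */
		preempt_schedule_irq_sketch(ti);
	} while (ti->need_resched);

restore:
	printf("restore registers, return to interrupted kernel code\n");
}

int main(void)
{
	struct thread_info_sketch ti = { 0, true };
	irq_exit_to_kernel(&ti);
	return 0;
}

The TI_PREEMPT_COUNT offset this code reads is exactly what the asm-offsets.c hunk earlier in this commit exports.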
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 30916193fcc7..cb7815cfe5ab 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -99,8 +99,8 @@ no_fdt_arg:
 	tophys(r4,r4)			/* convert to phys address */
 	ori	r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
 _copy_command_line:
-	lbu	r7, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */
-	sb	r7, r4, r6		/* addr[r4+r6]= r7*/
+	lbu	r2, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */
+	sb	r2, r4, r6		/* addr[r4+r6]= r7*/
 	addik	r6, r6, 1		/* increment counting */
 	bgtid	r3, _copy_command_line	/* loop for all entries       */
 	addik	r3, r3, -1		/* descrement loop */
@@ -136,6 +136,11 @@ _invalidate:
 	addik	r3, r3, -1
 	/* sync */
 
+	/* Setup the kernel PID */
+	mts	rpid,r0 /* Load the kernel PID */
+	nop
+	bri	4
+
 /*
  * We should still be executing code at physical address area
  * RAM_BASEADDR at this point.  However, kernel code is at
@@ -146,10 +151,6 @@ _invalidate:
 	addik	r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
 	tophys(r4,r3)			/* Load the kernel physical address */
 
-	mts	rpid,r0 /* Load the kernel PID */
-	nop
-	bri	4
-
 /*
  * Configure and load two entries into TLB slots 0 and 1.
  * In case we are pinning TLBs, these are reserved in by the
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 0f06034d1fe0..6f39e2c001f3 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -93,3 +93,18 @@ skip:
 	}
 	return 0;
 }
+
+/* MS: There is no any advance mapping mechanism. We are using simple 32bit
+  intc without any cascades or any connection that's why mapping is 1:1 */
+unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq)
+{
+	return hwirq;
+}
+EXPORT_SYMBOL_GPL(irq_create_mapping);
+
+unsigned int irq_create_of_mapping(struct device_node *controller,
+					u32 *intspec, unsigned int intsize)
+{
+	return intspec[0];
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
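Because the platform has one flat 32-bit intc with no cascades, both mapping hooks are identity functions: the hardware IRQ number from a device-tree "interrupts" property doubles as the Linux IRQ number, with no radix tree or reverse map as on powerpc. A trivial sketch of what a device-tree consumer sees (illustrative types, not the kernel API):

#include <stdio.h>

typedef unsigned long irq_hw_number_sketch_t;

static unsigned int irq_create_mapping_sketch(void *host,
					irq_hw_number_sketch_t hwirq)
{
	return hwirq;	/* identity: hwirq == virq */
}

int main(void)
{
	unsigned int intspec[] = { 3 };	/* first cell of "interrupts" */

	printf("hwirq 3 -> virq %u\n",
		irq_create_mapping_sketch(NULL, intspec[0]));
	return 0;
}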
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index bb8c4b9ccb80..f974ec7aa357 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -22,7 +22,10 @@
 #include <linux/io.h>
 #include <linux/bug.h>
 #include <linux/param.h>
+#include <linux/pci.h>
 #include <linux/cache.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
 #include <asm/cacheflush.h>
 #include <asm/entry.h>
 #include <asm/cpuinfo.h>
@@ -54,14 +57,10 @@ void __init setup_arch(char **cmdline_p)
 
 	microblaze_cache_init();
 
-	invalidate_dcache();
-	enable_dcache();
-
-	invalidate_icache();
-	enable_icache();
-
 	setup_memory();
 
+	xilinx_pci_init();
+
 #if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER)
 	printk(KERN_NOTICE "Self modified code enable\n");
 #endif
@@ -188,3 +187,37 @@ static int microblaze_debugfs_init(void)
 }
 arch_initcall(microblaze_debugfs_init);
 #endif
+
+static int dflt_bus_notify(struct notifier_block *nb,
+				unsigned long action, void *data)
+{
+	struct device *dev = data;
+
+	/* We are only intereted in device addition */
+	if (action != BUS_NOTIFY_ADD_DEVICE)
+		return 0;
+
+	set_dma_ops(dev, &dma_direct_ops);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block dflt_plat_bus_notifier = {
+	.notifier_call = dflt_bus_notify,
+	.priority = INT_MAX,
+};
+
+static struct notifier_block dflt_of_bus_notifier = {
+	.notifier_call = dflt_bus_notify,
+	.priority = INT_MAX,
+};
+
+static int __init setup_bus_notifier(void)
+{
+	bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier);
+	bus_register_notifier(&of_platform_bus_type, &dflt_of_bus_notifier);
+
+	return 0;
+}
+
+arch_initcall(setup_bus_notifier);
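The setup.c hunks also move the cache invalidate/enable calls out of setup_arch() and into the tail of microblaze_cache_init() (see the cache.c hunk above), so the caches come up as part of cache setup itself. The bus notifier is what attaches dma_direct_ops to every newly added platform/OF device without per-driver wiring. A minimal userspace sketch of that pattern, assuming stand-in types for notifier_block and dma_map_ops:

#include <stdio.h>

enum { BUS_NOTIFY_ADD_DEVICE = 1, BUS_NOTIFY_DEL_DEVICE = 2 };

struct dma_ops_sketch { const char *name; };
static struct dma_ops_sketch dma_direct_ops_sketch = { "dma_direct_ops" };

struct device_sketch { struct dma_ops_sketch *dma_ops; };

/* mirrors dflt_bus_notify(): only device addition installs the default ops */
static int dflt_bus_notify_sketch(unsigned long action, void *data)
{
	struct device_sketch *dev = data;

	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	dev->dma_ops = &dma_direct_ops_sketch;	/* set_dma_ops(dev, ...) */
	return 1;				/* NOTIFY_DONE */
}

int main(void)
{
	struct device_sketch dev = { NULL };

	dflt_bus_notify_sketch(BUS_NOTIFY_ADD_DEVICE, &dev);
	printf("new device uses %s\n", dev.dma_ops->name);
	return 0;
}

Registering the same callback on both platform_bus_type and of_platform_bus_type, at INT_MAX priority, ensures the default ops are in place before any lower-priority notifier or driver probe runs.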