Diffstat (limited to 'arch/m68k/mm/memory.c')
-rw-r--r--  arch/m68k/mm/memory.c | 104 ----------
1 file changed, 0 insertions(+), 104 deletions(-)
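For context on what is being removed: virt_to_phys_slow() below resolves a kernel virtual address to a physical one by probing the MMU directly, with one strategy per CPU generation: PLPAR on the 68060, PTESTR plus a read of MMUSR on the 68040, and a PTESTR/PMOVE table search on the older MMUs. In the last case the physical address is assembled from the descriptor found by the search plus the untranslated low bits of the virtual address, with the mask width depending on the table level at which the search stopped. A minimal user-space sketch of just that composition step, assuming 4 KiB pages; compose_paddr() and the sample values are hypothetical, not kernel code:

#include <stdio.h>

#define PAGE_MASK 0xfffff000UL	/* assumes 4 KiB pages, as on m68k */

/*
 * Compose a physical address from the descriptor returned by the
 * table search, mirroring the switch on MMU_NUM in virt_to_phys_slow().
 * 'level' is the table level where the search ended: levels 1 and 2
 * are early-termination descriptors, level 3 an ordinary page entry.
 */
static unsigned long compose_paddr(unsigned long desc, unsigned long vaddr,
				   int level)
{
	switch (level) {
	case 1:	/* one entry maps a 32 MiB region */
		return (desc & 0xfe000000UL) | (vaddr & 0x01ffffffUL);
	case 2:	/* one entry maps a 256 KiB region */
		return (desc & 0xfffc0000UL) | (vaddr & 0x0003ffffUL);
	case 3:	/* one entry maps a single page */
		return (desc & PAGE_MASK) | (vaddr & ~PAGE_MASK);
	}
	return 0;	/* the original also returns 0 on failed lookups */
}

int main(void)
{
	/* hypothetical descriptor and virtual address, for illustration */
	unsigned long desc  = 0x02000761UL;
	unsigned long vaddr = 0xc0003abcUL;

	/* level 3: frame 0x02000000 from desc, offset 0xabc from vaddr */
	printf("%#lx\n", compose_paddr(desc, vaddr, 3));
	return 0;
}

The wide offset masks at levels 1 and 2 reflect early-termination descriptors, where a single entry covers a whole 32 MiB or 256 KiB region rather than one page.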
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 1453a6013721..559942ce0e1e 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -354,110 +354,6 @@ void cache_push (unsigned long paddr, int len)
 #endif
 }
 
-static unsigned long virt_to_phys_slow(unsigned long vaddr)
-{
-	if (CPU_IS_060) {
-		mm_segment_t fs = get_fs();
-		unsigned long paddr;
-
-		set_fs(get_ds());
-
-		/* The PLPAR instruction causes an access error if the translation
-		 * is not possible. To catch this we use the same exception mechanism
-		 * as for user space accesses in <asm/uaccess.h>. */
-		asm volatile (".chip 68060\n"
-			      "1: plpar (%0)\n"
-			      ".chip 68k\n"
-			      "2:\n"
-			      ".section .fixup,\"ax\"\n"
-			      "   .even\n"
-			      "3: sub.l %0,%0\n"
-			      "   jra 2b\n"
-			      ".previous\n"
-			      ".section __ex_table,\"a\"\n"
-			      "   .align 4\n"
-			      "   .long 1b,3b\n"
-			      ".previous"
-			      : "=a" (paddr)
-			      : "0" (vaddr));
-		set_fs(fs);
-		return paddr;
-	} else if (CPU_IS_040) {
-		mm_segment_t fs = get_fs();
-		unsigned long mmusr;
-
-		set_fs(get_ds());
-
-		asm volatile (".chip 68040\n\t"
-			      "ptestr (%1)\n\t"
-			      "movec %%mmusr, %0\n\t"
-			      ".chip 68k"
-			      : "=r" (mmusr)
-			      : "a" (vaddr));
-		set_fs(fs);
-
-		if (mmusr & MMU_R_040)
-			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
-	} else {
-		unsigned short mmusr;
-		unsigned long *descaddr;
-
-		asm volatile ("ptestr #5,%2@,#7,%0\n\t"
-			      "pmove %%psr,%1@"
-			      : "=a&" (descaddr)
-			      : "a" (&mmusr), "a" (vaddr));
-		if (mmusr & (MMU_I|MMU_B|MMU_L))
-			return 0;
-		descaddr = phys_to_virt((unsigned long)descaddr);
-		switch (mmusr & MMU_NUM) {
-		case 1:
-			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
-		case 2:
-			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
-		case 3:
-			return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
-		}
-	}
-	return 0;
-}
-
-/* Push n pages at kernel virtual address and clear the icache */
-/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
-void flush_icache_range(unsigned long address, unsigned long endaddr)
-{
-	if (CPU_IS_040_OR_060) {
-		address &= PAGE_MASK;
-
-		if (address >= PAGE_OFFSET && address < (unsigned long)high_memory) {
-			do {
-				asm volatile ("nop\n\t"
-					      ".chip 68040\n\t"
-					      "cpushp %%bc,(%0)\n\t"
-					      ".chip 68k"
-					      : : "a" (virt_to_phys((void *)address)));
-				address += PAGE_SIZE;
-			} while (address < endaddr);
-		} else {
-			do {
-				asm volatile ("nop\n\t"
-					      ".chip 68040\n\t"
-					      "cpushp %%bc,(%0)\n\t"
-					      ".chip 68k"
-					      : : "a" (virt_to_phys_slow(address)));
-				address += PAGE_SIZE;
-			} while (address < endaddr);
-		}
-	} else {
-		unsigned long tmp;
-		asm volatile ("movec %%cacr,%0\n\t"
-			      "orw %1,%0\n\t"
-			      "movec %0,%%cacr"
-			      : "=&d" (tmp)
-			      : "di" (FLUSH_I));
-	}
-}
-
-
 #ifndef CONFIG_SINGLE_MEMORY_CHUNK
 int mm_end_of_chunk (unsigned long addr, int len)
 {
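A closing note on the removed flush_icache_range(): on the 68040/68060 it rounds the start address down to a page boundary and issues one cpushp %bc per page, translating each page to a physical address first (virt_to_phys() for the directly mapped kernel range between PAGE_OFFSET and high_memory, virt_to_phys_slow() for everything else); on earlier CPUs it simply sets FLUSH_I in CACR to invalidate the whole instruction cache. The page-walk logic on its own can be sketched in plain C; walk_pages() and the sample range are hypothetical stand-ins for the cpushp loop, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/*
 * Page walk of the removed flush_icache_range(): round the start down
 * to a page boundary, then visit every page overlapping the range.
 * The real code issues cpushp %bc on each page's physical address;
 * this stand-in just prints the pages it would flush.
 */
static void walk_pages(unsigned long address, unsigned long endaddr)
{
	address &= PAGE_MASK;
	do {
		printf("flush page %#lx\n", address);
		address += PAGE_SIZE;
	} while (address < endaddr);
}

int main(void)
{
	/* hypothetical range crossing three pages:
	 * prints 0x10000, 0x11000, 0x12000 */
	walk_pages(0x10fff, 0x12001);
	return 0;
}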