author    David S. Miller <davem@sunset.davemloft.net>  2007-03-15 18:50:11 -0400
committer David S. Miller <davem@sunset.davemloft.net>  2007-04-26 04:55:27 -0400
commit    a94aa2530643f02a4b243f81b5f6354b9b958d7e (patch)
tree      37e45a8b15f79f9116c94a9e6a790c25f408a4a5
parent    4be5c34dc47b5a9e6f91c8f5937a93c464870b8e (diff)
[SPARC64]: Kill kvaddr_to_phys() and friends.

Just inline it into flush_icache_range() which is the only user.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  arch/sparc64/mm/init.c | 91
 1 file changed, 28 insertions(+), 63 deletions(-)
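For orientation before the diff: the translation that this patch open-codes inside the Spitfire-only flush loop splits into two cases, a plain mask for addresses in the kernel's linear mapping and a page-table walk for everything else. The standalone sketch below mirrors only that shape; MOCK_PAGE_OFFSET, MOCK_PADDR_MASK and mock_pte_lookup() are illustrative stand-ins, not the real sparc64 definitions (_PAGE_PADDR_4U, PAGE_OFFSET and the pgd/pud/pmd/pte walkers) that appear in the hunk itself.

/* Standalone sketch, not kernel code: the constants and the fake PTE
 * lookup are mock stand-ins for sparc64's PAGE_OFFSET, _PAGE_PADDR_4U
 * and the pgd/pud/pmd/pte walk shown in the diff below.
 */
#include <inttypes.h>
#include <stdio.h>

#define MOCK_PAGE_OFFSET 0xfffff80000000000ULL /* start of linear mapping (illustrative) */
#define MOCK_PADDR_MASK  0x000007ffffffe000ULL /* physical-address bits of a PTE (illustrative) */

/* Pretend page-table walk: maps the page identity-style for the demo. */
static uint64_t mock_pte_lookup(uint64_t vaddr)
{
        return vaddr & MOCK_PADDR_MASK;
}

/* Shape of the logic now open-coded in the I-cache flush loop:
 * linear-mapped kernel addresses are translated by masking alone,
 * anything else goes through the page tables.
 */
static uint64_t kvaddr_to_paddr_sketch(uint64_t kaddr)
{
        if (kaddr >= MOCK_PAGE_OFFSET)
                return kaddr & MOCK_PADDR_MASK;
        return mock_pte_lookup(kaddr);
}

int main(void)
{
        uint64_t linear = MOCK_PAGE_OFFSET + 0x12345000ULL;
        uint64_t walked = 0x0000000110002000ULL;

        printf("linear -> %#" PRIx64 "\n", kvaddr_to_paddr_sketch(linear));
        printf("walked -> %#" PRIx64 "\n", kvaddr_to_paddr_sketch(walked));
        return 0;
}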
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 3cacea5da6ce..fca253989e5a 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -392,75 +392,30 @@ out:
 	put_cpu();
 }
 
-struct linux_prom_translation {
-	unsigned long virt;
-	unsigned long size;
-	unsigned long data;
-};
-
-/* Exported for kernel TLB miss handling in ktlb.S */
-struct linux_prom_translation prom_trans[512] __read_mostly;
-unsigned int prom_trans_ents __read_mostly;
-
-/*
- * Translate PROM's mapping we capture at boot time into physical address.
- * The second parameter is only set from prom_callback() invocations.
- */
-static unsigned long prom_virt_to_phys(unsigned long promva)
-{
-	unsigned long mask;
-	int i;
-
-	mask = _PAGE_PADDR_4U;
-	if (tlb_type == hypervisor)
-		mask = _PAGE_PADDR_4V;
-
-	for (i = 0; i < prom_trans_ents; i++) {
-		struct linux_prom_translation *p = &prom_trans[i];
-
-		if (promva >= p->virt &&
-		    promva < (p->virt + p->size)) {
-			unsigned long base = p->data & mask;
-
-			return base + (promva & (8192 - 1));
-		}
-	}
-	return 0UL;
-}
-
-static unsigned long kvaddr_to_phys(unsigned long addr)
-{
-	pgd_t *pgdp;
-	pud_t *pudp;
-	pmd_t *pmdp;
-	pte_t *ptep;
-	unsigned long mask = _PAGE_PADDR_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_PADDR_4V;
-
-	if (addr >= PAGE_OFFSET)
-		return addr & mask;
-
-	if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
-		return prom_virt_to_phys(addr);
-
-	pgdp = pgd_offset_k(addr);
-	pudp = pud_offset(pgdp, addr);
-	pmdp = pmd_offset(pudp, addr);
-	ptep = pte_offset_kernel(pmdp, addr);
-
-	return pte_val(*ptep) & mask;
-}
-
 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
 {
 	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
 	if (tlb_type == spitfire) {
 		unsigned long kaddr;
 
-		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
-			__flush_icache_page(kvaddr_to_phys(kaddr));
+		/* This code only runs on Spitfire cpus so this is
+		 * why we can assume _PAGE_PADDR_4U.
+		 */
+		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
+			unsigned long paddr, mask = _PAGE_PADDR_4U;
+
+			if (kaddr >= PAGE_OFFSET)
+				paddr = kaddr & mask;
+			else {
+				pgd_t *pgdp = pgd_offset_k(kaddr);
+				pud_t *pudp = pud_offset(pgdp, kaddr);
+				pmd_t *pmdp = pmd_offset(pudp, kaddr);
+				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
+
+				paddr = pte_val(*ptep) & mask;
+			}
+			__flush_icache_page(paddr);
+		}
 	}
 }
 
@@ -497,6 +452,16 @@ void mmu_info(struct seq_file *m)
 #endif /* CONFIG_DEBUG_DCFLUSH */
 }
 
+struct linux_prom_translation {
+	unsigned long virt;
+	unsigned long size;
+	unsigned long data;
+};
+
+/* Exported for kernel TLB miss handling in ktlb.S */
+struct linux_prom_translation prom_trans[512] __read_mostly;
+unsigned int prom_trans_ents __read_mostly;
+
 /* Exported for SMP bootup purposes. */
 unsigned long kern_locked_tte_data;
 