about summary refs log tree commit diff stats
path: root/arch/sparc64/mm/init.c
diff options
context:
space:
mode:
author	David S. Miller <davem@sunset.davemloft.net>	2006-02-01 18:55:21 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 04:11:32 -0500
commit	517af33237ecfc3c8a93b335365fa61e741ceca4 (patch)
tree	58eff40eb4c517c4fd49fd347d38273ee1e1ee4b /arch/sparc64/mm/init.c
parent	b0fd4e49aea8a460afab7bc67cd618e2d19291d4 (diff)
[SPARC64]: Access TSB with physical addresses when possible.
This way we don't need to lock the TSB into the TLB. The trick is that every TSB load/store is registered into a special instruction patch section. The default uses virtual addresses, and the patch instructions use physical-address loads/stores.

We can't do this on all chips because only cheetah+ and later have the physical variant of the atomic quad load.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/mm/init.c')
-rw-r--r--	arch/sparc64/mm/init.c	| 32
1 file changed, 32 insertions(+), 0 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 2c21d85de78f..4893f3e2c336 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -39,6 +39,7 @@
39#include <asm/tlb.h> 39#include <asm/tlb.h>
40#include <asm/spitfire.h> 40#include <asm/spitfire.h>
41#include <asm/sections.h> 41#include <asm/sections.h>
42#include <asm/tsb.h>
42 43
43extern void device_scan(void); 44extern void device_scan(void);
44 45
@@ -244,6 +245,16 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
244 : "g1", "g7"); 245 : "g1", "g7");
245} 246}
246 247
248static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
249{
250 unsigned long tsb_addr = (unsigned long) ent;
251
252 if (tlb_type == cheetah_plus)
253 tsb_addr = __pa(tsb_addr);
254
255 __tsb_insert(tsb_addr, tag, pte);
256}
257
247void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) 258void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
248{ 259{
249 struct mm_struct *mm; 260 struct mm_struct *mm;
@@ -1040,6 +1051,24 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
1040 return ~0UL; 1051 return ~0UL;
1041} 1052}
1042 1053
1054static void __init tsb_phys_patch(void)
1055{
1056 struct tsb_phys_patch_entry *p;
1057
1058 p = &__tsb_phys_patch;
1059 while (p < &__tsb_phys_patch_end) {
1060 unsigned long addr = p->addr;
1061
1062 *(unsigned int *) addr = p->insn;
1063 wmb();
1064 __asm__ __volatile__("flush %0"
1065 : /* no outputs */
1066 : "r" (addr));
1067
1068 p++;
1069 }
1070}
1071
1043/* paging_init() sets up the page tables */ 1072/* paging_init() sets up the page tables */
1044 1073
1045extern void cheetah_ecache_flush_init(void); 1074extern void cheetah_ecache_flush_init(void);
@@ -1052,6 +1081,9 @@ void __init paging_init(void)
1052 unsigned long end_pfn, pages_avail, shift; 1081 unsigned long end_pfn, pages_avail, shift;
1053 unsigned long real_end, i; 1082 unsigned long real_end, i;
1054 1083
1084 if (tlb_type == cheetah_plus)
1085 tsb_phys_patch();
1086
1055 /* Find available physical memory... */ 1087 /* Find available physical memory... */
1056 read_obp_memory("available", &pavail[0], &pavail_ents); 1088 read_obp_memory("available", &pavail[0], &pavail_ents);
1057 1089