about summary refs log tree commit diff stats
path: root/include/asm-sparc64/mmu.h
diff options
context:
space:
mode:
authorDavid S. Miller <davem@sunset.davemloft.net>2006-02-01 18:55:21 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2006-03-20 04:11:32 -0500
commit517af33237ecfc3c8a93b335365fa61e741ceca4 (patch)
tree58eff40eb4c517c4fd49fd347d38273ee1e1ee4b /include/asm-sparc64/mmu.h
parentb0fd4e49aea8a460afab7bc67cd618e2d19291d4 (diff)
[SPARC64]: Access TSB with physical addresses when possible.
This way we don't need to lock the TSB into the TLB. The trick is that every TSB load/store is registered into a special instruction patch section. The default uses virtual addresses, and the patch instructions use physical address load/stores. We can't do this on all chips because only cheetah+ and later have the physical variant of the atomic quad load. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/asm-sparc64/mmu.h')
-rw-r--r--include/asm-sparc64/mmu.h3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 18f98edfbcda..55e622711b96 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -97,7 +97,8 @@ struct tsb {
97 unsigned long pte; 97 unsigned long pte;
98} __attribute__((aligned(TSB_ENTRY_ALIGNMENT))); 98} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
99 99
100extern void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte); 100extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
101extern void tsb_flush(unsigned long ent, unsigned long tag);
101 102
102typedef struct { 103typedef struct {
103 unsigned long sparc64_ctx_val; 104 unsigned long sparc64_ctx_val;