author	David S. Miller <davem@davemloft.net>	2006-01-31 21:31:20 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 04:11:17 -0500
commit	98c5584cfc47932c4f3ccf5eee2e0bae1447b85e (patch)
tree	c067ac8bfc081bbe0b3073374cb15708458e04ab /include/asm-sparc64/mmu.h
parent	09f94287f7260e03bbeab497e743691fafcc22c3 (diff)
[SPARC64]: Add infrastructure for dynamic TSB sizing.
This also cleans up tsb_context_switch(). The assembler routine is now __tsb_context_switch(), and the former is an inline function that picks the relevant bits out of the mm_struct and passes them into the assembler code as arguments.

setup_tsb_parms() computes the locked TLB entry used to map the TSB. Later, when we support the physical-address quad load instructions of Cheetah+ and later chips, we'll simply use the physical address for the TSB register value and set both the map virtual address and the map PTE to zero.

Signed-off-by: David S. Miller <davem@davemloft.net>
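A minimal sketch of the inline wrapper described above, assuming a __pa(mm->pgd) first argument and this particular parameter order (neither is shown in this diff; only the mm_context_t fields below are):

extern void __tsb_context_switch(unsigned long pgd_pa,
				 unsigned long tsb_reg,
				 unsigned long tsb_vaddr,
				 unsigned long tsb_pte);

/* Pick the TSB fields out of mm->context and hand them to the
 * assembler entry point as plain arguments.
 */
static inline void tsb_context_switch(struct mm_struct *mm)
{
	__tsb_context_switch(__pa(mm->pgd),
			     mm->context.tsb_reg_val,
			     mm->context.tsb_map_vaddr,
			     mm->context.tsb_map_pte);
}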
Diffstat (limited to 'include/asm-sparc64/mmu.h')
 include/asm-sparc64/mmu.h | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 36384cf7faa6..2effeba2476c 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -90,9 +90,20 @@
 
 #ifndef __ASSEMBLY__
 
+#define TSB_ENTRY_ALIGNMENT	16
+
+struct tsb {
+	unsigned long tag;
+	unsigned long pte;
+} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
+
 typedef struct {
 	unsigned long sparc64_ctx_val;
-	unsigned long *sparc64_tsb;
+	struct tsb *tsb;
+	unsigned long tsb_nentries;
+	unsigned long tsb_reg_val;
+	unsigned long tsb_map_vaddr;
+	unsigned long tsb_map_pte;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
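For context, a hedged sketch of how a lookup into a TSB laid out this way might go. The helper name tsb_probe(), the 8K-page hash shift of 13, and the tag encoding are illustrative assumptions, not part of this diff; the sketch relies only on the struct tsb entries and the tsb_nentries field added above, with tsb_nentries assumed to be a power of two so the index can be formed by masking.

/* Illustrative only: probe a software TSB built from the 16-byte
 * tag/pte entries declared above.  Assumes tsb_nentries is a power
 * of two and that `tag` is whatever the TLB miss handler would
 * compare against (both are assumptions, not taken from this diff).
 */
static inline struct tsb *tsb_probe(mm_context_t *ctx,
				    unsigned long vaddr, unsigned long tag)
{
	unsigned long hash = (vaddr >> 13) & (ctx->tsb_nentries - 1);
	struct tsb *ent = &ctx->tsb[hash];

	return (ent->tag == tag) ? ent : NULL;
}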