path: root/include/asm-sparc64/mmu_context.h
author     David S. Miller <davem@davemloft.net>          2006-01-31 21:29:18 -0500
committer  David S. Miller <davem@sunset.davemloft.net>   2006-03-20 04:11:13 -0500
commit     74bf4312fff083ab25c3f357cc653ada7995e5f6 (patch)
tree       c23dea461e32485f4cd7ca4b8c33c632655eb906 /include/asm-sparc64/mmu_context.h
parent     30d4d1ffed7098afe2641536d67eef150499da02 (diff)
[SPARC64]: Move away from virtual page tables, part 1.
We now use the TSB hardware assist features of the UltraSPARC MMUs.

SMP is currently knowingly broken; we need to find another place to store the per-cpu base pointers. We hid them away in the TSB base register, and that obviously will not work any more :-) Another known broken case is non-8KB base page size.

Also noticed that flush_tlb_all() is not referenced anywhere; only the internal __flush_tlb_all() (local cpu only) is used by the sparc64 port, so we can get rid of flush_tlb_all().

The kernel gets its own 8KB TSB (swapper_tsb) and each address space gets its own private 8KB TSB. Later we can add code to dynamically increase the size of the per-process TSB as the RSS grows. An 8KB TSB is good enough for up to about a 4MB RSS, after which the TSB starts to incur many capacity and conflict misses. We even accumulate OBP translations into the kernel TSB.

Another area for refinement is large page size support. We could use a secondary address space TSB to handle those.

Signed-off-by: David S. Miller <davem@davemloft.net>
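The ~4MB figure above follows directly from the TSB geometry. Below is a minimal stand-alone sketch of that arithmetic; it assumes the usual UltraSPARC TSB entry size of 16 bytes (8-byte tag plus 8-byte TTE), which the patch itself does not spell out:

/*
 * Back-of-the-envelope check of the "8KB TSB covers ~4MB RSS" claim.
 * The 16-byte entry size is an assumption, not stated in this commit.
 */
#include <stdio.h>

int main(void)
{
	unsigned long tsb_bytes   = 8 * 1024;  /* one page, as allocated by init_new_context() */
	unsigned long entry_bytes = 16;        /* assumed: 8-byte tag + 8-byte data */
	unsigned long page_bytes  = 8 * 1024;  /* 8KB base page size */

	unsigned long entries  = tsb_bytes / entry_bytes;  /* 512 */
	unsigned long coverage = entries * page_bytes;     /* 4MB */

	printf("%lu entries, covering %lu KB of 8KB pages\n",
	       entries, coverage / 1024);
	return 0;
}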
Diffstat (limited to 'include/asm-sparc64/mmu_context.h')
-rw-r--r--  include/asm-sparc64/mmu_context.h  |  46
1 file changed, 13 insertions(+), 33 deletions(-)
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 57ee7b306189..34640a370ab4 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -25,7 +25,13 @@ extern void get_new_mmu_context(struct mm_struct *mm);
  * This just needs to set mm->context to an invalid context.
  */
 #define init_new_context(__tsk, __mm)	\
-	(((__mm)->context.sparc64_ctx_val = 0UL), 0)
+({	unsigned long __pg = get_zeroed_page(GFP_KERNEL); \
+	(__mm)->context.sparc64_ctx_val = 0UL; \
+	(__mm)->context.sparc64_tsb = \
+	  (unsigned long *) __pg; \
+	(__pg ? 0 : -ENOMEM); \
+})
+
 
 /* Destroy a dead context.  This occurs when mmput drops the
  * mm_users count to zero, the mmaps have been released, and
@@ -35,7 +41,8 @@ extern void get_new_mmu_context(struct mm_struct *mm);
  * this task if valid.
  */
 #define destroy_context(__mm) \
-do {	spin_lock(&ctx_alloc_lock); \
+do {	free_page((unsigned long)(__mm)->context.sparc64_tsb); \
+	spin_lock(&ctx_alloc_lock); \
 	if (CTX_VALID((__mm)->context)) { \
 		unsigned long nr = CTX_NRBITS((__mm)->context); \
 		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); \
@@ -43,35 +50,7 @@ do {	spin_lock(&ctx_alloc_lock); \
 	spin_unlock(&ctx_alloc_lock); \
 } while(0)
 
-/* Reload the two core values used by TLB miss handler
- * processing on sparc64.  They are:
- * 1) The physical address of mm->pgd, when full page
- *    table walks are necessary, this is where the
- *    search begins.
- * 2) A "PGD cache".  For 32-bit tasks only pgd[0] is
- *    ever used since that maps the entire low 4GB
- *    completely.  To speed up TLB miss processing we
- *    make this value available to the handlers.  This
- *    decreases the amount of memory traffic incurred.
- */
-#define reload_tlbmiss_state(__tsk, __mm) \
-do { \
-	register unsigned long paddr asm("o5"); \
-	register unsigned long pgd_cache asm("o4"); \
-	paddr = __pa((__mm)->pgd); \
-	pgd_cache = 0UL; \
-	if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
-		pgd_cache = get_pgd_cache((__mm)->pgd); \
-	__asm__ __volatile__("wrpr	%%g0, 0x494, %%pstate\n\t" \
-			     "mov	%3, %%g4\n\t" \
-			     "mov	%0, %%g7\n\t" \
-			     "stxa	%1, [%%g4] %2\n\t" \
-			     "membar	#Sync\n\t" \
-			     "wrpr	%%g0, 0x096, %%pstate" \
-			     : /* no outputs */ \
-			     : "r" (paddr), "r" (pgd_cache),\
-			       "i" (ASI_DMMU), "i" (TSB_REG)); \
-} while(0)
+extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb);
 
 /* Set MMU context in the actual hardware. */
 #define load_secondary_context(__mm) \
@@ -101,7 +80,8 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 
 	if (!ctx_valid || (old_mm != mm)) {
 		load_secondary_context(mm);
-		reload_tlbmiss_state(tsk, mm);
+		tsb_context_switch(__pa(mm->pgd),
+				   mm->context.sparc64_tsb);
 	}
 
 	/* Even if (mm == old_mm) we _must_ check
@@ -139,7 +119,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-	reload_tlbmiss_state(current, mm);
+	tsb_context_switch(__pa(mm->pgd), mm->context.sparc64_tsb);
 }
 
 #endif /* !(__ASSEMBLY__) */
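To illustrate the capacity/conflict-miss behaviour the log mentions once the RSS outgrows the 8KB TSB, here is a minimal user-space model of a direct-mapped TSB lookup. The index function (low bits of the virtual page number) and the 512-entry size are assumptions for illustration only; the real miss handlers are sparc64 assembly and are not part of this header:

/*
 * Toy model of a direct-mapped TSB.  Two pages whose virtual page
 * numbers differ by a multiple of TSB_ENTRIES collide on one slot,
 * so working sets larger than TSB_ENTRIES * page size start evicting
 * live translations.
 */
#include <stdio.h>

#define PAGE_SHIFT	13		/* 8KB base pages */
#define TSB_ENTRIES	512		/* 8KB TSB / 16-byte entries (assumed) */

struct tsb_entry {
	unsigned long tag;		/* virtual page number of the cached translation */
	unsigned long tte;		/* translation data */
};

static struct tsb_entry tsb[TSB_ENTRIES];

static unsigned long tsb_index(unsigned long vaddr)
{
	return (vaddr >> PAGE_SHIFT) & (TSB_ENTRIES - 1);
}

static int tsb_lookup(unsigned long vaddr, unsigned long *tte)
{
	struct tsb_entry *ent = &tsb[tsb_index(vaddr)];

	if (ent->tag != (vaddr >> PAGE_SHIFT))
		return 0;		/* miss: fall back to a page-table walk */
	*tte = ent->tte;
	return 1;
}

static void tsb_insert(unsigned long vaddr, unsigned long tte)
{
	struct tsb_entry *ent = &tsb[tsb_index(vaddr)];

	ent->tag = vaddr >> PAGE_SHIFT;	/* silently evicts any collider */
	ent->tte = tte;
}

int main(void)
{
	unsigned long tte;

	/* Two addresses 4MB apart map to the same slot and evict each other. */
	tsb_insert(0x0000000UL, 0x1111UL);
	tsb_insert(0x0400000UL, 0x2222UL);	/* 4MB = 512 * 8KB away */

	printf("lookup 0x0000000: %s\n",
	       tsb_lookup(0x0000000UL, &tte) ? "hit" : "miss (evicted)");
	printf("lookup 0x0400000: %s\n",
	       tsb_lookup(0x0400000UL, &tte) ? "hit" : "miss");
	return 0;
}

Two pages 4MB apart (512 x 8KB) share a slot in this model, which is exactly the point past which the commit message expects misses to climb and where a dynamically grown per-process TSB would pay off.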