 arch/sparc64/kernel/tsb.S         | 11 +++++++++++
 arch/sparc64/mm/tsb.c             | 28 ++++++++++++++++++++++++++++
 include/asm-sparc64/mmu_context.h | 32 ++------------------------------
 include/asm-sparc64/pgtable.h     |  4 ++++
 4 files changed, 45 insertions(+), 30 deletions(-)
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 50752c518773..76f2c0b01f36 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -55,6 +55,17 @@ tsb_reload:
 	brgez,a,pn	%g5, tsb_do_fault
 	 stx		%g0, [%g1]
 
+	/* If it is larger than the base page size, don't
+	 * bother putting it into the TSB.
+	 */
+	srlx		%g5, 32, %g2
+	sethi		%hi(_PAGE_ALL_SZ_BITS >> 32), %g4
+	sethi		%hi(_PAGE_SZBITS >> 32), %g7
+	and		%g2, %g4, %g2
+	cmp		%g2, %g7
+	bne,a,pn	%xcc, tsb_tlb_reload
+	 stx		%g0, [%g1]
+
 	TSB_WRITE(%g1, %g5, %g6)
 
 	/* Finally, load TLB and return from trap. */
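The instructions added to tsb_reload skip the TSB insertion whenever the TTE's size field encodes anything other than the base (8K) page size, branching to tsb_tlb_reload instead. A rough C rendering of that test, using the _PAGE_ALL_SZ_BITS and _PAGE_SZBITS macros from pgtable.h, is sketched below; it is illustrative only and not part of the patch.

/* Illustrative sketch only: mirrors the srlx/and/cmp sequence above, but
 * applied to the whole 64-bit TTE instead of just its upper 32 bits.
 */
static inline int tte_is_base_page_size(unsigned long tte)
{
	/* Mask off every page-size encoding and check for the base (8K)
	 * one; anything else is a large page, which the TSB does not cache.
	 */
	return (tte & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS;
}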
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 15e8af58b1d2..2f84cef6c1b5 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -8,6 +8,7 @@
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
+#include <asm/mmu_context.h>
 
 #define TSB_ENTRY_ALIGNMENT	16
 
@@ -82,3 +83,30 @@ void flush_tsb_user(struct mmu_gather *mp)
 		}
 	}
 }
+
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	unsigned long page = get_zeroed_page(GFP_KERNEL);
+
+	mm->context.sparc64_ctx_val = 0UL;
+	if (unlikely(!page))
+		return -ENOMEM;
+
+	mm->context.sparc64_tsb = (unsigned long *) page;
+
+	return 0;
+}
+
+void destroy_context(struct mm_struct *mm)
+{
+	free_page((unsigned long) mm->context.sparc64_tsb);
+
+	spin_lock(&ctx_alloc_lock);
+
+	if (CTX_VALID(mm->context)) {
+		unsigned long nr = CTX_NRBITS(mm->context);
+		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
+	}
+
+	spin_unlock(&ctx_alloc_lock);
+}
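destroy_context() frees the TSB page and then releases this address space's MMU context ID by clearing its bit in mmu_context_bmap[], under ctx_alloc_lock. The bit addressing is ordinary 64-bit bitmap math; a small stand-alone sketch of just that step follows (hypothetical helper name, not part of the patch).

/* Hypothetical helper, for illustration only: context number 'nr' lives
 * in word nr / 64 of the bitmap, at bit position nr % 64 within that word.
 */
static inline void release_ctx_nr(unsigned long *bmap, unsigned long nr)
{
	bmap[nr >> 6] &= ~(1UL << (nr & 63));
}

For example, nr = 131 clears bit 3 of bmap[2].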
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 34640a370ab4..0dffb4ce8a1d 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -19,36 +19,8 @@ extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
 extern void get_new_mmu_context(struct mm_struct *mm);
-
-/* Initialize a new mmu context.  This is invoked when a new
- * address space instance (unique or shared) is instantiated.
- * This just needs to set mm->context to an invalid context.
- */
-#define init_new_context(__tsk, __mm)	\
-({	unsigned long __pg = get_zeroed_page(GFP_KERNEL); \
-	(__mm)->context.sparc64_ctx_val = 0UL; \
-	(__mm)->context.sparc64_tsb = \
-	  (unsigned long *) __pg; \
-	(__pg ? 0 : -ENOMEM); \
-})
-
-
-/* Destroy a dead context.  This occurs when mmput drops the
- * mm_users count to zero, the mmaps have been released, and
- * all the page tables have been flushed.  Our job is to destroy
- * any remaining processor-specific state, and in the sparc64
- * case this just means freeing up the mmu context ID held by
- * this task if valid.
- */
-#define destroy_context(__mm)					\
-do {	free_page((unsigned long)(__mm)->context.sparc64_tsb);	\
-	spin_lock(&ctx_alloc_lock);				\
-	if (CTX_VALID((__mm)->context)) {			\
-		unsigned long nr = CTX_NRBITS((__mm)->context);	\
-		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));	\
-	}							\
-	spin_unlock(&ctx_alloc_lock);				\
-} while(0)
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+extern void destroy_context(struct mm_struct *mm);
 
 extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb);
 
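The header now only declares the two hooks; their bodies live in arch/sparc64/mm/tsb.c above. One practical effect of moving from a statement-expression macro to a real function is that the allocation failure becomes an ordinary return value. A hypothetical caller sketch, shown only to illustrate the contract and not taken from this patch or from the generic mm code:

/* Hypothetical caller, for illustration only: init_new_context() now
 * returns 0 or -ENOMEM, and destroy_context() undoes its work.
 */
static int example_mm_setup(struct task_struct *tsk, struct mm_struct *mm)
{
	int err = init_new_context(tsk, mm);	/* allocates the zeroed TSB page */

	if (err)
		return err;			/* -ENOMEM if get_zeroed_page() failed */

	/* ... when the mm is finally dropped, destroy_context(mm) frees the
	 * TSB page and releases any context ID the mm was holding ...
	 */
	return 0;
}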
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 77ba0b6cc1ce..2b2ecd6104d2 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -116,6 +116,10 @@
 #define _PAGE_W	  _AC(0x0000000000000002,UL)	/* Writable */
 #define _PAGE_G	  _AC(0x0000000000000001,UL)	/* Global */
 
+#define _PAGE_ALL_SZ_BITS \
+	(_PAGE_SZ4MB | _PAGE_SZ512K | _PAGE_SZ64K | \
+	 _PAGE_SZ8K | _PAGE_SZ32MB | _PAGE_SZ256MB)
+
 /* Here are the SpitFire software bits we use in the TTE's.
  *
  * WARNING: If you are going to try and start using some