Diffstat (limited to 'arch/frv/mm/mmu-context.c')
 -rw-r--r--  arch/frv/mm/mmu-context.c  208
 1 file changed, 208 insertions(+), 0 deletions(-)
diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c
new file mode 100644
index 000000000000..f2c6866fc88b
--- /dev/null
+++ b/arch/frv/mm/mmu-context.c
@@ -0,0 +1,208 @@
/* mmu-context.c: MMU context allocation and management
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/tlbflush.h>

#define NR_CXN	4096

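/* context number allocation state:
 * - cxn_bitmap tracks which of the NR_CXN context numbers are in use
 *   (CXN 0 is reserved for the kernel)
 * - cxn_owners_lru orders the contexts that own a CXN, least recently used
 *   first, so that a victim can be picked when the numbers run out
 * - cxn_pinned names a CXN that must not be stolen (-1 when nothing is pinned)
 */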
static unsigned long cxn_bitmap[NR_CXN / (sizeof(unsigned long) * 8)];
static LIST_HEAD(cxn_owners_lru);
static DEFINE_SPINLOCK(cxn_owners_lock);

int __nongpreldata cxn_pinned = -1;


/*****************************************************************************/
/*
 * initialise a new context
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        memset(&mm->context, 0, sizeof(mm->context));
        INIT_LIST_HEAD(&mm->context.id_link);
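        /* no PGE is cached for either TLB miss handler in a fresh context */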
        mm->context.itlb_cached_pge = 0xffffffffUL;
        mm->context.dtlb_cached_pge = 0xffffffffUL;

        return 0;
} /* end init_new_context() */

/*****************************************************************************/
/*
 * make sure a kernel MMU context has a CPU context number
 * - call with cxn_owners_lock held
 */
static unsigned get_cxn(mm_context_t *ctx)
{
        struct list_head *_p;
        mm_context_t *p;
        unsigned cxn;

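        /* a context that already owns a CXN is simply bumped to the
         * most-recently-used end of the LRU list */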
        if (!list_empty(&ctx->id_link)) {
                list_move_tail(&ctx->id_link, &cxn_owners_lru);
        }
        else {
                /* find the first unallocated context number
                 * - 0 is reserved for the kernel
                 */
                cxn = find_next_zero_bit(&cxn_bitmap, NR_CXN, 1);
                if (cxn < NR_CXN) {
                        set_bit(cxn, &cxn_bitmap);
                }
                else {
                        /* none remaining - need to steal someone else's cxn */
                        p = NULL;
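                        /* walk the LRU list from the oldest entry, skipping
                         * contexts currently loaded on the CPU and the pinned
                         * context */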
                        list_for_each(_p, &cxn_owners_lru) {
                                p = list_entry(_p, mm_context_t, id_link);
                                if (!p->id_busy && p->id != cxn_pinned)
                                        break;
                        }

                        BUG_ON(_p == &cxn_owners_lru);

                        cxn = p->id;
                        p->id = 0;
                        list_del_init(&p->id_link);
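                        /* the stolen number may still tag entries in the TLB,
                         * so flush them before it is reused */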
                        __flush_tlb_mm(cxn);
                }

                ctx->id = cxn;
                list_add_tail(&ctx->id_link, &cxn_owners_lru);
        }

        return ctx->id;
} /* end get_cxn() */

/*****************************************************************************/
/*
 * restore the current TLB miss handler mapped page tables into the MMU context and set up a
 * mapping for the page directory
 */
void change_mm_context(mm_context_t *old, mm_context_t *ctx, pgd_t *pgd)
{
        unsigned long _pgd;

        _pgd = virt_to_phys(pgd);

        /* save the state of the outgoing MMU context */
        old->id_busy = 0;

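        /* SCR0/DAMPR4 hold the ITLB miss handler's cached PGE and PTD mapping
         * and SCR1/DAMPR5 the DTLB's; preserve them so they can be reinstated
         * when this context is next switched in */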
        asm volatile("movsg scr0,%0" : "=r"(old->itlb_cached_pge));
        asm volatile("movsg dampr4,%0" : "=r"(old->itlb_ptd_mapping));
        asm volatile("movsg scr1,%0" : "=r"(old->dtlb_cached_pge));
        asm volatile("movsg dampr5,%0" : "=r"(old->dtlb_ptd_mapping));

        /* select an MMU context number */
        spin_lock(&cxn_owners_lock);
        get_cxn(ctx);
        ctx->id_busy = 1;
        spin_unlock(&cxn_owners_lock);

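        /* load the context number register so that TLB entries loaded from
         * here on are tagged with this context's CXN */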
        asm volatile("movgs %0,cxnr" : : "r"(ctx->id));

        /* restore the state of the incoming MMU context */
        asm volatile("movgs %0,scr0" : : "r"(ctx->itlb_cached_pge));
        asm volatile("movgs %0,dampr4" : : "r"(ctx->itlb_ptd_mapping));
        asm volatile("movgs %0,scr1" : : "r"(ctx->dtlb_cached_pge));
        asm volatile("movgs %0,dampr5" : : "r"(ctx->dtlb_ptd_mapping));

        /* map the PGD into uncached virtual memory */
        asm volatile("movgs %0,ttbr" : : "r"(_pgd));
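        /* DAMPR3 carries a 16KB (xAMPRx_SS_16Kb), supervisor, valid mapping
         * covering the page directory */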
        asm volatile("movgs %0,dampr3"
                     :: "r"(_pgd | xAMPRx_L | xAMPRx_M | xAMPRx_SS_16Kb |
                            xAMPRx_S | xAMPRx_C | xAMPRx_V));

} /* end change_mm_context() */

/*****************************************************************************/
/*
 * finished with an MMU context number
 */
void destroy_context(struct mm_struct *mm)
{
        mm_context_t *ctx = &mm->context;

        spin_lock(&cxn_owners_lock);

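        /* release the CXN, if this context was ever given one, dropping any
         * pin on it in the process */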
        if (!list_empty(&ctx->id_link)) {
                if (ctx->id == cxn_pinned)
                        cxn_pinned = -1;

                list_del_init(&ctx->id_link);
                clear_bit(ctx->id, &cxn_bitmap);
                __flush_tlb_mm(ctx->id);
                ctx->id = 0;
        }

        spin_unlock(&cxn_owners_lock);
} /* end destroy_context() */

/*****************************************************************************/
/*
 * display the MMU context a process is currently using
 */
#ifdef CONFIG_PROC_FS
char *proc_pid_status_frv_cxnr(struct mm_struct *mm, char *buffer)
{
        spin_lock(&cxn_owners_lock);
        buffer += sprintf(buffer, "CXNR: %u\n", mm->context.id);
        spin_unlock(&cxn_owners_lock);

        return buffer;
} /* end proc_pid_status_frv_cxnr() */
#endif

/*****************************************************************************/
/*
 * (un)pin a process's mm_struct's MMU context ID
 */
int cxn_pin_by_pid(pid_t pid)
{
        struct task_struct *tsk;
        struct mm_struct *mm = NULL;
        int ret;

        /* unpin if pid is zero */
        if (pid == 0) {
                cxn_pinned = -1;
                return 0;
        }

        ret = -ESRCH;

        /* get a handle on the mm_struct */
        read_lock(&tasklist_lock);
        tsk = find_task_by_pid(pid);
        if (tsk) {
                ret = -EINVAL;

                task_lock(tsk);
                if (tsk->mm) {
                        mm = tsk->mm;
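                        /* take a reference on the mm so it can't go away
                         * before its CXN has been pinned; dropped again by
                         * mmput() below */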
                        atomic_inc(&mm->mm_users);
                        ret = 0;
                }
                task_unlock(tsk);
        }
        read_unlock(&tasklist_lock);

        if (ret < 0)
                return ret;

        /* make sure it has a CXN and pin it */
        spin_lock(&cxn_owners_lock);
        cxn_pinned = get_cxn(&mm->context);
        spin_unlock(&cxn_owners_lock);

        mmput(mm);
        return 0;
} /* end cxn_pin_by_pid() */