author		Jeff Dike <jdike@addtoit.com>	2006-09-26 02:33:01 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-26 11:49:06 -0400
commit		5e1f65a67d76341795ea527d30bfdca03999d46b (patch)
tree		db642b9591fc67b9a32b4e8aaa9ee0ecdc7fd546
parent		8f80e9466e18288df7391c9d21532c4125ac9c62 (diff)
[PATCH] uml: Whitespace fixes
arch/um/kernel/tlb.c had some pretty serious whitespace problems.  I also
fixed some returns.

Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	arch/um/kernel/tlb.c	367
1 file changed, 183 insertions, 184 deletions
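The cleanup pattern is easier to see in miniature than in the 367-line diff below. What follows is an illustrative sketch only, not an excerpt from the patch: the demo function is invented for this example, and the return-statement fixes the commit message mentions fall outside the hunks shown here. It contrasts space-indented code with ragged continuation lines against the tab-indented, tab-aligned kernel style that the rewritten tlb.c uses.

/*
 * Illustrative sketch only -- nothing here comes from tlb.c.  "Before"
 * indents with spaces and misaligns continuation lines; "after" uses one
 * tab per indentation level and aligns wrapped arguments under the
 * opening parenthesis.
 */
#include <stdio.h>

/* Before the cleanup (spaces for indentation, ragged continuations):
 *
 * static int demo(int first_argument, int second_argument,
 *       int third_argument)
 * {
 *     int total = first_argument + second_argument +
 *        third_argument;
 *     return total;
 * }
 */

/* After the cleanup (tabs, aligned continuation lines): */
static int demo(int first_argument, int second_argument,
		int third_argument)
{
	int total = first_argument + second_argument +
		third_argument;

	return total;
}

int main(void)
{
	printf("demo(1, 2, 3) = %d\n", demo(1, 2, 3));
	return 0;
}

That continuation-alignment shape is exactly what the long add_mmap()/add_munmap() call chains in the diff below settle into.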
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index cca330edf717..54a5ff25645a 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -1,4 +1,4 @@
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */
@@ -16,12 +16,12 @@
#include "os.h"

static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    int r, int w, int x, struct host_vm_op *ops, int *index,
		    int last_filled, union mm_context *mmu, void **flush,
		    int (*do_ops)(union mm_context *, struct host_vm_op *,
				  int, int, void **))
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

@@ -89,7 +89,7 @@ static int add_munmap(unsigned long addr, unsigned long len,
static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
			int x, struct host_vm_op *ops, int *index,
			int last_filled, union mm_context *mmu, void **flush,
			int (*do_ops)(union mm_context *, struct host_vm_op *,
				      int, int, void **))
{
	struct host_vm_op *last;
@@ -124,106 +124,105 @@ static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	pgd_t *npgd;
	pud_t *npud;
	pmd_t *npmd;
	pte_t *npte;
	union mm_context *mmu = &mm->context;
	unsigned long addr, end;
	int r, w, x;
	struct host_vm_op ops[1];
	void *flush = NULL;
	int op_index = -1, last_op = ARRAY_SIZE(ops) - 1;
	int ret = 0;

	if(mm == NULL)
		return;

	ops[0].type = NONE;
	for(addr = start_addr; addr < end_addr && !ret;){
		npgd = pgd_offset(mm, addr);
		if(!pgd_present(*npgd)){
			end = ADD_ROUND(addr, PGDIR_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pgd_newpage(*npgd)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pgd_mkuptodate(*npgd);
			}
			addr = end;
			continue;
		}

		npud = pud_offset(npgd, addr);
		if(!pud_present(*npud)){
			end = ADD_ROUND(addr, PUD_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pud_newpage(*npud)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pud_mkuptodate(*npud);
			}
			addr = end;
			continue;
		}

		npmd = pmd_offset(npud, addr);
		if(!pmd_present(*npmd)){
			end = ADD_ROUND(addr, PMD_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pmd_newpage(*npmd)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pmd_mkuptodate(*npmd);
			}
			addr = end;
			continue;
		}

		npte = pte_offset_kernel(npmd, addr);
		r = pte_read(*npte);
		w = pte_write(*npte);
		x = pte_exec(*npte);
		if (!pte_young(*npte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*npte)) {
			w = 0;
		}
		if(force || pte_newpage(*npte)){
			if(pte_present(*npte))
				ret = add_mmap(addr,
					       pte_val(*npte) & PAGE_MASK,
					       PAGE_SIZE, r, w, x, ops,
					       &op_index, last_op, mmu,
					       &flush, do_ops);
			else ret = add_munmap(addr, PAGE_SIZE, ops,
					      &op_index, last_op, mmu,
					      &flush, do_ops);
		}
		else if(pte_newprot(*npte))
			ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
					   &op_index, last_op, mmu,
					   &flush, do_ops);

		*npte = pte_mkuptodate(*npte);
		addr += PAGE_SIZE;
	}
	if(!ret)
		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

	/* This is not an else because ret is modified above */
	if(ret) {
		printk("fix_range_common: failed, killing current process\n");
		force_sig(SIGKILL, current);
@@ -232,160 +231,160 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,

int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		if(!pgd_present(*pgd)){
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if(last > end)
				last = end;
			if(pgd_newpage(*pgd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if(!pud_present(*pud)){
			last = ADD_ROUND(addr, PUD_SIZE);
			if(last > end)
				last = end;
			if(pud_newpage(*pud)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if(!pmd_present(*pmd)){
			last = ADD_ROUND(addr, PMD_SIZE);
			if(last > end)
				last = end;
			if(pmd_newpage(*pmd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if(!pte_present(*pte) || pte_newpage(*pte)){
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if(err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if(pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if(pte_newprot(*pte)){
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return(updated);
}

pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return(pte_offset_map(pmd, addr));
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;
	flush_tlb_range(vma, address, address + PAGE_SIZE);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
			 flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
	CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
		    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
	CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
			 end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
	CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}