author	Jack Steiner <steiner@sgi.com>	2009-04-02 19:59:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-02 22:05:06 -0400
commit	ecdaf2b55251f718a1fbaf4a3f72bfd6e25c582c (patch)
tree	2c85778f12e1423d8cc05ce6be6da6e8d2c9a475 /drivers
parent	874194123718e625aa96632bac457d686ba1378e (diff)
sgi-gru: restructure the GRU vtop functions
Restructure the GRU vtop functions in preparation for future changes. This
patch simply moves code around & does not change the algorithm.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
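For orientation, the sketch below illustrates the calling convention the patch introduces: a gru_vtop()-style helper returns 0 on success, -1 for an invalid address, and -2 when the caller must fall back to a non-atomic (upm) retry, and the dropin path maps those codes onto its existing failure labels. This is a minimal user-space illustration only; the names vtop() and try_dropin() and the stubbed lookup are hypothetical stand-ins, not the kernel implementation.

/*
 * Illustrative only -- not kernel code. Mimics the split introduced by
 * the patch: a vtop-style helper reports success/failure codes and the
 * dropin path maps those codes onto its failure handling.
 */
#include <stdio.h>

enum { VTOP_OK = 0, VTOP_INVAL = -1, VTOP_UPM = -2 };

/* Stand-in for gru_vtop(): the real code walks the VMA and PTEs. */
static int vtop(unsigned long vaddr, int atomic, unsigned long *gpa)
{
	if (vaddr == 0)
		return VTOP_INVAL;	/* no VMA / GRU paddr -> invalid */
	if (atomic && (vaddr & 1))
		return VTOP_UPM;	/* pretend the atomic lookup failed */
	*gpa = vaddr & ~0xfffUL;	/* fake page-aligned GPA */
	return VTOP_OK;
}

/* Stand-in for gru_try_dropin(): interpret the helper's return code. */
static void try_dropin(unsigned long vaddr, int atomic)
{
	unsigned long gpa = 0;

	switch (vtop(vaddr, atomic, &gpa)) {
	case VTOP_INVAL:
		printf("0x%lx: invalid fault address\n", vaddr);
		break;
	case VTOP_UPM:
		printf("0x%lx: retry in non-atomic (upm) context\n", vaddr);
		break;
	default:
		printf("0x%lx: dropin TLB entry for gpa 0x%lx\n", vaddr, gpa);
		break;
	}
}

int main(void)
{
	try_dropin(0x0, 1);	/* corresponds to the failinval path */
	try_dropin(0x2001, 1);	/* corresponds to the failupm path */
	try_dropin(0x2000, 1);	/* success: the dropin proceeds */
	return 0;
}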
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/misc/sgi-gru/grufault.c	68
1 file changed, 44 insertions(+), 24 deletions(-)
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 8ae426edc854..bf6e9f7bed54 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -267,6 +267,44 @@ err:
 	return 1;
 }
 
+static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
+		    int write, int atomic, unsigned long *gpa, int *pageshift)
+{
+	struct mm_struct *mm = gts->ts_mm;
+	struct vm_area_struct *vma;
+	unsigned long paddr;
+	int ret, ps;
+
+	vma = find_vma(mm, vaddr);
+	if (!vma)
+		goto inval;
+
+	/*
+	 * Atomic lookup is faster & usually works even if called in non-atomic
+	 * context.
+	 */
+	rmb();	/* Must/check ms_range_active before loading PTEs */
+	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
+	if (ret) {
+		if (atomic)
+			goto upm;
+		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
+			goto inval;
+	}
+	if (is_gru_paddr(paddr))
+		goto inval;
+	paddr = paddr & ~((1UL << ps) - 1);
+	*gpa = uv_soc_phys_ram_to_gpa(paddr);
+	*pageshift = ps;
+	return 0;
+
+inval:
+	return -1;
+upm:
+	return -2;
+}
+
+
 /*
  * Drop a TLB entry into the GRU. The fault is described by info in an TFH.
  * Input:
@@ -281,10 +319,8 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 			  struct gru_tlb_fault_handle *tfh,
 			  unsigned long __user *cb)
 {
-	struct mm_struct *mm = gts->ts_mm;
-	struct vm_area_struct *vma;
-	int pageshift, asid, write, ret;
-	unsigned long paddr, gpa, vaddr;
+	int pageshift = 0, asid, write, ret, atomic = !cb;
+	unsigned long gpa = 0, vaddr = 0;
 
 	/*
 	 * NOTE: The GRU contains magic hardware that eliminates races between
@@ -318,28 +354,12 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 	if (atomic_read(&gts->ts_gms->ms_range_active))
 		goto failactive;
 
-	vma = find_vma(mm, vaddr);
-	if (!vma)
-		goto failinval;
-
-	/*
-	 * Atomic lookup is faster & usually works even if called in non-atomic
-	 * context.
-	 */
-	rmb();	/* Must/check ms_range_active before loading PTEs */
-	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &pageshift);
-	if (ret) {
-		if (!cb)
-			goto failupm;
-		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr,
-					  &pageshift))
-			goto failinval;
-	}
-	if (is_gru_paddr(paddr))
+	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
+	if (ret == -1)
 		goto failinval;
+	if (ret == -2)
+		goto failupm;
 
-	paddr = paddr & ~((1UL << pageshift) - 1);
-	gpa = uv_soc_phys_ram_to_gpa(paddr);
 	gru_cb_set_istatus_active(cb);
 	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
 			  GRU_PAGESIZE(pageshift));