Diffstat (limited to 'drivers/misc/sgi-gru/grufault.c')
-rw-r--r--	drivers/misc/sgi-gru/grufault.c	130
1 file changed, 101 insertions(+), 29 deletions(-)
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 3ee698ad8599..ab118558552e 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -32,6 +32,7 @@
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
+#include <linux/security.h>
 #include <asm/pgtable.h>
 #include "gru.h"
 #include "grutables.h"
@@ -266,6 +267,44 @@ err:
 	return 1;
 }
 
+static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
+		    int write, int atomic, unsigned long *gpa, int *pageshift)
+{
+	struct mm_struct *mm = gts->ts_mm;
+	struct vm_area_struct *vma;
+	unsigned long paddr;
+	int ret, ps;
+
+	vma = find_vma(mm, vaddr);
+	if (!vma)
+		goto inval;
+
+	/*
+	 * Atomic lookup is faster & usually works even if called in non-atomic
+	 * context.
+	 */
+	rmb();	/* Must/check ms_range_active before loading PTEs */
+	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
+	if (ret) {
+		if (atomic)
+			goto upm;
+		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
+			goto inval;
+	}
+	if (is_gru_paddr(paddr))
+		goto inval;
+	paddr = paddr & ~((1UL << ps) - 1);
+	*gpa = uv_soc_phys_ram_to_gpa(paddr);
+	*pageshift = ps;
+	return 0;
+
+inval:
+	return -1;
+upm:
+	return -2;
+}
+
+
 /*
  * Drop a TLB entry into the GRU. The fault is described by info in an TFH.
  * Input:
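The new gru_vtop() helper folds both PTE-lookup paths behind one return convention: 0 on success, -1 when the address is invalid, -2 when the fault must be bounced to user polling mode (UPM). Before converting to a GRU global address, it masks the physical address down to the base of the backing page. A standalone sketch of that masking arithmetic, with hypothetical values:

#include <stdio.h>

int main(void)
{
	unsigned long paddr = 0x12345678UL;	/* hypothetical PTE-derived address */
	int ps = 14;				/* hypothetical page shift (16KB page) */

	/* Clear the low ps bits, as gru_vtop() does before the gpa conversion. */
	unsigned long base = paddr & ~((1UL << ps) - 1);

	printf("base = 0x%lx\n", base);		/* prints base = 0x12344000 */
	return 0;
}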
@@ -280,10 +319,8 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 			  struct gru_tlb_fault_handle *tfh,
 			  unsigned long __user *cb)
 {
-	struct mm_struct *mm = gts->ts_mm;
-	struct vm_area_struct *vma;
-	int pageshift, asid, write, ret;
-	unsigned long paddr, gpa, vaddr;
+	int pageshift = 0, asid, write, ret, atomic = !cb;
+	unsigned long gpa = 0, vaddr = 0;
 
 	/*
 	 * NOTE: The GRU contains magic hardware that eliminates races between
@@ -317,28 +354,19 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 	if (atomic_read(&gts->ts_gms->ms_range_active))
 		goto failactive;
 
-	vma = find_vma(mm, vaddr);
-	if (!vma)
+	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
+	if (ret == -1)
 		goto failinval;
+	if (ret == -2)
+		goto failupm;
 
-	/*
-	 * Atomic lookup is faster & usually works even if called in non-atomic
-	 * context.
-	 */
-	rmb();	/* Must/check ms_range_active before loading PTEs */
-	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &pageshift);
-	if (ret) {
-		if (!cb)
+	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
+		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
+		if (atomic || !gru_update_cch(gts, 0)) {
+			gts->ts_force_cch_reload = 1;
 			goto failupm;
-		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr,
-					  &pageshift))
-			goto failinval;
+		}
 	}
-	if (is_gru_paddr(paddr))
-		goto failinval;
-
-	paddr = paddr & ~((1UL << pageshift) - 1);
-	gpa = uv_soc_phys_ram_to_gpa(paddr);
 	gru_cb_set_istatus_active(cb);
 	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
 			  GRU_PAGESIZE(pageshift));
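In the dropin path the helper's return codes map directly onto the existing failure labels, and a per-context bitmap, ts_sizeavail, latches each page size the context has faulted on so the CCH only needs updating the first time a size appears. When the CCH cannot be updated on the spot (atomic context, or gru_update_cch() fails), ts_force_cch_reload is set and the fault bounces to UPM; the reload is replayed later in gru_handle_user_call_os() (see the hunk at new line 536 below). A toy model of the latch, assuming GRU_SIZEAVAIL(ps) is a one-bit-per-pageshift mask (an assumption, the macro is not shown in this diff):

#include <stdio.h>

#define SIZEAVAIL(ps)	(1UL << (ps))	/* assumed shape of GRU_SIZEAVAIL() */

int main(void)
{
	unsigned long sizeavail = 0;
	int faults[] = { 12, 21, 12 };	/* hypothetical fault page shifts */

	for (int i = 0; i < 3; i++) {
		if (!(sizeavail & SIZEAVAIL(faults[i]))) {
			sizeavail |= SIZEAVAIL(faults[i]);
			printf("pageshift %d: first use, update CCH\n", faults[i]);
		} else {
			printf("pageshift %d: already enabled\n", faults[i]);
		}
	}
	return 0;
}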
@@ -368,6 +396,7 @@ failupm:
 
 failfmm:
 	/* FMM state on UPM call */
+	gru_flush_cache(tfh);
 	STAT(tlb_dropin_fail_fmm);
 	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
 	return 0;
@@ -448,6 +477,7 @@ irqreturn_t gru_intr(int irq, void *dev_id)
 			up_read(&gts->ts_mm->mmap_sem);
 		} else {
 			tfh_user_polling_mode(tfh);
+			STAT(intr_mm_lock_failed);
 		}
 	}
 	return IRQ_HANDLED;
@@ -497,10 +527,8 @@ int gru_handle_user_call_os(unsigned long cb)
 	if (!gts)
 		return -EINVAL;
 
-	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
-		ret = -EINVAL;
+	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
 		goto exit;
-	}
 
 	/*
 	 * If force_unload is set, the UPM TLB fault is phony. The task
@@ -508,6 +536,20 @@ int gru_handle_user_call_os(unsigned long cb)
 	 * unload the context. The task will page fault and assign a new
 	 * context.
 	 */
+	if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
+				gts->ts_blade != uv_numa_blade_id()) {
+		STAT(call_os_offnode_reference);
+		gts->ts_force_unload = 1;
+	}
+
+	/*
+	 * CCH may contain stale data if ts_force_cch_reload is set.
+	 */
+	if (gts->ts_gru && gts->ts_force_cch_reload) {
+		gru_update_cch(gts, 0);
+		gts->ts_force_cch_reload = 0;
+	}
+
 	ret = -EAGAIN;
 	cbrnum = thread_cbr_number(gts, ucbnum);
 	if (gts->ts_force_unload) {
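This hunk is the consumer side of the ts_force_cch_reload flag set in the dropin path above, plus a heuristic that forces an unload when the owning task calls in from a different blade than the one holding the context, so the context can migrate on the next fault. A toy userspace model of the flag handshake (field names are stand-ins for the gts members; locking elided):

#include <stdio.h>

struct ctx {
	int loaded;		/* models gts->ts_gru != NULL */
	int force_cch_reload;	/* models gts->ts_force_cch_reload */
};

/* Dropin path: could not update the CCH in place, ask for a replay. */
static void dropin_failupm(struct ctx *c)
{
	c->force_cch_reload = 1;
}

/* call_os path: replay the CCH update once, then clear the flag. */
static void call_os(struct ctx *c)
{
	if (c->loaded && c->force_cch_reload) {
		printf("reloading CCH before handling the CB\n");
		c->force_cch_reload = 0;
	}
}

int main(void)
{
	struct ctx c = { .loaded = 1, .force_cch_reload = 0 };

	dropin_failupm(&c);
	call_os(&c);	/* reloads once */
	call_os(&c);	/* no-op: flag already cleared */
	return 0;
}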
@@ -541,11 +583,13 @@ int gru_get_exception_detail(unsigned long arg)
 	if (!gts)
 		return -EINVAL;
 
-	if (gts->ts_gru) {
-		ucbnum = get_cb_number((void *)excdet.cb);
+	ucbnum = get_cb_number((void *)excdet.cb);
+	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
+		ret = -EINVAL;
+	} else if (gts->ts_gru) {
 		cbrnum = thread_cbr_number(gts, ucbnum);
 		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
-		prefetchw(cbe);	/* Harmless on hardware, required for emulator */
+		prefetchw(cbe);/* Harmless on hardware, required for emulator */
 		excdet.opc = cbe->opccpy;
 		excdet.exopc = cbe->exopccpy;
 		excdet.ecause = cbe->ecause;
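gru_get_exception_detail() now range-checks the user-supplied CB number before converting it, mirroring the check in gru_handle_user_call_os(); previously an out-of-range ucbnum reached thread_cbr_number(). A minimal sketch of the bounds check (the AU size value is hypothetical):

#include <stdio.h>

#define CBR_AU_SIZE	2	/* hypothetical stand-in for GRU_CBR_AU_SIZE */

/* Returns nonzero if the user-supplied CB number is within the allocation. */
static int cbnum_valid(int ucbnum, int cbr_au_count)
{
	return ucbnum < cbr_au_count * CBR_AU_SIZE;
}

int main(void)
{
	printf("%d %d\n", cbnum_valid(3, 2), cbnum_valid(7, 2));	/* 1 0 */
	return 0;
}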
@@ -567,6 +611,31 @@ int gru_get_exception_detail(unsigned long arg)
 /*
  * User request to unload a context. Content is saved for possible reload.
  */
+static int gru_unload_all_contexts(void)
+{
+	struct gru_thread_state *gts;
+	struct gru_state *gru;
+	int gid, ctxnum;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	foreach_gid(gid) {
+		gru = GID_TO_GRU(gid);
+		spin_lock(&gru->gs_lock);
+		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
+			gts = gru->gs_gts[ctxnum];
+			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
+				spin_unlock(&gru->gs_lock);
+				gru_unload_context(gts, 1);
+				gru_unlock_gts(gts);
+				spin_lock(&gru->gs_lock);
+			}
+		}
+		spin_unlock(&gru->gs_lock);
+	}
+	return 0;
+}
+
 int gru_user_unload_context(unsigned long arg)
 {
 	struct gru_thread_state *gts;
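gru_unload_all_contexts() walks every GRU and drops each loaded context, gated by CAP_SYS_ADMIN. Note the lock choreography: gs_lock is a spinlock that cannot be held across gru_unload_context() (which may sleep), so the scan grabs the per-context mutex with mutex_trylock(), drops the spinlock around the unload, then retakes it to continue; contexts whose trylock fails are simply skipped. A minimal userspace model of that trylock-then-drop pattern (pthread stand-ins for gs_lock and ts_ctxlock):

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t list_lock;	/* models gru->gs_lock */
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;	/* models ts_ctxlock */

int main(void)
{
	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);

	pthread_spin_lock(&list_lock);
	if (pthread_mutex_trylock(&ctx_lock) == 0) {
		/* Drop the spinlock before sleepable work, as the loop above does. */
		pthread_spin_unlock(&list_lock);
		printf("unloading context\n");	/* models gru_unload_context() */
		pthread_mutex_unlock(&ctx_lock);
		pthread_spin_lock(&list_lock);	/* retake to continue the scan */
	}
	pthread_spin_unlock(&list_lock);
	return 0;
}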
@@ -578,6 +647,9 @@ int gru_user_unload_context(unsigned long arg)
 
 	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);
 
+	if (!req.gseg)
+		return gru_unload_all_contexts();
+
 	gts = gru_find_lock_gts(req.gseg);
 	if (!gts)
 		return -EINVAL;
@@ -609,7 +681,7 @@ int gru_user_flush_tlb(unsigned long arg)
 	if (!gts)
 		return -EINVAL;
 
-	gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.vaddr + req.len);
+	gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len);
 	gru_unlock_gts(gts);
 
 	return 0;
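The last hunk fixes the argument passed to gru_flush_tlb_range(): the function evidently expects a length as its third parameter, so the old call, which handed it req.vaddr + req.len (an end address), flushed far more than requested. A toy illustration of that end-versus-length confusion:

#include <stdio.h>

/* Flush helper that, like gru_flush_tlb_range(), takes (start, len). */
static void flush(unsigned long start, unsigned long len)
{
	printf("flush [0x%lx, 0x%lx)\n", start, start + len);
}

int main(void)
{
	unsigned long vaddr = 0x400000UL, len = 0x2000UL;	/* hypothetical */

	flush(vaddr, vaddr + len);	/* old call: length is off by ~vaddr bytes */
	flush(vaddr, len);		/* fixed call */
	return 0;
}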