Diffstat (limited to 'drivers/misc/sgi-gru/grufault.c')
-rw-r--r--  drivers/misc/sgi-gru/grufault.c | 118
1 file changed, 97 insertions(+), 21 deletions(-)
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index ab118558552e..679e01778286 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -166,7 +166,8 @@ static inline struct gru_state *irq_to_gru(int irq)
  * the GRU, atomic operations must be used to clear bits.
  */
 static void get_clear_fault_map(struct gru_state *gru,
-				struct gru_tlb_fault_map *map)
+				struct gru_tlb_fault_map *imap,
+				struct gru_tlb_fault_map *dmap)
 {
 	unsigned long i, k;
 	struct gru_tlb_fault_map *tfm;
@@ -177,7 +178,11 @@ static void get_clear_fault_map(struct gru_state *gru,
 		k = tfm->fault_bits[i];
 		if (k)
 			k = xchg(&tfm->fault_bits[i], 0UL);
-		map->fault_bits[i] = k;
+		imap->fault_bits[i] = k;
+		k = tfm->done_bits[i];
+		if (k)
+			k = xchg(&tfm->done_bits[i], 0UL);
+		dmap->fault_bits[i] = k;
 	}
 
 	/*
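
Each word of the hardware fault map is drained with xchg(), so bits the GRU sets between the read and the clear are never lost; the cheap non-atomic read first avoids the bus-locked swap when a word is empty. A userspace C11 sketch of the same fetch-and-clear idiom (the names and the 4-word map size are illustrative, not from the driver):

#include <stdatomic.h>
#include <stdio.h>

#define MAP_WORDS 4

/* Shared bitmap: a producer (the "hardware") sets bits, a consumer drains them. */
static _Atomic unsigned long fault_bits[MAP_WORDS];

/* Atomically fetch and clear each word; every set bit is seen exactly once. */
static void drain_map(unsigned long *snapshot)
{
	for (int i = 0; i < MAP_WORDS; i++) {
		unsigned long k = atomic_load(&fault_bits[i]);
		/* Cheap read first; only pay for the atomic swap if nonzero. */
		if (k)
			k = atomic_exchange(&fault_bits[i], 0UL);
		snapshot[i] = k;
	}
}

int main(void)
{
	unsigned long snap[MAP_WORDS];

	atomic_fetch_or(&fault_bits[1], 1UL << 5);	/* simulate a fault */
	drain_map(snap);
	printf("word1 = 0x%lx\n", snap[1]);		/* 0x20 */
	drain_map(snap);
	printf("word1 = 0x%lx\n", snap[1]);		/* 0x0: seen only once */
	return 0;
}
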
@@ -334,6 +339,12 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 	 * Might be a hardware race OR a stupid user. Ignore FMM because FMM
 	 * is a transient state.
 	 */
+	if (tfh->status != TFHSTATUS_EXCEPTION) {
+		gru_flush_cache(tfh);
+		if (tfh->status != TFHSTATUS_EXCEPTION)
+			goto failnoexception;
+		STAT(tfh_stale_on_fault);
+	}
 	if (tfh->state == TFHSTATE_IDLE)
 		goto failidle;
 	if (tfh->state == TFHSTATE_MISS_FMM && cb)
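
The TFH is hardware-owned and not fully cache-coherent, so a cached status word can lag the device: the new check flushes and re-reads before concluding that no exception is pending, and counts the stale hit. A minimal sketch of this check-flush-recheck idiom, with refresh() as a hypothetical stand-in for gru_flush_cache() followed by a re-read:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hardware-owned status block and a possibly stale local snapshot of it. */
struct tfh { int status; };
enum { TFHSTATUS_IDLE, TFHSTATUS_EXCEPTION };

static struct tfh hw_tfh = { TFHSTATUS_EXCEPTION };	/* what the device wrote */
static struct tfh snapshot = { TFHSTATUS_IDLE };	/* stale cached view */

/* Stand-in for gru_flush_cache(): discard the stale copy, re-read the device. */
static void refresh(struct tfh *t)
{
	memcpy(t, &hw_tfh, sizeof(*t));
}

static bool exception_pending(struct tfh *t)
{
	if (t->status != TFHSTATUS_EXCEPTION) {
		refresh(t);				/* maybe we were stale */
		if (t->status != TFHSTATUS_EXCEPTION)
			return false;			/* genuinely no exception */
		/* stale hit: the driver counts this, then proceeds as usual */
	}
	return true;
}

int main(void)
{
	printf("pending: %d\n", exception_pending(&snapshot));	/* 1 after refresh */
	return 0;
}
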
@@ -401,8 +412,17 @@ failfmm:
 	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
 	return 0;
 
+failnoexception:
+	/* TFH status did not show exception pending */
+	gru_flush_cache(tfh);
+	if (cb)
+		gru_flush_cache(cb);
+	STAT(tlb_dropin_fail_no_exception);
+	gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n", tfh, tfh->status, tfh->state);
+	return 0;
+
 failidle:
-	/* TFH was idle - no miss pending */
+	/* TFH state was idle - no miss pending */
 	gru_flush_cache(tfh);
 	if (cb)
 		gru_flush_cache(cb);
@@ -438,7 +458,7 @@ failactive:
 irqreturn_t gru_intr(int irq, void *dev_id)
 {
 	struct gru_state *gru;
-	struct gru_tlb_fault_map map;
+	struct gru_tlb_fault_map imap, dmap;
 	struct gru_thread_state *gts;
 	struct gru_tlb_fault_handle *tfh = NULL;
 	int cbrnum, ctxnum;
@@ -451,11 +471,15 @@ irqreturn_t gru_intr(int irq, void *dev_id)
 			raw_smp_processor_id(), irq);
 		return IRQ_NONE;
 	}
-	get_clear_fault_map(gru, &map);
-	gru_dbg(grudev, "irq %d, gru %x, map 0x%lx\n", irq, gru->gs_gid,
-		map.fault_bits[0]);
+	get_clear_fault_map(gru, &imap, &dmap);
+
+	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
+		complete(gru->gs_blade->bs_async_wq);
+		gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
+			gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
+	}
 
-	for_each_cbr_in_tfm(cbrnum, map.fault_bits) {
+	for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
 		tfh = get_tfh_by_index(gru, cbrnum);
 		prefetchw(tfh);	/* Helps on hdw, required for emulator */
 
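
Each set bit in the new done map wakes a waiter on the blade's bs_async_wq, the kernel completion that asynchronous GRU instructions block on. A pthreads sketch of the equivalent complete()/wait_for_completion() pairing (the struct here is a userspace analogue, not the kernel's):

#include <pthread.h>
#include <stdio.h>

/* A tiny userspace analogue of the kernel's struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	unsigned int done;	/* counts unconsumed complete() calls */
};

static struct completion async_wq = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done++;			/* one wakeup per done bit */
	pthread_cond_signal(&c->wait);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->wait, &c->lock);
	c->done--;
	pthread_mutex_unlock(&c->lock);
}

static void *irq_side(void *arg)
{
	complete(&async_wq);		/* as gru_intr() does per dmap bit */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, irq_side, NULL);
	wait_for_completion(&async_wq);	/* the instruction submitter blocks here */
	pthread_join(t, NULL);
	puts("async instruction done");
	return 0;
}
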
@@ -472,7 +496,9 @@
 		 * This is running in interrupt context. Trylock the mmap_sem.
 		 * If it fails, retry the fault in user context.
 		 */
-		if (down_read_trylock(&gts->ts_mm->mmap_sem)) {
+		if (!gts->ts_force_cch_reload &&
+					down_read_trylock(&gts->ts_mm->mmap_sem)) {
+			gts->ustats.fmm_tlbdropin++;
 			gru_try_dropin(gts, tfh, NULL);
 			up_read(&gts->ts_mm->mmap_sem);
 		} else {
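
Faults serviced in interrupt context must not sleep on mmap_sem, so the handler only proceeds when a trylock succeeds (and the context does not need a CCH reload); otherwise the fault is retried later from process context, where sleeping is allowed. A pthreads sketch of the trylock-or-defer shape (function names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Work we may only do if the lock is available right now (no sleeping). */
static void try_dropin_fast_path(void)
{
	if (pthread_rwlock_tryrdlock(&mmap_sem) == 0) {
		puts("fast path: dropin in interrupt context");
		pthread_rwlock_unlock(&mmap_sem);
	} else {
		/* Can't block here: flag the context so the task retries
		 * the fault later from process context. */
		puts("deferred: retry in user context");
	}
}

int main(void)
{
	try_dropin_fast_path();			/* uncontended: fast path */

	pthread_rwlock_wrlock(&mmap_sem);	/* simulate a writer holding it */
	try_dropin_fast_path();			/* contended: deferred */
	pthread_rwlock_unlock(&mmap_sem);
	return 0;
}
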
@@ -491,6 +517,7 @@ static int gru_user_dropin(struct gru_thread_state *gts,
 	struct gru_mm_struct *gms = gts->ts_gms;
 	int ret;
 
+	gts->ustats.upm_tlbdropin++;
 	while (1) {
 		wait_event(gms->ms_wait_queue,
 			   atomic_read(&gms->ms_range_active) == 0);
@@ -546,8 +573,8 @@ int gru_handle_user_call_os(unsigned long cb)
 	 * CCH may contain stale data if ts_force_cch_reload is set.
 	 */
 	if (gts->ts_gru && gts->ts_force_cch_reload) {
-		gru_update_cch(gts, 0);
 		gts->ts_force_cch_reload = 0;
+		gru_update_cch(gts, 0);
 	}
 
 	ret = -EAGAIN;
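
Clearing ts_force_cch_reload before calling gru_update_cch() is the usual way to consume a request flag without losing a re-request that arrives while the work runs; clearing it afterwards can erase that late request. A contrived but deterministic sketch of the lost-update hazard, with the concurrent setter simulated inline (the real race is timing-dependent):

#include <stdbool.h>
#include <stdio.h>

static bool force_reload;

static void update_cch(void) { /* reload state into the hardware CCH */ }

/* A concurrent path (e.g. context migration) requesting another reload. */
static void setter(void) { force_reload = true; }

int main(void)
{
	/* Wrong order: a request arriving around the update is erased. */
	force_reload = true;
	update_cch();
	setter();		/* request raised while updating */
	force_reload = false;	/* ...and lost here */
	printf("update-then-clear: reload=%d (request lost)\n", force_reload);

	/* Fixed order: consume the current request first, then update;
	 * a late request survives for the next pass. */
	force_reload = true;
	force_reload = false;
	update_cch();
	setter();
	printf("clear-then-update: reload=%d (request kept)\n", force_reload);
	return 0;
}
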
@@ -589,20 +616,26 @@ int gru_get_exception_detail(unsigned long arg)
 	} else if (gts->ts_gru) {
 		cbrnum = thread_cbr_number(gts, ucbnum);
 		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
-		prefetchw(cbe);/* Harmless on hardware, required for emulator */
+		gru_flush_cache(cbe);	/* CBE not coherent */
 		excdet.opc = cbe->opccpy;
 		excdet.exopc = cbe->exopccpy;
 		excdet.ecause = cbe->ecause;
 		excdet.exceptdet0 = cbe->idef1upd;
 		excdet.exceptdet1 = cbe->idef3upd;
+		excdet.cbrstate = cbe->cbrstate;
+		excdet.cbrexecstatus = cbe->cbrexecstatus;
+		gru_flush_cache(cbe);
 		ret = 0;
 	} else {
 		ret = -EAGAIN;
 	}
 	gru_unlock_gts(gts);
 
-	gru_dbg(grudev, "address 0x%lx, ecause 0x%x\n", excdet.cb,
-		excdet.ecause);
+	gru_dbg(grudev,
+		"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
+		"exdet0 0x%lx, exdet1 0x%x\n",
+		excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
+		excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
 	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
 		ret = -EFAULT;
 	return ret;
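
The CBE, like the TFH, is not coherent, so the read of the exception fields is bracketed by flushes: one before the read so the latest device state is seen, one after so no stale copy of the line survives for the next reader. A sketch of the bracket, with flush_lines() as a hypothetical no-op stand-in for gru_flush_cache():

#include <stdio.h>

/* flush_lines() is a hypothetical stand-in for gru_flush_cache(); on real
 * hardware it would write back/invalidate the cache lines covering p. */
static void flush_lines(volatile void *p, unsigned long len)
{
	(void)p; (void)len;	/* no-op in this userspace sketch */
}

struct cbe_snapshot { int opc, exopc, ecause; };

/* Bracket the read of a device-owned structure with flushes: the first
 * drops any stale cached copy, the second ensures the next reader does
 * not inherit ours. */
static void read_cbe(volatile struct cbe_snapshot *cbe, struct cbe_snapshot *out)
{
	flush_lines(cbe, sizeof(*cbe));
	out->opc = cbe->opc;
	out->exopc = cbe->exopc;
	out->ecause = cbe->ecause;
	flush_lines(cbe, sizeof(*cbe));
}

int main(void)
{
	volatile struct cbe_snapshot hw = { 1, 2, 3 };
	struct cbe_snapshot snap;

	read_cbe(&hw, &snap);
	printf("opc=%d exopc=%d ecause=%d\n", snap.opc, snap.exopc, snap.ecause);
	return 0;
}
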
@@ -627,7 +660,7 @@ static int gru_unload_all_contexts(void)
 		if (gts && mutex_trylock(&gts->ts_ctxlock)) {
 			spin_unlock(&gru->gs_lock);
 			gru_unload_context(gts, 1);
-			gru_unlock_gts(gts);
+			mutex_unlock(&gts->ts_ctxlock);
 			spin_lock(&gru->gs_lock);
 		}
 	}
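
gru_unload_all_contexts() scans under the gs_lock spinlock, trylocks each context mutex, and drops the spinlock around the sleeping unload; the fix releases exactly what mutex_trylock() took rather than going through gru_unlock_gts(). A pthreads sketch of that trylock-then-drop-the-outer-lock dance, simplified to a single context:

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t gs_lock;
static pthread_mutex_t ctxlock = PTHREAD_MUTEX_INITIALIZER;

/* Work that may sleep: must not run under the spinlock. */
static void unload_context(void) { puts("unloading context"); }

static void unload_all(void)
{
	pthread_spin_lock(&gs_lock);
	if (pthread_mutex_trylock(&ctxlock) == 0) {
		/* Drop the spinlock before the sleeping work... */
		pthread_spin_unlock(&gs_lock);
		unload_context();
		/* ...release exactly what trylock took... */
		pthread_mutex_unlock(&ctxlock);
		/* ...and retake the spinlock to continue the scan. */
		pthread_spin_lock(&gs_lock);
	}
	pthread_spin_unlock(&gs_lock);
}

int main(void)
{
	pthread_spin_init(&gs_lock, PTHREAD_PROCESS_PRIVATE);
	unload_all();
	pthread_spin_destroy(&gs_lock);
	return 0;
}
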
@@ -669,6 +702,7 @@ int gru_user_flush_tlb(unsigned long arg)
 {
 	struct gru_thread_state *gts;
 	struct gru_flush_tlb_req req;
+	struct gru_mm_struct *gms;
 
 	STAT(user_flush_tlb);
 	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
@@ -681,8 +715,34 @@ int gru_user_flush_tlb(unsigned long arg)
 	if (!gts)
 		return -EINVAL;
 
-	gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len);
+	gms = gts->ts_gms;
 	gru_unlock_gts(gts);
+	gru_flush_tlb_range(gms, req.vaddr, req.len);
+
+	return 0;
+}
+
+/*
+ * Fetch GSEG statistics
+ */
+long gru_get_gseg_statistics(unsigned long arg)
+{
+	struct gru_thread_state *gts;
+	struct gru_get_gseg_statistics_req req;
+
+	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+		return -EFAULT;
+
+	gts = gru_find_lock_gts(req.gseg);
+	if (gts) {
+		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
+		gru_unlock_gts(gts);
+	} else {
+		memset(&req.stats, 0, sizeof(gts->ustats));
+	}
+
+	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
+		return -EFAULT;
 
 	return 0;
 }
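
The new gru_get_gseg_statistics() follows the standard ioctl-handler shape: copy the request in, fill it (zeroing the stats when no context exists), copy the whole request back out. Note that sizeof(gts->ustats) is safe even when gts is NULL, since sizeof does not evaluate its operand. A userspace sketch of the same round trip, with plain assignment standing in for copy_from_user/copy_to_user and illustrative field names:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the driver's types. */
struct gseg_stats { unsigned long fmm_tlbdropin, upm_tlbdropin; };
struct stats_req { unsigned long gseg; struct gseg_stats stats; };
struct thread_state { struct gseg_stats ustats; };

/* sizeof() does not evaluate its operand, so sizeof(gts->ustats) is legal
 * even when gts is NULL -- the same idiom the driver's memset uses. */
static void fill_stats(struct thread_state *gts, struct stats_req *req)
{
	if (gts)
		memcpy(&req->stats, &gts->ustats, sizeof(gts->ustats));
	else
		memset(&req->stats, 0, sizeof(gts->ustats));
}

int main(void)
{
	struct thread_state ts = { .ustats = { 3, 7 } };
	struct stats_req req = { .gseg = 0x1000 };

	fill_stats(&ts, &req);		/* context found */
	printf("fmm=%lu upm=%lu\n", req.stats.fmm_tlbdropin, req.stats.upm_tlbdropin);

	fill_stats(NULL, &req);		/* no context: zeroed, no crash */
	printf("fmm=%lu upm=%lu\n", req.stats.fmm_tlbdropin, req.stats.upm_tlbdropin);
	return 0;
}
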
@@ -691,18 +751,34 @@ int gru_user_flush_tlb(unsigned long arg)
  * Register the current task as the user of the GSEG slice.
  * Needed for TLB fault interrupt targeting.
  */
-int gru_set_task_slice(long address)
+int gru_set_context_option(unsigned long arg)
 {
 	struct gru_thread_state *gts;
+	struct gru_set_context_option_req req;
+	int ret = 0;
+
+	STAT(set_context_option);
+	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+		return -EFAULT;
+	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
 
-	STAT(set_task_slice);
-	gru_dbg(grudev, "address 0x%lx\n", address);
-	gts = gru_alloc_locked_gts(address);
+	gts = gru_alloc_locked_gts(req.gseg);
 	if (!gts)
 		return -EINVAL;
 
-	gts->ts_tgid_owner = current->tgid;
+	switch (req.op) {
+	case sco_gseg_owner:
+		/* Register the current task as the GSEG owner */
+		gts->ts_tgid_owner = current->tgid;
+		break;
+	case sco_cch_req_slice:
+		/* Set the CCH slice option */
+		gts->ts_cch_req_slice = req.val1 & 3;
+		break;
+	default:
+		ret = -EINVAL;
+	}
 	gru_unlock_gts(gts);
 
-	return 0;
+	return ret;
 }
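
gru_set_context_option() generalizes the old single-purpose gru_set_task_slice() into an op-code switch: sco_gseg_owner keeps the owner registration, sco_cch_req_slice stores a 2-bit CCH slice request, and unknown ops fail with -EINVAL. A sketch of the dispatch shape (enum values and types are illustrative, only the names come from the diff):

#include <stdio.h>

/* Option codes matching the names in the diff; values are illustrative. */
enum { sco_gseg_owner, sco_cch_req_slice };

struct context { long tgid_owner; int cch_req_slice; };

static int set_context_option(struct context *ctx, int op, unsigned long val1,
			      long tgid)
{
	switch (op) {
	case sco_gseg_owner:
		ctx->tgid_owner = tgid;		/* register the calling task */
		return 0;
	case sco_cch_req_slice:
		ctx->cch_req_slice = val1 & 3;	/* only 2 bits are meaningful */
		return 0;
	default:
		return -1;			/* the kernel returns -EINVAL */
	}
}

int main(void)
{
	struct context ctx = { 0, 0 };

	set_context_option(&ctx, sco_cch_req_slice, 0x7, 1234);
	printf("slice=%d (masked to 2 bits)\n", ctx.cch_req_slice);
	return 0;
}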