aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c23
1 files changed, 19 insertions, 4 deletions
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index ccd57d1b5bf8..525e966dce34 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -40,6 +40,13 @@
 static DEFINE_SPINLOCK(npu_context_lock);
 
 /*
+ * When an address shootdown range exceeds this threshold we invalidate the
+ * entire TLB on the GPU for the given PID rather than each specific address in
+ * the range.
+ */
+#define ATSD_THRESHOLD (2*1024*1024)
+
+/*
  * Other types of TCE cache invalidation are not functional in the
  * hardware.
  */
@@ -677,11 +684,19 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 	unsigned long address;
 
-	for (address = start; address < end; address += PAGE_SIZE)
-		mmio_invalidate(npu_context, 1, address, false);
+	if (end - start > ATSD_THRESHOLD) {
+		/*
+		 * Just invalidate the entire PID if the address range is too
+		 * large.
+		 */
+		mmio_invalidate(npu_context, 0, 0, true);
+	} else {
+		for (address = start; address < end; address += PAGE_SIZE)
+			mmio_invalidate(npu_context, 1, address, false);
 
-	/* Do the flush only on the final addess == end */
-	mmio_invalidate(npu_context, 1, address, true);
+		/* Do the flush only on the final addess == end */
+		mmio_invalidate(npu_context, 1, address, true);
+	}
 }
 
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {