diff options
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a.h | 4 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 26 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 8 |
3 files changed, 34 insertions, 4 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index c04c97ca..92bcb618 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -137,6 +137,7 @@ enum gk20a_cbc_op { | |||
137 | 137 | ||
138 | enum nvgpu_unit; | 138 | enum nvgpu_unit; |
139 | 139 | ||
140 | enum nvgpu_flush_op; | ||
140 | /* | 141 | /* |
141 | * gpu_ops should only contain function pointers! Non-function pointer members | 142 | * gpu_ops should only contain function pointers! Non-function pointer members |
142 | * should go in struct gk20a or be implemented with the boolean flag API defined | 143 | * should go in struct gk20a or be implemented with the boolean flag API defined |
@@ -569,6 +570,7 @@ struct gpu_ops { | |||
569 | struct tsg_gk20a *tsg); | 570 | struct tsg_gk20a *tsg); |
570 | void (*deinit_eng_method_buffers)(struct gk20a *g, | 571 | void (*deinit_eng_method_buffers)(struct gk20a *g, |
571 | struct tsg_gk20a *tsg); | 572 | struct tsg_gk20a *tsg); |
573 | u32 (*get_preempt_timeout)(struct gk20a *g); | ||
572 | #ifdef CONFIG_TEGRA_GK20A_NVHOST | 574 | #ifdef CONFIG_TEGRA_GK20A_NVHOST |
573 | int (*alloc_syncpt_buf)(struct channel_gk20a *c, | 575 | int (*alloc_syncpt_buf)(struct channel_gk20a *c, |
574 | u32 syncpt_id, struct nvgpu_mem *syncpt_buf); | 576 | u32 syncpt_id, struct nvgpu_mem *syncpt_buf); |
@@ -760,6 +762,8 @@ struct gpu_ops { | |||
760 | void (*fault_info_mem_destroy)(struct gk20a *g); | 762 | void (*fault_info_mem_destroy)(struct gk20a *g); |
761 | u32 (*get_kind_invalid)(void); | 763 | u32 (*get_kind_invalid)(void); |
762 | u32 (*get_kind_pitch)(void); | 764 | u32 (*get_kind_pitch)(void); |
765 | u32 (*get_flush_retries)(struct gk20a *g, | ||
766 | enum nvgpu_flush_op op); | ||
763 | } mm; | 767 | } mm; |
764 | /* | 768 | /* |
765 | * This function is called to allocate secure memory (memory | 769 | * This function is called to allocate secure memory (memory |
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 67ab307f..d96fa4e1 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -790,6 +790,7 @@ int gk20a_mm_fb_flush(struct gk20a *g) | |||
790 | u32 data; | 790 | u32 data; |
791 | int ret = 0; | 791 | int ret = 0; |
792 | struct nvgpu_timeout timeout; | 792 | struct nvgpu_timeout timeout; |
793 | u32 retries; | ||
793 | 794 | ||
794 | gk20a_dbg_fn(""); | 795 | gk20a_dbg_fn(""); |
795 | 796 | ||
@@ -799,7 +800,12 @@ int gk20a_mm_fb_flush(struct gk20a *g) | |||
799 | return 0; | 800 | return 0; |
800 | } | 801 | } |
801 | 802 | ||
802 | nvgpu_timeout_init(g, &timeout, 100, NVGPU_TIMER_RETRY_TIMER); | 803 | retries = 100; |
804 | |||
805 | if (g->ops.mm.get_flush_retries) | ||
806 | retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_FB); | ||
807 | |||
808 | nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER); | ||
803 | 809 | ||
804 | nvgpu_mutex_acquire(&mm->l2_op_lock); | 810 | nvgpu_mutex_acquire(&mm->l2_op_lock); |
805 | 811 | ||
@@ -844,10 +850,14 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g) | |||
844 | { | 850 | { |
845 | u32 data; | 851 | u32 data; |
846 | struct nvgpu_timeout timeout; | 852 | struct nvgpu_timeout timeout; |
853 | u32 retries = 200; | ||
847 | 854 | ||
848 | trace_gk20a_mm_l2_invalidate(g->name); | 855 | trace_gk20a_mm_l2_invalidate(g->name); |
849 | 856 | ||
850 | nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER); | 857 | if (g->ops.mm.get_flush_retries) |
858 | retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_INV); | ||
859 | |||
860 | nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER); | ||
851 | 861 | ||
852 | /* Invalidate any clean lines from the L2 so subsequent reads go to | 862 | /* Invalidate any clean lines from the L2 so subsequent reads go to |
853 | DRAM. Dirty lines are not affected by this operation. */ | 863 | DRAM. Dirty lines are not affected by this operation. */ |
@@ -891,6 +901,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate) | |||
891 | struct mm_gk20a *mm = &g->mm; | 901 | struct mm_gk20a *mm = &g->mm; |
892 | u32 data; | 902 | u32 data; |
893 | struct nvgpu_timeout timeout; | 903 | struct nvgpu_timeout timeout; |
904 | u32 retries = 2000; | ||
894 | 905 | ||
895 | gk20a_dbg_fn(""); | 906 | gk20a_dbg_fn(""); |
896 | 907 | ||
@@ -898,7 +909,10 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate) | |||
898 | if (!g->power_on) | 909 | if (!g->power_on) |
899 | goto hw_was_off; | 910 | goto hw_was_off; |
900 | 911 | ||
901 | nvgpu_timeout_init(g, &timeout, 2000, NVGPU_TIMER_RETRY_TIMER); | 912 | if (g->ops.mm.get_flush_retries) |
913 | retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_FLUSH); | ||
914 | |||
915 | nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER); | ||
902 | 916 | ||
903 | nvgpu_mutex_acquire(&mm->l2_op_lock); | 917 | nvgpu_mutex_acquire(&mm->l2_op_lock); |
904 | 918 | ||
@@ -939,6 +953,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g) | |||
939 | struct mm_gk20a *mm = &g->mm; | 953 | struct mm_gk20a *mm = &g->mm; |
940 | u32 data; | 954 | u32 data; |
941 | struct nvgpu_timeout timeout; | 955 | struct nvgpu_timeout timeout; |
956 | u32 retries = 200; | ||
942 | 957 | ||
943 | gk20a_dbg_fn(""); | 958 | gk20a_dbg_fn(""); |
944 | 959 | ||
@@ -946,7 +961,10 @@ void gk20a_mm_cbc_clean(struct gk20a *g) | |||
946 | if (!g->power_on) | 961 | if (!g->power_on) |
947 | goto hw_was_off; | 962 | goto hw_was_off; |
948 | 963 | ||
949 | nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER); | 964 | if (g->ops.mm.get_flush_retries) |
965 | retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_CBC_CLEAN); | ||
966 | |||
967 | nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER); | ||
950 | 968 | ||
951 | nvgpu_mutex_acquire(&mm->l2_op_lock); | 969 | nvgpu_mutex_acquire(&mm->l2_op_lock); |
952 | 970 | ||
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 13698cd7..15876b10 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -315,6 +315,14 @@ static inline u64 __nv_gmmu_va_small_page_limit(void) | |||
315 | return ((u64)SZ_1G * 56); | 315 | return ((u64)SZ_1G * 56); |
316 | } | 316 | } |
317 | 317 | ||
318 | enum nvgpu_flush_op { | ||
319 | NVGPU_FLUSH_DEFAULT, | ||
320 | NVGPU_FLUSH_FB, | ||
321 | NVGPU_FLUSH_L2_INV, | ||
322 | NVGPU_FLUSH_L2_FLUSH, | ||
323 | NVGPU_FLUSH_CBC_CLEAN, | ||
324 | }; | ||
325 | |||
318 | enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm, | 326 | enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm, |
319 | u64 base, u64 size); | 327 | u64 base, u64 size); |
320 | enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size); | 328 | enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size); |