author     Dave Airlie <airlied@redhat.com>  2016-09-29 23:18:26 -0400
committer  Dave Airlie <airlied@redhat.com>  2016-09-29 23:21:02 -0400
commit     28a396545a2a5fbdffb2b661ed6c9b6820e28772 (patch)
tree       0cc4d3a0a4956cf02c5c2133e382688ed7a30678 /drivers/gpu/drm/amd/amdgpu
parent     b2d7e08903e62b9f504fe6a954425b737aa9ff96 (diff)
parent     a481daa88fd4d6b54f25348972bba10b5f6a84d0 (diff)
Merge branch 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux into drm-next
Some additional fixes for 4.9:
- The rest of Christian's GTT rework, which fixes a long-standing bug
  in the GPUVM code, among other things
- Changes to the pci shutdown callbacks for certain hypervisors
- Fix hpd interrupt storms on eDP panels that have the hpd interrupt
  enabled by the BIOS
- Misc cleanups and bug fixes
* 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux: (33 commits)
drm/radeon: always apply pci shutdown callbacks
drm/amdgpu: always apply pci shutdown callbacks (v2)
drm/amdgpu: improve VM PTE trace points
drm/amdgpu: fix GART_DEBUGFS define
drm/amdgpu: free userptrs even if GTT isn't bound
drm/amd/amdgpu: Various cleanups for DCEv6
drm/amdgpu: fix BO move offsets
drm/amdgpu: fix amdgpu_move_blit on 32bit systems
drm/amdgpu: fix gtt_mgr bo's offset
drm/amdgpu: fix initializing the VM BO shadow
drm/amdgpu: fix initializing the VM last eviction counter
drm/amdgpu: cleanup VM shadow BO unreferencing
drm/amdgpu: allocate GTT space for shadow VM page tables
drm/amdgpu: rename all rbo variable to abo v2
drm/amdgpu: remove unused member from struct amdgpu_bo
drm/amdgpu: add a custom GTT memory manager v2
drm/amdgpu/dce6: disable hpd on local panels
drm/amdgpu/dce8: disable hpd on local panels
drm/amdgpu/dce11: disable hpd on local panels
drm/amdgpu/dce10: disable hpd on local panels
...
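
Editor's note: the centerpiece of the GTT rework merged here is a two-phase GTT allocation scheme — a buffer first gets a placeholder node parked at an invalid offset, and real GTT address space is claimed only when the buffer is actually bound. The following is a minimal, standalone sketch of that idea; the names and the linear allocator are illustrative stand-ins, not the kernel API (the real code in amdgpu_gtt_mgr.c below uses drm_mm and AMDGPU_BO_INVALID_OFFSET):

    #include <stdint.h>
    #include <stdlib.h>

    #define INVALID_OFFSET ((uint64_t)-1)   /* stand-in for AMDGPU_BO_INVALID_OFFSET */

    struct node {
            uint64_t start;                 /* page offset in the GTT, or INVALID_OFFSET */
            uint64_t num_pages;
    };

    /* Phase 1 (cf. amdgpu_gtt_mgr_new): debit the space, don't pick an address. */
    static struct node *gtt_new(uint64_t *available, uint64_t num_pages)
    {
            struct node *n;

            if (*available < num_pages)
                    return NULL;            /* no space: caller evicts and retries */

            n = calloc(1, sizeof(*n));
            if (!n)
                    return NULL;
            *available -= num_pages;
            n->start = INVALID_OFFSET;
            n->num_pages = num_pages;
            return n;
    }

    /* Phase 2 (cf. amdgpu_gtt_mgr_alloc): assign real address space at bind time. */
    static void gtt_alloc(struct node *n, uint64_t *next_free)
    {
            if (n->start != INVALID_OFFSET)
                    return;                 /* already placed */
            n->start = *next_free;          /* real code: drm_mm_insert_node_in_range_generic() */
            *next_free += n->num_pages;
    }

Only bound buffers consume address space this way, which is what fixes the long-standing GPUVM eviction problems mentioned above.
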
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
29 files changed, 674 insertions, 297 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index d15e9b080ce1..9ec262d4b8a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -23,7 +23,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
 	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
-	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
+	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
+	amdgpu_gtt_mgr.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 72c68dbb9821..235f3902643a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -445,8 +445,6 @@ struct amdgpu_bo_va {
 #define AMDGPU_GEM_DOMAIN_MAX		0x3
 
 struct amdgpu_bo {
-	/* Protected by gem.mutex */
-	struct list_head		list;
 	/* Protected by tbo.reserved */
 	u32				prefered_domains;
 	u32				allowed_domains;
@@ -704,7 +702,7 @@ struct amdgpu_flip_work {
 	u32				target_vblank;
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
-	struct amdgpu_bo		*old_rbo;
+	struct amdgpu_bo		*old_abo;
 	struct fence			*excl;
 	unsigned			shared_count;
 	struct fence			**shared;
@@ -2417,7 +2415,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
 		       struct amdgpu_ring **out_ring);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b8412bcbad2a..b0f6e6957536 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -648,7 +648,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	if (!r && p->uf_entry.robj) {
 		struct amdgpu_bo *uf = p->uf_entry.robj;
 
-		r = amdgpu_ttm_bind(uf->tbo.ttm, &uf->tbo.mem);
+		r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
 		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
 	}
 
@@ -1192,7 +1192,7 @@ int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
 	for (i = 0; i < parser->bo_list->num_entries; i++) {
 		struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
 
-		r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
+		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
 		if (unlikely(r))
 			return r;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 17e13621fae9..e203e5561107 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -60,6 +60,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 			amd_sched_entity_fini(&adev->rings[j]->sched,
 					      &ctx->rings[j].entity);
 		kfree(ctx->fences);
+		ctx->fences = NULL;
 		return r;
 	}
 	return 0;
@@ -77,6 +78,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 		for (j = 0; j < amdgpu_sched_jobs; ++j)
 			fence_put(ctx->rings[i].fences[j]);
 	kfree(ctx->fences);
+	ctx->fences = NULL;
 
 	for (i = 0; i < adev->num_rings; i++)
 		amd_sched_entity_fini(&adev->rings[i]->sched,
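
Editor's note: the two `ctx->fences = NULL;` additions above (and the matching one in amdgpu_fence.c below) follow the usual kernel teardown idiom of clearing a pointer right after kfree(), so that a repeated or later teardown pass sees NULL — and kfree(NULL) is a harmless no-op — instead of freeing the same allocation twice. A tiny hedged sketch of the failure mode, in plain C with free() standing in for kfree():

    #include <stdlib.h>

    struct ctx {
            void *fences;
    };

    static void ctx_fini(struct ctx *c)
    {
            free(c->fences);        /* like kfree(), free(NULL) is a no-op */
            c->fences = NULL;       /* without this, a second ctx_fini() double-frees */
    }
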
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 9af8d3c7ae8b..083e2b429872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -123,17 +123,17 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 	int r;
 
 	/* unpin of the old buffer */
-	r = amdgpu_bo_reserve(work->old_rbo, false);
+	r = amdgpu_bo_reserve(work->old_abo, false);
 	if (likely(r == 0)) {
-		r = amdgpu_bo_unpin(work->old_rbo);
+		r = amdgpu_bo_unpin(work->old_abo);
 		if (unlikely(r != 0)) {
 			DRM_ERROR("failed to unpin buffer after flip\n");
 		}
-		amdgpu_bo_unreserve(work->old_rbo);
+		amdgpu_bo_unreserve(work->old_abo);
 	} else
 		DRM_ERROR("failed to reserve buffer after flip\n");
 
-	amdgpu_bo_unref(&work->old_rbo);
+	amdgpu_bo_unref(&work->old_abo);
 	kfree(work->shared);
 	kfree(work);
 }
@@ -150,7 +150,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *new_amdgpu_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_flip_work *work;
-	struct amdgpu_bo *new_rbo;
+	struct amdgpu_bo *new_abo;
 	unsigned long flags;
 	u64 tiling_flags;
 	u64 base;
@@ -173,28 +173,28 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	obj = old_amdgpu_fb->obj;
 
 	/* take a reference to the old object */
-	work->old_rbo = gem_to_amdgpu_bo(obj);
-	amdgpu_bo_ref(work->old_rbo);
+	work->old_abo = gem_to_amdgpu_bo(obj);
+	amdgpu_bo_ref(work->old_abo);
 
 	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
 	obj = new_amdgpu_fb->obj;
-	new_rbo = gem_to_amdgpu_bo(obj);
+	new_abo = gem_to_amdgpu_bo(obj);
 
 	/* pin the new buffer */
-	r = amdgpu_bo_reserve(new_rbo, false);
+	r = amdgpu_bo_reserve(new_abo, false);
 	if (unlikely(r != 0)) {
-		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+		DRM_ERROR("failed to reserve new abo buffer before flip\n");
 		goto cleanup;
 	}
 
-	r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
+	r = amdgpu_bo_pin_restricted(new_abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
 	if (unlikely(r != 0)) {
 		r = -EINVAL;
-		DRM_ERROR("failed to pin new rbo buffer before flip\n");
+		DRM_ERROR("failed to pin new abo buffer before flip\n");
 		goto unreserve;
 	}
 
-	r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+	r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
 					      &work->shared_count,
 					      &work->shared);
 	if (unlikely(r != 0)) {
@@ -202,8 +202,8 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
-	amdgpu_bo_unreserve(new_rbo);
+	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
+	amdgpu_bo_unreserve(new_abo);
 
 	work->base = base;
 	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
@@ -231,19 +231,19 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	return 0;
 
 pflip_cleanup:
-	if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
-		DRM_ERROR("failed to reserve new rbo in error path\n");
+	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
+		DRM_ERROR("failed to reserve new abo in error path\n");
 		goto cleanup;
 	}
 unpin:
-	if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
-		DRM_ERROR("failed to unpin new rbo in error path\n");
+	if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
+		DRM_ERROR("failed to unpin new abo in error path\n");
 	}
 unreserve:
-	amdgpu_bo_unreserve(new_rbo);
+	amdgpu_bo_unreserve(new_abo);
 
 cleanup:
-	amdgpu_bo_unref(&work->old_rbo);
+	amdgpu_bo_unref(&work->old_abo);
 	fence_put(work->excl);
 	for (i = 0; i < work->shared_count; ++i)
 		fence_put(work->shared[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7dbc7727e32b..dbe89fb25694 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -57,9 +57,10 @@
  * - 3.5.0 - Add support for new UVD_NO_OP register.
  * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
  * - 3.7.0 - Add support for VCE clock list packet
+ * - 3.8.0 - Add support for raster config init in the kernel
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	7
+#define KMS_DRIVER_MINOR	8
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -480,14 +481,12 @@ amdgpu_pci_remove(struct pci_dev *pdev)
 static void
 amdgpu_pci_shutdown(struct pci_dev *pdev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
-	struct amdgpu_device *adev = dev->dev_private;
-
 	/* if we are running in a VM, make sure the device
-	 * torn down properly on reboot/shutdown
+	 * is torn down properly on reboot/shutdown.
+	 * unfortunately we can't detect certain
+	 * hypervisors so just do this all the time.
 	 */
-	if (amdgpu_passthrough(adev))
-		amdgpu_pci_remove(pdev);
+	amdgpu_pci_remove(pdev);
 }
 
 static int amdgpu_pmops_suspend(struct device *dev)
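
Editor's note: amdgpu_pci_shutdown() above is invoked through the .shutdown hook of the driver's struct pci_driver. A sketch of how such a hook is wired up — the initializer is abbreviated and the field values are assumptions based on this driver, not quoted from it:

    static struct pci_driver amdgpu_kms_pci_driver = {
            .name     = "amdgpu",
            .probe    = amdgpu_pci_probe,
            .remove   = amdgpu_pci_remove,
            .shutdown = amdgpu_pci_shutdown,    /* called by the PCI core on reboot/shutdown */
            /* ... id_table, PM ops, etc. elided ... */
    };
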
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 107fbb2d2847..9fb8aa4d6bae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -115,14 +115,14 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
 
 static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
 {
-	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(gobj);
+	struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 	int ret;
 
-	ret = amdgpu_bo_reserve(rbo, false);
+	ret = amdgpu_bo_reserve(abo, false);
 	if (likely(ret == 0)) {
-		amdgpu_bo_kunmap(rbo);
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_kunmap(abo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 	drm_gem_object_unreference_unlocked(gobj);
 }
@@ -133,7 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 {
 	struct amdgpu_device *adev = rfbdev->adev;
 	struct drm_gem_object *gobj = NULL;
-	struct amdgpu_bo *rbo = NULL;
+	struct amdgpu_bo *abo = NULL;
 	bool fb_tiled = false; /* useful for testing */
 	u32 tiling_flags = 0;
 	int ret;
@@ -159,30 +159,30 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 			aligned_size);
 		return -ENOMEM;
 	}
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 
 	if (fb_tiled)
 		tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);
 
-	ret = amdgpu_bo_reserve(rbo, false);
+	ret = amdgpu_bo_reserve(abo, false);
 	if (unlikely(ret != 0))
 		goto out_unref;
 
 	if (tiling_flags) {
-		ret = amdgpu_bo_set_tiling_flags(rbo,
+		ret = amdgpu_bo_set_tiling_flags(abo,
 						 tiling_flags);
 		if (ret)
 			dev_err(adev->dev, "FB failed to set tiling flags\n");
 	}
 
 
-	ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
+	ret = amdgpu_bo_pin_restricted(abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
 	if (ret) {
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unreserve(abo);
 		goto out_unref;
 	}
-	ret = amdgpu_bo_kmap(rbo, NULL);
-	amdgpu_bo_unreserve(rbo);
+	ret = amdgpu_bo_kmap(abo, NULL);
+	amdgpu_bo_unreserve(abo);
 	if (ret) {
 		goto out_unref;
 	}
@@ -204,7 +204,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	struct drm_framebuffer *fb = NULL;
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
-	struct amdgpu_bo *rbo = NULL;
+	struct amdgpu_bo *abo = NULL;
 	int ret;
 	unsigned long tmp;
 
@@ -223,7 +223,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 		return ret;
 	}
 
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 
 	/* okay we have an object now allocate the framebuffer */
 	info = drm_fb_helper_alloc_fbi(helper);
@@ -246,7 +246,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	/* setup helper */
 	rfbdev->helper.fb = fb;
 
-	memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
+	memset_io(abo->kptr, 0x0, amdgpu_bo_size(abo));
 
 	strcpy(info->fix.id, "amdgpudrmfb");
 
@@ -255,11 +255,11 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &amdgpufb_ops;
 
-	tmp = amdgpu_bo_gpu_offset(rbo) - adev->mc.vram_start;
+	tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
 	info->fix.smem_start = adev->mc.aper_base + tmp;
-	info->fix.smem_len = amdgpu_bo_size(rbo);
-	info->screen_base = rbo->kptr;
-	info->screen_size = amdgpu_bo_size(rbo);
+	info->fix.smem_len = amdgpu_bo_size(abo);
+	info->screen_base = abo->kptr;
+	info->screen_size = amdgpu_bo_size(abo);
 
 	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
 
@@ -276,7 +276,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 
 	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
 	DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base);
-	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(rbo));
+	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
 	DRM_INFO("fb depth is %d\n", fb->depth);
 	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
 
@@ -286,7 +286,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
 	drm_fb_helper_release_fbi(helper);
 out_unref:
-	if (rbo) {
+	if (abo) {
 
 	}
 	if (fb && ret) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 0b109aebfec6..3a2e42f4b897 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -454,6 +454,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
 			fence_put(ring->fence_drv.fences[j]);
 		kfree(ring->fence_drv.fences);
+		ring->fence_drv.fences = NULL;
 		ring->fence_drv.initialized = false;
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 0feea347f680..21a1242fc13b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -238,7 +238,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
 	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 		adev->gart.pages[p] = NULL;
 #endif
 		page_base = adev->dummy_page.addr;
@@ -286,7 +286,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 		adev->gart.pages[p] = pagelist[i];
 #endif
 		if (adev->gart.ptr) {
@@ -331,7 +331,7 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
 	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
 		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
 
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	/* Allocate pages table */
 	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
 	if (adev->gart.pages == NULL) {
@@ -357,7 +357,7 @@ void amdgpu_gart_fini(struct amdgpu_device *adev)
 		amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
 	}
 	adev->gart.ready = false;
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	vfree(adev->gart.pages);
 	adev->gart.pages = NULL;
 #endif
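
Editor's note: the rename above is the entire "fix GART_DEBUGFS define" bug fix. Kconfig symbols reach the C preprocessor with a CONFIG_ prefix, so for the option DRM_AMDGPU_GART_DEBUGFS the only macro that can ever be defined is CONFIG_DRM_AMDGPU_GART_DEBUGFS; the old guard was always false and the per-page bookkeeping was silently compiled out. Schematically:

    /* For "config DRM_AMDGPU_GART_DEBUGFS" in Kconfig: */
    #ifdef CONFIG_AMDGPU_GART_DEBUGFS      /* wrong: never defined, dead code */
    #endif
    #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS  /* right: tracks the Kconfig option */
    #endif
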
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 88fbed2389c0..a7ea9a3b454e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -118,23 +118,23 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
  */
 int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
-	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = rbo->adev;
+	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = abo->adev;
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	r = amdgpu_bo_reserve(rbo, false);
+	r = amdgpu_bo_reserve(abo, false);
 	if (r)
 		return r;
 
-	bo_va = amdgpu_vm_bo_find(vm, rbo);
+	bo_va = amdgpu_vm_bo_find(vm, abo);
 	if (!bo_va) {
-		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
+		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
 	} else {
 		++bo_va->ref_count;
 	}
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_unreserve(abo);
 	return 0;
 }
 
@@ -528,7 +528,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		goto error_unreserve;
 
 	if (operation == AMDGPU_VA_OP_MAP)
-		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 
 error_unreserve:
 	ttm_eu_backoff_reservation(&ticket, &list);
@@ -547,7 +547,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct drm_gem_object *gobj;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	struct amdgpu_bo_va *bo_va;
 	struct ttm_validate_buffer tv, tv_pd;
 	struct ww_acquire_ctx ticket;
@@ -587,10 +587,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&duplicates);
-	tv.bo = &rbo->tbo;
+	tv.bo = &abo->tbo;
 	tv.shared = true;
 	list_add(&tv.head, &list);
 
@@ -604,7 +604,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return r;
 	}
 
-	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
+	bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
 	if (!bo_va) {
 		ttm_eu_backoff_reservation(&ticket, &list);
 		drm_gem_object_unreference_unlocked(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
new file mode 100644
index 000000000000..f86c84427778
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+struct amdgpu_gtt_mgr {
+	struct drm_mm mm;
+	spinlock_t lock;
+	uint64_t available;
+};
+
+/**
+ * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
+ *
+ * @man: TTM memory type manager
+ * @p_size: maximum size of GTT
+ *
+ * Allocate and initialize the GTT manager.
+ */
+static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
+			       unsigned long p_size)
+{
+	struct amdgpu_gtt_mgr *mgr;
+
+	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+	if (!mgr)
+		return -ENOMEM;
+
+	drm_mm_init(&mgr->mm, 0, p_size);
+	spin_lock_init(&mgr->lock);
+	mgr->available = p_size;
+	man->priv = mgr;
+	return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_fini - free and destroy GTT manager
+ *
+ * @man: TTM memory type manager
+ *
+ * Destroy and free the GTT manager, returns -EBUSY if ranges are still
+ * allocated inside it.
+ */
+static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+
+	spin_lock(&mgr->lock);
+	if (!drm_mm_clean(&mgr->mm)) {
+		spin_unlock(&mgr->lock);
+		return -EBUSY;
+	}
+
+	drm_mm_takedown(&mgr->mm);
+	spin_unlock(&mgr->lock);
+	kfree(mgr);
+	man->priv = NULL;
+	return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_alloc - allocate new ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Allocate the address space for a node.
+ */
+int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *tbo,
+			 const struct ttm_place *place,
+			 struct ttm_mem_reg *mem)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+	struct drm_mm_node *node = mem->mm_node;
+	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
+	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	unsigned long fpfn, lpfn;
+	int r;
+
+	if (node->start != AMDGPU_BO_INVALID_OFFSET)
+		return 0;
+
+	if (place)
+		fpfn = place->fpfn;
+	else
+		fpfn = 0;
+
+	if (place && place->lpfn)
+		lpfn = place->lpfn;
+	else
+		lpfn = man->size;
+
+	if (place && place->flags & TTM_PL_FLAG_TOPDOWN) {
+		sflags = DRM_MM_SEARCH_BELOW;
+		aflags = DRM_MM_CREATE_TOP;
+	}
+
+	spin_lock(&mgr->lock);
+	r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages,
+						mem->page_alignment, 0,
+						fpfn, lpfn, sflags, aflags);
+	spin_unlock(&mgr->lock);
+
+	if (!r) {
+		mem->start = node->start;
+		if (&tbo->mem == mem)
+			tbo->offset = (tbo->mem.start << PAGE_SHIFT) +
+				tbo->bdev->man[tbo->mem.mem_type].gpu_offset;
+	}
+
+	return r;
+}
+
+/**
+ * amdgpu_gtt_mgr_new - allocate a new node
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Dummy, allocate the node but no space for it yet.
+ */
+static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
+			      struct ttm_buffer_object *tbo,
+			      const struct ttm_place *place,
+			      struct ttm_mem_reg *mem)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+	struct drm_mm_node *node;
+	int r;
+
+	spin_lock(&mgr->lock);
+	if (mgr->available < mem->num_pages) {
+		spin_unlock(&mgr->lock);
+		return 0;
+	}
+	mgr->available -= mem->num_pages;
+	spin_unlock(&mgr->lock);
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->start = AMDGPU_BO_INVALID_OFFSET;
+	mem->mm_node = node;
+
+	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
+		r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
+		if (unlikely(r)) {
+			kfree(node);
+			mem->mm_node = NULL;
+		}
+	} else {
+		mem->start = node->start;
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @mem: TTM memory object
+ *
+ * Free the allocated GTT again.
+ */
+static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
+			       struct ttm_mem_reg *mem)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+	struct drm_mm_node *node = mem->mm_node;
+
+	if (!node)
+		return;
+
+	spin_lock(&mgr->lock);
+	if (node->start != AMDGPU_BO_INVALID_OFFSET)
+		drm_mm_remove_node(node);
+	mgr->available += mem->num_pages;
+	spin_unlock(&mgr->lock);
+
+	kfree(node);
+	mem->mm_node = NULL;
+}
+
+/**
+ * amdgpu_gtt_mgr_debug - dump the GTT table
+ *
+ * @man: TTM memory type manager
+ * @prefix: text prefix
+ *
+ * Dump the table content using printk.
+ */
+static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
+				 const char *prefix)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+
+	spin_lock(&mgr->lock);
+	drm_mm_debug_table(&mgr->mm, prefix);
+	spin_unlock(&mgr->lock);
+}
+
+const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
+	amdgpu_gtt_mgr_init,
+	amdgpu_gtt_mgr_fini,
+	amdgpu_gtt_mgr_new,
+	amdgpu_gtt_mgr_del,
+	amdgpu_gtt_mgr_debug
+};
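
Editor's note on the design above: amdgpu_gtt_mgr_new() only debits the available page count and hands back a node parked at AMDGPU_BO_INVALID_OFFSET (unless the placement demands a fixed range up front); the drm_mm range is claimed later by amdgpu_gtt_mgr_alloc(), called from amdgpu_ttm_bind() in the amdgpu_ttm.c hunks further down. A hypothetical trace of one 16-page allocation against an empty manager, for illustration only:

    /* after amdgpu_gtt_mgr_new():   mgr->available -= 16;
     *                               node->start == AMDGPU_BO_INVALID_OFFSET
     * after amdgpu_gtt_mgr_alloc(): node placed by drm_mm, mem->start filled in,
     *                               tbo->offset updated for the GPU
     * after amdgpu_gtt_mgr_del():   node freed, mgr->available += 16
     */
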
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index c93a92a840ea..34bab616588c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -158,8 +158,8 @@ static const struct i2c_algorithm amdgpu_atombios_i2c_algo = {
 };
 
 struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
-					  struct amdgpu_i2c_bus_rec *rec,
+					  const struct amdgpu_i2c_bus_rec *rec,
 					  const char *name)
 {
 	struct amdgpu_i2c_chan *i2c;
 	int ret;
@@ -249,8 +249,8 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev)
 
 /* Add additional buses */
 void amdgpu_i2c_add(struct amdgpu_device *adev,
-		    struct amdgpu_i2c_bus_rec *rec,
+		    const struct amdgpu_i2c_bus_rec *rec,
 		    const char *name)
 {
 	struct drm_device *dev = adev->ddev;
 	int i;
@@ -266,7 +266,7 @@ void amdgpu_i2c_add(struct amdgpu_device *adev,
 /* looks up bus based on id */
 struct amdgpu_i2c_chan *
 amdgpu_i2c_lookup(struct amdgpu_device *adev,
-		  struct amdgpu_i2c_bus_rec *i2c_bus)
+		  const struct amdgpu_i2c_bus_rec *i2c_bus)
 {
 	int i;
 
@@ -336,7 +336,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
 
 /* ddc router switching */
 void
-amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector)
+amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
 {
 	u8 val;
 
@@ -365,7 +365,7 @@ amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector)
 
 /* clock/data router switching */
 void
-amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector)
+amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *amdgpu_connector)
 {
 	u8 val;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
index d81e19b53973..63c2ff7499e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
@@ -25,20 +25,20 @@
 #define __AMDGPU_I2C_H__
 
 struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
-					  struct amdgpu_i2c_bus_rec *rec,
+					  const struct amdgpu_i2c_bus_rec *rec,
 					  const char *name);
 void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c);
 void amdgpu_i2c_init(struct amdgpu_device *adev);
 void amdgpu_i2c_fini(struct amdgpu_device *adev);
 void amdgpu_i2c_add(struct amdgpu_device *adev,
-		    struct amdgpu_i2c_bus_rec *rec,
+		    const struct amdgpu_i2c_bus_rec *rec,
 		    const char *name);
 struct amdgpu_i2c_chan *
 amdgpu_i2c_lookup(struct amdgpu_device *adev,
-		  struct amdgpu_i2c_bus_rec *i2c_bus);
+		  const struct amdgpu_i2c_bus_rec *i2c_bus);
 void
-amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector);
+amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *connector);
 void
-amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector);
+amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *connector);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 428aa00025e4..aa074fac0c7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -203,10 +203,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 	placement->busy_placement = places;
 }
 
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
-	amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
-				  rbo->placements, domain, rbo->flags);
+	amdgpu_ttm_placement_init(abo->adev, &abo->placement,
+				  abo->placements, domain, abo->flags);
 }
 
 static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -352,7 +352,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 		return r;
 	}
 	bo->adev = adev;
-	INIT_LIST_HEAD(&bo->list);
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
 	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@@ -673,7 +672,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 		dev_err(bo->adev->dev, "%p pin failed\n", bo);
 		goto error;
 	}
-	r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
+	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
 	if (unlikely(r)) {
 		dev_err(bo->adev->dev, "%p bind failed\n", bo);
 		goto error;
@@ -850,23 +849,23 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *new_mem)
 {
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 
 	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
 		return;
 
-	rbo = container_of(bo, struct amdgpu_bo, tbo);
-	amdgpu_vm_bo_invalidate(rbo->adev, rbo);
+	abo = container_of(bo, struct amdgpu_bo, tbo);
+	amdgpu_vm_bo_invalidate(abo->adev, abo);
 
 	/* update statistics */
 	if (!new_mem)
 		return;
 
 	/* move_notify is called before move happens */
-	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
+	amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
 
-	trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
+	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 777f11b63b4c..e1fa8731d1e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -264,6 +264,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 			      (void **)&ring->ring);
 
 	amdgpu_debugfs_ring_fini(ring);
+
+	ring->adev->rings[ring->idx] = NULL;
 }
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 0d8d65eb46cd..067e5e683bb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -247,7 +247,7 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
 	    TP_ARGS(mapping)
 );
 
-TRACE_EVENT(amdgpu_vm_set_page,
+TRACE_EVENT(amdgpu_vm_set_ptes,
 	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
 		     uint32_t incr, uint32_t flags),
 	    TP_ARGS(pe, addr, count, incr, flags),
@@ -271,6 +271,24 @@ TRACE_EVENT(amdgpu_vm_set_page,
 		      __entry->flags, __entry->count)
 );
 
+TRACE_EVENT(amdgpu_vm_copy_ptes,
+	    TP_PROTO(uint64_t pe, uint64_t src, unsigned count),
+	    TP_ARGS(pe, src, count),
+	    TP_STRUCT__entry(
+			     __field(u64, pe)
+			     __field(u64, src)
+			     __field(u32, count)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pe = pe;
+			   __entry->src = src;
+			   __entry->count = count;
+			   ),
+	    TP_printk("pe=%010Lx, src=%010Lx, count=%u",
+		      __entry->pe, __entry->src, __entry->count)
+);
+
 TRACE_EVENT(amdgpu_vm_flush,
 	    TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
 	    TP_ARGS(pd_addr, ring, id),
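
Editor's note: each TRACE_EVENT(name, ...) definition expands into a trace_name() function for the driver to call at the instrumented site, so the new tracepoint above would be emitted from the VM code roughly as follows (call site paraphrased, not quoted from amdgpu_vm.c):

    trace_amdgpu_vm_copy_ptes(pe, src, count);
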
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 160a094e1a93..887483b8b818 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -160,7 +160,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
160 | man->default_caching = TTM_PL_FLAG_CACHED; | 160 | man->default_caching = TTM_PL_FLAG_CACHED; |
161 | break; | 161 | break; |
162 | case TTM_PL_TT: | 162 | case TTM_PL_TT: |
163 | man->func = &ttm_bo_manager_func; | 163 | man->func = &amdgpu_gtt_mgr_func; |
164 | man->gpu_offset = adev->mc.gtt_start; | 164 | man->gpu_offset = adev->mc.gtt_start; |
165 | man->available_caching = TTM_PL_MASK_CACHING; | 165 | man->available_caching = TTM_PL_MASK_CACHING; |
166 | man->default_caching = TTM_PL_FLAG_CACHED; | 166 | man->default_caching = TTM_PL_FLAG_CACHED; |
@@ -195,7 +195,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
195 | static void amdgpu_evict_flags(struct ttm_buffer_object *bo, | 195 | static void amdgpu_evict_flags(struct ttm_buffer_object *bo, |
196 | struct ttm_placement *placement) | 196 | struct ttm_placement *placement) |
197 | { | 197 | { |
198 | struct amdgpu_bo *rbo; | 198 | struct amdgpu_bo *abo; |
199 | static struct ttm_place placements = { | 199 | static struct ttm_place placements = { |
200 | .fpfn = 0, | 200 | .fpfn = 0, |
201 | .lpfn = 0, | 201 | .lpfn = 0, |
@@ -210,43 +210,43 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, | |||
210 | placement->num_busy_placement = 1; | 210 | placement->num_busy_placement = 1; |
211 | return; | 211 | return; |
212 | } | 212 | } |
213 | rbo = container_of(bo, struct amdgpu_bo, tbo); | 213 | abo = container_of(bo, struct amdgpu_bo, tbo); |
214 | switch (bo->mem.mem_type) { | 214 | switch (bo->mem.mem_type) { |
215 | case TTM_PL_VRAM: | 215 | case TTM_PL_VRAM: |
216 | if (rbo->adev->mman.buffer_funcs_ring->ready == false) { | 216 | if (abo->adev->mman.buffer_funcs_ring->ready == false) { |
217 | amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU); | 217 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); |
218 | } else { | 218 | } else { |
219 | amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT); | 219 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); |
220 | for (i = 0; i < rbo->placement.num_placement; ++i) { | 220 | for (i = 0; i < abo->placement.num_placement; ++i) { |
221 | if (!(rbo->placements[i].flags & | 221 | if (!(abo->placements[i].flags & |
222 | TTM_PL_FLAG_TT)) | 222 | TTM_PL_FLAG_TT)) |
223 | continue; | 223 | continue; |
224 | 224 | ||
225 | if (rbo->placements[i].lpfn) | 225 | if (abo->placements[i].lpfn) |
226 | continue; | 226 | continue; |
227 | 227 | ||
228 | /* set an upper limit to force directly | 228 | /* set an upper limit to force directly |
229 | * allocating address space for the BO. | 229 | * allocating address space for the BO. |
230 | */ | 230 | */ |
231 | rbo->placements[i].lpfn = | 231 | abo->placements[i].lpfn = |
232 | rbo->adev->mc.gtt_size >> PAGE_SHIFT; | 232 | abo->adev->mc.gtt_size >> PAGE_SHIFT; |
233 | } | 233 | } |
234 | } | 234 | } |
235 | break; | 235 | break; |
236 | case TTM_PL_TT: | 236 | case TTM_PL_TT: |
237 | default: | 237 | default: |
238 | amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU); | 238 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); |
239 | } | 239 | } |
240 | *placement = rbo->placement; | 240 | *placement = abo->placement; |
241 | } | 241 | } |
242 | 242 | ||
243 | static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) | 243 | static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) |
244 | { | 244 | { |
245 | struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo); | 245 | struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo); |
246 | 246 | ||
247 | if (amdgpu_ttm_tt_get_usermm(bo->ttm)) | 247 | if (amdgpu_ttm_tt_get_usermm(bo->ttm)) |
 		return -EPERM;
-	return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
 					  filp->private_data);
 }
 
@@ -273,16 +273,15 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 	adev = amdgpu_get_adev(bo->bdev);
 	ring = adev->mman.buffer_funcs_ring;
-	old_start = (u64)old_mem->start << PAGE_SHIFT;
-	new_start = (u64)new_mem->start << PAGE_SHIFT;
 
 	switch (old_mem->mem_type) {
 	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo->ttm, old_mem);
+		r = amdgpu_ttm_bind(bo, old_mem);
 		if (r)
 			return r;
 
 	case TTM_PL_VRAM:
+		old_start = (u64)old_mem->start << PAGE_SHIFT;
 		old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
 		break;
 	default:
@@ -291,11 +290,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	}
 	switch (new_mem->mem_type) {
 	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo->ttm, new_mem);
+		r = amdgpu_ttm_bind(bo, new_mem);
 		if (r)
 			return r;
 
 	case TTM_PL_VRAM:
+		new_start = (u64)new_mem->start << PAGE_SHIFT;
 		new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
 		break;
 	default:
@@ -676,7 +676,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 			return r;
 		}
 	}
-	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 	if (!ttm->num_pages) {
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
 		     ttm->num_pages, bo_mem, ttm);
@@ -697,16 +696,25 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
 	return gtt && !list_empty(&gtt->list);
 }
 
-int amdgpu_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
 {
-	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	struct ttm_tt *ttm = bo->ttm;
+	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
 	uint32_t flags;
 	int r;
 
 	if (!ttm || amdgpu_ttm_is_bound(ttm))
 		return 0;
 
+	r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo,
+				 NULL, bo_mem);
+	if (r) {
+		DRM_ERROR("Failed to allocate GTT address space (%d)\n", r);
+		return r;
+	}
+
 	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
+	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
 			     ttm->pages, gtt->ttm.dma_address, flags);
 
@@ -750,6 +758,9 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
+	if (gtt->userptr)
+		amdgpu_ttm_tt_unpin_userptr(ttm);
+
 	if (!amdgpu_ttm_is_bound(ttm))
 		return 0;
 
@@ -757,9 +768,6 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 	if (gtt->adev->gart.ready)
 		amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
 
-	if (gtt->userptr)
-		amdgpu_ttm_tt_unpin_userptr(ttm);
-
 	spin_lock(&gtt->adev->gtt_list_lock);
 	list_del_init(&gtt->list);
 	spin_unlock(&gtt->adev->gtt_list_lock);
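The net effect of the amdgpu_ttm.c hunks: binding now takes the buffer object rather than the ttm_tt, because a GTT placement no longer implies a GTT address. A condensed sketch of the resulting flow, using only interfaces visible in this diff (illustrative, not the literal kernel function):

/* Sketch: the reworked bind path first reserves a GTT address range via the
 * new manager, and only then programs the GART; before the alloc call,
 * bo_mem->start is not yet a usable GPU address. */
static int example_ttm_bind(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	uint32_t flags;
	int r;

	r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo, NULL, bo_mem);
	if (r)
		return r;	/* no address space, nothing to bind */

	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, bo->ttm, bo_mem);
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	return amdgpu_gart_bind(gtt->adev, gtt->offset, bo->ttm->num_pages,
				bo->ttm->pages, gtt->ttm.dma_address, flags);
}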
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 3ee825f4de28..9812c805326c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -65,6 +65,13 @@ struct amdgpu_mman {
 	struct amdgpu_mman_lru			guard;
 };
 
+extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
+
+int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *tbo,
+			 const struct ttm_place *place,
+			 struct ttm_mem_reg *mem);
+
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 		       uint64_t src_offset,
 		       uint64_t dst_offset,
@@ -78,6 +85,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
-int amdgpu_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
 
 #endif
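amdgpu_gtt_mgr_func is the function table exported by the new amdgpu_gtt_mgr.o object (added to the Makefile earlier in this series); it is plugged into TTM in place of the default range manager. A hedged sketch of the hookup, assuming the standard 4.9-era init_mem_type driver callback, which this diff does not show:

/* Assumption: standard ttm_bo_driver::init_mem_type signature of this era. */
static int example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				 struct ttm_mem_type_manager *man)
{
	if (type == TTM_PL_TT) {
		/* custom allocator: lets BOs be placed in GTT without
		 * assigning an address until amdgpu_ttm_bind() */
		man->func = &amdgpu_gtt_mgr_func;
	}
	return 0;
}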
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index cee7bc9a2314..e3281cacc586 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -351,12 +351,12 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 	}
 }
 
-static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
 {
 	int i;
-	for (i = 0; i < rbo->placement.num_placement; ++i) {
-		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
-		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+	for (i = 0; i < abo->placement.num_placement; ++i) {
+		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
+		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
 	}
 }
 
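The rename aside, the helper is worth a note: it clamps every placement so UVD buffers stay inside the 256 MB segment the firmware can address. The same clamp for a single placement, in standalone form (illustrative):

/* Clamp one placement to the UVD-addressable window: page frames
 * [0, 256 MB >> PAGE_SHIFT). */
static void example_clamp_to_uvd_segment(struct ttm_place *place)
{
	place->fpfn = 0;
	place->lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
}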
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 06b94c13c2c9..3b03558ddb01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -210,6 +210,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
  */
 int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 {
+	unsigned i;
+
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;
 
@@ -217,8 +219,8 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 
 	amdgpu_bo_unref(&adev->vce.vcpu_bo);
 
-	amdgpu_ring_fini(&adev->vce.ring[0]);
-	amdgpu_ring_fini(&adev->vce.ring[1]);
+	for (i = 0; i < adev->vce.num_rings; i++)
+		amdgpu_ring_fini(&adev->vce.ring[i]);
 
 	release_firmware(adev->vce.fw);
 	mutex_destroy(&adev->vce.idle_mutex);
@@ -303,9 +305,12 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
 		container_of(work, struct amdgpu_device, vce.idle_work.work);
+	unsigned i, count = 0;
+
+	for (i = 0; i < adev->vce.num_rings; i++)
+		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
 
-	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
-	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
+	if (count == 0) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_vce(adev, false);
 		} else {
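Both VCE hunks replace hard-coded ring[0]/ring[1] accesses with loops over adev->vce.num_rings, so adding another ring needs no further edits here. The idle check reduces to one accumulated fence count (a sketch of the same test factored as a predicate):

/* True only when no VCE ring has fences still outstanding. */
static bool example_vce_rings_idle(struct amdgpu_device *adev)
{
	unsigned i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

	return count == 0;
}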
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a6a48ed9562e..bc4b22c6fc08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -487,7 +487,7 @@ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
 			       unsigned count, uint32_t incr,
 			       uint32_t flags)
 {
-	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
+	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
 
 	if (count < 3) {
 		amdgpu_vm_write_pte(params->adev, params->ib, pe,
@@ -516,10 +516,12 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
 				unsigned count, uint32_t incr,
 				uint32_t flags)
 {
-	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
+	uint64_t src = (params->src + (addr >> 12) * 8);
 
-	amdgpu_vm_copy_pte(params->adev, params->ib, pe,
-			   (params->src + (addr >> 12) * 8), count);
+
+	trace_amdgpu_vm_copy_ptes(pe, src, count);
+
+	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
 }
 
 /**
@@ -552,6 +554,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		goto error;
 
+	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+	if (r)
+		goto error;
+
 	addr = amdgpu_bo_gpu_offset(bo);
 	entries = amdgpu_bo_size(bo) / 8;
 
@@ -625,6 +631,11 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 
 	if (!pd)
 		return 0;
+
+	r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
+	if (r)
+		return r;
+
 	pd_addr = amdgpu_bo_gpu_offset(pd);
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
@@ -650,6 +661,14 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 		if (bo == NULL)
 			continue;
 
+		if (bo->shadow) {
+			struct amdgpu_bo *shadow = bo->shadow;
+
+			r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+			if (r)
+				return r;
+		}
+
 		pt = amdgpu_bo_gpu_offset(bo);
 		if (!shadow) {
 			if (vm->page_tables[pt_idx].addr == pt)
@@ -1000,6 +1019,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 					    AMDGPU_GPU_PAGE_SIZE);
 			pte[i] |= flags;
 		}
+		addr = 0;
 	}
 
 	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
@@ -1412,10 +1432,20 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 		r = amdgpu_vm_clear_bo(adev, vm, pt);
 		if (r) {
+			amdgpu_bo_unref(&pt->shadow);
 			amdgpu_bo_unref(&pt);
 			goto error_free;
 		}
 
+		if (pt->shadow) {
+			r = amdgpu_vm_clear_bo(adev, vm, pt->shadow);
+			if (r) {
+				amdgpu_bo_unref(&pt->shadow);
+				amdgpu_bo_unref(&pt);
+				goto error_free;
+			}
+		}
+
 		entry->robj = pt;
 		entry->priority = 0;
 		entry->tv.bo = &entry->robj->tbo;
@@ -1610,14 +1640,25 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		goto error_free_page_directory;
 
 	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
-	amdgpu_bo_unreserve(vm->page_directory);
 	if (r)
-		goto error_free_page_directory;
+		goto error_unreserve;
+
+	if (vm->page_directory->shadow) {
+		r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory->shadow);
+		if (r)
+			goto error_unreserve;
+	}
+
 	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+	amdgpu_bo_unreserve(vm->page_directory);
 
 	return 0;
 
+error_unreserve:
+	amdgpu_bo_unreserve(vm->page_directory);
+
 error_free_page_directory:
+	amdgpu_bo_unref(&vm->page_directory->shadow);
 	amdgpu_bo_unref(&vm->page_directory);
 	vm->page_directory = NULL;
 
@@ -1660,15 +1701,17 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	}
 
 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
-		if (vm->page_tables[i].entry.robj &&
-		    vm->page_tables[i].entry.robj->shadow)
-			amdgpu_bo_unref(&vm->page_tables[i].entry.robj->shadow);
-		amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
+		struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+
+		if (!pt)
+			continue;
+
+		amdgpu_bo_unref(&pt->shadow);
+		amdgpu_bo_unref(&pt);
 	}
 	drm_free_large(vm->page_tables);
 
-	if (vm->page_directory->shadow)
-		amdgpu_bo_unref(&vm->page_directory->shadow);
+	amdgpu_bo_unref(&vm->page_directory->shadow);
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
 }
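A pattern runs through all the amdgpu_vm.c hunks: any code that takes a page table's GPU offset must bind the BO (and its shadow) first, and every error or teardown path unrefs the shadow unconditionally — safe because, as the fini hunk relies on, amdgpu_bo_unref() tolerates a NULL pointer. A compact sketch of the clear-with-shadow idiom (illustrative factoring, not a kernel function):

/* Clear a freshly allocated page table and its optional shadow; on any
 * failure release both (unref of a NULL shadow is a no-op). */
static int example_clear_pt_and_shadow(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm,
				       struct amdgpu_bo **pt)
{
	int r = amdgpu_vm_clear_bo(adev, vm, *pt);

	if (!r && (*pt)->shadow)
		r = amdgpu_vm_clear_bo(adev, vm, (*pt)->shadow);
	if (r) {
		amdgpu_bo_unref(&(*pt)->shadow);
		amdgpu_bo_unref(pt);
	}
	return r;
}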
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9d38fe0519e8..613ebb7ed50f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -427,16 +427,6 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
-		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-			/* don't try to enable hpd on eDP or LVDS avoid breaking the
-			 * aux dp channel on imac and help (but not completely fix)
-			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
-			 * also avoid interrupt storms during dpms.
-			 */
-			continue;
-		}
-
 		switch (amdgpu_connector->hpd.hpd) {
 		case AMDGPU_HPD_1:
 			idx = 0;
@@ -460,6 +450,19 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
 			continue;
 		}
 
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS avoid breaking the
+			 * aux dp channel on imac and help (but not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * also avoid interrupt storms during dpms.
+			 */
+			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
+			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+			continue;
+		}
+
 		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
 		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
@@ -2104,7 +2107,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
@@ -2132,23 +2135,23 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 
@@ -2323,12 +2326,12 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2808,16 +2811,16 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */
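Note the ordering change in hpd_init: the eDP/LVDS check now runs after the pin index is resolved, so instead of skipping the pin entirely (leaving a BIOS-enabled interrupt armed), the interrupt is explicitly disabled. On DCE10 each pin's interrupt control is addressed through hpd_offsets[], so the disable is one read-modify-write, factored out of the hunk above for clarity:

/* Disable the hotplug interrupt for one HPD pin (DCE10 register layout,
 * names exactly as in the hunk above). */
static void example_hpd_int_disable(struct amdgpu_device *adev, int idx)
{
	u32 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);

	tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
}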
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index b93eba077950..678f5eb6cbc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -443,16 +443,6 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
-		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-			/* don't try to enable hpd on eDP or LVDS avoid breaking the
-			 * aux dp channel on imac and help (but not completely fix)
-			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
-			 * also avoid interrupt storms during dpms.
-			 */
-			continue;
-		}
-
 		switch (amdgpu_connector->hpd.hpd) {
 		case AMDGPU_HPD_1:
 			idx = 0;
@@ -476,6 +466,19 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
 			continue;
 		}
 
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS avoid breaking the
+			 * aux dp channel on imac and help (but not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * also avoid interrupt storms during dpms.
+			 */
+			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
+			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+			continue;
+		}
+
 		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
 		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
@@ -2085,7 +2088,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
@@ -2113,23 +2116,23 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 
@@ -2304,12 +2307,12 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2824,16 +2827,16 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index eb8f96a61491..b948d6cb1399 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -375,15 +375,6 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
-		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-			/* don't try to enable hpd on eDP or LVDS avoid breaking the
-			 * aux dp channel on imac and help (but not completely fix)
-			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
-			 * also avoid interrupt storms during dpms.
-			 */
-			continue;
-		}
 		switch (amdgpu_connector->hpd.hpd) {
 		case AMDGPU_HPD_1:
 			WREG32(DC_HPD1_CONTROL, tmp);
@@ -406,6 +397,45 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 		default:
 			break;
 		}
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS avoid breaking the
+			 * aux dp channel on imac and help (but not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * also avoid interrupt storms during dpms.
+			 */
+			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+			switch (amdgpu_connector->hpd.hpd) {
+			case AMDGPU_HPD_1:
+				dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_2:
+				dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_3:
+				dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_4:
+				dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_5:
+				dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_6:
+				dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
+				break;
+			default:
+				continue;
+			}
+
+			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+			dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
+			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+			continue;
+		}
+
 		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
@@ -1475,10 +1505,7 @@ static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
 	u32 vga_control;
 
 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
-	if (enable)
-		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
-	else
-		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
+	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
 }
 
 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
@@ -1487,10 +1514,7 @@ static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 
-	if (enable)
-		WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
-	else
-		WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
 }
 
 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
@@ -1503,7 +1527,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels, pipe_config;
 	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
@@ -1520,8 +1544,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (atomic) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
 		target_fb = fb;
-	}
-	else {
+	} else {
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
 		target_fb = crtc->primary->fb;
 	}
@@ -1530,23 +1553,23 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
-	if (atomic)
-		fb_location = amdgpu_bo_gpu_offset(rbo);
-	else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+	if (atomic) {
+		fb_location = amdgpu_bo_gpu_offset(abo);
+	} else {
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	switch (target_fb->pixel_format) {
 	case DRM_FORMAT_C8:
@@ -1633,8 +1656,9 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 		fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
 		fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
 		fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
-	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1)
+	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
 		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+	}
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 	fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);
@@ -1698,12 +1722,12 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -1798,26 +1822,13 @@ static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
 
 	switch (amdgpu_encoder->encoder_id) {
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-		if (dig->linkb)
-			return 1;
-		else
-			return 0;
-		break;
+		return dig->linkb ? 1 : 0;
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-		if (dig->linkb)
-			return 3;
-		else
-			return 2;
-		break;
+		return dig->linkb ? 3 : 2;
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-		if (dig->linkb)
-			return 5;
-		else
-			return 4;
-		break;
+		return dig->linkb ? 5 : 4;
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
 		return 6;
-		break;
 	default:
 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
 		return 0;
@@ -2052,7 +2063,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
 				    amdgpu_crtc->cursor_y);
 
 		dce_v6_0_show_cursor(crtc);
-
 		dce_v6_0_lock_cursor(crtc, false);
 	}
 }
@@ -2151,16 +2161,16 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */
@@ -2375,15 +2385,11 @@ static int dce_v6_0_sw_init(void *handle)
 	adev->mode_info.mode_config_initialized = true;
 
 	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
-
 	adev->ddev->mode_config.async_page_flip = true;
-
 	adev->ddev->mode_config.max_width = 16384;
 	adev->ddev->mode_config.max_height = 16384;
-
 	adev->ddev->mode_config.preferred_depth = 24;
 	adev->ddev->mode_config.prefer_shadow = 1;
-
 	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
 
 	r = amdgpu_modeset_create_props(adev);
@@ -2429,7 +2435,6 @@ static int dce_v6_0_sw_fini(void *handle)
 	drm_kms_helper_poll_fini(adev->ddev);
 
 	dce_v6_0_audio_fini(adev);
-
 	dce_v6_0_afmt_fini(adev);
 
 	drm_mode_config_cleanup(adev->ddev);
@@ -3057,7 +3062,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
 	}
 
 	amdgpu_encoder->enc_priv = NULL;
-
 	amdgpu_encoder->encoder_enum = encoder_enum;
 	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
 	amdgpu_encoder->devices = supported_device;
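Besides the HPD fix (DCE6 has no hpd_offsets[] table, hence the explicit per-pin register switch), most of these hunks are mechanical cleanups: if/else pairs that write the same register with the bit set or clear collapse into one conditional write. The shape of that simplification, standalone:

/* Before: four lines and two WREG32 call sites; after: one conditional
 * write (as in dce_v6_0_vga_enable/dce_v6_0_grph_enable above). */
static void example_conditional_enable(struct amdgpu_device *adev,
				       u32 reg, bool enable)
{
	u32 val = RREG32(reg) & ~1;

	WREG32(reg, val | (enable ? 1 : 0));
}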
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index a7decf977b5c..5966166ec94c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -397,15 +397,6 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
-		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-			/* don't try to enable hpd on eDP or LVDS avoid breaking the
-			 * aux dp channel on imac and help (but not completely fix)
-			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
-			 * also avoid interrupt storms during dpms.
-			 */
-			continue;
-		}
 		switch (amdgpu_connector->hpd.hpd) {
 		case AMDGPU_HPD_1:
 			WREG32(mmDC_HPD1_CONTROL, tmp);
@@ -428,6 +419,45 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 		default:
 			break;
 		}
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS avoid breaking the
+			 * aux dp channel on imac and help (but not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * also avoid interrupt storms during dpms.
+			 */
+			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+			switch (amdgpu_connector->hpd.hpd) {
+			case AMDGPU_HPD_1:
+				dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_2:
+				dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_3:
+				dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_4:
+				dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_5:
+				dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_6:
+				dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
+				break;
+			default:
+				continue;
+			}
+
+			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+			dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+			continue;
+		}
+
 		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
@@ -1992,7 +2022,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
@@ -2020,23 +2050,23 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 
@@ -2192,12 +2222,12 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2669,16 +2699,16 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */
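Same fix as on DCE6/10/11, but DCE8 clears the enable bit with the register's named mask instead of REG_SET_FIELD. The hunk reuses the HPD1 mask for every pin, which works because the INT_EN bit occupies the same position in each DC_HPDx_INT_CONTROL register; factored standalone:

/* Clear the interrupt-enable bit in one DC_HPDx_INT_CONTROL register;
 * the HPD1 mask applies to all pins since the bit layout is identical. */
static void example_hpd8_int_disable(struct amdgpu_device *adev, u32 reg)
{
	u32 tmp = RREG32(reg);

	tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
	WREG32(reg, tmp);
}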
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 30badd261269..a754f2522ba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -229,16 +229,16 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index e2db4a734676..8bd08925b370 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3023,9 +3023,12 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
 	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
 	{ 0, 0, 0, 0 },
 };
 
@@ -3486,6 +3489,16 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		}
 		++p;
 	}
+	/* limit mclk on all R7 370 parts for stability */
+	if (adev->pdev->device == 0x6811 &&
+	    adev->pdev->revision == 0x81)
+		max_mclk = 120000;
+	/* limit sclk/mclk on Jet parts for stability */
+	if (adev->pdev->device == 0x6665 &&
+	    adev->pdev->revision == 0xc3) {
+		max_sclk = 75000;
+		max_mclk = 80000;
+	}
 
 	if (rps->vce_active) {
 		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
@@ -4580,7 +4593,7 @@ static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
 				    &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
 		si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
 
-		table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+		table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
 			cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
 
 		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
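The quirk entries are matched against PCI device and subsystem IDs to clamp engine/memory clocks on boards with unstable top states; the new unconditional device/revision checks cover whole families (all R7 370 rebrands, Jet parts) that a subsystem-ID table cannot enumerate. A sketch of the table walk that the '++p' context above belongs to — the struct field names are inferred from the entries, so treat them as assumptions rather than the exact kernel definitions:

/* Hypothetical field names; clock values appear to be in the driver's
 * 10 kHz units (120000 == 1200 MHz), and 0 means "no limit". */
struct example_dpm_quirk {
	u32 chip_vendor, chip_device, subsys_vendor, subsys_device;
	u32 max_sclk, max_mclk;
};

static void example_apply_quirk(struct amdgpu_device *adev,
				const struct example_dpm_quirk *p,
				u32 *max_sclk, u32 *max_mclk)
{
	for (; p->chip_device != 0; ++p) {
		if (adev->pdev->vendor == p->chip_vendor &&
		    adev->pdev->device == p->chip_device &&
		    adev->pdev->subsystem_vendor == p->subsys_vendor &&
		    adev->pdev->subsystem_device == p->subsys_device) {
			*max_sclk = p->max_sclk;
			*max_mclk = p->max_mclk;
			break;
		}
	}
}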
diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
index ee4b846e58fa..d2930eceaf3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
+++ b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
@@ -194,6 +194,7 @@ typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
 #define SISLANDS_SMC_VOLTAGEMASK_VDDC  0
 #define SISLANDS_SMC_VOLTAGEMASK_MVDD  1
 #define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
 #define SISLANDS_SMC_VOLTAGEMASK_MAX   4
 
 struct SISLANDS_SMC_VOLTAGEMASKTABLE
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index a6b4e27bee89..3f6db4ec0102 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -716,7 +716,8 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 	int i;
 
 	if ((adev->asic_type == CHIP_POLARIS10) ||
-	    (adev->asic_type == CHIP_TONGA))
+	    (adev->asic_type == CHIP_TONGA) ||
+	    (adev->asic_type == CHIP_FIJI))
 		vce_v3_0_set_bypass_mode(adev, enable);
 
 	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
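Fiji joins the ASICs that must toggle VCE bypass mode around clockgating changes; the test is a plain asic_type whitelist, factored here as a predicate for clarity:

/* ASICs that need vce_v3_0_set_bypass_mode() around clockgating updates,
 * per the hunk above. */
static bool example_needs_vce_bypass(struct amdgpu_device *adev)
{
	return (adev->asic_type == CHIP_POLARIS10) ||
	       (adev->asic_type == CHIP_TONGA) ||
	       (adev->asic_type == CHIP_FIJI);
}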