about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/amdgpu
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2017-04-04 10:07:45 -0400
committerAlex Deucher <alexander.deucher@amd.com>2017-04-06 13:27:18 -0400
commit03f89feb57bf61749885ae98ce98b8c0fd28903b (patch)
tree7de2f901baa7a5ee5148632ac53f82970da86e3b /drivers/gpu/drm/amd/amdgpu
parent36b32a682bc32693e681cb984aac9c291a09c519 (diff)
drm/amdgpu: cleanup get_invalidate_req v2
The two hubs are just instances of the same hardware, so the register bits are identical.

v2: keep the function pointer

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c2
8 files changed, 27 insertions, 49 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9f244c39b65a..e0aee9ca9d4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -306,6 +306,7 @@ struct amdgpu_gart_funcs {
306 uint32_t flags); 306 uint32_t flags);
307 /* adjust mc addr in fb for APU case */ 307 /* adjust mc addr in fb for APU case */
308 u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr); 308 u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr);
309 uint32_t (*get_invalidate_req)(unsigned int vm_id);
309}; 310};
310 311
311/* provided by the ih block */ 312/* provided by the ih block */
@@ -570,7 +571,6 @@ struct amdgpu_vmhub {
570 uint32_t vm_context0_cntl; 571 uint32_t vm_context0_cntl;
571 uint32_t vm_l2_pro_fault_status; 572 uint32_t vm_l2_pro_fault_status;
572 uint32_t vm_l2_pro_fault_cntl; 573 uint32_t vm_l2_pro_fault_cntl;
573 uint32_t (*get_invalidate_req)(unsigned int vm_id);
574}; 574};
575 575
576/* 576/*
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 669bb98fc45d..4e6702d764e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3148,6 +3148,7 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3148 unsigned vm_id, uint64_t pd_addr) 3148 unsigned vm_id, uint64_t pd_addr)
3149{ 3149{
3150 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 3150 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3151 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
3151 unsigned eng = ring->idx; 3152 unsigned eng = ring->idx;
3152 unsigned i; 3153 unsigned i;
3153 3154
@@ -3157,7 +3158,6 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3157 3158
3158 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 3159 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
3159 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; 3160 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
3160 uint32_t req = hub->get_invalidate_req(vm_id);
3161 3161
3162 gfx_v9_0_write_data_to_reg(ring, usepfp, true, 3162 gfx_v9_0_write_data_to_reg(ring, usepfp, true,
3163 hub->ctx0_ptb_addr_lo32 3163 hub->ctx0_ptb_addr_lo32
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 70c21f9b904b..005075ff00f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -299,25 +299,6 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
299 WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp); 299 WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
300} 300}
301 301
302static uint32_t gfxhub_v1_0_get_invalidate_req(unsigned int vm_id)
303{
304 u32 req = 0;
305
306 /* invalidate using legacy mode on vm_id*/
307 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
308 PER_VMID_INVALIDATE_REQ, 1 << vm_id);
309 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
310 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
311 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
312 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
313 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
314 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
315 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
316 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
317
318 return req;
319}
320
321static int gfxhub_v1_0_early_init(void *handle) 302static int gfxhub_v1_0_early_init(void *handle)
322{ 303{
323 return 0; 304 return 0;
@@ -350,8 +331,6 @@ static int gfxhub_v1_0_sw_init(void *handle)
350 hub->vm_l2_pro_fault_cntl = 331 hub->vm_l2_pro_fault_cntl =
351 SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL); 332 SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
352 333
353 hub->get_invalidate_req = gfxhub_v1_0_get_invalidate_req;
354
355 return 0; 334 return 0;
356} 335}
357 336
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 7632f4ad7aa9..6329be81f260 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -173,6 +173,25 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
173 adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs; 173 adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
174} 174}
175 175
176static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
177{
178 u32 req = 0;
179
180 /* invalidate using legacy mode on vm_id*/
181 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
182 PER_VMID_INVALIDATE_REQ, 1 << vm_id);
183 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
184 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
185 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
186 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
187 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
188 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
189 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
190 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
191
192 return req;
193}
194
176/* 195/*
177 * GART 196 * GART
178 * VMID 0 is the physical GPU addresses as used by the kernel. 197 * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -202,7 +221,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
202 221
203 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 222 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
204 struct amdgpu_vmhub *hub = &adev->vmhub[i]; 223 struct amdgpu_vmhub *hub = &adev->vmhub[i];
205 u32 tmp = hub->get_invalidate_req(vmid); 224 u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
206 225
207 WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); 226 WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
208 227
@@ -345,6 +364,7 @@ static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
345 .set_pte_pde = gmc_v9_0_gart_set_pte_pde, 364 .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
346 .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags, 365 .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
347 .adjust_mc_addr = gmc_v9_0_adjust_mc_addr, 366 .adjust_mc_addr = gmc_v9_0_adjust_mc_addr,
367 .get_invalidate_req = gmc_v9_0_get_invalidate_req,
348}; 368};
349 369
350static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev) 370static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 3c9e27effc6a..62684510ddcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -317,25 +317,6 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
317 WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp); 317 WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
318} 318}
319 319
320static uint32_t mmhub_v1_0_get_invalidate_req(unsigned int vm_id)
321{
322 u32 req = 0;
323
324 /* invalidate using legacy mode on vm_id*/
325 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
326 PER_VMID_INVALIDATE_REQ, 1 << vm_id);
327 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
328 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
329 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
330 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
331 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
332 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
333 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
334 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
335
336 return req;
337}
338
339static int mmhub_v1_0_early_init(void *handle) 320static int mmhub_v1_0_early_init(void *handle)
340{ 321{
341 return 0; 322 return 0;
@@ -368,8 +349,6 @@ static int mmhub_v1_0_sw_init(void *handle)
368 hub->vm_l2_pro_fault_cntl = 349 hub->vm_l2_pro_fault_cntl =
369 SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL); 350 SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
370 351
371 hub->get_invalidate_req = mmhub_v1_0_get_invalidate_req;
372
373 return 0; 352 return 0;
374} 353}
375 354
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 2dd2b20d727e..21f38d882335 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1039,6 +1039,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1039static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 1039static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1040 unsigned vm_id, uint64_t pd_addr) 1040 unsigned vm_id, uint64_t pd_addr)
1041{ 1041{
1042 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
1042 unsigned eng = ring->idx; 1043 unsigned eng = ring->idx;
1043 unsigned i; 1044 unsigned i;
1044 1045
@@ -1048,7 +1049,6 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1048 1049
1049 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 1050 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
1050 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; 1051 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
1051 uint32_t req = hub->get_invalidate_req(vm_id);
1052 1052
1053 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | 1053 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1054 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); 1054 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 8a9a90b00c93..9bcf01469282 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1034,6 +1034,7 @@ static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
1034static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 1034static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1035 unsigned vm_id, uint64_t pd_addr) 1035 unsigned vm_id, uint64_t pd_addr)
1036{ 1036{
1037 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
1037 uint32_t data0, data1, mask; 1038 uint32_t data0, data1, mask;
1038 unsigned eng = ring->idx; 1039 unsigned eng = ring->idx;
1039 unsigned i; 1040 unsigned i;
@@ -1044,7 +1045,6 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1044 1045
1045 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 1046 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
1046 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; 1047 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
1047 uint32_t req = hub->get_invalidate_req(vm_id);
1048 1048
1049 data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2; 1049 data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
1050 data1 = upper_32_bits(pd_addr); 1050 data1 = upper_32_bits(pd_addr);
@@ -1080,6 +1080,7 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1080static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, 1080static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1081 unsigned int vm_id, uint64_t pd_addr) 1081 unsigned int vm_id, uint64_t pd_addr)
1082{ 1082{
1083 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
1083 unsigned eng = ring->idx; 1084 unsigned eng = ring->idx;
1084 unsigned i; 1085 unsigned i;
1085 1086
@@ -1089,7 +1090,6 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1089 1090
1090 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 1091 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
1091 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; 1092 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
1092 uint32_t req = hub->get_invalidate_req(vm_id);
1093 1093
1094 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE); 1094 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1095 amdgpu_ring_write(ring, 1095 amdgpu_ring_write(ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 2a3db99fbf1e..edde5fe938d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -973,6 +973,7 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
973static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, 973static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
974 unsigned int vm_id, uint64_t pd_addr) 974 unsigned int vm_id, uint64_t pd_addr)
975{ 975{
976 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
976 unsigned eng = ring->idx; 977 unsigned eng = ring->idx;
977 unsigned i; 978 unsigned i;
978 979
@@ -982,7 +983,6 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
982 983
983 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 984 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
984 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; 985 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
985 uint32_t req = hub->get_invalidate_req(vm_id);
986 986
987 amdgpu_ring_write(ring, VCE_CMD_REG_WRITE); 987 amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
988 amdgpu_ring_write(ring, 988 amdgpu_ring_write(ring,