Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 105
1 file changed, 88 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index ab05121b9272..e97f80f86005 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -22,7 +22,7 @@
  */
 
 #include "amdgpu.h"
-#define MAX_KIQ_REG_WAIT	100000
+#define MAX_KIQ_REG_WAIT	100000000 /* in usecs */
 
 int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
 {
@@ -114,27 +114,24 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 {
 	signed long r;
-	uint32_t val;
-	struct dma_fence *f;
+	uint32_t val, seq;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 	struct amdgpu_ring *ring = &kiq->ring;
 
 	BUG_ON(!ring->funcs->emit_rreg);
 
-	mutex_lock(&kiq->ring_mutex);
+	spin_lock(&kiq->ring_lock);
 	amdgpu_ring_alloc(ring, 32);
 	amdgpu_ring_emit_rreg(ring, reg);
-	amdgpu_fence_emit(ring, &f);
+	amdgpu_fence_emit_polling(ring, &seq);
 	amdgpu_ring_commit(ring);
-	mutex_unlock(&kiq->ring_mutex);
+	spin_unlock(&kiq->ring_lock);
 
-	r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
-	dma_fence_put(f);
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
 	if (r < 1) {
-		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+		DRM_ERROR("wait for kiq fence error: %ld\n", r);
 		return ~0;
 	}
-
 	val = adev->wb.wb[adev->virt.reg_val_offs];
 
 	return val;
@@ -143,23 +140,22 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 {
 	signed long r;
-	struct dma_fence *f;
+	uint32_t seq;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 	struct amdgpu_ring *ring = &kiq->ring;
 
 	BUG_ON(!ring->funcs->emit_wreg);
 
-	mutex_lock(&kiq->ring_mutex);
+	spin_lock(&kiq->ring_lock);
 	amdgpu_ring_alloc(ring, 32);
 	amdgpu_ring_emit_wreg(ring, reg, v);
-	amdgpu_fence_emit(ring, &f);
+	amdgpu_fence_emit_polling(ring, &seq);
 	amdgpu_ring_commit(ring);
-	mutex_unlock(&kiq->ring_mutex);
+	spin_unlock(&kiq->ring_lock);
 
-	r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
 	if (r < 1)
-		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
-	dma_fence_put(f);
+		DRM_ERROR("wait for kiq fence error: %ld\n", r);
 }
 
 /**
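
The two hunks above switch the KIQ register helpers from sleeping dma_fence waits under a mutex to sequence-number fences that are busy-polled under a spinlock, which is also why MAX_KIQ_REG_WAIT is now expressed in microseconds. A minimal standalone sketch of that polling idea follows; wait_seq_polling() and wb_seq are illustrative stand-ins for this note, not amdgpu APIs.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative stand-in for a ring's writeback slot: the producer (the GPU
 * in the real driver) stores the last completed sequence number here. */
static volatile uint32_t wb_seq;

/* Busy-poll until wb_seq reaches @seq or @timeout_us microseconds elapse.
 * Returns the remaining timeout on success (always >= 1) and 0 on timeout,
 * so a "< 1" result means the wait failed, as in the hunks above. */
static long wait_seq_polling(uint32_t seq, long timeout_us)
{
	while (timeout_us > 0) {
		if ((int32_t)(wb_seq - seq) >= 0)
			return timeout_us;
		usleep(1);
		timeout_us--;
	}
	return 0;
}

int main(void)
{
	uint32_t seq = 1;

	wb_seq = 1;	/* pretend the request already completed */

	if (wait_seq_polling(seq, 100000000) < 1)
		fprintf(stderr, "wait for fence error\n");
	else
		printf("sequence %u signaled\n", (unsigned int)seq);
	return 0;
}
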
@@ -274,3 +270,78 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
 			      (void *)&adev->virt.mm_table.cpu_addr);
 	adev->virt.mm_table.gpu_addr = 0;
 }
+
+
+int amdgpu_virt_fw_reserve_get_checksum(void *obj,
+					unsigned long obj_size,
+					unsigned int key,
+					unsigned int chksum)
+{
+	unsigned int ret = key;
+	unsigned long i = 0;
+	unsigned char *pos;
+
+	pos = (char *)obj;
+	/* calculate checksum */
+	for (i = 0; i < obj_size; ++i)
+		ret += *(pos + i);
+	/* minus the chksum itself */
+	pos = (char *)&chksum;
+	for (i = 0; i < sizeof(chksum); ++i)
+		ret -= *(pos + i);
+	return ret;
+}
+
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+{
+	uint32_t pf2vf_ver = 0;
+	uint32_t pf2vf_size = 0;
+	uint32_t checksum = 0;
+	uint32_t checkval;
+	char *str;
+
+	adev->virt.fw_reserve.p_pf2vf = NULL;
+	adev->virt.fw_reserve.p_vf2pf = NULL;
+
+	if (adev->fw_vram_usage.va != NULL) {
+		adev->virt.fw_reserve.p_pf2vf =
+			(struct amdgim_pf2vf_info_header *)(
+			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
+		pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
+		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
+		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
+
+		/* pf2vf message must be in 4K */
+		if (pf2vf_size > 0 && pf2vf_size < 4096) {
+			checkval = amdgpu_virt_fw_reserve_get_checksum(
+				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
+				adev->virt.fw_reserve.checksum_key, checksum);
+			if (checkval == checksum) {
+				adev->virt.fw_reserve.p_vf2pf =
+					((void *)adev->virt.fw_reserve.p_pf2vf +
+					pf2vf_size);
+				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
+					sizeof(amdgim_vf2pf_info));
+				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
+					AMDGPU_FW_VRAM_VF2PF_VER);
+				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
+					sizeof(amdgim_vf2pf_info));
+				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
+					&str);
+				if (THIS_MODULE->version != NULL)
+					strcpy(str, THIS_MODULE->version);
+				else
+					strcpy(str, "N/A");
+				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
+					0);
+				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
+					amdgpu_virt_fw_reserve_get_checksum(
+					adev->virt.fw_reserve.p_vf2pf,
+					pf2vf_size,
+					adev->virt.fw_reserve.checksum_key, 0));
+			}
+		}
+	}
+}
+
+
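
For reference, the arithmetic implemented by the new amdgpu_virt_fw_reserve_get_checksum() helper can be exercised outside the kernel: sum every byte of the message on top of a key, then subtract the bytes of the checksum argument so the checksum field embedded in the message does not count towards itself. The sketch below uses a hypothetical message layout and key purely for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same byte-sum arithmetic as amdgpu_virt_fw_reserve_get_checksum(): add
 * every byte of @obj to @key, then subtract the bytes of @chksum. */
static unsigned int fw_reserve_checksum(const void *obj, unsigned long obj_size,
					unsigned int key, unsigned int chksum)
{
	const unsigned char *pos = obj;
	unsigned int ret = key;
	unsigned long i;

	for (i = 0; i < obj_size; ++i)
		ret += pos[i];
	pos = (const unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= pos[i];
	return ret;
}

int main(void)
{
	/* Hypothetical message layout, just to exercise the helper. */
	struct {
		uint32_t version;
		uint32_t size;
		uint32_t checksum;
		char body[20];
	} msg;
	unsigned int key = 0;	/* the real checksum_key comes from the host */

	memset(&msg, 0, sizeof(msg));
	msg.version = 2;
	msg.size = sizeof(msg);
	strcpy(msg.body, "hello");

	/* Writer side: compute with the checksum field still zero. */
	msg.checksum = fw_reserve_checksum(&msg, sizeof(msg), key, 0);

	/* Reader side: recompute over the whole message, passing the stored
	 * checksum so its own bytes cancel out, and compare. */
	if (fw_reserve_checksum(&msg, sizeof(msg), key, msg.checksum) == msg.checksum)
		printf("checksum ok: 0x%x\n", (unsigned int)msg.checksum);
	else
		printf("checksum mismatch\n");
	return 0;
}

Writer and reader agree as long as the key matches and no byte of the message has changed, which is the property amdgpu_virt_init_data_exchange() relies on before it trusts the pf2vf region.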