about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2017-07-27 11:24:36 -0400
committerAlex Deucher <alexander.deucher@amd.com>2017-08-15 14:45:56 -0400
commita4a02777892c16d98bbcc2ea2add47950bfbf9e0 (patch)
tree8bff3ada728d2eff39876ebe6e70b7a444a18fe7 /drivers
parent9d903cbd9995bd63e04d71362a8b59ce49437544 (diff)
drm/amdgpu: use amdgpu_bo_create_kernel more often
Saves us quite a bunch of loc.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c166
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c110
6 files changed, 103 insertions, 396 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3cd9df890b00..510188477fe5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -336,35 +336,11 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
336 336
337static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) 337static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
338{ 338{
339 int r; 339 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
340 340 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
341 if (adev->vram_scratch.robj == NULL) { 341 &adev->vram_scratch.robj,
342 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, 342 &adev->vram_scratch.gpu_addr,
343 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 343 (void **)&adev->vram_scratch.ptr);
344 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
345 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
346 NULL, NULL, &adev->vram_scratch.robj);
347 if (r) {
348 return r;
349 }
350 }
351
352 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
353 if (unlikely(r != 0))
354 return r;
355 r = amdgpu_bo_pin(adev->vram_scratch.robj,
356 AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
357 if (r) {
358 amdgpu_bo_unreserve(adev->vram_scratch.robj);
359 return r;
360 }
361 r = amdgpu_bo_kmap(adev->vram_scratch.robj,
362 (void **)&adev->vram_scratch.ptr);
363 if (r)
364 amdgpu_bo_unpin(adev->vram_scratch.robj);
365 amdgpu_bo_unreserve(adev->vram_scratch.robj);
366
367 return r;
368} 344}
369 345
370static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev) 346static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 24c414630e65..2fbb40e3cceb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1232,23 +1232,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
1232 /* Change the size here instead of the init above so only lpfn is affected */ 1232 /* Change the size here instead of the init above so only lpfn is affected */
1233 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); 1233 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
1234 1234
1235 r = amdgpu_bo_create(adev, adev->mc.stolen_size, PAGE_SIZE, true, 1235 r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
1236 AMDGPU_GEM_DOMAIN_VRAM, 1236 AMDGPU_GEM_DOMAIN_VRAM,
1237 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 1237 &adev->stollen_vga_memory,
1238 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, 1238 NULL, NULL);
1239 NULL, NULL, &adev->stollen_vga_memory);
1240 if (r) {
1241 return r;
1242 }
1243 r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
1244 if (r) 1239 if (r)
1245 return r; 1240 return r;
1246 r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
1247 amdgpu_bo_unreserve(adev->stollen_vga_memory);
1248 if (r) {
1249 amdgpu_bo_unref(&adev->stollen_vga_memory);
1250 return r;
1251 }
1252 DRM_INFO("amdgpu: %uM of VRAM memory ready\n", 1241 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1253 (unsigned) (adev->mc.real_vram_size / (1024 * 1024))); 1242 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
1254 1243
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 4ac85f47f287..faf8d28303e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -2273,43 +2273,23 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
2273 2273
2274 if (src_ptr) { 2274 if (src_ptr) {
2275 /* save restore block */ 2275 /* save restore block */
2276 if (adev->gfx.rlc.save_restore_obj == NULL) { 2276 r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
2277 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, 2277 AMDGPU_GEM_DOMAIN_VRAM,
2278 AMDGPU_GEM_DOMAIN_VRAM, 2278 &adev->gfx.rlc.save_restore_obj,
2279 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 2279 &adev->gfx.rlc.save_restore_gpu_addr,
2280 NULL, NULL, 2280 (void **)&adev->gfx.rlc.sr_ptr);
2281 &adev->gfx.rlc.save_restore_obj);
2282
2283 if (r) {
2284 dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
2285 return r;
2286 }
2287 }
2288
2289 r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
2290 if (unlikely(r != 0)) {
2291 gfx_v6_0_rlc_fini(adev);
2292 return r;
2293 }
2294 r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
2295 &adev->gfx.rlc.save_restore_gpu_addr);
2296 if (r) { 2281 if (r) {
2297 amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); 2282 dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
2298 dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r); 2283 r);
2299 gfx_v6_0_rlc_fini(adev); 2284 gfx_v6_0_rlc_fini(adev);
2300 return r; 2285 return r;
2301 } 2286 }
2302 2287
2303 r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
2304 if (r) {
2305 dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
2306 gfx_v6_0_rlc_fini(adev);
2307 return r;
2308 }
2309 /* write the sr buffer */ 2288 /* write the sr buffer */
2310 dst_ptr = adev->gfx.rlc.sr_ptr; 2289 dst_ptr = adev->gfx.rlc.sr_ptr;
2311 for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) 2290 for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
2312 dst_ptr[i] = cpu_to_le32(src_ptr[i]); 2291 dst_ptr[i] = cpu_to_le32(src_ptr[i]);
2292
2313 amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); 2293 amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
2314 amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); 2294 amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
2315 } 2295 }
@@ -2319,39 +2299,17 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
2319 adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev); 2299 adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
2320 dws = adev->gfx.rlc.clear_state_size + (256 / 4); 2300 dws = adev->gfx.rlc.clear_state_size + (256 / 4);
2321 2301
2322 if (adev->gfx.rlc.clear_state_obj == NULL) { 2302 r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
2323 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, 2303 AMDGPU_GEM_DOMAIN_VRAM,
2324 AMDGPU_GEM_DOMAIN_VRAM, 2304 &adev->gfx.rlc.clear_state_obj,
2325 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 2305 &adev->gfx.rlc.clear_state_gpu_addr,
2326 NULL, NULL, 2306 (void **)&adev->gfx.rlc.cs_ptr);
2327 &adev->gfx.rlc.clear_state_obj);
2328
2329 if (r) {
2330 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
2331 gfx_v6_0_rlc_fini(adev);
2332 return r;
2333 }
2334 }
2335 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
2336 if (unlikely(r != 0)) {
2337 gfx_v6_0_rlc_fini(adev);
2338 return r;
2339 }
2340 r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
2341 &adev->gfx.rlc.clear_state_gpu_addr);
2342 if (r) { 2307 if (r) {
2343 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); 2308 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
2344 dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
2345 gfx_v6_0_rlc_fini(adev); 2309 gfx_v6_0_rlc_fini(adev);
2346 return r; 2310 return r;
2347 } 2311 }
2348 2312
2349 r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
2350 if (r) {
2351 dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
2352 gfx_v6_0_rlc_fini(adev);
2353 return r;
2354 }
2355 /* set up the cs buffer */ 2313 /* set up the cs buffer */
2356 dst_ptr = adev->gfx.rlc.cs_ptr; 2314 dst_ptr = adev->gfx.rlc.cs_ptr;
2357 reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256; 2315 reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 17b7c6934b0a..fab1cb569108 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2823,33 +2823,14 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
2823 /* allocate space for ALL pipes (even the ones we don't own) */ 2823 /* allocate space for ALL pipes (even the ones we don't own) */
2824 mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec 2824 mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
2825 * GFX7_MEC_HPD_SIZE * 2; 2825 * GFX7_MEC_HPD_SIZE * 2;
2826 if (adev->gfx.mec.hpd_eop_obj == NULL) {
2827 r = amdgpu_bo_create(adev,
2828 mec_hpd_size,
2829 PAGE_SIZE, true,
2830 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
2831 &adev->gfx.mec.hpd_eop_obj);
2832 if (r) {
2833 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
2834 return r;
2835 }
2836 }
2837 2826
2838 r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); 2827 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
2839 if (unlikely(r != 0)) { 2828 AMDGPU_GEM_DOMAIN_GTT,
2840 gfx_v7_0_mec_fini(adev); 2829 &adev->gfx.mec.hpd_eop_obj,
2841 return r; 2830 &adev->gfx.mec.hpd_eop_gpu_addr,
2842 } 2831 (void **)&hpd);
2843 r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
2844 &adev->gfx.mec.hpd_eop_gpu_addr);
2845 if (r) { 2832 if (r) {
2846 dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r); 2833 dev_warn(adev->dev, "(%d) create, pin or map of HDP EOP bo failed\n", r);
2847 gfx_v7_0_mec_fini(adev);
2848 return r;
2849 }
2850 r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
2851 if (r) {
2852 dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
2853 gfx_v7_0_mec_fini(adev); 2834 gfx_v7_0_mec_fini(adev);
2854 return r; 2835 return r;
2855 } 2836 }
@@ -3108,32 +3089,12 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
3108 struct cik_mqd *mqd; 3089 struct cik_mqd *mqd;
3109 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; 3090 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
3110 3091
3111 if (ring->mqd_obj == NULL) { 3092 r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE,
3112 r = amdgpu_bo_create(adev, 3093 AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
3113 sizeof(struct cik_mqd), 3094 &mqd_gpu_addr, (void **)&mqd);
3114 PAGE_SIZE, true,
3115 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
3116 &ring->mqd_obj);
3117 if (r) {
3118 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
3119 return r;
3120 }
3121 }
3122
3123 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3124 if (unlikely(r != 0))
3125 goto out;
3126
3127 r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
3128 &mqd_gpu_addr);
3129 if (r) {
3130 dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
3131 goto out_unreserve;
3132 }
3133 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
3134 if (r) { 3095 if (r) {
3135 dev_warn(adev->dev, "(%d) map MQD bo failed\n", r); 3096 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
3136 goto out_unreserve; 3097 return r;
3137 } 3098 }
3138 3099
3139 mutex_lock(&adev->srbm_mutex); 3100 mutex_lock(&adev->srbm_mutex);
@@ -3147,9 +3108,7 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
3147 mutex_unlock(&adev->srbm_mutex); 3108 mutex_unlock(&adev->srbm_mutex);
3148 3109
3149 amdgpu_bo_kunmap(ring->mqd_obj); 3110 amdgpu_bo_kunmap(ring->mqd_obj);
3150out_unreserve:
3151 amdgpu_bo_unreserve(ring->mqd_obj); 3111 amdgpu_bo_unreserve(ring->mqd_obj);
3152out:
3153 return 0; 3112 return 0;
3154} 3113}
3155 3114
@@ -3432,39 +3391,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3432 3391
3433 if (src_ptr) { 3392 if (src_ptr) {
3434 /* save restore block */ 3393 /* save restore block */
3435 if (adev->gfx.rlc.save_restore_obj == NULL) { 3394 r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
3436 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, 3395 AMDGPU_GEM_DOMAIN_VRAM,
3437 AMDGPU_GEM_DOMAIN_VRAM, 3396 &adev->gfx.rlc.save_restore_obj,
3438 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 3397 &adev->gfx.rlc.save_restore_gpu_addr,
3439 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, 3398 (void **)&adev->gfx.rlc.sr_ptr);
3440 NULL, NULL,
3441 &adev->gfx.rlc.save_restore_obj);
3442 if (r) {
3443 dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
3444 return r;
3445 }
3446 }
3447
3448 r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
3449 if (unlikely(r != 0)) {
3450 gfx_v7_0_rlc_fini(adev);
3451 return r;
3452 }
3453 r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
3454 &adev->gfx.rlc.save_restore_gpu_addr);
3455 if (r) { 3399 if (r) {
3456 amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); 3400 dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
3457 dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
3458 gfx_v7_0_rlc_fini(adev); 3401 gfx_v7_0_rlc_fini(adev);
3459 return r; 3402 return r;
3460 } 3403 }
3461 3404
3462 r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
3463 if (r) {
3464 dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
3465 gfx_v7_0_rlc_fini(adev);
3466 return r;
3467 }
3468 /* write the sr buffer */ 3405 /* write the sr buffer */
3469 dst_ptr = adev->gfx.rlc.sr_ptr; 3406 dst_ptr = adev->gfx.rlc.sr_ptr;
3470 for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) 3407 for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
@@ -3477,39 +3414,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3477 /* clear state block */ 3414 /* clear state block */
3478 adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev); 3415 adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
3479 3416
3480 if (adev->gfx.rlc.clear_state_obj == NULL) { 3417 r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
3481 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, 3418 AMDGPU_GEM_DOMAIN_VRAM,
3482 AMDGPU_GEM_DOMAIN_VRAM, 3419 &adev->gfx.rlc.clear_state_obj,
3483 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 3420 &adev->gfx.rlc.clear_state_gpu_addr,
3484 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, 3421 (void **)&adev->gfx.rlc.cs_ptr);
3485 NULL, NULL,
3486 &adev->gfx.rlc.clear_state_obj);
3487 if (r) {
3488 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
3489 gfx_v7_0_rlc_fini(adev);
3490 return r;
3491 }
3492 }
3493 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
3494 if (unlikely(r != 0)) {
3495 gfx_v7_0_rlc_fini(adev);
3496 return r;
3497 }
3498 r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
3499 &adev->gfx.rlc.clear_state_gpu_addr);
3500 if (r) { 3422 if (r) {
3501 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); 3423 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
3502 dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
3503 gfx_v7_0_rlc_fini(adev); 3424 gfx_v7_0_rlc_fini(adev);
3504 return r; 3425 return r;
3505 } 3426 }
3506 3427
3507 r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
3508 if (r) {
3509 dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
3510 gfx_v7_0_rlc_fini(adev);
3511 return r;
3512 }
3513 /* set up the cs buffer */ 3428 /* set up the cs buffer */
3514 dst_ptr = adev->gfx.rlc.cs_ptr; 3429 dst_ptr = adev->gfx.rlc.cs_ptr;
3515 gfx_v7_0_get_csb_buffer(adev, dst_ptr); 3430 gfx_v7_0_get_csb_buffer(adev, dst_ptr);
@@ -3518,37 +3433,14 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3518 } 3433 }
3519 3434
3520 if (adev->gfx.rlc.cp_table_size) { 3435 if (adev->gfx.rlc.cp_table_size) {
3521 if (adev->gfx.rlc.cp_table_obj == NULL) {
3522 r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
3523 AMDGPU_GEM_DOMAIN_VRAM,
3524 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
3525 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
3526 NULL, NULL,
3527 &adev->gfx.rlc.cp_table_obj);
3528 if (r) {
3529 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
3530 gfx_v7_0_rlc_fini(adev);
3531 return r;
3532 }
3533 }
3534 3436
3535 r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); 3437 r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
3536 if (unlikely(r != 0)) { 3438 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
3537 dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); 3439 &adev->gfx.rlc.cp_table_obj,
3538 gfx_v7_0_rlc_fini(adev); 3440 &adev->gfx.rlc.cp_table_gpu_addr,
3539 return r; 3441 (void **)&adev->gfx.rlc.cp_table_ptr);
3540 }
3541 r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
3542 &adev->gfx.rlc.cp_table_gpu_addr);
3543 if (r) {
3544 amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
3545 dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
3546 gfx_v7_0_rlc_fini(adev);
3547 return r;
3548 }
3549 r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
3550 if (r) { 3442 if (r) {
3551 dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r); 3443 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
3552 gfx_v7_0_rlc_fini(adev); 3444 gfx_v7_0_rlc_fini(adev);
3553 return r; 3445 return r;
3554 } 3446 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 05436b8730b4..8465f1174c30 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1278,39 +1278,17 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
1278 /* clear state block */ 1278 /* clear state block */
1279 adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev); 1279 adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
1280 1280
1281 if (adev->gfx.rlc.clear_state_obj == NULL) { 1281 r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
1282 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, 1282 AMDGPU_GEM_DOMAIN_VRAM,
1283 AMDGPU_GEM_DOMAIN_VRAM, 1283 &adev->gfx.rlc.clear_state_obj,
1284 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 1284 &adev->gfx.rlc.clear_state_gpu_addr,
1285 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, 1285 (void **)&adev->gfx.rlc.cs_ptr);
1286 NULL, NULL,
1287 &adev->gfx.rlc.clear_state_obj);
1288 if (r) {
1289 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
1290 gfx_v8_0_rlc_fini(adev);
1291 return r;
1292 }
1293 }
1294 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
1295 if (unlikely(r != 0)) {
1296 gfx_v8_0_rlc_fini(adev);
1297 return r;
1298 }
1299 r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
1300 &adev->gfx.rlc.clear_state_gpu_addr);
1301 if (r) { 1286 if (r) {
1302 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); 1287 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
1303 dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r);
1304 gfx_v8_0_rlc_fini(adev); 1288 gfx_v8_0_rlc_fini(adev);
1305 return r; 1289 return r;
1306 } 1290 }
1307 1291
1308 r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
1309 if (r) {
1310 dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r);
1311 gfx_v8_0_rlc_fini(adev);
1312 return r;
1313 }
1314 /* set up the cs buffer */ 1292 /* set up the cs buffer */
1315 dst_ptr = adev->gfx.rlc.cs_ptr; 1293 dst_ptr = adev->gfx.rlc.cs_ptr;
1316 gfx_v8_0_get_csb_buffer(adev, dst_ptr); 1294 gfx_v8_0_get_csb_buffer(adev, dst_ptr);
@@ -1321,34 +1299,13 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
1321 if ((adev->asic_type == CHIP_CARRIZO) || 1299 if ((adev->asic_type == CHIP_CARRIZO) ||
1322 (adev->asic_type == CHIP_STONEY)) { 1300 (adev->asic_type == CHIP_STONEY)) {
1323 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ 1301 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1324 if (adev->gfx.rlc.cp_table_obj == NULL) { 1302 r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
1325 r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, 1303 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1326 AMDGPU_GEM_DOMAIN_VRAM, 1304 &adev->gfx.rlc.cp_table_obj,
1327 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 1305 &adev->gfx.rlc.cp_table_gpu_addr,
1328 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, 1306 (void **)&adev->gfx.rlc.cp_table_ptr);
1329 NULL, NULL,
1330 &adev->gfx.rlc.cp_table_obj);
1331 if (r) {
1332 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
1333 return r;
1334 }
1335 }
1336
1337 r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
1338 if (unlikely(r != 0)) {
1339 dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
1340 return r;
1341 }
1342 r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
1343 &adev->gfx.rlc.cp_table_gpu_addr);
1344 if (r) {
1345 amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
1346 dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r);
1347 return r;
1348 }
1349 r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
1350 if (r) { 1307 if (r) {
1351 dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r); 1308 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
1352 return r; 1309 return r;
1353 } 1310 }
1354 1311
@@ -1389,34 +1346,13 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
1389 1346
1390 mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE; 1347 mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
1391 1348
1392 if (adev->gfx.mec.hpd_eop_obj == NULL) { 1349 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1393 r = amdgpu_bo_create(adev, 1350 AMDGPU_GEM_DOMAIN_GTT,
1394 mec_hpd_size, 1351 &adev->gfx.mec.hpd_eop_obj,
1395 PAGE_SIZE, true, 1352 &adev->gfx.mec.hpd_eop_gpu_addr,
1396 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, 1353 (void **)&hpd);
1397 &adev->gfx.mec.hpd_eop_obj);
1398 if (r) {
1399 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
1400 return r;
1401 }
1402 }
1403
1404 r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
1405 if (unlikely(r != 0)) {
1406 gfx_v8_0_mec_fini(adev);
1407 return r;
1408 }
1409 r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
1410 &adev->gfx.mec.hpd_eop_gpu_addr);
1411 if (r) {
1412 dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
1413 gfx_v8_0_mec_fini(adev);
1414 return r;
1415 }
1416 r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
1417 if (r) { 1354 if (r) {
1418 dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); 1355 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
1419 gfx_v8_0_mec_fini(adev);
1420 return r; 1356 return r;
1421 } 1357 }
1422 1358
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 8795a50bc099..1496113f6568 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -774,18 +774,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
774 if (cs_data) { 774 if (cs_data) {
775 /* clear state block */ 775 /* clear state block */
776 adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev); 776 adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
777 if (adev->gfx.rlc.clear_state_obj == NULL) { 777 r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
778 r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE, 778 AMDGPU_GEM_DOMAIN_VRAM,
779 AMDGPU_GEM_DOMAIN_VRAM, 779 &adev->gfx.rlc.clear_state_obj,
780 &adev->gfx.rlc.clear_state_obj, 780 &adev->gfx.rlc.clear_state_gpu_addr,
781 &adev->gfx.rlc.clear_state_gpu_addr, 781 (void **)&adev->gfx.rlc.cs_ptr);
782 (void **)&adev->gfx.rlc.cs_ptr); 782 if (r) {
783 if (r) { 783 dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
784 dev_err(adev->dev, 784 r);
785 "(%d) failed to create rlc csb bo\n", r); 785 gfx_v9_0_rlc_fini(adev);
786 gfx_v9_0_rlc_fini(adev); 786 return r;
787 return r;
788 }
789 } 787 }
790 /* set up the cs buffer */ 788 /* set up the cs buffer */
791 dst_ptr = adev->gfx.rlc.cs_ptr; 789 dst_ptr = adev->gfx.rlc.cs_ptr;
@@ -797,18 +795,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
797 if (adev->asic_type == CHIP_RAVEN) { 795 if (adev->asic_type == CHIP_RAVEN) {
798 /* TODO: double check the cp_table_size for RV */ 796 /* TODO: double check the cp_table_size for RV */
799 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ 797 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
800 if (adev->gfx.rlc.cp_table_obj == NULL) { 798 r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
801 r = amdgpu_bo_create_kernel(adev, adev->gfx.rlc.cp_table_size, 799 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
802 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 800 &adev->gfx.rlc.cp_table_obj,
803 &adev->gfx.rlc.cp_table_obj, 801 &adev->gfx.rlc.cp_table_gpu_addr,
804 &adev->gfx.rlc.cp_table_gpu_addr, 802 (void **)&adev->gfx.rlc.cp_table_ptr);
805 (void **)&adev->gfx.rlc.cp_table_ptr); 803 if (r) {
806 if (r) { 804 dev_err(adev->dev,
807 dev_err(adev->dev, 805 "(%d) failed to create cp table bo\n", r);
808 "(%d) failed to create cp table bo\n", r); 806 gfx_v9_0_rlc_fini(adev);
809 gfx_v9_0_rlc_fini(adev); 807 return r;
810 return r;
811 }
812 } 808 }
813 809
814 rv_init_cp_jump_table(adev); 810 rv_init_cp_jump_table(adev);
@@ -864,33 +860,13 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
864 amdgpu_gfx_compute_queue_acquire(adev); 860 amdgpu_gfx_compute_queue_acquire(adev);
865 mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; 861 mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
866 862
867 if (adev->gfx.mec.hpd_eop_obj == NULL) { 863 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
868 r = amdgpu_bo_create(adev, 864 AMDGPU_GEM_DOMAIN_GTT,
869 mec_hpd_size, 865 &adev->gfx.mec.hpd_eop_obj,
870 PAGE_SIZE, true, 866 &adev->gfx.mec.hpd_eop_gpu_addr,
871 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, 867 (void **)&hpd);
872 &adev->gfx.mec.hpd_eop_obj);
873 if (r) {
874 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
875 return r;
876 }
877 }
878
879 r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
880 if (unlikely(r != 0)) {
881 gfx_v9_0_mec_fini(adev);
882 return r;
883 }
884 r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
885 &adev->gfx.mec.hpd_eop_gpu_addr);
886 if (r) {
887 dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
888 gfx_v9_0_mec_fini(adev);
889 return r;
890 }
891 r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
892 if (r) { 868 if (r) {
893 dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); 869 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
894 gfx_v9_0_mec_fini(adev); 870 gfx_v9_0_mec_fini(adev);
895 return r; 871 return r;
896 } 872 }
@@ -907,42 +883,22 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
907 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 883 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
908 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; 884 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
909 885
910 if (adev->gfx.mec.mec_fw_obj == NULL) { 886 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
911 r = amdgpu_bo_create(adev, 887 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
912 mec_hdr->header.ucode_size_bytes, 888 &adev->gfx.mec.mec_fw_obj,
913 PAGE_SIZE, true, 889 &adev->gfx.mec.mec_fw_gpu_addr,
914 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, 890 (void **)&fw);
915 &adev->gfx.mec.mec_fw_obj);
916 if (r) {
917 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
918 return r;
919 }
920 }
921
922 r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false);
923 if (unlikely(r != 0)) {
924 gfx_v9_0_mec_fini(adev);
925 return r;
926 }
927 r = amdgpu_bo_pin(adev->gfx.mec.mec_fw_obj, AMDGPU_GEM_DOMAIN_GTT,
928 &adev->gfx.mec.mec_fw_gpu_addr);
929 if (r) {
930 dev_warn(adev->dev, "(%d) pin mec firmware bo failed\n", r);
931 gfx_v9_0_mec_fini(adev);
932 return r;
933 }
934 r = amdgpu_bo_kmap(adev->gfx.mec.mec_fw_obj, (void **)&fw);
935 if (r) { 891 if (r) {
936 dev_warn(adev->dev, "(%d) map firmware bo failed\n", r); 892 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
937 gfx_v9_0_mec_fini(adev); 893 gfx_v9_0_mec_fini(adev);
938 return r; 894 return r;
939 } 895 }
896
940 memcpy(fw, fw_data, fw_size); 897 memcpy(fw, fw_data, fw_size);
941 898
942 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 899 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
943 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 900 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
944 901
945
946 return 0; 902 return 0;
947} 903}
948 904