Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 112 ++++++++++++++++++++--------
 1 file changed, 80 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3ec43cf9ad78..6e72fe7901ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -220,7 +220,7 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
 }
 
 /**
- * amdgpu_bo_create_kernel - create BO for kernel use
+ * amdgpu_bo_create_reserved - create reserved BO for kernel use
  *
  * @adev: amdgpu device object
  * @size: size for the new BO
@@ -230,24 +230,30 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
  * @gpu_addr: GPU addr of the pinned BO
  * @cpu_addr: optional CPU address mapping
  *
- * Allocates and pins a BO for kernel internal use.
+ * Allocates and pins a BO for kernel internal use, and returns it still
+ * reserved.
  *
  * Returns 0 on success, negative error code otherwise.
  */
-int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
-			    unsigned long size, int align,
-			    u32 domain, struct amdgpu_bo **bo_ptr,
-			    u64 *gpu_addr, void **cpu_addr)
+int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+			      unsigned long size, int align,
+			      u32 domain, struct amdgpu_bo **bo_ptr,
+			      u64 *gpu_addr, void **cpu_addr)
 {
+	bool free = false;
 	int r;
 
-	r = amdgpu_bo_create(adev, size, align, true, domain,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-			     NULL, NULL, bo_ptr);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
-		return r;
+	if (!*bo_ptr) {
+		r = amdgpu_bo_create(adev, size, align, true, domain,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+				     NULL, NULL, 0, bo_ptr);
+		if (r) {
+			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
+				r);
+			return r;
+		}
+		free = true;
 	}
 
 	r = amdgpu_bo_reserve(*bo_ptr, false);
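
A minimal usage sketch, not part of the patch: amdgpu_bo_create_reserved() hands the BO back still reserved, so a caller can initialize it through the optional CPU mapping before dropping the reservation. The function my_init_fw_buf() and its choice of a VRAM page are hypothetical; the helper's signature is exactly the one in the hunk above.

	static int my_init_fw_buf(struct amdgpu_device *adev)
	{
		struct amdgpu_bo *bo = NULL;	/* NULL: let the helper allocate */
		u64 gpu_addr;
		void *cpu_addr;
		int r;

		r = amdgpu_bo_create_reserved(adev, PAGE_SIZE, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &bo, &gpu_addr, &cpu_addr);
		if (r)
			return r;

		memset(cpu_addr, 0, PAGE_SIZE);	/* safe: BO is still reserved */
		amdgpu_bo_unreserve(bo);
		return 0;
	}
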
@@ -270,20 +276,52 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 		}
 	}
 
-	amdgpu_bo_unreserve(*bo_ptr);
-
 	return 0;
 
 error_unreserve:
 	amdgpu_bo_unreserve(*bo_ptr);
 
 error_free:
-	amdgpu_bo_unref(bo_ptr);
+	if (free)
+		amdgpu_bo_unref(bo_ptr);
 
 	return r;
 }
 
 /**
+ * amdgpu_bo_create_kernel - create BO for kernel use
+ *
+ * @adev: amdgpu device object
+ * @size: size for the new BO
+ * @align: alignment for the new BO
+ * @domain: where to place it
+ * @bo_ptr: resulting BO
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Allocates and pins a BO for kernel internal use.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+			    unsigned long size, int align,
+			    u32 domain, struct amdgpu_bo **bo_ptr,
+			    u64 *gpu_addr, void **cpu_addr)
+{
+	int r;
+
+	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
+				      gpu_addr, cpu_addr);
+
+	if (r)
+		return r;
+
+	amdgpu_bo_unreserve(*bo_ptr);
+
+	return 0;
+}
+
+/**
  * amdgpu_bo_free_kernel - free BO for kernel use
  *
  * @bo: amdgpu BO to free
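
A second sketch, equally hypothetical: because the helper only allocates when *bo_ptr is NULL, a caller appears able to pass in an existing BO to have it pinned and mapped again, and the new 'free' flag keeps the error path from unreferencing a BO the helper did not allocate. amdgpu_bo_create_kernel() above is now exactly this helper plus an immediate unreserve.

	static int my_remap_existing(struct amdgpu_device *adev,
				     struct amdgpu_bo *bo,
				     u64 *gpu_addr, void **cpu_addr)
	{
		int r;

		/* bo is already non-NULL, so no new allocation happens */
		r = amdgpu_bo_create_reserved(adev, amdgpu_bo_size(bo),
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
					      &bo, gpu_addr, cpu_addr);
		if (r)
			return r;

		amdgpu_bo_unreserve(bo);
		return 0;
	}
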
@@ -318,6 +356,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 				struct sg_table *sg,
 				struct ttm_placement *placement,
 				struct reservation_object *resv,
+				uint64_t init_value,
 				struct amdgpu_bo **bo_ptr)
 {
 	struct amdgpu_bo *bo;
@@ -352,13 +391,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	}
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
-	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
-					 AMDGPU_GEM_DOMAIN_GTT |
-					 AMDGPU_GEM_DOMAIN_CPU |
-					 AMDGPU_GEM_DOMAIN_GDS |
-					 AMDGPU_GEM_DOMAIN_GWS |
-					 AMDGPU_GEM_DOMAIN_OA);
-	bo->allowed_domains = bo->prefered_domains;
+	bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
+					  AMDGPU_GEM_DOMAIN_GTT |
+					  AMDGPU_GEM_DOMAIN_CPU |
+					  AMDGPU_GEM_DOMAIN_GDS |
+					  AMDGPU_GEM_DOMAIN_GWS |
+					  AMDGPU_GEM_DOMAIN_OA);
+	bo->allowed_domains = bo->preferred_domains;
 	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
 		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
@@ -418,7 +457,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
 		struct dma_fence *fence;
 
-		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+		r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence);
 		if (unlikely(r))
 			goto fail_unreserve;
 
@@ -470,6 +509,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 					AMDGPU_GEM_CREATE_CPU_GTT_USWC,
 					NULL, &placement,
 					bo->tbo.resv,
+					0,
 					&bo->shadow);
 	if (!r) {
 		bo->shadow->parent = amdgpu_bo_ref(bo);
@@ -481,11 +521,15 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 	return r;
 }
 
+/* init_value will only take effect when flags contains
+ * AMDGPU_GEM_CREATE_VRAM_CLEARED.
+ */
 int amdgpu_bo_create(struct amdgpu_device *adev,
 		     unsigned long size, int byte_align,
 		     bool kernel, u32 domain, u64 flags,
 		     struct sg_table *sg,
 		     struct reservation_object *resv,
+		     uint64_t init_value,
 		     struct amdgpu_bo **bo_ptr)
 {
 	struct ttm_placement placement = {0};
@@ -500,7 +544,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 
 	r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
 					domain, flags, sg, &placement,
-					resv, bo_ptr);
+					resv, init_value, bo_ptr);
 	if (r)
 		return r;
 
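
A sketch of the new parameter, with a hypothetical caller and fill pattern: per the comment added before amdgpu_bo_create(), init_value is only honoured when flags contains AMDGPU_GEM_CREATE_VRAM_CLEARED (and the buffer actually lands in VRAM, per the amdgpu_fill_buffer() hunk); existing callers such as the shadow BO simply pass 0.

	static int my_alloc_poisoned_vram(struct amdgpu_device *adev,
					  struct amdgpu_bo **bo)
	{
		/* new VRAM is filled with the pattern instead of zero */
		return amdgpu_bo_create(adev, 1 << 20, PAGE_SIZE, false,
					AMDGPU_GEM_DOMAIN_VRAM,
					AMDGPU_GEM_CREATE_VRAM_CLEARED,
					NULL, NULL, 0xdeadbeef, bo);
	}
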
@@ -562,7 +606,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
 	if (bo->pin_count)
 		return 0;
 
-	domain = bo->prefered_domains;
+	domain = bo->preferred_domains;
 
 retry:
 	amdgpu_ttm_placement_from_domain(bo, domain);
@@ -609,16 +653,16 @@ err:
 
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 {
-	bool is_iomem;
+	void *kptr;
 	long r;
 
 	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
 		return -EPERM;
 
-	if (bo->kptr) {
-		if (ptr) {
-			*ptr = bo->kptr;
-		}
+	kptr = amdgpu_bo_kptr(bo);
+	if (kptr) {
+		if (ptr)
+			*ptr = kptr;
 		return 0;
 	}
 
@@ -631,19 +675,23 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 	if (r)
 		return r;
 
-	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
 	if (ptr)
-		*ptr = bo->kptr;
+		*ptr = amdgpu_bo_kptr(bo);
 
 	return 0;
 }
 
+void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
+{
+	bool is_iomem;
+
+	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+}
+
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
 {
-	if (bo->kptr == NULL)
-		return;
-	bo->kptr = NULL;
-	ttm_bo_kunmap(&bo->kmap);
+	if (bo->kmap.bo)
+		ttm_bo_kunmap(&bo->kmap);
 }
 
 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
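
A last sketch with a hypothetical caller: amdgpu_bo_kptr() reads the cached kernel address straight from TTM's kmap object, returning NULL for an unmapped BO, so the driver no longer tracks a separate bo->kptr, and the reworked amdgpu_bo_kunmap() is safe to call regardless of mapping state.

	static void my_log_mapping(struct amdgpu_bo *bo)
	{
		void *kptr = amdgpu_bo_kptr(bo);	/* NULL if not kmapped */

		if (kptr)
			pr_debug("bo mapped at %p\n", kptr);

		amdgpu_bo_kunmap(bo);	/* harmless even when kptr was NULL */
	}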