author     Dave Airlie <airlied@redhat.com>    2017-11-06 01:18:59 -0500
committer  Dave Airlie <airlied@redhat.com>    2017-11-06 01:18:59 -0500
commit     8a6fb5b5823d863b07f670dc9e791d4622d5b7e9
tree       dc853580b18fedbdaa6f79d1d9cdd9b7495fed08
parent     36a5fdf76d3281345e000e115f33817570a76420
parent     767601d100a53e653233aebca7c262ce0addfa99
Merge branch 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux into drm-next
some more amd/ttm fixes.
* 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux:
drm/ttm: Downgrade pr_err to pr_debug for memory allocation failures
drm/ttm: Always and only destroy bo->ttm_resv in ttm_bo_release_list
drm/amd/amdgpu: Enabling ACP clock in hw_init (v2)
drm/amdgpu/virt: don't dereference undefined 'module' struct
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c   | 159
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c  |   2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c              |  16
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c      |  13
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c  |  12
5 files changed, 142 insertions, 60 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index a52795d9b458..023bfdb3e63f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -35,41 +35,50 @@
 
 #include "acp_gfx_if.h"
 
 #define ACP_TILE_ON_MASK                        0x03
 #define ACP_TILE_OFF_MASK                       0x02
 #define ACP_TILE_ON_RETAIN_REG_MASK             0x1f
 #define ACP_TILE_OFF_RETAIN_REG_MASK            0x20
 
 #define ACP_TILE_P1_MASK                        0x3e
 #define ACP_TILE_P2_MASK                        0x3d
 #define ACP_TILE_DSP0_MASK                      0x3b
 #define ACP_TILE_DSP1_MASK                      0x37
 
 #define ACP_TILE_DSP2_MASK                      0x2f
 
 #define ACP_DMA_REGS_END                        0x146c0
 #define ACP_I2S_PLAY_REGS_START                 0x14840
 #define ACP_I2S_PLAY_REGS_END                   0x148b4
 #define ACP_I2S_CAP_REGS_START                  0x148b8
 #define ACP_I2S_CAP_REGS_END                    0x1496c
 
 #define ACP_I2S_COMP1_CAP_REG_OFFSET            0xac
 #define ACP_I2S_COMP2_CAP_REG_OFFSET            0xa8
 #define ACP_I2S_COMP1_PLAY_REG_OFFSET           0x6c
 #define ACP_I2S_COMP2_PLAY_REG_OFFSET           0x68
 
 #define mmACP_PGFSM_RETAIN_REG                  0x51c9
 #define mmACP_PGFSM_CONFIG_REG                  0x51ca
 #define mmACP_PGFSM_READ_REG_0                  0x51cc
 
 #define mmACP_MEM_SHUT_DOWN_REQ_LO              0x51f8
 #define mmACP_MEM_SHUT_DOWN_REQ_HI              0x51f9
 #define mmACP_MEM_SHUT_DOWN_STS_LO              0x51fa
 #define mmACP_MEM_SHUT_DOWN_STS_HI              0x51fb
 
-#define ACP_TIMEOUT_LOOP                        0x000000FF
-#define ACP_DEVS                                3
-#define ACP_SRC_ID                              162
+#define mmACP_CONTROL                           0x5131
+#define mmACP_STATUS                            0x5133
+#define mmACP_SOFT_RESET                        0x5134
+#define ACP_CONTROL__ClkEn_MASK                 0x1
+#define ACP_SOFT_RESET__SoftResetAud_MASK       0x100
+#define ACP_SOFT_RESET__SoftResetAudDone_MASK   0x1000000
+#define ACP_CLOCK_EN_TIME_OUT_VALUE             0x000000FF
+#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE      0x000000FF
+
+#define ACP_TIMEOUT_LOOP                        0x000000FF
+#define ACP_DEVS                                3
+#define ACP_SRC_ID                              162
 
 enum {
 	ACP_TILE_P1 = 0,
@@ -260,6 +269,8 @@ static int acp_hw_init(void *handle)
 {
 	int r, i;
 	uint64_t acp_base;
+	u32 val = 0;
+	u32 count = 0;
 	struct device *dev;
 	struct i2s_platform_data *i2s_pdata;
 
@@ -400,6 +411,46 @@ static int acp_hw_init(void *handle)
 		}
 	}
 
+	/* Assert Soft reset of ACP */
+	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+
+	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
+	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+
+	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+	while (true) {
+		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
+		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
+			break;
+		if (--count == 0) {
+			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+			return -ETIMEDOUT;
+		}
+		udelay(100);
+	}
+	/* Enable clock to ACP and wait until the clock is enabled */
+	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
+	val = val | ACP_CONTROL__ClkEn_MASK;
+	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
+
+	count = ACP_CLOCK_EN_TIME_OUT_VALUE;
+
+	while (true) {
+		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
+		if (val & (u32) 0x1)
+			break;
+		if (--count == 0) {
+			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+			return -ETIMEDOUT;
+		}
+		udelay(100);
+	}
+	/* Deassert the SOFT RESET flags */
+	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
+	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+
 	return 0;
 }
 
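Editor's note: the hw_init hunk above follows a common MMIO bring-up pattern -- assert the audio soft reset, poll until the done bit latches, enable the ACP clock, poll the status register, then deassert the reset (acp_hw_fini below mirrors it to gate the clock back off). A minimal sketch of the bounded-poll step, factored into a helper, could look like the following; the helper name and signature are illustrative only and are not part of the driver:

/* Illustrative helper only -- not part of amdgpu_acp.c.  It sketches the
 * bounded register poll used twice in the hunk above: read a register
 * through CGS until the requested mask bits are set, or give up after a
 * fixed number of 100 us steps.
 */
static int acp_poll_mask(struct amdgpu_device *adev, u32 reg, u32 mask,
                         u32 retries)
{
        u32 val;

        while (retries--) {
                val = cgs_read_register(adev->acp.cgs_device, reg);
                if ((val & mask) == mask)
                        return 0;
                udelay(100);
        }
        return -ETIMEDOUT;
}

With such a helper, the sequence reduces to: set ACP_SOFT_RESET__SoftResetAud_MASK, poll for ACP_SOFT_RESET__SoftResetAudDone_MASK, set ACP_CONTROL__ClkEn_MASK, poll bit 0 of mmACP_STATUS, then clear the reset bit.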
@@ -412,6 +463,8 @@ static int acp_hw_init(void *handle)
 static int acp_hw_fini(void *handle)
 {
 	int i, ret;
+	u32 val = 0;
+	u32 count = 0;
 	struct device *dev;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
@@ -419,6 +472,42 @@ static int acp_hw_fini(void *handle)
 	if (!adev->acp.acp_cell)
 		return 0;
 
+	/* Assert Soft reset of ACP */
+	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+
+	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
+	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+
+	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+	while (true) {
+		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
+		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
+			break;
+		if (--count == 0) {
+			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+			return -ETIMEDOUT;
+		}
+		udelay(100);
+	}
+	/* Disable ACP clock */
+	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
+	val &= ~ACP_CONTROL__ClkEn_MASK;
+	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
+
+	count = ACP_CLOCK_EN_TIME_OUT_VALUE;
+
+	while (true) {
+		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
+		if (val & (u32) 0x1)
+			break;
+		if (--count == 0) {
+			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+			return -ETIMEDOUT;
+		}
+		udelay(100);
+	}
+
 	if (adev->acp.acp_genpd) {
 		for (i = 0; i < ACP_DEVS ; i++) {
 			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index e97f80f86005..4e4a476593e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -328,9 +328,11 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
 			sizeof(amdgim_vf2pf_info));
 		AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
 					&str);
+#ifdef MODULE
 		if (THIS_MODULE->version != NULL)
 			strcpy(str, THIS_MODULE->version);
 		else
+#endif
 			strcpy(str, "N/A");
 		AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
 					0);
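Editor's note: when the driver is built into the kernel rather than as a module, THIS_MODULE expands to a null struct module pointer, so reading THIS_MODULE->version would dereference it; the added #ifdef MODULE limits that read to modular builds, which is what the "don't dereference undefined 'module' struct" patch title refers to. A self-contained sketch of the guarded pattern, with a hypothetical helper name:

/* Sketch of the #ifdef MODULE guard added above.  fill_driver_version()
 * is a hypothetical helper, not a function in amdgpu_virt.c.
 */
#include <linux/module.h>
#include <linux/string.h>

static void fill_driver_version(char *str)
{
#ifdef MODULE
        /* Only a modular build has a real struct module behind THIS_MODULE. */
        if (THIS_MODULE->version != NULL)
                strcpy(str, THIS_MODULE->version);
        else
#endif
                strcpy(str, "N/A");
}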
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 379ec41d2c69..c088703777e2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -150,8 +150,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	ttm_tt_destroy(bo->ttm);
 	atomic_dec(&bo->glob->bo_count);
 	dma_fence_put(bo->moving);
-	if (bo->resv == &bo->ttm_resv)
-		reservation_object_fini(&bo->ttm_resv);
+	reservation_object_fini(&bo->ttm_resv);
 	mutex_destroy(&bo->wu_mutex);
 	if (bo->destroy)
 		bo->destroy(bo);
@@ -402,14 +401,11 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
 	if (bo->resv == &bo->ttm_resv)
 		return 0;
 
-	reservation_object_init(&bo->ttm_resv);
 	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
 
 	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
-	if (r) {
+	if (r)
 		reservation_object_unlock(&bo->ttm_resv);
-		reservation_object_fini(&bo->ttm_resv);
-	}
 
 	return r;
 }
@@ -457,10 +453,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
 		ttm_bo_del_from_lru(bo);
 		spin_unlock(&glob->lru_lock);
-		if (bo->resv != &bo->ttm_resv) {
+		if (bo->resv != &bo->ttm_resv)
 			reservation_object_unlock(&bo->ttm_resv);
-			reservation_object_fini(&bo->ttm_resv);
-		}
 
 		ttm_bo_cleanup_memtype_use(bo);
 		return;
@@ -560,8 +554,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 	}
 
 	ttm_bo_del_from_lru(bo);
-	if (!list_empty(&bo->ddestroy) && (bo->resv != &bo->ttm_resv))
-		reservation_object_fini(&bo->ttm_resv);
 	list_del_init(&bo->ddestroy);
 	kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
@@ -1210,8 +1202,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		lockdep_assert_held(&bo->resv->lock.base);
 	} else {
 		bo->resv = &bo->ttm_resv;
-		reservation_object_init(&bo->ttm_resv);
 	}
+	reservation_object_init(&bo->ttm_resv);
 	atomic_inc(&bo->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
 	bo->priority = 0;
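Editor's note: the net effect of the ttm_bo.c hunks is that bo->ttm_resv is initialized and destroyed exactly once per buffer object -- reservation_object_init() now always runs in ttm_bo_init_reserved(), and the matching reservation_object_fini() happens only in ttm_bo_release_list(), regardless of whether bo->resv points at the embedded ttm_resv or at a caller-supplied reservation object. A much-simplified sketch of that lifecycle, illustrative only (the real functions do far more):

/* Illustrative lifecycle sketch, not the real TTM code. */
static void bo_init_resv_sketch(struct ttm_buffer_object *bo,
                                struct reservation_object *resv)
{
        bo->resv = resv ? resv : &bo->ttm_resv;
        /* always initialize the embedded object, even when bo->resv is external */
        reservation_object_init(&bo->ttm_resv);
}

static void bo_release_resv_sketch(struct ttm_buffer_object *bo)
{
        /* always destroy it here, and nowhere else */
        reservation_object_fini(&bo->ttm_resv);
}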
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 4d688c8d7853..316f831ad5f0 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -329,7 +329,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
 			GFP_KERNEL);
 	if (!pages_to_free) {
-		pr_err("Failed to allocate memory for pool free operation\n");
+		pr_debug("Failed to allocate memory for pool free operation\n");
 		return 0;
 	}
 
@@ -517,7 +517,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 
 	if (!caching_array) {
-		pr_err("Unable to allocate table for new pages\n");
+		pr_debug("Unable to allocate table for new pages\n");
 		return -ENOMEM;
 	}
 
@@ -525,7 +525,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 		p = alloc_pages(gfp_flags, order);
 
 		if (!p) {
-			pr_err("Unable to get page %u\n", i);
+			pr_debug("Unable to get page %u\n", i);
 
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
@@ -625,7 +625,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
 		++pool->nrefills;
 		pool->npages += alloc_size;
 	} else {
-		pr_err("Failed to fill pool (%p)\n", pool);
+		pr_debug("Failed to fill pool (%p)\n", pool);
 		/* If we have any pages left put them to the pool. */
 		list_for_each_entry(p, &new_pages, lru) {
 			++cpages;
@@ -885,8 +885,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 		while (npages) {
 			p = alloc_page(gfp_flags);
 			if (!p) {
-
-				pr_err("Unable to allocate page\n");
+				pr_debug("Unable to allocate page\n");
 				return -ENOMEM;
 			}
 
@@ -925,7 +924,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 		/* If there is any pages in the list put them back to
 		 * the pool.
 		 */
-		pr_err("Failed to allocate extra pages for large request\n");
+		pr_debug("Failed to allocate extra pages for large request\n");
 		ttm_put_pages(pages, count, flags, cstate);
 		return r;
 	}
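Editor's note: the ttm_page_alloc hunks change only the log level. Pool allocation failures are transient and already reported to callers through -ENOMEM, so they are downgraded from pr_err() to pr_debug() and no longer hit dmesg at error level by default; with CONFIG_DYNAMIC_DEBUG they can still be re-enabled at runtime through /sys/kernel/debug/dynamic_debug/control. A minimal sketch of the resulting pattern, using a hypothetical function name:

/* Hypothetical example of the pattern after this patch: report the
 * allocation failure at debug level and let the return value carry the error.
 */
#include <linux/printk.h>
#include <linux/slab.h>

static struct page **alloc_tracking_array(unsigned int npages)
{
        struct page **pages;

        pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                pr_debug("Failed to allocate memory for pool free operation\n");
        return pages;   /* NULL on failure; caller handles it */
}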
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 96ad12906621..6b2627fe9bc1 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -463,7 +463,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
 			GFP_KERNEL);
 
 	if (!pages_to_free) {
-		pr_err("%s: Failed to allocate memory for pool free operation\n",
+		pr_debug("%s: Failed to allocate memory for pool free operation\n",
 			pool->dev_name);
 		return 0;
 	}
@@ -755,7 +755,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 
 	if (!caching_array) {
-		pr_err("%s: Unable to allocate table for new pages\n",
+		pr_debug("%s: Unable to allocate table for new pages\n",
 			pool->dev_name);
 		return -ENOMEM;
 	}
@@ -768,8 +768,8 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
 	for (i = 0, cpages = 0; i < count; ++i) {
 		dma_p = __ttm_dma_alloc_page(pool);
 		if (!dma_p) {
-			pr_err("%s: Unable to get page %u\n",
-			       pool->dev_name, i);
+			pr_debug("%s: Unable to get page %u\n",
+				 pool->dev_name, i);
 
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
@@ -855,8 +855,8 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
 		struct dma_page *d_page;
 		unsigned cpages = 0;
 
-		pr_err("%s: Failed to fill %s pool (r:%d)!\n",
-		       pool->dev_name, pool->name, r);
+		pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
+			 pool->dev_name, pool->name, r);
 
 		list_for_each_entry(d_page, &d_pages, page_list) {
 			cpages++;