aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMartin Kepplinger <martink@posteo.de>2014-06-14 20:10:39 -0400
committerDave Airlie <airlied@redhat.com>2014-07-07 21:28:28 -0400
commit0eff2a24d66cfa84ed803782c8614b4538126394 (patch)
treee8698feb6aea1024ff2303e8b40f23b4abbb8adb
parent2023d84d2c3b7a4b8fc5c64c08c59c4e1505e2d3 (diff)
ttm: use NULL instead of 0 for ttm_bo_reserve()'s pointer arg.
Fix a sparse warning: ttm_bo_reserve()'s last argument is a pointer to a struct, so use NULL as the null pointer constant.

Signed-off-by: Martin Kepplinger <martink@posteo.de>
Reviewed-by: Jingoo Han <jg1.han@samsung.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 14
1 file changed, 7 insertions, 7 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a13a10025ec7..3da89d5dab60 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -412,7 +412,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
412 int ret; 412 int ret;
413 413
414 spin_lock(&glob->lru_lock); 414 spin_lock(&glob->lru_lock);
415 ret = __ttm_bo_reserve(bo, false, true, false, 0); 415 ret = __ttm_bo_reserve(bo, false, true, false, NULL);
416 416
417 spin_lock(&bdev->fence_lock); 417 spin_lock(&bdev->fence_lock);
418 (void) ttm_bo_wait(bo, false, false, true); 418 (void) ttm_bo_wait(bo, false, false, true);
@@ -514,7 +514,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
514 return ret; 514 return ret;
515 515
516 spin_lock(&glob->lru_lock); 516 spin_lock(&glob->lru_lock);
517 ret = __ttm_bo_reserve(bo, false, true, false, 0); 517 ret = __ttm_bo_reserve(bo, false, true, false, NULL);
518 518
519 /* 519 /*
520 * We raced, and lost, someone else holds the reservation now, 520 * We raced, and lost, someone else holds the reservation now,
@@ -577,11 +577,11 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
577 kref_get(&nentry->list_kref); 577 kref_get(&nentry->list_kref);
578 } 578 }
579 579
580 ret = __ttm_bo_reserve(entry, false, true, false, 0); 580 ret = __ttm_bo_reserve(entry, false, true, false, NULL);
581 if (remove_all && ret) { 581 if (remove_all && ret) {
582 spin_unlock(&glob->lru_lock); 582 spin_unlock(&glob->lru_lock);
583 ret = __ttm_bo_reserve(entry, false, false, 583 ret = __ttm_bo_reserve(entry, false, false,
584 false, 0); 584 false, NULL);
585 spin_lock(&glob->lru_lock); 585 spin_lock(&glob->lru_lock);
586 } 586 }
587 587
@@ -726,7 +726,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
726 726
727 spin_lock(&glob->lru_lock); 727 spin_lock(&glob->lru_lock);
728 list_for_each_entry(bo, &man->lru, lru) { 728 list_for_each_entry(bo, &man->lru, lru) {
729 ret = __ttm_bo_reserve(bo, false, true, false, 0); 729 ret = __ttm_bo_reserve(bo, false, true, false, NULL);
730 if (!ret) 730 if (!ret)
731 break; 731 break;
732 } 732 }
@@ -1595,7 +1595,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1595 * Using ttm_bo_reserve makes sure the lru lists are updated. 1595 * Using ttm_bo_reserve makes sure the lru lists are updated.
1596 */ 1596 */
1597 1597
1598 ret = ttm_bo_reserve(bo, true, no_wait, false, 0); 1598 ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
1599 if (unlikely(ret != 0)) 1599 if (unlikely(ret != 0))
1600 return ret; 1600 return ret;
1601 spin_lock(&bdev->fence_lock); 1601 spin_lock(&bdev->fence_lock);
@@ -1630,7 +1630,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1630 1630
1631 spin_lock(&glob->lru_lock); 1631 spin_lock(&glob->lru_lock);
1632 list_for_each_entry(bo, &glob->swap_lru, swap) { 1632 list_for_each_entry(bo, &glob->swap_lru, swap) {
1633 ret = __ttm_bo_reserve(bo, false, true, false, 0); 1633 ret = __ttm_bo_reserve(bo, false, true, false, NULL);
1634 if (!ret) 1634 if (!ret)
1635 break; 1635 break;
1636 } 1636 }