author	Eric Anholt <eric@anholt.net>	2009-03-19 17:10:50 -0400
committer	Eric Anholt <eric@anholt.net>	2009-03-27 17:46:28 -0400
commit	856fa1988ea483fc2dab84a16681dcfde821b740 (patch)
tree	8cd5f580f27d266fac0c540d33cd041e074872af
parent	3de09aa3b38910d366f4710ffdf430c9d387d1a3 (diff)
drm/i915: Make GEM object's page lists refcounted instead of get/free.
We've wanted this for a few consumers that touch the pages directly (such
as the following commit), which have been doing the refcounting outside of
get/put pages.

Signed-off-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	3
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	70
2 files changed, 38 insertions(+), 35 deletions(-)
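The change is plain reference counting on an object's backing pages: the first
i915_gem_object_get_pages() call populates the pages array, later calls only
bump pages_refcount, and i915_gem_object_put_pages() releases the pages only
when the count falls back to zero. Below is a minimal userspace sketch of that
get/put idiom; the demo_* names and the calloc()/free() allocation are
illustrative stand-ins, not the driver's actual API.

/* Sketch of the get/put refcounting idiom this commit adopts.
 * demo_object, demo_get_pages and demo_put_pages are hypothetical
 * names; the real code uses drm_calloc()/drm_free() on struct page *.
 */
#include <stdlib.h>
#include <assert.h>

struct demo_object {
	void **pages;		/* valid only while pages_refcount > 0 */
	int pages_refcount;	/* number of current users of the pages */
	int page_count;
};

static int demo_get_pages(struct demo_object *obj)
{
	if (obj->pages_refcount++ != 0)
		return 0;	/* already populated: just take a reference */

	obj->pages = calloc(obj->page_count, sizeof(*obj->pages));
	if (obj->pages == NULL) {
		obj->pages_refcount--;	/* undo the count on failure */
		return -1;
	}
	return 0;
}

static void demo_put_pages(struct demo_object *obj)
{
	assert(obj->pages_refcount != 0);	/* mirrors the BUG_ON() below */

	if (--obj->pages_refcount != 0)
		return;		/* other users still hold the pages */

	free(obj->pages);	/* last reference: release for real */
	obj->pages = NULL;
}

int main(void)
{
	struct demo_object obj = { .page_count = 4 };

	if (demo_get_pages(&obj) != 0)	/* first user allocates */
		return 1;
	demo_get_pages(&obj);	/* second user only takes a reference */
	demo_put_pages(&obj);	/* pages survive: one user remains */
	demo_put_pages(&obj);	/* last user: pages actually freed */
	return 0;
}

Per the commit message, the payoff is that a consumer which touches the pages
directly can take its own get/put pair instead of duplicating this bookkeeping
outside the helpers.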
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d6cc9861e0a1..75e33844146b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -404,7 +404,8 @@ struct drm_i915_gem_object {
 	/** AGP memory structure for our GTT binding. */
 	DRM_AGP_MEM *agp_mem;
 
-	struct page **page_list;
+	struct page **pages;
+	int pages_refcount;
 
 	/**
 	 * Current offset of the object in GTT space.
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 35f8c7bd0d32..b998d659fd98 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,8 +43,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 						  uint64_t offset,
 						  uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
-static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+static int i915_gem_object_get_pages(struct drm_gem_object *obj);
+static void i915_gem_object_put_pages(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 					unsigned alignment);
@@ -928,29 +928,30 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 }
 
 static void
-i915_gem_object_free_page_list(struct drm_gem_object *obj)
+i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int page_count = obj->size / PAGE_SIZE;
 	int i;
 
-	if (obj_priv->page_list == NULL)
-		return;
+	BUG_ON(obj_priv->pages_refcount == 0);
 
+	if (--obj_priv->pages_refcount != 0)
+		return;
 
 	for (i = 0; i < page_count; i++)
-		if (obj_priv->page_list[i] != NULL) {
+		if (obj_priv->pages[i] != NULL) {
 			if (obj_priv->dirty)
-				set_page_dirty(obj_priv->page_list[i]);
-			mark_page_accessed(obj_priv->page_list[i]);
-			page_cache_release(obj_priv->page_list[i]);
+				set_page_dirty(obj_priv->pages[i]);
+			mark_page_accessed(obj_priv->pages[i]);
+			page_cache_release(obj_priv->pages[i]);
 		}
 	obj_priv->dirty = 0;
 
-	drm_free(obj_priv->page_list,
+	drm_free(obj_priv->pages,
 		 page_count * sizeof(struct page *),
 		 DRM_MEM_DRIVER);
-	obj_priv->page_list = NULL;
+	obj_priv->pages = NULL;
 }
 
 static void
@@ -1402,7 +1403,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
 		i915_gem_clear_fence_reg(obj);
 
-	i915_gem_object_free_page_list(obj);
+	i915_gem_object_put_pages(obj);
 
 	if (obj_priv->gtt_space) {
 		atomic_dec(&dev->gtt_count);
@@ -1521,7 +1522,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 }
 
 static int
-i915_gem_object_get_page_list(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int page_count, i;
@@ -1530,18 +1531,19 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
 	struct page *page;
 	int ret;
 
-	if (obj_priv->page_list)
+	if (obj_priv->pages_refcount++ != 0)
 		return 0;
 
 	/* Get the list of pages out of our struct file. They'll be pinned
 	 * at this point until we release them.
 	 */
 	page_count = obj->size / PAGE_SIZE;
-	BUG_ON(obj_priv->page_list != NULL);
-	obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
+	BUG_ON(obj_priv->pages != NULL);
+	obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
 				     DRM_MEM_DRIVER);
-	if (obj_priv->page_list == NULL) {
+	if (obj_priv->pages == NULL) {
 		DRM_ERROR("Faled to allocate page list\n");
+		obj_priv->pages_refcount--;
 		return -ENOMEM;
 	}
 
@@ -1552,10 +1554,10 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			DRM_ERROR("read_mapping_page failed: %d\n", ret);
-			i915_gem_object_free_page_list(obj);
+			i915_gem_object_put_pages(obj);
 			return ret;
 		}
-		obj_priv->page_list[i] = page;
+		obj_priv->pages[i] = page;
 	}
 	return 0;
 }
@@ -1878,7 +1880,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	DRM_INFO("Binding object of size %d at 0x%08x\n",
 		 obj->size, obj_priv->gtt_offset);
 #endif
-	ret = i915_gem_object_get_page_list(obj);
+	ret = i915_gem_object_get_pages(obj);
 	if (ret) {
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
@@ -1890,12 +1892,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	 * into the GTT.
 	 */
 	obj_priv->agp_mem = drm_agp_bind_pages(dev,
-					       obj_priv->page_list,
+					       obj_priv->pages,
 					       page_count,
 					       obj_priv->gtt_offset,
 					       obj_priv->agp_type);
 	if (obj_priv->agp_mem == NULL) {
-		i915_gem_object_free_page_list(obj);
+		i915_gem_object_put_pages(obj);
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
 		return -ENOMEM;
@@ -1922,10 +1924,10 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 	 * to GPU, and we can ignore the cache flush because it'll happen
 	 * again at bind time.
 	 */
-	if (obj_priv->page_list == NULL)
+	if (obj_priv->pages == NULL)
 		return;
 
-	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
+	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
@@ -2270,7 +2272,7 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
 			if (obj_priv->page_cpu_valid[i])
 				continue;
-			drm_clflush_pages(obj_priv->page_list + i, 1);
+			drm_clflush_pages(obj_priv->pages + i, 1);
 		}
 		drm_agp_chipset_flush(dev);
 	}
@@ -2336,7 +2338,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 		if (obj_priv->page_cpu_valid[i])
 			continue;
 
-		drm_clflush_pages(obj_priv->page_list + i, 1);
+		drm_clflush_pages(obj_priv->pages + i, 1);
 
 		obj_priv->page_cpu_valid[i] = 1;
 	}
@@ -3304,7 +3306,7 @@ i915_gem_init_hws(struct drm_device *dev)
 
 	dev_priv->status_gfx_addr = obj_priv->gtt_offset;
 
-	dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
+	dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
 	if (dev_priv->hw_status_page == NULL) {
 		DRM_ERROR("Failed to map status page.\n");
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
@@ -3334,7 +3336,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
 	obj = dev_priv->hws_obj;
 	obj_priv = obj->driver_private;
 
-	kunmap(obj_priv->page_list[0]);
+	kunmap(obj_priv->pages[0]);
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(obj);
 	dev_priv->hws_obj = NULL;
@@ -3637,20 +3639,20 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 	if (!obj_priv->phys_obj)
 		return;
 
-	ret = i915_gem_object_get_page_list(obj);
+	ret = i915_gem_object_get_pages(obj);
 	if (ret)
 		goto out;
 
 	page_count = obj->size / PAGE_SIZE;
 
 	for (i = 0; i < page_count; i++) {
-		char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
 		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
 		memcpy(dst, src, PAGE_SIZE);
 		kunmap_atomic(dst, KM_USER0);
 	}
-	drm_clflush_pages(obj_priv->page_list, page_count);
+	drm_clflush_pages(obj_priv->pages, page_count);
 	drm_agp_chipset_flush(dev);
 out:
 	obj_priv->phys_obj->cur_obj = NULL;
@@ -3693,7 +3695,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
 	obj_priv->phys_obj->cur_obj = obj;
 
-	ret = i915_gem_object_get_page_list(obj);
+	ret = i915_gem_object_get_pages(obj);
 	if (ret) {
 		DRM_ERROR("failed to get page list\n");
 		goto out;
@@ -3702,7 +3704,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 	page_count = obj->size / PAGE_SIZE;
 
 	for (i = 0; i < page_count; i++) {
-		char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
 		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
 		memcpy(dst, src, PAGE_SIZE);