author	Jesse Barnes <jbarnes@virtuousgeek.org>	2009-05-08 19:13:25 -0400
committer	Eric Anholt <eric@anholt.net>	2009-05-19 13:07:14 -0400
commit	8e7d2b2c6ecd3c21a54b877eae3d5be48292e6b5 (patch)
tree	704b3affc3b617be2bcd63ddd9865e9adceeb031 /drivers/gpu
parent	1406de8e11eb043681297adf86d6892ff8efc27a (diff)
drm/i915: allocate large pointer arrays with vmalloc
For a while now, many of the GEM code paths have allocated page or object arrays with the slab allocator. This is nice and fast, but won't work well if memory is fragmented, since the slab allocator works with physically contiguous memory (i.e. order > 2 allocations are likely to fail fairly early after booting and doing some work).

This patch works around the issue by falling back to vmalloc for >PAGE_SIZE allocations. This is ugly, but much less work than chaining a bunch of pages together by hand (surprisingly there's not a bunch of generic kernel helpers for this yet, AFAIK). vmalloc space is somewhat precious on 32-bit kernels, but our allocations shouldn't be big enough to cause problems, though they're routinely more than a page.

Note that this patch doesn't address the unchecked alloc-based-on-ioctl-args in GEM; that needs to be fixed in a separate patch.

Also, I've deliberately ignored the DRM's "area" junk. I don't think anyone actually uses it anymore and I'm hoping it gets ripped out soon.

[Updated: removed size arg to new free function. We could unify the free functions as well once the DRM mem tracking is ripped out.]

fd.o bug #20152 (part 1/3)

Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Eric Anholt <eric@anholt.net>
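The drm_calloc_large()/drm_free_large() definitions themselves land outside the view below (the diffstat is limited to 'drivers/gpu'), but the fallback they implement is straightforward: keep using the slab allocator for requests that fit in a page, and switch to vmalloc beyond that. A minimal sketch of the pair follows; the exact overflow guard and allocation flags here are an approximation of the helpers, not a verbatim copy of the header:

	#include <linux/kernel.h>
	#include <linux/mm.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static inline void *drm_calloc_large(size_t nmemb, size_t size)
	{
		/* Requests that fit in a page stay on the fast slab path. */
		if (size * nmemb <= PAGE_SIZE)
			return kcalloc(nmemb, size, GFP_KERNEL);

		/* Refuse nmemb * size overflow, as kcalloc would. */
		if (size != 0 && nmemb > ULONG_MAX / size)
			return NULL;

		/* Larger requests fall back to zeroed vmalloc space. */
		return __vmalloc(size * nmemb,
				 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
				 PAGE_KERNEL);
	}

	static inline void drm_free_large(void *ptr)
	{
		/* Hand the pointer back to whichever allocator made it. */
		if (!is_vmalloc_addr(ptr)) {
			kfree(ptr);
			return;
		}

		vfree(ptr);
	}

Because is_vmalloc_addr() distinguishes the two cases at free time, the free helper doesn't need to be told how big the allocation was, which is what allows the size argument mentioned in the update note above to be dropped.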
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	38
1 file changed, 15 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b189b49c7602..4a24c90fb940 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -349,7 +349,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
 	if (user_pages == NULL)
 		return -ENOMEM;
 
@@ -429,7 +429,7 @@ fail_put_user_pages:
 		SetPageDirty(user_pages[i]);
 		page_cache_release(user_pages[i]);
 	}
-	kfree(user_pages);
+	drm_free_large(user_pages);
 
 	return ret;
 }
@@ -649,7 +649,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
 	if (user_pages == NULL)
 		return -ENOMEM;
 
@@ -719,7 +719,7 @@ out_unlock:
 out_unpin_pages:
 	for (i = 0; i < pinned_pages; i++)
 		page_cache_release(user_pages[i]);
-	kfree(user_pages);
+	drm_free_large(user_pages);
 
 	return ret;
 }
@@ -824,7 +824,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
 	if (user_pages == NULL)
 		return -ENOMEM;
 
@@ -902,7 +902,7 @@ fail_unlock:
 fail_put_user_pages:
 	for (i = 0; i < pinned_pages; i++)
 		page_cache_release(user_pages[i]);
-	kfree(user_pages);
+	drm_free_large(user_pages);
 
 	return ret;
 }
@@ -1408,9 +1408,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 	}
 	obj_priv->dirty = 0;
 
-	drm_free(obj_priv->pages,
-		 page_count * sizeof(struct page *),
-		 DRM_MEM_DRIVER);
+	drm_free_large(obj_priv->pages);
 	obj_priv->pages = NULL;
 }
 
@@ -2024,8 +2022,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 	 */
 	page_count = obj->size / PAGE_SIZE;
 	BUG_ON(obj_priv->pages != NULL);
-	obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
-				     DRM_MEM_DRIVER);
+	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
 	if (obj_priv->pages == NULL) {
 		DRM_ERROR("Faled to allocate page list\n");
 		obj_priv->pages_refcount--;
@@ -3111,7 +3108,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 		reloc_count += exec_list[i].relocation_count;
 	}
 
-	*relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER);
+	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
 	if (*relocs == NULL)
 		return -ENOMEM;
 
@@ -3125,8 +3122,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 				     exec_list[i].relocation_count *
 				     sizeof(**relocs));
 		if (ret != 0) {
-			drm_free(*relocs, reloc_count * sizeof(**relocs),
-				 DRM_MEM_DRIVER);
+			drm_free_large(*relocs);
 			*relocs = NULL;
 			return -EFAULT;
 		}
@@ -3165,7 +3161,7 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
 	}
 
 err:
-	drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
+	drm_free_large(relocs);
 
 	return ret;
 }
@@ -3198,10 +3194,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 	/* Copy in the exec list from userland */
-	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
-			       DRM_MEM_DRIVER);
-	object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
-				 DRM_MEM_DRIVER);
+	exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
+	object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
 	if (exec_list == NULL || object_list == NULL) {
 		DRM_ERROR("Failed to allocate exec or object list "
 			  "for %d buffers\n",
@@ -3462,10 +3456,8 @@ err:
 	}
 
 pre_mutex_err:
-	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
-		 DRM_MEM_DRIVER);
-	drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
-		 DRM_MEM_DRIVER);
+	drm_free_large(object_list);
+	drm_free_large(exec_list);
 	drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
 		 DRM_MEM_DRIVER);
 