author		Dave Gordon <david.s.gordon@intel.com>		2016-05-20 06:54:05 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>		2016-05-20 08:42:58 -0400
commit		b338fa473e16c9be208b0aec7ec4e710a8a5f9ee (patch)
tree		8cdc229b0e33bce2666ade24589ca48d53611bd6
parent		dd6034c67a50a4f6895e5bded512e0dd4bf58918 (diff)
drm/i915: optimise i915_gem_object_map() for small objects
We're using this function for ringbuffers and other "small" objects, so
it's worth avoiding an extra malloc()/free() cycle if the page array is
small enough to put on the stack. Here we've chosen an arbitrary cutoff
of 32 (4k) pages, which is big enough for a ringbuffer (4 pages) or a
context image (currently up to 22 pages).

v5: change name of local array [Chris Wilson]

Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1463741647-15666-3-git-send-email-chris@chris-wilson.co.uk
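For illustration only, here is a minimal standalone C sketch of the same stack-or-heap pattern the patch applies: use a small fixed-size array on the stack when the element count is below a cutoff, fall back to a heap allocation otherwise, and free only what was actually allocated. The names map_pages() and STACK_PAGE_LIMIT are hypothetical, and the mapping step is stubbed out; this is not the i915 code itself, which appears in the diff below.

	#include <stdlib.h>
	#include <string.h>

	/* Same cutoff as the patch: up to 32 entries stay on the stack */
	#define STACK_PAGE_LIMIT 32

	void *map_pages(void *const *src, unsigned long n_pages)
	{
		void *stack_pages[STACK_PAGE_LIMIT];
		void **pages = stack_pages;
		void *addr;

		if (n_pages > STACK_PAGE_LIMIT) {
			/* Too big for the stack -- fall back to a heap allocation */
			pages = malloc(n_pages * sizeof(*pages));
			if (!pages)
				return NULL;
		}

		/* Gather the page pointers into one contiguous array */
		memcpy(pages, src, n_pages * sizeof(*pages));

		/* ... hand 'pages' to the mapping routine (vmap() in the patch) ... */
		addr = pages[0];	/* placeholder for the mapped address */

		/* Only free what we actually allocated */
		if (pages != stack_pages)
			free(pages);

		return addr;
	}

The common case (small objects such as ringbuffers) thus never touches the allocator, while large objects still work exactly as before.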
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f504c3168da5..fd3be2b385cb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2404,7 +2404,8 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
 	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
 	struct sg_table *sgt = obj->pages;
 	struct sg_page_iter sg_iter;
-	struct page **pages;
+	struct page *stack_pages[32];
+	struct page **pages = stack_pages;
 	unsigned long i = 0;
 	void *addr;
 
@@ -2412,9 +2413,12 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
 	if (n_pages == 1)
 		return kmap(sg_page(sgt->sgl));
 
-	pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
-	if (!pages)
-		return NULL;
+	if (n_pages > ARRAY_SIZE(stack_pages)) {
+		/* Too big for stack -- allocate temporary array instead */
+		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+		if (!pages)
+			return NULL;
+	}
 
 	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0)
 		pages[i++] = sg_page_iter_page(&sg_iter);
@@ -2424,7 +2428,8 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
 
 	addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
 
-	drm_free_large(pages);
+	if (pages != stack_pages)
+		drm_free_large(pages);
 
 	return addr;
 }