author	Brad Volkin <bradley.d.volkin@intel.com>	2014-02-18 13:15:45 -0500
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-03-07 16:36:59 -0500
commit	4c914c0c7c787b8f730128a8cdcca9c50b0784ab (patch)
tree	f510536f90a2f9f0d2d945e4971154a0da9ccb8d /drivers/gpu/drm/i915/i915_gem.c
parent	922044c9dfec40d5adc5d4a757f802e55e3d0a85 (diff)
drm/i915: Refactor shmem pread setup
The command parser is going to need the same synchronization and setup
logic, so factor it out for reuse.

v2: Add a check that the object is backed by shmem

Signed-off-by: Brad Volkin <bradley.d.volkin@intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
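For reference, a caller such as the planned command parser would be expected to use the new helper roughly as sketched below. This is an illustrative sketch only, not part of the patch: the example_read_object() wrapper and the elided page-walk are hypothetical; only i915_gem_obj_prepare_shmem_read() and i915_gem_object_unpin_pages() come from the i915 code itself.

/*
 * Illustrative sketch (not part of this patch): how a caller is expected
 * to use the new helper. The example_read_object() name and the elided
 * page-walk are assumptions for illustration only.
 */
static int example_read_object(struct drm_i915_gem_object *obj)
{
	int needs_clflush = 0;
	int ret;

	/* Pin the backing pages and wait for outstanding GPU writes. */
	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;	/* e.g. -EINVAL for non-shmem backed objects */

	/*
	 * ... walk and read obj->pages here, flushing each mapping from
	 * the CPU cache first when needs_clflush is non-zero ...
	 */

	/* Drop the pin taken by the prepare helper. */
	i915_gem_object_unpin_pages(obj);
	return 0;
}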
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c | 51
1 file changed, 37 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 18ea6bccbbf0..177c20722656 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -327,6 +327,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
 	return 0;
 }
 
+/*
+ * Pins the specified object's pages and synchronizes the object with
+ * GPU accesses. Sets needs_clflush to non-zero if the caller should
+ * flush the object from the CPU cache.
+ */
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    int *needs_clflush)
+{
+	int ret;
+
+	*needs_clflush = 0;
+
+	if (!obj->base.filp)
+		return -EINVAL;
+
+	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+		/* If we're not in the cpu read domain, set ourself into the gtt
+		 * read domain and manually flush cachelines (if required). This
+		 * optimizes for the case when the gpu will dirty the data
+		 * anyway again before the next pread happens. */
+		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
+							obj->cache_level);
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
+	}
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	return ret;
+}
+
 /* Per-page copy function for the shmem pread fastpath.
  * Flushes invalid cachelines before reading the target if
  * needs_clflush is set. */
@@ -424,23 +460,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
-		/* If we're not in the cpu read domain, set ourself into the gtt
-		 * read domain and manually flush cachelines (if required). This
-		 * optimizes for the case when the gpu will dirty the data
-		 * anyway again before the next pread happens. */
-		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-		ret = i915_gem_object_wait_rendering(obj, true);
-		if (ret)
-			return ret;
-	}
-
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 	if (ret)
 		return ret;
 
-	i915_gem_object_pin_pages(obj);
-
 	offset = args->offset;
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,