author	Daniel Vetter <daniel.vetter@ffwll.ch>	2011-12-14 07:57:30 -0500
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-01-30 17:34:07 -0500
commit	5c0480f21f9896c443b0e65d779c8e09a695da7b (patch)
tree	d87810ed77352d11fc1abdc9dbf14f8ba73852ec /drivers/gpu/drm/i915/i915_gem.c
parent	ea16a3cdb9218a2389fafc804356c1914c157654 (diff)
drm/i915: fall through pwrite_gtt_slow to the shmem slow path
The gtt_pwrite slowpath grabs the userspace memory with
get_user_pages. This will not work for non-page-backed memory, like a
gtt-mmapped gem object. Hence fall through to the shmem paths if we hit
-EFAULT in the gtt paths.
Now the shmem paths have exactly the same problem, but this way we
only need to rearrange the code in one write path.
v2: v1 accidentally fell back to shmem pwrite for phys objects. Fixed.
v3: Make the code flow around phys_pwrite clearer, as suggested by Chris
Wilson.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
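
For context (not part of the patch): a minimal userspace sketch of the case the commit message describes, where the pwrite source pointer is a GTT mmap of another gem object, so get_user_pages() in the gtt slowpath fails with -EFAULT and, after this patch, the kernel falls through to the shmem paths. The ioctl structures and numbers (drm_i915_gem_mmap_gtt, drm_i915_gem_pwrite) come from the i915 uapi; the include path, the handle variables and the error handling are illustrative assumptions only.

/*
 * Hypothetical userspace sketch: pwrite into dst_handle using a GTT mmap
 * of src_handle as the source pointer. The GTT mmap is not page backed,
 * which is exactly the case that forces the kernel's gtt pwrite slowpath
 * to return -EFAULT. fd is an open DRM device node; the object handles
 * are assumed to come from GEM_CREATE elsewhere.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static int copy_via_pwrite(int fd, uint32_t src_handle, uint32_t dst_handle,
			   uint64_t size)
{
	struct drm_i915_gem_mmap_gtt mmap_arg;
	struct drm_i915_gem_pwrite pwrite_arg;
	void *src;

	/* Map the source object through the GTT aperture. */
	memset(&mmap_arg, 0, sizeof(mmap_arg));
	mmap_arg.handle = src_handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg))
		return -1;
	src = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, mmap_arg.offset);
	if (src == MAP_FAILED)
		return -1;

	/* pwrite into the destination object, sourcing from the GTT mmap. */
	memset(&pwrite_arg, 0, sizeof(pwrite_arg));
	pwrite_arg.handle = dst_handle;
	pwrite_arg.offset = 0;
	pwrite_arg.size = size;
	pwrite_arg.data_ptr = (uintptr_t)src;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite_arg)) {
		munmap(src, size);
		return -1;
	}

	munmap(src, size);
	return 0;
}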
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	33
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index df23c6273413..7bb32ecc13c5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -996,10 +996,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->phys_obj)
+	if (obj->phys_obj) {
 		ret = i915_gem_phys_pwrite(dev, obj, args, file);
-	else if (obj->gtt_space &&
-		 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		goto out;
+	}
+
+	if (obj->gtt_space &&
+	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 		ret = i915_gem_object_pin(obj, 0, true);
 		if (ret)
 			goto out;
@@ -1018,18 +1021,24 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 
 out_unpin:
 		i915_gem_object_unpin(obj);
-	} else {
-		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-		if (ret)
-			goto out;
 
-		ret = -EFAULT;
-		if (!i915_gem_object_needs_bit17_swizzle(obj))
-			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
-		if (ret == -EFAULT)
-			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+		if (ret != -EFAULT)
+			goto out;
+		/* Fall through to the shmfs paths because the gtt paths might
+		 * fail with non-page-backed user pointers (e.g. gtt mappings
+		 * when moving data between textures). */
 	}
 
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	if (ret)
+		goto out;
+
+	ret = -EFAULT;
+	if (!i915_gem_object_needs_bit17_swizzle(obj))
+		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
+	if (ret == -EFAULT)
+		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+
 out:
 	drm_gem_object_unreference(&obj->base);
 unlock:
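
As a reading aid (not from the patch), here is a small standalone C model of the new control flow spread across the two hunks above. The i915 helpers are replaced by stubs, the stubbed gtt path is made to return -EFAULT, and only the branching shown in the diff is reproduced.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real i915 helpers; the gtt path is forced to
 * fail with -EFAULT so the new fall-through is exercised. */
static int phys_pwrite(void)       { return 0; }
static int gtt_pwrite(void)        { return -EFAULT; }
static int set_to_cpu_domain(void) { return 0; }
static int shmem_pwrite_fast(void) { return -EFAULT; }
static int shmem_pwrite_slow(void) { return 0; }

static int pwrite_model(bool is_phys, bool use_gtt, bool needs_bit17_swizzle)
{
	int ret;

	if (is_phys) {
		ret = phys_pwrite();
		goto out;		/* phys objects never fall through */
	}

	if (use_gtt) {
		ret = gtt_pwrite();	/* stands in for pin + gtt fast/slow + unpin */
		if (ret != -EFAULT)
			goto out;	/* success, or a real error */
		/* -EFAULT: source may not be page backed, try shmem below */
	}

	ret = set_to_cpu_domain();
	if (ret)
		goto out;

	ret = -EFAULT;
	if (!needs_bit17_swizzle)
		ret = shmem_pwrite_fast();
	if (ret == -EFAULT)
		ret = shmem_pwrite_slow();
out:
	return ret;
}

int main(void)
{
	/* gtt path faults, shmem fast path faults, shmem slow path succeeds */
	printf("ret = %d\n", pwrite_model(false, true, false));
	return 0;
}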