author     Chris Wilson <chris@chris-wilson.co.uk>    2013-08-26 18:50:55 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2013-09-03 13:17:58 -0400
commit     5cfacdedb1a94efd29faeaab53f939554a3f5943 (patch)
tree       0e100a605856f41a7eb12b3bbd7166704ac45da0 /drivers/gpu
parent     1f5d76dbb636c73912c9ff1c90ff46dd2273f098 (diff)
drm/i915: Pin pages whilst mapping the dma-buf
As we attempt to kmalloc after calling get_pages, there is a possibility
that the shrinker may reap the pages we just acquired. To prevent this we
need to increment the pages_pin_count early, so rearrange the code and
error paths to make it so.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
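For readers who find the end state easier to follow than the hunks, the mapping path reads roughly as follows after this patch. Only the error-handling flow is taken from the diff below; the function signature, the local declarations and the scatterlist copy loop are unchanged context that the hunks do not show, so those parts are reconstructed here as an illustrative sketch rather than quoted verbatim from the tree.

static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        /* Declarations reconstructed from context, not part of the hunks below. */
        struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;

        ret = i915_mutex_lock_interruptible(obj->base.dev);
        if (ret)
                goto err;

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                goto err_unlock;

        /* Pin the pages before the kmalloc() below can trigger reclaim,
         * so the shrinker cannot reap what we just acquired. */
        i915_gem_object_pin_pages(obj);

        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (st == NULL) {
                ret = -ENOMEM;
                goto err_unpin;
        }

        ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
        if (ret)
                goto err_free;

        /* Per-entry copy of the object's scatterlist into st
         * (unchanged context, elided from the hunks below). */
        src = obj->pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }

        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
                ret = -ENOMEM;
                goto err_free_sg;
        }

        mutex_unlock(&obj->base.dev->struct_mutex);
        return st;

err_free_sg:
        sg_free_table(st);
err_free:
        kfree(st);
err_unpin:
        i915_gem_object_unpin_pages(obj);
err_unlock:
        mutex_unlock(&obj->base.dev->struct_mutex);
err:
        return ERR_PTR(ret);
}

The key ordering change is that i915_gem_object_pin_pages() is now called directly after i915_gem_object_get_pages() and before the kmalloc() that could re-enter the shrinker, and every later failure unwinds through the labelled exits so the pin count and struct_mutex stay balanced.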
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c  41
1 file changed, 22 insertions, 19 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index e918b05fcbdd..7d5752fda5f1 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -42,27 +42,24 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 
         ret = i915_mutex_lock_interruptible(obj->base.dev);
         if (ret)
-                return ERR_PTR(ret);
+                goto err;
 
         ret = i915_gem_object_get_pages(obj);
-        if (ret) {
-                st = ERR_PTR(ret);
-                goto out;
-        }
+        if (ret)
+                goto err_unlock;
+
+        i915_gem_object_pin_pages(obj);
 
         /* Copy sg so that we make an independent mapping */
         st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
         if (st == NULL) {
-                st = ERR_PTR(-ENOMEM);
-                goto out;
+                ret = -ENOMEM;
+                goto err_unpin;
         }
 
         ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
-        if (ret) {
-                kfree(st);
-                st = ERR_PTR(ret);
-                goto out;
-        }
+        if (ret)
+                goto err_free;
 
         src = obj->pages->sgl;
         dst = st->sgl;
@@ -73,17 +70,23 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
         }
 
         if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
-                sg_free_table(st);
-                kfree(st);
-                st = ERR_PTR(-ENOMEM);
-                goto out;
+                ret =-ENOMEM;
+                goto err_free_sg;
         }
 
-        i915_gem_object_pin_pages(obj);
-
-out:
         mutex_unlock(&obj->base.dev->struct_mutex);
         return st;
+
+err_free_sg:
+        sg_free_table(st);
+err_free:
+        kfree(st);
+err_unpin:
+        i915_gem_object_unpin_pages(obj);
+err_unlock:
+        mutex_unlock(&obj->base.dev->struct_mutex);
+err:
+        return ERR_PTR(ret);
 }
 
 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,