diff options
author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-05-10 09:25:09 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2012-05-23 05:47:10 -0400 |
commit | 1286ff7397737e407cdd8e5cd574318db177ba1f (patch) | |
tree | d83344e3beba60c8ec8bec566bd8f58a08f696ef /drivers/gpu/drm/i915 | |
parent | 22b33e8ed0e38b8ddcf082e35580f2e67a3a0262 (diff) |
i915: add dmabuf/prime buffer sharing support.
This adds handle->fd and fd->handle support to i915, this is to allow
for offloading of rendering in one direction and outputs in the other.
v2 from Daniel Vetter:
- fixup conflicts with the prepare/finish gtt prep work.
- implement ppgtt binding support.
Note that we have squat i-g-t test coverage for any of the lifetime and
access rules dma_buf/prime support brings along. And there are quite a
few intricate situations here.
Also note that the integration with the existing code is a bit
hackish, especially around get_gtt_pages and put_gtt_pages. It imo
would be easier with the prep code from Chris Wilson's unbound series,
but that is for 3.6.
Also note that I didn't bother to put the new prepare/finish gtt hooks
to good use by moving the dma_buf_map/unmap_attachment calls in there
(like we've originally planned for).
Last but not least this patch is only compile-tested, but I've changed
very little compared to Dave Airlie's version. So there's a decent
chance v2 on drm-next works as well as v1 on 3.4-rc.
v3: Right when I hit send, I noticed that I had screwed up one
obj->sg_list (for dmar support) and obj->sg_table (for prime support)
distinction. We should be able to merge these 2 paths, but that's
material for another patch.
v4: fix the error reporting bugs pointed out by ickle.
v5: fix another error, and stop non-gtt mmaps on shared objects
stop pread/pwrite on imported objects, add fake kmap
Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r-- | drivers/gpu/drm/i915/Makefile | 3 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.c | 8 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 11 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 37 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_dmabuf.c | 171 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_gtt.c | 15 |
6 files changed, 239 insertions, 6 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 0ca7f7646ab5..2e9268da58d8 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
@@ -38,7 +38,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \ | |||
38 | dvo_ch7017.o \ | 38 | dvo_ch7017.o \ |
39 | dvo_ivch.o \ | 39 | dvo_ivch.o \ |
40 | dvo_tfp410.o \ | 40 | dvo_tfp410.o \ |
41 | dvo_sil164.o | 41 | dvo_sil164.o \ |
42 | i915_gem_dmabuf.o | ||
42 | 43 | ||
43 | i915-$(CONFIG_COMPAT) += i915_ioc32.o | 44 | i915-$(CONFIG_COMPAT) += i915_ioc32.o |
44 | 45 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 7b967d5bf51d..238a52165833 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -1039,7 +1039,7 @@ static struct drm_driver driver = { | |||
1039 | */ | 1039 | */ |
1040 | .driver_features = | 1040 | .driver_features = |
1041 | DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ | 1041 | DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ |
1042 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, | 1042 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME, |
1043 | .load = i915_driver_load, | 1043 | .load = i915_driver_load, |
1044 | .unload = i915_driver_unload, | 1044 | .unload = i915_driver_unload, |
1045 | .open = i915_driver_open, | 1045 | .open = i915_driver_open, |
@@ -1062,6 +1062,12 @@ static struct drm_driver driver = { | |||
1062 | .gem_init_object = i915_gem_init_object, | 1062 | .gem_init_object = i915_gem_init_object, |
1063 | .gem_free_object = i915_gem_free_object, | 1063 | .gem_free_object = i915_gem_free_object, |
1064 | .gem_vm_ops = &i915_gem_vm_ops, | 1064 | .gem_vm_ops = &i915_gem_vm_ops, |
1065 | |||
1066 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
1067 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
1068 | .gem_prime_export = i915_gem_prime_export, | ||
1069 | .gem_prime_import = i915_gem_prime_import, | ||
1070 | |||
1065 | .dumb_create = i915_gem_dumb_create, | 1071 | .dumb_create = i915_gem_dumb_create, |
1066 | .dumb_map_offset = i915_gem_mmap_gtt, | 1072 | .dumb_map_offset = i915_gem_mmap_gtt, |
1067 | .dumb_destroy = i915_gem_dumb_destroy, | 1073 | .dumb_destroy = i915_gem_dumb_destroy, |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 11c7a6a330c1..377c21f531e4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -940,6 +940,8 @@ struct drm_i915_gem_object { | |||
940 | struct scatterlist *sg_list; | 940 | struct scatterlist *sg_list; |
941 | int num_sg; | 941 | int num_sg; |
942 | 942 | ||
943 | /* prime dma-buf support */ | ||
944 | struct sg_table *sg_table; | ||
943 | /** | 945 | /** |
944 | * Used for performing relocations during execbuffer insertion. | 946 | * Used for performing relocations during execbuffer insertion. |
945 | */ | 947 | */ |
@@ -1245,6 +1247,8 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); | |||
1245 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); | 1247 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
1246 | void i915_gem_lastclose(struct drm_device *dev); | 1248 | void i915_gem_lastclose(struct drm_device *dev); |
1247 | 1249 | ||
1250 | int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, | ||
1251 | gfp_t gfpmask); | ||
1248 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); | 1252 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); |
1249 | int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); | 1253 | int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); |
1250 | int i915_gem_object_sync(struct drm_i915_gem_object *obj, | 1254 | int i915_gem_object_sync(struct drm_i915_gem_object *obj, |
@@ -1342,6 +1346,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, | |||
1342 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | 1346 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, |
1343 | enum i915_cache_level cache_level); | 1347 | enum i915_cache_level cache_level); |
1344 | 1348 | ||
1349 | struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, | ||
1350 | struct dma_buf *dma_buf); | ||
1351 | |||
1352 | struct dma_buf *i915_gem_prime_export(struct drm_device *dev, | ||
1353 | struct drm_gem_object *gem_obj, int flags); | ||
1354 | |||
1355 | |||
1345 | /* i915_gem_gtt.c */ | 1356 | /* i915_gem_gtt.c */ |
1346 | int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); | 1357 | int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); |
1347 | void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); | 1358 | void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6d2180cf3da5..c1e5c66553df 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | #include <linux/swap.h> | 36 | #include <linux/swap.h> |
37 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
38 | #include <linux/dma-buf.h> | ||
38 | 39 | ||
39 | static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); | 40 | static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); |
40 | static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); | 41 | static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); |
@@ -538,6 +539,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
538 | goto out; | 539 | goto out; |
539 | } | 540 | } |
540 | 541 | ||
542 | /* prime objects have no backing filp to GEM pread/pwrite | ||
543 | * pages from. | ||
544 | */ | ||
545 | if (!obj->base.filp) { | ||
546 | ret = -EINVAL; | ||
547 | goto out; | ||
548 | } | ||
549 | |||
541 | trace_i915_gem_object_pread(obj, args->offset, args->size); | 550 | trace_i915_gem_object_pread(obj, args->offset, args->size); |
542 | 551 | ||
543 | ret = i915_gem_shmem_pread(dev, obj, args, file); | 552 | ret = i915_gem_shmem_pread(dev, obj, args, file); |
@@ -880,6 +889,14 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
880 | goto out; | 889 | goto out; |
881 | } | 890 | } |
882 | 891 | ||
892 | /* prime objects have no backing filp to GEM pread/pwrite | ||
893 | * pages from. | ||
894 | */ | ||
895 | if (!obj->base.filp) { | ||
896 | ret = -EINVAL; | ||
897 | goto out; | ||
898 | } | ||
899 | |||
883 | trace_i915_gem_object_pwrite(obj, args->offset, args->size); | 900 | trace_i915_gem_object_pwrite(obj, args->offset, args->size); |
884 | 901 | ||
885 | ret = -EFAULT; | 902 | ret = -EFAULT; |
@@ -1021,6 +1038,14 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1021 | if (obj == NULL) | 1038 | if (obj == NULL) |
1022 | return -ENOENT; | 1039 | return -ENOENT; |
1023 | 1040 | ||
1041 | /* prime objects have no backing filp to GEM mmap | ||
1042 | * pages from. | ||
1043 | */ | ||
1044 | if (!obj->filp) { | ||
1045 | drm_gem_object_unreference_unlocked(obj); | ||
1046 | return -EINVAL; | ||
1047 | } | ||
1048 | |||
1024 | addr = vm_mmap(obj->filp, 0, args->size, | 1049 | addr = vm_mmap(obj->filp, 0, args->size, |
1025 | PROT_READ | PROT_WRITE, MAP_SHARED, | 1050 | PROT_READ | PROT_WRITE, MAP_SHARED, |
1026 | args->offset); | 1051 | args->offset); |
@@ -1302,8 +1327,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1302 | return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); | 1327 | return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); |
1303 | } | 1328 | } |
1304 | 1329 | ||
1305 | 1330 | int | |
1306 | static int | ||
1307 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, | 1331 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, |
1308 | gfp_t gfpmask) | 1332 | gfp_t gfpmask) |
1309 | { | 1333 | { |
@@ -1312,6 +1336,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, | |||
1312 | struct inode *inode; | 1336 | struct inode *inode; |
1313 | struct page *page; | 1337 | struct page *page; |
1314 | 1338 | ||
1339 | if (obj->pages || obj->sg_table) | ||
1340 | return 0; | ||
1341 | |||
1315 | /* Get the list of pages out of our struct file. They'll be pinned | 1342 | /* Get the list of pages out of our struct file. They'll be pinned |
1316 | * at this point until we release them. | 1343 | * at this point until we release them. |
1317 | */ | 1344 | */ |
@@ -1353,6 +1380,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) | |||
1353 | int page_count = obj->base.size / PAGE_SIZE; | 1380 | int page_count = obj->base.size / PAGE_SIZE; |
1354 | int i; | 1381 | int i; |
1355 | 1382 | ||
1383 | if (!obj->pages) | ||
1384 | return; | ||
1385 | |||
1356 | BUG_ON(obj->madv == __I915_MADV_PURGED); | 1386 | BUG_ON(obj->madv == __I915_MADV_PURGED); |
1357 | 1387 | ||
1358 | if (i915_gem_object_needs_bit17_swizzle(obj)) | 1388 | if (i915_gem_object_needs_bit17_swizzle(obj)) |
@@ -3327,6 +3357,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) | |||
3327 | 3357 | ||
3328 | trace_i915_gem_object_destroy(obj); | 3358 | trace_i915_gem_object_destroy(obj); |
3329 | 3359 | ||
3360 | if (gem_obj->import_attach) | ||
3361 | drm_prime_gem_destroy(gem_obj, obj->sg_table); | ||
3362 | |||
3330 | if (obj->phys_obj) | 3363 | if (obj->phys_obj) |
3331 | i915_gem_detach_phys_object(dev, obj); | 3364 | i915_gem_detach_phys_object(dev, obj); |
3332 | 3365 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c new file mode 100644 index 000000000000..4f8e142604ab --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c | |||
@@ -0,0 +1,171 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Red Hat Inc | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Dave Airlie <airlied@redhat.com> | ||
25 | */ | ||
26 | #include "drmP.h" | ||
27 | #include "i915_drv.h" | ||
28 | #include <linux/dma-buf.h> | ||
29 | |||
30 | struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment, | ||
31 | enum dma_data_direction dir) | ||
32 | { | ||
33 | struct drm_i915_gem_object *obj = attachment->dmabuf->priv; | ||
34 | struct drm_device *dev = obj->base.dev; | ||
35 | int npages = obj->base.size / PAGE_SIZE; | ||
36 | struct sg_table *sg = NULL; | ||
37 | int ret; | ||
38 | int nents; | ||
39 | |||
40 | ret = i915_mutex_lock_interruptible(dev); | ||
41 | if (ret) | ||
42 | return ERR_PTR(ret); | ||
43 | |||
44 | if (!obj->pages) { | ||
45 | ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN); | ||
46 | if (ret) | ||
47 | goto out; | ||
48 | } | ||
49 | |||
50 | /* link the pages into an SG then map the sg */ | ||
51 | sg = drm_prime_pages_to_sg(obj->pages, npages); | ||
52 | nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir); | ||
53 | out: | ||
54 | mutex_unlock(&dev->struct_mutex); | ||
55 | return sg; | ||
56 | } | ||
57 | |||
58 | void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, | ||
59 | struct sg_table *sg, enum dma_data_direction dir) | ||
60 | { | ||
61 | dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); | ||
62 | sg_free_table(sg); | ||
63 | kfree(sg); | ||
64 | } | ||
65 | |||
66 | void i915_gem_dmabuf_release(struct dma_buf *dma_buf) | ||
67 | { | ||
68 | struct drm_i915_gem_object *obj = dma_buf->priv; | ||
69 | |||
70 | if (obj->base.export_dma_buf == dma_buf) { | ||
71 | /* drop the reference on the export fd holds */ | ||
72 | obj->base.export_dma_buf = NULL; | ||
73 | drm_gem_object_unreference_unlocked(&obj->base); | ||
74 | } | ||
75 | } | ||
76 | |||
77 | static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num) | ||
78 | { | ||
79 | return NULL; | ||
80 | } | ||
81 | |||
/* Counterpart to the fake kmap_atomic hook above: nothing was mapped, so
 * there is nothing to undo. */
static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					  unsigned long page_num, void *addr)
{
}
86 | static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num) | ||
87 | { | ||
88 | return NULL; | ||
89 | } | ||
90 | |||
/* Counterpart to the fake kmap hook: no mapping was handed out, so this
 * intentionally does nothing. */
static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				   unsigned long page_num, void *addr)
{
}
95 | |||
96 | struct dma_buf_ops i915_dmabuf_ops = { | ||
97 | .map_dma_buf = i915_gem_map_dma_buf, | ||
98 | .unmap_dma_buf = i915_gem_unmap_dma_buf, | ||
99 | .release = i915_gem_dmabuf_release, | ||
100 | .kmap = i915_gem_dmabuf_kmap, | ||
101 | .kmap_atomic = i915_gem_dmabuf_kmap_atomic, | ||
102 | .kunmap = i915_gem_dmabuf_kunmap, | ||
103 | .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic, | ||
104 | }; | ||
105 | |||
106 | struct dma_buf *i915_gem_prime_export(struct drm_device *dev, | ||
107 | struct drm_gem_object *gem_obj, int flags) | ||
108 | { | ||
109 | struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); | ||
110 | |||
111 | return dma_buf_export(obj, &i915_dmabuf_ops, | ||
112 | obj->base.size, 0600); | ||
113 | } | ||
114 | |||
115 | struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, | ||
116 | struct dma_buf *dma_buf) | ||
117 | { | ||
118 | struct dma_buf_attachment *attach; | ||
119 | struct sg_table *sg; | ||
120 | struct drm_i915_gem_object *obj; | ||
121 | int npages; | ||
122 | int size; | ||
123 | int ret; | ||
124 | |||
125 | /* is this one of own objects? */ | ||
126 | if (dma_buf->ops == &i915_dmabuf_ops) { | ||
127 | obj = dma_buf->priv; | ||
128 | /* is it from our device? */ | ||
129 | if (obj->base.dev == dev) { | ||
130 | drm_gem_object_reference(&obj->base); | ||
131 | return &obj->base; | ||
132 | } | ||
133 | } | ||
134 | |||
135 | /* need to attach */ | ||
136 | attach = dma_buf_attach(dma_buf, dev->dev); | ||
137 | if (IS_ERR(attach)) | ||
138 | return ERR_CAST(attach); | ||
139 | |||
140 | sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); | ||
141 | if (IS_ERR(sg)) { | ||
142 | ret = PTR_ERR(sg); | ||
143 | goto fail_detach; | ||
144 | } | ||
145 | |||
146 | size = dma_buf->size; | ||
147 | npages = size / PAGE_SIZE; | ||
148 | |||
149 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); | ||
150 | if (obj == NULL) { | ||
151 | ret = -ENOMEM; | ||
152 | goto fail_unmap; | ||
153 | } | ||
154 | |||
155 | ret = drm_gem_private_object_init(dev, &obj->base, size); | ||
156 | if (ret) { | ||
157 | kfree(obj); | ||
158 | goto fail_unmap; | ||
159 | } | ||
160 | |||
161 | obj->sg_table = sg; | ||
162 | obj->base.import_attach = attach; | ||
163 | |||
164 | return &obj->base; | ||
165 | |||
166 | fail_unmap: | ||
167 | dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); | ||
168 | fail_detach: | ||
169 | dma_buf_detach(dma_buf, attach); | ||
170 | return ERR_PTR(ret); | ||
171 | } | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 29d573c27b35..9fd25a435536 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -267,7 +267,13 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, | |||
267 | BUG(); | 267 | BUG(); |
268 | } | 268 | } |
269 | 269 | ||
270 | if (dev_priv->mm.gtt->needs_dmar) { | 270 | if (obj->sg_table) { |
271 | i915_ppgtt_insert_sg_entries(ppgtt, | ||
272 | obj->sg_table->sgl, | ||
273 | obj->sg_table->nents, | ||
274 | obj->gtt_space->start >> PAGE_SHIFT, | ||
275 | pte_flags); | ||
276 | } else if (dev_priv->mm.gtt->needs_dmar) { | ||
271 | BUG_ON(!obj->sg_list); | 277 | BUG_ON(!obj->sg_list); |
272 | 278 | ||
273 | i915_ppgtt_insert_sg_entries(ppgtt, | 279 | i915_ppgtt_insert_sg_entries(ppgtt, |
@@ -371,7 +377,12 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, | |||
371 | struct drm_i915_private *dev_priv = dev->dev_private; | 377 | struct drm_i915_private *dev_priv = dev->dev_private; |
372 | unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); | 378 | unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); |
373 | 379 | ||
374 | if (dev_priv->mm.gtt->needs_dmar) { | 380 | if (obj->sg_table) { |
381 | intel_gtt_insert_sg_entries(obj->sg_table->sgl, | ||
382 | obj->sg_table->nents, | ||
383 | obj->gtt_space->start >> PAGE_SHIFT, | ||
384 | agp_type); | ||
385 | } else if (dev_priv->mm.gtt->needs_dmar) { | ||
375 | BUG_ON(!obj->sg_list); | 386 | BUG_ON(!obj->sg_list); |
376 | 387 | ||
377 | intel_gtt_insert_sg_entries(obj->sg_list, | 388 | intel_gtt_insert_sg_entries(obj->sg_list, |