aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/drm/i915/Makefile3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c8
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h11
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c37
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c171
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c15
6 files changed, 239 insertions, 6 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0ca7f7646ab..2e9268da58d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -38,7 +38,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
38 dvo_ch7017.o \ 38 dvo_ch7017.o \
39 dvo_ivch.o \ 39 dvo_ivch.o \
40 dvo_tfp410.o \ 40 dvo_tfp410.o \
41 dvo_sil164.o 41 dvo_sil164.o \
42 i915_gem_dmabuf.o
42 43
43i915-$(CONFIG_COMPAT) += i915_ioc32.o 44i915-$(CONFIG_COMPAT) += i915_ioc32.o
44 45
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7b967d5bf51..238a5216583 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1039,7 +1039,7 @@ static struct drm_driver driver = {
1039 */ 1039 */
1040 .driver_features = 1040 .driver_features =
1041 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 1041 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
1042 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, 1042 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
1043 .load = i915_driver_load, 1043 .load = i915_driver_load,
1044 .unload = i915_driver_unload, 1044 .unload = i915_driver_unload,
1045 .open = i915_driver_open, 1045 .open = i915_driver_open,
@@ -1062,6 +1062,12 @@ static struct drm_driver driver = {
1062 .gem_init_object = i915_gem_init_object, 1062 .gem_init_object = i915_gem_init_object,
1063 .gem_free_object = i915_gem_free_object, 1063 .gem_free_object = i915_gem_free_object,
1064 .gem_vm_ops = &i915_gem_vm_ops, 1064 .gem_vm_ops = &i915_gem_vm_ops,
1065
1066 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1067 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1068 .gem_prime_export = i915_gem_prime_export,
1069 .gem_prime_import = i915_gem_prime_import,
1070
1065 .dumb_create = i915_gem_dumb_create, 1071 .dumb_create = i915_gem_dumb_create,
1066 .dumb_map_offset = i915_gem_mmap_gtt, 1072 .dumb_map_offset = i915_gem_mmap_gtt,
1067 .dumb_destroy = i915_gem_dumb_destroy, 1073 .dumb_destroy = i915_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 11c7a6a330c..377c21f531e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -940,6 +940,8 @@ struct drm_i915_gem_object {
940 struct scatterlist *sg_list; 940 struct scatterlist *sg_list;
941 int num_sg; 941 int num_sg;
942 942
943 /* prime dma-buf support */
944 struct sg_table *sg_table;
943 /** 945 /**
944 * Used for performing relocations during execbuffer insertion. 946 * Used for performing relocations during execbuffer insertion.
945 */ 947 */
@@ -1245,6 +1247,8 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
1245void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 1247void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
1246void i915_gem_lastclose(struct drm_device *dev); 1248void i915_gem_lastclose(struct drm_device *dev);
1247 1249
1250int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1251 gfp_t gfpmask);
1248int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 1252int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
1249int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); 1253int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
1250int i915_gem_object_sync(struct drm_i915_gem_object *obj, 1254int i915_gem_object_sync(struct drm_i915_gem_object *obj,
@@ -1342,6 +1346,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1342int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 1346int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
1343 enum i915_cache_level cache_level); 1347 enum i915_cache_level cache_level);
1344 1348
1349struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
1350 struct dma_buf *dma_buf);
1351
1352struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
1353 struct drm_gem_object *gem_obj, int flags);
1354
1355
1345/* i915_gem_gtt.c */ 1356/* i915_gem_gtt.c */
1346int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); 1357int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
1347void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); 1358void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6d2180cf3da..c1e5c66553d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,6 +35,7 @@
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/swap.h> 36#include <linux/swap.h>
37#include <linux/pci.h> 37#include <linux/pci.h>
38#include <linux/dma-buf.h>
38 39
39static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); 40static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
40static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); 41static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
@@ -538,6 +539,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
538 goto out; 539 goto out;
539 } 540 }
540 541
542 /* prime objects have no backing filp to GEM pread/pwrite
543 * pages from.
544 */
545 if (!obj->base.filp) {
546 ret = -EINVAL;
547 goto out;
548 }
549
541 trace_i915_gem_object_pread(obj, args->offset, args->size); 550 trace_i915_gem_object_pread(obj, args->offset, args->size);
542 551
543 ret = i915_gem_shmem_pread(dev, obj, args, file); 552 ret = i915_gem_shmem_pread(dev, obj, args, file);
@@ -880,6 +889,14 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
880 goto out; 889 goto out;
881 } 890 }
882 891
892 /* prime objects have no backing filp to GEM pread/pwrite
893 * pages from.
894 */
895 if (!obj->base.filp) {
896 ret = -EINVAL;
897 goto out;
898 }
899
883 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 900 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
884 901
885 ret = -EFAULT; 902 ret = -EFAULT;
@@ -1021,6 +1038,14 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1021 if (obj == NULL) 1038 if (obj == NULL)
1022 return -ENOENT; 1039 return -ENOENT;
1023 1040
1041 /* prime objects have no backing filp to GEM mmap
1042 * pages from.
1043 */
1044 if (!obj->filp) {
1045 drm_gem_object_unreference_unlocked(obj);
1046 return -EINVAL;
1047 }
1048
1024 addr = vm_mmap(obj->filp, 0, args->size, 1049 addr = vm_mmap(obj->filp, 0, args->size,
1025 PROT_READ | PROT_WRITE, MAP_SHARED, 1050 PROT_READ | PROT_WRITE, MAP_SHARED,
1026 args->offset); 1051 args->offset);
@@ -1302,8 +1327,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1302 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); 1327 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1303} 1328}
1304 1329
1305 1330int
1306static int
1307i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, 1331i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1308 gfp_t gfpmask) 1332 gfp_t gfpmask)
1309{ 1333{
@@ -1312,6 +1336,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1312 struct inode *inode; 1336 struct inode *inode;
1313 struct page *page; 1337 struct page *page;
1314 1338
1339 if (obj->pages || obj->sg_table)
1340 return 0;
1341
1315 /* Get the list of pages out of our struct file. They'll be pinned 1342 /* Get the list of pages out of our struct file. They'll be pinned
1316 * at this point until we release them. 1343 * at this point until we release them.
1317 */ 1344 */
@@ -1353,6 +1380,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1353 int page_count = obj->base.size / PAGE_SIZE; 1380 int page_count = obj->base.size / PAGE_SIZE;
1354 int i; 1381 int i;
1355 1382
1383 if (!obj->pages)
1384 return;
1385
1356 BUG_ON(obj->madv == __I915_MADV_PURGED); 1386 BUG_ON(obj->madv == __I915_MADV_PURGED);
1357 1387
1358 if (i915_gem_object_needs_bit17_swizzle(obj)) 1388 if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -3327,6 +3357,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3327 3357
3328 trace_i915_gem_object_destroy(obj); 3358 trace_i915_gem_object_destroy(obj);
3329 3359
3360 if (gem_obj->import_attach)
3361 drm_prime_gem_destroy(gem_obj, obj->sg_table);
3362
3330 if (obj->phys_obj) 3363 if (obj->phys_obj)
3331 i915_gem_detach_phys_object(dev, obj); 3364 i915_gem_detach_phys_object(dev, obj);
3332 3365
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
new file mode 100644
index 00000000000..4f8e142604a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -0,0 +1,171 @@
1/*
2 * Copyright 2012 Red Hat Inc
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Dave Airlie <airlied@redhat.com>
25 */
26#include "drmP.h"
27#include "i915_drv.h"
28#include <linux/dma-buf.h>
29
30struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
31 enum dma_data_direction dir)
32{
33 struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
34 struct drm_device *dev = obj->base.dev;
35 int npages = obj->base.size / PAGE_SIZE;
36 struct sg_table *sg = NULL;
37 int ret;
38 int nents;
39
40 ret = i915_mutex_lock_interruptible(dev);
41 if (ret)
42 return ERR_PTR(ret);
43
44 if (!obj->pages) {
45 ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
46 if (ret)
47 goto out;
48 }
49
50 /* link the pages into an SG then map the sg */
51 sg = drm_prime_pages_to_sg(obj->pages, npages);
52 nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
53out:
54 mutex_unlock(&dev->struct_mutex);
55 return sg;
56}
57
/*
 * i915_gem_unmap_dma_buf - dma-buf .unmap_dma_buf hook
 * @attachment: the attachment being unmapped
 * @sg: the sg_table previously returned by i915_gem_map_dma_buf()
 * @dir: DMA direction the table was mapped with
 *
 * Unmaps the scatterlist from the attached device and frees the table;
 * i915_gem_map_dma_buf() allocated it, so ownership ends here.
 */
void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
			    struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}
65
/*
 * i915_gem_dmabuf_release - dma-buf .release hook
 * @dma_buf: the dma-buf being destroyed (last fd reference dropped)
 *
 * Drops the GEM reference that was taken on behalf of the exported fd.
 * The export_dma_buf check guards against a stale dma_buf pointer: only
 * the buffer currently cached on the object owns such a reference.
 */
void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;

	if (obj->base.export_dma_buf == dma_buf) {
		/* drop the reference on the export fd holds */
		obj->base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&obj->base);
	}
}
76
/*
 * CPU access to i915-exported dma-bufs is not implemented yet: the kmap
 * hooks return NULL (which importers must treat as "no CPU mapping") and
 * the kunmap hooks are no-ops. These exist only to populate the ops table.
 */
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
95
96struct dma_buf_ops i915_dmabuf_ops = {
97 .map_dma_buf = i915_gem_map_dma_buf,
98 .unmap_dma_buf = i915_gem_unmap_dma_buf,
99 .release = i915_gem_dmabuf_release,
100 .kmap = i915_gem_dmabuf_kmap,
101 .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
102 .kunmap = i915_gem_dmabuf_kunmap,
103 .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
104};
105
106struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
107 struct drm_gem_object *gem_obj, int flags)
108{
109 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
110
111 return dma_buf_export(obj, &i915_dmabuf_ops,
112 obj->base.size, 0600);
113}
114
115struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
116 struct dma_buf *dma_buf)
117{
118 struct dma_buf_attachment *attach;
119 struct sg_table *sg;
120 struct drm_i915_gem_object *obj;
121 int npages;
122 int size;
123 int ret;
124
125 /* is this one of own objects? */
126 if (dma_buf->ops == &i915_dmabuf_ops) {
127 obj = dma_buf->priv;
128 /* is it from our device? */
129 if (obj->base.dev == dev) {
130 drm_gem_object_reference(&obj->base);
131 return &obj->base;
132 }
133 }
134
135 /* need to attach */
136 attach = dma_buf_attach(dma_buf, dev->dev);
137 if (IS_ERR(attach))
138 return ERR_CAST(attach);
139
140 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
141 if (IS_ERR(sg)) {
142 ret = PTR_ERR(sg);
143 goto fail_detach;
144 }
145
146 size = dma_buf->size;
147 npages = size / PAGE_SIZE;
148
149 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
150 if (obj == NULL) {
151 ret = -ENOMEM;
152 goto fail_unmap;
153 }
154
155 ret = drm_gem_private_object_init(dev, &obj->base, size);
156 if (ret) {
157 kfree(obj);
158 goto fail_unmap;
159 }
160
161 obj->sg_table = sg;
162 obj->base.import_attach = attach;
163
164 return &obj->base;
165
166fail_unmap:
167 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
168fail_detach:
169 dma_buf_detach(dma_buf, attach);
170 return ERR_PTR(ret);
171}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 29d573c27b3..9fd25a43553 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -267,7 +267,13 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
267 BUG(); 267 BUG();
268 } 268 }
269 269
270 if (dev_priv->mm.gtt->needs_dmar) { 270 if (obj->sg_table) {
271 i915_ppgtt_insert_sg_entries(ppgtt,
272 obj->sg_table->sgl,
273 obj->sg_table->nents,
274 obj->gtt_space->start >> PAGE_SHIFT,
275 pte_flags);
276 } else if (dev_priv->mm.gtt->needs_dmar) {
271 BUG_ON(!obj->sg_list); 277 BUG_ON(!obj->sg_list);
272 278
273 i915_ppgtt_insert_sg_entries(ppgtt, 279 i915_ppgtt_insert_sg_entries(ppgtt,
@@ -371,7 +377,12 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
371 struct drm_i915_private *dev_priv = dev->dev_private; 377 struct drm_i915_private *dev_priv = dev->dev_private;
372 unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); 378 unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
373 379
374 if (dev_priv->mm.gtt->needs_dmar) { 380 if (obj->sg_table) {
381 intel_gtt_insert_sg_entries(obj->sg_table->sgl,
382 obj->sg_table->nents,
383 obj->gtt_space->start >> PAGE_SHIFT,
384 agp_type);
385 } else if (dev_priv->mm.gtt->needs_dmar) {
375 BUG_ON(!obj->sg_list); 386 BUG_ON(!obj->sg_list);
376 387
377 intel_gtt_insert_sg_entries(obj->sg_list, 388 intel_gtt_insert_sg_entries(obj->sg_list,