author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-31 21:01:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-31 21:01:18 -0400
commit     9fdadb2cbaf4b482dfd6086e8bd3d2db071a1702 (patch)
tree       4a6e11e3379ee93c1992cfd595872dded1fd2fac /drivers
parent     76f901eb4659779ecacd0e4eba49f55442daef53 (diff)
parent     63bc620b45af8c743ac291c8725933278c712692 (diff)
Merge branch 'drm-prime-vmap' of git://people.freedesktop.org/~airlied/linux
Pull drm prime mmap/vmap code from Dave Airlie:
 "As mentioned previously these are the extra bits of drm that relied on
  the dma-buf pull to work, the first three just stub out the mmap
  interface, and the next set provide vmap export to i915/radeon/nouveau
  and vmap import to udl."

* 'drm-prime-vmap' of git://people.freedesktop.org/~airlied/linux:
  radeon: add radeon prime vmap support.
  nouveau: add vmap support to nouveau prime support
  udl: support vmapping imported dma-bufs
  i915: add dma-buf vmap support for exporting vmapped buffer
  radeon: add stub dma-buf mmap functionality
  nouveau: add stub dma-buf mmap functionality.
  i915: add stub dma-buf mmap callback.
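All three exporters in this pull follow the same shape: a refcounted kernel mapping behind the dma-buf .vmap/.vunmap callbacks, plus a .mmap stub that returns -EINVAL until real userspace mapping is wired up. Below is a minimal sketch of that pattern; the mydrv_* names and the simplified page-backed object are hypothetical, and a per-object lock stands in for the dev->struct_mutex the real drivers take.

/*
 * Hypothetical sketch only -- not part of the pull. Shows the refcounted
 * vmap/vunmap pattern plus the stub mmap callback the drivers below add.
 */
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>

struct mydrv_bo {
	struct page **pages;		/* backing pages, already populated */
	unsigned long npages;
	void *vmapping;			/* kernel mapping, NULL when unmapped */
	int vmapping_count;		/* outstanding vmap references */
	struct mutex lock;
};

static void *mydrv_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct mydrv_bo *bo = dma_buf->priv;
	void *vaddr;

	mutex_lock(&bo->lock);
	if (bo->vmapping_count++ == 0)
		bo->vmapping = vmap(bo->pages, bo->npages, 0, PAGE_KERNEL);
	vaddr = bo->vmapping;
	if (!vaddr)
		bo->vmapping_count--;	/* mapping failed, drop the reference */
	mutex_unlock(&bo->lock);

	return vaddr ? vaddr : ERR_PTR(-ENOMEM);
}

static void mydrv_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct mydrv_bo *bo = dma_buf->priv;

	mutex_lock(&bo->lock);
	if (--bo->vmapping_count == 0) {
		vunmap(bo->vmapping);
		bo->vmapping = NULL;
	}
	mutex_unlock(&bo->lock);
}

static int mydrv_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;			/* userspace mmap not supported yet */
}

static const struct dma_buf_ops mydrv_dmabuf_ops = {
	/* .map_dma_buf/.unmap_dma_buf/.release and the kmap ops are omitted from this sketch */
	.mmap	= mydrv_dmabuf_mmap,
	.vmap	= mydrv_dmabuf_vmap,
	.vunmap	= mydrv_dmabuf_vunmap,
};

i915 backs the mapping with vmap() over the object's pages, while radeon and nouveau use ttm_bo_kmap() on the TTM buffer object, as the diffs below show.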
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c    61
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h      3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c   45
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h            3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c     44
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c              13
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c             25
8 files changed, 192 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 377c21f531e4..c9cfc67c2cf5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -942,6 +942,9 @@ struct drm_i915_gem_object {
 
 	/* prime dma-buf support */
 	struct sg_table *sg_table;
+	void *dma_buf_vmapping;
+	int vmapping_count;
+
 	/**
 	 * Used for performing relocations during execbuffer insertion.
 	 */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 8e269178d6a5..aa308e1337db 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -74,6 +74,59 @@ static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
 	}
 }
 
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (obj->dma_buf_vmapping) {
+		obj->vmapping_count++;
+		goto out_unlock;
+	}
+
+	if (!obj->pages) {
+		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ERR_PTR(ret);
+		}
+	}
+
+	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+	if (!obj->dma_buf_vmapping) {
+		DRM_ERROR("failed to vmap object\n");
+		goto out_unlock;
+	}
+
+	obj->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return;
+
+	--obj->vmapping_count;
+	if (obj->vmapping_count == 0) {
+		vunmap(obj->dma_buf_vmapping);
+		obj->dma_buf_vmapping = NULL;
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
 static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
 {
 	return NULL;
@@ -93,6 +146,11 @@ static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_n
 
 }
 
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops = {
 	.map_dma_buf = i915_gem_map_dma_buf,
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -101,6 +159,9 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
 	.kunmap = i915_gem_dmabuf_kunmap,
 	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+	.mmap = i915_gem_dmabuf_mmap,
+	.vmap = i915_gem_dmabuf_vmap,
+	.vunmap = i915_gem_dmabuf_vunmap,
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 634d222c93de..8613cb23808c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -123,6 +123,9 @@ struct nouveau_bo {
 
 	struct drm_gem_object *gem;
 	int pin_refcnt;
+
+	struct ttm_bo_kmap_obj dma_buf_vmap;
+	int vmapping_count;
 };
 
 #define nouveau_bo_tile_layout(nvbo) \
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index c58aab7370c5..a89240e5fb29 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -61,6 +61,48 @@ static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
 
 }
 
+static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+	struct nouveau_bo *nvbo = dma_buf->priv;
+	struct drm_device *dev = nvbo->gem->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	if (nvbo->vmapping_count) {
+		nvbo->vmapping_count++;
+		goto out_unlock;
+	}
+
+	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
+			  &nvbo->dma_buf_vmap);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ERR_PTR(ret);
+	}
+	nvbo->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return nvbo->dma_buf_vmap.virtual;
+}
+
+static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct nouveau_bo *nvbo = dma_buf->priv;
+	struct drm_device *dev = nvbo->gem->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	nvbo->vmapping_count--;
+	if (nvbo->vmapping_count == 0) {
+		ttm_bo_kunmap(&nvbo->dma_buf_vmap);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
 static const struct dma_buf_ops nouveau_dmabuf_ops = {
 	.map_dma_buf = nouveau_gem_map_dma_buf,
 	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
@@ -69,6 +111,9 @@ static const struct dma_buf_ops nouveau_dmabuf_ops = {
 	.kmap_atomic = nouveau_gem_kmap_atomic,
 	.kunmap = nouveau_gem_kunmap,
 	.kunmap_atomic = nouveau_gem_kunmap_atomic,
+	.mmap = nouveau_gem_prime_mmap,
+	.vmap = nouveau_gem_prime_vmap,
+	.vunmap = nouveau_gem_prime_vunmap,
 };
 
 static int
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 492654f8ee74..2e24022b389a 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -346,6 +346,9 @@ struct radeon_bo {
 	/* Constant after initialization */
 	struct radeon_device *rdev;
 	struct drm_gem_object gem_base;
+
+	struct ttm_bo_kmap_obj dma_buf_vmap;
+	int vmapping_count;
 };
 #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index b8f835d8ecb4..8ddab4c76710 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -85,6 +85,47 @@ static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, v
 
 }
 
+static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+	struct radeon_bo *bo = dma_buf->priv;
+	struct drm_device *dev = bo->rdev->ddev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	if (bo->vmapping_count) {
+		bo->vmapping_count++;
+		goto out_unlock;
+	}
+
+	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+			  &bo->dma_buf_vmap);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ERR_PTR(ret);
+	}
+	bo->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return bo->dma_buf_vmap.virtual;
+}
+
+static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct radeon_bo *bo = dma_buf->priv;
+	struct drm_device *dev = bo->rdev->ddev;
+
+	mutex_lock(&dev->struct_mutex);
+	bo->vmapping_count--;
+	if (bo->vmapping_count == 0) {
+		ttm_bo_kunmap(&bo->dma_buf_vmap);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
 const static struct dma_buf_ops radeon_dmabuf_ops = {
 	.map_dma_buf = radeon_gem_map_dma_buf,
 	.unmap_dma_buf = radeon_gem_unmap_dma_buf,
@@ -93,6 +134,9 @@ const static struct dma_buf_ops radeon_dmabuf_ops = {
 	.kmap_atomic = radeon_gem_kmap_atomic,
 	.kunmap = radeon_gem_kunmap,
 	.kunmap_atomic = radeon_gem_kunmap_atomic,
+	.mmap = radeon_gem_prime_mmap,
+	.vmap = radeon_gem_prime_vmap,
+	.vunmap = radeon_gem_prime_vunmap,
 };
 
 static int radeon_prime_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index a029ee39b0c5..ce9a61179925 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -156,8 +156,17 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 	if (!fb->active_16)
 		return 0;
 
-	if (!fb->obj->vmapping)
-		udl_gem_vmap(fb->obj);
+	if (!fb->obj->vmapping) {
+		ret = udl_gem_vmap(fb->obj);
+		if (ret == -ENOMEM) {
+			DRM_ERROR("failed to vmap fb\n");
+			return 0;
+		}
+		if (!fb->obj->vmapping) {
+			DRM_ERROR("failed to vmapping\n");
+			return 0;
+		}
+	}
 
 	start_cycles = get_cycles();
 
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 97acc9c6c95b..7bd65bdd15a8 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -180,6 +180,18 @@ int udl_gem_vmap(struct udl_gem_object *obj)
 	int page_count = obj->base.size / PAGE_SIZE;
 	int ret;
 
+	if (obj->base.import_attach) {
+		ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
+					       0, obj->base.size, DMA_BIDIRECTIONAL);
+		if (ret)
+			return -EINVAL;
+
+		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
+		if (!obj->vmapping)
+			return -ENOMEM;
+		return 0;
+	}
+
 	ret = udl_gem_get_pages(obj, GFP_KERNEL);
 	if (ret)
 		return ret;
@@ -192,6 +204,13 @@ int udl_gem_vmap(struct udl_gem_object *obj)
 
 void udl_gem_vunmap(struct udl_gem_object *obj)
 {
+	if (obj->base.import_attach) {
+		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
+		dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
+				       obj->base.size, DMA_BIDIRECTIONAL);
+		return;
+	}
+
 	if (obj->vmapping)
 		vunmap(obj->vmapping);
 
@@ -202,12 +221,12 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct udl_gem_object *obj = to_udl_bo(gem_obj);
 
-	if (gem_obj->import_attach)
-		drm_prime_gem_destroy(gem_obj, obj->sg);
-
 	if (obj->vmapping)
 		udl_gem_vunmap(obj);
 
+	if (gem_obj->import_attach)
+		drm_prime_gem_destroy(gem_obj, obj->sg);
+
 	if (obj->pages)
 		udl_gem_put_pages(obj);
 
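On the import side, the udl changes above bracket CPU access around the imported buffer and cache the vmap'ed address for the framebuffer damage handler. A reduced sketch of those calls against the dma-buf API of this era follows; the import_vmap/import_vunmap helper names are hypothetical.

/* Hypothetical helpers only -- condensed from the udl_gem_vmap()/udl_gem_vunmap() paths above. */
#include <linux/dma-buf.h>
#include <linux/err.h>

static void *import_vmap(struct dma_buf *buf, size_t size)
{
	void *vaddr;
	int ret;

	/* Tell the exporter we are about to touch the buffer from the CPU. */
	ret = dma_buf_begin_cpu_access(buf, 0, size, DMA_BIDIRECTIONAL);
	if (ret)
		return ERR_PTR(ret);

	vaddr = dma_buf_vmap(buf);
	if (!vaddr) {
		dma_buf_end_cpu_access(buf, 0, size, DMA_BIDIRECTIONAL);
		return ERR_PTR(-ENOMEM);
	}
	return vaddr;
}

static void import_vunmap(struct dma_buf *buf, size_t size, void *vaddr)
{
	dma_buf_vunmap(buf, vaddr);
	dma_buf_end_cpu_access(buf, 0, size, DMA_BIDIRECTIONAL);
}

udl itself keeps the mapping cached in obj->vmapping and only drops it in udl_gem_vunmap(), which udl_gem_free_object() now calls before tearing down the prime attachment.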