author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-28 18:02:41 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-28 18:02:41 -0400
commit		ed0bb8ea059764c3fc882fb135473afd347335e9 (patch)
tree		5274b8335afe85f76d1eb945eb03ffe4040737b4 /drivers
parent		47b816ff7d520509176154748713e7d66b3ad6ac (diff)
parent		3e0b2a1993c06e646d90d71e163d03869a211a4c (diff)
Merge branch 'for-linus-3.4' of git://git.linaro.org/people/sumitsemwal/linux-dma-buf
Pull dma-buf updates from Sumit Semwal:
 "This includes the following key items:
   - kernel cpu access support,
   - flag-passing to dma_buf_fd,
   - relevant Documentation updates, and
   - some minor cleanups and fixes.

  These changes are needed for the drm prime/dma-buf interface code that
  Dave Airlie plans to submit in this merge window."

* 'for-linus-3.4' of git://git.linaro.org/people/sumitsemwal/linux-dma-buf:
  dma-buf: correct dummy function declarations.
  dma-buf: document fd flags and O_CLOEXEC requirement
  dma_buf: Add documentation for the new cpu access support
  dma-buf: add support for kernel cpu access
  dma-buf: don't hold the mutex around map/unmap calls
  dma-buf: add get_dma_buf()
  dma-buf: pass flags into dma_buf_fd.
  dma-buf: add dma_data_direction to unmap dma_buf_op
  dma-buf: Move code out of mutex-protected section in dma_buf_attach()
  dma-buf: Return error instead of using a goto statement when possible
  dma-buf: Remove unneeded sanity checks
  dma-buf: Constify ops argument to dma_buf_export()
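Taken together, the importer-visible API after this merge looks like the following minimal sketch. The importer function, device pointer, and error handling are illustrative (a 3.4 exporter may also return NULL from its map callback):

    #include <linux/dma-buf.h>
    #include <linux/dma-direction.h>
    #include <linux/err.h>

    /* Hypothetical importer: turn a shared fd into a device-mapped
     * scatterlist, then release everything again. */
    static int my_import(struct device *my_dev, int fd)
    {
    	struct dma_buf *dmabuf;
    	struct dma_buf_attachment *attach;
    	struct sg_table *sgt;

    	dmabuf = dma_buf_get(fd);	/* takes a file reference */
    	if (IS_ERR(dmabuf))
    		return PTR_ERR(dmabuf);
    	/* get_dma_buf(dmabuf), added in this series, bumps the refcount
    	 * if the driver keeps its own long-term reference. */

    	attach = dma_buf_attach(dmabuf, my_dev);
    	if (IS_ERR(attach)) {
    		dma_buf_put(dmabuf);
    		return PTR_ERR(attach);
    	}

    	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
    	if (IS_ERR_OR_NULL(sgt)) {
    		dma_buf_detach(dmabuf, attach);
    		dma_buf_put(dmabuf);
    		return sgt ? PTR_ERR(sgt) : -ENOMEM;
    	}

    	/* ... program the device with sgt ... */

    	/* unmap now takes the direction, mirroring dma_unmap_sg() */
    	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
    	dma_buf_detach(dmabuf, attach);
    	dma_buf_put(dmabuf);
    	return 0;
    }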
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/base/dma-buf.c	165
1 file changed, 141 insertions(+), 24 deletions(-)
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index e38ad243b4b..07cbbc6fddb 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -71,7 +71,7 @@ static inline int is_dma_buf_file(struct file *file)
  * ops, or error in allocating struct dma_buf, will return negative error.
  *
  */
-struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
+struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
 				size_t size, int flags)
 {
 	struct dma_buf *dmabuf;
@@ -80,7 +80,9 @@ struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
 	if (WARN_ON(!priv || !ops
 		  || !ops->map_dma_buf
 		  || !ops->unmap_dma_buf
-		  || !ops->release)) {
+		  || !ops->release
+		  || !ops->kmap_atomic
+		  || !ops->kmap)) {
 		return ERR_PTR(-EINVAL);
 	}
 
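Since kmap and kmap_atomic join map_dma_buf, unmap_dma_buf and release as mandatory, an exporter's ops table now needs at least these five entries. A minimal sketch, assuming hypothetical my_* callbacks; attach/detach, begin/end_cpu_access and the kunmap hooks remain optional per the wrappers further down:

    #include <linux/dma-buf.h>

    /* Sketch: the five ops dma_buf_export() now insists on. */
    static const struct dma_buf_ops my_dmabuf_ops = {
    	.map_dma_buf	= my_map_dma_buf,	/* required */
    	.unmap_dma_buf	= my_unmap_dma_buf,	/* required */
    	.release	= my_release,		/* required */
    	.kmap_atomic	= my_kmap_atomic,	/* now required */
    	.kmap		= my_kmap,		/* now required */
    };

    /* The constified ops argument lets the table live in rodata;
     * the O_RDWR here sets the mode of the backing anon file. */
    dmabuf = dma_buf_export(my_priv, &my_dmabuf_ops, my_size, O_RDWR);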
@@ -107,17 +109,18 @@ EXPORT_SYMBOL_GPL(dma_buf_export);
 /**
  * dma_buf_fd - returns a file descriptor for the given dma_buf
  * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
+ * @flags:	[in]	flags to give to fd
  *
  * On success, returns an associated 'fd'. Else, returns error.
  */
-int dma_buf_fd(struct dma_buf *dmabuf)
+int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 {
 	int error, fd;
 
 	if (!dmabuf || !dmabuf->file)
 		return -EINVAL;
 
-	error = get_unused_fd();
+	error = get_unused_fd_flags(flags);
 	if (error < 0)
 		return error;
 	fd = error;
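With the flag forwarded to get_unused_fd_flags(), an exporter's fd-creating ioctl can honour a userspace request for close-on-exec, which the documentation patch in this series recommends for new ioctls. A hedged fragment (the surrounding ioctl is assumed):

    /* Pass O_CLOEXEC through rather than defaulting it, so userspace
     * stays race-free against a concurrent fork()+exec(). */
    int fd = dma_buf_fd(dmabuf, O_CLOEXEC);
    if (fd < 0)
    	return fd;	/* no free descriptor available */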
@@ -185,17 +188,18 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 	struct dma_buf_attachment *attach;
 	int ret;
 
-	if (WARN_ON(!dmabuf || !dev || !dmabuf->ops))
+	if (WARN_ON(!dmabuf || !dev))
 		return ERR_PTR(-EINVAL);
 
 	attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
 	if (attach == NULL)
-		goto err_alloc;
-
-	mutex_lock(&dmabuf->lock);
+		return ERR_PTR(-ENOMEM);
 
 	attach->dev = dev;
 	attach->dmabuf = dmabuf;
+
+	mutex_lock(&dmabuf->lock);
+
 	if (dmabuf->ops->attach) {
 		ret = dmabuf->ops->attach(dmabuf, dev, attach);
 		if (ret)
@@ -206,8 +210,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 	mutex_unlock(&dmabuf->lock);
 	return attach;
 
-err_alloc:
-	return ERR_PTR(-ENOMEM);
 err_attach:
 	kfree(attach);
 	mutex_unlock(&dmabuf->lock);
@@ -224,7 +226,7 @@ EXPORT_SYMBOL_GPL(dma_buf_attach);
  */
 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 {
-	if (WARN_ON(!dmabuf || !attach || !dmabuf->ops))
+	if (WARN_ON(!dmabuf || !attach))
 		return;
 
 	mutex_lock(&dmabuf->lock);
@@ -255,13 +257,10 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 
 	might_sleep();
 
-	if (WARN_ON(!attach || !attach->dmabuf || !attach->dmabuf->ops))
+	if (WARN_ON(!attach || !attach->dmabuf))
 		return ERR_PTR(-EINVAL);
 
-	mutex_lock(&attach->dmabuf->lock);
-	if (attach->dmabuf->ops->map_dma_buf)
-		sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
-	mutex_unlock(&attach->dmabuf->lock);
+	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 
 	return sg_table;
 }
@@ -273,19 +272,137 @@ EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
  * dma_buf_ops.
  * @attach:	[in]	attachment to unmap buffer from
  * @sg_table:	[in]	scatterlist info of the buffer to unmap
+ * @direction:	[in]	direction of DMA transfer
  *
  */
 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
-				struct sg_table *sg_table)
+				struct sg_table *sg_table,
+				enum dma_data_direction direction)
 {
-	if (WARN_ON(!attach || !attach->dmabuf || !sg_table
-			    || !attach->dmabuf->ops))
+	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
 		return;
 
-	mutex_lock(&attach->dmabuf->lock);
-	if (attach->dmabuf->ops->unmap_dma_buf)
-		attach->dmabuf->ops->unmap_dma_buf(attach, sg_table);
-	mutex_unlock(&attach->dmabuf->lock);
-
+	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+						direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
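The new direction argument matters because the streaming DMA API requires the same direction at unmap time as at map time, and with the mutex gone both callbacks may sleep. A sketch of matching exporter callbacks, assuming a hypothetical struct my_buffer that holds a preallocated sg_table:

    #include <linux/dma-buf.h>
    #include <linux/dma-mapping.h>
    #include <linux/err.h>

    struct my_buffer {			/* hypothetical exporter state */
    	struct sg_table *sgt;
    };

    static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
    				       enum dma_data_direction dir)
    {
    	struct my_buffer *buf = attach->dmabuf->priv;

    	/* dmabuf->lock is no longer held here, so sleeping is allowed. */
    	if (!dma_map_sg(attach->dev, buf->sgt->sgl, buf->sgt->nents, dir))
    		return ERR_PTR(-ENOMEM);
    	return buf->sgt;
    }

    static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
    			     struct sg_table *sgt,
    			     enum dma_data_direction dir)
    {
    	/* dma_unmap_sg() needs the direction used at map time,
    	 * which is why unmap_dma_buf now receives it. */
    	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
    }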
+
+
+/**
+ * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
+ * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
+ * preparations. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dmabuf:	[in]	buffer to prepare cpu access for.
+ * @start:	[in]	start of range for cpu access.
+ * @len:	[in]	length of range for cpu access.
+ * @direction:	[in]	direction of cpu access.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+			     enum dma_data_direction direction)
+{
+	int ret = 0;
+
+	if (WARN_ON(!dmabuf))
+		return -EINVAL;
+
+	if (dmabuf->ops->begin_cpu_access)
+		ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
+
+/**
+ * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
+ * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
+ * actions. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dmabuf:	[in]	buffer to complete cpu access for.
+ * @start:	[in]	start of range for cpu access.
+ * @len:	[in]	length of range for cpu access.
+ * @direction:	[in]	direction of cpu access.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+			    enum dma_data_direction direction)
+{
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->end_cpu_access)
+		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+}
+EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
+
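Together the pair brackets any kernel-side CPU access. A minimal sketch with illustrative range and direction values:

    /* Prepare the first 4 KiB for a CPU read of device-written data. */
    ret = dma_buf_begin_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
    if (ret)
    	return ret;

    /* ... access the range via dma_buf_kmap()/dma_buf_kmap_atomic() ... */

    /* Must mirror the begin call; this part cannot fail. */
    dma_buf_end_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);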
+/**
+ * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
+ * space. The same restrictions as for kmap_atomic and friends apply.
+ * @dmabuf:	[in]	buffer to map page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
+{
+	WARN_ON(!dmabuf);
+
+	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
+
+/**
+ * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
+ * @dmabuf:	[in]	buffer to unmap page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to unmap.
+ * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
+			   void *vaddr)
+{
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->kunmap_atomic)
+		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
+
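The usual kmap_atomic() rules carry over: no sleeping between map and unmap, so the window should stay short. A sketch:

    /* Copy one word out of page 0 without sleeping in between. */
    void *vaddr = dma_buf_kmap_atomic(dmabuf, 0);
    u32 val = *(u32 *)vaddr;
    dma_buf_kunmap_atomic(dmabuf, 0, vaddr);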
+/**
+ * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
+ * same restrictions as for kmap and friends apply.
+ * @dmabuf:	[in]	buffer to map page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
+{
+	WARN_ON(!dmabuf);
+
+	return dmabuf->ops->kmap(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap);
+
+/**
+ * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
+ * @dmabuf:	[in]	buffer to unmap page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to unmap.
+ * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
+		    void *vaddr)
+{
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->kunmap)
+		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap);
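A complete read sequence combines the cpu-access bracket with the non-atomic kmap interface. A sketch under the assumption that the data of interest fits in page 0; the helper name and the memcpy are illustrative:

    #include <linux/dma-buf.h>
    #include <linux/string.h>

    /* Sketch: copy len bytes (len <= PAGE_SIZE, page 0 only) out of a
     * dma_buf after the device has finished writing it. */
    static int my_read_dmabuf(struct dma_buf *dmabuf, void *out, size_t len)
    {
    	void *vaddr;
    	int ret;

    	ret = dma_buf_begin_cpu_access(dmabuf, 0, len, DMA_FROM_DEVICE);
    	if (ret)
    		return ret;

    	vaddr = dma_buf_kmap(dmabuf, 0);	/* may not fail */
    	memcpy(out, vaddr, len);
    	dma_buf_kunmap(dmabuf, 0, vaddr);

    	dma_buf_end_cpu_access(dmabuf, 0, len, DMA_FROM_DEVICE);
    	return 0;
    }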