author    Linus Torvalds <torvalds@linux-foundation.org>  2012-03-28 18:02:41 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-03-28 18:02:41 -0400
commit    ed0bb8ea059764c3fc882fb135473afd347335e9 (patch)
tree      5274b8335afe85f76d1eb945eb03ffe4040737b4
parent    47b816ff7d520509176154748713e7d66b3ad6ac (diff)
parent    3e0b2a1993c06e646d90d71e163d03869a211a4c (diff)
Merge branch 'for-linus-3.4' of git://git.linaro.org/people/sumitsemwal/linux-dma-buf
Pull dma-buf updates from Sumit Semwal:
 "This includes the following key items:

   - kernel cpu access support,
   - flag-passing to dma_buf_fd,
   - relevant Documentation updates, and
   - some minor cleanups and fixes.

  These changes are needed for the drm prime/dma-buf interface code that
  Dave Airlie plans to submit in this merge window."

* 'for-linus-3.4' of git://git.linaro.org/people/sumitsemwal/linux-dma-buf:
  dma-buf: correct dummy function declarations.
  dma-buf: document fd flags and O_CLOEXEC requirement
  dma_buf: Add documentation for the new cpu access support
  dma-buf: add support for kernel cpu access
  dma-buf: don't hold the mutex around map/unmap calls
  dma-buf: add get_dma_buf()
  dma-buf: pass flags into dma_buf_fd.
  dma-buf: add dma_data_direction to unmap dma_buf_op
  dma-buf: Move code out of mutex-protected section in dma_buf_attach()
  dma-buf: Return error instead of using a goto statement when possible
  dma-buf: Remove unneeded sanity checks
  dma-buf: Constify ops argument to dma_buf_export()
-rw-r--r--  Documentation/dma-buf-sharing.txt | 120
-rw-r--r--  drivers/base/dma-buf.c            | 165
-rw-r--r--  include/linux/dma-buf.h           |  97
3 files changed, 345 insertions(+), 37 deletions(-)
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
index 225f96d88f55..3bbd5c51605a 100644
--- a/Documentation/dma-buf-sharing.txt
+++ b/Documentation/dma-buf-sharing.txt
@@ -32,8 +32,12 @@ The buffer-user
 *IMPORTANT*: [see https://lkml.org/lkml/2011/12/20/211 for more details]
 For this first version, A buffer shared using the dma_buf sharing API:
 - *may* be exported to user space using "mmap" *ONLY* by exporter, outside of
   this framework.
-- may be used *ONLY* by importers that do not need CPU access to the buffer.
+- with this new iteration of the dma-buf api cpu access from the kernel has been
+  enable, see below for the details.
+
+dma-buf operations for device dma only
+--------------------------------------
 
 The dma_buf buffer sharing API usage contains the following steps:
 
@@ -219,10 +223,120 @@ NOTES:
    If the exporter chooses not to allow an attach() operation once a
    map_dma_buf() API has been called, it simply returns an error.
 
-Miscellaneous notes:
+Kernel cpu access to a dma-buf buffer object
+--------------------------------------------
+
+The motivation to allow cpu access from the kernel to a dma-buf object from the
+importers side are:
+- fallback operations, e.g. if the devices is connected to a usb bus and the
+  kernel needs to shuffle the data around first before sending it away.
+- full transparency for existing users on the importer side, i.e. userspace
+  should not notice the difference between a normal object from that subsystem
+  and an imported one backed by a dma-buf. This is really important for drm
+  opengl drivers that expect to still use all the existing upload/download
+  paths.
+
+Access to a dma_buf from the kernel context involves three steps:
+
+1. Prepare access, which invalidate any necessary caches and make the object
+   available for cpu access.
+2. Access the object page-by-page with the dma_buf map apis
+3. Finish access, which will flush any necessary cpu caches and free reserved
+   resources.
+
+1. Prepare access
+
+   Before an importer can access a dma_buf object with the cpu from the kernel
+   context, it needs to notify the exporter of the access that is about to
+   happen.
+
+   Interface:
+      int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+                                   size_t start, size_t len,
+                                   enum dma_data_direction direction)
+
+   This allows the exporter to ensure that the memory is actually available for
+   cpu access - the exporter might need to allocate or swap-in and pin the
+   backing storage. The exporter also needs to ensure that cpu access is
+   coherent for the given range and access direction. The range and access
+   direction can be used by the exporter to optimize the cache flushing, i.e.
+   access outside of the range or with a different direction (read instead of
+   write) might return stale or even bogus data (e.g. when the exporter needs to
+   copy the data to temporary storage).
+
+   This step might fail, e.g. in oom conditions.
+
+2. Accessing the buffer
+
+   To support dma_buf objects residing in highmem cpu access is page-based using
+   an api similar to kmap. Accessing a dma_buf is done in aligned chunks of
+   PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which returns
+   a pointer in kernel virtual address space. Afterwards the chunk needs to be
+   unmapped again. There is no limit on how often a given chunk can be mapped
+   and unmapped, i.e. the importer does not need to call begin_cpu_access again
+   before mapping the same chunk again.
+
+   Interfaces:
+      void *dma_buf_kmap(struct dma_buf *, unsigned long);
+      void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
+
+   There are also atomic variants of these interfaces. Like for kmap they
+   facilitate non-blocking fast-paths. Neither the importer nor the exporter (in
+   the callback) is allowed to block when using these.
+
+   Interfaces:
+      void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
+      void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
+
+   For importers all the restrictions of using kmap apply, like the limited
+   supply of kmap_atomic slots. Hence an importer shall only hold onto at most 2
+   atomic dma_buf kmaps at the same time (in any given process context).
+
+   dma_buf kmap calls outside of the range specified in begin_cpu_access are
+   undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
+   the partial chunks at the beginning and end but may return stale or bogus
+   data outside of the range (in these partial chunks).
+
+   Note that these calls need to always succeed. The exporter needs to complete
+   any preparations that might fail in begin_cpu_access.
+
+3. Finish access
+
+   When the importer is done accessing the range specified in begin_cpu_access,
+   it needs to announce this to the exporter (to facilitate cache flushing and
+   unpinning of any pinned resources). The result of of any dma_buf kmap calls
+   after end_cpu_access is undefined.
+
+   Interface:
+      void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
+                                  size_t start, size_t len,
+                                  enum dma_data_direction dir);
+
+
+Miscellaneous notes
+-------------------
+
 - Any exporters or users of the dma-buf buffer sharing framework must have
   a 'select DMA_SHARED_BUFFER' in their respective Kconfigs.
 
+- In order to avoid fd leaks on exec, the FD_CLOEXEC flag must be set
+  on the file descriptor. This is not just a resource leak, but a
+  potential security hole. It could give the newly exec'd application
+  access to buffers, via the leaked fd, to which it should otherwise
+  not be permitted access.
+
+  The problem with doing this via a separate fcntl() call, versus doing it
+  atomically when the fd is created, is that this is inherently racy in a
+  multi-threaded app[3]. The issue is made worse when it is library code
+  opening/creating the file descriptor, as the application may not even be
+  aware of the fd's.
+
+  To avoid this problem, userspace must have a way to request O_CLOEXEC
+  flag be set when the dma-buf fd is created. So any API provided by
+  the exporting driver to create a dmabuf fd must provide a way to let
+  userspace control setting of O_CLOEXEC flag passed in to dma_buf_fd().
+
 References:
 [1] struct dma_buf_ops in include/linux/dma-buf.h
 [2] All interfaces mentioned above defined in include/linux/dma-buf.h
+[3] https://lwn.net/Articles/236486/
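
[Editorial illustration, not part of the patch.] To make the three-step kernel cpu access flow documented above concrete, here is a minimal importer-side sketch. The helper name my_copy_from_dmabuf and its error handling are hypothetical; only the dma_buf_* calls come from this series. It copies a byte range out of a dma_buf into a kernel buffer:

    #include <linux/dma-buf.h>
    #include <linux/kernel.h>   /* min_t() */
    #include <linux/mm.h>       /* PAGE_SIZE, PAGE_SHIFT, PAGE_MASK */
    #include <linux/string.h>   /* memcpy() */

    /* Hypothetical helper: copy 'len' bytes starting at 'start' out of a dma_buf. */
    static int my_copy_from_dmabuf(struct dma_buf *dmabuf, size_t start,
                                   size_t len, void *dst)
    {
        size_t first_page = start >> PAGE_SHIFT;
        size_t last_page = (start + len - 1) >> PAGE_SHIFT;
        size_t copied = 0;
        size_t pg;
        int ret;

        /* 1. Prepare access: the exporter may pin or allocate backing storage
         *    and invalidate caches for this range; this step may fail. */
        ret = dma_buf_begin_cpu_access(dmabuf, start, len, DMA_FROM_DEVICE);
        if (ret)
            return ret;

        /* 2. Access the buffer page-by-page with the dma_buf map apis. */
        for (pg = first_page; pg <= last_page; pg++) {
            size_t offset = (pg == first_page) ? (start & ~PAGE_MASK) : 0;
            size_t chunk = min_t(size_t, PAGE_SIZE - offset, len - copied);
            void *vaddr = dma_buf_kmap(dmabuf, pg);    /* must not fail */

            memcpy(dst + copied, vaddr + offset, chunk);
            dma_buf_kunmap(dmabuf, pg, vaddr);
            copied += chunk;
        }

        /* 3. Finish access: lets the exporter flush caches and unpin. */
        dma_buf_end_cpu_access(dmabuf, start, len, DMA_FROM_DEVICE);
        return 0;
    }

Note that dma_buf_kmap()/dma_buf_kunmap() operate on whole PAGE_SIZE chunks, so the sketch derives the page index and in-page offset itself and stays inside the range given to begin_cpu_access. The atomic variants would follow the same pattern but must not block between map and unmap.
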
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index e38ad243b4bb..07cbbc6fddb4 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -71,7 +71,7 @@ static inline int is_dma_buf_file(struct file *file)
  * ops, or error in allocating struct dma_buf, will return negative error.
  *
  */
-struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
+struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
             size_t size, int flags)
 {
     struct dma_buf *dmabuf;
@@ -80,7 +80,9 @@ struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
     if (WARN_ON(!priv || !ops
               || !ops->map_dma_buf
               || !ops->unmap_dma_buf
-              || !ops->release)) {
+              || !ops->release
+              || !ops->kmap_atomic
+              || !ops->kmap)) {
         return ERR_PTR(-EINVAL);
     }
 
@@ -107,17 +109,18 @@ EXPORT_SYMBOL_GPL(dma_buf_export);
 /**
  * dma_buf_fd - returns a file descriptor for the given dma_buf
  * @dmabuf:    [in]    pointer to dma_buf for which fd is required.
+ * @flags:     [in]    flags to give to fd
  *
  * On success, returns an associated 'fd'. Else, returns error.
  */
-int dma_buf_fd(struct dma_buf *dmabuf)
+int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 {
     int error, fd;
 
     if (!dmabuf || !dmabuf->file)
         return -EINVAL;
 
-    error = get_unused_fd();
+    error = get_unused_fd_flags(flags);
     if (error < 0)
         return error;
     fd = error;
@@ -185,17 +188,18 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
     struct dma_buf_attachment *attach;
     int ret;
 
-    if (WARN_ON(!dmabuf || !dev || !dmabuf->ops))
+    if (WARN_ON(!dmabuf || !dev))
         return ERR_PTR(-EINVAL);
 
     attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
     if (attach == NULL)
-        goto err_alloc;
-
-    mutex_lock(&dmabuf->lock);
+        return ERR_PTR(-ENOMEM);
 
     attach->dev = dev;
     attach->dmabuf = dmabuf;
+
+    mutex_lock(&dmabuf->lock);
+
     if (dmabuf->ops->attach) {
         ret = dmabuf->ops->attach(dmabuf, dev, attach);
         if (ret)
@@ -206,8 +210,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
     mutex_unlock(&dmabuf->lock);
     return attach;
 
-err_alloc:
-    return ERR_PTR(-ENOMEM);
 err_attach:
     kfree(attach);
     mutex_unlock(&dmabuf->lock);
@@ -224,7 +226,7 @@ EXPORT_SYMBOL_GPL(dma_buf_attach);
  */
 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 {
-    if (WARN_ON(!dmabuf || !attach || !dmabuf->ops))
+    if (WARN_ON(!dmabuf || !attach))
         return;
 
     mutex_lock(&dmabuf->lock);
@@ -255,13 +257,10 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 
     might_sleep();
 
-    if (WARN_ON(!attach || !attach->dmabuf || !attach->dmabuf->ops))
+    if (WARN_ON(!attach || !attach->dmabuf))
         return ERR_PTR(-EINVAL);
 
-    mutex_lock(&attach->dmabuf->lock);
-    if (attach->dmabuf->ops->map_dma_buf)
-        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
-    mutex_unlock(&attach->dmabuf->lock);
+    sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 
     return sg_table;
 }
@@ -273,19 +272,137 @@ EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
  * dma_buf_ops.
  * @attach:    [in]    attachment to unmap buffer from
  * @sg_table:  [in]    scatterlist info of the buffer to unmap
+ * @direction: [in]    direction of DMA transfer
  *
  */
 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
-                struct sg_table *sg_table)
+                struct sg_table *sg_table,
+                enum dma_data_direction direction)
 {
-    if (WARN_ON(!attach || !attach->dmabuf || !sg_table
-            || !attach->dmabuf->ops))
+    if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
         return;
 
-    mutex_lock(&attach->dmabuf->lock);
-    if (attach->dmabuf->ops->unmap_dma_buf)
-        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table);
-    mutex_unlock(&attach->dmabuf->lock);
-
+    attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+                        direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+
+
+/**
+ * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
+ * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
+ * preparations. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dma_buf:   [in]    buffer to prepare cpu access for.
+ * @start:     [in]    start of range for cpu access.
+ * @len:       [in]    length of range for cpu access.
+ * @direction: [in]    length of range for cpu access.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+                 enum dma_data_direction direction)
+{
+    int ret = 0;
+
+    if (WARN_ON(!dmabuf))
+        return -EINVAL;
+
+    if (dmabuf->ops->begin_cpu_access)
+        ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);
+
+    return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
+
+/**
+ * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
+ * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
+ * actions. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dma_buf:   [in]    buffer to complete cpu access for.
+ * @start:     [in]    start of range for cpu access.
+ * @len:       [in]    length of range for cpu access.
+ * @direction: [in]    length of range for cpu access.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+                enum dma_data_direction direction)
+{
+    WARN_ON(!dmabuf);
+
+    if (dmabuf->ops->end_cpu_access)
+        dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+}
+EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
+
+/**
+ * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
+ * space. The same restrictions as for kmap_atomic and friends apply.
+ * @dma_buf:   [in]    buffer to map page from.
+ * @page_num:  [in]    page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
+{
+    WARN_ON(!dmabuf);
+
+    return dmabuf->ops->kmap_atomic(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
+
+/**
+ * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
+ * @dma_buf:   [in]    buffer to unmap page from.
+ * @page_num:  [in]    page in PAGE_SIZE units to unmap.
+ * @vaddr:     [in]    kernel space pointer obtained from dma_buf_kmap_atomic.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
+               void *vaddr)
+{
+    WARN_ON(!dmabuf);
+
+    if (dmabuf->ops->kunmap_atomic)
+        dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
+
+/**
+ * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
+ * same restrictions as for kmap and friends apply.
+ * @dma_buf:   [in]    buffer to map page from.
+ * @page_num:  [in]    page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
+{
+    WARN_ON(!dmabuf);
+
+    return dmabuf->ops->kmap(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap);
+
+/**
+ * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
+ * @dma_buf:   [in]    buffer to unmap page from.
+ * @page_num:  [in]    page in PAGE_SIZE units to unmap.
+ * @vaddr:     [in]    kernel space pointer obtained from dma_buf_kmap.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
+            void *vaddr)
+{
+    WARN_ON(!dmabuf);
+
+    if (dmabuf->ops->kunmap)
+        dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap);
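
[Editorial illustration, not part of the patch.] The flag-passing change above means O_CLOEXEC is applied atomically when the fd is created, via get_unused_fd_flags(). A sketch of how an exporting driver might wire this up is below; struct my_export_args, my_export_ioctl, my_lookup_or_export_buffer and struct my_device are hypothetical driver-side names, only dma_buf_fd() comes from this series:

    #include <linux/dma-buf.h>
    #include <linux/err.h>
    #include <linux/fcntl.h>    /* O_CLOEXEC */
    #include <linux/types.h>

    struct my_device;                                           /* hypothetical */
    struct dma_buf *my_lookup_or_export_buffer(struct my_device *mydev,
                                               __u32 handle);   /* hypothetical */

    /* Hypothetical ioctl payload for a driver that exports dma-bufs. */
    struct my_export_args {
        __u32 handle;   /* driver-local buffer handle */
        __u32 flags;    /* userspace may set O_CLOEXEC here */
        __s32 fd;       /* returned dma-buf file descriptor */
    };

    static int my_export_ioctl(struct my_device *mydev, struct my_export_args *args)
    {
        struct dma_buf *dmabuf;
        int fd;

        dmabuf = my_lookup_or_export_buffer(mydev, args->handle);
        if (IS_ERR(dmabuf))
            return PTR_ERR(dmabuf);

        /* O_CLOEXEC is applied at fd creation time, avoiding the racy
         * separate fcntl(F_SETFD) dance described in the documentation above. */
        fd = dma_buf_fd(dmabuf, args->flags & O_CLOEXEC);
        if (fd < 0)
            return fd;

        args->fd = fd;
        return 0;
    }

If the driver caches the dma_buf pointer so that repeated exports of the same buffer return the same object, it would take an extra reference with get_dma_buf(), also added by this series, rather than exporting a new dma_buf each time.
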
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 887dcd487062..3efbfc2145c3 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -29,6 +29,7 @@
 #include <linux/scatterlist.h>
 #include <linux/list.h>
 #include <linux/dma-mapping.h>
+#include <linux/fs.h>
 
 struct device;
 struct dma_buf;
@@ -49,6 +50,17 @@ struct dma_buf_attachment;
  * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
  *                 pages.
  * @release: release this buffer; to be called after the last dma_buf_put.
+ * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
+ *                    caches and allocate backing storage (if not yet done)
+ *                    respectively pin the objet into memory.
+ * @end_cpu_access: [optional] called after cpu access to flush cashes.
+ * @kmap_atomic: maps a page from the buffer into kernel address
+ *               space, users may not block until the subsequent unmap call.
+ *               This callback must not sleep.
+ * @kunmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
+ *                 This Callback must not sleep.
+ * @kmap: maps a page from the buffer into kernel address space.
+ * @kunmap: [optional] unmaps a page from the buffer.
  */
 struct dma_buf_ops {
     int (*attach)(struct dma_buf *, struct device *,
@@ -63,7 +75,8 @@ struct dma_buf_ops {
     struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
                         enum dma_data_direction);
     void (*unmap_dma_buf)(struct dma_buf_attachment *,
-                        struct sg_table *);
+                        struct sg_table *,
+                        enum dma_data_direction);
     /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
      * if the call would block.
      */
@@ -71,6 +84,14 @@ struct dma_buf_ops {
     /* after final dma_buf_put() */
     void (*release)(struct dma_buf *);
 
+    int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
+                enum dma_data_direction);
+    void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
+                enum dma_data_direction);
+    void *(*kmap_atomic)(struct dma_buf *, unsigned long);
+    void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
+    void *(*kmap)(struct dma_buf *, unsigned long);
+    void (*kunmap)(struct dma_buf *, unsigned long, void *);
 };
 
 /**
@@ -86,7 +107,7 @@ struct dma_buf {
     struct file *file;
     struct list_head attachments;
     const struct dma_buf_ops *ops;
-    /* mutex to serialize list manipulation and other ops */
+    /* mutex to serialize list manipulation and attach/detach */
     struct mutex lock;
     void *priv;
 };
@@ -109,20 +130,43 @@ struct dma_buf_attachment {
     void *priv;
 };
 
+/**
+ * get_dma_buf - convenience wrapper for get_file.
+ * @dmabuf:    [in]    pointer to dma_buf
+ *
+ * Increments the reference count on the dma-buf, needed in case of drivers
+ * that either need to create additional references to the dmabuf on the
+ * kernel side. For example, an exporter that needs to keep a dmabuf ptr
+ * so that subsequent exports don't create a new dmabuf.
+ */
+static inline void get_dma_buf(struct dma_buf *dmabuf)
+{
+    get_file(dmabuf->file);
+}
+
 #ifdef CONFIG_DMA_SHARED_BUFFER
 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                             struct device *dev);
 void dma_buf_detach(struct dma_buf *dmabuf,
                 struct dma_buf_attachment *dmabuf_attach);
-struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
+struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
                    size_t size, int flags);
-int dma_buf_fd(struct dma_buf *dmabuf);
+int dma_buf_fd(struct dma_buf *dmabuf, int flags);
 struct dma_buf *dma_buf_get(int fd);
 void dma_buf_put(struct dma_buf *dmabuf);
 
 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
                     enum dma_data_direction);
-void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *);
+void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
+                enum dma_data_direction);
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+                 enum dma_data_direction dir);
+void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+                enum dma_data_direction dir);
+void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
+void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
+void *dma_buf_kmap(struct dma_buf *, unsigned long);
+void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
 #else
 
 static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
@@ -138,13 +182,13 @@ static inline void dma_buf_detach(struct dma_buf *dmabuf,
 }
 
 static inline struct dma_buf *dma_buf_export(void *priv,
-                          struct dma_buf_ops *ops,
+                          const struct dma_buf_ops *ops,
                           size_t size, int flags)
 {
     return ERR_PTR(-ENODEV);
 }
 
-static inline int dma_buf_fd(struct dma_buf *dmabuf)
+static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 {
     return -ENODEV;
 }
@@ -166,11 +210,44 @@ static inline struct sg_table *dma_buf_map_attachment(
 }
 
 static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
-            struct sg_table *sg)
+            struct sg_table *sg, enum dma_data_direction dir)
 {
     return;
 }
 
+static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+                       size_t start, size_t len,
+                       enum dma_data_direction dir)
+{
+    return -ENODEV;
+}
+
+static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+                      size_t start, size_t len,
+                      enum dma_data_direction dir)
+{
+}
+
+static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf,
+                    unsigned long pnum)
+{
+    return NULL;
+}
+
+static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
+                     unsigned long pnum, void *vaddr)
+{
+}
+
+static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
+{
+    return NULL;
+}
+
+static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
+                  unsigned long pnum, void *vaddr)
+{
+}
 #endif /* CONFIG_DMA_SHARED_BUFFER */
 
 #endif /* __DMA_BUF_H__ */
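
[Editorial illustration, not part of the patch.] Finally, a sketch of the importer-side device-dma path touched by the header changes above; my_device_dma is a hypothetical helper (a real driver would typically keep the attachment and mapping alive for the duration of the import rather than tearing them down immediately). The point it shows is that dma_buf_unmap_attachment() now takes the same dma_data_direction that was used for the map:

    #include <linux/dma-buf.h>
    #include <linux/err.h>
    #include <linux/errno.h>

    static int my_device_dma(struct device *dev, struct dma_buf *dmabuf)
    {
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        int ret = 0;

        attach = dma_buf_attach(dmabuf, dev);
        if (IS_ERR(attach))
            return PTR_ERR(attach);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR_OR_NULL(sgt)) {      /* exporters may return ERR_PTR or NULL */
            ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
            goto out_detach;
        }

        /* ... program the device with the scatterlist in sgt and run the dma ... */

        /* The direction must now be passed through to the unmap as well. */
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);

    out_detach:
        dma_buf_detach(dmabuf, attach);
        return ret;
    }
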