about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-03-28 18:02:41 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-03-28 18:02:41 -0400
commited0bb8ea059764c3fc882fb135473afd347335e9 (patch)
tree5274b8335afe85f76d1eb945eb03ffe4040737b4 /include
parent47b816ff7d520509176154748713e7d66b3ad6ac (diff)
parent3e0b2a1993c06e646d90d71e163d03869a211a4c (diff)
Merge branch 'for-linus-3.4' of git://git.linaro.org/people/sumitsemwal/linux-dma-buf
Pull dma-buf updates from Sumit Semwal:
 "This includes the following key items:
  - kernel cpu access support,
  - flag-passing to dma_buf_fd,
  - relevant Documentation updates, and
  - some minor cleanups and fixes.

  These changes are needed for the drm prime/dma-buf interface code that
  Dave Airlie plans to submit in this merge window."

* 'for-linus-3.4' of git://git.linaro.org/people/sumitsemwal/linux-dma-buf:
  dma-buf: correct dummy function declarations.
  dma-buf: document fd flags and O_CLOEXEC requirement
  dma_buf: Add documentation for the new cpu access support
  dma-buf: add support for kernel cpu access
  dma-buf: don't hold the mutex around map/unmap calls
  dma-buf: add get_dma_buf()
  dma-buf: pass flags into dma_buf_fd.
  dma-buf: add dma_data_direction to unmap dma_buf_op
  dma-buf: Move code out of mutex-protected section in dma_buf_attach()
  dma-buf: Return error instead of using a goto statement when possible
  dma-buf: Remove unneeded sanity checks
  dma-buf: Constify ops argument to dma_buf_export()
Diffstat (limited to 'include')
-rw-r--r--include/linux/dma-buf.h97
1 files changed, 87 insertions, 10 deletions
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 887dcd487062..3efbfc2145c3 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -29,6 +29,7 @@
29#include <linux/scatterlist.h> 29#include <linux/scatterlist.h>
30#include <linux/list.h> 30#include <linux/list.h>
31#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/fs.h>
32 33
33struct device; 34struct device;
34struct dma_buf; 35struct dma_buf;
@@ -49,6 +50,17 @@ struct dma_buf_attachment;
49 * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter 50 * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
50 * pages. 51 * pages.
51 * @release: release this buffer; to be called after the last dma_buf_put. 52 * @release: release this buffer; to be called after the last dma_buf_put.
53 * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
54 * caches and allocate backing storage (if not yet done)
55 * respectively pin the objet into memory.
56 * @end_cpu_access: [optional] called after cpu access to flush cashes.
57 * @kmap_atomic: maps a page from the buffer into kernel address
58 * space, users may not block until the subsequent unmap call.
59 * This callback must not sleep.
60 * @kunmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
61 * This Callback must not sleep.
62 * @kmap: maps a page from the buffer into kernel address space.
63 * @kunmap: [optional] unmaps a page from the buffer.
52 */ 64 */
53struct dma_buf_ops { 65struct dma_buf_ops {
54 int (*attach)(struct dma_buf *, struct device *, 66 int (*attach)(struct dma_buf *, struct device *,
@@ -63,7 +75,8 @@ struct dma_buf_ops {
63 struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, 75 struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
64 enum dma_data_direction); 76 enum dma_data_direction);
65 void (*unmap_dma_buf)(struct dma_buf_attachment *, 77 void (*unmap_dma_buf)(struct dma_buf_attachment *,
66 struct sg_table *); 78 struct sg_table *,
79 enum dma_data_direction);
67 /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY 80 /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
68 * if the call would block. 81 * if the call would block.
69 */ 82 */
@@ -71,6 +84,14 @@ struct dma_buf_ops {
71 /* after final dma_buf_put() */ 84 /* after final dma_buf_put() */
72 void (*release)(struct dma_buf *); 85 void (*release)(struct dma_buf *);
73 86
87 int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
88 enum dma_data_direction);
89 void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
90 enum dma_data_direction);
91 void *(*kmap_atomic)(struct dma_buf *, unsigned long);
92 void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
93 void *(*kmap)(struct dma_buf *, unsigned long);
94 void (*kunmap)(struct dma_buf *, unsigned long, void *);
74}; 95};
75 96
76/** 97/**
@@ -86,7 +107,7 @@ struct dma_buf {
86 struct file *file; 107 struct file *file;
87 struct list_head attachments; 108 struct list_head attachments;
88 const struct dma_buf_ops *ops; 109 const struct dma_buf_ops *ops;
89 /* mutex to serialize list manipulation and other ops */ 110 /* mutex to serialize list manipulation and attach/detach */
90 struct mutex lock; 111 struct mutex lock;
91 void *priv; 112 void *priv;
92}; 113};
@@ -109,20 +130,43 @@ struct dma_buf_attachment {
109 void *priv; 130 void *priv;
110}; 131};
111 132
133/**
134 * get_dma_buf - convenience wrapper for get_file.
135 * @dmabuf: [in] pointer to dma_buf
136 *
137 * Increments the reference count on the dma-buf, needed in case of drivers
138 * that either need to create additional references to the dmabuf on the
139 * kernel side. For example, an exporter that needs to keep a dmabuf ptr
140 * so that subsequent exports don't create a new dmabuf.
141 */
142static inline void get_dma_buf(struct dma_buf *dmabuf)
143{
144 get_file(dmabuf->file);
145}
146
112#ifdef CONFIG_DMA_SHARED_BUFFER 147#ifdef CONFIG_DMA_SHARED_BUFFER
113struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, 148struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
114 struct device *dev); 149 struct device *dev);
115void dma_buf_detach(struct dma_buf *dmabuf, 150void dma_buf_detach(struct dma_buf *dmabuf,
116 struct dma_buf_attachment *dmabuf_attach); 151 struct dma_buf_attachment *dmabuf_attach);
117struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops, 152struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
118 size_t size, int flags); 153 size_t size, int flags);
119int dma_buf_fd(struct dma_buf *dmabuf); 154int dma_buf_fd(struct dma_buf *dmabuf, int flags);
120struct dma_buf *dma_buf_get(int fd); 155struct dma_buf *dma_buf_get(int fd);
121void dma_buf_put(struct dma_buf *dmabuf); 156void dma_buf_put(struct dma_buf *dmabuf);
122 157
123struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *, 158struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
124 enum dma_data_direction); 159 enum dma_data_direction);
125void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *); 160void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
161 enum dma_data_direction);
162int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
163 enum dma_data_direction dir);
164void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
165 enum dma_data_direction dir);
166void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
167void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
168void *dma_buf_kmap(struct dma_buf *, unsigned long);
169void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
126#else 170#else
127 171
128static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, 172static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
@@ -138,13 +182,13 @@ static inline void dma_buf_detach(struct dma_buf *dmabuf,
138} 182}
139 183
140static inline struct dma_buf *dma_buf_export(void *priv, 184static inline struct dma_buf *dma_buf_export(void *priv,
141 struct dma_buf_ops *ops, 185 const struct dma_buf_ops *ops,
142 size_t size, int flags) 186 size_t size, int flags)
143{ 187{
144 return ERR_PTR(-ENODEV); 188 return ERR_PTR(-ENODEV);
145} 189}
146 190
147static inline int dma_buf_fd(struct dma_buf *dmabuf) 191static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags)
148{ 192{
149 return -ENODEV; 193 return -ENODEV;
150} 194}
@@ -166,11 +210,44 @@ static inline struct sg_table *dma_buf_map_attachment(
166} 210}
167 211
168static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, 212static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
169 struct sg_table *sg) 213 struct sg_table *sg, enum dma_data_direction dir)
170{ 214{
171 return; 215 return;
172} 216}
173 217
218static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
219 size_t start, size_t len,
220 enum dma_data_direction dir)
221{
222 return -ENODEV;
223}
224
225static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
226 size_t start, size_t len,
227 enum dma_data_direction dir)
228{
229}
230
231static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf,
232 unsigned long pnum)
233{
234 return NULL;
235}
236
237static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
238 unsigned long pnum, void *vaddr)
239{
240}
241
242static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
243{
244 return NULL;
245}
246
247static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
248 unsigned long pnum, void *vaddr)
249{
250}
174#endif /* CONFIG_DMA_SHARED_BUFFER */ 251#endif /* CONFIG_DMA_SHARED_BUFFER */
175 252
176#endif /* __DMA_BUF_H__ */ 253#endif /* __DMA_BUF_H__ */