-rw-r--r--  drivers/base/dma-buf.c   | 124
-rw-r--r--  include/linux/dma-buf.h  |  59
2 files changed, 182 insertions(+), 1 deletion(-)
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 5641b9c8d50c..07cbbc6fddb4 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -80,7 +80,9 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
 	if (WARN_ON(!priv || !ops
 		  || !ops->map_dma_buf
 		  || !ops->unmap_dma_buf
-		  || !ops->release)) {
+		  || !ops->release
+		  || !ops->kmap_atomic
+		  || !ops->kmap)) {
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -284,3 +286,123 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 						direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+
+
+/**
+ * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
+ * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
+ * preparations. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dmabuf:	[in]	buffer to prepare cpu access for.
+ * @start:	[in]	start of range for cpu access.
+ * @len:	[in]	length of range for cpu access.
+ * @direction:	[in]	direction of the cpu access.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+			     enum dma_data_direction direction)
+{
+	int ret = 0;
+
+	if (WARN_ON(!dmabuf))
+		return -EINVAL;
+
+	if (dmabuf->ops->begin_cpu_access)
+		ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
+
+/**
+ * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
+ * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
+ * actions. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dmabuf:	[in]	buffer to complete cpu access for.
+ * @start:	[in]	start of range for cpu access.
+ * @len:	[in]	length of range for cpu access.
+ * @direction:	[in]	direction of the cpu access.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+			    enum dma_data_direction direction)
+{
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->end_cpu_access)
+		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+}
+EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
+
+/**
+ * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
+ * space. The same restrictions as for kmap_atomic and friends apply.
+ * @dmabuf:	[in]	buffer to map page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
+{
+	WARN_ON(!dmabuf);
+
+	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
+
+/**
+ * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
+ * @dmabuf:	[in]	buffer to unmap page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to unmap.
+ * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
+			   void *vaddr)
+{
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->kunmap_atomic)
+		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
+
+/**
+ * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
+ * same restrictions as for kmap and friends apply.
+ * @dmabuf:	[in]	buffer to map page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
+{
+	WARN_ON(!dmabuf);
+
+	return dmabuf->ops->kmap(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap);
+
+/**
+ * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
+ * @dmabuf:	[in]	buffer to unmap page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to unmap.
+ * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
+		    void *vaddr)
+{
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->kunmap)
+		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap);
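
As an importer-side illustration of the interface added above, here is a minimal sketch of a helper that copies the first page of a dma_buf into a kernel buffer. The my_importer_read() name, the one-page range and the DMA_FROM_DEVICE direction are illustrative assumptions; only the dma_buf_* calls come from this patch.

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical importer helper (not part of this patch): read the first
 * PAGE_SIZE bytes of an imported dma_buf into 'dst'. */
static int my_importer_read(struct dma_buf *dmabuf, void *dst)
{
	void *vaddr;
	int ret;

	/* Ask the exporter to make the range coherent for cpu reads. */
	ret = dma_buf_begin_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* Non-atomic mapping of page 0; this path may sleep. */
	vaddr = dma_buf_kmap(dmabuf, 0);
	memcpy(dst, vaddr, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, 0, vaddr);

	/* Every begin_cpu_access must be paired with an end_cpu_access. */
	dma_buf_end_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	return 0;
}

The atomic variants (dma_buf_kmap_atomic/dma_buf_kunmap_atomic) follow the same pattern but may be used where sleeping is not allowed; no blocking is permitted between the map and the corresponding unmap.
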
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 24e0f4828711..ee7ef9990d9a 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -50,6 +50,17 @@ struct dma_buf_attachment;
  * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
  *		    pages.
  * @release: release this buffer; to be called after the last dma_buf_put.
+ * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
+ * 		      caches and allocate backing storage (if not yet done),
+ * 		      or pin the object into memory.
+ * @end_cpu_access: [optional] called after cpu access to flush caches.
+ * @kmap_atomic: maps a page from the buffer into kernel address
+ * 		 space, users may not block until the subsequent unmap call.
+ * 		 This callback must not sleep.
+ * @kunmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
+ * 		   This callback must not sleep.
+ * @kmap: maps a page from the buffer into kernel address space.
+ * @kunmap: [optional] unmaps a page from the buffer.
  */
 struct dma_buf_ops {
 	int (*attach)(struct dma_buf *, struct device *,
@@ -73,6 +84,14 @@ struct dma_buf_ops {
 	/* after final dma_buf_put() */
 	void (*release)(struct dma_buf *);
 
+	int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
+				enum dma_data_direction);
+	void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
+			       enum dma_data_direction);
+	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
+	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
+	void *(*kmap)(struct dma_buf *, unsigned long);
+	void (*kunmap)(struct dma_buf *, unsigned long, void *);
 };
 
 /**
@@ -140,6 +159,14 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
 					enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
 				enum dma_data_direction);
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+			     enum dma_data_direction dir);
+void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+			    enum dma_data_direction dir);
+void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
+void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
+void *dma_buf_kmap(struct dma_buf *, unsigned long);
+void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
 #else
 
 static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
@@ -188,6 +215,38 @@ static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 	return;
 }
 
+static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+					   size_t start, size_t len,
+					   enum dma_data_direction dir)
+{
+	return -ENODEV;
+}
+
+static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+					  size_t start, size_t len,
+					  enum dma_data_direction dir)
+{
+}
+
+static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long pnum)
+{
+	return NULL;
+}
+
+static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long pnum,
+					 void *vaddr)
+{
+}
+
+static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
+{
+	return NULL;
+}
+
+static inline void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long pnum,
+				  void *vaddr)
+{
+}
 #endif /* CONFIG_DMA_SHARED_BUFFER */
 
 #endif /* __DMA_BUF_H__ */
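
For the exporter side, a minimal sketch of how the new dma_buf_ops callbacks might be implemented, assuming a hypothetical exporter whose buffer is backed by a plain array of system pages. struct my_exporter_buf and the my_* callbacks are illustrative names, not part of this patch, and the mandatory attach/map_dma_buf/unmap_dma_buf/release callbacks are omitted for brevity.

#include <linux/dma-buf.h>
#include <linux/highmem.h>

/* Hypothetical exporter private data (an assumption for this sketch):
 * the buffer is simply an array of individually allocated pages. */
struct my_exporter_buf {
	struct page **pages;
	size_t nr_pages;
};

static int my_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
			       size_t len, enum dma_data_direction dir)
{
	/* Nothing to do for plain cached system pages in this sketch; a real
	 * exporter would flush/invalidate caches or pin backing storage here,
	 * since this is the only cpu-access callback allowed to fail. */
	return 0;
}

static void *my_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	struct my_exporter_buf *buf = dmabuf->priv;

	return kmap_atomic(buf->pages[page_num]);
}

static void my_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			     void *vaddr)
{
	kunmap_atomic(vaddr);
}

static void *my_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	struct my_exporter_buf *buf = dmabuf->priv;

	return kmap(buf->pages[page_num]);
}

static void my_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		      void *vaddr)
{
	struct my_exporter_buf *buf = dmabuf->priv;

	kunmap(buf->pages[page_num]);
}

static const struct dma_buf_ops my_dmabuf_ops = {
	/* .attach/.detach/.map_dma_buf/.unmap_dma_buf/.release omitted */
	.begin_cpu_access = my_begin_cpu_access,
	.kmap_atomic      = my_kmap_atomic,
	.kunmap_atomic    = my_kunmap_atomic,
	.kmap             = my_kmap,
	.kunmap           = my_kunmap,
};

Because dma_buf_export() now WARNs and returns -EINVAL when kmap_atomic or kmap is missing, an exporter must provide at least those two callbacks; the remaining cpu-access callbacks are optional.
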