Diffstat (limited to 'include/linux/dma-buf.h')
-rw-r--r--	include/linux/dma-buf.h	21
1 file changed, 8 insertions, 13 deletions
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 085db2fee2d7..58725f890b5b 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -39,12 +39,12 @@ struct dma_buf_attachment;
 
 /**
  * struct dma_buf_ops - operations possible on struct dma_buf
- * @map_atomic: maps a page from the buffer into kernel address
+ * @map_atomic: [optional] maps a page from the buffer into kernel address
  *		space, users may not block until the subsequent unmap call.
  *		This callback must not sleep.
  * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
  *		  This Callback must not sleep.
- * @map: maps a page from the buffer into kernel address space.
+ * @map: [optional] maps a page from the buffer into kernel address space.
  * @unmap: [optional] unmaps a page from the buffer.
  * @vmap: [optional] creates a virtual mapping for the buffer into kernel
  *	   address space. Same restrictions as for vmap and friends apply.
@@ -55,11 +55,11 @@ struct dma_buf_ops {
 	 * @attach:
 	 *
 	 * This is called from dma_buf_attach() to make sure that a given
-	 * &device can access the provided &dma_buf. Exporters which support
-	 * buffer objects in special locations like VRAM or device-specific
-	 * carveout areas should check whether the buffer could be move to
-	 * system memory (or directly accessed by the provided device), and
-	 * otherwise need to fail the attach operation.
+	 * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
+	 * which support buffer objects in special locations like VRAM or
+	 * device-specific carveout areas should check whether the buffer could
+	 * be move to system memory (or directly accessed by the provided
+	 * device), and otherwise need to fail the attach operation.
 	 *
 	 * The exporter should also in general check whether the current
 	 * allocation fullfills the DMA constraints of the new device. If this
@@ -77,8 +77,7 @@ struct dma_buf_ops {
 	 * to signal that backing storage is already allocated and incompatible
 	 * with the requirements of requesting device.
 	 */
-	int (*attach)(struct dma_buf *, struct device *,
-		      struct dma_buf_attachment *);
+	int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
 
 	/**
 	 * @detach:
@@ -206,8 +205,6 @@ struct dma_buf_ops {
 	 * to be restarted.
 	 */
 	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
-	void *(*map_atomic)(struct dma_buf *, unsigned long);
-	void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
 	void *(*map)(struct dma_buf *, unsigned long);
 	void (*unmap)(struct dma_buf *, unsigned long, void *);
 
@@ -395,8 +392,6 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 			     enum dma_data_direction dir);
 int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
 			   enum dma_data_direction dir);
-void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
-void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
 void *dma_buf_kmap(struct dma_buf *, unsigned long);
 void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
 
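
For reference, a minimal exporter-side sketch of the changed @attach callback. This is not part of the patch above, and the my_exporter_attach name is hypothetical; it only illustrates that the separate struct device argument is gone and the attaching device is now reached through the attachment's dev field, as the updated kerneldoc describes.

/*
 * Sketch only (assumption, not taken from this patch): exporter-side attach
 * callback against the updated signature.  my_exporter_attach is a made-up
 * name.
 */
#include <linux/dma-buf.h>
#include <linux/device.h>

static int my_exporter_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attach)
{
	/* The attaching device is taken from the attachment itself. */
	struct device *dev = attach->dev;

	/*
	 * An exporter that keeps buffers in VRAM or a carveout would check
	 * here whether 'dev' can reach the backing storage (or whether the
	 * buffer can be moved to system memory), and fail the attach
	 * otherwise.
	 */
	if (!dev)
		return -ENODEV;

	return 0;
}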
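Likewise, a minimal importer-side sketch of CPU access once the atomic kmap variants are removed, using only the interfaces that remain in the hunks above (again an assumption; my_importer_read_page is a made-up helper):

/*
 * Hypothetical importer helper: with dma_buf_kmap_atomic()/kunmap_atomic()
 * gone, CPU access goes through the remaining, sleepable interface.
 */
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>

static int my_importer_read_page(struct dma_buf *dmabuf, unsigned long pgnum)
{
	void *vaddr;
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/*
	 * Map one page of the buffer; this may sleep, unlike the removed
	 * _atomic variants.
	 */
	vaddr = dma_buf_kmap(dmabuf, pgnum);
	if (vaddr) {
		/* ... read the page contents through vaddr ... */
		dma_buf_kunmap(dmabuf, pgnum, vaddr);
	}

	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}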