aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/ion/ion_priv.h
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/ion/ion_priv.h')
-rw-r--r--drivers/gpu/ion/ion_priv.h293
1 file changed, 293 insertions, 0 deletions
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
new file mode 100644
index 00000000000..bfe26da9c04
--- /dev/null
+++ b/drivers/gpu/ion/ion_priv.h
@@ -0,0 +1,293 @@
1/*
2 * drivers/gpu/ion/ion_priv.h
3 *
4 * Copyright (C) 2011 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#ifndef _ION_PRIV_H
18#define _ION_PRIV_H
19
20#include <linux/kref.h>
21#include <linux/mm_types.h>
22#include <linux/mutex.h>
23#include <linux/rbtree.h>
24#include <linux/ion.h>
25#include <linux/miscdevice.h>
26
/* Opaque mapping type; defined elsewhere in the ion implementation. */
struct ion_mapping;

/**
 * struct ion_dma_mapping - refcounted dma mapping of a buffer
 * @ref:	reference count on this mapping
 * @sglist:	scatterlist describing the memory backing the mapping
 */
struct ion_dma_mapping {
	struct kref ref;
	struct scatterlist *sglist;
};
33
/**
 * struct ion_kernel_mapping - refcounted kernel-virtual mapping of a buffer
 * @ref:	reference count on this mapping
 * @vaddr:	kernel virtual address the buffer is mapped at
 */
struct ion_kernel_mapping {
	struct kref ref;
	void *vaddr;
};
38
/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	arch-specific ioctl hook, may be NULL
 * @user_clients:	list of all the clients created from userspace
 * @kernel_clients:	list of all the clients created from the kernel
 * @debug_root:		debugfs root directory for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root user_clients;
	struct rb_root kernel_clients;
	struct dentry *debug_root;
};
58
/**
 * struct ion_client - a process/hw block local address space
 * @ref:		for reference counting the client
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the owning task, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};
86
/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @dmap_cnt:		count of times this client has mapped for dma
 * @usermap_cnt:	count of times this client has mapped for userspace
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int dmap_cnt;
	unsigned int usermap_cnt;
};
109
/*
 * Internal helpers shared between ion.c and the heap implementations.
 * Implementations live in ion.c; the comments below describe the intended
 * contracts as suggested by the names — confirm against ion.c.
 */

/* returns true if @handle is currently a valid handle of @client */
bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle);

/* take a reference on @buffer */
void ion_buffer_get(struct ion_buffer *buffer);

/* return the buffer a handle refers to */
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);

/* look up a client from a file descriptor; presumably takes a reference */
struct ion_client *ion_client_get_file(int fd);

/* take a reference on @client */
void ion_client_get(struct ion_client *client);

/* drop a reference on @client; see ion.c for the return value semantics */
int ion_client_put(struct ion_client *client);

/* take a reference on @handle */
void ion_handle_get(struct ion_handle *handle);

/* drop a reference on @handle; see ion.c for the return value semantics */
int ion_handle_put(struct ion_handle *handle);

/* allocate a new handle for @client referring to @buffer */
struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer);

/* insert @handle into @client's handle rbtree */
void ion_handle_add(struct ion_client *client, struct ion_handle *handle);

/* remap the buffer behind @handle for dma at @addr — see ion.c */
int ion_remap_dma(struct ion_client *client,
		  struct ion_handle *handle,
		  unsigned long addr);
/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref:		reference count
 * @node:		node in the ion_device buffers tree
 * @dev:		back pointer to the ion_device
 * @heap:		back pointer to the heap the buffer came from
 * @flags:		buffer specific flags
 * @size:		size of the buffer
 * @priv_virt:		private data to the buffer representable as
 *			a void *
 * @priv_phys:		private data to the buffer representable as
 *			an ion_phys_addr_t (and someday a phys_addr_t)
 * @lock:		protects the buffers cnt fields
 * @kmap_cnt:		number of times the buffer is mapped to the kernel
 * @vaddr:		the kernel mapping if kmap_cnt is not zero
 * @dmap_cnt:		number of times the buffer is mapped for dma
 * @sglist:		the scatterlist for the buffer if dmap_cnt is not zero
 * @pages:		list of pages allocated for the buffer
 */
struct ion_buffer {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
	size_t size;
	union {
		void *priv_virt;
		ion_phys_addr_t priv_phys;
	};
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	int dmap_cnt;
	struct scatterlist *sglist;
	struct page **pages;
};
171
/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory
 * @phys:		get physical address of a buffer (only define on
 *			physically contiguous heaps)
 * @map_dma:		map the memory for dma to a scatterlist
 * @unmap_dma:		unmap the memory for dma
 * @map_kernel:		map memory to the kernel
 * @unmap_kernel:	unmap memory to the kernel
 * @map_user:		map memory to userspace
 */
struct ion_heap_ops {
	int (*allocate) (struct ion_heap *heap,
			 struct ion_buffer *buffer, unsigned long len,
			 unsigned long align, unsigned long flags);
	void (*free) (struct ion_buffer *buffer);
	int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
		     ion_phys_addr_t *addr, size_t *len);
	struct scatterlist *(*map_dma) (struct ion_heap *heap,
					struct ion_buffer *buffer);
	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
			 struct vm_area_struct *vma);
};
199
/**
 * struct ion_heap - represents a heap in the system
 * @node:	rb node to put the heap on the device's tree of heaps
 * @dev:	back pointer to the ion_device
 * @type:	type of heap
 * @ops:	ops struct as above
 * @id:		id of heap, also indicates priority of this heap when
 *		allocating. These are specified by platform data and
 *		MUST be unique
 * @name:	used for debugging
 *
 * Represents a pool of memory from which buffers can be made. In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct rb_node node;
	struct ion_device *dev;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	int id;
	const char *name;
};
224
/**
 * ion_device_create - allocates and returns an ion device
 * @custom_ioctl:	arch specific ioctl function if applicable
 *
 * returns a valid device or ERR_PTR(-errno) on failure
 */
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg));
235
/**
 * ion_device_destroy - frees a device and its resources
 * @dev:	the device
 */
void ion_device_destroy(struct ion_device *dev);
241
/**
 * ion_device_add_heap - adds a heap to the ion device
 * @dev:	the device
 * @heap:	the heap to add
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
248
/**
 * functions for creating and destroying the built in ion heaps.
 * architectures can add their own custom architecture specific
 * heaps as appropriate.
 */

/* generic dispatcher: creates a heap of the type named in the platform data */
struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);

/* system heap: buffers backed by regular, non-contiguous system memory */
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);

/* system contig heap: physically contiguous system memory */
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);

/* carveout heap: memory reserved at boot, handed out by physical address */
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);
/**
 * kernel api to allocate/free from carveout -- used when carveout is
 * used to back an architecture specific custom heap
 */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
				      unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size);
274
#ifdef CONFIG_ION_IOMMU
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
void ion_iommu_heap_destroy(struct ion_heap *);
#else
/*
 * Stubs used when CONFIG_ION_IOMMU is disabled.
 *
 * Unlike a bare declaration, a function *definition* must name its
 * parameters in C (unnamed parameters in definitions are invalid before
 * C23), so the stub definitions below name them explicitly.
 */
static inline struct ion_heap *
ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	return NULL;
}
static inline void ion_iommu_heap_destroy(struct ion_heap *heap)
{
}
#endif
/**
 * The carveout heap returns physical addresses; since 0 may be a valid
 * physical address, this value is used to indicate allocation failed.
 * Parenthesized so the macro expands safely inside larger expressions.
 */
#define ION_CARVEOUT_ALLOCATE_FAIL (-1)
292
293#endif /* _ION_PRIV_H */