| author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
|---|---|---|
| committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
| commit | fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch) | |
| tree | a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/gpu/ion | |
| parent | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff) | |
Diffstat (limited to 'drivers/gpu/ion')
| -rw-r--r-- | drivers/gpu/ion/Kconfig | 17 |
| -rw-r--r-- | drivers/gpu/ion/Makefile | 3 |
| -rw-r--r-- | drivers/gpu/ion/ion.c | 1152 |
| -rw-r--r-- | drivers/gpu/ion/ion_carveout_heap.c | 162 |
| -rw-r--r-- | drivers/gpu/ion/ion_heap.c | 78 |
| -rw-r--r-- | drivers/gpu/ion/ion_iommu_heap.c | 382 |
| -rw-r--r-- | drivers/gpu/ion/ion_priv.h | 293 |
| -rw-r--r-- | drivers/gpu/ion/ion_system_heap.c | 198 |
| -rw-r--r-- | drivers/gpu/ion/ion_system_mapper.c | 114 |
| -rw-r--r-- | drivers/gpu/ion/tegra/Makefile | 1 |
| -rw-r--r-- | drivers/gpu/ion/tegra/tegra_ion.c | 599 |
11 files changed, 2999 insertions, 0 deletions
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
new file mode 100644
index 00000000000..9a8cbdd9836
--- /dev/null
+++ b/drivers/gpu/ion/Kconfig
@@ -0,0 +1,17 @@
| 1 | menuconfig ION | ||
| 2 | tristate "Ion Memory Manager" | ||
| 3 | select GENERIC_ALLOCATOR | ||
| 4 | help | ||
| 5 | Choose this option to enable the ION Memory Manager. | ||
| 6 | |||
| 7 | config ION_IOMMU | ||
| 8 | bool | ||
| 9 | |||
| 10 | config ION_TEGRA | ||
| 11 | tristate "Ion for Tegra" | ||
| 12 | depends on ARCH_TEGRA && ION | ||
| 13 | select TEGRA_IOMMU_SMMU if !ARCH_TEGRA_2x_SOC | ||
| 14 | select ION_IOMMU if TEGRA_IOMMU_SMMU | ||
| 15 | help | ||
| 16 | Choose this option if you wish to use Ion on an NVIDIA Tegra. | ||
| 17 | |||
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
new file mode 100644
index 00000000000..4ddc78e9d41
--- /dev/null
+++ b/drivers/gpu/ion/Makefile
@@ -0,0 +1,3 @@
| 1 | obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o | ||
| 2 | obj-$(CONFIG_ION_IOMMU) += ion_iommu_heap.o | ||
| 3 | obj-$(CONFIG_ION_TEGRA) += tegra/ | ||
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
new file mode 100644
index 00000000000..512ebc5cc8e
--- /dev/null
+++ b/drivers/gpu/ion/ion.c
@@ -0,0 +1,1152 @@
| 1 | /* | ||
| 2 | * drivers/gpu/ion/ion.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Google, Inc. | ||
| 5 | * | ||
| 6 | * This software is licensed under the terms of the GNU General Public | ||
| 7 | * License version 2, as published by the Free Software Foundation, and | ||
| 8 | * may be copied, distributed, and modified under those terms. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | */ | ||
| 16 | |||
| 17 | #define pr_fmt(fmt) "%s():%d: " fmt, __func__, __LINE__ | ||
| 18 | |||
| 19 | #include <linux/device.h> | ||
| 20 | #include <linux/file.h> | ||
| 21 | #include <linux/fs.h> | ||
| 22 | #include <linux/anon_inodes.h> | ||
| 23 | #include <linux/ion.h> | ||
| 24 | #include <linux/list.h> | ||
| 25 | #include <linux/miscdevice.h> | ||
| 26 | #include <linux/mm.h> | ||
| 27 | #include <linux/mm_types.h> | ||
| 28 | #include <linux/rbtree.h> | ||
| 29 | #include <linux/sched.h> | ||
| 30 | #include <linux/slab.h> | ||
| 31 | #include <linux/seq_file.h> | ||
| 32 | #include <linux/uaccess.h> | ||
| 33 | #include <linux/debugfs.h> | ||
| 34 | |||
| 35 | #include "ion_priv.h" | ||
| 36 | #define DEBUG | ||
| 37 | |||
| 38 | /* this function should only be called while dev->lock is held */ | ||
| 39 | static void ion_buffer_add(struct ion_device *dev, | ||
| 40 | struct ion_buffer *buffer) | ||
| 41 | { | ||
| 42 | struct rb_node **p = &dev->buffers.rb_node; | ||
| 43 | struct rb_node *parent = NULL; | ||
| 44 | struct ion_buffer *entry; | ||
| 45 | |||
| 46 | while (*p) { | ||
| 47 | parent = *p; | ||
| 48 | entry = rb_entry(parent, struct ion_buffer, node); | ||
| 49 | |||
| 50 | if (buffer < entry) { | ||
| 51 | p = &(*p)->rb_left; | ||
| 52 | } else if (buffer > entry) { | ||
| 53 | p = &(*p)->rb_right; | ||
| 54 | } else { | ||
| 55 | pr_err("buffer already found."); | ||
| 56 | BUG(); | ||
| 57 | } | ||
| 58 | } | ||
| 59 | |||
| 60 | rb_link_node(&buffer->node, parent, p); | ||
| 61 | rb_insert_color(&buffer->node, &dev->buffers); | ||
| 62 | } | ||
| 63 | |||
| 64 | /* this function should only be called while dev->lock is held */ | ||
| 65 | static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, | ||
| 66 | struct ion_device *dev, | ||
| 67 | unsigned long len, | ||
| 68 | unsigned long align, | ||
| 69 | unsigned long flags) | ||
| 70 | { | ||
| 71 | struct ion_buffer *buffer; | ||
| 72 | int ret; | ||
| 73 | |||
| 74 | buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); | ||
| 75 | if (!buffer) | ||
| 76 | return ERR_PTR(-ENOMEM); | ||
| 77 | |||
| 78 | buffer->heap = heap; | ||
| 79 | kref_init(&buffer->ref); | ||
| 80 | |||
| 81 | ret = heap->ops->allocate(heap, buffer, len, align, flags); | ||
| 82 | if (ret) { | ||
| 83 | kfree(buffer); | ||
| 84 | return ERR_PTR(ret); | ||
| 85 | } | ||
| 86 | buffer->dev = dev; | ||
| 87 | buffer->size = len; | ||
| 88 | mutex_init(&buffer->lock); | ||
| 89 | ion_buffer_add(dev, buffer); | ||
| 90 | return buffer; | ||
| 91 | } | ||
| 92 | |||
| 93 | static void ion_buffer_destroy(struct kref *kref) | ||
| 94 | { | ||
| 95 | struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); | ||
| 96 | struct ion_device *dev = buffer->dev; | ||
| 97 | |||
| 98 | buffer->heap->ops->free(buffer); | ||
| 99 | mutex_lock(&dev->lock); | ||
| 100 | rb_erase(&buffer->node, &dev->buffers); | ||
| 101 | mutex_unlock(&dev->lock); | ||
| 102 | kfree(buffer); | ||
| 103 | } | ||
| 104 | |||
| 105 | void ion_buffer_get(struct ion_buffer *buffer) | ||
| 106 | { | ||
| 107 | kref_get(&buffer->ref); | ||
| 108 | } | ||
| 109 | |||
| 110 | static int ion_buffer_put(struct ion_buffer *buffer) | ||
| 111 | { | ||
| 112 | return kref_put(&buffer->ref, ion_buffer_destroy); | ||
| 113 | } | ||
| 114 | |||
| 115 | struct ion_handle *ion_handle_create(struct ion_client *client, | ||
| 116 | struct ion_buffer *buffer) | ||
| 117 | { | ||
| 118 | struct ion_handle *handle; | ||
| 119 | |||
| 120 | handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); | ||
| 121 | if (!handle) | ||
| 122 | return ERR_PTR(-ENOMEM); | ||
| 123 | kref_init(&handle->ref); | ||
| 124 | rb_init_node(&handle->node); | ||
| 125 | handle->client = client; | ||
| 126 | ion_buffer_get(buffer); | ||
| 127 | handle->buffer = buffer; | ||
| 128 | |||
| 129 | return handle; | ||
| 130 | } | ||
| 131 | |||
| 132 | static void ion_handle_destroy(struct kref *kref) | ||
| 133 | { | ||
| 134 | struct ion_handle *handle = container_of(kref, struct ion_handle, ref); | ||
| 135 | /* XXX Can a handle be destroyed while its map count is non-zero?: | ||
| 136 | if (handle->map_cnt) unmap | ||
| 137 | */ | ||
| 138 | ion_buffer_put(handle->buffer); | ||
| 139 | mutex_lock(&handle->client->lock); | ||
| 140 | if (!RB_EMPTY_NODE(&handle->node)) | ||
| 141 | rb_erase(&handle->node, &handle->client->handles); | ||
| 142 | mutex_unlock(&handle->client->lock); | ||
| 143 | kfree(handle); | ||
| 144 | } | ||
| 145 | |||
| 146 | struct ion_buffer *ion_handle_buffer(struct ion_handle *handle) | ||
| 147 | { | ||
| 148 | return handle->buffer; | ||
| 149 | } | ||
| 150 | |||
| 151 | void ion_handle_get(struct ion_handle *handle) | ||
| 152 | { | ||
| 153 | kref_get(&handle->ref); | ||
| 154 | } | ||
| 155 | |||
| 156 | int ion_handle_put(struct ion_handle *handle) | ||
| 157 | { | ||
| 158 | return kref_put(&handle->ref, ion_handle_destroy); | ||
| 159 | } | ||
| 160 | |||
| 161 | static struct ion_handle *ion_handle_lookup(struct ion_client *client, | ||
| 162 | struct ion_buffer *buffer) | ||
| 163 | { | ||
| 164 | struct rb_node *n; | ||
| 165 | |||
| 166 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { | ||
| 167 | struct ion_handle *handle = rb_entry(n, struct ion_handle, | ||
| 168 | node); | ||
| 169 | if (handle->buffer == buffer) | ||
| 170 | return handle; | ||
| 171 | } | ||
| 172 | return NULL; | ||
| 173 | } | ||
| 174 | |||
| 175 | bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle) | ||
| 176 | { | ||
| 177 | struct rb_node *n = client->handles.rb_node; | ||
| 178 | |||
| 179 | while (n) { | ||
| 180 | struct ion_handle *handle_node = rb_entry(n, struct ion_handle, | ||
| 181 | node); | ||
| 182 | if (handle < handle_node) | ||
| 183 | n = n->rb_left; | ||
| 184 | else if (handle > handle_node) | ||
| 185 | n = n->rb_right; | ||
| 186 | else | ||
| 187 | return true; | ||
| 188 | } | ||
| 189 | WARN(1, "invalid handle passed h=0x%x,comm=%d\n", handle, | ||
| 190 | current->group_leader->comm); | ||
| 191 | return false; | ||
| 192 | } | ||
| 193 | |||
| 194 | void ion_handle_add(struct ion_client *client, struct ion_handle *handle) | ||
| 195 | { | ||
| 196 | struct rb_node **p = &client->handles.rb_node; | ||
| 197 | struct rb_node *parent = NULL; | ||
| 198 | struct ion_handle *entry; | ||
| 199 | |||
| 200 | while (*p) { | ||
| 201 | parent = *p; | ||
| 202 | entry = rb_entry(parent, struct ion_handle, node); | ||
| 203 | |||
| 204 | if (handle < entry) | ||
| 205 | p = &(*p)->rb_left; | ||
| 206 | else if (handle > entry) | ||
| 207 | p = &(*p)->rb_right; | ||
| 208 | else | ||
| 209 | WARN(1, "%s: buffer already found.", __func__); | ||
| 210 | } | ||
| 211 | |||
| 212 | rb_link_node(&handle->node, parent, p); | ||
| 213 | rb_insert_color(&handle->node, &client->handles); | ||
| 214 | } | ||
| 215 | |||
| 216 | struct ion_handle *ion_alloc(struct ion_client *client, size_t len, | ||
| 217 | size_t align, unsigned int flags) | ||
| 218 | { | ||
| 219 | struct rb_node *n; | ||
| 220 | struct ion_handle *handle; | ||
| 221 | struct ion_device *dev = client->dev; | ||
| 222 | struct ion_buffer *buffer = NULL; | ||
| 223 | |||
| 224 | /* | ||
| 225 | * traverse the list of heaps available in this system in priority | ||
| 226 | * order. If the heap type is supported by the client, and matches the | ||
| 227 | * request of the caller allocate from it. Repeat until allocate has | ||
| 228 | * succeeded or all heaps have been tried | ||
| 229 | */ | ||
| 230 | mutex_lock(&dev->lock); | ||
| 231 | for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { | ||
| 232 | struct ion_heap *heap = rb_entry(n, struct ion_heap, node); | ||
| 233 | /* if the client doesn't support this heap type */ | ||
| 234 | if (!((1 << heap->type) & client->heap_mask)) | ||
| 235 | continue; | ||
| 236 | /* if the caller didn't specify this heap type */ | ||
| 237 | if (!((1 << heap->id) & flags)) | ||
| 238 | continue; | ||
| 239 | buffer = ion_buffer_create(heap, dev, len, align, flags); | ||
| 240 | if (!IS_ERR_OR_NULL(buffer)) | ||
| 241 | break; | ||
| 242 | } | ||
| 243 | mutex_unlock(&dev->lock); | ||
| 244 | |||
| 245 | if (IS_ERR_OR_NULL(buffer)) | ||
| 246 | return ERR_PTR(PTR_ERR(buffer)); | ||
| 247 | |||
| 248 | handle = ion_handle_create(client, buffer); | ||
| 249 | |||
| 250 | if (IS_ERR_OR_NULL(handle)) | ||
| 251 | goto end; | ||
| 252 | |||
| 253 | /* | ||
| 254 | * ion_buffer_create will create a buffer with a ref_cnt of 1, | ||
| 255 | * and ion_handle_create will take a second reference, drop one here | ||
| 256 | */ | ||
| 257 | ion_buffer_put(buffer); | ||
| 258 | |||
| 259 | mutex_lock(&client->lock); | ||
| 260 | ion_handle_add(client, handle); | ||
| 261 | mutex_unlock(&client->lock); | ||
| 262 | return handle; | ||
| 263 | |||
| 264 | end: | ||
| 265 | ion_buffer_put(buffer); | ||
| 266 | return handle; | ||
| 267 | } | ||
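For orientation (not part of the patch): the allocation path above walks the heap rbtree in id order, with the client's `heap_mask` filtering by heap *type* and the caller's `flags` doubling as a heap *id* mask. A minimal kernel-side usage sketch of this API, assuming a device `idev` and a heap with the placeholder id `MY_HEAP_ID` were registered elsewhere:

```c
#include <linux/err.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/string.h>

#define MY_HEAP_ID 1	/* hypothetical id assigned by the board file */

static int example_ion_use(struct ion_device *idev)
{
	const size_t len = 64 * 1024;
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;

	/* heap_mask is a type mask: bit n allows heaps of type n */
	client = ion_client_create(idev, 1 << ION_HEAP_TYPE_CARVEOUT,
				   "example");
	if (IS_ERR_OR_NULL(client))
		return -ENOMEM;

	/* flags is an id mask here: bit n requests the heap with id n */
	handle = ion_alloc(client, len, PAGE_SIZE, 1 << MY_HEAP_ID);
	if (IS_ERR_OR_NULL(handle)) {
		ion_client_destroy(client);
		return -ENOMEM;
	}

	vaddr = ion_map_kernel(client, handle);	/* first map does real work */
	if (!IS_ERR_OR_NULL(vaddr)) {
		memset(vaddr, 0, len);
		ion_unmap_kernel(client, handle); /* last unmap tears it down */
	}

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}
```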
| 268 | |||
| 269 | void ion_free(struct ion_client *client, struct ion_handle *handle) | ||
| 270 | { | ||
| 271 | bool valid_handle; | ||
| 272 | |||
| 273 | BUG_ON(client != handle->client); | ||
| 274 | |||
| 275 | mutex_lock(&client->lock); | ||
| 276 | valid_handle = ion_handle_validate(client, handle); | ||
| 277 | mutex_unlock(&client->lock); | ||
| 278 | |||
| 279 | if (!valid_handle) { | ||
| 280 | WARN(1, "%s: invalid handle passed to free.\n", __func__); | ||
| 281 | return; | ||
| 282 | } | ||
| 283 | ion_handle_put(handle); | ||
| 284 | } | ||
| 285 | |||
| 286 | static bool _ion_map(int *buffer_cnt, int *handle_cnt) | ||
| 287 | { | ||
| 288 | bool map; | ||
| 289 | |||
| 290 | BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0); | ||
| 291 | |||
| 292 | if (*buffer_cnt) | ||
| 293 | map = false; | ||
| 294 | else | ||
| 295 | map = true; | ||
| 296 | if (*handle_cnt == 0) | ||
| 297 | (*buffer_cnt)++; | ||
| 298 | (*handle_cnt)++; | ||
| 299 | return map; | ||
| 300 | } | ||
| 301 | |||
| 302 | static bool _ion_unmap(int *buffer_cnt, int *handle_cnt) | ||
| 303 | { | ||
| 304 | BUG_ON(*handle_cnt == 0); | ||
| 305 | (*handle_cnt)--; | ||
| 306 | if (*handle_cnt != 0) | ||
| 307 | return false; | ||
| 308 | BUG_ON(*buffer_cnt == 0); | ||
| 309 | (*buffer_cnt)--; | ||
| 310 | if (*buffer_cnt == 0) | ||
| 311 | return true; | ||
| 312 | return false; | ||
| 313 | } | ||
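`_ion_map()`/`_ion_unmap()` keep two counts: `handle_cnt` (maps taken through one handle) and `buffer_cnt` (handles that currently hold a mapping). The heap's map/unmap op runs only on `buffer_cnt`'s 0→1 and 1→0 edges. A worked trace of that contract, using plain locals with no locking, purely for illustration:

```c
int buffer_cnt = 0;		/* handles that currently map the buffer */
int handle_a = 0, handle_b = 0;	/* per-handle map counts */

_ion_map(&buffer_cnt, &handle_a);	/* true:  0->1, do the real map   */
_ion_map(&buffer_cnt, &handle_a);	/* false: buffer already mapped   */
_ion_map(&buffer_cnt, &handle_b);	/* false: buffer already mapped   */

_ion_unmap(&buffer_cnt, &handle_a);	/* false: handle_a still counts   */
_ion_unmap(&buffer_cnt, &handle_a);	/* false: handle_b still maps it  */
_ion_unmap(&buffer_cnt, &handle_b);	/* true:  1->0, do the real unmap */
```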
| 314 | |||
| 315 | int ion_phys(struct ion_client *client, struct ion_handle *handle, | ||
| 316 | ion_phys_addr_t *addr, size_t *len) | ||
| 317 | { | ||
| 318 | struct ion_buffer *buffer; | ||
| 319 | int ret; | ||
| 320 | |||
| 321 | mutex_lock(&client->lock); | ||
| 322 | if (!ion_handle_validate(client, handle)) { | ||
| 323 | mutex_unlock(&client->lock); | ||
| 324 | return -EINVAL; | ||
| 325 | } | ||
| 326 | |||
| 327 | buffer = handle->buffer; | ||
| 328 | |||
| 329 | if (!buffer->heap->ops->phys) { | ||
| 330 | pr_err("ion_phys is not implemented by this heap.\n"); | ||
| 331 | mutex_unlock(&client->lock); | ||
| 332 | return -ENODEV; | ||
| 333 | } | ||
| 334 | mutex_unlock(&client->lock); | ||
| 335 | ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len); | ||
| 336 | return ret; | ||
| 337 | } | ||
| 338 | |||
| 339 | void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) | ||
| 340 | { | ||
| 341 | struct ion_buffer *buffer; | ||
| 342 | void *vaddr; | ||
| 343 | |||
| 344 | mutex_lock(&client->lock); | ||
| 345 | if (!ion_handle_validate(client, handle)) { | ||
| 346 | WARN(1, "invalid handle passed to map_kernel.\n"); | ||
| 347 | mutex_unlock(&client->lock); | ||
| 348 | return ERR_PTR(-EINVAL); | ||
| 349 | } | ||
| 350 | |||
| 351 | buffer = handle->buffer; | ||
| 352 | mutex_lock(&buffer->lock); | ||
| 353 | |||
| 354 | if (!handle->buffer->heap->ops->map_kernel) { | ||
| 355 | pr_err("map_kernel is not implemented by this heap.\n"); | ||
| 356 | mutex_unlock(&buffer->lock); | ||
| 357 | mutex_unlock(&client->lock); | ||
| 358 | return ERR_PTR(-ENODEV); | ||
| 359 | } | ||
| 360 | |||
| 361 | if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) { | ||
| 362 | vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); | ||
| 363 | if (IS_ERR_OR_NULL(vaddr)) | ||
| 364 | _ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt); | ||
| 365 | buffer->vaddr = vaddr; | ||
| 366 | } else { | ||
| 367 | vaddr = buffer->vaddr; | ||
| 368 | } | ||
| 369 | mutex_unlock(&buffer->lock); | ||
| 370 | mutex_unlock(&client->lock); | ||
| 371 | return vaddr; | ||
| 372 | } | ||
| 373 | |||
| 374 | struct scatterlist *ion_map_dma(struct ion_client *client, | ||
| 375 | struct ion_handle *handle) | ||
| 376 | { | ||
| 377 | struct ion_buffer *buffer; | ||
| 378 | struct scatterlist *sglist; | ||
| 379 | |||
| 380 | mutex_lock(&client->lock); | ||
| 381 | if (!ion_handle_validate(client, handle)) { | ||
| 382 | WARN(1, "invalid handle passed to map_dma.\n"); | ||
| 383 | mutex_unlock(&client->lock); | ||
| 384 | return ERR_PTR(-EINVAL); | ||
| 385 | } | ||
| 386 | buffer = handle->buffer; | ||
| 387 | mutex_lock(&buffer->lock); | ||
| 388 | |||
| 389 | if (!handle->buffer->heap->ops->map_dma) { | ||
| 390 | pr_err("map_kernel is not implemented by this heap.\n"); | ||
| 391 | mutex_unlock(&buffer->lock); | ||
| 392 | mutex_unlock(&client->lock); | ||
| 393 | return ERR_PTR(-ENODEV); | ||
| 394 | } | ||
| 395 | if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) { | ||
| 396 | sglist = buffer->heap->ops->map_dma(buffer->heap, buffer); | ||
| 397 | if (IS_ERR_OR_NULL(sglist)) | ||
| 398 | _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt); | ||
| 399 | buffer->sglist = sglist; | ||
| 400 | } else { | ||
| 401 | sglist = buffer->sglist; | ||
| 402 | } | ||
| 403 | mutex_unlock(&buffer->lock); | ||
| 404 | mutex_unlock(&client->lock); | ||
| 405 | return sglist; | ||
| 406 | } | ||
| 407 | |||
| 408 | struct scatterlist *iommu_heap_remap_dma(struct ion_heap *heap, | ||
| 409 | struct ion_buffer *buf, | ||
| 410 | unsigned long addr); | ||
| 411 | int ion_remap_dma(struct ion_client *client, | ||
| 412 | struct ion_handle *handle, | ||
| 413 | unsigned long addr) | ||
| 414 | { | ||
| 415 | struct ion_buffer *buffer; | ||
| 416 | int ret; | ||
| 417 | |||
| 418 | mutex_lock(&client->lock); | ||
| 419 | if (!ion_handle_validate(client, handle)) { | ||
| 420 | pr_err("invalid handle passed to map_dma.\n"); | ||
| 421 | mutex_unlock(&client->lock); | ||
| 422 | return -EINVAL; | ||
| 423 | } | ||
| 424 | buffer = handle->buffer; | ||
| 425 | mutex_lock(&buffer->lock); | ||
| 426 | |||
| 427 | ret = iommu_heap_remap_dma(buffer->heap, buffer, addr); | ||
| 428 | |||
| 429 | mutex_unlock(&buffer->lock); | ||
| 430 | mutex_unlock(&client->lock); | ||
| 431 | return ret; | ||
| 432 | } | ||
| 433 | |||
| 434 | void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) | ||
| 435 | { | ||
| 436 | struct ion_buffer *buffer; | ||
| 437 | |||
| 438 | mutex_lock(&client->lock); | ||
| 439 | buffer = handle->buffer; | ||
| 440 | mutex_lock(&buffer->lock); | ||
| 441 | if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) { | ||
| 442 | buffer->heap->ops->unmap_kernel(buffer->heap, buffer); | ||
| 443 | buffer->vaddr = NULL; | ||
| 444 | } | ||
| 445 | mutex_unlock(&buffer->lock); | ||
| 446 | mutex_unlock(&client->lock); | ||
| 447 | } | ||
| 448 | |||
| 449 | void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle) | ||
| 450 | { | ||
| 451 | struct ion_buffer *buffer; | ||
| 452 | |||
| 453 | mutex_lock(&client->lock); | ||
| 454 | buffer = handle->buffer; | ||
| 455 | mutex_lock(&buffer->lock); | ||
| 456 | if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) { | ||
| 457 | buffer->heap->ops->unmap_dma(buffer->heap, buffer); | ||
| 458 | buffer->sglist = NULL; | ||
| 459 | } | ||
| 460 | mutex_unlock(&buffer->lock); | ||
| 461 | mutex_unlock(&client->lock); | ||
| 462 | } | ||
| 463 | |||
| 464 | |||
| 465 | struct ion_buffer *ion_share(struct ion_client *client, | ||
| 466 | struct ion_handle *handle) | ||
| 467 | { | ||
| 468 | bool valid_handle; | ||
| 469 | |||
| 470 | mutex_lock(&client->lock); | ||
| 471 | valid_handle = ion_handle_validate(client, handle); | ||
| 472 | mutex_unlock(&client->lock); | ||
| 473 | if (!valid_handle) { | ||
| 474 | WARN(1, "%s: invalid handle passed to share.\n", __func__); | ||
| 475 | return ERR_PTR(-EINVAL); | ||
| 476 | } | ||
| 477 | |||
| 478 | /* do not take an extra reference here, the burden is on the caller | ||
| 479 | * to make sure the buffer doesn't go away while it's passing it | ||
| 480 | * to another client -- ion_free should not be called on this handle | ||
| 481 | * until the buffer has been imported into the other client | ||
| 482 | */ | ||
| 483 | return handle->buffer; | ||
| 484 | } | ||
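Per the comment in ion_share(), the exporter must keep the buffer alive until the importer holds its own handle. A hedged kernel-side sketch of that hand-off between two existing clients (client setup elided; not part of the patch):

```c
/* Sketch: hand a buffer from client `src` to client `dst` under the
 * ion_share() contract -- src must not free its handle until dst has
 * imported the buffer. */
static int example_pass_buffer(struct ion_client *src,
			       struct ion_handle *h,
			       struct ion_client *dst)
{
	struct ion_buffer *buf;
	struct ion_handle *imported;

	buf = ion_share(src, h);		/* takes no extra reference */
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	imported = ion_import(dst, buf);	/* dst gets its own handle */
	if (IS_ERR_OR_NULL(imported))
		return -EINVAL;

	ion_free(src, h);	/* safe now: dst's handle pins the buffer */
	return 0;
}
```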
| 485 | |||
| 486 | struct ion_handle *ion_import(struct ion_client *client, | ||
| 487 | struct ion_buffer *buffer) | ||
| 488 | { | ||
| 489 | struct ion_handle *handle = NULL; | ||
| 490 | |||
| 491 | mutex_lock(&client->lock); | ||
| 492 | /* if a handle exists for this buffer just take a reference to it */ | ||
| 493 | handle = ion_handle_lookup(client, buffer); | ||
| 494 | if (!IS_ERR_OR_NULL(handle)) { | ||
| 495 | ion_handle_get(handle); | ||
| 496 | goto end; | ||
| 497 | } | ||
| 498 | handle = ion_handle_create(client, buffer); | ||
| 499 | if (IS_ERR_OR_NULL(handle)) { | ||
| 500 | pr_err("error during handle create\n"); | ||
| 501 | goto end; | ||
| 502 | } | ||
| 503 | ion_handle_add(client, handle); | ||
| 504 | end: | ||
| 505 | mutex_unlock(&client->lock); | ||
| 506 | return handle; | ||
| 507 | } | ||
| 508 | |||
| 509 | static const struct file_operations ion_share_fops; | ||
| 510 | |||
| 511 | struct ion_handle *ion_import_fd(struct ion_client *client, int fd) | ||
| 512 | { | ||
| 513 | struct file *file = fget(fd); | ||
| 514 | struct ion_handle *handle; | ||
| 515 | |||
| 516 | if (!file) { | ||
| 517 | pr_err("imported fd not found in file table.\n"); | ||
| 518 | return ERR_PTR(-EINVAL); | ||
| 519 | } | ||
| 520 | if (file->f_op != &ion_share_fops) { | ||
| 521 | pr_err("imported file is not a shared ion file.\n"); | ||
| 522 | handle = ERR_PTR(-EINVAL); | ||
| 523 | goto end; | ||
| 524 | } | ||
| 525 | handle = ion_import(client, file->private_data); | ||
| 526 | end: | ||
| 527 | fput(file); | ||
| 528 | return handle; | ||
| 529 | } | ||
| 530 | |||
| 531 | static int ion_debug_client_show(struct seq_file *s, void *unused) | ||
| 532 | { | ||
| 533 | struct ion_client *client = s->private; | ||
| 534 | struct rb_node *n; | ||
| 535 | size_t sizes[ION_NUM_HEAPS] = {0}; | ||
| 536 | const char *names[ION_NUM_HEAPS] = {0}; | ||
| 537 | int i; | ||
| 538 | |||
| 539 | mutex_lock(&client->lock); | ||
| 540 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { | ||
| 541 | struct ion_handle *handle = rb_entry(n, struct ion_handle, | ||
| 542 | node); | ||
| 543 | enum ion_heap_type type = handle->buffer->heap->type; | ||
| 544 | |||
| 545 | if (!names[type]) | ||
| 546 | names[type] = handle->buffer->heap->name; | ||
| 547 | sizes[type] += handle->buffer->size; | ||
| 548 | } | ||
| 549 | mutex_unlock(&client->lock); | ||
| 550 | |||
| 551 | seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); | ||
| 552 | for (i = 0; i < ION_NUM_HEAPS; i++) { | ||
| 553 | if (!names[i]) | ||
| 554 | continue; | ||
| 555 | seq_printf(s, "%16.16s: %16u %d\n", names[i], sizes[i], | ||
| 556 | atomic_read(&client->ref.refcount)); | ||
| 557 | } | ||
| 558 | return 0; | ||
| 559 | } | ||
| 560 | |||
| 561 | static int ion_debug_client_open(struct inode *inode, struct file *file) | ||
| 562 | { | ||
| 563 | return single_open(file, ion_debug_client_show, inode->i_private); | ||
| 564 | } | ||
| 565 | |||
| 566 | static const struct file_operations debug_client_fops = { | ||
| 567 | .open = ion_debug_client_open, | ||
| 568 | .read = seq_read, | ||
| 569 | .llseek = seq_lseek, | ||
| 570 | .release = single_release, | ||
| 571 | }; | ||
| 572 | |||
| 573 | static struct ion_client *ion_client_lookup(struct ion_device *dev, | ||
| 574 | struct task_struct *task) | ||
| 575 | { | ||
| 576 | struct rb_node *n = dev->user_clients.rb_node; | ||
| 577 | struct ion_client *client; | ||
| 578 | |||
| 579 | mutex_lock(&dev->lock); | ||
| 580 | while (n) { | ||
| 581 | client = rb_entry(n, struct ion_client, node); | ||
| 582 | if (task == client->task) { | ||
| 583 | ion_client_get(client); | ||
| 584 | mutex_unlock(&dev->lock); | ||
| 585 | return client; | ||
| 586 | } else if (task < client->task) { | ||
| 587 | n = n->rb_left; | ||
| 588 | } else if (task > client->task) { | ||
| 589 | n = n->rb_right; | ||
| 590 | } | ||
| 591 | } | ||
| 592 | mutex_unlock(&dev->lock); | ||
| 593 | return NULL; | ||
| 594 | } | ||
| 595 | |||
| 596 | struct ion_client *ion_client_create(struct ion_device *dev, | ||
| 597 | unsigned int heap_mask, | ||
| 598 | const char *name) | ||
| 599 | { | ||
| 600 | struct ion_client *client; | ||
| 601 | struct task_struct *task; | ||
| 602 | struct rb_node **p; | ||
| 603 | struct rb_node *parent = NULL; | ||
| 604 | struct ion_client *entry; | ||
| 605 | char debug_name[64]; | ||
| 606 | pid_t pid; | ||
| 607 | |||
| 608 | get_task_struct(current->group_leader); | ||
| 609 | task_lock(current->group_leader); | ||
| 610 | pid = task_pid_nr(current->group_leader); | ||
| 611 | /* don't bother to store task struct for kernel threads, | ||
| 612 | they can't be killed anyway */ | ||
| 613 | if (current->group_leader->flags & PF_KTHREAD) { | ||
| 614 | put_task_struct(current->group_leader); | ||
| 615 | task = NULL; | ||
| 616 | } else { | ||
| 617 | task = current->group_leader; | ||
| 618 | } | ||
| 619 | task_unlock(current->group_leader); | ||
| 620 | |||
| 621 | /* if this isn't a kernel thread, see if a client already | ||
| 622 | exists */ | ||
| 623 | if (task) { | ||
| 624 | client = ion_client_lookup(dev, task); | ||
| 625 | if (!IS_ERR_OR_NULL(client)) { | ||
| 626 | put_task_struct(current->group_leader); | ||
| 627 | return client; | ||
| 628 | } | ||
| 629 | } | ||
| 630 | |||
| 631 | client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); | ||
| 632 | if (!client) { | ||
| 633 | put_task_struct(current->group_leader); | ||
| 634 | return ERR_PTR(-ENOMEM); | ||
| 635 | } | ||
| 636 | |||
| 637 | client->dev = dev; | ||
| 638 | client->handles = RB_ROOT; | ||
| 639 | mutex_init(&client->lock); | ||
| 640 | client->name = name; | ||
| 641 | client->heap_mask = heap_mask; | ||
| 642 | client->task = task; | ||
| 643 | client->pid = pid; | ||
| 644 | kref_init(&client->ref); | ||
| 645 | |||
| 646 | mutex_lock(&dev->lock); | ||
| 647 | if (task) { | ||
| 648 | p = &dev->user_clients.rb_node; | ||
| 649 | while (*p) { | ||
| 650 | parent = *p; | ||
| 651 | entry = rb_entry(parent, struct ion_client, node); | ||
| 652 | |||
| 653 | if (task < entry->task) | ||
| 654 | p = &(*p)->rb_left; | ||
| 655 | else if (task > entry->task) | ||
| 656 | p = &(*p)->rb_right; | ||
| 657 | } | ||
| 658 | rb_link_node(&client->node, parent, p); | ||
| 659 | rb_insert_color(&client->node, &dev->user_clients); | ||
| 660 | } else { | ||
| 661 | p = &dev->kernel_clients.rb_node; | ||
| 662 | while (*p) { | ||
| 663 | parent = *p; | ||
| 664 | entry = rb_entry(parent, struct ion_client, node); | ||
| 665 | |||
| 666 | if (client < entry) | ||
| 667 | p = &(*p)->rb_left; | ||
| 668 | else if (client > entry) | ||
| 669 | p = &(*p)->rb_right; | ||
| 670 | } | ||
| 671 | rb_link_node(&client->node, parent, p); | ||
| 672 | rb_insert_color(&client->node, &dev->kernel_clients); | ||
| 673 | } | ||
| 674 | |||
| 675 | snprintf(debug_name, 64, "%u", client->pid); | ||
| 676 | client->debug_root = debugfs_create_file(debug_name, 0664, | ||
| 677 | dev->debug_root, client, | ||
| 678 | &debug_client_fops); | ||
| 679 | mutex_unlock(&dev->lock); | ||
| 680 | |||
| 681 | return client; | ||
| 682 | } | ||
| 683 | |||
| 684 | static void _ion_client_destroy(struct kref *kref) | ||
| 685 | { | ||
| 686 | struct ion_client *client = container_of(kref, struct ion_client, ref); | ||
| 687 | struct ion_device *dev = client->dev; | ||
| 688 | struct rb_node *n; | ||
| 689 | |||
| 690 | pr_debug("\n"); | ||
| 691 | while ((n = rb_first(&client->handles))) { | ||
| 692 | struct ion_handle *handle = rb_entry(n, struct ion_handle, | ||
| 693 | node); | ||
| 694 | ion_handle_destroy(&handle->ref); | ||
| 695 | } | ||
| 696 | mutex_lock(&dev->lock); | ||
| 697 | if (client->task) { | ||
| 698 | rb_erase(&client->node, &dev->user_clients); | ||
| 699 | put_task_struct(client->task); | ||
| 700 | } else { | ||
| 701 | rb_erase(&client->node, &dev->kernel_clients); | ||
| 702 | } | ||
| 703 | debugfs_remove_recursive(client->debug_root); | ||
| 704 | mutex_unlock(&dev->lock); | ||
| 705 | |||
| 706 | kfree(client); | ||
| 707 | } | ||
| 708 | |||
| 709 | void ion_client_get(struct ion_client *client) | ||
| 710 | { | ||
| 711 | kref_get(&client->ref); | ||
| 712 | } | ||
| 713 | |||
| 714 | int ion_client_put(struct ion_client *client) | ||
| 715 | { | ||
| 716 | return kref_put(&client->ref, _ion_client_destroy); | ||
| 717 | } | ||
| 718 | |||
| 719 | void ion_client_destroy(struct ion_client *client) | ||
| 720 | { | ||
| 721 | ion_client_put(client); | ||
| 722 | } | ||
| 723 | |||
| 724 | static int ion_share_release(struct inode *inode, struct file* file) | ||
| 725 | { | ||
| 726 | struct ion_buffer *buffer = file->private_data; | ||
| 727 | |||
| 728 | pr_debug("\n"); | ||
| 729 | /* drop the reference to the buffer -- this prevents the | ||
| 730 | buffer from going away because the client holding it exited | ||
| 731 | while it was being passed */ | ||
| 732 | ion_buffer_put(buffer); | ||
| 733 | return 0; | ||
| 734 | } | ||
| 735 | |||
| 736 | static void ion_vma_open(struct vm_area_struct *vma) | ||
| 737 | { | ||
| 738 | |||
| 739 | struct ion_buffer *buffer = vma->vm_file->private_data; | ||
| 740 | struct ion_handle *handle = vma->vm_private_data; | ||
| 741 | struct ion_client *client; | ||
| 742 | |||
| 743 | pr_debug("\n"); | ||
| 744 | /* check that the client still exists and take a reference so | ||
| 745 | it can't go away until this vma is closed */ | ||
| 746 | client = ion_client_lookup(buffer->dev, current->group_leader); | ||
| 747 | if (IS_ERR_OR_NULL(client)) { | ||
| 748 | vma->vm_private_data = NULL; | ||
| 749 | return; | ||
| 750 | } | ||
| 751 | ion_buffer_get(buffer); | ||
| 752 | ion_handle_get(handle); | ||
| 753 | pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", | ||
| 754 | atomic_read(&client->ref.refcount), | ||
| 755 | atomic_read(&handle->ref.refcount), | ||
| 756 | atomic_read(&buffer->ref.refcount)); | ||
| 757 | } | ||
| 758 | |||
| 759 | static void ion_vma_close(struct vm_area_struct *vma) | ||
| 760 | { | ||
| 761 | struct ion_handle *handle = vma->vm_private_data; | ||
| 762 | struct ion_buffer *buffer = vma->vm_file->private_data; | ||
| 763 | struct ion_client *client; | ||
| 764 | |||
| 765 | pr_debug("\n"); | ||
| 766 | /* this indicates the client is gone, nothing to do here */ | ||
| 767 | if (!handle) | ||
| 768 | return; | ||
| 769 | client = handle->client; | ||
| 770 | pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", | ||
| 771 | atomic_read(&client->ref.refcount), | ||
| 772 | atomic_read(&handle->ref.refcount), | ||
| 773 | atomic_read(&buffer->ref.refcount)); | ||
| 774 | ion_handle_put(handle); | ||
| 775 | ion_client_put(client); | ||
| 776 | ion_buffer_put(buffer); | ||
| 777 | pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", | ||
| 778 | atomic_read(&client->ref.refcount), | ||
| 779 | atomic_read(&handle->ref.refcount), | ||
| 780 | atomic_read(&buffer->ref.refcount)); | ||
| 781 | } | ||
| 782 | |||
| 783 | static struct vm_operations_struct ion_vm_ops = { | ||
| 784 | .open = ion_vma_open, | ||
| 785 | .close = ion_vma_close, | ||
| 786 | }; | ||
| 787 | |||
| 788 | static int ion_share_mmap(struct file *file, struct vm_area_struct *vma) | ||
| 789 | { | ||
| 790 | struct ion_buffer *buffer = file->private_data; | ||
| 791 | unsigned long size = vma->vm_end - vma->vm_start; | ||
| 792 | struct ion_client *client; | ||
| 793 | struct ion_handle *handle; | ||
| 794 | int ret; | ||
| 795 | |||
| 796 | pr_debug("\n"); | ||
| 797 | /* make sure the client still exists, it's possible for the client to | ||
| 798 | have gone away but the map/share fd still to be around, take | ||
| 799 | a reference to it so it can't go away while this mapping exists */ | ||
| 800 | client = ion_client_lookup(buffer->dev, current->group_leader); | ||
| 801 | if (IS_ERR_OR_NULL(client)) { | ||
| 802 | WARN(1, "trying to mmap an ion handle in a process with no " | ||
| 803 | "ion client\n"); | ||
| 804 | return -EINVAL; | ||
| 805 | } | ||
| 806 | |||
| 807 | if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) > | ||
| 808 | buffer->size)) { | ||
| 809 | WARN(1, "trying to map larger area than handle has available" | ||
| 810 | "\n"); | ||
| 811 | ret = -EINVAL; | ||
| 812 | goto err; | ||
| 813 | } | ||
| 814 | |||
| 815 | /* find the handle and take a reference to it */ | ||
| 816 | handle = ion_import(client, buffer); | ||
| 817 | if (IS_ERR_OR_NULL(handle)) { | ||
| 818 | ret = -EINVAL; | ||
| 819 | goto err; | ||
| 820 | } | ||
| 821 | ion_buffer_get(buffer); | ||
| 822 | |||
| 823 | if (!handle->buffer->heap->ops->map_user) { | ||
| 824 | pr_err("this heap does not define a method for mapping " | ||
| 825 | "to userspace\n"); | ||
| 826 | ret = -EINVAL; | ||
| 827 | goto err1; | ||
| 828 | } | ||
| 829 | |||
| 830 | mutex_lock(&buffer->lock); | ||
| 831 | /* now map it to userspace */ | ||
| 832 | ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); | ||
| 833 | mutex_unlock(&buffer->lock); | ||
| 834 | if (ret) { | ||
| 835 | pr_err("failure mapping buffer to userspace\n"); | ||
| 836 | goto err1; | ||
| 837 | } | ||
| 838 | |||
| 839 | vma->vm_ops = &ion_vm_ops; | ||
| 840 | /* move the handle into the vm_private_data so we can access it from | ||
| 841 | vma_open/close */ | ||
| 842 | vma->vm_private_data = handle; | ||
| 843 | pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", | ||
| 844 | atomic_read(&client->ref.refcount), | ||
| 845 | atomic_read(&handle->ref.refcount), | ||
| 846 | atomic_read(&buffer->ref.refcount)); | ||
| 847 | return 0; | ||
| 848 | |||
| 849 | err1: | ||
| 850 | /* drop the reference to the handle */ | ||
| 851 | ion_handle_put(handle); | ||
| 852 | err: | ||
| 853 | /* drop the reference to the client */ | ||
| 854 | ion_client_put(client); | ||
| 855 | return ret; | ||
| 856 | } | ||
| 857 | |||
| 858 | static const struct file_operations ion_share_fops = { | ||
| 859 | .owner = THIS_MODULE, | ||
| 860 | .release = ion_share_release, | ||
| 861 | .mmap = ion_share_mmap, | ||
| 862 | }; | ||
| 863 | |||
| 864 | static int ion_ioctl_share(struct file *parent, struct ion_client *client, | ||
| 865 | struct ion_handle *handle) | ||
| 866 | { | ||
| 867 | int fd = get_unused_fd(); | ||
| 868 | struct file *file; | ||
| 869 | |||
| 870 | if (fd < 0) | ||
| 871 | return -ENFILE; | ||
| 872 | |||
| 873 | file = anon_inode_getfile("ion_share_fd", &ion_share_fops, | ||
| 874 | handle->buffer, O_RDWR); | ||
| 875 | if (IS_ERR_OR_NULL(file)) | ||
| 876 | goto err; | ||
| 877 | ion_buffer_get(handle->buffer); | ||
| 878 | fd_install(fd, file); | ||
| 879 | |||
| 880 | return fd; | ||
| 881 | |||
| 882 | err: | ||
| 883 | put_unused_fd(fd); | ||
| 884 | return -ENFILE; | ||
| 885 | } | ||
| 886 | |||
| 887 | static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
| 888 | { | ||
| 889 | struct ion_client *client = filp->private_data; | ||
| 890 | |||
| 891 | switch (cmd) { | ||
| 892 | case ION_IOC_ALLOC: | ||
| 893 | { | ||
| 894 | struct ion_allocation_data data; | ||
| 895 | |||
| 896 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
| 897 | return -EFAULT; | ||
| 898 | data.handle = ion_alloc(client, data.len, data.align, | ||
| 899 | data.flags); | ||
| 900 | if (copy_to_user((void __user *)arg, &data, sizeof(data))) | ||
| 901 | return -EFAULT; | ||
| 902 | break; | ||
| 903 | } | ||
| 904 | case ION_IOC_FREE: | ||
| 905 | { | ||
| 906 | struct ion_handle_data data; | ||
| 907 | bool valid; | ||
| 908 | |||
| 909 | if (copy_from_user(&data, (void __user *)arg, | ||
| 910 | sizeof(struct ion_handle_data))) | ||
| 911 | return -EFAULT; | ||
| 912 | mutex_lock(&client->lock); | ||
| 913 | valid = ion_handle_validate(client, data.handle); | ||
| 914 | mutex_unlock(&client->lock); | ||
| 915 | if (!valid) | ||
| 916 | return -EINVAL; | ||
| 917 | ion_free(client, data.handle); | ||
| 918 | break; | ||
| 919 | } | ||
| 920 | case ION_IOC_MAP: | ||
| 921 | case ION_IOC_SHARE: | ||
| 922 | { | ||
| 923 | struct ion_fd_data data; | ||
| 924 | |||
| 925 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
| 926 | return -EFAULT; | ||
| 927 | mutex_lock(&client->lock); | ||
| 928 | if (!ion_handle_validate(client, data.handle)) { | ||
| 929 | WARN(1, "invalid handle passed to share ioctl.\n"); | ||
| 930 | mutex_unlock(&client->lock); | ||
| 931 | return -EINVAL; | ||
| 932 | } | ||
| 933 | data.fd = ion_ioctl_share(filp, client, data.handle); | ||
| 934 | mutex_unlock(&client->lock); | ||
| 935 | if (copy_to_user((void __user *)arg, &data, sizeof(data))) | ||
| 936 | return -EFAULT; | ||
| 937 | break; | ||
| 938 | } | ||
| 939 | case ION_IOC_IMPORT: | ||
| 940 | { | ||
| 941 | struct ion_fd_data data; | ||
| 942 | if (copy_from_user(&data, (void __user *)arg, | ||
| 943 | sizeof(struct ion_fd_data))) | ||
| 944 | return -EFAULT; | ||
| 945 | |||
| 946 | data.handle = ion_import_fd(client, data.fd); | ||
| 947 | if (IS_ERR(data.handle)) | ||
| 948 | data.handle = NULL; | ||
| 949 | if (copy_to_user((void __user *)arg, &data, | ||
| 950 | sizeof(struct ion_fd_data))) | ||
| 951 | return -EFAULT; | ||
| 952 | break; | ||
| 953 | } | ||
| 954 | case ION_IOC_CUSTOM: | ||
| 955 | { | ||
| 956 | struct ion_device *dev = client->dev; | ||
| 957 | struct ion_custom_data data; | ||
| 958 | |||
| 959 | if (!dev->custom_ioctl) | ||
| 960 | return -ENOTTY; | ||
| 961 | if (copy_from_user(&data, (void __user *)arg, | ||
| 962 | sizeof(struct ion_custom_data))) | ||
| 963 | return -EFAULT; | ||
| 964 | return dev->custom_ioctl(client, data.cmd, data.arg); | ||
| 965 | } | ||
| 966 | default: | ||
| 967 | return -ENOTTY; | ||
| 968 | } | ||
| 969 | return 0; | ||
| 970 | } | ||
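The ioctl surface above gives userspace the whole alloc/share/map flow. A hedged userspace sketch follows; the struct layouts live in the uapi `linux/ion.h`, which is not part of this diff, so the field names here simply mirror their usage in the handler above, and the heap id is a placeholder:

```c
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/ion.h>		/* ION_IOC_* and the ioctl structs */

int main(void)
{
	int dev_fd = open("/dev/ion", O_RDWR);	/* misc device named "ion" */
	struct ion_allocation_data alloc = {
		.len = 4096,
		.align = 4096,
		.flags = 1 << 1,	/* hypothetical heap id 1 */
	};
	struct ion_fd_data share;
	void *p;

	if (dev_fd < 0)
		return 1;
	if (ioctl(dev_fd, ION_IOC_ALLOC, &alloc) < 0)
		return 1;

	share.handle = alloc.handle;
	if (ioctl(dev_fd, ION_IOC_SHARE, &share) < 0)
		return 1;

	/* share.fd can be sent to another process (e.g. via SCM_RIGHTS);
	 * the receiver uses ION_IOC_IMPORT, or simply mmap()s it, which
	 * lands in ion_share_mmap() above. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		 share.fd, 0);
	if (p == MAP_FAILED)
		return 1;

	((char *)p)[0] = 42;
	return 0;
}
```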
| 971 | |||
| 972 | static int ion_release(struct inode *inode, struct file *file) | ||
| 973 | { | ||
| 974 | struct ion_client *client = file->private_data; | ||
| 975 | |||
| 976 | pr_debug("\n"); | ||
| 977 | ion_client_put(client); | ||
| 978 | return 0; | ||
| 979 | } | ||
| 980 | |||
| 981 | static int ion_open(struct inode *inode, struct file *file) | ||
| 982 | { | ||
| 983 | struct miscdevice *miscdev = file->private_data; | ||
| 984 | struct ion_device *dev = container_of(miscdev, struct ion_device, dev); | ||
| 985 | struct ion_client *client; | ||
| 986 | |||
| 987 | pr_debug("\n"); | ||
| 988 | client = ion_client_create(dev, -1, "user"); | ||
| 989 | if (IS_ERR_OR_NULL(client)) | ||
| 990 | return PTR_ERR(client); | ||
| 991 | file->private_data = client; | ||
| 992 | |||
| 993 | return 0; | ||
| 994 | } | ||
| 995 | |||
| 996 | static const struct file_operations ion_fops = { | ||
| 997 | .owner = THIS_MODULE, | ||
| 998 | .open = ion_open, | ||
| 999 | .release = ion_release, | ||
| 1000 | .unlocked_ioctl = ion_ioctl, | ||
| 1001 | }; | ||
| 1002 | |||
| 1003 | static size_t ion_debug_heap_total(struct ion_client *client, | ||
| 1004 | enum ion_heap_type type) | ||
| 1005 | { | ||
| 1006 | size_t size = 0; | ||
| 1007 | struct rb_node *n; | ||
| 1008 | |||
| 1009 | mutex_lock(&client->lock); | ||
| 1010 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { | ||
| 1011 | struct ion_handle *handle = rb_entry(n, | ||
| 1012 | struct ion_handle, | ||
| 1013 | node); | ||
| 1014 | if (handle->buffer->heap->type == type) | ||
| 1015 | size += handle->buffer->size; | ||
| 1016 | } | ||
| 1017 | mutex_unlock(&client->lock); | ||
| 1018 | return size; | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | static int ion_debug_heap_show(struct seq_file *s, void *unused) | ||
| 1022 | { | ||
| 1023 | struct ion_heap *heap = s->private; | ||
| 1024 | struct ion_device *dev = heap->dev; | ||
| 1025 | struct rb_node *n; | ||
| 1026 | |||
| 1027 | seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size"); | ||
| 1028 | for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) { | ||
| 1029 | struct ion_client *client = rb_entry(n, struct ion_client, | ||
| 1030 | node); | ||
| 1031 | char task_comm[TASK_COMM_LEN]; | ||
| 1032 | size_t size = ion_debug_heap_total(client, heap->type); | ||
| 1033 | if (!size) | ||
| 1034 | continue; | ||
| 1035 | |||
| 1036 | get_task_comm(task_comm, client->task); | ||
| 1037 | seq_printf(s, "%16.s %16u %16u\n", task_comm, client->pid, | ||
| 1038 | size); | ||
| 1039 | } | ||
| 1040 | |||
| 1041 | for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) { | ||
| 1042 | struct ion_client *client = rb_entry(n, struct ion_client, | ||
| 1043 | node); | ||
| 1044 | size_t size = ion_debug_heap_total(client, heap->type); | ||
| 1045 | if (!size) | ||
| 1046 | continue; | ||
| 1047 | seq_printf(s, "%16.s %16u %16u\n", client->name, client->pid, | ||
| 1048 | size); | ||
| 1049 | } | ||
| 1050 | return 0; | ||
| 1051 | } | ||
| 1052 | |||
| 1053 | static int ion_debug_heap_open(struct inode *inode, struct file *file) | ||
| 1054 | { | ||
| 1055 | return single_open(file, ion_debug_heap_show, inode->i_private); | ||
| 1056 | } | ||
| 1057 | |||
| 1058 | static const struct file_operations debug_heap_fops = { | ||
| 1059 | .open = ion_debug_heap_open, | ||
| 1060 | .read = seq_read, | ||
| 1061 | .llseek = seq_lseek, | ||
| 1062 | .release = single_release, | ||
| 1063 | }; | ||
| 1064 | |||
| 1065 | void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) | ||
| 1066 | { | ||
| 1067 | struct rb_node **p = &dev->heaps.rb_node; | ||
| 1068 | struct rb_node *parent = NULL; | ||
| 1069 | struct ion_heap *entry; | ||
| 1070 | |||
| 1071 | heap->dev = dev; | ||
| 1072 | mutex_lock(&dev->lock); | ||
| 1073 | while (*p) { | ||
| 1074 | parent = *p; | ||
| 1075 | entry = rb_entry(parent, struct ion_heap, node); | ||
| 1076 | |||
| 1077 | if (heap->id < entry->id) { | ||
| 1078 | p = &(*p)->rb_left; | ||
| 1079 | } else if (heap->id > entry->id ) { | ||
| 1080 | p = &(*p)->rb_right; | ||
| 1081 | } else { | ||
| 1082 | pr_err("can not insert multiple heaps with " | ||
| 1083 | "id %d\n", heap->id); | ||
| 1084 | goto end; | ||
| 1085 | } | ||
| 1086 | } | ||
| 1087 | |||
| 1088 | rb_link_node(&heap->node, parent, p); | ||
| 1089 | rb_insert_color(&heap->node, &dev->heaps); | ||
| 1090 | debugfs_create_file(heap->name, 0664, dev->debug_root, heap, | ||
| 1091 | &debug_heap_fops); | ||
| 1092 | end: | ||
| 1093 | mutex_unlock(&dev->lock); | ||
| 1094 | } | ||
| 1095 | |||
| 1096 | struct ion_device *ion_device_create(long (*custom_ioctl) | ||
| 1097 | (struct ion_client *client, | ||
| 1098 | unsigned int cmd, | ||
| 1099 | unsigned long arg)) | ||
| 1100 | { | ||
| 1101 | struct ion_device *idev; | ||
| 1102 | int ret; | ||
| 1103 | |||
| 1104 | idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); | ||
| 1105 | if (!idev) | ||
| 1106 | return ERR_PTR(-ENOMEM); | ||
| 1107 | |||
| 1108 | idev->dev.minor = MISC_DYNAMIC_MINOR; | ||
| 1109 | idev->dev.name = "ion"; | ||
| 1110 | idev->dev.fops = &ion_fops; | ||
| 1111 | idev->dev.parent = NULL; | ||
| 1112 | ret = misc_register(&idev->dev); | ||
| 1113 | if (ret) { | ||
| 1114 | pr_err("ion: failed to register misc device.\n"); | ||
| 1115 | return ERR_PTR(ret); | ||
| 1116 | } | ||
| 1117 | |||
| 1118 | idev->debug_root = debugfs_create_dir("ion", NULL); | ||
| 1119 | if (IS_ERR_OR_NULL(idev->debug_root)) | ||
| 1120 | pr_err("ion: failed to create debug files.\n"); | ||
| 1121 | |||
| 1122 | idev->custom_ioctl = custom_ioctl; | ||
| 1123 | idev->buffers = RB_ROOT; | ||
| 1124 | mutex_init(&idev->lock); | ||
| 1125 | idev->heaps = RB_ROOT; | ||
| 1126 | idev->user_clients = RB_ROOT; | ||
| 1127 | idev->kernel_clients = RB_ROOT; | ||
| 1128 | return idev; | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | void ion_device_destroy(struct ion_device *dev) | ||
| 1132 | { | ||
| 1133 | misc_deregister(&dev->dev); | ||
| 1134 | /* XXX need to free the heaps and clients ? */ | ||
| 1135 | kfree(dev); | ||
| 1136 | } | ||
| 1137 | |||
| 1138 | struct ion_client *ion_client_get_file(int fd) | ||
| 1139 | { | ||
| 1140 | struct ion_client *client = ERR_PTR(-EFAULT); | ||
| 1141 | struct file *f = fget(fd); | ||
| 1142 | if (!f) | ||
| 1143 | return ERR_PTR(-EINVAL); | ||
| 1144 | |||
| 1145 | if (f->f_op == &ion_fops) { | ||
| 1146 | client = f->private_data; | ||
| 1147 | ion_client_get(client); | ||
| 1148 | } | ||
| 1149 | |||
| 1150 | fput(f); | ||
| 1151 | return client; | ||
| 1152 | } | ||
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
new file mode 100644
index 00000000000..606adae13f4
--- /dev/null
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -0,0 +1,162 @@
| 1 | /* | ||
| 2 | * drivers/gpu/ion/ion_carveout_heap.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Google, Inc. | ||
| 5 | * | ||
| 6 | * This software is licensed under the terms of the GNU General Public | ||
| 7 | * License version 2, as published by the Free Software Foundation, and | ||
| 8 | * may be copied, distributed, and modified under those terms. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | */ | ||
| 16 | #include <linux/spinlock.h> | ||
| 17 | |||
| 18 | #include <linux/err.h> | ||
| 19 | #include <linux/genalloc.h> | ||
| 20 | #include <linux/io.h> | ||
| 21 | #include <linux/ion.h> | ||
| 22 | #include <linux/mm.h> | ||
| 23 | #include <linux/scatterlist.h> | ||
| 24 | #include <linux/slab.h> | ||
| 25 | #include <linux/vmalloc.h> | ||
| 26 | #include "ion_priv.h" | ||
| 27 | |||
| 28 | #include <asm/mach/map.h> | ||
| 29 | |||
| 30 | struct ion_carveout_heap { | ||
| 31 | struct ion_heap heap; | ||
| 32 | struct gen_pool *pool; | ||
| 33 | ion_phys_addr_t base; | ||
| 34 | }; | ||
| 35 | |||
| 36 | ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, | ||
| 37 | unsigned long size, | ||
| 38 | unsigned long align) | ||
| 39 | { | ||
| 40 | struct ion_carveout_heap *carveout_heap = | ||
| 41 | container_of(heap, struct ion_carveout_heap, heap); | ||
| 42 | unsigned long offset = gen_pool_alloc(carveout_heap->pool, size); | ||
| 43 | |||
| 44 | if (!offset) | ||
| 45 | return ION_CARVEOUT_ALLOCATE_FAIL; | ||
| 46 | |||
| 47 | return offset; | ||
| 48 | } | ||
| 49 | |||
| 50 | void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, | ||
| 51 | unsigned long size) | ||
| 52 | { | ||
| 53 | struct ion_carveout_heap *carveout_heap = | ||
| 54 | container_of(heap, struct ion_carveout_heap, heap); | ||
| 55 | |||
| 56 | if (addr == ION_CARVEOUT_ALLOCATE_FAIL) | ||
| 57 | return; | ||
| 58 | gen_pool_free(carveout_heap->pool, addr, size); | ||
| 59 | } | ||
| 60 | |||
| 61 | static int ion_carveout_heap_phys(struct ion_heap *heap, | ||
| 62 | struct ion_buffer *buffer, | ||
| 63 | ion_phys_addr_t *addr, size_t *len) | ||
| 64 | { | ||
| 65 | *addr = buffer->priv_phys; | ||
| 66 | *len = buffer->size; | ||
| 67 | return 0; | ||
| 68 | } | ||
| 69 | |||
| 70 | static int ion_carveout_heap_allocate(struct ion_heap *heap, | ||
| 71 | struct ion_buffer *buffer, | ||
| 72 | unsigned long size, unsigned long align, | ||
| 73 | unsigned long flags) | ||
| 74 | { | ||
| 75 | buffer->priv_phys = ion_carveout_allocate(heap, size, align); | ||
| 76 | return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0; | ||
| 77 | } | ||
| 78 | |||
| 79 | static void ion_carveout_heap_free(struct ion_buffer *buffer) | ||
| 80 | { | ||
| 81 | struct ion_heap *heap = buffer->heap; | ||
| 82 | |||
| 83 | ion_carveout_free(heap, buffer->priv_phys, buffer->size); | ||
| 84 | buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL; | ||
| 85 | } | ||
| 86 | |||
| 87 | struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap, | ||
| 88 | struct ion_buffer *buffer) | ||
| 89 | { | ||
| 90 | return ERR_PTR(-EINVAL); | ||
| 91 | } | ||
| 92 | |||
| 93 | void ion_carveout_heap_unmap_dma(struct ion_heap *heap, | ||
| 94 | struct ion_buffer *buffer) | ||
| 95 | { | ||
| 96 | return; | ||
| 97 | } | ||
| 98 | |||
| 99 | void *ion_carveout_heap_map_kernel(struct ion_heap *heap, | ||
| 100 | struct ion_buffer *buffer) | ||
| 101 | { | ||
| 102 | return __arch_ioremap(buffer->priv_phys, buffer->size, | ||
| 103 | MT_MEMORY_NONCACHED); | ||
| 104 | } | ||
| 105 | |||
| 106 | void ion_carveout_heap_unmap_kernel(struct ion_heap *heap, | ||
| 107 | struct ion_buffer *buffer) | ||
| 108 | { | ||
| 109 | __arch_iounmap(buffer->vaddr); | ||
| 110 | buffer->vaddr = NULL; | ||
| 111 | return; | ||
| 112 | } | ||
| 113 | |||
| 114 | int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, | ||
| 115 | struct vm_area_struct *vma) | ||
| 116 | { | ||
| 117 | return remap_pfn_range(vma, vma->vm_start, | ||
| 118 | __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff, | ||
| 119 | buffer->size, | ||
| 120 | pgprot_noncached(vma->vm_page_prot)); | ||
| 121 | } | ||
| 122 | |||
| 123 | static struct ion_heap_ops carveout_heap_ops = { | ||
| 124 | .allocate = ion_carveout_heap_allocate, | ||
| 125 | .free = ion_carveout_heap_free, | ||
| 126 | .phys = ion_carveout_heap_phys, | ||
| 127 | .map_user = ion_carveout_heap_map_user, | ||
| 128 | .map_kernel = ion_carveout_heap_map_kernel, | ||
| 129 | .unmap_kernel = ion_carveout_heap_unmap_kernel, | ||
| 130 | }; | ||
| 131 | |||
| 132 | struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) | ||
| 133 | { | ||
| 134 | struct ion_carveout_heap *carveout_heap; | ||
| 135 | |||
| 136 | carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL); | ||
| 137 | if (!carveout_heap) | ||
| 138 | return ERR_PTR(-ENOMEM); | ||
| 139 | |||
| 140 | carveout_heap->pool = gen_pool_create(12, -1); | ||
| 141 | if (!carveout_heap->pool) { | ||
| 142 | kfree(carveout_heap); | ||
| 143 | return ERR_PTR(-ENOMEM); | ||
| 144 | } | ||
| 145 | carveout_heap->base = heap_data->base; | ||
| 146 | gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size, | ||
| 147 | -1); | ||
| 148 | carveout_heap->heap.ops = &carveout_heap_ops; | ||
| 149 | carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT; | ||
| 150 | |||
| 151 | return &carveout_heap->heap; | ||
| 152 | } | ||
| 153 | |||
| 154 | void ion_carveout_heap_destroy(struct ion_heap *heap) | ||
| 155 | { | ||
| 156 | struct ion_carveout_heap *carveout_heap = | ||
| 157 | container_of(heap, struct ion_carveout_heap, heap); | ||
| 158 | |||
| 159 | gen_pool_destroy(carveout_heap->pool); | ||
| 160 | kfree(carveout_heap); | ||
| 161 | carveout_heap = NULL; | ||
| 162 | } | ||
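The carveout heap above is a thin wrapper over genalloc. A standalone sketch of the same pattern (base/size values are illustrative, not from this driver): one pool spanning a reserved physical region, handing out page-granular chunks since order 12 means 4 KiB minimum allocations.

```c
#include <linux/err.h>
#include <linux/genalloc.h>

static struct gen_pool *example_pool;

static int example_pool_init(unsigned long base, size_t size)
{
	example_pool = gen_pool_create(12, -1);	/* min alloc: 1 << 12 bytes */
	if (!example_pool)
		return -ENOMEM;
	/* hand the whole carveout region to the allocator */
	return gen_pool_add(example_pool, base, size, -1);
}

static unsigned long example_pool_get(size_t size)
{
	/* returns 0 when the region is exhausted, hence the
	 * ION_CARVEOUT_ALLOCATE_FAIL check in the heap above */
	return gen_pool_alloc(example_pool, size);
}

static void example_pool_put(unsigned long addr, size_t size)
{
	gen_pool_free(example_pool, addr, size);
}
```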
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
new file mode 100644
index 00000000000..6d09778745c
--- /dev/null
+++ b/drivers/gpu/ion/ion_heap.c
@@ -0,0 +1,78 @@
| 1 | /* | ||
| 2 | * drivers/gpu/ion/ion_heap.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Google, Inc. | ||
| 5 | * | ||
| 6 | * This software is licensed under the terms of the GNU General Public | ||
| 7 | * License version 2, as published by the Free Software Foundation, and | ||
| 8 | * may be copied, distributed, and modified under those terms. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/err.h> | ||
| 18 | #include <linux/ion.h> | ||
| 19 | #include "ion_priv.h" | ||
| 20 | |||
| 21 | struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) | ||
| 22 | { | ||
| 23 | struct ion_heap *heap = NULL; | ||
| 24 | |||
| 25 | switch (heap_data->type) { | ||
| 26 | case ION_HEAP_TYPE_SYSTEM_CONTIG: | ||
| 27 | heap = ion_system_contig_heap_create(heap_data); | ||
| 28 | break; | ||
| 29 | case ION_HEAP_TYPE_SYSTEM: | ||
| 30 | heap = ion_system_heap_create(heap_data); | ||
| 31 | break; | ||
| 32 | case ION_HEAP_TYPE_CARVEOUT: | ||
| 33 | heap = ion_carveout_heap_create(heap_data); | ||
| 34 | break; | ||
| 35 | case ION_HEAP_TYPE_IOMMU: | ||
| 36 | heap = ion_iommu_heap_create(heap_data); | ||
| 37 | break; | ||
| 38 | default: | ||
| 39 | pr_err("%s: Invalid heap type %d\n", __func__, | ||
| 40 | heap_data->type); | ||
| 41 | return ERR_PTR(-EINVAL); | ||
| 42 | } | ||
| 43 | |||
| 44 | if (IS_ERR_OR_NULL(heap)) { | ||
| 45 | pr_err("%s: error creating heap %s type %d base %lu size %u\n", | ||
| 46 | __func__, heap_data->name, heap_data->type, | ||
| 47 | heap_data->base, heap_data->size); | ||
| 48 | return ERR_PTR(-EINVAL); | ||
| 49 | } | ||
| 50 | |||
| 51 | heap->name = heap_data->name; | ||
| 52 | heap->id = heap_data->id; | ||
| 53 | return heap; | ||
| 54 | } | ||
| 55 | |||
| 56 | void ion_heap_destroy(struct ion_heap *heap) | ||
| 57 | { | ||
| 58 | if (!heap) | ||
| 59 | return; | ||
| 60 | |||
| 61 | switch (heap->type) { | ||
| 62 | case ION_HEAP_TYPE_SYSTEM_CONTIG: | ||
| 63 | ion_system_contig_heap_destroy(heap); | ||
| 64 | break; | ||
| 65 | case ION_HEAP_TYPE_SYSTEM: | ||
| 66 | ion_system_heap_destroy(heap); | ||
| 67 | break; | ||
| 68 | case ION_HEAP_TYPE_CARVEOUT: | ||
| 69 | ion_carveout_heap_destroy(heap); | ||
| 70 | break; | ||
| 71 | case ION_HEAP_TYPE_IOMMU: | ||
| 72 | ion_iommu_heap_destroy(heap); | ||
| 73 | break; | ||
| 74 | default: | ||
| 75 | pr_err("%s: Invalid heap type %d\n", __func__, | ||
| 76 | heap->type); | ||
| 77 | } | ||
| 78 | } | ||
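ion_heap_create() is the factory a platform driver calls once per entry of its heap table. A minimal probe-time sketch, assuming the `ion_platform_data` layout used by the tegra driver in this series (an `nr` count plus a `heaps` array); the shape is illustrative, not part of this patch:

```c
#include <linux/err.h>
#include <linux/ion.h>
#include "ion_priv.h"

static struct ion_device *idev;

static int example_ion_probe(struct ion_platform_data *pdata)
{
	int i;

	idev = ion_device_create(NULL /* no custom ioctl */);
	if (IS_ERR_OR_NULL(idev))
		return -ENOMEM;

	for (i = 0; i < pdata->nr; i++) {
		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);

		if (IS_ERR_OR_NULL(heap))
			continue;	/* leave failed heaps out of the device */
		ion_device_add_heap(idev, heap);
	}
	return 0;
}
```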
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
new file mode 100644
index 00000000000..a3d2d726bda
--- /dev/null
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -0,0 +1,382 @@
| 1 | /* | ||
| 2 | * drivers/gpu/ion/ion_iommu_heap.c | ||
| 3 | * | ||
| 4 | * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License along with | ||
| 16 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 18 | */ | ||
| 19 | |||
| 20 | #define pr_fmt(fmt) "%s(): " fmt, __func__ | ||
| 21 | |||
| 22 | #include <linux/spinlock.h> | ||
| 23 | #include <linux/kernel.h> | ||
| 24 | #include <linux/genalloc.h> | ||
| 25 | #include <linux/io.h> | ||
| 26 | #include <linux/ion.h> | ||
| 27 | #include <linux/mm.h> | ||
| 28 | #include <linux/scatterlist.h> | ||
| 29 | #include <linux/slab.h> | ||
| 30 | #include <linux/vmalloc.h> | ||
| 31 | #include <linux/iommu.h> | ||
| 32 | #include <linux/highmem.h> | ||
| 33 | #include <linux/platform_device.h> | ||
| 34 | |||
| 35 | #include <asm/cacheflush.h> | ||
| 36 | |||
| 37 | #include "ion_priv.h" | ||
| 38 | |||
| 39 | #define NUM_PAGES(buf) (PAGE_ALIGN((buf)->size) >> PAGE_SHIFT) | ||
| 40 | |||
| 41 | #define GFP_ION (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN) | ||
| 42 | |||
| 43 | struct ion_iommu_heap { | ||
| 44 | struct ion_heap heap; | ||
| 45 | struct gen_pool *pool; | ||
| 46 | struct iommu_domain *domain; | ||
| 47 | struct device *dev; | ||
| 48 | }; | ||
| 49 | |||
| 50 | static struct scatterlist *iommu_heap_map_dma(struct ion_heap *heap, | ||
| 51 | struct ion_buffer *buf) | ||
| 52 | { | ||
| 53 | struct ion_iommu_heap *h = | ||
| 54 | container_of(heap, struct ion_iommu_heap, heap); | ||
| 55 | int err, npages = NUM_PAGES(buf); | ||
| 56 | unsigned int i; | ||
| 57 | struct scatterlist *sg; | ||
| 58 | unsigned long da = (unsigned long)buf->priv_virt; | ||
| 59 | |||
| 60 | for_each_sg(buf->sglist, sg, npages, i) { | ||
| 61 | phys_addr_t pa; | ||
| 62 | |||
| 63 | pa = sg_phys(sg); | ||
| 64 | BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE)); | ||
| 65 | err = iommu_map(h->domain, da, pa, PAGE_SIZE, 0); | ||
| 66 | if (err) | ||
| 67 | goto err_out; | ||
| 68 | |||
| 69 | sg->dma_address = da; | ||
| 70 | da += PAGE_SIZE; | ||
| 71 | } | ||
| 72 | |||
| 73 | pr_debug("da:%p pa:%08x va:%p\n", | ||
| 74 | buf->priv_virt, sg_phys(buf->sglist), buf->vaddr); | ||
| 75 | |||
| 76 | return buf->sglist; | ||
| 77 | |||
| 78 | err_out: | ||
| 79 | if (i-- > 0) { | ||
| 80 | unsigned int j; | ||
| 81 | for_each_sg(buf->sglist, sg, i, j) | ||
| 82 | iommu_unmap(h->domain, sg_dma_address(sg), 0); | ||
| 83 | } | ||
| 84 | return ERR_PTR(err); | ||
| 85 | } | ||
| 86 | |||
| 87 | static void iommu_heap_unmap_dma(struct ion_heap *heap, struct ion_buffer *buf) | ||
| 88 | { | ||
| 89 | struct ion_iommu_heap *h = | ||
| 90 | container_of(heap, struct ion_iommu_heap, heap); | ||
| 91 | unsigned int i; | ||
| 92 | struct scatterlist *sg; | ||
| 93 | int npages = NUM_PAGES(buf); | ||
| 94 | |||
| 95 | for_each_sg(buf->sglist, sg, npages, i) | ||
| 96 | iommu_unmap(h->domain, sg_dma_address(sg), PAGE_SIZE); | ||
| 97 | |||
| 98 | pr_debug("da:%p\n", buf->priv_virt); | ||
| 99 | } | ||
| 100 | |||
| 101 | struct scatterlist *iommu_heap_remap_dma(struct ion_heap *heap, | ||
| 102 | struct ion_buffer *buf, | ||
| 103 | unsigned long addr) | ||
| 104 | { | ||
| 105 | struct ion_iommu_heap *h = | ||
| 106 | container_of(heap, struct ion_iommu_heap, heap); | ||
| 107 | int err; | ||
| 108 | unsigned int i; | ||
| 109 | unsigned long da, da_to_free = (unsigned long)buf->priv_virt; | ||
| 110 | int npages = NUM_PAGES(buf); | ||
| 111 | |||
| 112 | BUG_ON(!buf->priv_virt); | ||
| 113 | |||
| 114 | da = gen_pool_alloc_addr(h->pool, buf->size, addr); | ||
| 115 | if (da == 0) { | ||
| 116 | pr_err("dma address alloc failed, addr=0x%lx", addr); | ||
| 117 | return ERR_PTR(-ENOMEM); | ||
| 118 | } else { | ||
| 119 | pr_err("iommu_heap_remap_dma passed, addr=0x%lx", | ||
| 120 | addr); | ||
| 121 | iommu_heap_unmap_dma(heap, buf); | ||
| 122 | gen_pool_free(h->pool, da_to_free, buf->size); | ||
| 123 | buf->priv_virt = (void *)da; | ||
| 124 | } | ||
| 125 | for (i = 0; i < npages; i++) { | ||
| 126 | phys_addr_t pa; | ||
| 127 | |||
| 128 | pa = page_to_phys(buf->pages[i]); | ||
| 129 | err = iommu_map(h->domain, da, pa, PAGE_SIZE, 0); | ||
| 130 | if (err) | ||
| 131 | goto err_out; | ||
| 132 | da += PAGE_SIZE; | ||
| 133 | } | ||
| 134 | |||
| 135 | pr_debug("da:%p pa:%08x va:%p\n", | ||
| 136 | buf->priv_virt, (u32)page_to_phys(buf->pages[0]), buf->vaddr); | ||
| 137 | |||
| 138 | return (struct scatterlist *)buf->pages; | ||
| 139 | |||
| 140 | err_out: | ||
| 141 | while (i-- > 0) { | ||
| 142 | da = (unsigned long)buf->priv_virt; | ||
| 143 | iommu_unmap(h->domain, da + (i << PAGE_SHIFT), PAGE_SIZE); | ||
| 144 | } | ||
| 145 | return ERR_PTR(err); | ||
| 146 | } | ||
| 147 | |||
| 148 | static int ion_buffer_allocate(struct ion_buffer *buf) | ||
| 149 | { | ||
| 150 | int i, npages = NUM_PAGES(buf); | ||
| 151 | |||
| 152 | buf->pages = kmalloc(npages * sizeof(*buf->pages), GFP_KERNEL); | ||
| 153 | if (!buf->pages) | ||
| 154 | goto err_pages; | ||
| 155 | |||
| 156 | buf->sglist = vzalloc(npages * sizeof(*buf->sglist)); | ||
| 157 | if (!buf->sglist) | ||
| 158 | goto err_sgl; | ||
| 159 | |||
| 160 | sg_init_table(buf->sglist, npages); | ||
| 161 | |||
| 162 | for (i = 0; i < npages; i++) { | ||
| 163 | struct page *page; | ||
| 164 | phys_addr_t pa; | ||
| 165 | |||
| 166 | page = alloc_page(GFP_ION); | ||
| 167 | if (!page) | ||
| 168 | goto err_pgalloc; | ||
| 169 | pa = page_to_phys(page); | ||
| 170 | |||
| 171 | sg_set_page(&buf->sglist[i], page, PAGE_SIZE, 0); | ||
| 172 | |||
| 173 | flush_dcache_page(page); | ||
| 174 | outer_flush_range(pa, pa + PAGE_SIZE); | ||
| 175 | |||
| 176 | buf->pages[i] = page; | ||
| 177 | |||
| 178 | pr_debug_once("pa:%08x\n", pa); | ||
| 179 | } | ||
| 180 | return 0; | ||
| 181 | |||
| 182 | err_pgalloc: | ||
| 183 | while (i-- > 0) | ||
| 184 | __free_page(buf->pages[i]); | ||
| 185 | vfree(buf->sglist); | ||
| 186 | err_sgl: | ||
| 187 | kfree(buf->pages); | ||
| 188 | err_pages: | ||
| 189 | return -ENOMEM; | ||
| 190 | } | ||
| 191 | |||
| 192 | static void ion_buffer_free(struct ion_buffer *buf) | ||
| 193 | { | ||
| 194 | int i, npages = NUM_PAGES(buf); | ||
| 195 | |||
| 196 | for (i = 0; i < npages; i++) | ||
| 197 | __free_page(buf->pages[i]); | ||
| 198 | vfree(buf->sglist); | ||
| 199 | kfree(buf->pages); | ||
| 200 | } | ||
| 201 | |||
| 202 | static int iommu_heap_allocate(struct ion_heap *heap, struct ion_buffer *buf, | ||
| 203 | unsigned long len, unsigned long align, | ||
| 204 | unsigned long flags) | ||
| 205 | { | ||
| 206 | int err; | ||
| 207 | struct ion_iommu_heap *h = | ||
| 208 | container_of(heap, struct ion_iommu_heap, heap); | ||
| 209 | unsigned long da; | ||
| 210 | struct scatterlist *sgl; | ||
| 211 | |||
| 212 | len = round_up(len, PAGE_SIZE); | ||
| 213 | |||
| 214 | da = gen_pool_alloc(h->pool, len); | ||
| 215 | if (!da) | ||
| 216 | return -ENOMEM; | ||
| 217 | |||
| 218 | buf->priv_virt = (void *)da; | ||
| 219 | buf->size = len; | ||
| 220 | |||
| 221 | WARN_ON(!IS_ALIGNED(da, PAGE_SIZE)); | ||
| 222 | |||
| 223 | err = ion_buffer_allocate(buf); | ||
| 224 | if (err) | ||
| 225 | goto err_alloc_buf; | ||
| 226 | |||
| 227 | sgl = iommu_heap_map_dma(heap, buf); | ||
| 228 | if (IS_ERR_OR_NULL(sgl)) | ||
| 229 | goto err_heap_map_dma; | ||
| 230 | buf->vaddr = NULL; | ||
| 231 | return 0; | ||
| 232 | |||
| 233 | err_heap_map_dma: | ||
| 234 | ion_buffer_free(buf); | ||
| 235 | err_alloc_buf: | ||
| 236 | gen_pool_free(h->pool, da, len); | ||
| 237 | buf->size = 0; | ||
| 238 | buf->pages = NULL; | ||
| 239 | buf->priv_virt = NULL; | ||
| 240 | return err; | ||
| 241 | } | ||
| 242 | |||
| 243 | static void iommu_heap_free(struct ion_buffer *buf) | ||
| 244 | { | ||
| 245 | struct ion_heap *heap = buf->heap; | ||
| 246 | struct ion_iommu_heap *h = | ||
| 247 | container_of(heap, struct ion_iommu_heap, heap); | ||
| 248 | void *da = buf->priv_virt; | ||
| 249 | |||
| 250 | iommu_heap_unmap_dma(heap, buf); | ||
| 251 | ion_buffer_free(buf); | ||
| 252 | gen_pool_free(h->pool, (unsigned long)da, buf->size); | ||
| 253 | |||
| 254 | buf->pages = NULL; | ||
| 255 | buf->priv_virt = NULL; | ||
| 256 | pr_debug("da:%p\n", da); | ||
| 257 | } | ||
| 258 | |||
| 259 | static int iommu_heap_phys(struct ion_heap *heap, struct ion_buffer *buf, | ||
| 260 | ion_phys_addr_t *addr, size_t *len) | ||
| 261 | { | ||
| 262 | *addr = (unsigned long)buf->priv_virt; | ||
| 263 | *len = buf->size; | ||
| 264 | pr_debug("da:%08lx(%x)\n", *addr, *len); | ||
| 265 | return 0; | ||
| 266 | } | ||
| 267 | |||
| 268 | static void *iommu_heap_map_kernel(struct ion_heap *heap, | ||
| 269 | struct ion_buffer *buf) | ||
| 270 | { | ||
| 271 | int npages = NUM_PAGES(buf); | ||
| 272 | |||
| 273 | BUG_ON(!buf->pages); | ||
| 274 | buf->vaddr = vm_map_ram(buf->pages, npages, -1, | ||
| 275 | pgprot_noncached(pgprot_kernel)); | ||
| 276 | pr_debug("va:%p\n", buf->vaddr); | ||
| 277 | WARN_ON(!buf->vaddr); | ||
| 278 | return buf->vaddr; | ||
| 279 | } | ||
| 280 | |||
| 281 | static void iommu_heap_unmap_kernel(struct ion_heap *heap, | ||
| 282 | struct ion_buffer *buf) | ||
| 283 | { | ||
| 284 | int npages = NUM_PAGES(buf); | ||
| 285 | |||
| 286 | BUG_ON(!buf->pages); | ||
| 287 | WARN_ON(!buf->vaddr); | ||
| 288 | vm_unmap_ram(buf->vaddr, npages); | ||
| 289 | buf->vaddr = NULL; | ||
| 290 | pr_debug("va:%p\n", buf->vaddr); | ||
| 291 | } | ||
| 292 | |||
| 293 | static int iommu_heap_map_user(struct ion_heap *mapper, | ||
| 294 | struct ion_buffer *buf, | ||
| 295 | struct vm_area_struct *vma) | ||
| 296 | { | ||
| 297 | int i = vma->vm_pgoff; /* vm_pgoff is already in pages */ | ||
| 298 | unsigned long uaddr = vma->vm_start; | ||
| 299 | unsigned long usize = vma->vm_end - vma->vm_start; | ||
| 300 | |||
| 301 | pr_debug("vma:%08lx-%08lx\n", vma->vm_start, vma->vm_end); | ||
| 302 | BUG_ON(!buf->pages); | ||
| 303 | |||
| 304 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
| 305 | do { | ||
| 306 | int ret; | ||
| 307 | struct page *page = buf->pages[i++]; | ||
| 308 | |||
| 309 | ret = vm_insert_page(vma, uaddr, page); | ||
| 310 | if (ret) | ||
| 311 | return ret; | ||
| 312 | |||
| 313 | uaddr += PAGE_SIZE; | ||
| 314 | usize -= PAGE_SIZE; | ||
| 315 | } while (usize > 0); | ||
| 316 | |||
| 317 | return 0; | ||
| 318 | } | ||
| 319 | |||
| 320 | static struct ion_heap_ops iommu_heap_ops = { | ||
| 321 | .allocate = iommu_heap_allocate, | ||
| 322 | .free = iommu_heap_free, | ||
| 323 | .phys = iommu_heap_phys, | ||
| 324 | .map_dma = iommu_heap_map_dma, | ||
| 325 | .unmap_dma = iommu_heap_unmap_dma, | ||
| 326 | .map_kernel = iommu_heap_map_kernel, | ||
| 327 | .unmap_kernel = iommu_heap_unmap_kernel, | ||
| 328 | .map_user = iommu_heap_map_user, | ||
| 329 | }; | ||
| 330 | |||
| 331 | struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *data) | ||
| 332 | { | ||
| 333 | struct ion_iommu_heap *h; | ||
| 334 | int err; | ||
| 335 | |||
| 336 | h = kzalloc(sizeof(*h), GFP_KERNEL); | ||
| 337 | if (!h) { | ||
| 338 | err = -ENOMEM; | ||
| 339 | goto err_heap; | ||
| 340 | } | ||
| 341 | |||
| 342 | h->pool = gen_pool_create(12, -1); | ||
| 343 | if (!h->pool) { | ||
| 344 | err = -ENOMEM; | ||
| 345 | goto err_genpool; | ||
| 346 | } | ||
| 347 | gen_pool_add(h->pool, data->base, data->size, -1); | ||
| 348 | |||
| 349 | h->heap.ops = &iommu_heap_ops; | ||
| 350 | h->domain = iommu_domain_alloc(&platform_bus_type); | ||
| 351 | h->dev = data->priv; | ||
| 352 | if (!h->domain) { | ||
| 353 | err = -ENOMEM; | ||
| 354 | goto err_iommu_alloc; | ||
| 355 | } | ||
| 356 | |||
| 357 | err = iommu_attach_device(h->domain, h->dev); | ||
| 358 | if (err) | ||
| 359 | goto err_iommu_attach; | ||
| 360 | |||
| 361 | return &h->heap; | ||
| 362 | |||
| 363 | err_iommu_attach: | ||
| 364 | iommu_domain_free(h->domain); | ||
| 365 | err_iommu_alloc: | ||
| 366 | gen_pool_destroy(h->pool); | ||
| 367 | err_genpool: | ||
| 368 | kfree(h); | ||
| 369 | err_heap: | ||
| 370 | return ERR_PTR(err); | ||
| 371 | } | ||
| 372 | |||
| 373 | void ion_iommu_heap_destroy(struct ion_heap *heap) | ||
| 374 | { | ||
| 375 | struct ion_iommu_heap *h = | ||
| 376 | container_of(heap, struct ion_iommu_heap, heap); | ||
| 377 | |||
| 378 | iommu_detach_device(h->domain, h->dev); | ||
| 379 | gen_pool_destroy(h->pool); | ||
| 380 | iommu_domain_free(h->domain); | ||
| 381 | kfree(h); | ||
| 382 | } | ||
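
For reference, the IOVA allocator behind this heap is an ordinary genalloc pool: gen_pool_create(12, -1) fixes the minimum allocation order at 2^12 bytes, which is why the WARN_ON(!IS_ALIGNED(da, PAGE_SIZE)) in iommu_heap_allocate() should never fire. A minimal sketch of the same setup, with the hypothetical name example_iova_pool():

```c
#include <linux/genalloc.h>
#include <linux/mm.h>

/* Build a page-granular IOVA pool covering [base, base + size). */
static struct gen_pool *example_iova_pool(unsigned long base, size_t size)
{
	struct gen_pool *pool = gen_pool_create(PAGE_SHIFT, -1);

	if (!pool)
		return NULL;
	/* hand the device's whole IOVA window to the allocator */
	if (gen_pool_add(pool, base, size, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}
	return pool;	/* gen_pool_alloc() results are now page aligned */
}
```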
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h new file mode 100644 index 00000000000..bfe26da9c04 --- /dev/null +++ b/drivers/gpu/ion/ion_priv.h | |||
| @@ -0,0 +1,293 @@ | |||
| 1 | /* | ||
| 2 | * drivers/gpu/ion/ion_priv.h | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Google, Inc. | ||
| 5 | * | ||
| 6 | * This software is licensed under the terms of the GNU General Public | ||
| 7 | * License version 2, as published by the Free Software Foundation, and | ||
| 8 | * may be copied, distributed, and modified under those terms. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | */ | ||
| 16 | |||
| 17 | #ifndef _ION_PRIV_H | ||
| 18 | #define _ION_PRIV_H | ||
| 19 | |||
| 20 | #include <linux/kref.h> | ||
| 21 | #include <linux/mm_types.h> | ||
| 22 | #include <linux/mutex.h> | ||
| 23 | #include <linux/rbtree.h> | ||
| 24 | #include <linux/ion.h> | ||
| 25 | #include <linux/miscdevice.h> | ||
| 26 | |||
| 27 | struct ion_mapping; | ||
| 28 | |||
| 29 | struct ion_dma_mapping { | ||
| 30 | struct kref ref; | ||
| 31 | struct scatterlist *sglist; | ||
| 32 | }; | ||
| 33 | |||
| 34 | struct ion_kernel_mapping { | ||
| 35 | struct kref ref; | ||
| 36 | void *vaddr; | ||
| 37 | }; | ||
| 38 | |||
| 39 | /** | ||
| 40 | * struct ion_device - the metadata of the ion device node | ||
| 41 | * @dev: the actual misc device | ||
| 42 | * @buffers: an rb tree of all the existing buffers | ||
| 43 | * @lock: lock protecting the buffers & heaps trees | ||
| 44 | * @heaps: list of all the heaps in the system | ||
| 45 | * @user_clients: list of all the clients created from userspace | ||
| 46 | */ | ||
| 47 | struct ion_device { | ||
| 48 | struct miscdevice dev; | ||
| 49 | struct rb_root buffers; | ||
| 50 | struct mutex lock; | ||
| 51 | struct rb_root heaps; | ||
| 52 | long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, | ||
| 53 | unsigned long arg); | ||
| 54 | struct rb_root user_clients; | ||
| 55 | struct rb_root kernel_clients; | ||
| 56 | struct dentry *debug_root; | ||
| 57 | }; | ||
| 58 | |||
| 59 | /** | ||
| 60 | * struct ion_client - a process/hw block local address space | ||
| 61 | * @ref: for reference counting the client | ||
| 62 | * @node: node in the tree of all clients | ||
| 63 | * @dev: backpointer to ion device | ||
| 64 | * @handles: an rb tree of all the handles in this client | ||
| 65 | * @lock: lock protecting the tree of handles | ||
| 66 | * @heap_mask: mask of all supported heaps | ||
| 67 | * @name: used for debugging | ||
| 68 | * @task: used for debugging | ||
| 69 | * | ||
| 70 | * A client represents a list of buffers this client may access. | ||
| 71 | * The mutex stored here is used to protect both the tree of handles | ||
| 72 | * and the handles themselves; it should be held while modifying either. | ||
| 73 | */ | ||
| 74 | struct ion_client { | ||
| 75 | struct kref ref; | ||
| 76 | struct rb_node node; | ||
| 77 | struct ion_device *dev; | ||
| 78 | struct rb_root handles; | ||
| 79 | struct mutex lock; | ||
| 80 | unsigned int heap_mask; | ||
| 81 | const char *name; | ||
| 82 | struct task_struct *task; | ||
| 83 | pid_t pid; | ||
| 84 | struct dentry *debug_root; | ||
| 85 | }; | ||
| 86 | |||
| 87 | /** | ||
| 88 | * ion_handle - a client local reference to a buffer | ||
| 89 | * @ref: reference count | ||
| 90 | * @client: back pointer to the client the buffer resides in | ||
| 91 | * @buffer: pointer to the buffer | ||
| 92 | * @node: node in the client's handle rbtree | ||
| 93 | * @kmap_cnt: count of times this client has mapped to kernel | ||
| 94 | * @dmap_cnt: count of times this client has mapped for dma | ||
| 95 | * @usermap_cnt: count of times this client has mapped for userspace | ||
| 96 | * | ||
| 97 | * Modifications to node and the *map_cnt fields should be protected by the | ||
| 98 | * lock in the client. Other fields are never changed after initialization. | ||
| 99 | */ | ||
| 100 | struct ion_handle { | ||
| 101 | struct kref ref; | ||
| 102 | struct ion_client *client; | ||
| 103 | struct ion_buffer *buffer; | ||
| 104 | struct rb_node node; | ||
| 105 | unsigned int kmap_cnt; | ||
| 106 | unsigned int dmap_cnt; | ||
| 107 | unsigned int usermap_cnt; | ||
| 108 | }; | ||
| 109 | |||
| 110 | bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle); | ||
| 111 | |||
| 112 | void ion_buffer_get(struct ion_buffer *buffer); | ||
| 113 | |||
| 114 | struct ion_buffer *ion_handle_buffer(struct ion_handle *handle); | ||
| 115 | |||
| 116 | struct ion_client *ion_client_get_file(int fd); | ||
| 117 | |||
| 118 | void ion_client_get(struct ion_client *client); | ||
| 119 | |||
| 120 | int ion_client_put(struct ion_client *client); | ||
| 121 | |||
| 122 | void ion_handle_get(struct ion_handle *handle); | ||
| 123 | |||
| 124 | int ion_handle_put(struct ion_handle *handle); | ||
| 125 | |||
| 126 | struct ion_handle *ion_handle_create(struct ion_client *client, | ||
| 127 | struct ion_buffer *buffer); | ||
| 128 | |||
| 129 | void ion_handle_add(struct ion_client *client, struct ion_handle *handle); | ||
| 130 | |||
| 131 | int ion_remap_dma(struct ion_client *client, | ||
| 132 | struct ion_handle *handle, | ||
| 133 | unsigned long addr); | ||
| 134 | /** | ||
| 135 | * struct ion_buffer - metadata for a particular buffer | ||
| 136 | * @ref: reference count | ||
| 137 | * @node: node in the ion_device buffers tree | ||
| 138 | * @dev: back pointer to the ion_device | ||
| 139 | * @heap: back pointer to the heap the buffer came from | ||
| 140 | * @flags: buffer specific flags | ||
| 141 | * @size: size of the buffer | ||
| 142 | * @priv_virt: private data to the buffer representable as | ||
| 143 | * a void * | ||
| 144 | * @priv_phys: private data to the buffer representable as | ||
| 145 | * an ion_phys_addr_t (and someday a phys_addr_t) | ||
| 146 | * @lock: protects the buffers cnt fields | ||
| 147 | * @kmap_cnt: number of times the buffer is mapped to the kernel | ||
| 148 | * @vaddr: the kernel mapping if kmap_cnt is not zero | ||
| 149 | * @dmap_cnt: number of times the buffer is mapped for dma | ||
| 150 | * @sglist: the scatterlist for the buffer if dmap_cnt is not zero | ||
| 151 | * @pages: list of pages allocated for the buffer | ||
| 152 | */ | ||
| 153 | struct ion_buffer { | ||
| 154 | struct kref ref; | ||
| 155 | struct rb_node node; | ||
| 156 | struct ion_device *dev; | ||
| 157 | struct ion_heap *heap; | ||
| 158 | unsigned long flags; | ||
| 159 | size_t size; | ||
| 160 | union { | ||
| 161 | void *priv_virt; | ||
| 162 | ion_phys_addr_t priv_phys; | ||
| 163 | }; | ||
| 164 | struct mutex lock; | ||
| 165 | int kmap_cnt; | ||
| 166 | void *vaddr; | ||
| 167 | int dmap_cnt; | ||
| 168 | struct scatterlist *sglist; | ||
| 169 | struct page **pages; | ||
| 170 | }; | ||
| 171 | |||
| 172 | /** | ||
| 173 | * struct ion_heap_ops - ops to operate on a given heap | ||
| 174 | * @allocate: allocate memory | ||
| 175 | * @free: free memory | ||
| 176 | * @phys: get the physical address of a buffer (only defined on | ||
| 177 | * physically contiguous heaps) | ||
| 178 | * @map_dma: map the memory for dma to a scatterlist | ||
| 179 | * @unmap_dma: unmap the memory for dma | ||
| 180 | * @map_kernel: map memory into the kernel | ||
| 181 | * @unmap_kernel: unmap memory from the kernel | ||
| 182 | * @map_user: map memory into userspace | ||
| 183 | */ | ||
| 184 | struct ion_heap_ops { | ||
| 185 | int (*allocate) (struct ion_heap *heap, | ||
| 186 | struct ion_buffer *buffer, unsigned long len, | ||
| 187 | unsigned long align, unsigned long flags); | ||
| 188 | void (*free) (struct ion_buffer *buffer); | ||
| 189 | int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer, | ||
| 190 | ion_phys_addr_t *addr, size_t *len); | ||
| 191 | struct scatterlist *(*map_dma) (struct ion_heap *heap, | ||
| 192 | struct ion_buffer *buffer); | ||
| 193 | void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer); | ||
| 194 | void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer); | ||
| 195 | void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer); | ||
| 196 | int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer, | ||
| 197 | struct vm_area_struct *vma); | ||
| 198 | }; | ||
| 199 | |||
| 200 | /** | ||
| 201 | * struct ion_heap - represents a heap in the system | ||
| 202 | * @node: rb node to put the heap on the device's tree of heaps | ||
| 203 | * @dev: back pointer to the ion_device | ||
| 204 | * @type: type of heap | ||
| 205 | * @ops: ops struct as above | ||
| 206 | * @id: id of heap, also indicates priority of this heap when | ||
| 207 | * allocating. These are specified by platform data and | ||
| 208 | * MUST be unique | ||
| 209 | * @name: used for debugging | ||
| 210 | * | ||
| 211 | * Represents a pool of memory from which buffers can be made. In some | ||
| 212 | * systems the only heap is regular system memory allocated via vmalloc. | ||
| 213 | * On others, some blocks might require large physically contiguous buffers | ||
| 214 | * that are allocated from a specially reserved heap. | ||
| 215 | */ | ||
| 216 | struct ion_heap { | ||
| 217 | struct rb_node node; | ||
| 218 | struct ion_device *dev; | ||
| 219 | enum ion_heap_type type; | ||
| 220 | struct ion_heap_ops *ops; | ||
| 221 | int id; | ||
| 222 | const char *name; | ||
| 223 | }; | ||
| 224 | |||
| 225 | /** | ||
| 226 | * ion_device_create - allocates and returns an ion device | ||
| 227 | * @custom_ioctl: arch specific ioctl function if applicable | ||
| 228 | * | ||
| 229 | * returns a valid device or an ERR_PTR() encoded error on failure | ||
| 230 | */ | ||
| 231 | struct ion_device *ion_device_create(long (*custom_ioctl) | ||
| 232 | (struct ion_client *client, | ||
| 233 | unsigned int cmd, | ||
| 234 | unsigned long arg)); | ||
| 235 | |||
| 236 | /** | ||
| 237 | * ion_device_destroy - frees a device and its resources | ||
| 238 | * @dev: the device | ||
| 239 | */ | ||
| 240 | void ion_device_destroy(struct ion_device *dev); | ||
| 241 | |||
| 242 | /** | ||
| 243 | * ion_device_add_heap - adds a heap to the ion device | ||
| 244 | * @dev: the device | ||
| 245 | * @heap: the heap to add | ||
| 246 | */ | ||
| 247 | void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); | ||
| 248 | |||
| 249 | /** | ||
| 250 | * functions for creating and destroying the built in ion heaps. | ||
| 251 | * architectures can add their own custom architecture specific | ||
| 252 | * heaps as appropriate. | ||
| 253 | */ | ||
| 254 | |||
| 255 | struct ion_heap *ion_heap_create(struct ion_platform_heap *); | ||
| 256 | void ion_heap_destroy(struct ion_heap *); | ||
| 257 | |||
| 258 | struct ion_heap *ion_system_heap_create(struct ion_platform_heap *); | ||
| 259 | void ion_system_heap_destroy(struct ion_heap *); | ||
| 260 | |||
| 261 | struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *); | ||
| 262 | void ion_system_contig_heap_destroy(struct ion_heap *); | ||
| 263 | |||
| 264 | struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *); | ||
| 265 | void ion_carveout_heap_destroy(struct ion_heap *); | ||
| 266 | /** | ||
| 267 | * kernel api to allocate/free from carveout -- used when carveout is | ||
| 268 | * used to back an architecture specific custom heap | ||
| 269 | */ | ||
| 270 | ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size, | ||
| 271 | unsigned long align); | ||
| 272 | void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, | ||
| 273 | unsigned long size); | ||
| 274 | |||
| 275 | #ifdef CONFIG_ION_IOMMU | ||
| 276 | struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *); | ||
| 277 | void ion_iommu_heap_destroy(struct ion_heap *); | ||
| 278 | #else | ||
| 279 | static inline struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *data) | ||
| 280 | { | ||
| 281 | return NULL; | ||
| 282 | } | ||
| 283 | static inline void ion_iommu_heap_destroy(struct ion_heap *heap) | ||
| 284 | { | ||
| 285 | } | ||
| 286 | #endif | ||
| 287 | /** | ||
| 288 | * The carveout heap returns physical addresses. Since 0 may be a valid | ||
| 289 | * physical address, this value is used to indicate that allocation failed. | ||
| 290 | */ | ||
| 291 | #define ION_CARVEOUT_ALLOCATE_FAIL -1 | ||
| 292 | |||
| 293 | #endif /* _ION_PRIV_H */ | ||
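
The declarations above imply the expected bring-up order: create the device, create each heap, then register the heaps. A minimal sketch under the assumption that no custom ioctl is needed; the example_* names are hypothetical and error handling is reduced to the essentials:

```c
#include <linux/err.h>
#include "ion_priv.h"

static struct ion_device *example_idev;
static struct ion_heap *example_heap;

static int example_ion_bringup(void)
{
	example_idev = ion_device_create(NULL);	/* no custom ioctl */
	if (IS_ERR_OR_NULL(example_idev))
		return example_idev ? PTR_ERR(example_idev) : -ENOMEM;

	example_heap = ion_system_heap_create(NULL);	/* pdata is unused */
	if (IS_ERR(example_heap)) {
		ion_device_destroy(example_idev);
		return PTR_ERR(example_heap);
	}

	ion_device_add_heap(example_idev, example_heap);
	return 0;
}
```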
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c new file mode 100644 index 00000000000..c046cf1a321 --- /dev/null +++ b/drivers/gpu/ion/ion_system_heap.c | |||
| @@ -0,0 +1,198 @@ | |||
| 1 | /* | ||
| 2 | * drivers/gpu/ion/ion_system_heap.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Google, Inc. | ||
| 5 | * | ||
| 6 | * This software is licensed under the terms of the GNU General Public | ||
| 7 | * License version 2, as published by the Free Software Foundation, and | ||
| 8 | * may be copied, distributed, and modified under those terms. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/err.h> | ||
| 18 | #include <linux/ion.h> | ||
| 19 | #include <linux/mm.h> | ||
| 20 | #include <linux/scatterlist.h> | ||
| 21 | #include <linux/slab.h> | ||
| 22 | #include <linux/vmalloc.h> | ||
| 23 | #include "ion_priv.h" | ||
| 24 | |||
| 25 | static int ion_system_heap_allocate(struct ion_heap *heap, | ||
| 26 | struct ion_buffer *buffer, | ||
| 27 | unsigned long size, unsigned long align, | ||
| 28 | unsigned long flags) | ||
| 29 | { | ||
| 30 | buffer->priv_virt = vmalloc_user(size); | ||
| 31 | if (!buffer->priv_virt) | ||
| 32 | return -ENOMEM; | ||
| 33 | return 0; | ||
| 34 | } | ||
| 35 | |||
| 36 | void ion_system_heap_free(struct ion_buffer *buffer) | ||
| 37 | { | ||
| 38 | vfree(buffer->priv_virt); | ||
| 39 | } | ||
| 40 | |||
| 41 | struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap, | ||
| 42 | struct ion_buffer *buffer) | ||
| 43 | { | ||
| 44 | struct scatterlist *sglist; | ||
| 45 | struct page *page; | ||
| 46 | int i; | ||
| 47 | int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; | ||
| 48 | void *vaddr = buffer->priv_virt; | ||
| 49 | |||
| 50 | sglist = vmalloc(npages * sizeof(struct scatterlist)); | ||
| 51 | if (!sglist) | ||
| 52 | return ERR_PTR(-ENOMEM); | ||
| 53 | memset(sglist, 0, npages * sizeof(struct scatterlist)); | ||
| 54 | sg_init_table(sglist, npages); | ||
| 55 | for (i = 0; i < npages; i++) { | ||
| 56 | page = vmalloc_to_page(vaddr); | ||
| 57 | if (!page) | ||
| 58 | goto end; | ||
| 59 | sg_set_page(&sglist[i], page, PAGE_SIZE, 0); | ||
| 60 | vaddr += PAGE_SIZE; | ||
| 61 | } | ||
| 62 | /* XXX do cache maintenance for dma? */ | ||
| 63 | return sglist; | ||
| 64 | end: | ||
| 65 | vfree(sglist); | ||
| 66 | return NULL; | ||
| 67 | } | ||
| 68 | |||
| 69 | void ion_system_heap_unmap_dma(struct ion_heap *heap, | ||
| 70 | struct ion_buffer *buffer) | ||
| 71 | { | ||
| 72 | /* XXX undo cache maintenance for dma? */ | ||
| 73 | if (buffer->sglist) | ||
| 74 | vfree(buffer->sglist); | ||
| 75 | } | ||
| 76 | |||
| 77 | void *ion_system_heap_map_kernel(struct ion_heap *heap, | ||
| 78 | struct ion_buffer *buffer) | ||
| 79 | { | ||
| 80 | return buffer->priv_virt; | ||
| 81 | } | ||
| 82 | |||
| 83 | void ion_system_heap_unmap_kernel(struct ion_heap *heap, | ||
| 84 | struct ion_buffer *buffer) | ||
| 85 | { | ||
| 86 | } | ||
| 87 | |||
| 88 | int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, | ||
| 89 | struct vm_area_struct *vma) | ||
| 90 | { | ||
| 91 | return remap_vmalloc_range(vma, buffer->priv_virt, vma->vm_pgoff); | ||
| 92 | } | ||
| 93 | |||
| 94 | static struct ion_heap_ops vmalloc_ops = { | ||
| 95 | .allocate = ion_system_heap_allocate, | ||
| 96 | .free = ion_system_heap_free, | ||
| 97 | .map_dma = ion_system_heap_map_dma, | ||
| 98 | .unmap_dma = ion_system_heap_unmap_dma, | ||
| 99 | .map_kernel = ion_system_heap_map_kernel, | ||
| 100 | .unmap_kernel = ion_system_heap_unmap_kernel, | ||
| 101 | .map_user = ion_system_heap_map_user, | ||
| 102 | }; | ||
| 103 | |||
| 104 | struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) | ||
| 105 | { | ||
| 106 | struct ion_heap *heap; | ||
| 107 | |||
| 108 | heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); | ||
| 109 | if (!heap) | ||
| 110 | return ERR_PTR(-ENOMEM); | ||
| 111 | heap->ops = &vmalloc_ops; | ||
| 112 | heap->type = ION_HEAP_TYPE_SYSTEM; | ||
| 113 | return heap; | ||
| 114 | } | ||
| 115 | |||
| 116 | void ion_system_heap_destroy(struct ion_heap *heap) | ||
| 117 | { | ||
| 118 | kfree(heap); | ||
| 119 | } | ||
| 120 | |||
| 121 | static int ion_system_contig_heap_allocate(struct ion_heap *heap, | ||
| 122 | struct ion_buffer *buffer, | ||
| 123 | unsigned long len, | ||
| 124 | unsigned long align, | ||
| 125 | unsigned long flags) | ||
| 126 | { | ||
| 127 | buffer->priv_virt = kzalloc(len, GFP_KERNEL); | ||
| 128 | if (!buffer->priv_virt) | ||
| 129 | return -ENOMEM; | ||
| 130 | return 0; | ||
| 131 | } | ||
| 132 | |||
| 133 | void ion_system_contig_heap_free(struct ion_buffer *buffer) | ||
| 134 | { | ||
| 135 | kfree(buffer->priv_virt); | ||
| 136 | } | ||
| 137 | |||
| 138 | static int ion_system_contig_heap_phys(struct ion_heap *heap, | ||
| 139 | struct ion_buffer *buffer, | ||
| 140 | ion_phys_addr_t *addr, size_t *len) | ||
| 141 | { | ||
| 142 | *addr = virt_to_phys(buffer->priv_virt); | ||
| 143 | *len = buffer->size; | ||
| 144 | return 0; | ||
| 145 | } | ||
| 146 | |||
| 147 | struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap, | ||
| 148 | struct ion_buffer *buffer) | ||
| 149 | { | ||
| 150 | struct scatterlist *sglist; | ||
| 151 | |||
| 152 | sglist = vmalloc(sizeof(struct scatterlist)); | ||
| 153 | if (!sglist) | ||
| 154 | return ERR_PTR(-ENOMEM); | ||
| 155 | sg_init_table(sglist, 1); | ||
| 156 | sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0); | ||
| 157 | return sglist; | ||
| 158 | } | ||
| 159 | |||
| 160 | int ion_system_contig_heap_map_user(struct ion_heap *heap, | ||
| 161 | struct ion_buffer *buffer, | ||
| 162 | struct vm_area_struct *vma) | ||
| 163 | { | ||
| 164 | unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt)); | ||
| 165 | return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, | ||
| 166 | vma->vm_end - vma->vm_start, | ||
| 167 | vma->vm_page_prot); | ||
| 168 | |||
| 169 | } | ||
| 170 | |||
| 171 | static struct ion_heap_ops kmalloc_ops = { | ||
| 172 | .allocate = ion_system_contig_heap_allocate, | ||
| 173 | .free = ion_system_contig_heap_free, | ||
| 174 | .phys = ion_system_contig_heap_phys, | ||
| 175 | .map_dma = ion_system_contig_heap_map_dma, | ||
| 176 | .unmap_dma = ion_system_heap_unmap_dma, | ||
| 177 | .map_kernel = ion_system_heap_map_kernel, | ||
| 178 | .unmap_kernel = ion_system_heap_unmap_kernel, | ||
| 179 | .map_user = ion_system_contig_heap_map_user, | ||
| 180 | }; | ||
| 181 | |||
| 182 | struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused) | ||
| 183 | { | ||
| 184 | struct ion_heap *heap; | ||
| 185 | |||
| 186 | heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); | ||
| 187 | if (!heap) | ||
| 188 | return ERR_PTR(-ENOMEM); | ||
| 189 | heap->ops = &kmalloc_ops; | ||
| 190 | heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG; | ||
| 191 | return heap; | ||
| 192 | } | ||
| 193 | |||
| 194 | void ion_system_contig_heap_destroy(struct ion_heap *heap) | ||
| 195 | { | ||
| 196 | kfree(heap); | ||
| 197 | } | ||
| 198 | |||
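
One subtlety worth spelling out: in both map_user paths the mmap() offset arrives as vma->vm_pgoff, already expressed in pages, and the contig heap simply adds it to the buffer's base pfn. A worked sketch of that arithmetic (example_target_pfn() is a hypothetical helper):

```c
#include <linux/mm.h>

static unsigned long example_target_pfn(void *buf_virt,
					struct vm_area_struct *vma)
{
	/* pfn of the first page of the kmalloc'd buffer */
	unsigned long base_pfn = __phys_to_pfn(virt_to_phys(buf_virt));

	/* same arithmetic as ion_system_contig_heap_map_user():
	 * an mmap() offset of N pages maps the buffer N pages in */
	return base_pfn + vma->vm_pgoff;
}
```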
diff --git a/drivers/gpu/ion/ion_system_mapper.c b/drivers/gpu/ion/ion_system_mapper.c new file mode 100644 index 00000000000..692458e07b5 --- /dev/null +++ b/drivers/gpu/ion/ion_system_mapper.c | |||
| @@ -0,0 +1,114 @@ | |||
| 1 | /* | ||
| 2 | * drivers/gpu/ion/ion_system_mapper.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Google, Inc. | ||
| 5 | * | ||
| 6 | * This software is licensed under the terms of the GNU General Public | ||
| 7 | * License version 2, as published by the Free Software Foundation, and | ||
| 8 | * may be copied, distributed, and modified under those terms. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/err.h> | ||
| 18 | #include <linux/ion.h> | ||
| 19 | #include <linux/memory.h> | ||
| 20 | #include <linux/mm.h> | ||
| 21 | #include <linux/slab.h> | ||
| 22 | #include <linux/vmalloc.h> | ||
| 23 | #include "ion_priv.h" | ||
| 24 | /* | ||
| 25 | * This mapper is valid for any heap that allocates memory that already has | ||
| 26 | * a kernel mapping; this includes vmalloc'd memory, kmalloc'd memory, | ||
| 27 | * pages obtained via ioremap(), etc. | ||
| 28 | */ | ||
| 29 | static void *ion_kernel_mapper_map(struct ion_mapper *mapper, | ||
| 30 | struct ion_buffer *buffer, | ||
| 31 | struct ion_mapping **mapping) | ||
| 32 | { | ||
| 33 | if (!((1 << buffer->heap->type) & mapper->heap_mask)) { | ||
| 34 | pr_err("%s: attempting to map an unsupported heap\n", __func__); | ||
| 35 | return ERR_PTR(-EINVAL); | ||
| 36 | } | ||
| 37 | /* XXX REVISIT ME!!! */ | ||
| 38 | *((unsigned long *)mapping) = (unsigned long)buffer->priv; | ||
| 39 | return buffer->priv; | ||
| 40 | } | ||
| 41 | |||
| 42 | static void ion_kernel_mapper_unmap(struct ion_mapper *mapper, | ||
| 43 | struct ion_buffer *buffer, | ||
| 44 | struct ion_mapping *mapping) | ||
| 45 | { | ||
| 46 | if (!((1 << buffer->heap->type) & mapper->heap_mask)) | ||
| 47 | pr_err("%s: attempting to unmap an unsupported heap\n", | ||
| 48 | __func__); | ||
| 49 | } | ||
| 50 | |||
| 51 | static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper, | ||
| 52 | struct ion_buffer *buffer, | ||
| 53 | struct ion_mapping *mapping) | ||
| 54 | { | ||
| 55 | if (!((1 << buffer->heap->type) & mapper->heap_mask)) { | ||
| 56 | pr_err("%s: attempting to unmap an unsupported heap\n", | ||
| 57 | __func__); | ||
| 58 | return ERR_PTR(-EINVAL); | ||
| 59 | } | ||
| 60 | return buffer->priv; | ||
| 61 | } | ||
| 62 | |||
| 63 | static int ion_kernel_mapper_map_user(struct ion_mapper *mapper, | ||
| 64 | struct ion_buffer *buffer, | ||
| 65 | struct vm_area_struct *vma, | ||
| 66 | struct ion_mapping *mapping) | ||
| 67 | { | ||
| 68 | int ret; | ||
| 69 | |||
| 70 | switch (buffer->heap->type) { | ||
| 71 | case ION_HEAP_KMALLOC: | ||
| 72 | { | ||
| 73 | unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv)); | ||
| 74 | ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, | ||
| 75 | vma->vm_end - vma->vm_start, | ||
| 76 | vma->vm_page_prot); | ||
| 77 | break; | ||
| 78 | } | ||
| 79 | case ION_HEAP_VMALLOC: | ||
| 80 | ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff); | ||
| 81 | break; | ||
| 82 | default: | ||
| 83 | pr_err("%s: attempting to map unsupported heap to userspace\n", | ||
| 84 | __func__); | ||
| 85 | return -EINVAL; | ||
| 86 | } | ||
| 87 | |||
| 88 | return ret; | ||
| 89 | } | ||
| 90 | |||
| 91 | static struct ion_mapper_ops ops = { | ||
| 92 | .map = ion_kernel_mapper_map, | ||
| 93 | .map_kernel = ion_kernel_mapper_map_kernel, | ||
| 94 | .map_user = ion_kernel_mapper_map_user, | ||
| 95 | .unmap = ion_kernel_mapper_unmap, | ||
| 96 | }; | ||
| 97 | |||
| 98 | struct ion_mapper *ion_system_mapper_create(void) | ||
| 99 | { | ||
| 100 | struct ion_mapper *mapper; | ||
| 101 | mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL); | ||
| 102 | if (!mapper) | ||
| 103 | return ERR_PTR(-ENOMEM); | ||
| 104 | mapper->type = ION_SYSTEM_MAPPER; | ||
| 105 | mapper->ops = &ops; | ||
| 106 | mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC); | ||
| 107 | return mapper; | ||
| 108 | } | ||
| 109 | |||
| 110 | void ion_system_mapper_destroy(struct ion_mapper *mapper) | ||
| 111 | { | ||
| 112 | kfree(mapper); | ||
| 113 | } | ||
| 114 | |||
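
Every op in this file guards itself with the same heap-mask test, so a heap is supported iff the bit for its type is set in mapper->heap_mask. The test, isolated into a hypothetical helper for clarity:

```c
#include <linux/ion.h>
#include <linux/types.h>

static bool example_mapper_supports(unsigned int heap_mask,
				    enum ion_heap_type type)
{
	/* mask built as (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC) */
	return heap_mask & (1 << type);
}
```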
diff --git a/drivers/gpu/ion/tegra/Makefile b/drivers/gpu/ion/tegra/Makefile new file mode 100644 index 00000000000..11cd003fb08 --- /dev/null +++ b/drivers/gpu/ion/tegra/Makefile | |||
| @@ -0,0 +1 @@ | |||
| obj-y += tegra_ion.o | |||
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c new file mode 100644 index 00000000000..2252079279e --- /dev/null +++ b/drivers/gpu/ion/tegra/tegra_ion.c | |||
| @@ -0,0 +1,599 @@ | |||
| 1 | /* | ||
| 2 | * drivers/gpu/ion/tegra/tegra_ion.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Google, Inc. | ||
| 5 | * Copyright (C) 2011, NVIDIA Corporation. | ||
| 6 | * | ||
| 7 | * This software is licensed under the terms of the GNU General Public | ||
| 8 | * License version 2, as published by the Free Software Foundation, and | ||
| 9 | * may be copied, distributed, and modified under those terms. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | * | ||
| 16 | */ | ||
| 17 | |||
| 18 | #define pr_fmt(fmt) "%s():%d: " fmt, __func__, __LINE__ | ||
| 19 | |||
| 20 | #include <linux/err.h> | ||
| 21 | #include <linux/ion.h> | ||
| 22 | #include <linux/tegra_ion.h> | ||
| 23 | #include <linux/platform_device.h> | ||
| 24 | #include <linux/slab.h> | ||
| 25 | #include <linux/uaccess.h> | ||
| 26 | #include <linux/syscalls.h> | ||
| 27 | #include <linux/io.h> | ||
| 28 | #include "../ion_priv.h" | ||
| 29 | |||
| 30 | #define CLIENT_HEAP_MASK 0xFFFFFFFF | ||
| 31 | #define HEAP_FLAGS 0xFF | ||
| 32 | |||
| 33 | #if !defined(CONFIG_TEGRA_NVMAP) | ||
| 34 | #include "mach/nvmap.h" | ||
| 35 | struct nvmap_device *nvmap_dev; | ||
| 36 | #endif | ||
| 37 | |||
| 38 | static struct ion_device *idev; | ||
| 39 | static int num_heaps; | ||
| 40 | static struct ion_heap **heaps; | ||
| 41 | |||
| 42 | static int tegra_ion_pin(struct ion_client *client, | ||
| 43 | unsigned int cmd, | ||
| 44 | unsigned long arg) | ||
| 45 | { | ||
| 46 | struct tegra_ion_pin_data data; | ||
| 47 | int ret = 0; | ||
| 48 | struct ion_handle *on_stack[16]; | ||
| 49 | struct ion_handle **refs = on_stack; | ||
| 50 | int i; | ||
| 51 | bool valid_handle; | ||
| 52 | |||
| 53 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
| 54 | return -EFAULT; | ||
| 55 | if (data.count) { | ||
| 56 | size_t bytes = data.count * sizeof(struct ion_handle *); | ||
| 57 | |||
| 58 | if (data.count > ARRAY_SIZE(on_stack)) | ||
| 59 | refs = kmalloc(data.count * sizeof(*refs), GFP_KERNEL); | ||
| 60 | else | ||
| 61 | refs = on_stack; | ||
| 62 | if (!refs) | ||
| 63 | return -ENOMEM; | ||
| 64 | if (copy_from_user(refs, (void __user *)data.handles, bytes)) { | ||
| 65 | ret = -EFAULT; | ||
| 66 | goto err; | ||
| 67 | } | ||
| 68 | } else | ||
| 69 | return -EINVAL; | ||
| 70 | |||
| 71 | mutex_lock(&client->lock); | ||
| 72 | for (i = 0; i < data.count; i++) { | ||
| 73 | /* Ignore NULL pointers during unpin operation. */ | ||
| 74 | if (!refs[i] && cmd == TEGRA_ION_UNPIN) | ||
| 75 | continue; | ||
| 76 | valid_handle = ion_handle_validate(client, refs[i]); | ||
| 77 | if (!valid_handle) { | ||
| 78 | WARN(1, "invalid handle passed h=0x%x", (u32)refs[i]); | ||
| 79 | mutex_unlock(&client->lock); | ||
| 80 | ret = -EINVAL; | ||
| 81 | goto err; | ||
| 82 | } | ||
| 83 | } | ||
| 84 | mutex_unlock(&client->lock); | ||
| 85 | |||
| 86 | if (cmd == TEGRA_ION_PIN) { | ||
| 87 | ion_phys_addr_t addr; | ||
| 88 | size_t len; | ||
| 89 | |||
| 90 | for (i = 0; i < data.count; i++) { | ||
| 91 | ret = ion_phys(client, refs[i], &addr, &len); | ||
| 92 | if (ret) | ||
| 93 | goto err; | ||
| 94 | ion_handle_get(refs[i]); | ||
| 95 | ret = put_user(addr, &data.addr[i]); | ||
| 96 | if (ret) | ||
| 97 | goto err; | ||
| 98 | } | ||
| 99 | } else if (cmd == TEGRA_ION_UNPIN) { | ||
| 100 | for (i = 0; i < data.count; i++) { | ||
| 101 | if (refs[i]) | ||
| 102 | ion_handle_put(refs[i]); | ||
| 103 | } | ||
| 104 | } | ||
| 105 | |||
| 106 | err: | ||
| 107 | if (ret) { | ||
| 108 | pr_err("error, ret=0x%x", ret); | ||
| 109 | /* FIXME: undo pinning. */ | ||
| 110 | } | ||
| 111 | if (refs != on_stack) | ||
| 112 | kfree(refs); | ||
| 113 | return ret; | ||
| 114 | } | ||
| 115 | |||
| 116 | static int tegra_ion_alloc_from_id(struct ion_client *client, | ||
| 117 | unsigned int cmd, | ||
| 118 | unsigned long arg) | ||
| 119 | { | ||
| 120 | struct tegra_ion_id_data data; | ||
| 121 | struct ion_buffer *buffer; | ||
| 122 | struct tegra_ion_id_data *user_data = (struct tegra_ion_id_data *)arg; | ||
| 123 | |||
| 124 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
| 125 | return -EFAULT; | ||
| 126 | buffer = (struct ion_buffer *)data.id; | ||
| 127 | data.handle = ion_import(client, buffer); | ||
| 128 | data.size = buffer->size; | ||
| 129 | if (put_user(data.handle, &user_data->handle)) | ||
| 130 | return -EFAULT; | ||
| 131 | if (put_user(data.size, &user_data->size)) | ||
| 132 | return -EFAULT; | ||
| 133 | return 0; | ||
| 134 | } | ||
| 135 | |||
| 136 | static int tegra_ion_get_id(struct ion_client *client, | ||
| 137 | unsigned int cmd, | ||
| 138 | unsigned long arg) | ||
| 139 | { | ||
| 140 | bool valid_handle; | ||
| 141 | struct tegra_ion_id_data data; | ||
| 142 | struct tegra_ion_id_data *user_data = (struct tegra_ion_id_data *)arg; | ||
| 143 | |||
| 144 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
| 145 | return -EFAULT; | ||
| 146 | |||
| 147 | mutex_lock(&client->lock); | ||
| 148 | valid_handle = ion_handle_validate(client, data.handle); | ||
| 149 | mutex_unlock(&client->lock); | ||
| 150 | |||
| 151 | if (!valid_handle) { | ||
| 152 | WARN(1, "invalid handle passed\n"); | ||
| 153 | return -EINVAL; | ||
| 154 | } | ||
| 155 | |||
| 156 | pr_debug("h=0x%x, b=0x%x, bref=%d", | ||
| 157 | (u32)data.handle, (u32)data.handle->buffer, | ||
| 158 | atomic_read(&data.handle->buffer->ref.refcount)); | ||
| 159 | if (put_user((unsigned long)ion_handle_buffer(data.handle), | ||
| 160 | &user_data->id)) | ||
| 161 | return -EFAULT; | ||
| 162 | return 0; | ||
| 163 | } | ||
| 164 | |||
| 165 | static int tegra_ion_cache_maint(struct ion_client *client, | ||
| 166 | unsigned int cmd, | ||
| 167 | unsigned long arg) | ||
| 168 | { | ||
| 169 | wmb(); | ||
| 170 | return 0; | ||
| 171 | } | ||
| 172 | |||
| 173 | static int tegra_ion_rw(struct ion_client *client, | ||
| 174 | unsigned int cmd, | ||
| 175 | unsigned long arg) | ||
| 176 | { | ||
| 177 | bool valid_handle; | ||
| 178 | struct tegra_ion_rw_data data; | ||
| 179 | char *kern_addr, *src; | ||
| 180 | int ret = 0; | ||
| 181 | size_t copied = 0; | ||
| 182 | |||
| 183 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
| 184 | return -EFAULT; | ||
| 185 | |||
| 186 | if (!data.handle || !data.addr || !data.count || !data.elem_size) | ||
| 187 | return -EINVAL; | ||
| 188 | |||
| 189 | mutex_lock(&client->lock); | ||
| 190 | valid_handle = ion_handle_validate(client, data.handle); | ||
| 191 | mutex_unlock(&client->lock); | ||
| 192 | |||
| 193 | if (!valid_handle) { | ||
| 194 | WARN(1, "%s: invalid handle passed to get id.\n", __func__); | ||
| 195 | return -EINVAL; | ||
| 196 | } | ||
| 197 | |||
| 198 | if (data.elem_size == data.mem_stride && | ||
| 199 | data.elem_size == data.user_stride) { | ||
| 200 | data.elem_size *= data.count; | ||
| 201 | data.mem_stride = data.elem_size; | ||
| 202 | data.user_stride = data.elem_size; | ||
| 203 | data.count = 1; | ||
| 204 | } | ||
| 205 | |||
| 206 | kern_addr = ion_map_kernel(client, data.handle); | ||
| 207 | |||
| 208 | while (data.count--) { | ||
| 209 | if (data.offset + data.elem_size > data.handle->buffer->size) { | ||
| 210 | WARN(1, "read/write outside of handle\n"); | ||
| 211 | ret = -EFAULT; | ||
| 212 | break; | ||
| 213 | } | ||
| 214 | |||
| 215 | src = kern_addr + data.offset; | ||
| 216 | if (cmd == TEGRA_ION_READ) | ||
| 217 | ret = copy_to_user((void __user *)data.addr, | ||
| 218 | src, data.elem_size) ? -EFAULT : 0; | ||
| 219 | else | ||
| 220 | ret = copy_from_user(src, (void __user *)data.addr, | ||
| 221 | data.elem_size) ? -EFAULT : 0; | ||
| 222 | |||
| 223 | if (ret) | ||
| 224 | break; | ||
| 225 | |||
| 226 | copied += data.elem_size; | ||
| 227 | data.addr += data.user_stride; | ||
| 228 | data.offset += data.mem_stride; | ||
| 229 | } | ||
| 230 | |||
| 231 | ion_unmap_kernel(client, data.handle); | ||
| 232 | return ret; | ||
| 233 | } | ||
| 234 | |||
| 235 | static int tegra_ion_get_param(struct ion_client *client, | ||
| 236 | unsigned int cmd, | ||
| 237 | unsigned long arg) | ||
| 238 | { | ||
| 239 | bool valid_handle; | ||
| 240 | struct tegra_ion_get_params_data data; | ||
| 241 | struct tegra_ion_get_params_data *user_data = | ||
| 242 | (struct tegra_ion_get_params_data *)arg; | ||
| 243 | struct ion_buffer *buffer; | ||
| 244 | |||
| 245 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
| 246 | return -EFAULT; | ||
| 247 | |||
| 248 | mutex_lock(&client->lock); | ||
| 249 | valid_handle = ion_handle_validate(client, data.handle); | ||
| 250 | mutex_unlock(&client->lock); | ||
| 251 | |||
| 252 | if (!valid_handle) { | ||
| 253 | WARN(1, "%s: invalid handle passed to get id.\n", __func__); | ||
| 254 | return -EINVAL; | ||
| 255 | } | ||
| 256 | |||
| 257 | buffer = ion_handle_buffer(data.handle); | ||
| 258 | data.align = 4096; | ||
| 259 | data.heap = 1; | ||
| 260 | ion_phys(client, data.handle, &data.addr, &data.size); | ||
| 261 | |||
| 262 | if (copy_to_user(user_data, &data, sizeof(data))) | ||
| 263 | return -EFAULT; | ||
| 264 | |||
| 265 | return 0; | ||
| 266 | } | ||
| 267 | |||
| 268 | static long tegra_ion_ioctl(struct ion_client *client, | ||
| 269 | unsigned int cmd, | ||
| 270 | unsigned long arg) | ||
| 271 | { | ||
| 272 | int ret = -ENOTTY; | ||
| 273 | |||
| 274 | switch (cmd) { | ||
| 275 | case TEGRA_ION_ALLOC_FROM_ID: | ||
| 276 | ret = tegra_ion_alloc_from_id(client, cmd, arg); | ||
| 277 | break; | ||
| 278 | case TEGRA_ION_GET_ID: | ||
| 279 | ret = tegra_ion_get_id(client, cmd, arg); | ||
| 280 | break; | ||
| 281 | case TEGRA_ION_PIN: | ||
| 282 | case TEGRA_ION_UNPIN: | ||
| 283 | ret = tegra_ion_pin(client, cmd, arg); | ||
| 284 | break; | ||
| 285 | case TEGRA_ION_CACHE_MAINT: | ||
| 286 | ret = tegra_ion_cache_maint(client, cmd, arg); | ||
| 287 | break; | ||
| 288 | case TEGRA_ION_READ: | ||
| 289 | case TEGRA_ION_WRITE: | ||
| 290 | ret = tegra_ion_rw(client, cmd, arg); | ||
| 291 | break; | ||
| 292 | case TEGRA_ION_GET_PARAM: | ||
| 293 | ret = tegra_ion_get_param(client, cmd, arg); | ||
| 294 | break; | ||
| 295 | default: | ||
| 296 | WARN(1, "Unknown custom ioctl\n"); | ||
| 297 | return -ENOTTY; | ||
| 298 | } | ||
| 299 | return ret; | ||
| 300 | } | ||
| 301 | |||
| 302 | int tegra_ion_probe(struct platform_device *pdev) | ||
| 303 | { | ||
| 304 | struct ion_platform_data *pdata = pdev->dev.platform_data; | ||
| 305 | int i; | ||
| 306 | |||
| 307 | num_heaps = pdata->nr; | ||
| 308 | |||
| 309 | heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL); | ||
| 310 | |||
| 311 | idev = ion_device_create(tegra_ion_ioctl); | ||
| 312 | if (IS_ERR_OR_NULL(idev)) { | ||
| 313 | kfree(heaps); | ||
| 314 | return idev ? PTR_ERR(idev) : -ENOMEM; | ||
| 315 | } | ||
| 316 | |||
| 317 | /* create the heaps as specified in the board file */ | ||
| 318 | for (i = 0; i < num_heaps; i++) { | ||
| 319 | struct ion_platform_heap *heap_data = &pdata->heaps[i]; | ||
| 320 | |||
| 321 | heaps[i] = ion_heap_create(heap_data); | ||
| 322 | if (IS_ERR_OR_NULL(heaps[i])) { | ||
| 323 | pr_warn("%s(type:%d id:%d) isn't supported\n", | ||
| 324 | heap_data->name, | ||
| 325 | heap_data->type, heap_data->id); | ||
| 326 | continue; | ||
| 327 | } | ||
| 328 | ion_device_add_heap(idev, heaps[i]); | ||
| 329 | } | ||
| 330 | platform_set_drvdata(pdev, idev); | ||
| 331 | #if !defined(CONFIG_TEGRA_NVMAP) | ||
| 332 | nvmap_dev = (struct nvmap_device *)idev; | ||
| 333 | #endif | ||
| 334 | return 0; | ||
| 335 | } | ||
| 336 | |||
| 337 | int tegra_ion_remove(struct platform_device *pdev) | ||
| 338 | { | ||
| 339 | struct ion_device *idev = platform_get_drvdata(pdev); | ||
| 340 | int i; | ||
| 341 | |||
| 342 | ion_device_destroy(idev); | ||
| 343 | for (i = 0; i < num_heaps; i++) | ||
| 344 | ion_heap_destroy(heaps[i]); | ||
| 345 | kfree(heaps); | ||
| 346 | return 0; | ||
| 347 | } | ||
| 348 | |||
| 349 | static struct platform_driver ion_driver = { | ||
| 350 | .probe = tegra_ion_probe, | ||
| 351 | .remove = tegra_ion_remove, | ||
| 352 | .driver = { .name = "ion-tegra" } | ||
| 353 | }; | ||
| 354 | |||
| 355 | static int __init ion_init(void) | ||
| 356 | { | ||
| 357 | return platform_driver_register(&ion_driver); | ||
| 358 | } | ||
| 359 | |||
| 360 | static void __exit ion_exit(void) | ||
| 361 | { | ||
| 362 | platform_driver_unregister(&ion_driver); | ||
| 363 | } | ||
| 364 | |||
| 365 | fs_initcall(ion_init); | ||
| 366 | module_exit(ion_exit); | ||
| 367 | |||
| 368 | #if !defined(CONFIG_TEGRA_NVMAP) | ||
| 369 | struct nvmap_client *nvmap_create_client(struct nvmap_device *dev, | ||
| 370 | const char *name) | ||
| 371 | { | ||
| 372 | return ion_client_create(dev, CLIENT_HEAP_MASK, name); | ||
| 373 | } | ||
| 374 | |||
| 375 | struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size, | ||
| 376 | size_t align, unsigned int flags, | ||
| 377 | unsigned int heap_mask) | ||
| 378 | { | ||
| 379 | return ion_alloc(client, size, align, HEAP_FLAGS); | ||
| 380 | } | ||
| 381 | |||
| 382 | void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r) | ||
| 383 | { | ||
| 384 | ion_free(client, r); | ||
| 385 | } | ||
| 386 | |||
| 387 | void *nvmap_mmap(struct nvmap_handle_ref *r) | ||
| 388 | { | ||
| 389 | return ion_map_kernel(r->client, r); | ||
| 390 | } | ||
| 391 | |||
| 392 | void nvmap_munmap(struct nvmap_handle_ref *r, void *addr) | ||
| 393 | { | ||
| 394 | ion_unmap_kernel(r->client, r); | ||
| 395 | } | ||
| 396 | |||
| 397 | struct nvmap_client *nvmap_client_get_file(int fd) | ||
| 398 | { | ||
| 399 | return ion_client_get_file(fd); | ||
| 400 | } | ||
| 401 | |||
| 402 | struct nvmap_client *nvmap_client_get(struct nvmap_client *client) | ||
| 403 | { | ||
| 404 | ion_client_get(client); | ||
| 405 | return client; | ||
| 406 | } | ||
| 407 | |||
| 408 | void nvmap_client_put(struct nvmap_client *c) | ||
| 409 | { | ||
| 410 | ion_client_put(c); | ||
| 411 | } | ||
| 412 | |||
| 413 | phys_addr_t nvmap_pin(struct nvmap_client *c, struct nvmap_handle_ref *r) | ||
| 414 | { | ||
| 415 | ion_phys_addr_t addr; | ||
| 416 | size_t len; | ||
| 417 | |||
| 418 | ion_handle_get(r); | ||
| 419 | ion_phys(c, r, &addr, &len); | ||
| 420 | wmb(); | ||
| 421 | return addr; | ||
| 422 | } | ||
| 423 | |||
| 424 | phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id) | ||
| 425 | { | ||
| 426 | struct ion_handle *handle; | ||
| 427 | ion_phys_addr_t addr; | ||
| 428 | size_t len; | ||
| 429 | |||
| 430 | handle = nvmap_convert_handle_u2k(id); | ||
| 431 | ion_phys(c, handle, &addr, &len); | ||
| 432 | return addr; | ||
| 433 | } | ||
| 434 | |||
| 435 | void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *r) | ||
| 436 | { | ||
| 437 | if (r) | ||
| 438 | ion_handle_put(r); | ||
| 439 | } | ||
| 440 | |||
| 441 | static int nvmap_reloc_pin_array(struct ion_client *client, | ||
| 442 | const struct nvmap_pinarray_elem *arr, | ||
| 443 | int nr, struct ion_handle *gather) | ||
| 444 | { | ||
| 445 | struct ion_handle *last_patch = NULL; | ||
| 446 | void *patch_addr; | ||
| 447 | ion_phys_addr_t pin_addr; | ||
| 448 | size_t len; | ||
| 449 | int i; | ||
| 450 | |||
| 451 | for (i = 0; i < nr; i++) { | ||
| 452 | struct ion_handle *patch; | ||
| 453 | struct ion_handle *pin; | ||
| 454 | ion_phys_addr_t reloc_addr; | ||
| 455 | |||
| 456 | /* all of the handles are validated and referenced prior to | ||
| 457 | * calling this function, so casting is safe here */ | ||
| 458 | pin = (struct ion_handle *)arr[i].pin_mem; | ||
| 459 | |||
| 460 | if (arr[i].patch_mem == (unsigned long)last_patch) { | ||
| 461 | patch = last_patch; | ||
| 462 | } else if (arr[i].patch_mem == (unsigned long)gather) { | ||
| 463 | patch = gather; | ||
| 464 | } else { | ||
| 465 | if (last_patch) | ||
| 466 | ion_handle_put(last_patch); | ||
| 467 | |||
| 468 | ion_handle_get((struct ion_handle *)arr[i].patch_mem); | ||
| 469 | patch = (struct ion_handle *)arr[i].patch_mem; | ||
| 470 | if (!patch) | ||
| 471 | return -EPERM; | ||
| 472 | last_patch = patch; | ||
| 473 | } | ||
| 474 | |||
| 475 | patch_addr = ion_map_kernel(client, patch); | ||
| 476 | patch_addr = patch_addr + arr[i].patch_offset; | ||
| 477 | |||
| 478 | ion_phys(client, pin, &pin_addr, &len); | ||
| 479 | reloc_addr = pin_addr + arr[i].pin_offset; | ||
| 480 | __raw_writel(reloc_addr, patch_addr); | ||
| 481 | ion_unmap_kernel(client, patch); | ||
| 482 | } | ||
| 483 | |||
| 484 | if (last_patch) | ||
| 485 | ion_handle_put(last_patch); | ||
| 486 | |||
| 487 | wmb(); | ||
| 488 | return 0; | ||
| 489 | } | ||
| 490 | |||
| 491 | int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather, | ||
| 492 | const struct nvmap_pinarray_elem *arr, int nr, | ||
| 493 | struct nvmap_handle **unique) | ||
| 494 | { | ||
| 495 | int i; | ||
| 496 | int count = 0; | ||
| 497 | |||
| 498 | /* FIXME: take care of duplicate ones & validation. */ | ||
| 499 | for (i = 0; i < nr; i++) { | ||
| 500 | unique[i] = (struct nvmap_handle *)arr[i].pin_mem; | ||
| 501 | nvmap_pin(client, (struct nvmap_handle_ref *)unique[i]); | ||
| 502 | count++; | ||
| 503 | } | ||
| 504 | nvmap_reloc_pin_array((struct ion_client *)client, | ||
| 505 | arr, nr, (struct ion_handle *)gather); | ||
| 506 | return nr; | ||
| 507 | } | ||
| 508 | |||
| 509 | void nvmap_unpin_handles(struct nvmap_client *client, | ||
| 510 | struct nvmap_handle **h, int nr) | ||
| 511 | { | ||
| 512 | int i; | ||
| 513 | |||
| 514 | for (i = 0; i < nr; i++) | ||
| 515 | nvmap_unpin(client, h[i]); | ||
| 516 | } | ||
| 517 | |||
| 518 | int nvmap_patch_word(struct nvmap_client *client, | ||
| 519 | struct nvmap_handle *patch, | ||
| 520 | u32 patch_offset, u32 patch_value) | ||
| 521 | { | ||
| 522 | void *vaddr; | ||
| 523 | u32 *patch_addr; | ||
| 524 | |||
| 525 | vaddr = ion_map_kernel(client, patch); | ||
| 526 | patch_addr = vaddr + patch_offset; | ||
| 527 | __raw_writel(patch_value, patch_addr); | ||
| 528 | wmb(); | ||
| 529 | ion_unmap_kernel(client, patch); | ||
| 530 | return 0; | ||
| 531 | } | ||
| 532 | |||
| 533 | struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h); | ||
| 534 | struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client, | ||
| 535 | unsigned long id) | ||
| 536 | { | ||
| 537 | struct ion_handle *handle; | ||
| 538 | |||
| 539 | handle = (struct ion_handle *)nvmap_convert_handle_u2k(id); | ||
| 540 | pr_debug("id=0x%x, h=0x%x,c=0x%x", | ||
| 541 | (u32)id, (u32)handle, (u32)client); | ||
| 542 | nvmap_handle_get(handle); | ||
| 543 | return handle; | ||
| 544 | } | ||
| 545 | |||
| 546 | struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client, | ||
| 547 | unsigned long id) | ||
| 548 | { | ||
| 549 | struct ion_buffer *buffer; | ||
| 550 | struct ion_handle *handle; | ||
| 551 | struct ion_client *ion_client = client; | ||
| 552 | |||
| 553 | handle = (struct ion_handle *)nvmap_convert_handle_u2k(id); | ||
| 554 | pr_debug("id=0x%x, h=0x%x,c=0x%x", | ||
| 555 | (u32)id, (u32)handle, (u32)client); | ||
| 556 | buffer = handle->buffer; | ||
| 557 | |||
| 558 | handle = ion_handle_create(client, buffer); | ||
| 559 | |||
| 560 | mutex_lock(&ion_client->lock); | ||
| 561 | ion_handle_add(ion_client, handle); | ||
| 562 | mutex_unlock(&ion_client->lock); | ||
| 563 | |||
| 564 | pr_debug("dup id=0x%x, h=0x%x", (u32)id, (u32)handle); | ||
| 565 | return handle; | ||
| 566 | } | ||
| 567 | |||
| 568 | void _nvmap_handle_free(struct nvmap_handle *h) | ||
| 569 | { | ||
| 570 | ion_handle_put(h); | ||
| 571 | } | ||
| 572 | |||
| 573 | struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client, | ||
| 574 | size_t size, size_t align, unsigned int flags, unsigned int iova_start) | ||
| 575 | { | ||
| 576 | struct ion_handle *h; | ||
| 577 | |||
| 578 | h = ion_alloc(client, size, align, 0xFF); | ||
| 579 | ion_remap_dma(client, h, iova_start); | ||
| 580 | return h; | ||
| 581 | } | ||
| 582 | |||
| 583 | void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r) | ||
| 584 | { | ||
| 585 | ion_free(client, r); | ||
| 586 | } | ||
| 587 | |||
| 588 | struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h) | ||
| 589 | { | ||
| 590 | ion_handle_get(h); | ||
| 591 | return h; | ||
| 592 | } | ||
| 593 | |||
| 594 | void nvmap_handle_put(struct nvmap_handle *h) | ||
| 595 | { | ||
| 596 | ion_handle_put(h); | ||
| 597 | } | ||
| 598 | |||
| 599 | #endif | ||
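
Userspace reaches these custom commands through ion's generic ION_IOC_CUSTOM ioctl, which wraps the inner cmd/arg pair that tegra_ion_ioctl() switches on. A hypothetical userspace sketch, assuming the definitions of struct ion_custom_data and TEGRA_ION_CACHE_MAINT are exported by <linux/ion.h> and <linux/tegra_ion.h>:

```c
#include <sys/ioctl.h>
#include <linux/ion.h>
#include <linux/tegra_ion.h>

static int example_cache_maint(int ion_fd)
{
	struct ion_custom_data data = {
		.cmd = TEGRA_ION_CACHE_MAINT,
		.arg = 0,	/* tegra_ion_cache_maint() ignores its argument */
	};

	/* the driver only issues a write barrier, then returns 0 */
	return ioctl(ion_fd, ION_IOC_CUSTOM, &data);
}
```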
