Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--   drivers/gpu/drm/msm/msm_gem.c   597
1 file changed, 597 insertions, 0 deletions

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
new file mode 100644
index 000000000000..6b5a6c8c7658
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -0,0 +1,597 @@
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"


/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->pages) {
                struct drm_device *dev = obj->dev;
                struct page **p = drm_gem_get_pages(obj, 0);
                int npages = obj->size >> PAGE_SHIFT;

                if (IS_ERR(p)) {
                        dev_err(dev->dev, "could not get pages: %ld\n",
                                PTR_ERR(p));
                        return p;
                }

                msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
                if (!msm_obj->sgt) {
                        dev_err(dev->dev, "failed to allocate sgt\n");
                        return ERR_PTR(-ENOMEM);
                }

                msm_obj->pages = p;

                /* For non-cached buffers, ensure the new pages are clean
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_map_sg(dev->dev, msm_obj->sgt->sgl,
                                msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
        }

        return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (msm_obj->pages) {
                /* For non-cached buffers, ensure the new pages are clean
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
                                msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
                sg_free_table(msm_obj->sgt);
                kfree(msm_obj->sgt);

                drm_gem_put_pages(obj, msm_obj->pages, true, false);
                msm_obj->pages = NULL;
        }
}
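
/* Summary of the cache handling above (illustrative, based on the MSM_BO_*
 * cache flags used in this file): CACHED objects skip the dma_map_sg()/
 * dma_unmap_sg() calls entirely (cache maintenance for them is still a TODO
 * in msm_gem_cpu_prep()/msm_gem_cpu_fini() below), while WC and UNCACHED
 * objects get their pages cleaned once when they are first attached and
 * unmapped again when the pages are released.
 */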

int msm_gem_mmap_obj(struct drm_gem_object *obj,
                struct vm_area_struct *vma)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        if (msm_obj->flags & MSM_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        } else if (msm_obj->flags & MSM_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                fput(vma->vm_file);
                get_file(obj->filp);
                vma->vm_pgoff = 0;
                vma->vm_file = obj->filp;

                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        }

        return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
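
/* Illustrative userspace-side flow (not part of this file): a dumb buffer is
 * mapped through its fake mmap offset, which routes the mapping through
 * msm_gem_mmap() and then faults pages in lazily via msm_gem_fault():
 *
 *     struct drm_mode_map_dumb req = { .handle = handle };
 *     drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &req);
 *     ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                fd, req.offset);
 */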

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct drm_device *dev = obj->dev;
        struct page **pages;
        unsigned long pfn;
        pgoff_t pgoff;
        int ret;

        /* Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet
         */
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out;

        /* make sure we have pages attached now */
        pages = get_pages(obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out_unlock;
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;

        pfn = page_to_pfn(msm_obj->pages[pgoff]);

        VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
                        pfn, pfn << PAGE_SHIFT);

        ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
        mutex_unlock(&dev->struct_mutex);
out:
        switch (ret) {
        case -EAGAIN:
                set_need_resched();
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);

        if (ret) {
                dev_err(dev->dev, "could not allocate mmap offset\n");
                return 0;
        }

        return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
        uint64_t offset;
        mutex_lock(&obj->dev->struct_mutex);
        offset = mmap_offset(obj);
        mutex_unlock(&obj->dev->struct_mutex);
        return offset;
}

/* helpers for dealing w/ iommu: */
static int map_range(struct iommu_domain *domain, unsigned int iova,
                struct sg_table *sgt, unsigned int len, int prot)
{
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i, j;
        int ret;

        if (!domain || !sgt)
                return -EINVAL;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa = sg_phys(sg) - sg->offset;
                size_t bytes = sg->length + sg->offset;

                VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);

                ret = iommu_map(domain, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        return 0;

fail:
        da = iova;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes = sg->length + sg->offset;
                iommu_unmap(domain, da, bytes);
                da += bytes;
        }
        return ret;
}

static void unmap_range(struct iommu_domain *domain, unsigned int iova,
                struct sg_table *sgt, unsigned int len)
{
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg->length + sg->offset;
                size_t unmapped;

                unmapped = iommu_unmap(domain, da, bytes);
                if (unmapped < bytes)
                        break;

                VERB("unmap[%d]: %08x(%x)", i, iova, bytes);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

                da += bytes;
        }
}
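
/* Illustrative note: both helpers simply walk the object's sg_table and hand
 * each physically contiguous chunk to iommu_map()/iommu_unmap(); on failure
 * map_range() unwinds whatever it already mapped.  A hypothetical read-only
 * mapping of a buffer into domain 'dom' at 'iova' would be:
 *
 *     ret = map_range(dom, iova, msm_obj->sgt, obj->size, IOMMU_READ);
 */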

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
                uint32_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;

        if (!msm_obj->domain[id].iova) {
                struct msm_drm_private *priv = obj->dev->dev_private;
                uint32_t offset = (uint32_t)mmap_offset(obj);
                struct page **pages;
                pages = get_pages(obj);
                if (IS_ERR(pages))
                        return PTR_ERR(pages);
                // XXX ideally we would not map buffers writable when not needed...
                ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
                                obj->size, IOMMU_READ | IOMMU_WRITE);
                msm_obj->domain[id].iova = offset;
        }

        if (!ret)
                *iova = msm_obj->domain[id].iova;

        return ret;
}

int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
        int ret;
        mutex_lock(&obj->dev->struct_mutex);
        ret = msm_gem_get_iova_locked(obj, id, iova);
        mutex_unlock(&obj->dev->struct_mutex);
        return ret;
}
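
/* Illustrative usage ('id' selects one of the per-domain iommus kept in
 * msm_drm_private; unpinning is still a TODO, see msm_gem_put_iova() below):
 *
 *     uint32_t iova;
 *     int ret = msm_gem_get_iova(obj, id, &iova);
 *     if (ret)
 *             return ret;
 *     // 'iova' is the device address the GPU/display can now use; it stays
 *     // mapped until the object is freed.
 */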

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
        // XXX TODO ..
        // NOTE: probably don't need a _locked() version.. we wouldn't
        // normally unmap here, but instead just mark that it could be
        // unmapped (if the iova refcnt drops to zero), but then later
        // if another _get_iova_locked() fails we can start unmapping
        // things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args)
{
        args->pitch = align_pitch(args->width, args->bpp);
        args->size = PAGE_ALIGN(args->pitch * args->height);
        return msm_gem_new_handle(dev, file, args->size,
                        MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
                uint32_t handle)
{
        /* No special work needed, drop the reference and see what falls out */
        return drm_gem_handle_delete(file, handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        /* GEM does all our handle to object mapping */
        obj = drm_gem_object_lookup(dev, file, handle);
        if (obj == NULL) {
                ret = -ENOENT;
                goto fail;
        }

        *offset = msm_gem_mmap_offset(obj);

        drm_gem_object_unreference_unlocked(obj);

fail:
        return ret;
}

void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
        if (!msm_obj->vaddr) {
                struct page **pages = get_pages(obj);
                if (IS_ERR(pages))
                        return ERR_CAST(pages);
                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
        }
        return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
        void *ret;
        mutex_lock(&obj->dev->struct_mutex);
        ret = msm_gem_vaddr_locked(obj);
        mutex_unlock(&obj->dev->struct_mutex);
        return ret;
}

int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
                struct work_struct *work)
{
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;

        mutex_lock(&dev->struct_mutex);
        if (!list_empty(&work->entry)) {
                ret = -EINVAL;
        } else if (is_active(msm_obj)) {
                list_add_tail(&work->entry, &msm_obj->inactive_work);
        } else {
                queue_work(priv->wq, work);
        }
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
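
/* Illustrative usage (the callback and container are hypothetical, not part
 * of this file): callers hand in a work_struct that must only run once the
 * GPU is done with the buffer, e.g. cleanup after a pageflip:
 *
 *     INIT_WORK(&cb->work, post_flip_cleanup);
 *     ret = msm_gem_queue_inactive_work(obj, &cb->work);
 *
 * If the object is currently inactive the work is queued right away;
 * otherwise it is queued from msm_gem_move_to_inactive() below.
 */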

void msm_gem_move_to_active(struct drm_gem_object *obj,
                struct msm_gpu *gpu, uint32_t fence)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        msm_obj->gpu = gpu;
        msm_obj->fence = fence;
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        msm_obj->gpu = NULL;
        msm_obj->fence = 0;
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

        while (!list_empty(&msm_obj->inactive_work)) {
                struct work_struct *work;

                work = list_first_entry(&msm_obj->inactive_work,
                                struct work_struct, entry);

                list_del_init(&work->entry);
                queue_work(priv->wq, work);
        }
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
                struct timespec *timeout)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;

        if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
                ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);

        /* TODO cache maintenance */

        return ret;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
        /* TODO cache maintenance */
        return 0;
}
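
/* CPU access protocol as implemented here: msm_gem_cpu_prep() only waits for
 * the GPU (unless MSM_PREP_NOSYNC is set) and msm_gem_cpu_fini() is a no-op;
 * cache maintenance is still a TODO in both.  An illustrative caller
 * (assuming a MSM_PREP_READ flag alongside MSM_PREP_NOSYNC, and a timeout
 * filled in by the caller):
 *
 *     ret = msm_gem_cpu_prep(obj, MSM_PREP_READ, &timeout);
 *     if (!ret) {
 *             ... CPU reads ...
 *             msm_gem_cpu_fini(obj);
 *     }
 */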

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        uint64_t off = drm_vma_node_start(&obj->vma_node);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
        seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
                        msm_obj->fence, obj->name, obj->refcount.refcount.counter,
                        off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
        struct msm_gem_object *msm_obj;
        int count = 0;
        size_t size = 0;

        list_for_each_entry(msm_obj, list, mm_list) {
                struct drm_gem_object *obj = &msm_obj->base;
                seq_printf(m, " ");
                msm_gem_describe(obj, m);
                count++;
                size += obj->size;
        }

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int id;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* object should not be on active list: */
        WARN_ON(is_active(msm_obj));

        list_del(&msm_obj->mm_list);

        for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
                if (msm_obj->domain[id].iova) {
                        struct msm_drm_private *priv = obj->dev->dev_private;
                        uint32_t offset = (uint32_t)mmap_offset(obj);
                        unmap_range(priv->iommus[id], offset,
                                        msm_obj->sgt, obj->size);
                }
        }

        drm_gem_free_mmap_offset(obj);

        if (msm_obj->vaddr)
                vunmap(msm_obj->vaddr);

        put_pages(obj);

        if (msm_obj->resv == &msm_obj->_resv)
                reservation_object_fini(msm_obj->resv);

        drm_gem_object_release(obj);

        kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
                uint32_t size, uint32_t flags, uint32_t *handle)
{
        struct drm_gem_object *obj;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        obj = msm_gem_new(dev, size, flags);

        mutex_unlock(&dev->struct_mutex);

        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(obj);

        return ret;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
                uint32_t size, uint32_t flags)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;
        struct drm_gem_object *obj = NULL;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        size = PAGE_ALIGN(size);

        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_UNCACHED:
        case MSM_BO_CACHED:
        case MSM_BO_WC:
                break;
        default:
                dev_err(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));
                ret = -EINVAL;
                goto fail;
        }

        msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
        if (!msm_obj) {
                ret = -ENOMEM;
                goto fail;
        }

        obj = &msm_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
                goto fail;

        msm_obj->flags = flags;

        msm_obj->resv = &msm_obj->_resv;
        reservation_object_init(msm_obj->resv);

        INIT_LIST_HEAD(&msm_obj->submit_entry);
        INIT_LIST_HEAD(&msm_obj->inactive_work);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

        return obj;

fail:
        if (obj)
                drm_gem_object_unreference_unlocked(obj);

        return ERR_PTR(ret);
}
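
/* In-kernel allocation sketch (illustrative only; actual callers may differ):
 *
 *     struct drm_gem_object *bo;
 *     uint32_t iova;
 *     void *ptr;
 *
 *     mutex_lock(&dev->struct_mutex);
 *     bo = msm_gem_new(dev, size, MSM_BO_WC);
 *     mutex_unlock(&dev->struct_mutex);
 *     if (IS_ERR(bo))
 *             return PTR_ERR(bo);
 *
 *     ptr = msm_gem_vaddr(bo);                 // optional kernel mapping
 *     ret = msm_gem_get_iova(bo, id, &iova);   // device address for the HW
 *
 * Userspace allocations instead go through msm_gem_new_handle() or the
 * dumb-buffer ioctls above, which take struct_mutex themselves.
 */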