Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_util.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_util.c	561
1 file changed, 561 insertions, 0 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
new file mode 100644
index 000000000000..517c84559633
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -0,0 +1,561 @@
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/version.h>
#include <linux/module.h>

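/*
 * Release the drm_mm node backing the buffer's old placement, taking the
 * device LRU lock to serialize against the memory manager.
 */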
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mm_node) {
		spin_lock(&bo->bdev->lru_lock);
		drm_mm_put_block(old_mem->mm_node);
		spin_unlock(&bo->bdev->lru_lock);
	}
	old_mem->mm_node = NULL;
}

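/*
 * Move a buffer by rebinding its TTM: unbind from the old (non-system)
 * placement, adjust caching to match the new placement, and bind to the
 * new one. No data copy is needed since the backing pages stay in system
 * memory throughout.
 */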
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	uint32_t save_flags = old_mem->placement;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
		save_flags = old_mem->placement;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

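/*
 * Map the IO space of a memory region into kernel virtual address space.
 * Pre-mapped regions are addressed through man->io_addr; everything else
 * is ioremapped, write-combined when the placement asks for it. *virtual
 * is left NULL for regions with no IO space (bus_size == 0).
 */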
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
	if (ret || bus_size == 0)
		return ret;

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
	else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(bus_base + bus_offset, bus_size);
		else
			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
		if (!addr)
			return -ENOMEM;
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		iounmap(virtual);
}

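/*
 * Page-copy helpers for ttm_bo_move_memcpy(): io-to-io copies go through
 * the 32-bit IO accessors, while copies to or from a TTM page use kmap()
 * together with memcpy_fromio()/memcpy_toio().
 */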
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = kmap(d);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);
	kunmap(d);
	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = kmap(s);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);
	kunmap(s);
	return 0;
}

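/*
 * Fallback move path: copy the buffer contents with the CPU, page by
 * page. When source and destination are in the same memory type and the
 * ranges may overlap, the copy runs backwards (dir == -1) so that data
 * is not overwritten before it has been read.
 */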
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	uint32_t save_flags = old_mem->placement;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL)
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
		else if (new_iomap == NULL)
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
		else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	spin_lock_init(&fbo->lock);
	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	if (fbo->mem.mm_node)
		fbo->mem.mm_node->private = (void *)fbo;
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}

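/*
 * Derive the page protection for a mapping from the buffer's caching
 * flags, using the per-architecture pgprot helpers: write-combined where
 * requested and supported, otherwise uncached for non-cached placements.
 */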
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

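/*
 * Set up a kmap object for a buffer that lives in IO memory: reuse the
 * manager's pre-mapped virtual address when available, otherwise ioremap
 * the bus range.
 */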
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long bus_base,
			  unsigned long bus_offset,
			  unsigned long bus_size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bus_base + bus_offset,
						  bus_size);
		else
			map->virtual = ioremap_nocache(bus_base + bus_offset,
						       bus_size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

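/*
 * Set up a kmap object for a buffer backed by a TTM: a single cached
 * page can be kmapped directly, anything else goes through vmap() so the
 * mapping gets the right page protection and appears contiguous.
 */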
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

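/*
 * Map part of a buffer object into kernel address space, dispatching to
 * ttm_bo_ioremap() for buffers with an IO aperture and ttm_bo_kmap_ttm()
 * for system-memory buffers.
 *
 * A hypothetical caller might look like this (names are illustrative,
 * not part of this patch):
 *
 *	struct ttm_bo_kmap_obj map;
 *	int ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret == 0) {
 *		memset(map.virtual, 0, bo->num_pages << PAGE_SHIFT);
 *		ttm_bo_kunmap(&map);
 *	}
 */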
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	int ret;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	if (ret)
		return ret;
	if (bus_size == 0) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		bus_offset += start_page << PAGE_SHIFT;
		bus_size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

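/*
 * Tear down a mapping created by ttm_bo_kmap(), using the unmap primitive
 * that matches how the mapping was set up.
 */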
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

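/*
 * Look up the page frame number and page protection for a given offset
 * into a buffer object, for use when building user-space mappings.
 */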
int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
		    unsigned long dst_offset,
		    unsigned long *pfn, pgprot_t *prot)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;
	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
				&bus_size);
	if (ret)
		return -EINVAL;
	if (bus_size != 0)
		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
	else
		if (!bo->ttm)
			return -EINVAL;
		else
			*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
							   dst_offset >>
							   PAGE_SHIFT));
	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);

	return 0;
}

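/*
 * Finish an accelerated (GPU-copy) move. The driver's fence is attached
 * as the buffer's sync object; on eviction we wait for it and release
 * the old placement right away, otherwise the old memory is handed to a
 * ghost buffer object that frees it once the copy has completed, keeping
 * the move pipelined.
 */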
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	uint32_t save_flags = old_mem->placement;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bo->lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bo->lock);
		driver->sync_obj_unref(&bo->sync_obj);

		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bo->lock);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);