Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_tt.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_tt.c	635
1 file changed, 635 insertions, 0 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
new file mode 100644
index 000000000000..c27ab3a877ad
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -0,0 +1,635 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

#if defined(CONFIG_X86)
static void ttm_tt_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page, KM_USER0);

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(page_virtual + i);

	kunmap_atomic(page_virtual, KM_USER0);
}

static void ttm_tt_cache_flush_clflush(struct page *pages[],
				       unsigned long num_pages)
{
	unsigned long i;

	mb();
	for (i = 0; i < num_pages; ++i)
		ttm_tt_clflush_page(*pages++);
	mb();
}
#else
static void ttm_tt_ipi_handler(void *null)
{
	;
}
#endif

void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
{
#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		ttm_tt_cache_flush_clflush(pages, num_pages);
		return;
	}
#else
	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR TTM_PFX
		       "Timed out waiting for drm cache flush.\n");
#endif
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = kzalloc(size, GFP_KERNEL);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
	}
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
	} else {
		kfree(ttm->pages);
	}
	ttm->pages = NULL;
}

static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		return alloc_page(GFP_HIGHUSER | __GFP_ZERO);

	return alloc_page(GFP_HIGHUSER);
}

static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_bo_device *bdev = ttm->bdev;
	struct ttm_mem_global *mem_glob = bdev->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		if (PageHighMem(p)) {
			ret = ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						   false, false, true);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[--ttm->first_himem_page] = p;
		} else {
			ret = ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						   false, false, false);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[++ttm->last_lomem_page] = p;
		}
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	if (PageHighMem(p))
		return 0;

	switch (c_state) {
	case tt_cached:
		return set_pages_wb(p, 1);
	case tt_wc:
		return set_memory_wc((unsigned long) page_address(p), 1);
	default:
		return set_pages_uc(p, 1);
	}
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change the caching policy of the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	if (ttm->caching_state == tt_cached)
		ttm_tt_cache_flush(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page, c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}

static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
					    PageHighMem(cur_page));
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}

int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/*
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}

struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->bdev = bdev;

	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page))
			goto out_err;
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return -ENOMEM;
}

int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR TTM_PFX
			       "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(IS_ERR(to_page)))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return -ENOMEM;
}