-rw-r--r--  drivers/gpu/drm/ttm/Makefile         |   2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c     |   7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 711
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c         |  44
-rw-r--r--  include/drm/ttm/ttm_page_alloc.h     |  70
5 files changed, 809 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 1e138f5bae09..4256e2006476 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,6 @@
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
-	ttm_object.o ttm_lock.o ttm_execbuf_util.o
+	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index c40e5f48e9a1..daff8a87977e 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -27,6 +27,7 @@
 
 #include "ttm/ttm_memory.h"
 #include "ttm/ttm_module.h"
+#include "ttm/ttm_page_alloc.h"
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
@@ -392,6 +393,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
 			"Zone %7s: Available graphics memory: %llu kiB.\n",
 			zone->name, (unsigned long long) zone->max_mem >> 10);
 	}
+	ttm_page_alloc_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE));
 	return 0;
 out_no_zone:
 	ttm_mem_global_release(glob);
@@ -404,6 +406,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 	unsigned int i;
 	struct ttm_mem_zone *zone;
 
+	/* let the page allocator first stop the shrink work. */
+	ttm_page_alloc_fini();
+
 	flush_workqueue(glob->swap_queue);
 	destroy_workqueue(glob->swap_queue);
 	glob->swap_queue = NULL;
@@ -411,7 +416,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 		zone = glob->zones[i];
 		kobject_del(&zone->kobj);
 		kobject_put(&zone->kobj);
-	}
+	}
 	kobject_del(&glob->kobj);
 	kobject_put(&glob->kobj);
 }
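As an aside (not part of the patch): the hunks above establish the pool allocator's lifecycle. It is sized at init time to at most half of the kernel zone, expressed in pages, and it is torn down before the zones and the swap workqueue are released. A hedged sketch of that ordering, with everything else elided and the function name hypothetical:

/* Hypothetical sketch of the init/release ordering introduced above. */
static int example_mem_global_lifecycle(struct ttm_mem_global *glob)
{
	/* Cap the pools at half of the kernel zone, converted to pages. */
	ttm_page_alloc_init(glob->zone_kernel->max_mem / (2 * PAGE_SIZE));

	/* ... normal operation: zones account memory, pools recycle pages ... */

	/* Stop the shrinker and drain all pools before the zones go away. */
	ttm_page_alloc_fini();
	return 0;
}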
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
new file mode 100644
index 000000000000..f46e40be0797
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -0,0 +1,711 @@
1/*
2 * Copyright (c) Red Hat Inc.
3
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie <airlied@redhat.com>
24 * Jerome Glisse <jglisse@redhat.com>
25 * Pauli Nieminen <suokkos@gmail.com>
26 */
27
28/* simple list-based uncached page pool
29 * - Pool collects recently freed pages for reuse
30 * - Use page->lru to keep a free list
31 * - doesn't track currently in use pages
32 */
33#include <linux/list.h>
34#include <linux/spinlock.h>
35#include <linux/highmem.h>
36#include <linux/mm_types.h>
37#include <linux/mm.h>
38
39#include <asm/atomic.h>
40#include <asm/agp.h>
41
42#include "ttm/ttm_bo_driver.h"
43#include "ttm/ttm_page_alloc.h"
44
45
46#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
47#define SMALL_ALLOCATION 16
48#define FREE_ALL_PAGES (~0U)
49/* times are in msecs */
50#define PAGE_FREE_INTERVAL 1000
51
52/**
53 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
54 *
55 * @lock: Protects the shared pool from concurrent access. Must be used with
56 * irqsave/irqrestore variants because the pool allocator may be called from
57 * delayed work.
58 * @fill_lock: Prevent concurrent calls to fill.
59 * @list: Pool of free uc/wc pages for fast reuse.
60 * @gfp_flags: Flags to pass for alloc_page.
61 * @npages: Number of pages in pool.
62 */
63struct ttm_page_pool {
64 spinlock_t lock;
65 bool fill_lock;
66 struct list_head list;
67 int gfp_flags;
68 unsigned npages;
69};
70
71struct ttm_pool_opts {
72 unsigned alloc_size;
73 unsigned max_size;
74 unsigned small;
75};
76
77#define NUM_POOLS 4
78
79/**
80 * struct ttm_pool_manager - Holds memory pools for fast allocation
81 *
82 * Manager is a read-only object for the pool code so it doesn't need locking.
83 *
84 * @free_interval: minimum number of jiffies between freeing pages from pool.
85 * @page_alloc_inited: reference counting for pool allocation.
86 * @work: Work that is used to shrink the pool. Work is only run when there are
87 * some pages to free.
88 * @small_allocation: Limit in number of pages below which an allocation counts as small.
89 *
90 * @pools: All pool objects in use.
91 **/
92struct ttm_pool_manager {
93 struct shrinker mm_shrink;
94 atomic_t page_alloc_inited;
95 struct ttm_pool_opts options;
96
97 union {
98 struct ttm_page_pool pools[NUM_POOLS];
99 struct {
100 struct ttm_page_pool wc_pool;
101 struct ttm_page_pool uc_pool;
102 struct ttm_page_pool wc_pool_dma32;
103 struct ttm_page_pool uc_pool_dma32;
104 } ;
105 };
106};
107
108static struct ttm_pool_manager _manager = {
109 .page_alloc_inited = ATOMIC_INIT(0)
110};
111
112#ifdef CONFIG_X86
113/* TODO: add this to x86 like _uc, this version here is inefficient */
114static int set_pages_array_wc(struct page **pages, int addrinarray)
115{
116 int i;
117
118 for (i = 0; i < addrinarray; i++)
119 set_memory_wc((unsigned long)page_address(pages[i]), 1);
120 return 0;
121}
122#else
123static int set_pages_array_wb(struct page **pages, int addrinarray)
124{
125#ifdef TTM_HAS_AGP
126 int i;
127
128 for (i = 0; i < addrinarray; i++)
129 unmap_page_from_agp(pages[i]);
130#endif
131 return 0;
132}
133
134static int set_pages_array_wc(struct page **pages, int addrinarray)
135{
136#ifdef TTM_HAS_AGP
137 int i;
138
139 for (i = 0; i < addrinarray; i++)
140 map_page_into_agp(pages[i]);
141#endif
142 return 0;
143}
144
145static int set_pages_array_uc(struct page **pages, int addrinarray)
146{
147#ifdef TTM_HAS_AGP
148 int i;
149
150 for (i = 0; i < addrinarray; i++)
151 map_page_into_agp(pages[i]);
152#endif
153 return 0;
154}
155#endif
156
157/**
158 * Select the right pool for the requested caching state and ttm flags. */
159static struct ttm_page_pool *ttm_get_pool(int flags,
160 enum ttm_caching_state cstate)
161{
162 int pool_index;
163
164 if (cstate == tt_cached)
165 return NULL;
166
167 if (cstate == tt_wc)
168 pool_index = 0x0;
169 else
170 pool_index = 0x1;
171
172 if (flags & TTM_PAGE_FLAG_DMA32)
173 pool_index |= 0x2;
174
175 return &_manager.pools[pool_index];
176}
177
178/* set memory back to wb and free the pages. */
179static void ttm_pages_put(struct page *pages[], unsigned npages)
180{
181 unsigned i;
182 if (set_pages_array_wb(pages, npages))
183 printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
184 npages);
185 for (i = 0; i < npages; ++i)
186 __free_page(pages[i]);
187}
188
189static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
190 unsigned freed_pages)
191{
192 pool->npages -= freed_pages;
193}
194
195/**
196 * Free pages from pool.
197 *
198 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
199 * number of pages in one go.
200 *
201 * @pool: to free the pages from
202 * @nr_free: If set to FREE_ALL_PAGES, all pages in the pool are freed
203 **/
204static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
205{
206 unsigned long irq_flags;
207 struct page *p;
208 struct page **pages_to_free;
209 unsigned freed_pages = 0,
210 npages_to_free = nr_free;
211
212 if (NUM_PAGES_TO_ALLOC < nr_free)
213 npages_to_free = NUM_PAGES_TO_ALLOC;
214
215 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
216 GFP_KERNEL);
217 if (!pages_to_free) {
218 printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
219 return 0;
220 }
221
222restart:
223 spin_lock_irqsave(&pool->lock, irq_flags);
224
225 list_for_each_entry_reverse(p, &pool->list, lru) {
226 if (freed_pages >= npages_to_free)
227 break;
228
229 pages_to_free[freed_pages++] = p;
230 /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
231 if (freed_pages >= NUM_PAGES_TO_ALLOC) {
232 /* remove range of pages from the pool */
233 __list_del(p->lru.prev, &pool->list);
234
235 ttm_pool_update_free_locked(pool, freed_pages);
236 /**
237 * Because changing page caching is costly
238 * we unlock the pool to prevent stalling.
239 */
240 spin_unlock_irqrestore(&pool->lock, irq_flags);
241
242 ttm_pages_put(pages_to_free, freed_pages);
243 if (likely(nr_free != FREE_ALL_PAGES))
244 nr_free -= freed_pages;
245
246 if (NUM_PAGES_TO_ALLOC >= nr_free)
247 npages_to_free = nr_free;
248 else
249 npages_to_free = NUM_PAGES_TO_ALLOC;
250
251 freed_pages = 0;
252
253 /* if there are still pages to free, restart the processing */
254 if (nr_free)
255 goto restart;
256
257 /* Not allowed to fall through or break because the
258 * following context is inside the spinlock while we are
259 * outside here.
260 */
261 goto out;
262
263 }
264 }
265
266
267 /* remove range of pages from the pool */
268 if (freed_pages) {
269 __list_del(&p->lru, &pool->list);
270
271 ttm_pool_update_free_locked(pool, freed_pages);
272 nr_free -= freed_pages;
273 }
274
275 spin_unlock_irqrestore(&pool->lock, irq_flags);
276
277 if (freed_pages)
278 ttm_pages_put(pages_to_free, freed_pages);
279out:
280 kfree(pages_to_free);
281 return nr_free;
282}
283
284/* Get a good estimate of how many pages are free in the pools */
285static int ttm_pool_get_num_unused_pages(void)
286{
287 unsigned i;
288 int total = 0;
289 for (i = 0; i < NUM_POOLS; ++i)
290 total += _manager.pools[i].npages;
291
292 return total;
293}
294
295/**
296 * Callback for mm to request that the pool reduce the number of pages held.
297 */
298static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
299{
300 static atomic_t start_pool = ATOMIC_INIT(0);
301 unsigned i;
302 unsigned pool_offset = atomic_add_return(1, &start_pool);
303 struct ttm_page_pool *pool;
304
305 pool_offset = pool_offset % NUM_POOLS;
306 /* select start pool in round robin fashion */
307 for (i = 0; i < NUM_POOLS; ++i) {
308 unsigned nr_free = shrink_pages;
309 if (shrink_pages == 0)
310 break;
311 pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
312 shrink_pages = ttm_page_pool_free(pool, nr_free);
313 }
314 /* return estimated number of unused pages in pool */
315 return ttm_pool_get_num_unused_pages();
316}
317
318static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
319{
320 manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
321 manager->mm_shrink.seeks = 1;
322 register_shrinker(&manager->mm_shrink);
323}
324
325static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
326{
327 unregister_shrinker(&manager->mm_shrink);
328}
329
330static int ttm_set_pages_caching(struct page **pages,
331 enum ttm_caching_state cstate, unsigned cpages)
332{
333 int r = 0;
334 /* Set page caching */
335 switch (cstate) {
336 case tt_uncached:
337 r = set_pages_array_uc(pages, cpages);
338 if (r)
339 printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
340 cpages);
341 break;
342 case tt_wc:
343 r = set_pages_array_wc(pages, cpages);
344 if (r)
345 printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
346 cpages);
347 break;
348 default:
349 break;
350 }
351 return r;
352}
353
354/**
355 * Free the pages that failed to change their caching state. If there are
356 * any pages that have already changed their caching state, put them back to
357 * the pool.
358 */
359static void ttm_handle_caching_state_failure(struct list_head *pages,
360 int ttm_flags, enum ttm_caching_state cstate,
361 struct page **failed_pages, unsigned cpages)
362{
363 unsigned i;
364 /* Failed pages have to be freed */
365 for (i = 0; i < cpages; ++i) {
366 list_del(&failed_pages[i]->lru);
367 __free_page(failed_pages[i]);
368 }
369}
370
371/**
372 * Allocate new pages with correct caching.
373 *
374 * This function is reentrant if the caller updates count depending on the number
375 * of pages returned in the pages list.
376 */
377static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
378 int ttm_flags, enum ttm_caching_state cstate, unsigned count)
379{
380 struct page **caching_array;
381 struct page *p;
382 int r = 0;
383 unsigned i, cpages;
384 unsigned max_cpages = min(count,
385 (unsigned)(PAGE_SIZE/sizeof(struct page *)));
386
387 /* allocate array for page caching change */
388 caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
389
390 if (!caching_array) {
391 printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
392 return -ENOMEM;
393 }
394
395 for (i = 0, cpages = 0; i < count; ++i) {
396 p = alloc_page(gfp_flags);
397
398 if (!p) {
399 printk(KERN_ERR "[ttm] unable to get page %u\n", i);
400
401 /* store already allocated pages in the pool after
402 * setting the caching state */
403 if (cpages) {
404 r = ttm_set_pages_caching(caching_array, cstate, cpages);
405 if (r)
406 ttm_handle_caching_state_failure(pages,
407 ttm_flags, cstate,
408 caching_array, cpages);
409 }
410 r = -ENOMEM;
411 goto out;
412 }
413
414#ifdef CONFIG_HIGHMEM
415 /* gfp flags of highmem page should never be dma32 so we
416 * should be fine in such a case
417 */
418 if (!PageHighMem(p))
419#endif
420 {
421 caching_array[cpages++] = p;
422 if (cpages == max_cpages) {
423
424 r = ttm_set_pages_caching(caching_array,
425 cstate, cpages);
426 if (r) {
427 ttm_handle_caching_state_failure(pages,
428 ttm_flags, cstate,
429 caching_array, cpages);
430 goto out;
431 }
432 cpages = 0;
433 }
434 }
435
436 list_add(&p->lru, pages);
437 }
438
439 if (cpages) {
440 r = ttm_set_pages_caching(caching_array, cstate, cpages);
441 if (r)
442 ttm_handle_caching_state_failure(pages,
443 ttm_flags, cstate,
444 caching_array, cpages);
445 }
446out:
447 kfree(caching_array);
448
449 return r;
450}
451
452/**
453 * Fill the given pool if there aren't enough pages and the requested number of
454 * pages is small.
455 */
456static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
457 int ttm_flags, enum ttm_caching_state cstate, unsigned count,
458 unsigned long *irq_flags)
459{
460 struct page *p;
461 int r;
462 unsigned cpages = 0;
463 /**
464 * Only allow one pool fill operation at a time.
465 * If pool doesn't have enough pages for the allocation new pages are
466 * allocated from outside of pool.
467 */
468 if (pool->fill_lock)
469 return;
470
471 pool->fill_lock = true;
472
473 /* If the allocation request is small and there are not enough
474 * pages in the pool, we fill the pool first */
475 if (count < _manager.options.small
476 && count > pool->npages) {
477 struct list_head new_pages;
478 unsigned alloc_size = _manager.options.alloc_size;
479
480 /**
481 * Can't change page caching if in irqsave context. We have to
482 * drop the pool->lock.
483 */
484 spin_unlock_irqrestore(&pool->lock, *irq_flags);
485
486 INIT_LIST_HEAD(&new_pages);
487 r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
488 cstate, alloc_size);
489 spin_lock_irqsave(&pool->lock, *irq_flags);
490
491 if (!r) {
492 list_splice(&new_pages, &pool->list);
493 pool->npages += alloc_size;
494 } else {
495 printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
496 /* If we have any pages left put them to the pool. */
497 list_for_each_entry(p, &new_pages, lru) {
498 ++cpages;
499 }
500 list_splice(&new_pages, &pool->list);
501 pool->npages += cpages;
502 }
503
504 }
505 pool->fill_lock = false;
506}
507
508/**
509 * Cut count number of pages from the pool and put them on the return list
510 *
511 * @return count of pages still to allocate to fill the request.
512 */
513static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
514 struct list_head *pages, int ttm_flags,
515 enum ttm_caching_state cstate, unsigned count)
516{
517 unsigned long irq_flags;
518 struct list_head *p;
519 unsigned i;
520
521 spin_lock_irqsave(&pool->lock, irq_flags);
522 ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
523
524 if (count >= pool->npages) {
525 /* take all pages from the pool */
526 list_splice_init(&pool->list, pages);
527 count -= pool->npages;
528 pool->npages = 0;
529 goto out;
530 }
531 /* find the last pages to include for the requested number of pages. Split
532 * the pool into two halves to reduce the search space. */
533 if (count <= pool->npages/2) {
534 i = 0;
535 list_for_each(p, &pool->list) {
536 if (++i == count)
537 break;
538 }
539 } else {
540 i = pool->npages + 1;
541 list_for_each_prev(p, &pool->list) {
542 if (--i == count)
543 break;
544 }
545 }
546 /* Cut count number of pages from pool */
547 list_cut_position(pages, &pool->list, p);
548 pool->npages -= count;
549 count = 0;
550out:
551 spin_unlock_irqrestore(&pool->lock, irq_flags);
552 return count;
553}
554
555/*
556 * On success the pages list will hold count number of correctly
557 * cached pages.
558 */
559int ttm_get_pages(struct list_head *pages, int flags,
560 enum ttm_caching_state cstate, unsigned count)
561{
562 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
563 struct page *p = NULL;
564 int gfp_flags = 0;
565 int r;
566
567 /* set zero flag for page allocation if required */
568 if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
569 gfp_flags |= __GFP_ZERO;
570
571 /* No pool for cached pages */
572 if (pool == NULL) {
573 if (flags & TTM_PAGE_FLAG_DMA32)
574 gfp_flags |= GFP_DMA32;
575 else
576 gfp_flags |= __GFP_HIGHMEM;
577
578 for (r = 0; r < count; ++r) {
579 p = alloc_page(gfp_flags);
580 if (!p) {
581
582 printk(KERN_ERR "[ttm] unable to allocate page.");
583 return -ENOMEM;
584 }
585
586 list_add(&p->lru, pages);
587 }
588 return 0;
589 }
590
591
592 /* combine zero flag to pool flags */
593 gfp_flags |= pool->gfp_flags;
594
595 /* First we take pages from the pool */
596 count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
597
598 /* clear the pages coming from the pool if requested */
599 if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
600 list_for_each_entry(p, pages, lru) {
601 clear_page(page_address(p));
602 }
603 }
604
605 /* If the pool didn't have enough pages, allocate new ones. */
606 if (count > 0) {
607 /* ttm_alloc_new_pages doesn't reference pool so we can run
608 * multiple requests in parallel.
609 **/
610 r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
611 if (r) {
612 /* If there are any pages in the list, put them back to
613 * the pool. */
614 printk(KERN_ERR "[ttm] Failed to allocate extra pages "
615 "for large request.");
616 ttm_put_pages(pages, 0, flags, cstate);
617 return r;
618 }
619 }
620
621
622 return 0;
623}
624
625/* Put all pages in pages list to correct pool to wait for reuse */
626void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
627 enum ttm_caching_state cstate)
628{
629 unsigned long irq_flags;
630 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
631 struct page *p, *tmp;
632
633 if (pool == NULL) {
634 /* No pool for this memory type so free the pages */
635
636 list_for_each_entry_safe(p, tmp, pages, lru) {
637 __free_page(p);
638 }
639 /* Make the pages list empty */
640 INIT_LIST_HEAD(pages);
641 return;
642 }
643 if (page_count == 0) {
644 list_for_each_entry_safe(p, tmp, pages, lru) {
645 ++page_count;
646 }
647 }
648
649 spin_lock_irqsave(&pool->lock, irq_flags);
650 list_splice_init(pages, &pool->list);
651 pool->npages += page_count;
652 /* Check that we don't go over the pool limit */
653 page_count = 0;
654 if (pool->npages > _manager.options.max_size) {
655 page_count = pool->npages - _manager.options.max_size;
656 /* free at least NUM_PAGES_TO_ALLOC number of pages
657 * to reduce calls to set_memory_wb */
658 if (page_count < NUM_PAGES_TO_ALLOC)
659 page_count = NUM_PAGES_TO_ALLOC;
660 }
661 spin_unlock_irqrestore(&pool->lock, irq_flags);
662 if (page_count)
663 ttm_page_pool_free(pool, page_count);
664}
665
666static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags)
667{
668 spin_lock_init(&pool->lock);
669 pool->fill_lock = false;
670 INIT_LIST_HEAD(&pool->list);
671 pool->npages = 0;
672 pool->gfp_flags = flags;
673}
674
675int ttm_page_alloc_init(unsigned max_pages)
676{
677 if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
678 return 0;
679
680 printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
681
682 ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER);
683
684 ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER);
685
686 ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32);
687
688 ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32);
689
690 _manager.options.max_size = max_pages;
691 _manager.options.small = SMALL_ALLOCATION;
692 _manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
693
694 ttm_pool_mm_shrink_init(&_manager);
695
696 return 0;
697}
698
699void ttm_page_alloc_fini(void)
700{
701 int i;
702
703 if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
704 return;
705
706 printk(KERN_INFO "[ttm] Finalizing pool allocator.\n");
707 ttm_pool_mm_shrink_fini(&_manager);
708
709 for (i = 0; i < NUM_POOLS; ++i)
710 ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
711}
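For illustration only (not part of the patch): a minimal sketch of how a caller might exercise the pool API implemented above. The function name is hypothetical; the flags and caching states are the ones this file already handles.

/* Hypothetical caller: allocate and release zeroed, write-combined pages. */
static int example_cycle_wc_pages(unsigned count)
{
	struct list_head pages;
	int r;

	INIT_LIST_HEAD(&pages);

	/* Zeroed wc pages; served from the wc pool when it has enough pages. */
	r = ttm_get_pages(&pages, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc, count);
	if (r)
		return r;

	/* ... use the pages ... */

	/* Return them; they are kept for reuse up to options.max_size. */
	ttm_put_pages(&pages, count, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc);
	return 0;
}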
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index bab6cd8d8a1e..a3269ef831c0 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -38,6 +38,7 @@
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
 
 static int ttm_tt_swapin(struct ttm_tt *ttm);
 
@@ -55,21 +56,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 	ttm->pages = NULL;
 }
 
-static struct page *ttm_tt_alloc_page(unsigned page_flags)
-{
-	gfp_t gfp_flags = GFP_USER;
-
-	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-		gfp_flags |= __GFP_ZERO;
-
-	if (page_flags & TTM_PAGE_FLAG_DMA32)
-		gfp_flags |= __GFP_DMA32;
-	else
-		gfp_flags |= __GFP_HIGHMEM;
-
-	return alloc_page(gfp_flags);
-}
-
 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 {
 	int write;
@@ -110,15 +96,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 {
 	struct page *p;
+	struct list_head h;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	int ret;
 
 	while (NULL == (p = ttm->pages[index])) {
-		p = ttm_tt_alloc_page(ttm->page_flags);
 
-		if (!p)
+		INIT_LIST_HEAD(&h);
+
+		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+
+		if (ret != 0)
 			return NULL;
 
+		p = list_first_entry(&h, struct page, lru);
+
 		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
 		if (unlikely(ret != 0))
 			goto out_err;
@@ -227,10 +219,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
 	if (ttm->caching_state == c_state)
 		return 0;
 
-	if (c_state != tt_cached) {
-		ret = ttm_tt_populate(ttm);
-		if (unlikely(ret != 0))
-			return ret;
+	if (ttm->state == tt_unpopulated) {
+		/* Change caching but don't populate */
+		ttm->caching_state = c_state;
+		return 0;
 	}
 
 	if (ttm->caching_state == tt_cached)
@@ -281,13 +273,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 {
 	int i;
+	unsigned count = 0;
+	struct list_head h;
 	struct page *cur_page;
 	struct ttm_backend *be = ttm->be;
 
+	INIT_LIST_HEAD(&h);
+
 	if (be)
 		be->func->clear(be);
-	(void)ttm_tt_set_caching(ttm, tt_cached);
 	for (i = 0; i < ttm->num_pages; ++i) {
+
 		cur_page = ttm->pages[i];
 		ttm->pages[i] = NULL;
 		if (cur_page) {
@@ -297,9 +293,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 				       "Leaking pages.\n");
 			ttm_mem_global_free_page(ttm->glob->mem_glob,
 						 cur_page);
-			__free_page(cur_page);
+			list_add(&cur_page->lru, &h);
+			count++;
 		}
 	}
+	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
 	ttm->state = tt_unpopulated;
 	ttm->first_himem_page = ttm->num_pages;
 	ttm->last_lomem_page = -1;
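For illustration only (not part of the patch): the __ttm_tt_get_page() hunk above fetches a single page through a one-entry list. The same pattern in isolation, with a hypothetical helper name:

/* Hypothetical helper mirroring the single-page pattern used above. */
static struct page *example_get_one_page(int page_flags,
					 enum ttm_caching_state cstate)
{
	struct list_head h;

	INIT_LIST_HEAD(&h);
	if (ttm_get_pages(&h, page_flags, cstate, 1) != 0)
		return NULL;

	/* The only entry on the list is the page we asked for. */
	return list_first_entry(&h, struct page, lru);
}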
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
new file mode 100644
index 000000000000..043d817b8164
--- /dev/null
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -0,0 +1,70 @@
1/*
2 * Copyright (c) Red Hat Inc.
3
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie <airlied@redhat.com>
24 * Jerome Glisse <jglisse@redhat.com>
25 */
26#ifndef TTM_PAGE_ALLOC
27#define TTM_PAGE_ALLOC
28
29#include "ttm_bo_driver.h"
30#include "ttm_memory.h"
31
32/**
33 * Get count number of pages from the pool and put them on the pages list.
34 *
35 * @pages: head of an empty linked list to which the allocated pages are added.
36 * @flags: ttm flags for page allocation.
37 * @cstate: ttm caching state for the page.
38 * @count: number of pages to allocate.
39 */
40int ttm_get_pages(struct list_head *pages,
41 int flags,
42 enum ttm_caching_state cstate,
43 unsigned count);
44/**
45 * Put linked list of pages to pool.
46 *
47 * @pages: list of pages to free.
48 * @page_count: number of pages in the list. Zero can be passed for unknown
49 * count.
50 * @flags: ttm flags for page allocation.
51 * @cstate: ttm caching state.
52 */
53void ttm_put_pages(struct list_head *pages,
54 unsigned page_count,
55 int flags,
56 enum ttm_caching_state cstate);
57/**
58 * Initialize pool allocator.
59 *
60 * Pool allocator is internally reference counted so it can be initialized
61 * multiple times but ttm_page_alloc_fini has to be called the same number of
62 * times.
63 */
64int ttm_page_alloc_init(unsigned max_pages);
65/**
66 * Free pool allocator.
67 */
68void ttm_page_alloc_fini(void);
69
70#endif
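For illustration only (not part of the header): ttm_put_pages() documents that zero may be passed when the caller does not know how many pages are on the list; the pool code then counts the list itself. A hedged sketch of that convention, with a hypothetical wrapper name:

/* Hypothetical wrapper relying on the page_count == 0 convention above. */
static void example_put_unknown_count(struct list_head *pages, int flags,
				      enum ttm_caching_state cstate)
{
	/* 0 == "count the pages in the list yourself". */
	ttm_put_pages(pages, 0, flags, cstate);
}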