author     Dave Airlie <airlied@redhat.com>  2010-04-19 23:12:28 -0400
committer  Dave Airlie <airlied@redhat.com>  2010-04-19 23:12:28 -0400
commit     c2b41276da65481d36311a13d69020d150861c43 (patch)
tree       5a67f674b77cfdc47bd386f114fbf5a6bff740c6
parent     97921a5b03d40681b3aed620a5e719710336c6df (diff)
parent     2125b8a44d771351fc44719ed291be70b73672ad (diff)
Merge branch 'drm-ttm-pool' into drm-core-next
* drm-ttm-pool:
  drm/ttm: using kmalloc/kfree requires including slab.h
  drm/ttm: include linux/seq_file.h for seq_printf
  drm/ttm: Add sysfs interface to control pool allocator.
  drm/ttm: Use set_pages_array_wc instead of set_memory_wc.
  arch/x86: Add array variants for setting memory to wc caching.
  drm/nouveau: Add ttm page pool debugfs file.
  drm/radeon/kms: Add ttm page pool debugfs file.
  drm/ttm: Add debugfs output entry to pool allocator.
  drm/ttm: add pool wc/uc page allocator V3
-rw-r--r--  arch/x86/include/asm/cacheflush.h            2
-rw-r--r--  arch/x86/mm/pageattr.c                      53
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c    3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c         13
-rw-r--r--  drivers/gpu/drm/ttm/Makefile                 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c             7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c       845
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c                44
-rw-r--r--  include/drm/ttm/ttm_page_alloc.h            74
9 files changed, 1007 insertions, 36 deletions
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 634c40a739a6..d92d63a6286b 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -139,9 +139,11 @@ int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wc(unsigned long *addr, int addrinarray);
 int set_memory_array_wb(unsigned long *addr, int addrinarray);
 
 int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wc(struct page **pages, int addrinarray);
 int set_pages_array_wb(struct page **pages, int addrinarray);
 
 /*
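
The new array variants batch the expensive cache-attribute change over many pages at once instead of flushing per page. A minimal, illustrative sketch of how a caller might use them; the helper name, batch handling and error policy below are hypothetical and not part of this commit:

	/* Hypothetical caller: convert a batch of pages to WC with one
	 * attribute change, and restore write-back caching when done. */
	static int example_wc_batch(struct page **pages, int count)
	{
		int ret;

		/* one flush for the whole array, not one per page */
		ret = set_pages_array_wc(pages, count);
		if (ret)
			return ret;	/* pages keep their old caching on failure */

		/* ... use the pages for write-combined I/O ... */

		return set_pages_array_wb(pages, count);
	}
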
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 28195c350b97..532e7933d606 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -997,7 +997,8 @@ out_err:
 }
 EXPORT_SYMBOL(set_memory_uc);
 
-int set_memory_array_uc(unsigned long *addr, int addrinarray)
+int _set_memory_array(unsigned long *addr, int addrinarray,
+		unsigned long new_type)
 {
 	int i, j;
 	int ret;
@@ -1007,13 +1008,19 @@ int set_memory_array_uc(unsigned long *addr, int addrinarray)
 	 */
 	for (i = 0; i < addrinarray; i++) {
 		ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
-					_PAGE_CACHE_UC_MINUS, NULL);
+					new_type, NULL);
 		if (ret)
 			goto out_free;
 	}
 
 	ret = change_page_attr_set(addr, addrinarray,
 				    __pgprot(_PAGE_CACHE_UC_MINUS), 1);
+
+	if (!ret && new_type == _PAGE_CACHE_WC)
+		ret = change_page_attr_set_clr(addr, addrinarray,
+					       __pgprot(_PAGE_CACHE_WC),
+					       __pgprot(_PAGE_CACHE_MASK),
+					       0, CPA_ARRAY, NULL);
 	if (ret)
 		goto out_free;
 
@@ -1025,8 +1032,19 @@ out_free:
 
 	return ret;
 }
+
+int set_memory_array_uc(unsigned long *addr, int addrinarray)
+{
+	return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS);
+}
 EXPORT_SYMBOL(set_memory_array_uc);
 
+int set_memory_array_wc(unsigned long *addr, int addrinarray)
+{
+	return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC);
+}
+EXPORT_SYMBOL(set_memory_array_wc);
+
 int _set_memory_wc(unsigned long addr, int numpages)
 {
 	int ret;
@@ -1153,26 +1171,34 @@ int set_pages_uc(struct page *page, int numpages)
 }
 EXPORT_SYMBOL(set_pages_uc);
 
-int set_pages_array_uc(struct page **pages, int addrinarray)
+static int _set_pages_array(struct page **pages, int addrinarray,
+		unsigned long new_type)
 {
 	unsigned long start;
 	unsigned long end;
 	int i;
 	int free_idx;
+	int ret;
 
 	for (i = 0; i < addrinarray; i++) {
 		if (PageHighMem(pages[i]))
 			continue;
 		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
-		if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
+		if (reserve_memtype(start, end, new_type, NULL))
 			goto err_out;
 	}
 
-	if (cpa_set_pages_array(pages, addrinarray,
-			__pgprot(_PAGE_CACHE_UC_MINUS)) == 0) {
-		return 0; /* Success */
-	}
+	ret = cpa_set_pages_array(pages, addrinarray,
+			__pgprot(_PAGE_CACHE_UC_MINUS));
+	if (!ret && new_type == _PAGE_CACHE_WC)
+		ret = change_page_attr_set_clr(NULL, addrinarray,
+					       __pgprot(_PAGE_CACHE_WC),
+					       __pgprot(_PAGE_CACHE_MASK),
+					       0, CPA_PAGES_ARRAY, pages);
+	if (ret)
+		goto err_out;
+	return 0; /* Success */
 err_out:
 	free_idx = i;
 	for (i = 0; i < free_idx; i++) {
@@ -1184,8 +1210,19 @@ err_out:
 	}
 	return -EINVAL;
 }
+
+int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS);
+}
 EXPORT_SYMBOL(set_pages_array_uc);
 
+int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC);
+}
+EXPORT_SYMBOL(set_pages_array_wc);
+
 int set_pages_wb(struct page *page, int numpages)
 {
 	unsigned long addr = (unsigned long)page_address(page);
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index a251886a0ce6..7933de4aff2e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -33,6 +33,8 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 
+#include <ttm/ttm_page_alloc.h>
+
 static int
 nouveau_debugfs_channel_info(struct seq_file *m, void *data)
 {
@@ -159,6 +161,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
159 { "chipset", nouveau_debugfs_chipset_info, 0, NULL }, 161 { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
160 { "memory", nouveau_debugfs_memory_info, 0, NULL }, 162 { "memory", nouveau_debugfs_memory_info, 0, NULL },
161 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, 163 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
164 { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
162}; 165};
163#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) 166#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
164 167
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d031b6863082..f06533676e7d 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -33,6 +33,7 @@
 #include <ttm/ttm_bo_driver.h>
 #include <ttm/ttm_placement.h>
 #include <ttm/ttm_module.h>
+#include <ttm/ttm_page_alloc.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include <linux/seq_file.h>
@@ -745,8 +746,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
-	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
+	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
+	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
 	unsigned i;
 
 	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -763,7 +764,13 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 		radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
 
 	}
-	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);
+	/* Add ttm page pool to debugfs */
+	sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+	radeon_mem_types_list[i].driver_features = 0;
+	radeon_mem_types_list[i].data = NULL;
+	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
 
 #endif
 	return 0;
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 1e138f5bae09..4256e2006476 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,6 @@
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
-	ttm_object.o ttm_lock.o ttm_execbuf_util.o
+	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 801b702566e6..e70ddd82dc02 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -27,6 +27,7 @@
 
 #include "ttm/ttm_memory.h"
 #include "ttm/ttm_module.h"
+#include "ttm/ttm_page_alloc.h"
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
@@ -393,6 +394,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
393 "Zone %7s: Available graphics memory: %llu kiB.\n", 394 "Zone %7s: Available graphics memory: %llu kiB.\n",
394 zone->name, (unsigned long long) zone->max_mem >> 10); 395 zone->name, (unsigned long long) zone->max_mem >> 10);
395 } 396 }
397 ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
396 return 0; 398 return 0;
397out_no_zone: 399out_no_zone:
398 ttm_mem_global_release(glob); 400 ttm_mem_global_release(glob);
@@ -405,6 +407,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 	unsigned int i;
 	struct ttm_mem_zone *zone;
 
+	/* let the page allocator first stop the shrink work. */
+	ttm_page_alloc_fini();
+
 	flush_workqueue(glob->swap_queue);
 	destroy_workqueue(glob->swap_queue);
 	glob->swap_queue = NULL;
@@ -412,7 +417,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 		zone = glob->zones[i];
 		kobject_del(&zone->kobj);
 		kobject_put(&zone->kobj);
-	} 
+	}
 	kobject_del(&glob->kobj);
 	kobject_put(&glob->kobj);
 }
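
The ttm_page_alloc_init() call above caps the pool at half of the kernel zone, converted to pages. A rough illustration of that arithmetic; the zone size and page size below are assumed values, not taken from this commit:

	#include <stdio.h>

	/* Illustrative only: reproduce the pool-size cap computed by
	 * ttm_mem_global_init() above, with assumed inputs. */
	int main(void)
	{
		unsigned long long max_mem = 2ULL << 30;   /* assumed 2 GiB kernel zone */
		unsigned long page_size = 4096;            /* assumed PAGE_SIZE */
		unsigned long max_pages = max_mem / (2 * page_size);

		/* half the zone: 2 GiB / (2 * 4 KiB) = 262144 pages (1 GiB) */
		printf("pool cap: %lu pages\n", max_pages);
		return 0;
	}
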
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
new file mode 100644
index 000000000000..0d9a42c2394f
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Jerome Glisse <jglisse@redhat.com>
+ *          Pauli Nieminen <suokkos@gmail.com>
+ */
+
+/* simple list based uncached page pool
+ * - Pool collects recently freed pages for reuse
+ * - Use page->lru to keep a free list
+ * - doesn't track currently in use pages
+ */
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+
+#include <asm/atomic.h>
+#include <asm/agp.h>
+
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
+
+
+#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION		16
+#define FREE_ALL_PAGES			(~0U)
+/* times are in msecs */
+#define PAGE_FREE_INTERVAL		1000
+
+/**
+ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
+ *
+ * @lock: Protects the shared pool from concurrent access. Must be used with
+ * irqsave/irqrestore variants because the pool allocator may be called from
+ * delayed work.
+ * @fill_lock: Prevent concurrent calls to fill.
+ * @list: Pool of free uc/wc pages for fast reuse.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @npages: Number of pages in pool.
+ */
+struct ttm_page_pool {
+	spinlock_t		lock;
+	bool			fill_lock;
+	struct list_head	list;
+	int			gfp_flags;
+	unsigned		npages;
+	char			*name;
+	unsigned long		nfrees;
+	unsigned long		nrefills;
+};
+
+/**
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is in sysfs store. They won't have immediate effect
+ * anyway so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+	unsigned	alloc_size;
+	unsigned	max_size;
+	unsigned	small;
+};
+
+#define NUM_POOLS 4
+
+/**
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * Manager is read only object for pool code so it doesn't need locking.
+ *
+ * @free_interval: minimum number of jiffies between freeing pages from pool.
+ * @page_alloc_inited: reference counting for pool allocation.
+ * @work: Work that is used to shrink the pool. Work is only run when there
+ * are pages to free.
+ * @small_allocation: Limit, in number of pages, below which an allocation is
+ * considered small.
+ *
+ * @pools: All pool objects in use.
+ **/
+struct ttm_pool_manager {
+	struct kobject		kobj;
+	struct shrinker		mm_shrink;
+	atomic_t		page_alloc_inited;
+	struct ttm_pool_opts	options;
+
+	union {
+		struct ttm_page_pool	pools[NUM_POOLS];
+		struct {
+			struct ttm_page_pool	wc_pool;
+			struct ttm_page_pool	uc_pool;
+			struct ttm_page_pool	wc_pool_dma32;
+			struct ttm_page_pool	uc_pool_dma32;
+		} ;
+	};
+};
+
+static struct attribute ttm_page_pool_max = {
+	.name = "pool_max_size",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+	.name = "pool_small_allocation",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+	.name = "pool_allocation_size",
+	.mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+	&ttm_page_pool_max,
+	&ttm_page_pool_small,
+	&ttm_page_pool_alloc_size,
+	NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	(void)m;
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj,
+		struct attribute *attr, const char *buffer, size_t size)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	int chars;
+	unsigned val;
+	chars = sscanf(buffer, "%u", &val);
+	if (chars == 0)
+		return size;
+
+	/* Convert kb to number of pages */
+	val = val / (PAGE_SIZE >> 10);
+
+	if (attr == &ttm_page_pool_max)
+		m->options.max_size = val;
+	else if (attr == &ttm_page_pool_small)
+		m->options.small = val;
+	else if (attr == &ttm_page_pool_alloc_size) {
+		if (val > NUM_PAGES_TO_ALLOC*8) {
+			printk(KERN_ERR "[ttm] Setting allocation size to %lu "
+					"is not allowed. Recommended size is "
+					"%lu\n",
+					NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+					NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			return size;
+		} else if (val > NUM_PAGES_TO_ALLOC) {
+			printk(KERN_WARNING "[ttm] Setting allocation size to "
+				"larger than %lu is not recommended.\n",
+				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+		}
+		m->options.alloc_size = val;
+	}
+
+	return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj,
+		struct attribute *attr, char *buffer)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	unsigned val = 0;
+
+	if (attr == &ttm_page_pool_max)
+		val = m->options.max_size;
+	else if (attr == &ttm_page_pool_small)
+		val = m->options.small;
+	else if (attr == &ttm_page_pool_alloc_size)
+		val = m->options.alloc_size;
+
+	val = val * (PAGE_SIZE >> 10);
+
+	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+	.show = &ttm_pool_show,
+	.store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+	.release = &ttm_pool_kobj_release,
+	.sysfs_ops = &ttm_pool_sysfs_ops,
+	.default_attrs = ttm_pool_attrs,
+};
+
+static struct ttm_pool_manager _manager = {
+	.page_alloc_inited	= ATOMIC_INIT(0)
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		unmap_page_from_agp(pages[i]);
+#endif
+	return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+#endif
+	return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+#endif
+	return 0;
+}
+#endif
+
+/**
+ * Select the right pool for requested caching state and ttm flags. */
+static struct ttm_page_pool *ttm_get_pool(int flags,
+		enum ttm_caching_state cstate)
+{
+	int pool_index;
+
+	if (cstate == tt_cached)
+		return NULL;
+
+	if (cstate == tt_wc)
+		pool_index = 0x0;
+	else
+		pool_index = 0x1;
+
+	if (flags & TTM_PAGE_FLAG_DMA32)
+		pool_index |= 0x2;
+
+	return &_manager.pools[pool_index];
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_pages_put(struct page *pages[], unsigned npages)
+{
+	unsigned i;
+	if (set_pages_array_wb(pages, npages))
+		printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+				npages);
+	for (i = 0; i < npages; ++i)
+		__free_page(pages[i]);
+}
+
+static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+		unsigned freed_pages)
+{
+	pool->npages -= freed_pages;
+	pool->nfrees += freed_pages;
+}
+
+/**
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * number of pages in one go.
+ *
+ * @pool: to free the pages from
+ * @nr_free: If set to FREE_ALL_PAGES will free all pages in pool
+ **/
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+{
+	unsigned long irq_flags;
+	struct page *p;
+	struct page **pages_to_free;
+	unsigned freed_pages = 0,
+		 npages_to_free = nr_free;
+
+	if (NUM_PAGES_TO_ALLOC < nr_free)
+		npages_to_free = NUM_PAGES_TO_ALLOC;
+
+	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+			GFP_KERNEL);
+	if (!pages_to_free) {
+		printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+		return 0;
+	}
+
+restart:
+	spin_lock_irqsave(&pool->lock, irq_flags);
+
+	list_for_each_entry_reverse(p, &pool->list, lru) {
+		if (freed_pages >= npages_to_free)
+			break;
+
+		pages_to_free[freed_pages++] = p;
+		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+			/* remove range of pages from the pool */
+			__list_del(p->lru.prev, &pool->list);
+
+			ttm_pool_update_free_locked(pool, freed_pages);
+			/**
+			 * Because changing page caching is costly
+			 * we unlock the pool to prevent stalling.
+			 */
+			spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+			ttm_pages_put(pages_to_free, freed_pages);
+			if (likely(nr_free != FREE_ALL_PAGES))
+				nr_free -= freed_pages;
+
+			if (NUM_PAGES_TO_ALLOC >= nr_free)
+				npages_to_free = nr_free;
+			else
+				npages_to_free = NUM_PAGES_TO_ALLOC;
+
+			freed_pages = 0;
+
+			/* free all so restart the processing */
+			if (nr_free)
+				goto restart;
+
+			/* Not allowed to fall through or break because
+			 * following context is inside spinlock while we are
+			 * outside here.
+			 */
+			goto out;
+
+		}
+	}
+
+	/* remove range of pages from the pool */
+	if (freed_pages) {
+		__list_del(&p->lru, &pool->list);
+
+		ttm_pool_update_free_locked(pool, freed_pages);
+		nr_free -= freed_pages;
+	}
+
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+	if (freed_pages)
+		ttm_pages_put(pages_to_free, freed_pages);
+out:
+	kfree(pages_to_free);
+	return nr_free;
+}
+
+/* Get a good estimate of how many pages are free in pools */
+static int ttm_pool_get_num_unused_pages(void)
+{
+	unsigned i;
+	int total = 0;
+	for (i = 0; i < NUM_POOLS; ++i)
+		total += _manager.pools[i].npages;
+
+	return total;
+}
+
+/**
+ * Callback for mm to request pool to reduce number of pages held.
+ */
+static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
+{
+	static atomic_t start_pool = ATOMIC_INIT(0);
+	unsigned i;
+	unsigned pool_offset = atomic_add_return(1, &start_pool);
+	struct ttm_page_pool *pool;
+
+	pool_offset = pool_offset % NUM_POOLS;
+	/* select start pool in round robin fashion */
+	for (i = 0; i < NUM_POOLS; ++i) {
+		unsigned nr_free = shrink_pages;
+		if (shrink_pages == 0)
+			break;
+		pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
+		shrink_pages = ttm_page_pool_free(pool, nr_free);
+	}
+	/* return estimated number of unused pages in pool */
+	return ttm_pool_get_num_unused_pages();
+}
+
+static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+	manager->mm_shrink.seeks = 1;
+	register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+	unregister_shrinker(&manager->mm_shrink);
+}
+
+static int ttm_set_pages_caching(struct page **pages,
+		enum ttm_caching_state cstate, unsigned cpages)
+{
+	int r = 0;
+	/* Set page caching */
+	switch (cstate) {
+	case tt_uncached:
+		r = set_pages_array_uc(pages, cpages);
+		if (r)
+			printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
+					cpages);
+		break;
+	case tt_wc:
+		r = set_pages_array_wc(pages, cpages);
+		if (r)
+			printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
+					cpages);
+		break;
+	default:
+		break;
+	}
+	return r;
+}
+
+/**
+ * Free the pages that failed to change the caching state. If there are any
+ * pages that have already changed their caching state, put them back to the
+ * pool.
+ */
+static void ttm_handle_caching_state_failure(struct list_head *pages,
+		int ttm_flags, enum ttm_caching_state cstate,
+		struct page **failed_pages, unsigned cpages)
+{
+	unsigned i;
+	/* Failed pages have to be freed */
+	for (i = 0; i < cpages; ++i) {
+		list_del(&failed_pages[i]->lru);
+		__free_page(failed_pages[i]);
+	}
+}
+
+/**
+ * Allocate new pages with correct caching.
+ *
+ * This function is reentrant if caller updates count depending on number of
+ * pages returned in pages array.
+ */
+static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+	struct page **caching_array;
+	struct page *p;
+	int r = 0;
+	unsigned i, cpages;
+	unsigned max_cpages = min(count,
+			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+	/* allocate array for page caching change */
+	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+	if (!caching_array) {
+		printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+		return -ENOMEM;
+	}
+
+	for (i = 0, cpages = 0; i < count; ++i) {
+		p = alloc_page(gfp_flags);
+
+		if (!p) {
+			printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+
+			/* store already allocated pages in the pool after
+			 * setting the caching state */
+			if (cpages) {
+				r = ttm_set_pages_caching(caching_array, cstate, cpages);
+				if (r)
+					ttm_handle_caching_state_failure(pages,
+						ttm_flags, cstate,
+						caching_array, cpages);
+			}
+			r = -ENOMEM;
+			goto out;
+		}
+
+#ifdef CONFIG_HIGHMEM
+		/* gfp flags of highmem page should never be dma32 so we
+		 * should be fine in such case
+		 */
+		if (!PageHighMem(p))
+#endif
+		{
+			caching_array[cpages++] = p;
+			if (cpages == max_cpages) {
+
+				r = ttm_set_pages_caching(caching_array,
+						cstate, cpages);
+				if (r) {
+					ttm_handle_caching_state_failure(pages,
+						ttm_flags, cstate,
+						caching_array, cpages);
+					goto out;
+				}
+				cpages = 0;
+			}
+		}
+
+		list_add(&p->lru, pages);
+	}
+
+	if (cpages) {
+		r = ttm_set_pages_caching(caching_array, cstate, cpages);
+		if (r)
+			ttm_handle_caching_state_failure(pages,
+					ttm_flags, cstate,
+					caching_array, cpages);
+	}
+out:
+	kfree(caching_array);
+
+	return r;
+}
+
+/**
+ * Fill the given pool if there aren't enough pages and the requested number
+ * of pages is small.
+ */
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
+		unsigned long *irq_flags)
+{
+	struct page *p;
+	int r;
+	unsigned cpages = 0;
+	/**
+	 * Only allow one pool fill operation at a time.
+	 * If pool doesn't have enough pages for the allocation new pages are
+	 * allocated from outside of pool.
+	 */
+	if (pool->fill_lock)
+		return;
+
+	pool->fill_lock = true;
+
+	/* If allocation request is small and there are not enough
+	 * pages in pool we fill the pool first */
+	if (count < _manager.options.small
+		&& count > pool->npages) {
+		struct list_head new_pages;
+		unsigned alloc_size = _manager.options.alloc_size;
+
+		/**
+		 * Can't change page caching if in irqsave context. We have to
+		 * drop the pool->lock.
+		 */
+		spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+		INIT_LIST_HEAD(&new_pages);
+		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
+				cstate, alloc_size);
+		spin_lock_irqsave(&pool->lock, *irq_flags);
+
+		if (!r) {
+			list_splice(&new_pages, &pool->list);
+			++pool->nrefills;
+			pool->npages += alloc_size;
+		} else {
+			printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+			/* If we have any pages left put them to the pool. */
+			list_for_each_entry(p, &pool->list, lru) {
+				++cpages;
+			}
+			list_splice(&new_pages, &pool->list);
+			pool->npages += cpages;
+		}
+
+	}
+	pool->fill_lock = false;
+}
+
+/**
+ * Cut count number of pages from the pool and put them to the return list.
+ *
+ * @return count of pages still to allocate to fill the request.
+ */
+static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+		struct list_head *pages, int ttm_flags,
+		enum ttm_caching_state cstate, unsigned count)
+{
+	unsigned long irq_flags;
+	struct list_head *p;
+	unsigned i;
+
+	spin_lock_irqsave(&pool->lock, irq_flags);
+	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+
+	if (count >= pool->npages) {
+		/* take all pages from the pool */
+		list_splice_init(&pool->list, pages);
+		count -= pool->npages;
+		pool->npages = 0;
+		goto out;
+	}
+	/* find the last pages to include for requested number of pages. Split
+	 * pool into begin and end halves to reduce search space. */
+	if (count <= pool->npages/2) {
+		i = 0;
+		list_for_each(p, &pool->list) {
+			if (++i == count)
+				break;
+		}
+	} else {
+		i = pool->npages + 1;
+		list_for_each_prev(p, &pool->list) {
+			if (--i == count)
+				break;
+		}
+	}
+	/* Cut count number of pages from pool */
+	list_cut_position(pages, &pool->list, p);
+	pool->npages -= count;
+	count = 0;
+out:
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+	return count;
+}
+
+/*
+ * On success pages list will hold count number of correctly
+ * cached pages.
+ */
+int ttm_get_pages(struct list_head *pages, int flags,
+		enum ttm_caching_state cstate, unsigned count)
+{
+	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	struct page *p = NULL;
+	int gfp_flags = 0;
+	int r;
+
+	/* set zero flag for page allocation if required */
+	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+		gfp_flags |= __GFP_ZERO;
+
+	/* No pool for cached pages */
+	if (pool == NULL) {
+		if (flags & TTM_PAGE_FLAG_DMA32)
+			gfp_flags |= GFP_DMA32;
+		else
+			gfp_flags |= __GFP_HIGHMEM;
+
+		for (r = 0; r < count; ++r) {
+			p = alloc_page(gfp_flags);
+			if (!p) {
+
+				printk(KERN_ERR "[ttm] unable to allocate page.");
+				return -ENOMEM;
+			}
+
+			list_add(&p->lru, pages);
+		}
+		return 0;
+	}
+
+
+	/* combine zero flag to pool flags */
+	gfp_flags |= pool->gfp_flags;
+
+	/* First we take pages from the pool */
+	count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+
+	/* clear the pages coming from the pool if requested */
+	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+		list_for_each_entry(p, pages, lru) {
+			clear_page(page_address(p));
+		}
+	}
+
+	/* If pool didn't have enough pages allocate new one. */
+	if (count > 0) {
+		/* ttm_alloc_new_pages doesn't reference pool so we can run
+		 * multiple requests in parallel.
+		 **/
+		r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+		if (r) {
+			/* If there are any pages in the list put them back to
+			 * the pool. */
+			printk(KERN_ERR "[ttm] Failed to allocate extra pages "
+					"for large request.");
+			ttm_put_pages(pages, 0, flags, cstate);
+			return r;
+		}
+	}
+
+
+	return 0;
+}
+
+/* Put all pages in pages list to correct pool to wait for reuse */
+void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+		enum ttm_caching_state cstate)
+{
+	unsigned long irq_flags;
+	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	struct page *p, *tmp;
+
+	if (pool == NULL) {
+		/* No pool for this memory type so free the pages */
+
+		list_for_each_entry_safe(p, tmp, pages, lru) {
+			__free_page(p);
+		}
+		/* Make the pages list empty */
+		INIT_LIST_HEAD(pages);
+		return;
+	}
+	if (page_count == 0) {
+		list_for_each_entry_safe(p, tmp, pages, lru) {
+			++page_count;
+		}
+	}
+
+	spin_lock_irqsave(&pool->lock, irq_flags);
+	list_splice_init(pages, &pool->list);
+	pool->npages += page_count;
+	/* Check that we don't go over the pool limit */
+	page_count = 0;
+	if (pool->npages > _manager.options.max_size) {
+		page_count = pool->npages - _manager.options.max_size;
+		/* free at least NUM_PAGES_TO_ALLOC number of pages
+		 * to reduce calls to set_memory_wb */
+		if (page_count < NUM_PAGES_TO_ALLOC)
+			page_count = NUM_PAGES_TO_ALLOC;
+	}
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+	if (page_count)
+		ttm_page_pool_free(pool, page_count);
+}
+
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+		char *name)
+{
+	spin_lock_init(&pool->lock);
+	pool->fill_lock = false;
+	INIT_LIST_HEAD(&pool->list);
+	pool->npages = pool->nfrees = 0;
+	pool->gfp_flags = flags;
+	pool->name = name;
+}
+
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+	int ret;
+	if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
+		return 0;
+
+	printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+
+	ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
+
+	ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
+
+	ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
+			"wc dma");
+
+	ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
+			"uc dma");
+
+	_manager.options.max_size = max_pages;
+	_manager.options.small = SMALL_ALLOCATION;
+	_manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+	kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
+	ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
+	if (unlikely(ret != 0)) {
+		kobject_put(&_manager.kobj);
+		return ret;
+	}
+
+	ttm_pool_mm_shrink_init(&_manager);
+
+	return 0;
+}
+
+void ttm_page_alloc_fini()
+{
+	int i;
+
+	if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
+		return;
+
+	printk(KERN_INFO "[ttm] Finalizing pool allocator.\n");
+	ttm_pool_mm_shrink_fini(&_manager);
+
+	for (i = 0; i < NUM_POOLS; ++i)
+		ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
+
+	kobject_put(&_manager.kobj);
+}
+
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+	struct ttm_page_pool *p;
+	unsigned i;
+	char *h[] = {"pool", "refills", "pages freed", "size"};
+	if (atomic_read(&_manager.page_alloc_inited) == 0) {
+		seq_printf(m, "No pool allocator running.\n");
+		return 0;
+	}
+	seq_printf(m, "%6s %12s %13s %8s\n",
+			h[0], h[1], h[2], h[3]);
+	for (i = 0; i < NUM_POOLS; ++i) {
+		p = &_manager.pools[i];
+
+		seq_printf(m, "%6s %12ld %13ld %8d\n",
+				p->name, p->nrefills,
+				p->nfrees, p->npages);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_page_alloc_debugfs);
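
Note the unit handling in ttm_pool_store()/ttm_pool_show() above: the sysfs files pool_max_size, pool_small_allocation and pool_allocation_size are read and written in kilobytes and converted to pages with val / (PAGE_SIZE >> 10). A small standalone check of that conversion; the page size and written value are assumptions for the example:

	#include <stdio.h>

	/* Illustrative check of the kB <-> pages conversion used by the
	 * sysfs handlers above, with an assumed 4 KiB page size. */
	int main(void)
	{
		unsigned page_size = 4096;               /* assumed PAGE_SIZE */
		unsigned kb = 131072;                    /* e.g. written to pool_max_size */
		unsigned pages = kb / (page_size >> 10); /* 131072 / 4 = 32768 pages */

		printf("%u kB -> %u pages\n", kb, pages);
		return 0;
	}
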
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d5fd5b8faeb3..a7bab87a548b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -39,6 +39,7 @@
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
 
 static int ttm_tt_swapin(struct ttm_tt *ttm);
 
@@ -56,21 +57,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 	ttm->pages = NULL;
 }
 
-static struct page *ttm_tt_alloc_page(unsigned page_flags)
-{
-	gfp_t gfp_flags = GFP_USER;
-
-	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-		gfp_flags |= __GFP_ZERO;
-
-	if (page_flags & TTM_PAGE_FLAG_DMA32)
-		gfp_flags |= __GFP_DMA32;
-	else
-		gfp_flags |= __GFP_HIGHMEM;
-
-	return alloc_page(gfp_flags);
-}
-
 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 {
 	int write;
@@ -111,15 +97,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 {
 	struct page *p;
+	struct list_head h;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	int ret;
 
 	while (NULL == (p = ttm->pages[index])) {
-		p = ttm_tt_alloc_page(ttm->page_flags);
 
-		if (!p)
+		INIT_LIST_HEAD(&h);
+
+		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+
+		if (ret != 0)
 			return NULL;
 
+		p = list_first_entry(&h, struct page, lru);
+
 		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
 		if (unlikely(ret != 0))
 			goto out_err;
@@ -228,10 +220,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
 	if (ttm->caching_state == c_state)
 		return 0;
 
-	if (c_state != tt_cached) {
-		ret = ttm_tt_populate(ttm);
-		if (unlikely(ret != 0))
-			return ret;
+	if (ttm->state == tt_unpopulated) {
+		/* Change caching but don't populate */
+		ttm->caching_state = c_state;
+		return 0;
 	}
 
 	if (ttm->caching_state == tt_cached)
@@ -282,13 +274,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 {
 	int i;
+	unsigned count = 0;
+	struct list_head h;
 	struct page *cur_page;
 	struct ttm_backend *be = ttm->be;
 
+	INIT_LIST_HEAD(&h);
+
 	if (be)
 		be->func->clear(be);
-	(void)ttm_tt_set_caching(ttm, tt_cached);
 	for (i = 0; i < ttm->num_pages; ++i) {
+
 		cur_page = ttm->pages[i];
 		ttm->pages[i] = NULL;
 		if (cur_page) {
@@ -298,9 +294,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
298 "Leaking pages.\n"); 294 "Leaking pages.\n");
299 ttm_mem_global_free_page(ttm->glob->mem_glob, 295 ttm_mem_global_free_page(ttm->glob->mem_glob,
300 cur_page); 296 cur_page);
301 __free_page(cur_page); 297 list_add(&cur_page->lru, &h);
298 count++;
302 } 299 }
303 } 300 }
301 ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
304 ttm->state = tt_unpopulated; 302 ttm->state = tt_unpopulated;
305 ttm->first_himem_page = ttm->num_pages; 303 ttm->first_himem_page = ttm->num_pages;
306 ttm->last_lomem_page = -1; 304 ttm->last_lomem_page = -1;
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
new file mode 100644
index 000000000000..8bb4de567b2c
--- /dev/null
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Jerome Glisse <jglisse@redhat.com>
+ */
+#ifndef TTM_PAGE_ALLOC
+#define TTM_PAGE_ALLOC
+
+#include "ttm_bo_driver.h"
+#include "ttm_memory.h"
+
+/**
+ * Get count number of pages from pool to pages list.
+ *
+ * @pages: head of empty linked list where pages are filled.
+ * @flags: ttm flags for page allocation.
+ * @cstate: ttm caching state for the page.
+ * @count: number of pages to allocate.
+ */
+int ttm_get_pages(struct list_head *pages,
+		  int flags,
+		  enum ttm_caching_state cstate,
+		  unsigned count);
+/**
+ * Put linked list of pages to pool.
+ *
+ * @pages: list of pages to free.
+ * @page_count: number of pages in the list. Zero can be passed for unknown
+ * count.
+ * @flags: ttm flags for page allocation.
+ * @cstate: ttm caching state.
+ */
+void ttm_put_pages(struct list_head *pages,
+		   unsigned page_count,
+		   int flags,
+		   enum ttm_caching_state cstate);
+/**
+ * Initialize pool allocator.
+ *
+ * The pool allocator is internally reference counted, so it can be
+ * initialized multiple times, but ttm_page_alloc_fini has to be called the
+ * same number of times.
+ */
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+/**
+ * Free pool allocator.
+ */
+void ttm_page_alloc_fini(void);
+
+/**
+ * Output the state of pools to debugfs file
+ */
+extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+#endif
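
The header above documents the whole public surface of the new allocator. A hypothetical driver-side round trip through that API might look like the sketch below; the flags, caching state and page count are assumptions for illustration, and error handling is trimmed:

	/* Hypothetical usage sketch of the API declared above; not code
	 * from this commit. */
	static int demo_pool_roundtrip(void)
	{
		struct list_head pages;
		int ret;

		INIT_LIST_HEAD(&pages);

		/* ask the pool for 16 zeroed, write-combined pages */
		ret = ttm_get_pages(&pages, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc, 16);
		if (ret)
			return ret;

		/* ... map and use the pages ... */

		/* hand them back; they stay WC in the pool for fast reuse */
		ttm_put_pages(&pages, 16, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc);
		return 0;
	}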