author	Thomas Hellstrom <thellstrom@vmware.com>	2009-08-17 10:28:39 -0400
committer	Dave Airlie <airlied@linux.ie>	2009-08-19 02:09:53 -0400
commit	5fd9cbad3a4ae82c83c55b9c621d156c326724ef (patch)
tree	1a0868a3bd2751fa861c083aeb3ac27f3f695694
parent	e9840be8c23601285a70520b4898818f28ce8c2b (diff)
drm/ttm: Memory accounting rework.
Use inclusive zones to simplify accounting and its sysfs representation.
Use DMA32 accounting where applicable.
Add a sysfs interface to make the heuristically determined limits
readable and configurable.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@linux.ie>
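[Editor's note] The headline feature is the per-zone sysfs interface. A sketch of the layout this patch creates (illustrative only: the root depends on where ttm_get_kobj() anchors the TTM kobject, which is outside this diff); all values are reported and accepted in KiB, per the >>10 / <<10 shifts in ttm_mem_zone_show()/_store() below:

	<ttm kobject>/memory_accounting/
		kernel/                   (always present)
			zone_memory       read-only:  total zone size
			used_memory       read-only:  currently accounted memory
			available_memory  read/write: limit for ordinary processes
			emergency_memory  read/write: limit for CAP_SYS_ADMIN
			swap_limit        read/write: threshold that wakes the swap worker
		highmem/  or  dma32/      (same attributes; which one exists
		                           depends on CONFIG_HIGHMEM)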
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	6
-rw-r--r--	drivers/gpu/drm/ttm/ttm_global.c	4
-rw-r--r--	drivers/gpu/drm/ttm/ttm_memory.c	488
-rw-r--r--	drivers/gpu/drm/ttm/ttm_tt.c	29
-rw-r--r--	include/drm/ttm/ttm_memory.h	43
-rw-r--r--	include/drm/ttm/ttm_module.h	2
6 files changed, 453 insertions(+), 119 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c1c407f7cca3..f16909ceec93 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -70,7 +70,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	if (bo->destroy)
 		bo->destroy(bo);
 	else {
-		ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
+		ttm_mem_global_free(bdev->mem_glob, bo->acc_size);
 		kfree(bo);
 	}
 }
@@ -1065,14 +1065,14 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
 
 	size_t acc_size =
 	    ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
+	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
 		return ret;
 
 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 
 	if (unlikely(bo == NULL)) {
-		ttm_mem_global_free(mem_glob, acc_size, false);
+		ttm_mem_global_free(mem_glob, acc_size);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
index 0b14eb1972b8..541744d00d3e 100644
--- a/drivers/gpu/drm/ttm/ttm_global.c
+++ b/drivers/gpu/drm/ttm/ttm_global.c
@@ -71,7 +71,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
 
 	mutex_lock(&item->mutex);
 	if (item->refcount == 0) {
-		item->object = kmalloc(ref->size, GFP_KERNEL);
+		item->object = kzalloc(ref->size, GFP_KERNEL);
 		if (unlikely(item->object == NULL)) {
 			ret = -ENOMEM;
 			goto out_err;
@@ -89,7 +89,6 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
 	mutex_unlock(&item->mutex);
 	return 0;
 out_err:
-	kfree(item->object);
 	mutex_unlock(&item->mutex);
 	item->object = NULL;
 	return ret;
@@ -105,7 +104,6 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
 	BUG_ON(ref->object != item->object);
 	if (--item->refcount == 0) {
 		ref->release(ref);
-		kfree(item->object);
 		item->object = NULL;
 	}
 	mutex_unlock(&item->mutex);
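[Editor's note] Two related fixes above: the global object is now zeroed at allocation (the kobject this patch embeds in struct ttm_mem_global expects zeroed state before kobject_init()), and ttm_global no longer kfree()s the object, since ownership passes to the kobject and the final kobject_put() frees it. A hedged sketch of what a driver's release hook looks like under the new ownership rule (the function name is illustrative):

	static void ttm_mem_global_ref_release(struct ttm_global_reference *ref)
	{
		/*
		 * ttm_mem_global_release() drops the kobject references;
		 * the zone and global objects are kfree()d by their kobject
		 * release functions, so no kfree(ref->object) here.
		 */
		ttm_mem_global_release(ref->object);
	}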
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 87323d4ff68d..62fb5cf0899e 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -26,15 +26,180 @@
 **************************************************************************/
 
 #include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 
-#define TTM_PFX "[TTM] "
 #define TTM_MEMORY_ALLOC_RETRIES 4
 
+struct ttm_mem_zone {
+	struct kobject kobj;
+	struct ttm_mem_global *glob;
+	const char *name;
+	uint64_t zone_mem;
+	uint64_t emer_mem;
+	uint64_t max_mem;
+	uint64_t swap_limit;
+	uint64_t used_mem;
+};
+
+static struct attribute ttm_mem_sys = {
+	.name = "zone_memory",
+	.mode = S_IRUGO
+};
+static struct attribute ttm_mem_emer = {
+	.name = "emergency_memory",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_mem_max = {
+	.name = "available_memory",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_mem_swap = {
+	.name = "swap_limit",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_mem_used = {
+	.name = "used_memory",
+	.mode = S_IRUGO
+};
+
+static void ttm_mem_zone_kobj_release(struct kobject *kobj)
+{
+	struct ttm_mem_zone *zone =
+		container_of(kobj, struct ttm_mem_zone, kobj);
+
+	printk(KERN_INFO TTM_PFX
+	       "Zone %7s: Used memory at exit: %llu kiB.\n",
+	       zone->name, (unsigned long long) zone->used_mem >> 10);
+	kfree(zone);
+}
+
+static ssize_t ttm_mem_zone_show(struct kobject *kobj,
+				 struct attribute *attr,
+				 char *buffer)
+{
+	struct ttm_mem_zone *zone =
+		container_of(kobj, struct ttm_mem_zone, kobj);
+	uint64_t val = 0;
+
+	spin_lock(&zone->glob->lock);
+	if (attr == &ttm_mem_sys)
+		val = zone->zone_mem;
+	else if (attr == &ttm_mem_emer)
+		val = zone->emer_mem;
+	else if (attr == &ttm_mem_max)
+		val = zone->max_mem;
+	else if (attr == &ttm_mem_swap)
+		val = zone->swap_limit;
+	else if (attr == &ttm_mem_used)
+		val = zone->used_mem;
+	spin_unlock(&zone->glob->lock);
+
+	return snprintf(buffer, PAGE_SIZE, "%llu\n",
+			(unsigned long long) val >> 10);
+}
+
+static void ttm_check_swapping(struct ttm_mem_global *glob);
+
+static ssize_t ttm_mem_zone_store(struct kobject *kobj,
+				  struct attribute *attr,
+				  const char *buffer,
+				  size_t size)
+{
+	struct ttm_mem_zone *zone =
+		container_of(kobj, struct ttm_mem_zone, kobj);
+	int chars;
+	unsigned long val;
+	uint64_t val64;
+
+	chars = sscanf(buffer, "%lu", &val);
+	if (chars == 0)
+		return size;
+
+	val64 = val;
+	val64 <<= 10;
+
+	spin_lock(&zone->glob->lock);
+	if (val64 > zone->zone_mem)
+		val64 = zone->zone_mem;
+	if (attr == &ttm_mem_emer) {
+		zone->emer_mem = val64;
+		if (zone->max_mem > val64)
+			zone->max_mem = val64;
+	} else if (attr == &ttm_mem_max) {
+		zone->max_mem = val64;
+		if (zone->emer_mem < val64)
+			zone->emer_mem = val64;
+	} else if (attr == &ttm_mem_swap)
+		zone->swap_limit = val64;
+	spin_unlock(&zone->glob->lock);
+
+	ttm_check_swapping(zone->glob);
+
+	return size;
+}
+
+static struct attribute *ttm_mem_zone_attrs[] = {
+	&ttm_mem_sys,
+	&ttm_mem_emer,
+	&ttm_mem_max,
+	&ttm_mem_swap,
+	&ttm_mem_used,
+	NULL
+};
+
+static struct sysfs_ops ttm_mem_zone_ops = {
+	.show = &ttm_mem_zone_show,
+	.store = &ttm_mem_zone_store
+};
+
+static struct kobj_type ttm_mem_zone_kobj_type = {
+	.release = &ttm_mem_zone_kobj_release,
+	.sysfs_ops = &ttm_mem_zone_ops,
+	.default_attrs = ttm_mem_zone_attrs,
+};
+
+static void ttm_mem_global_kobj_release(struct kobject *kobj)
+{
+	struct ttm_mem_global *glob =
+		container_of(kobj, struct ttm_mem_global, kobj);
+
+	kfree(glob);
+}
+
+static struct kobj_type ttm_mem_glob_kobj_type = {
+	.release = &ttm_mem_global_kobj_release,
+};
+
+static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
+					bool from_wq, uint64_t extra)
+{
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+	uint64_t target;
+
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+
+		if (from_wq)
+			target = zone->swap_limit;
+		else if (capable(CAP_SYS_ADMIN))
+			target = zone->emer_mem;
+		else
+			target = zone->max_mem;
+
+		target = (extra > target) ? 0ULL : target;
+
+		if (zone->used_mem > target)
+			return true;
+	}
+	return false;
+}
+
 /**
  * At this point we only support a single shrink callback.
  * Extend this if needed, perhaps using a linked list of callbacks.
@@ -42,34 +207,17 @@
  * many threads may try to swap out at any given time.
  */
 
-static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
+static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
 		       uint64_t extra)
 {
 	int ret;
 	struct ttm_mem_shrink *shrink;
-	uint64_t target;
-	uint64_t total_target;
 
 	spin_lock(&glob->lock);
 	if (glob->shrink == NULL)
 		goto out;
 
-	if (from_workqueue) {
-		target = glob->swap_limit;
-		total_target = glob->total_memory_swap_limit;
-	} else if (capable(CAP_SYS_ADMIN)) {
-		total_target = glob->emer_total_memory;
-		target = glob->emer_memory;
-	} else {
-		total_target = glob->max_total_memory;
-		target = glob->max_memory;
-	}
-
-	total_target = (extra >= total_target) ? 0 : total_target - extra;
-	target = (extra >= target) ? 0 : target - extra;
-
-	while (glob->used_memory > target ||
-	       glob->used_total_memory > total_target) {
+	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
 		shrink = glob->shrink;
 		spin_unlock(&glob->lock);
 		ret = shrink->do_shrink(shrink);
@@ -81,6 +229,8 @@ out:
 	spin_unlock(&glob->lock);
 }
 
+
+
 static void ttm_shrink_work(struct work_struct *work)
 {
 	struct ttm_mem_global *glob =
@@ -89,63 +239,178 @@ static void ttm_shrink_work(struct work_struct *work)
 	ttm_shrink(glob, true, 0ULL);
 }
 
+static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
+				    const struct sysinfo *si)
+{
+	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	uint64_t mem;
+
+	if (unlikely(!zone))
+		return -ENOMEM;
+
+	mem = si->totalram - si->totalhigh;
+	mem *= si->mem_unit;
+
+	zone->name = "kernel";
+	zone->zone_mem = mem;
+	zone->max_mem = mem >> 1;
+	zone->emer_mem = (mem >> 1) + (mem >> 2);
+	zone->swap_limit = zone->max_mem - (mem >> 3);
+	zone->used_mem = 0;
+	zone->glob = glob;
+	glob->zone_kernel = zone;
+	glob->zones[glob->num_zones++] = zone;
+	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
+	return kobject_add(&zone->kobj, &glob->kobj, zone->name);
+}
+
+#ifdef CONFIG_HIGHMEM
+static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
+				     const struct sysinfo *si)
+{
+	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	uint64_t mem;
+
+	if (unlikely(!zone))
+		return -ENOMEM;
+
+	if (si->totalhigh == 0)
+		return 0;
+
+	mem = si->totalram;
+	mem *= si->mem_unit;
+
+	zone->name = "highmem";
+	zone->zone_mem = mem;
+	zone->max_mem = mem >> 1;
+	zone->emer_mem = (mem >> 1) + (mem >> 2);
+	zone->swap_limit = zone->max_mem - (mem >> 3);
+	zone->used_mem = 0;
+	zone->glob = glob;
+	glob->zone_highmem = zone;
+	glob->zones[glob->num_zones++] = zone;
+	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
+	return kobject_add(&zone->kobj, &glob->kobj, zone->name);
+}
+#else
+static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
+				   const struct sysinfo *si)
+{
+	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	uint64_t mem;
+
+	if (unlikely(!zone))
+		return -ENOMEM;
+
+	mem = si->totalram;
+	mem *= si->mem_unit;
+
+	/**
+	 * No special dma32 zone needed.
+	 */
+
+	if (mem <= ((uint64_t) 1ULL << 32))
+		return 0;
+
+	/*
+	 * Limit max dma32 memory to 4GB for now
+	 * until we can figure out how big this
+	 * zone really is.
+	 */
+
+	mem = ((uint64_t) 1ULL << 32);
+	zone->name = "dma32";
+	zone->zone_mem = mem;
+	zone->max_mem = mem >> 1;
+	zone->emer_mem = (mem >> 1) + (mem >> 2);
+	zone->swap_limit = zone->max_mem - (mem >> 3);
+	zone->used_mem = 0;
+	zone->glob = glob;
+	glob->zone_dma32 = zone;
+	glob->zones[glob->num_zones++] = zone;
+	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
+	return kobject_add(&zone->kobj, &glob->kobj, zone->name);
+}
+#endif
+
 int ttm_mem_global_init(struct ttm_mem_global *glob)
 {
 	struct sysinfo si;
-	uint64_t mem;
+	int ret;
+	int i;
+	struct ttm_mem_zone *zone;
 
 	spin_lock_init(&glob->lock);
 	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
 	INIT_WORK(&glob->work, ttm_shrink_work);
 	init_waitqueue_head(&glob->queue);
+	kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type);
+	ret = kobject_add(&glob->kobj,
+			  ttm_get_kobj(),
+			  "memory_accounting");
+	if (unlikely(ret != 0))
+		goto out_no_zone;
 
 	si_meminfo(&si);
 
-	mem = si.totalram - si.totalhigh;
-	mem *= si.mem_unit;
-
-	glob->max_memory = mem >> 1;
-	glob->emer_memory = (mem >> 1) + (mem >> 2);
-	glob->swap_limit = glob->max_memory - (mem >> 3);
-	glob->used_memory = 0;
-	glob->used_total_memory = 0;
-	glob->shrink = NULL;
-
-	mem = si.totalram;
-	mem *= si.mem_unit;
-
-	glob->max_total_memory = mem >> 1;
-	glob->emer_total_memory = (mem >> 1) + (mem >> 2);
-
-	glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 3);
-
-	printk(KERN_INFO TTM_PFX "TTM available graphics memory: %llu MiB\n",
-	       glob->max_total_memory >> 20);
-	printk(KERN_INFO TTM_PFX "TTM available object memory: %llu MiB\n",
-	       glob->max_memory >> 20);
-
+	ret = ttm_mem_init_kernel_zone(glob, &si);
+	if (unlikely(ret != 0))
+		goto out_no_zone;
+#ifdef CONFIG_HIGHMEM
+	ret = ttm_mem_init_highmem_zone(glob, &si);
+	if (unlikely(ret != 0))
+		goto out_no_zone;
+#else
+	ret = ttm_mem_init_dma32_zone(glob, &si);
+	if (unlikely(ret != 0))
+		goto out_no_zone;
+#endif
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		printk(KERN_INFO TTM_PFX
+		       "Zone %7s: Available graphics memory: %llu kiB.\n",
+		       zone->name, (unsigned long long) zone->max_mem >> 10);
+	}
 	return 0;
+out_no_zone:
+	ttm_mem_global_release(glob);
+	return ret;
 }
 EXPORT_SYMBOL(ttm_mem_global_init);
 
 void ttm_mem_global_release(struct ttm_mem_global *glob)
 {
-	printk(KERN_INFO TTM_PFX "Used total memory is %llu bytes.\n",
-	       (unsigned long long)glob->used_total_memory);
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+
 	flush_workqueue(glob->swap_queue);
 	destroy_workqueue(glob->swap_queue);
 	glob->swap_queue = NULL;
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		kobject_del(&zone->kobj);
+		kobject_put(&zone->kobj);
+	}
+	kobject_del(&glob->kobj);
+	kobject_put(&glob->kobj);
 }
 EXPORT_SYMBOL(ttm_mem_global_release);
 
-static inline void ttm_check_swapping(struct ttm_mem_global *glob)
+static void ttm_check_swapping(struct ttm_mem_global *glob)
 {
-	bool needs_swapping;
+	bool needs_swapping = false;
+	unsigned int i;
+	struct ttm_mem_zone *zone;
 
 	spin_lock(&glob->lock);
-	needs_swapping = (glob->used_memory > glob->swap_limit ||
-			  glob->used_total_memory >
-			  glob->total_memory_swap_limit);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (zone->used_mem > zone->swap_limit) {
+			needs_swapping = true;
+			break;
+		}
+	}
+
 	spin_unlock(&glob->lock);
 
 	if (unlikely(needs_swapping))
@@ -153,44 +418,60 @@ static inline void ttm_check_swapping(struct ttm_mem_global *glob)
 
 }
 
-void ttm_mem_global_free(struct ttm_mem_global *glob,
-			 uint64_t amount, bool himem)
+static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
+				     struct ttm_mem_zone *single_zone,
+				     uint64_t amount)
 {
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+
 	spin_lock(&glob->lock);
-	glob->used_total_memory -= amount;
-	if (!himem)
-		glob->used_memory -= amount;
-	wake_up_all(&glob->queue);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (single_zone && zone != single_zone)
+			continue;
+		zone->used_mem -= amount;
+	}
 	spin_unlock(&glob->lock);
 }
 
+void ttm_mem_global_free(struct ttm_mem_global *glob,
+			 uint64_t amount)
+{
+	return ttm_mem_global_free_zone(glob, NULL, amount);
+}
+
 static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
-				  uint64_t amount, bool himem, bool reserve)
+				  struct ttm_mem_zone *single_zone,
+				  uint64_t amount, bool reserve)
 {
 	uint64_t limit;
-	uint64_t lomem_limit;
 	int ret = -ENOMEM;
+	unsigned int i;
+	struct ttm_mem_zone *zone;
 
 	spin_lock(&glob->lock);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (single_zone && zone != single_zone)
+			continue;
 
-	if (capable(CAP_SYS_ADMIN)) {
-		limit = glob->emer_total_memory;
-		lomem_limit = glob->emer_memory;
-	} else {
-		limit = glob->max_total_memory;
-		lomem_limit = glob->max_memory;
-	}
+		limit = (capable(CAP_SYS_ADMIN)) ?
+			zone->emer_mem : zone->max_mem;
 
-	if (unlikely(glob->used_total_memory + amount > limit))
-		goto out_unlock;
-	if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
-		goto out_unlock;
+		if (zone->used_mem > limit)
+			goto out_unlock;
+	}
 
 	if (reserve) {
-		glob->used_total_memory += amount;
-		if (!himem)
-			glob->used_memory += amount;
+		for (i = 0; i < glob->num_zones; ++i) {
+			zone = glob->zones[i];
+			if (single_zone && zone != single_zone)
+				continue;
+			zone->used_mem += amount;
+		}
 	}
+
 	ret = 0;
 out_unlock:
 	spin_unlock(&glob->lock);
@@ -199,12 +480,17 @@ out_unlock:
 	return ret;
 }
 
-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
-			 bool no_wait, bool interruptible, bool himem)
+
+static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
+				     struct ttm_mem_zone *single_zone,
+				     uint64_t memory,
+				     bool no_wait, bool interruptible)
 {
 	int count = TTM_MEMORY_ALLOC_RETRIES;
 
-	while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true)
+	while (unlikely(ttm_mem_global_reserve(glob,
+					       single_zone,
+					       memory, true)
 			!= 0)) {
 		if (no_wait)
 			return -ENOMEM;
@@ -216,6 +502,56 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 	return 0;
 }
 
+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+			 bool no_wait, bool interruptible)
+{
+	/**
+	 * Normal allocations of kernel memory are registered in
+	 * all zones.
+	 */
+
+	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
+					 interruptible);
+}
+
+int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
+			      struct page *page,
+			      bool no_wait, bool interruptible)
+{
+
+	struct ttm_mem_zone *zone = NULL;
+
+	/**
+	 * Page allocations may be registed in a single zone
+	 * only if highmem or !dma32.
+	 */
+
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page) && glob->zone_highmem != NULL)
+		zone = glob->zone_highmem;
+#else
+	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
+		zone = glob->zone_kernel;
+#endif
+	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
+					 interruptible);
+}
+
+void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
+{
+	struct ttm_mem_zone *zone = NULL;
+
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page) && glob->zone_highmem != NULL)
+		zone = glob->zone_highmem;
+#else
+	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
+		zone = glob->zone_kernel;
+#endif
+	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+}
+
+
 size_t ttm_round_pot(size_t size)
 {
 	if ((size & (size - 1)) == 0)
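[Editor's note] The default limits set up by ttm_mem_init_*_zone() above are simple heuristics on the zone size. A worked example (a sketch, not part of the patch) for a hypothetical 2 GiB kernel zone:

	uint64_t mem = 2ULL << 30;                   /* zone size: 2 GiB            */
	uint64_t max_mem = mem >> 1;                 /* available_memory: 1 GiB     */
	uint64_t emer_mem = (mem >> 1) + (mem >> 2); /* emergency_memory: 1.5 GiB   */
	uint64_t swap_limit = max_mem - (mem >> 3);  /* swap_limit: 768 MiB         */

That is, ordinary processes may account up to half the zone, root up to three quarters, and the background swapper kicks in once accounted usage crosses three eighths.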
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 75dc8bd24592..4e1e2566d519 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -166,7 +166,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 			set_page_dirty_lock(page);
 
 		ttm->pages[i] = NULL;
-		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
+		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE);
 		put_page(page);
 	}
 	ttm->state = tt_unpopulated;
@@ -187,21 +187,14 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 		if (!p)
 			return NULL;
 
-		if (PageHighMem(p)) {
-			ret =
-			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
-						 false, false, true);
-			if (unlikely(ret != 0))
-				goto out_err;
+		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
+		if (unlikely(ret != 0))
+			goto out_err;
+
+		if (PageHighMem(p))
 			ttm->pages[--ttm->first_himem_page] = p;
-		} else {
-			ret =
-			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
-						 false, false, false);
-			if (unlikely(ret != 0))
-				goto out_err;
+		else
 			ttm->pages[++ttm->last_lomem_page] = p;
-		}
 	}
 	return p;
 out_err:
@@ -355,8 +348,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 			printk(KERN_ERR TTM_PFX
 			       "Erroneous page count. "
 			       "Leaking pages.\n");
-			ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
-					    PageHighMem(cur_page));
+			ttm_mem_global_free_page(ttm->bdev->mem_glob,
						 cur_page);
 			__free_page(cur_page);
 		}
 	}
@@ -411,7 +404,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
 	 */
 
 	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
-				   false, false, false);
+				   false, false);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -422,7 +415,7 @@
 
 	if (ret != num_pages && write) {
 		ttm_tt_free_user_pages(ttm);
-		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
+		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
 		return -ENOMEM;
 	}
 
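[Editor's note] The net effect of the ttm_tt.c hunks is that per-page accounting becomes a single call that picks the zone from the page itself: highmem pages are charged only to the highmem zone, and pages above 4 GiB skip the dma32 zone by being charged only to the kernel zone. A condensed sketch of the pattern, mirroring __ttm_tt_get_page() above (error handling trimmed; the GFP flags are illustrative):

	struct page *p = alloc_page(GFP_HIGHUSER);	/* may be highmem */
	if (p && unlikely(ttm_mem_global_alloc_page(mem_glob, p,
						    false, false) != 0)) {
		__free_page(p);		/* accounting refused the page */
		p = NULL;
	}
	/* ... and on teardown, the same zone choice is made on free: */
	ttm_mem_global_free_page(mem_glob, p);
	__free_page(p);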
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index d8b8f042c4f1..6983a7cf4da4 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -32,6 +32,7 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/errno.h>
+#include <linux/kobject.h>
 
 /**
  * struct ttm_mem_shrink - callback to shrink TTM memory usage.
@@ -60,34 +61,33 @@ struct ttm_mem_shrink {
  * @queue: Wait queue for processes suspended waiting for memory.
  * @lock: Lock to protect the @shrink - and the memory accounting members,
  * that is, essentially the whole structure with some exceptions.
- * @emer_memory: Lowmem memory limit available for root.
- * @max_memory: Lowmem memory limit available for non-root.
- * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
- * @used_memory: Currently used lowmem memory.
- * @used_total_memory: Currently used total (lowmem + highmem) memory.
- * @total_memory_swap_limit: Total memory limit where the shrink workqueue
- * kicks in.
- * @max_total_memory: Total memory available to non-root processes.
- * @emer_total_memory: Total memory available to root processes.
+ * @zones: Array of pointers to accounting zones.
+ * @num_zones: Number of populated entries in the @zones array.
+ * @zone_kernel: Pointer to the kernel zone.
+ * @zone_highmem: Pointer to the highmem zone if there is one.
+ * @zone_dma32: Pointer to the dma32 zone if there is one.
  *
 * Note that this structure is not per device. It should be global for all
 * graphics devices.
 */
 
+#define TTM_MEM_MAX_ZONES 2
+struct ttm_mem_zone;
 struct ttm_mem_global {
+	struct kobject kobj;
 	struct ttm_mem_shrink *shrink;
 	struct workqueue_struct *swap_queue;
 	struct work_struct work;
 	wait_queue_head_t queue;
 	spinlock_t lock;
-	uint64_t emer_memory;
-	uint64_t max_memory;
-	uint64_t swap_limit;
-	uint64_t used_memory;
-	uint64_t used_total_memory;
-	uint64_t total_memory_swap_limit;
-	uint64_t max_total_memory;
-	uint64_t emer_total_memory;
+	struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
+	unsigned int num_zones;
+	struct ttm_mem_zone *zone_kernel;
+#ifdef CONFIG_HIGHMEM
+	struct ttm_mem_zone *zone_highmem;
+#else
+	struct ttm_mem_zone *zone_dma32;
+#endif
 };
 
 /**
@@ -146,8 +146,13 @@ static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
 extern int ttm_mem_global_init(struct ttm_mem_global *glob);
 extern void ttm_mem_global_release(struct ttm_mem_global *glob);
 extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
-				bool no_wait, bool interruptible, bool himem);
+				bool no_wait, bool interruptible);
 extern void ttm_mem_global_free(struct ttm_mem_global *glob,
-				uint64_t amount, bool himem);
+				uint64_t amount);
+extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
+				     struct page *page,
+				     bool no_wait, bool interruptible);
+extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
+				     struct page *page);
 extern size_t ttm_round_pot(size_t size);
 #endif
diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h
index 889a4c7958ae..0a72ac7c7e58 100644
--- a/include/drm/ttm/ttm_module.h
+++ b/include/drm/ttm/ttm_module.h
@@ -32,6 +32,7 @@
 #define _TTM_MODULE_H_
 
 #include <linux/kernel.h>
+struct kobject;
 
 #define TTM_PFX "[TTM]"
 
@@ -54,5 +55,6 @@ extern void ttm_global_init(void);
 extern void ttm_global_release(void);
 extern int ttm_global_item_ref(struct ttm_global_reference *ref);
 extern void ttm_global_item_unref(struct ttm_global_reference *ref);
+extern struct kobject *ttm_get_kobj(void);
 
 #endif /* _TTM_MODULE_H_ */