author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/char/drm/drm_memory_debug.h
tag        v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/char/drm/drm_memory_debug.h')
-rw-r--r--   drivers/char/drm/drm_memory_debug.h   459
1 file changed, 459 insertions(+), 0 deletions(-)
diff --git a/drivers/char/drm/drm_memory_debug.h b/drivers/char/drm/drm_memory_debug.h
new file mode 100644
index 000000000000..2c82e69a7fd2
--- /dev/null
+++ b/drivers/char/drm/drm_memory_debug.h
@@ -0,0 +1,459 @@
/**
 * \file drm_memory.h
 * Memory management wrappers for DRM.
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/config.h>
#include "drmP.h"

typedef struct drm_mem_stats {
        const char *name;
        int succeed_count;
        int free_count;
        int fail_count;
        unsigned long bytes_allocated;
        unsigned long bytes_freed;
} drm_mem_stats_t;

static DEFINE_SPINLOCK(DRM(mem_lock));
static unsigned long DRM(ram_available) = 0;        /* In pages */
static unsigned long DRM(ram_used) = 0;
static drm_mem_stats_t DRM(mem_stats)[] = {
        [DRM_MEM_DMA] = { "dmabufs" },
        [DRM_MEM_SAREA] = { "sareas" },
        [DRM_MEM_DRIVER] = { "driver" },
        [DRM_MEM_MAGIC] = { "magic" },
        [DRM_MEM_IOCTLS] = { "ioctltab" },
        [DRM_MEM_MAPS] = { "maplist" },
        [DRM_MEM_VMAS] = { "vmalist" },
        [DRM_MEM_BUFS] = { "buflist" },
        [DRM_MEM_SEGS] = { "seglist" },
        [DRM_MEM_PAGES] = { "pagelist" },
        [DRM_MEM_FILES] = { "files" },
        [DRM_MEM_QUEUES] = { "queues" },
        [DRM_MEM_CMDS] = { "commands" },
        [DRM_MEM_MAPPINGS] = { "mappings" },
        [DRM_MEM_BUFLISTS] = { "buflists" },
        [DRM_MEM_AGPLISTS] = { "agplist" },
        [DRM_MEM_SGLISTS] = { "sglist" },
        [DRM_MEM_TOTALAGP] = { "totalagp" },
        [DRM_MEM_BOUNDAGP] = { "boundagp" },
        [DRM_MEM_CTXBITMAP] = { "ctxbitmap" },
        [DRM_MEM_CTXLIST] = { "ctxlist" },
        [DRM_MEM_STUB] = { "stub" },
        { NULL, 0, }        /* Last entry must be null */
};
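
/*
 * Accounting model: every successful allocation in an area bumps that area's
 * succeed_count and bytes_allocated, every matching release bumps free_count
 * and bytes_freed, and a failed allocation bumps fail_count.  Outstanding
 * usage for an area is therefore succeed_count - free_count allocations and
 * bytes_allocated - bytes_freed bytes.
 */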

void DRM(mem_init)(void)
{
        drm_mem_stats_t *mem;
        struct sysinfo si;

        for (mem = DRM(mem_stats); mem->name; ++mem) {
                mem->succeed_count = 0;
                mem->free_count = 0;
                mem->fail_count = 0;
                mem->bytes_allocated = 0;
                mem->bytes_freed = 0;
        }

        si_meminfo(&si);
        DRM(ram_available) = si.totalram;
        DRM(ram_used) = 0;
}

/* drm_mem_info is called whenever a process reads /dev/drm/mem. */

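/*
 * The report written here has one row per area: cumulative alloc/free/fail
 * counts and byte totals, plus the outstanding allocation count and byte
 * delta.  The leading "system" row shows total RAM (ram_available, converted
 * from pages to kB) and the "locked" row shows ram_used in kB, i.e. memory
 * currently pinned via DRM(alloc_pages).
 */
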
static int DRM(_mem_info)(char *buf, char **start, off_t offset,
                          int request, int *eof, void *data)
{
        drm_mem_stats_t *pt;
        int len = 0;

        if (offset > DRM_PROC_LIMIT) {
                *eof = 1;
                return 0;
        }

        *eof = 0;
        *start = &buf[offset];

        DRM_PROC_PRINT(" total counts "
                       " | outstanding \n");
        DRM_PROC_PRINT("type alloc freed fail bytes freed"
                       " | allocs bytes\n\n");
        DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
                       "system", 0, 0, 0,
                       DRM(ram_available) << (PAGE_SHIFT - 10));
        DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
                       "locked", 0, 0, 0, DRM(ram_used) >> 10);
        DRM_PROC_PRINT("\n");
        for (pt = DRM(mem_stats); pt->name; pt++) {
                DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
                               pt->name,
                               pt->succeed_count,
                               pt->free_count,
                               pt->fail_count,
                               pt->bytes_allocated,
                               pt->bytes_freed,
                               pt->succeed_count - pt->free_count,
                               (long)pt->bytes_allocated
                               - (long)pt->bytes_freed);
        }

        if (len > request + offset) return request;
        *eof = 1;
        return len - offset;
}

int DRM(mem_info)(char *buf, char **start, off_t offset,
                  int len, int *eof, void *data)
{
        int ret;

        spin_lock(&DRM(mem_lock));
        ret = DRM(_mem_info)(buf, start, offset, len, eof, data);
        spin_unlock(&DRM(mem_lock));
        return ret;
}

void *DRM(alloc)(size_t size, int area)
{
        void *pt;

        if (!size) {
                DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
                return NULL;
        }

        if (!(pt = kmalloc(size, GFP_KERNEL))) {
                spin_lock(&DRM(mem_lock));
                ++DRM(mem_stats)[area].fail_count;
                spin_unlock(&DRM(mem_lock));
                return NULL;
        }
        spin_lock(&DRM(mem_lock));
        ++DRM(mem_stats)[area].succeed_count;
        DRM(mem_stats)[area].bytes_allocated += size;
        spin_unlock(&DRM(mem_lock));
        return pt;
}

void *DRM(calloc)(size_t nmemb, size_t size, int area)
{
        void *addr;

        addr = DRM(alloc)(nmemb * size, area);
        if (addr != NULL)
                memset((void *)addr, 0, size * nmemb);

        return addr;
}

void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size, int area)
{
        void *pt;

        if (!(pt = DRM(alloc)(size, area))) return NULL;
        if (oldpt && oldsize) {
                memcpy(pt, oldpt, oldsize);
                DRM(free)(oldpt, oldsize, area);
        }
        return pt;
}

void DRM(free)(void *pt, size_t size, int area)
{
        int alloc_count;
        int free_count;

        if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
        else kfree(pt);
        spin_lock(&DRM(mem_lock));
        DRM(mem_stats)[area].bytes_freed += size;
        free_count = ++DRM(mem_stats)[area].free_count;
        alloc_count = DRM(mem_stats)[area].succeed_count;
        spin_unlock(&DRM(mem_lock));
        if (free_count > alloc_count) {
                DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
                              free_count, alloc_count);
        }
}

unsigned long DRM(alloc_pages)(int order, int area)
{
        unsigned long address;
        unsigned long bytes = PAGE_SIZE << order;
        unsigned long addr;
        unsigned int sz;

        spin_lock(&DRM(mem_lock));
        if ((DRM(ram_used) >> PAGE_SHIFT)
            > (DRM_RAM_PERCENT * DRM(ram_available)) / 100) {
                spin_unlock(&DRM(mem_lock));
                return 0;
        }
        spin_unlock(&DRM(mem_lock));

        address = __get_free_pages(GFP_KERNEL, order);
        if (!address) {
                spin_lock(&DRM(mem_lock));
                ++DRM(mem_stats)[area].fail_count;
                spin_unlock(&DRM(mem_lock));
                return 0;
        }
        spin_lock(&DRM(mem_lock));
        ++DRM(mem_stats)[area].succeed_count;
        DRM(mem_stats)[area].bytes_allocated += bytes;
        DRM(ram_used) += bytes;
        spin_unlock(&DRM(mem_lock));

        /* Zero outside the lock */
        memset((void *)address, 0, bytes);

        /* Reserve */
        for (addr = address, sz = bytes;
             sz > 0;
             addr += PAGE_SIZE, sz -= PAGE_SIZE) {
                SetPageReserved(virt_to_page(addr));
        }

        return address;
}
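
/*
 * Page allocations are capped: once ram_used exceeds DRM_RAM_PERCENT percent
 * of ram_available, the request is refused outright.  Pages handed out are
 * zeroed outside the spinlock and marked reserved with SetPageReserved(); in
 * kernels of this vintage that kept the VM away from driver-owned pages and
 * allowed them to be mapped into user space.  DRM(free_pages) below clears
 * the reserved bit again before returning the pages to the allocator.
 */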

void DRM(free_pages)(unsigned long address, int order, int area)
{
        unsigned long bytes = PAGE_SIZE << order;
        int alloc_count;
        int free_count;
        unsigned long addr;
        unsigned int sz;

        if (!address) {
                DRM_MEM_ERROR(area, "Attempt to free address 0\n");
        } else {
                /* Unreserve */
                for (addr = address, sz = bytes;
                     sz > 0;
                     addr += PAGE_SIZE, sz -= PAGE_SIZE) {
                        ClearPageReserved(virt_to_page(addr));
                }
                free_pages(address, order);
        }

        spin_lock(&DRM(mem_lock));
        free_count = ++DRM(mem_stats)[area].free_count;
        alloc_count = DRM(mem_stats)[area].succeed_count;
        DRM(mem_stats)[area].bytes_freed += bytes;
        DRM(ram_used) -= bytes;
        spin_unlock(&DRM(mem_lock));
        if (free_count > alloc_count) {
                DRM_MEM_ERROR(area,
                              "Excess frees: %d frees, %d allocs\n",
                              free_count, alloc_count);
        }
}

void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
{
        void *pt;

        if (!size) {
                DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
                              "Mapping 0 bytes at 0x%08lx\n", offset);
                return NULL;
        }

        if (!(pt = drm_ioremap(offset, size, dev))) {
                spin_lock(&DRM(mem_lock));
                ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
                spin_unlock(&DRM(mem_lock));
                return NULL;
        }
        spin_lock(&DRM(mem_lock));
        ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
        DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
        spin_unlock(&DRM(mem_lock));
        return pt;
}

void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev)
{
        void *pt;

        if (!size) {
                DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
                              "Mapping 0 bytes at 0x%08lx\n", offset);
                return NULL;
        }

        if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
                spin_lock(&DRM(mem_lock));
                ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
                spin_unlock(&DRM(mem_lock));
                return NULL;
        }
        spin_lock(&DRM(mem_lock));
        ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
        DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
        spin_unlock(&DRM(mem_lock));
        return pt;
}

void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
{
        int alloc_count;
        int free_count;

        if (!pt)
                DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
                              "Attempt to free NULL pointer\n");
        else
                drm_ioremapfree(pt, size, dev);

        spin_lock(&DRM(mem_lock));
        DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
        free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
        alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
        spin_unlock(&DRM(mem_lock));
        if (free_count > alloc_count) {
                DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
                              "Excess frees: %d frees, %d allocs\n",
                              free_count, alloc_count);
        }
}

#if __OS_HAS_AGP

DRM_AGP_MEM *DRM(alloc_agp)(int pages, u32 type)
{
        DRM_AGP_MEM *handle;

        if (!pages) {
                DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
                return NULL;
        }

        if ((handle = DRM(agp_allocate_memory)(pages, type))) {
                spin_lock(&DRM(mem_lock));
                ++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
                DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated
                    += pages << PAGE_SHIFT;
                spin_unlock(&DRM(mem_lock));
                return handle;
        }
        spin_lock(&DRM(mem_lock));
        ++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count;
        spin_unlock(&DRM(mem_lock));
        return NULL;
}

int DRM(free_agp)(DRM_AGP_MEM *handle, int pages)
{
        int alloc_count;
        int free_count;
        int retval = -EINVAL;

        if (!handle) {
                DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
                              "Attempt to free NULL AGP handle\n");
                return retval;
        }

        if (DRM(agp_free_memory)(handle)) {
                spin_lock(&DRM(mem_lock));
                free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
                alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
                DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed
                    += pages << PAGE_SHIFT;
                spin_unlock(&DRM(mem_lock));
                if (free_count > alloc_count) {
                        DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
                                      "Excess frees: %d frees, %d allocs\n",
                                      free_count, alloc_count);
                }
                return 0;
        }
        return retval;
}

int DRM(bind_agp)(DRM_AGP_MEM *handle, unsigned int start)
{
        int retcode = -EINVAL;

        if (!handle) {
                DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
                              "Attempt to bind NULL AGP handle\n");
                return retcode;
        }

        if (!(retcode = DRM(agp_bind_memory)(handle, start))) {
                spin_lock(&DRM(mem_lock));
                ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
                DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated
                    += handle->page_count << PAGE_SHIFT;
                spin_unlock(&DRM(mem_lock));
                return retcode;
        }
        spin_lock(&DRM(mem_lock));
        ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count;
        spin_unlock(&DRM(mem_lock));
        return retcode;
}

int DRM(unbind_agp)(DRM_AGP_MEM *handle)
{
        int alloc_count;
        int free_count;
        int retcode = -EINVAL;

        if (!handle) {
                DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
                              "Attempt to unbind NULL AGP handle\n");
                return retcode;
        }

        if ((retcode = DRM(agp_unbind_memory)(handle))) return retcode;
        spin_lock(&DRM(mem_lock));
        free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
        alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
        DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed
            += handle->page_count << PAGE_SHIFT;
        spin_unlock(&DRM(mem_lock));
        if (free_count > alloc_count) {
                DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
                              "Excess frees: %d frees, %d allocs\n",
                              free_count, alloc_count);
        }
        return retcode;
}
#endif
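
For orientation, here is a minimal sketch, not part of the commit above, of how a driver built against this debug layer would allocate and release an accounted buffer. The helper name example_alloc_and_account and the choice of drm_buf_t and DRM_MEM_BUFS are illustrative assumptions; the DRM(alloc)/DRM(free) signatures match the file.

        /* Illustrative sketch only -- not part of the original file. */
        #include "drmP.h"

        static int example_alloc_and_account(void)
        {
                drm_buf_t *buf;

                /* Accounted kmalloc: on success, DRM_MEM_BUFS succeed_count and
                 * bytes_allocated grow; on failure, fail_count grows instead. */
                buf = DRM(alloc)(sizeof(*buf), DRM_MEM_BUFS);
                if (!buf)
                        return -ENOMEM;

                /* ... use the buffer ... */

                /* Matching accounted kfree: free_count and bytes_freed grow, so
                 * the "outstanding" column reported via /proc returns to zero. */
                DRM(free)(buf, sizeof(*buf), DRM_MEM_BUFS);
                return 0;
        }

Passing the same size to DRM(free) as was passed to DRM(alloc) is what keeps the per-area byte accounting balanced; a mismatched size shows up as a non-zero outstanding byte count in the proc report.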