author    Dave Airlie <airlied@starflyer.(none)>  2006-01-02 03:52:09 -0500
committer Dave Airlie <airlied@linux.ie>          2006-01-02 03:52:09 -0500
commit    f0c408b564ddefa0959ada4e2c2248f4a57f1842 (patch)
tree      d75abe72d5e24bad23b8e9f9a0db63b9fd6a44dd /drivers
parent    1e7d51902a8bd08e37113aaf182d12233b157151 (diff)
drm: update drm_memory_debug.h
Update from DRM CVS for drm memory debug.

From: Jon Smirl <jonsmirl@gmail.com>
Signed-off-by: Dave Airlie <airlied@linux.ie>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/drm/drm_memory_debug.h  |  269
1 file changed, 134 insertions(+), 135 deletions(-)
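The change below is largely a renaming pass: every identifier that the old header routed through the DRM() name-mangling wrapper (DRM(alloc), DRM(mem_lock), and so on) is spelled out as a plain drm_-prefixed symbol, alongside a few small functional tweaks (spinlock initialization, an extra dev argument to the AGP allocator). As an illustrative sketch only -- not part of this patch, and the exact definition varied between DRM trees of the period -- the wrapper being dropped was a token-pasting macro along these lines:

/* Hypothetical sketch of the old-style DRM() wrapper, for orientation only. */
#define DRM(x) drm_##x                      /* paste a fixed prefix onto x */

void *DRM(alloc)(size_t size, int area);    /* expands to: void *drm_alloc(size_t size, int area); */

With the wrapper gone, drm_memory_debug.h names its lock, counters, and helpers directly (drm_mem_lock, drm_mem_stats[], drm_alloc(), drm_free(), ...), which is what produces the large but mostly mechanical diff that follows.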
diff --git a/drivers/char/drm/drm_memory_debug.h b/drivers/char/drm/drm_memory_debug.h
index 4542353195bd..706b75251ea1 100644
--- a/drivers/char/drm/drm_memory_debug.h
+++ b/drivers/char/drm/drm_memory_debug.h
@@ -1,5 +1,5 @@
 /**
- * \file drm_memory.h
+ * \file drm_memory_debug.h
  * Memory management wrappers for DRM.
  *
  * \author Rickard E. (Rik) Faith <faith@valinux.com>
@@ -43,42 +43,41 @@ typedef struct drm_mem_stats {
         unsigned long bytes_freed;
 } drm_mem_stats_t;

-static DEFINE_SPINLOCK(DRM(mem_lock));
-static unsigned long DRM(ram_available) = 0;   /* In pages */
-static unsigned long DRM(ram_used) = 0;
-static drm_mem_stats_t DRM(mem_stats)[] =
+static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
+static unsigned long drm_ram_available = 0;    /* In pages */
+static unsigned long drm_ram_used = 0;
+static drm_mem_stats_t drm_mem_stats[] =
 {
-        [DRM_MEM_DMA] = {
-            "dmabufs"},[DRM_MEM_SAREA] = {
-            "sareas"},[DRM_MEM_DRIVER] = {
-            "driver"},[DRM_MEM_MAGIC] = {
-            "magic"},[DRM_MEM_IOCTLS] = {
-            "ioctltab"},[DRM_MEM_MAPS] = {
-            "maplist"},[DRM_MEM_VMAS] = {
-            "vmalist"},[DRM_MEM_BUFS] = {
-            "buflist"},[DRM_MEM_SEGS] = {
-            "seglist"},[DRM_MEM_PAGES] = {
-            "pagelist"},[DRM_MEM_FILES] = {
-            "files"},[DRM_MEM_QUEUES] = {
-            "queues"},[DRM_MEM_CMDS] = {
-            "commands"},[DRM_MEM_MAPPINGS] = {
-            "mappings"},[DRM_MEM_BUFLISTS] = {
-            "buflists"},[DRM_MEM_AGPLISTS] = {
-            "agplist"},[DRM_MEM_SGLISTS] = {
-            "sglist"},[DRM_MEM_TOTALAGP] = {
-            "totalagp"},[DRM_MEM_BOUNDAGP] = {
-            "boundagp"},[DRM_MEM_CTXBITMAP] = {
-            "ctxbitmap"},[DRM_MEM_CTXLIST] = {
-            "ctxlist"},[DRM_MEM_STUB] = {
-            "stub"}, {
-        NULL, 0,}       /* Last entry must be null */
+        [DRM_MEM_DMA] = {"dmabufs"},
+        [DRM_MEM_SAREA] = {"sareas"},
+        [DRM_MEM_DRIVER] = {"driver"},
+        [DRM_MEM_MAGIC] = {"magic"},
+        [DRM_MEM_IOCTLS] = {"ioctltab"},
+        [DRM_MEM_MAPS] = {"maplist"},
+        [DRM_MEM_VMAS] = {"vmalist"},
+        [DRM_MEM_BUFS] = {"buflist"},
+        [DRM_MEM_SEGS] = {"seglist"},
+        [DRM_MEM_PAGES] = {"pagelist"},
+        [DRM_MEM_FILES] = {"files"},
+        [DRM_MEM_QUEUES] = {"queues"},
+        [DRM_MEM_CMDS] = {"commands"},
+        [DRM_MEM_MAPPINGS] = {"mappings"},
+        [DRM_MEM_BUFLISTS] = {"buflists"},
+        [DRM_MEM_AGPLISTS] = {"agplist"},
+        [DRM_MEM_SGLISTS] = {"sglist"},
+        [DRM_MEM_TOTALAGP] = {"totalagp"},
+        [DRM_MEM_BOUNDAGP] = {"boundagp"},
+        [DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
+        [DRM_MEM_CTXLIST] = {"ctxlist"},
+        [DRM_MEM_STUB] = {"stub"},
+        {NULL, 0,}      /* Last entry must be null */
 };

-void DRM(mem_init) (void) {
+void drm_mem_init (void) {
         drm_mem_stats_t *mem;
         struct sysinfo si;

-        for (mem = DRM(mem_stats); mem->name; ++mem) {
+        for (mem = drm_mem_stats; mem->name; ++mem) {
                 mem->succeed_count = 0;
                 mem->free_count = 0;
                 mem->fail_count = 0;
@@ -87,13 +86,13 @@ void DRM(mem_init) (void) {
         }

         si_meminfo(&si);
-        DRM(ram_available) = si.totalram;
-        DRM(ram_used) = 0;
+        drm_ram_available = si.totalram;
+        drm_ram_used = 0;
 }

 /* drm_mem_info is called whenever a process reads /dev/drm/mem. */

-static int DRM(_mem_info) (char *buf, char **start, off_t offset,
+static int drm__mem_info (char *buf, char **start, off_t offset,
                            int request, int *eof, void *data) {
         drm_mem_stats_t *pt;
         int len = 0;
@@ -112,11 +111,11 @@ static int DRM(_mem_info) (char *buf, char **start, off_t offset,
                        " | allocs bytes\n\n");
         DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
                        "system", 0, 0, 0,
-                       DRM(ram_available) << (PAGE_SHIFT - 10));
+                       drm_ram_available << (PAGE_SHIFT - 10));
         DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
-                       "locked", 0, 0, 0, DRM(ram_used) >> 10);
+                       "locked", 0, 0, 0, drm_ram_used >> 10);
         DRM_PROC_PRINT("\n");
-        for (pt = DRM(mem_stats); pt->name; pt++) {
+        for (pt = drm_mem_stats; pt->name; pt++) {
                 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
                                pt->name,
                                pt->succeed_count,
@@ -135,17 +134,17 @@ static int DRM(_mem_info) (char *buf, char **start, off_t offset,
         return len - offset;
 }

-int DRM(mem_info) (char *buf, char **start, off_t offset,
+int drm_mem_info (char *buf, char **start, off_t offset,
                    int len, int *eof, void *data) {
         int ret;

-        spin_lock(&DRM(mem_lock));
-        ret = DRM(_mem_info) (buf, start, offset, len, eof, data);
-        spin_unlock(&DRM(mem_lock));
+        spin_lock(&drm_mem_lock);
+        ret = drm__mem_info (buf, start, offset, len, eof, data);
+        spin_unlock(&drm_mem_lock);
         return ret;
 }

-void *DRM(alloc) (size_t size, int area) {
+void *drm_alloc (size_t size, int area) {
         void *pt;

         if (!size) {
@@ -154,41 +153,41 @@ void *DRM(alloc) (size_t size, int area) {
         }

         if (!(pt = kmalloc(size, GFP_KERNEL))) {
-                spin_lock(&DRM(mem_lock));
-                ++DRM(mem_stats)[area].fail_count;
-                spin_unlock(&DRM(mem_lock));
+                spin_lock(&drm_mem_lock);
+                ++drm_mem_stats[area].fail_count;
+                spin_unlock(&drm_mem_lock);
                 return NULL;
         }
-        spin_lock(&DRM(mem_lock));
-        ++DRM(mem_stats)[area].succeed_count;
-        DRM(mem_stats)[area].bytes_allocated += size;
-        spin_unlock(&DRM(mem_lock));
+        spin_lock(&drm_mem_lock);
+        ++drm_mem_stats[area].succeed_count;
+        drm_mem_stats[area].bytes_allocated += size;
+        spin_unlock(&drm_mem_lock);
         return pt;
 }

-void *DRM(calloc) (size_t nmemb, size_t size, int area) {
+void *drm_calloc (size_t nmemb, size_t size, int area) {
         void *addr;

-        addr = DRM(alloc) (nmemb * size, area);
+        addr = drm_alloc (nmemb * size, area);
         if (addr != NULL)
                 memset((void *)addr, 0, size * nmemb);

         return addr;
 }

-void *DRM(realloc) (void *oldpt, size_t oldsize, size_t size, int area) {
+void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) {
         void *pt;

-        if (!(pt = DRM(alloc) (size, area)))
+        if (!(pt = drm_alloc (size, area)))
                 return NULL;
         if (oldpt && oldsize) {
                 memcpy(pt, oldpt, oldsize);
-                DRM(free) (oldpt, oldsize, area);
+                drm_free (oldpt, oldsize, area);
         }
         return pt;
 }

-void DRM(free) (void *pt, size_t size, int area) {
+void drm_free (void *pt, size_t size, int area) {
         int alloc_count;
         int free_count;

@@ -196,43 +195,43 @@ void DRM(free) (void *pt, size_t size, int area) {
                 DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
         else
                 kfree(pt);
-        spin_lock(&DRM(mem_lock));
-        DRM(mem_stats)[area].bytes_freed += size;
-        free_count = ++DRM(mem_stats)[area].free_count;
-        alloc_count = DRM(mem_stats)[area].succeed_count;
-        spin_unlock(&DRM(mem_lock));
+        spin_lock(&drm_mem_lock);
+        drm_mem_stats[area].bytes_freed += size;
+        free_count = ++drm_mem_stats[area].free_count;
+        alloc_count = drm_mem_stats[area].succeed_count;
+        spin_unlock(&drm_mem_lock);
         if (free_count > alloc_count) {
                 DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
                               free_count, alloc_count);
         }
 }

-unsigned long DRM(alloc_pages) (int order, int area) {
+unsigned long drm_alloc_pages (int order, int area) {
         unsigned long address;
         unsigned long bytes = PAGE_SIZE << order;
         unsigned long addr;
         unsigned int sz;

-        spin_lock(&DRM(mem_lock));
-        if ((DRM(ram_used) >> PAGE_SHIFT)
-            > (DRM_RAM_PERCENT * DRM(ram_available)) / 100) {
-                spin_unlock(&DRM(mem_lock));
+        spin_lock(&drm_mem_lock);
+        if ((drm_ram_used >> PAGE_SHIFT)
+            > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
+                spin_unlock(&drm_mem_lock);
                 return 0;
         }
-        spin_unlock(&DRM(mem_lock));
+        spin_unlock(&drm_mem_lock);

         address = __get_free_pages(GFP_KERNEL, order);
         if (!address) {
-                spin_lock(&DRM(mem_lock));
-                ++DRM(mem_stats)[area].fail_count;
-                spin_unlock(&DRM(mem_lock));
+                spin_lock(&drm_mem_lock);
+                ++drm_mem_stats[area].fail_count;
+                spin_unlock(&drm_mem_lock);
                 return 0;
         }
-        spin_lock(&DRM(mem_lock));
-        ++DRM(mem_stats)[area].succeed_count;
-        DRM(mem_stats)[area].bytes_allocated += bytes;
-        DRM(ram_used) += bytes;
-        spin_unlock(&DRM(mem_lock));
+        spin_lock(&drm_mem_lock);
+        ++drm_mem_stats[area].succeed_count;
+        drm_mem_stats[area].bytes_allocated += bytes;
+        drm_ram_used += bytes;
+        spin_unlock(&drm_mem_lock);

         /* Zero outside the lock */
         memset((void *)address, 0, bytes);
@@ -246,7 +245,7 @@ unsigned long DRM(alloc_pages) (int order, int area) {
         return address;
 }

-void DRM(free_pages) (unsigned long address, int order, int area) {
+void drm_free_pages (unsigned long address, int order, int area) {
         unsigned long bytes = PAGE_SIZE << order;
         int alloc_count;
         int free_count;
@@ -264,12 +263,12 @@ void DRM(free_pages) (unsigned long address, int order, int area) {
                 free_pages(address, order);
         }

-        spin_lock(&DRM(mem_lock));
-        free_count = ++DRM(mem_stats)[area].free_count;
-        alloc_count = DRM(mem_stats)[area].succeed_count;
-        DRM(mem_stats)[area].bytes_freed += bytes;
-        DRM(ram_used) -= bytes;
-        spin_unlock(&DRM(mem_lock));
+        spin_lock(&drm_mem_lock);
+        free_count = ++drm_mem_stats[area].free_count;
+        alloc_count = drm_mem_stats[area].succeed_count;
+        drm_mem_stats[area].bytes_freed += bytes;
+        drm_ram_used -= bytes;
+        spin_unlock(&drm_mem_lock);
         if (free_count > alloc_count) {
                 DRM_MEM_ERROR(area,
                               "Excess frees: %d frees, %d allocs\n",
@@ -277,7 +276,7 @@ void DRM(free_pages) (unsigned long address, int order, int area) {
         }
 }

-void *DRM(ioremap) (unsigned long offset, unsigned long size,
+void *drm_ioremap (unsigned long offset, unsigned long size,
                     drm_device_t * dev) {
         void *pt;

@@ -288,19 +287,19 @@ void *DRM(ioremap) (unsigned long offset, unsigned long size,
         }

         if (!(pt = drm_ioremap(offset, size, dev))) {
-                spin_lock(&DRM(mem_lock));
-                ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
-                spin_unlock(&DRM(mem_lock));
+                spin_lock(&drm_mem_lock);
+                ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
+                spin_unlock(&drm_mem_lock);
                 return NULL;
         }
-        spin_lock(&DRM(mem_lock));
-        ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
-        DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
-        spin_unlock(&DRM(mem_lock));
+        spin_lock(&drm_mem_lock);
+        ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+        drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
+        spin_unlock(&drm_mem_lock);
         return pt;
 }

-void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size,
+void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
                             drm_device_t * dev) {
         void *pt;

@@ -311,19 +310,19 @@ void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size,
         }

         if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
-                spin_lock(&DRM(mem_lock));
-                ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
-                spin_unlock(&DRM(mem_lock));
+                spin_lock(&drm_mem_lock);
+                ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
+                spin_unlock(&drm_mem_lock);
                 return NULL;
         }
-        spin_lock(&DRM(mem_lock));
-        ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
-        DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
-        spin_unlock(&DRM(mem_lock));
+        spin_lock(&drm_mem_lock);
+        ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+        drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
+        spin_unlock(&drm_mem_lock);
         return pt;
 }

-void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
+void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
         int alloc_count;
         int free_count;

@@ -333,11 +332,11 @@ void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
         else
                 drm_ioremapfree(pt, size, dev);

-        spin_lock(&DRM(mem_lock));
-        DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
-        free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
-        alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
-        spin_unlock(&DRM(mem_lock));
+        spin_lock(&drm_mem_lock);
+        drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
+        free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
+        alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+        spin_unlock(&drm_mem_lock);
         if (free_count > alloc_count) {
                 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
                               "Excess frees: %d frees, %d allocs\n",
@@ -347,7 +346,7 @@ void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {

 #if __OS_HAS_AGP

-DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) {
+DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {
         DRM_AGP_MEM *handle;

         if (!pages) {
@@ -355,21 +354,21 @@ DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) {
                 return NULL;
         }

-        if ((handle = DRM(agp_allocate_memory) (pages, type))) {
-                spin_lock(&DRM(mem_lock));
-                ++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
-                DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated
+        if ((handle = drm_agp_allocate_memory (pages, type))) {
+                spin_lock(&drm_mem_lock);
+                ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+                drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
                     += pages << PAGE_SHIFT;
-                spin_unlock(&DRM(mem_lock));
+                spin_unlock(&drm_mem_lock);
                 return handle;
         }
-        spin_lock(&DRM(mem_lock));
-        ++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count;
-        spin_unlock(&DRM(mem_lock));
+        spin_lock(&drm_mem_lock);
+        ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
+        spin_unlock(&drm_mem_lock);
         return NULL;
 }

-int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
+int drm_free_agp (DRM_AGP_MEM * handle, int pages) {
         int alloc_count;
         int free_count;
         int retval = -EINVAL;
@@ -380,13 +379,13 @@ int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
                 return retval;
         }

-        if (DRM(agp_free_memory) (handle)) {
-                spin_lock(&DRM(mem_lock));
-                free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
-                alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
-                DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed
+        if (drm_agp_free_memory (handle)) {
+                spin_lock(&drm_mem_lock);
+                free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
+                alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+                drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
                     += pages << PAGE_SHIFT;
-                spin_unlock(&DRM(mem_lock));
+                spin_unlock(&drm_mem_lock);
                 if (free_count > alloc_count) {
                         DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
                                       "Excess frees: %d frees, %d allocs\n",
@@ -397,7 +396,7 @@ int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
         return retval;
 }

-int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) {
+int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) {
         int retcode = -EINVAL;

         if (!handle) {
@@ -406,21 +405,21 @@ int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) {
                 return retcode;
         }

-        if (!(retcode = DRM(agp_bind_memory) (handle, start))) {
-                spin_lock(&DRM(mem_lock));
-                ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
-                DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated
+        if (!(retcode = drm_agp_bind_memory (handle, start))) {
+                spin_lock(&drm_mem_lock);
+                ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+                drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
                     += handle->page_count << PAGE_SHIFT;
-                spin_unlock(&DRM(mem_lock));
+                spin_unlock(&drm_mem_lock);
                 return retcode;
         }
-        spin_lock(&DRM(mem_lock));
-        ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count;
-        spin_unlock(&DRM(mem_lock));
+        spin_lock(&drm_mem_lock);
+        ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
+        spin_unlock(&drm_mem_lock);
         return retcode;
 }

-int DRM(unbind_agp) (DRM_AGP_MEM * handle) {
+int drm_unbind_agp (DRM_AGP_MEM * handle) {
         int alloc_count;
         int free_count;
         int retcode = -EINVAL;
@@ -431,14 +430,14 @@ int DRM(unbind_agp) (DRM_AGP_MEM * handle) {
                 return retcode;
         }

-        if ((retcode = DRM(agp_unbind_memory) (handle)))
+        if ((retcode = drm_agp_unbind_memory (handle)))
                 return retcode;
-        spin_lock(&DRM(mem_lock));
-        free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
-        alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
-        DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed
+        spin_lock(&drm_mem_lock);
+        free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
+        alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+        drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
             += handle->page_count << PAGE_SHIFT;
-        spin_unlock(&DRM(mem_lock));
+        spin_unlock(&drm_mem_lock);
         if (free_count > alloc_count) {
                 DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
                               "Excess frees: %d frees, %d allocs\n",