Diffstat (limited to 'drivers/char/drm/drm_bufs.c')
-rw-r--r--  drivers/char/drm/drm_bufs.c  891
1 file changed, 445 insertions, 446 deletions
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index f28e70ae6606..319bdea8de8a 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -1,7 +1,7 @@
1/** 1/**
2 * \file drm_bufs.h 2 * \file drm_bufs.c
3 * Generic buffer template 3 * Generic buffer template
4 * 4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com> 5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com> 6 * \author Gareth Hughes <gareth@valinux.com>
7 */ 7 */
@@ -36,20 +36,22 @@
36#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include "drmP.h" 37#include "drmP.h"
38 38
39unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource) 39unsigned long drm_get_resource_start(drm_device_t * dev, unsigned int resource)
40{ 40{
41 return pci_resource_start(dev->pdev, resource); 41 return pci_resource_start(dev->pdev, resource);
42} 42}
43
43EXPORT_SYMBOL(drm_get_resource_start); 44EXPORT_SYMBOL(drm_get_resource_start);
44 45
45unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource) 46unsigned long drm_get_resource_len(drm_device_t * dev, unsigned int resource)
46{ 47{
47 return pci_resource_len(dev->pdev, resource); 48 return pci_resource_len(dev->pdev, resource);
48} 49}
50
49EXPORT_SYMBOL(drm_get_resource_len); 51EXPORT_SYMBOL(drm_get_resource_len);
50 52
51static drm_map_list_t *drm_find_matching_map(drm_device_t *dev, 53static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
52 drm_local_map_t *map) 54 drm_local_map_t * map)
53{ 55{
54 struct list_head *list; 56 struct list_head *list;
55 57
@@ -71,7 +73,8 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
71#define END_RANGE 0x40000000 73#define END_RANGE 0x40000000
72 74
73#ifdef _LP64 75#ifdef _LP64
74static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev) 76static __inline__ unsigned int HandleID(unsigned long lhandle,
77 drm_device_t * dev)
75{ 78{
76 static unsigned int map32_handle = START_RANGE; 79 static unsigned int map32_handle = START_RANGE;
77 unsigned int hash; 80 unsigned int hash;
@@ -81,12 +84,12 @@ static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev
81 map32_handle += PAGE_SIZE; 84 map32_handle += PAGE_SIZE;
82 if (map32_handle > END_RANGE) 85 if (map32_handle > END_RANGE)
83 map32_handle = START_RANGE; 86 map32_handle = START_RANGE;
84 } else 87 } else
85 hash = lhandle; 88 hash = lhandle;
86 89
87 while (1) { 90 while (1) {
88 drm_map_list_t *_entry; 91 drm_map_list_t *_entry;
89 list_for_each_entry(_entry, &dev->maplist->head,head) { 92 list_for_each_entry(_entry, &dev->maplist->head, head) {
90 if (_entry->user_token == hash) 93 if (_entry->user_token == hash)
91 break; 94 break;
92 } 95 }
@@ -114,16 +117,16 @@ static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev
114 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where 117 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
115 * applicable and if supported by the kernel. 118 * applicable and if supported by the kernel.
116 */ 119 */
117int drm_addmap_core(drm_device_t * dev, unsigned int offset, 120static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
118 unsigned int size, drm_map_type_t type, 121 unsigned int size, drm_map_type_t type,
119 drm_map_flags_t flags, drm_map_list_t **maplist) 122 drm_map_flags_t flags, drm_map_list_t ** maplist)
120{ 123{
121 drm_map_t *map; 124 drm_map_t *map;
122 drm_map_list_t *list; 125 drm_map_list_t *list;
123 drm_dma_handle_t *dmah; 126 drm_dma_handle_t *dmah;
124 127
125 map = drm_alloc( sizeof(*map), DRM_MEM_MAPS ); 128 map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
126 if ( !map ) 129 if (!map)
127 return -ENOMEM; 130 return -ENOMEM;
128 131
129 map->offset = offset; 132 map->offset = offset;
@@ -135,26 +138,26 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
135 * book keeping information about shared memory to allow for removal 138 * book keeping information about shared memory to allow for removal
136 * when processes fork. 139 * when processes fork.
137 */ 140 */
138 if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) { 141 if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
139 drm_free( map, sizeof(*map), DRM_MEM_MAPS ); 142 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
140 return -EINVAL; 143 return -EINVAL;
141 } 144 }
142 DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n", 145 DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
143 map->offset, map->size, map->type ); 146 map->offset, map->size, map->type);
144 if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) { 147 if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
145 drm_free( map, sizeof(*map), DRM_MEM_MAPS ); 148 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
146 return -EINVAL; 149 return -EINVAL;
147 } 150 }
148 map->mtrr = -1; 151 map->mtrr = -1;
149 map->handle = NULL; 152 map->handle = NULL;
150 153
151 switch ( map->type ) { 154 switch (map->type) {
152 case _DRM_REGISTERS: 155 case _DRM_REGISTERS:
153 case _DRM_FRAME_BUFFER: 156 case _DRM_FRAME_BUFFER:
154#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) 157#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
155 if ( map->offset + map->size < map->offset || 158 if (map->offset + map->size < map->offset ||
156 map->offset < virt_to_phys(high_memory) ) { 159 map->offset < virt_to_phys(high_memory)) {
157 drm_free( map, sizeof(*map), DRM_MEM_MAPS ); 160 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
158 return -EINVAL; 161 return -EINVAL;
159 } 162 }
160#endif 163#endif
@@ -169,8 +172,9 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
169 if (list != NULL) { 172 if (list != NULL) {
170 if (list->map->size != map->size) { 173 if (list->map->size != map->size) {
171 DRM_DEBUG("Matching maps of type %d with " 174 DRM_DEBUG("Matching maps of type %d with "
172 "mismatched sizes, (%ld vs %ld)\n", 175 "mismatched sizes, (%ld vs %ld)\n",
173 map->type, map->size, list->map->size); 176 map->type, map->size,
177 list->map->size);
174 list->map->size = map->size; 178 list->map->size = map->size;
175 } 179 }
176 180
@@ -180,35 +184,33 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
180 } 184 }
181 185
182 if (drm_core_has_MTRR(dev)) { 186 if (drm_core_has_MTRR(dev)) {
183 if ( map->type == _DRM_FRAME_BUFFER || 187 if (map->type == _DRM_FRAME_BUFFER ||
184 (map->flags & _DRM_WRITE_COMBINING) ) { 188 (map->flags & _DRM_WRITE_COMBINING)) {
185 map->mtrr = mtrr_add( map->offset, map->size, 189 map->mtrr = mtrr_add(map->offset, map->size,
186 MTRR_TYPE_WRCOMB, 1 ); 190 MTRR_TYPE_WRCOMB, 1);
187 } 191 }
188 } 192 }
189 if (map->type == _DRM_REGISTERS) 193 if (map->type == _DRM_REGISTERS)
190 map->handle = drm_ioremap( map->offset, map->size, 194 map->handle = drm_ioremap(map->offset, map->size, dev);
191 dev );
192 break; 195 break;
193 196
194 case _DRM_SHM: 197 case _DRM_SHM:
195 map->handle = vmalloc_32(map->size); 198 map->handle = vmalloc_32(map->size);
196 DRM_DEBUG( "%lu %d %p\n", 199 DRM_DEBUG("%lu %d %p\n",
197 map->size, drm_order( map->size ), map->handle ); 200 map->size, drm_order(map->size), map->handle);
198 if ( !map->handle ) { 201 if (!map->handle) {
199 drm_free( map, sizeof(*map), DRM_MEM_MAPS ); 202 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
200 return -ENOMEM; 203 return -ENOMEM;
201 } 204 }
202 map->offset = (unsigned long)map->handle; 205 map->offset = (unsigned long)map->handle;
203 if ( map->flags & _DRM_CONTAINS_LOCK ) { 206 if (map->flags & _DRM_CONTAINS_LOCK) {
204 /* Prevent a 2nd X Server from creating a 2nd lock */ 207 /* Prevent a 2nd X Server from creating a 2nd lock */
205 if (dev->lock.hw_lock != NULL) { 208 if (dev->lock.hw_lock != NULL) {
206 vfree( map->handle ); 209 vfree(map->handle);
207 drm_free( map, sizeof(*map), DRM_MEM_MAPS ); 210 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
208 return -EBUSY; 211 return -EBUSY;
209 } 212 }
210 dev->sigdata.lock = 213 dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */
211 dev->lock.hw_lock = map->handle; /* Pointer to lock */
212 } 214 }
213 break; 215 break;
214 case _DRM_AGP: 216 case _DRM_AGP:
@@ -217,7 +219,7 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
217 map->offset += dev->hose->mem_space->start; 219 map->offset += dev->hose->mem_space->start;
218#endif 220#endif
219 map->offset += dev->agp->base; 221 map->offset += dev->agp->base;
220 map->mtrr = dev->agp->agp_mtrr; /* for getmap */ 222 map->mtrr = dev->agp->agp_mtrr; /* for getmap */
221 } 223 }
222 break; 224 break;
223 case _DRM_SCATTER_GATHER: 225 case _DRM_SCATTER_GATHER:
@@ -227,7 +229,7 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
227 } 229 }
228 map->offset += (unsigned long)dev->sg->virtual; 230 map->offset += (unsigned long)dev->sg->virtual;
229 break; 231 break;
230 case _DRM_CONSISTENT: 232 case _DRM_CONSISTENT:
231 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G, 233 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
232 * As we're limiting the address to 2^32-1 (or less), 234 * As we're limiting the address to 2^32-1 (or less),
233 * casting it down to 32 bits is no problem, but we 235 * casting it down to 32 bits is no problem, but we
@@ -242,12 +244,12 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
242 kfree(dmah); 244 kfree(dmah);
243 break; 245 break;
244 default: 246 default:
245 drm_free( map, sizeof(*map), DRM_MEM_MAPS ); 247 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
246 return -EINVAL; 248 return -EINVAL;
247 } 249 }
248 250
249 list = drm_alloc(sizeof(*list), DRM_MEM_MAPS); 251 list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
250 if(!list) { 252 if (!list) {
251 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 253 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
252 return -EINVAL; 254 return -EINVAL;
253 } 255 }
@@ -258,18 +260,18 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
258 list_add(&list->head, &dev->maplist->head); 260 list_add(&list->head, &dev->maplist->head);
259 /* Assign a 32-bit handle */ 261 /* Assign a 32-bit handle */
260 /* We do it here so that dev->struct_sem protects the increment */ 262 /* We do it here so that dev->struct_sem protects the increment */
261 list->user_token = HandleID(map->type==_DRM_SHM 263 list->user_token = HandleID(map->type == _DRM_SHM
262 ? (unsigned long)map->handle 264 ? (unsigned long)map->handle
263 : map->offset, dev); 265 : map->offset, dev);
264 up(&dev->struct_sem); 266 up(&dev->struct_sem);
265 267
266 *maplist = list; 268 *maplist = list;
267 return 0; 269 return 0;
268} 270}
269 271
270int drm_addmap(drm_device_t *dev, unsigned int offset, 272int drm_addmap(drm_device_t * dev, unsigned int offset,
271 unsigned int size, drm_map_type_t type, 273 unsigned int size, drm_map_type_t type,
272 drm_map_flags_t flags, drm_local_map_t **map_ptr) 274 drm_map_flags_t flags, drm_local_map_t ** map_ptr)
273{ 275{
274 drm_map_list_t *list; 276 drm_map_list_t *list;
275 int rc; 277 int rc;
@@ -279,6 +281,7 @@ int drm_addmap(drm_device_t *dev, unsigned int offset,
279 *map_ptr = list->map; 281 *map_ptr = list->map;
280 return rc; 282 return rc;
281} 283}
284
282EXPORT_SYMBOL(drm_addmap); 285EXPORT_SYMBOL(drm_addmap);
283 286
284int drm_addmap_ioctl(struct inode *inode, struct file *filp, 287int drm_addmap_ioctl(struct inode *inode, struct file *filp,
@@ -294,24 +297,25 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
294 if (!(filp->f_mode & 3)) 297 if (!(filp->f_mode & 3))
295 return -EACCES; /* Require read/write */ 298 return -EACCES; /* Require read/write */
296 299
297 if (copy_from_user(& map, argp, sizeof(map))) { 300 if (copy_from_user(&map, argp, sizeof(map))) {
298 return -EFAULT; 301 return -EFAULT;
299 } 302 }
300 303
301 err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags, 304 err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
302 &maplist); 305 &maplist);
303 306
304 if (err) 307 if (err)
305 return err; 308 return err;
306 309
307 if (copy_to_user(argp, maplist->map, sizeof(drm_map_t))) 310 if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
308 return -EFAULT; 311 return -EFAULT;
309 if (put_user(maplist->user_token, &argp->handle)) 312
313 /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
314 if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
310 return -EFAULT; 315 return -EFAULT;
311 return 0; 316 return 0;
312} 317}
313 318
314
315/** 319/**
316 * Remove a map private from list and deallocate resources if the mapping 320 * Remove a map private from list and deallocate resources if the mapping
317 * isn't in use. 321 * isn't in use.
@@ -328,7 +332,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
328 * 332 *
329 * \sa drm_addmap 333 * \sa drm_addmap
330 */ 334 */
331int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map) 335int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
332{ 336{
333 struct list_head *list; 337 struct list_head *list;
334 drm_map_list_t *r_list = NULL; 338 drm_map_list_t *r_list = NULL;
@@ -359,9 +363,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
359 case _DRM_FRAME_BUFFER: 363 case _DRM_FRAME_BUFFER:
360 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) { 364 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
361 int retcode; 365 int retcode;
362 retcode = mtrr_del(map->mtrr, map->offset, 366 retcode = mtrr_del(map->mtrr, map->offset, map->size);
363 map->size); 367 DRM_DEBUG("mtrr_del=%d\n", retcode);
364 DRM_DEBUG ("mtrr_del=%d\n", retcode);
365 } 368 }
366 break; 369 break;
367 case _DRM_SHM: 370 case _DRM_SHM:
@@ -381,9 +384,10 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
381 384
382 return 0; 385 return 0;
383} 386}
387
384EXPORT_SYMBOL(drm_rmmap_locked); 388EXPORT_SYMBOL(drm_rmmap_locked);
385 389
386int drm_rmmap(drm_device_t *dev, drm_local_map_t *map) 390int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
387{ 391{
388 int ret; 392 int ret;
389 393
@@ -393,6 +397,7 @@ int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
393 397
394 return ret; 398 return ret;
395} 399}
400
396EXPORT_SYMBOL(drm_rmmap); 401EXPORT_SYMBOL(drm_rmmap);
397 402
398/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on 403/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
@@ -414,7 +419,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
414 struct list_head *list; 419 struct list_head *list;
415 int ret; 420 int ret;
416 421
417 if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) { 422 if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
418 return -EFAULT; 423 return -EFAULT;
419 } 424 }
420 425
@@ -423,7 +428,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
423 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head); 428 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
424 429
425 if (r_list->map && 430 if (r_list->map &&
426 r_list->user_token == (unsigned long) request.handle && 431 r_list->user_token == (unsigned long)request.handle &&
427 r_list->map->flags & _DRM_REMOVABLE) { 432 r_list->map->flags & _DRM_REMOVABLE) {
428 map = r_list->map; 433 map = r_list->map;
429 break; 434 break;
@@ -462,7 +467,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
462 * 467 *
463 * Frees any pages and buffers associated with the given entry. 468 * Frees any pages and buffers associated with the given entry.
464 */ 469 */
465static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry) 470static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
466{ 471{
467 int i; 472 int i;
468 473
@@ -470,30 +475,27 @@ static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
470 for (i = 0; i < entry->seg_count; i++) { 475 for (i = 0; i < entry->seg_count; i++) {
471 if (entry->seglist[i]) { 476 if (entry->seglist[i]) {
472 drm_free_pages(entry->seglist[i], 477 drm_free_pages(entry->seglist[i],
473 entry->page_order, 478 entry->page_order, DRM_MEM_DMA);
474 DRM_MEM_DMA);
475 } 479 }
476 } 480 }
477 drm_free(entry->seglist, 481 drm_free(entry->seglist,
478 entry->seg_count * 482 entry->seg_count *
479 sizeof(*entry->seglist), 483 sizeof(*entry->seglist), DRM_MEM_SEGS);
480 DRM_MEM_SEGS);
481 484
482 entry->seg_count = 0; 485 entry->seg_count = 0;
483 } 486 }
484 487
485 if (entry->buf_count) { 488 if (entry->buf_count) {
486 for (i = 0; i < entry->buf_count; i++) { 489 for (i = 0; i < entry->buf_count; i++) {
487 if (entry->buflist[i].dev_private) { 490 if (entry->buflist[i].dev_private) {
488 drm_free(entry->buflist[i].dev_private, 491 drm_free(entry->buflist[i].dev_private,
489 entry->buflist[i].dev_priv_size, 492 entry->buflist[i].dev_priv_size,
490 DRM_MEM_BUFS); 493 DRM_MEM_BUFS);
491 } 494 }
492 } 495 }
493 drm_free(entry->buflist, 496 drm_free(entry->buflist,
494 entry->buf_count * 497 entry->buf_count *
495 sizeof(*entry->buflist), 498 sizeof(*entry->buflist), DRM_MEM_BUFS);
496 DRM_MEM_BUFS);
497 499
498 entry->buf_count = 0; 500 entry->buf_count = 0;
499 } 501 }
@@ -506,12 +508,12 @@ static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
506 * \param dev drm_device_t to which the buffers are to be added. 508 * \param dev drm_device_t to which the buffers are to be added.
507 * \param request pointer to a drm_buf_desc_t describing the request. 509 * \param request pointer to a drm_buf_desc_t describing the request.
508 * \return zero on success or a negative number on failure. 510 * \return zero on success or a negative number on failure.
509 * 511 *
510 * After some sanity checks creates a drm_buf structure for each buffer and 512 * After some sanity checks creates a drm_buf structure for each buffer and
511 * reallocates the buffer list of the same size order to accommodate the new 513 * reallocates the buffer list of the same size order to accommodate the new
512 * buffers. 514 * buffers.
513 */ 515 */
514int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request) 516int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
515{ 517{
516 drm_device_dma_t *dma = dev->dma; 518 drm_device_dma_t *dma = dev->dma;
517 drm_buf_entry_t *entry; 519 drm_buf_entry_t *entry;
@@ -528,144 +530,145 @@ int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
528 int i; 530 int i;
529 drm_buf_t **temp_buflist; 531 drm_buf_t **temp_buflist;
530 532
531 if ( !dma ) return -EINVAL; 533 if (!dma)
534 return -EINVAL;
532 535
533 count = request->count; 536 count = request->count;
534 order = drm_order(request->size); 537 order = drm_order(request->size);
535 size = 1 << order; 538 size = 1 << order;
536 539
537 alignment = (request->flags & _DRM_PAGE_ALIGN) 540 alignment = (request->flags & _DRM_PAGE_ALIGN)
538 ? PAGE_ALIGN(size) : size; 541 ? PAGE_ALIGN(size) : size;
539 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; 542 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
540 total = PAGE_SIZE << page_order; 543 total = PAGE_SIZE << page_order;
541 544
542 byte_count = 0; 545 byte_count = 0;
543 agp_offset = dev->agp->base + request->agp_start; 546 agp_offset = dev->agp->base + request->agp_start;
544 547
545 DRM_DEBUG( "count: %d\n", count ); 548 DRM_DEBUG("count: %d\n", count);
546 DRM_DEBUG( "order: %d\n", order ); 549 DRM_DEBUG("order: %d\n", order);
547 DRM_DEBUG( "size: %d\n", size ); 550 DRM_DEBUG("size: %d\n", size);
548 DRM_DEBUG( "agp_offset: %lu\n", agp_offset ); 551 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
549 DRM_DEBUG( "alignment: %d\n", alignment ); 552 DRM_DEBUG("alignment: %d\n", alignment);
550 DRM_DEBUG( "page_order: %d\n", page_order ); 553 DRM_DEBUG("page_order: %d\n", page_order);
551 DRM_DEBUG( "total: %d\n", total ); 554 DRM_DEBUG("total: %d\n", total);
552 555
553 if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL; 556 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
554 if ( dev->queue_count ) return -EBUSY; /* Not while in use */ 557 return -EINVAL;
558 if (dev->queue_count)
559 return -EBUSY; /* Not while in use */
555 560
556 spin_lock( &dev->count_lock ); 561 spin_lock(&dev->count_lock);
557 if ( dev->buf_use ) { 562 if (dev->buf_use) {
558 spin_unlock( &dev->count_lock ); 563 spin_unlock(&dev->count_lock);
559 return -EBUSY; 564 return -EBUSY;
560 } 565 }
561 atomic_inc( &dev->buf_alloc ); 566 atomic_inc(&dev->buf_alloc);
562 spin_unlock( &dev->count_lock ); 567 spin_unlock(&dev->count_lock);
563 568
564 down( &dev->struct_sem ); 569 down(&dev->struct_sem);
565 entry = &dma->bufs[order]; 570 entry = &dma->bufs[order];
566 if ( entry->buf_count ) { 571 if (entry->buf_count) {
567 up( &dev->struct_sem ); 572 up(&dev->struct_sem);
568 atomic_dec( &dev->buf_alloc ); 573 atomic_dec(&dev->buf_alloc);
569 return -ENOMEM; /* May only call once for each order */ 574 return -ENOMEM; /* May only call once for each order */
570 } 575 }
571 576
572 if (count < 0 || count > 4096) { 577 if (count < 0 || count > 4096) {
573 up( &dev->struct_sem ); 578 up(&dev->struct_sem);
574 atomic_dec( &dev->buf_alloc ); 579 atomic_dec(&dev->buf_alloc);
575 return -EINVAL; 580 return -EINVAL;
576 } 581 }
577 582
578 entry->buflist = drm_alloc( count * sizeof(*entry->buflist), 583 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
579 DRM_MEM_BUFS ); 584 DRM_MEM_BUFS);
580 if ( !entry->buflist ) { 585 if (!entry->buflist) {
581 up( &dev->struct_sem ); 586 up(&dev->struct_sem);
582 atomic_dec( &dev->buf_alloc ); 587 atomic_dec(&dev->buf_alloc);
583 return -ENOMEM; 588 return -ENOMEM;
584 } 589 }
585 memset( entry->buflist, 0, count * sizeof(*entry->buflist) ); 590 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
586 591
587 entry->buf_size = size; 592 entry->buf_size = size;
588 entry->page_order = page_order; 593 entry->page_order = page_order;
589 594
590 offset = 0; 595 offset = 0;
591 596
592 while ( entry->buf_count < count ) { 597 while (entry->buf_count < count) {
593 buf = &entry->buflist[entry->buf_count]; 598 buf = &entry->buflist[entry->buf_count];
594 buf->idx = dma->buf_count + entry->buf_count; 599 buf->idx = dma->buf_count + entry->buf_count;
595 buf->total = alignment; 600 buf->total = alignment;
596 buf->order = order; 601 buf->order = order;
597 buf->used = 0; 602 buf->used = 0;
598 603
599 buf->offset = (dma->byte_count + offset); 604 buf->offset = (dma->byte_count + offset);
600 buf->bus_address = agp_offset + offset; 605 buf->bus_address = agp_offset + offset;
601 buf->address = (void *)(agp_offset + offset); 606 buf->address = (void *)(agp_offset + offset);
602 buf->next = NULL; 607 buf->next = NULL;
603 buf->waiting = 0; 608 buf->waiting = 0;
604 buf->pending = 0; 609 buf->pending = 0;
605 init_waitqueue_head( &buf->dma_wait ); 610 init_waitqueue_head(&buf->dma_wait);
606 buf->filp = NULL; 611 buf->filp = NULL;
607 612
608 buf->dev_priv_size = dev->driver->dev_priv_size; 613 buf->dev_priv_size = dev->driver->dev_priv_size;
609 buf->dev_private = drm_alloc( buf->dev_priv_size, 614 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
610 DRM_MEM_BUFS ); 615 if (!buf->dev_private) {
611 if(!buf->dev_private) {
612 /* Set count correctly so we free the proper amount. */ 616 /* Set count correctly so we free the proper amount. */
613 entry->buf_count = count; 617 entry->buf_count = count;
614 drm_cleanup_buf_error(dev,entry); 618 drm_cleanup_buf_error(dev, entry);
615 up( &dev->struct_sem ); 619 up(&dev->struct_sem);
616 atomic_dec( &dev->buf_alloc ); 620 atomic_dec(&dev->buf_alloc);
617 return -ENOMEM; 621 return -ENOMEM;
618 } 622 }
619 memset( buf->dev_private, 0, buf->dev_priv_size ); 623 memset(buf->dev_private, 0, buf->dev_priv_size);
620 624
621 DRM_DEBUG( "buffer %d @ %p\n", 625 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
622 entry->buf_count, buf->address );
623 626
624 offset += alignment; 627 offset += alignment;
625 entry->buf_count++; 628 entry->buf_count++;
626 byte_count += PAGE_SIZE << page_order; 629 byte_count += PAGE_SIZE << page_order;
627 } 630 }
628 631
629 DRM_DEBUG( "byte_count: %d\n", byte_count ); 632 DRM_DEBUG("byte_count: %d\n", byte_count);
630 633
631 temp_buflist = drm_realloc( dma->buflist, 634 temp_buflist = drm_realloc(dma->buflist,
632 dma->buf_count * sizeof(*dma->buflist), 635 dma->buf_count * sizeof(*dma->buflist),
633 (dma->buf_count + entry->buf_count) 636 (dma->buf_count + entry->buf_count)
634 * sizeof(*dma->buflist), 637 * sizeof(*dma->buflist), DRM_MEM_BUFS);
635 DRM_MEM_BUFS ); 638 if (!temp_buflist) {
636 if(!temp_buflist) {
637 /* Free the entry because it isn't valid */ 639 /* Free the entry because it isn't valid */
638 drm_cleanup_buf_error(dev,entry); 640 drm_cleanup_buf_error(dev, entry);
639 up( &dev->struct_sem ); 641 up(&dev->struct_sem);
640 atomic_dec( &dev->buf_alloc ); 642 atomic_dec(&dev->buf_alloc);
641 return -ENOMEM; 643 return -ENOMEM;
642 } 644 }
643 dma->buflist = temp_buflist; 645 dma->buflist = temp_buflist;
644 646
645 for ( i = 0 ; i < entry->buf_count ; i++ ) { 647 for (i = 0; i < entry->buf_count; i++) {
646 dma->buflist[i + dma->buf_count] = &entry->buflist[i]; 648 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
647 } 649 }
648 650
649 dma->buf_count += entry->buf_count; 651 dma->buf_count += entry->buf_count;
650 dma->byte_count += byte_count; 652 dma->byte_count += byte_count;
651 653
652 DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count ); 654 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
653 DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count ); 655 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
654 656
655 up( &dev->struct_sem ); 657 up(&dev->struct_sem);
656 658
657 request->count = entry->buf_count; 659 request->count = entry->buf_count;
658 request->size = size; 660 request->size = size;
659 661
660 dma->flags = _DRM_DMA_USE_AGP; 662 dma->flags = _DRM_DMA_USE_AGP;
661 663
662 atomic_dec( &dev->buf_alloc ); 664 atomic_dec(&dev->buf_alloc);
663 return 0; 665 return 0;
664} 666}
667
665EXPORT_SYMBOL(drm_addbufs_agp); 668EXPORT_SYMBOL(drm_addbufs_agp);
666#endif /* __OS_HAS_AGP */ 669#endif /* __OS_HAS_AGP */
667 670
668int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request) 671int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
669{ 672{
670 drm_device_dma_t *dma = dev->dma; 673 drm_device_dma_t *dma = dev->dma;
671 int count; 674 int count;
@@ -684,178 +687,174 @@ int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
684 unsigned long *temp_pagelist; 687 unsigned long *temp_pagelist;
685 drm_buf_t **temp_buflist; 688 drm_buf_t **temp_buflist;
686 689
687 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL; 690 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
688 if ( !dma ) return -EINVAL; 691 return -EINVAL;
692 if (!dma)
693 return -EINVAL;
689 694
690 count = request->count; 695 count = request->count;
691 order = drm_order(request->size); 696 order = drm_order(request->size);
692 size = 1 << order; 697 size = 1 << order;
693 698
694 DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n", 699 DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
695 request->count, request->size, size, 700 request->count, request->size, size, order, dev->queue_count);
696 order, dev->queue_count );
697 701
698 if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL; 702 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
699 if ( dev->queue_count ) return -EBUSY; /* Not while in use */ 703 return -EINVAL;
704 if (dev->queue_count)
705 return -EBUSY; /* Not while in use */
700 706
701 alignment = (request->flags & _DRM_PAGE_ALIGN) 707 alignment = (request->flags & _DRM_PAGE_ALIGN)
702 ? PAGE_ALIGN(size) : size; 708 ? PAGE_ALIGN(size) : size;
703 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; 709 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
704 total = PAGE_SIZE << page_order; 710 total = PAGE_SIZE << page_order;
705 711
706 spin_lock( &dev->count_lock ); 712 spin_lock(&dev->count_lock);
707 if ( dev->buf_use ) { 713 if (dev->buf_use) {
708 spin_unlock( &dev->count_lock ); 714 spin_unlock(&dev->count_lock);
709 return -EBUSY; 715 return -EBUSY;
710 } 716 }
711 atomic_inc( &dev->buf_alloc ); 717 atomic_inc(&dev->buf_alloc);
712 spin_unlock( &dev->count_lock ); 718 spin_unlock(&dev->count_lock);
713 719
714 down( &dev->struct_sem ); 720 down(&dev->struct_sem);
715 entry = &dma->bufs[order]; 721 entry = &dma->bufs[order];
716 if ( entry->buf_count ) { 722 if (entry->buf_count) {
717 up( &dev->struct_sem ); 723 up(&dev->struct_sem);
718 atomic_dec( &dev->buf_alloc ); 724 atomic_dec(&dev->buf_alloc);
719 return -ENOMEM; /* May only call once for each order */ 725 return -ENOMEM; /* May only call once for each order */
720 } 726 }
721 727
722 if (count < 0 || count > 4096) { 728 if (count < 0 || count > 4096) {
723 up( &dev->struct_sem ); 729 up(&dev->struct_sem);
724 atomic_dec( &dev->buf_alloc ); 730 atomic_dec(&dev->buf_alloc);
725 return -EINVAL; 731 return -EINVAL;
726 } 732 }
727 733
728 entry->buflist = drm_alloc( count * sizeof(*entry->buflist), 734 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
729 DRM_MEM_BUFS ); 735 DRM_MEM_BUFS);
730 if ( !entry->buflist ) { 736 if (!entry->buflist) {
731 up( &dev->struct_sem ); 737 up(&dev->struct_sem);
732 atomic_dec( &dev->buf_alloc ); 738 atomic_dec(&dev->buf_alloc);
733 return -ENOMEM; 739 return -ENOMEM;
734 } 740 }
735 memset( entry->buflist, 0, count * sizeof(*entry->buflist) ); 741 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
736 742
737 entry->seglist = drm_alloc( count * sizeof(*entry->seglist), 743 entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
738 DRM_MEM_SEGS ); 744 DRM_MEM_SEGS);
739 if ( !entry->seglist ) { 745 if (!entry->seglist) {
740 drm_free( entry->buflist, 746 drm_free(entry->buflist,
741 count * sizeof(*entry->buflist), 747 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
742 DRM_MEM_BUFS ); 748 up(&dev->struct_sem);
743 up( &dev->struct_sem ); 749 atomic_dec(&dev->buf_alloc);
744 atomic_dec( &dev->buf_alloc );
745 return -ENOMEM; 750 return -ENOMEM;
746 } 751 }
747 memset( entry->seglist, 0, count * sizeof(*entry->seglist) ); 752 memset(entry->seglist, 0, count * sizeof(*entry->seglist));
748 753
749 /* Keep the original pagelist until we know all the allocations 754 /* Keep the original pagelist until we know all the allocations
750 * have succeeded 755 * have succeeded
751 */ 756 */
752 temp_pagelist = drm_alloc( (dma->page_count + (count << page_order)) 757 temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
753 * sizeof(*dma->pagelist), 758 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
754 DRM_MEM_PAGES );
755 if (!temp_pagelist) { 759 if (!temp_pagelist) {
756 drm_free( entry->buflist, 760 drm_free(entry->buflist,
757 count * sizeof(*entry->buflist), 761 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
758 DRM_MEM_BUFS ); 762 drm_free(entry->seglist,
759 drm_free( entry->seglist, 763 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
760 count * sizeof(*entry->seglist), 764 up(&dev->struct_sem);
761 DRM_MEM_SEGS ); 765 atomic_dec(&dev->buf_alloc);
762 up( &dev->struct_sem );
763 atomic_dec( &dev->buf_alloc );
764 return -ENOMEM; 766 return -ENOMEM;
765 } 767 }
766 memcpy(temp_pagelist, 768 memcpy(temp_pagelist,
767 dma->pagelist, 769 dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
768 dma->page_count * sizeof(*dma->pagelist)); 770 DRM_DEBUG("pagelist: %d entries\n",
769 DRM_DEBUG( "pagelist: %d entries\n", 771 dma->page_count + (count << page_order));
770 dma->page_count + (count << page_order) );
771 772
772 entry->buf_size = size; 773 entry->buf_size = size;
773 entry->page_order = page_order; 774 entry->page_order = page_order;
774 byte_count = 0; 775 byte_count = 0;
775 page_count = 0; 776 page_count = 0;
776 777
777 while ( entry->buf_count < count ) { 778 while (entry->buf_count < count) {
778 page = drm_alloc_pages( page_order, DRM_MEM_DMA ); 779 page = drm_alloc_pages(page_order, DRM_MEM_DMA);
779 if ( !page ) { 780 if (!page) {
780 /* Set count correctly so we free the proper amount. */ 781 /* Set count correctly so we free the proper amount. */
781 entry->buf_count = count; 782 entry->buf_count = count;
782 entry->seg_count = count; 783 entry->seg_count = count;
783 drm_cleanup_buf_error(dev, entry); 784 drm_cleanup_buf_error(dev, entry);
784 drm_free( temp_pagelist, 785 drm_free(temp_pagelist,
785 (dma->page_count + (count << page_order)) 786 (dma->page_count + (count << page_order))
786 * sizeof(*dma->pagelist), 787 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
787 DRM_MEM_PAGES ); 788 up(&dev->struct_sem);
788 up( &dev->struct_sem ); 789 atomic_dec(&dev->buf_alloc);
789 atomic_dec( &dev->buf_alloc );
790 return -ENOMEM; 790 return -ENOMEM;
791 } 791 }
792 entry->seglist[entry->seg_count++] = page; 792 entry->seglist[entry->seg_count++] = page;
793 for ( i = 0 ; i < (1 << page_order) ; i++ ) { 793 for (i = 0; i < (1 << page_order); i++) {
794 DRM_DEBUG( "page %d @ 0x%08lx\n", 794 DRM_DEBUG("page %d @ 0x%08lx\n",
795 dma->page_count + page_count, 795 dma->page_count + page_count,
796 page + PAGE_SIZE * i ); 796 page + PAGE_SIZE * i);
797 temp_pagelist[dma->page_count + page_count++] 797 temp_pagelist[dma->page_count + page_count++]
798 = page + PAGE_SIZE * i; 798 = page + PAGE_SIZE * i;
799 } 799 }
800 for ( offset = 0 ; 800 for (offset = 0;
801 offset + size <= total && entry->buf_count < count ; 801 offset + size <= total && entry->buf_count < count;
802 offset += alignment, ++entry->buf_count ) { 802 offset += alignment, ++entry->buf_count) {
803 buf = &entry->buflist[entry->buf_count]; 803 buf = &entry->buflist[entry->buf_count];
804 buf->idx = dma->buf_count + entry->buf_count; 804 buf->idx = dma->buf_count + entry->buf_count;
805 buf->total = alignment; 805 buf->total = alignment;
806 buf->order = order; 806 buf->order = order;
807 buf->used = 0; 807 buf->used = 0;
808 buf->offset = (dma->byte_count + byte_count + offset); 808 buf->offset = (dma->byte_count + byte_count + offset);
809 buf->address = (void *)(page + offset); 809 buf->address = (void *)(page + offset);
810 buf->next = NULL; 810 buf->next = NULL;
811 buf->waiting = 0; 811 buf->waiting = 0;
812 buf->pending = 0; 812 buf->pending = 0;
813 init_waitqueue_head( &buf->dma_wait ); 813 init_waitqueue_head(&buf->dma_wait);
814 buf->filp = NULL; 814 buf->filp = NULL;
815 815
816 buf->dev_priv_size = dev->driver->dev_priv_size; 816 buf->dev_priv_size = dev->driver->dev_priv_size;
817 buf->dev_private = drm_alloc( buf->dev_priv_size, 817 buf->dev_private = drm_alloc(buf->dev_priv_size,
818 DRM_MEM_BUFS ); 818 DRM_MEM_BUFS);
819 if(!buf->dev_private) { 819 if (!buf->dev_private) {
820 /* Set count correctly so we free the proper amount. */ 820 /* Set count correctly so we free the proper amount. */
821 entry->buf_count = count; 821 entry->buf_count = count;
822 entry->seg_count = count; 822 entry->seg_count = count;
823 drm_cleanup_buf_error(dev,entry); 823 drm_cleanup_buf_error(dev, entry);
824 drm_free( temp_pagelist, 824 drm_free(temp_pagelist,
825 (dma->page_count + (count << page_order)) 825 (dma->page_count +
826 * sizeof(*dma->pagelist), 826 (count << page_order))
827 DRM_MEM_PAGES ); 827 * sizeof(*dma->pagelist),
828 up( &dev->struct_sem ); 828 DRM_MEM_PAGES);
829 atomic_dec( &dev->buf_alloc ); 829 up(&dev->struct_sem);
830 atomic_dec(&dev->buf_alloc);
830 return -ENOMEM; 831 return -ENOMEM;
831 } 832 }
832 memset( buf->dev_private, 0, buf->dev_priv_size ); 833 memset(buf->dev_private, 0, buf->dev_priv_size);
833 834
834 DRM_DEBUG( "buffer %d @ %p\n", 835 DRM_DEBUG("buffer %d @ %p\n",
835 entry->buf_count, buf->address ); 836 entry->buf_count, buf->address);
836 } 837 }
837 byte_count += PAGE_SIZE << page_order; 838 byte_count += PAGE_SIZE << page_order;
838 } 839 }
839 840
840 temp_buflist = drm_realloc( dma->buflist, 841 temp_buflist = drm_realloc(dma->buflist,
841 dma->buf_count * sizeof(*dma->buflist), 842 dma->buf_count * sizeof(*dma->buflist),
842 (dma->buf_count + entry->buf_count) 843 (dma->buf_count + entry->buf_count)
843 * sizeof(*dma->buflist), 844 * sizeof(*dma->buflist), DRM_MEM_BUFS);
844 DRM_MEM_BUFS );
845 if (!temp_buflist) { 845 if (!temp_buflist) {
846 /* Free the entry because it isn't valid */ 846 /* Free the entry because it isn't valid */
847 drm_cleanup_buf_error(dev,entry); 847 drm_cleanup_buf_error(dev, entry);
848 drm_free( temp_pagelist, 848 drm_free(temp_pagelist,
849 (dma->page_count + (count << page_order)) 849 (dma->page_count + (count << page_order))
850 * sizeof(*dma->pagelist), 850 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
851 DRM_MEM_PAGES ); 851 up(&dev->struct_sem);
852 up( &dev->struct_sem ); 852 atomic_dec(&dev->buf_alloc);
853 atomic_dec( &dev->buf_alloc );
854 return -ENOMEM; 853 return -ENOMEM;
855 } 854 }
856 dma->buflist = temp_buflist; 855 dma->buflist = temp_buflist;
857 856
858 for ( i = 0 ; i < entry->buf_count ; i++ ) { 857 for (i = 0; i < entry->buf_count; i++) {
859 dma->buflist[i + dma->buf_count] = &entry->buflist[i]; 858 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
860 } 859 }
861 860
@@ -864,8 +863,8 @@ int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
864 */ 863 */
865 if (dma->page_count) { 864 if (dma->page_count) {
866 drm_free(dma->pagelist, 865 drm_free(dma->pagelist,
867 dma->page_count * sizeof(*dma->pagelist), 866 dma->page_count * sizeof(*dma->pagelist),
868 DRM_MEM_PAGES); 867 DRM_MEM_PAGES);
869 } 868 }
870 dma->pagelist = temp_pagelist; 869 dma->pagelist = temp_pagelist;
871 870
@@ -874,18 +873,19 @@ int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
874 dma->page_count += entry->seg_count << page_order; 873 dma->page_count += entry->seg_count << page_order;
875 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); 874 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
876 875
877 up( &dev->struct_sem ); 876 up(&dev->struct_sem);
878 877
879 request->count = entry->buf_count; 878 request->count = entry->buf_count;
880 request->size = size; 879 request->size = size;
881 880
882 atomic_dec( &dev->buf_alloc ); 881 atomic_dec(&dev->buf_alloc);
883 return 0; 882 return 0;
884 883
885} 884}
885
886EXPORT_SYMBOL(drm_addbufs_pci); 886EXPORT_SYMBOL(drm_addbufs_pci);
887 887
888static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request) 888static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
889{ 889{
890 drm_device_dma_t *dma = dev->dma; 890 drm_device_dma_t *dma = dev->dma;
891 drm_buf_entry_t *entry; 891 drm_buf_entry_t *entry;
@@ -902,146 +902,147 @@ static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
902 int i; 902 int i;
903 drm_buf_t **temp_buflist; 903 drm_buf_t **temp_buflist;
904 904
905 if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; 905 if (!drm_core_check_feature(dev, DRIVER_SG))
906 906 return -EINVAL;
907 if ( !dma ) return -EINVAL; 907
908 if (!dma)
909 return -EINVAL;
908 910
909 count = request->count; 911 count = request->count;
910 order = drm_order(request->size); 912 order = drm_order(request->size);
911 size = 1 << order; 913 size = 1 << order;
912 914
913 alignment = (request->flags & _DRM_PAGE_ALIGN) 915 alignment = (request->flags & _DRM_PAGE_ALIGN)
914 ? PAGE_ALIGN(size) : size; 916 ? PAGE_ALIGN(size) : size;
915 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; 917 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
916 total = PAGE_SIZE << page_order; 918 total = PAGE_SIZE << page_order;
917 919
918 byte_count = 0; 920 byte_count = 0;
919 agp_offset = request->agp_start; 921 agp_offset = request->agp_start;
920 922
921 DRM_DEBUG( "count: %d\n", count ); 923 DRM_DEBUG("count: %d\n", count);
922 DRM_DEBUG( "order: %d\n", order ); 924 DRM_DEBUG("order: %d\n", order);
923 DRM_DEBUG( "size: %d\n", size ); 925 DRM_DEBUG("size: %d\n", size);
924 DRM_DEBUG( "agp_offset: %lu\n", agp_offset ); 926 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
925 DRM_DEBUG( "alignment: %d\n", alignment ); 927 DRM_DEBUG("alignment: %d\n", alignment);
926 DRM_DEBUG( "page_order: %d\n", page_order ); 928 DRM_DEBUG("page_order: %d\n", page_order);
927 DRM_DEBUG( "total: %d\n", total ); 929 DRM_DEBUG("total: %d\n", total);
928 930
929 if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL; 931 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
930 if ( dev->queue_count ) return -EBUSY; /* Not while in use */ 932 return -EINVAL;
933 if (dev->queue_count)
934 return -EBUSY; /* Not while in use */
931 935
932 spin_lock( &dev->count_lock ); 936 spin_lock(&dev->count_lock);
933 if ( dev->buf_use ) { 937 if (dev->buf_use) {
934 spin_unlock( &dev->count_lock ); 938 spin_unlock(&dev->count_lock);
935 return -EBUSY; 939 return -EBUSY;
936 } 940 }
937 atomic_inc( &dev->buf_alloc ); 941 atomic_inc(&dev->buf_alloc);
938 spin_unlock( &dev->count_lock ); 942 spin_unlock(&dev->count_lock);
939 943
940 down( &dev->struct_sem ); 944 down(&dev->struct_sem);
941 entry = &dma->bufs[order]; 945 entry = &dma->bufs[order];
942 if ( entry->buf_count ) { 946 if (entry->buf_count) {
943 up( &dev->struct_sem ); 947 up(&dev->struct_sem);
944 atomic_dec( &dev->buf_alloc ); 948 atomic_dec(&dev->buf_alloc);
945 return -ENOMEM; /* May only call once for each order */ 949 return -ENOMEM; /* May only call once for each order */
946 } 950 }
947 951
948 if (count < 0 || count > 4096) { 952 if (count < 0 || count > 4096) {
949 up( &dev->struct_sem ); 953 up(&dev->struct_sem);
950 atomic_dec( &dev->buf_alloc ); 954 atomic_dec(&dev->buf_alloc);
951 return -EINVAL; 955 return -EINVAL;
952 } 956 }
953 957
954 entry->buflist = drm_alloc( count * sizeof(*entry->buflist), 958 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
955 DRM_MEM_BUFS ); 959 DRM_MEM_BUFS);
956 if ( !entry->buflist ) { 960 if (!entry->buflist) {
957 up( &dev->struct_sem ); 961 up(&dev->struct_sem);
958 atomic_dec( &dev->buf_alloc ); 962 atomic_dec(&dev->buf_alloc);
959 return -ENOMEM; 963 return -ENOMEM;
960 } 964 }
961 memset( entry->buflist, 0, count * sizeof(*entry->buflist) ); 965 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
962 966
963 entry->buf_size = size; 967 entry->buf_size = size;
964 entry->page_order = page_order; 968 entry->page_order = page_order;
965 969
966 offset = 0; 970 offset = 0;
967 971
968 while ( entry->buf_count < count ) { 972 while (entry->buf_count < count) {
969 buf = &entry->buflist[entry->buf_count]; 973 buf = &entry->buflist[entry->buf_count];
970 buf->idx = dma->buf_count + entry->buf_count; 974 buf->idx = dma->buf_count + entry->buf_count;
971 buf->total = alignment; 975 buf->total = alignment;
972 buf->order = order; 976 buf->order = order;
973 buf->used = 0; 977 buf->used = 0;
974 978
975 buf->offset = (dma->byte_count + offset); 979 buf->offset = (dma->byte_count + offset);
976 buf->bus_address = agp_offset + offset; 980 buf->bus_address = agp_offset + offset;
977 buf->address = (void *)(agp_offset + offset 981 buf->address = (void *)(agp_offset + offset
978 + (unsigned long)dev->sg->virtual); 982 + (unsigned long)dev->sg->virtual);
979 buf->next = NULL; 983 buf->next = NULL;
980 buf->waiting = 0; 984 buf->waiting = 0;
981 buf->pending = 0; 985 buf->pending = 0;
982 init_waitqueue_head( &buf->dma_wait ); 986 init_waitqueue_head(&buf->dma_wait);
983 buf->filp = NULL; 987 buf->filp = NULL;
984 988
985 buf->dev_priv_size = dev->driver->dev_priv_size; 989 buf->dev_priv_size = dev->driver->dev_priv_size;
986 buf->dev_private = drm_alloc( buf->dev_priv_size, 990 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
987 DRM_MEM_BUFS ); 991 if (!buf->dev_private) {
988 if(!buf->dev_private) {
989 /* Set count correctly so we free the proper amount. */ 992 /* Set count correctly so we free the proper amount. */
990 entry->buf_count = count; 993 entry->buf_count = count;
991 drm_cleanup_buf_error(dev,entry); 994 drm_cleanup_buf_error(dev, entry);
992 up( &dev->struct_sem ); 995 up(&dev->struct_sem);
993 atomic_dec( &dev->buf_alloc ); 996 atomic_dec(&dev->buf_alloc);
994 return -ENOMEM; 997 return -ENOMEM;
995 } 998 }
996 999
997 memset( buf->dev_private, 0, buf->dev_priv_size ); 1000 memset(buf->dev_private, 0, buf->dev_priv_size);
998 1001
999 DRM_DEBUG( "buffer %d @ %p\n", 1002 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1000 entry->buf_count, buf->address );
1001 1003
1002 offset += alignment; 1004 offset += alignment;
1003 entry->buf_count++; 1005 entry->buf_count++;
1004 byte_count += PAGE_SIZE << page_order; 1006 byte_count += PAGE_SIZE << page_order;
1005 } 1007 }
1006 1008
1007 DRM_DEBUG( "byte_count: %d\n", byte_count ); 1009 DRM_DEBUG("byte_count: %d\n", byte_count);
1008 1010
1009 temp_buflist = drm_realloc( dma->buflist, 1011 temp_buflist = drm_realloc(dma->buflist,
1010 dma->buf_count * sizeof(*dma->buflist), 1012 dma->buf_count * sizeof(*dma->buflist),
1011 (dma->buf_count + entry->buf_count) 1013 (dma->buf_count + entry->buf_count)
1012 * sizeof(*dma->buflist), 1014 * sizeof(*dma->buflist), DRM_MEM_BUFS);
1013 DRM_MEM_BUFS ); 1015 if (!temp_buflist) {
1014 if(!temp_buflist) {
1015 /* Free the entry because it isn't valid */ 1016 /* Free the entry because it isn't valid */
1016 drm_cleanup_buf_error(dev,entry); 1017 drm_cleanup_buf_error(dev, entry);
1017 up( &dev->struct_sem ); 1018 up(&dev->struct_sem);
1018 atomic_dec( &dev->buf_alloc ); 1019 atomic_dec(&dev->buf_alloc);
1019 return -ENOMEM; 1020 return -ENOMEM;
1020 } 1021 }
1021 dma->buflist = temp_buflist; 1022 dma->buflist = temp_buflist;
1022 1023
1023 for ( i = 0 ; i < entry->buf_count ; i++ ) { 1024 for (i = 0; i < entry->buf_count; i++) {
1024 dma->buflist[i + dma->buf_count] = &entry->buflist[i]; 1025 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1025 } 1026 }
1026 1027
1027 dma->buf_count += entry->buf_count; 1028 dma->buf_count += entry->buf_count;
1028 dma->byte_count += byte_count; 1029 dma->byte_count += byte_count;
1029 1030
1030 DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count ); 1031 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1031 DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count ); 1032 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1032 1033
1033 up( &dev->struct_sem ); 1034 up(&dev->struct_sem);
1034 1035
1035 request->count = entry->buf_count; 1036 request->count = entry->buf_count;
1036 request->size = size; 1037 request->size = size;
1037 1038
1038 dma->flags = _DRM_DMA_USE_SG; 1039 dma->flags = _DRM_DMA_USE_SG;
1039 1040
1040 atomic_dec( &dev->buf_alloc ); 1041 atomic_dec(&dev->buf_alloc);
1041 return 0; 1042 return 0;
1042} 1043}
1043 1044
1044static int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request) 1045static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
1045{ 1046{
1046 drm_device_dma_t *dma = dev->dma; 1047 drm_device_dma_t *dma = dev->dma;
1047 drm_buf_entry_t *entry; 1048 drm_buf_entry_t *entry;
@@ -1060,7 +1061,7 @@ static int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
1060 1061
1061 if (!drm_core_check_feature(dev, DRIVER_FB_DMA)) 1062 if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1062 return -EINVAL; 1063 return -EINVAL;
1063 1064
1064 if (!dma) 1065 if (!dma)
1065 return -EINVAL; 1066 return -EINVAL;
1066 1067
@@ -1210,43 +1211,41 @@ static int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
1210 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent 1211 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1211 * PCI memory respectively. 1212 * PCI memory respectively.
1212 */ 1213 */
1213int drm_addbufs( struct inode *inode, struct file *filp, 1214int drm_addbufs(struct inode *inode, struct file *filp,
1214 unsigned int cmd, unsigned long arg ) 1215 unsigned int cmd, unsigned long arg)
1215{ 1216{
1216 drm_buf_desc_t request; 1217 drm_buf_desc_t request;
1217 drm_file_t *priv = filp->private_data; 1218 drm_file_t *priv = filp->private_data;
1218 drm_device_t *dev = priv->head->dev; 1219 drm_device_t *dev = priv->head->dev;
1219 int ret; 1220 int ret;
1220 1221
1221 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1222 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1222 return -EINVAL; 1223 return -EINVAL;
1223 1224
1224 if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg, 1225 if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
1225 sizeof(request) ) ) 1226 sizeof(request)))
1226 return -EFAULT; 1227 return -EFAULT;
1227 1228
1228#if __OS_HAS_AGP 1229#if __OS_HAS_AGP
1229 if ( request.flags & _DRM_AGP_BUFFER ) 1230 if (request.flags & _DRM_AGP_BUFFER)
1230 ret=drm_addbufs_agp(dev, &request); 1231 ret = drm_addbufs_agp(dev, &request);
1231 else 1232 else
1232#endif 1233#endif
1233 if ( request.flags & _DRM_SG_BUFFER ) 1234 if (request.flags & _DRM_SG_BUFFER)
1234 ret=drm_addbufs_sg(dev, &request); 1235 ret = drm_addbufs_sg(dev, &request);
1235 else if ( request.flags & _DRM_FB_BUFFER) 1236 else if (request.flags & _DRM_FB_BUFFER)
1236 ret=drm_addbufs_fb(dev, &request); 1237 ret = drm_addbufs_fb(dev, &request);
1237 else 1238 else
1238 ret=drm_addbufs_pci(dev, &request); 1239 ret = drm_addbufs_pci(dev, &request);
1239 1240
1240 if (ret==0) { 1241 if (ret == 0) {
1241 if (copy_to_user((void __user *)arg, &request, 1242 if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
1242 sizeof(request))) {
1243 ret = -EFAULT; 1243 ret = -EFAULT;
1244 } 1244 }
1245 } 1245 }
1246 return ret; 1246 return ret;
1247} 1247}
1248 1248
1249
1250/** 1249/**
1251 * Get information about the buffer mappings. 1250 * Get information about the buffer mappings.
1252 * 1251 *
@@ -1264,8 +1263,8 @@ int drm_addbufs( struct inode *inode, struct file *filp,
1264 * lock, preventing of allocating more buffers after this call. Information 1263 * lock, preventing of allocating more buffers after this call. Information
1265 * about each requested buffer is then copied into user space. 1264 * about each requested buffer is then copied into user space.
1266 */ 1265 */
1267int drm_infobufs( struct inode *inode, struct file *filp, 1266int drm_infobufs(struct inode *inode, struct file *filp,
1268 unsigned int cmd, unsigned long arg ) 1267 unsigned int cmd, unsigned long arg)
1269{ 1268{
1270 drm_file_t *priv = filp->private_data; 1269 drm_file_t *priv = filp->private_data;
1271 drm_device_t *dev = priv->head->dev; 1270 drm_device_t *dev = priv->head->dev;
@@ -1278,58 +1277,61 @@ int drm_infobufs( struct inode *inode, struct file *filp,
1278 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1277 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1279 return -EINVAL; 1278 return -EINVAL;
1280 1279
1281 if ( !dma ) return -EINVAL; 1280 if (!dma)
1281 return -EINVAL;
1282 1282
1283 spin_lock( &dev->count_lock ); 1283 spin_lock(&dev->count_lock);
1284 if ( atomic_read( &dev->buf_alloc ) ) { 1284 if (atomic_read(&dev->buf_alloc)) {
1285 spin_unlock( &dev->count_lock ); 1285 spin_unlock(&dev->count_lock);
1286 return -EBUSY; 1286 return -EBUSY;
1287 } 1287 }
1288 ++dev->buf_use; /* Can't allocate more after this call */ 1288 ++dev->buf_use; /* Can't allocate more after this call */
1289 spin_unlock( &dev->count_lock ); 1289 spin_unlock(&dev->count_lock);
1290 1290
1291 if ( copy_from_user( &request, argp, sizeof(request) ) ) 1291 if (copy_from_user(&request, argp, sizeof(request)))
1292 return -EFAULT; 1292 return -EFAULT;
1293 1293
1294 for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) { 1294 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1295 if ( dma->bufs[i].buf_count ) ++count; 1295 if (dma->bufs[i].buf_count)
1296 ++count;
1296 } 1297 }
1297 1298
1298 DRM_DEBUG( "count = %d\n", count ); 1299 DRM_DEBUG("count = %d\n", count);
1299 1300
1300 if ( request.count >= count ) { 1301 if (request.count >= count) {
1301 for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) { 1302 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1302 if ( dma->bufs[i].buf_count ) { 1303 if (dma->bufs[i].buf_count) {
1303 drm_buf_desc_t __user *to = &request.list[count]; 1304 drm_buf_desc_t __user *to =
1305 &request.list[count];
1304 drm_buf_entry_t *from = &dma->bufs[i]; 1306 drm_buf_entry_t *from = &dma->bufs[i];
1305 drm_freelist_t *list = &dma->bufs[i].freelist; 1307 drm_freelist_t *list = &dma->bufs[i].freelist;
1306 if ( copy_to_user( &to->count, 1308 if (copy_to_user(&to->count,
1307 &from->buf_count, 1309 &from->buf_count,
1308 sizeof(from->buf_count) ) || 1310 sizeof(from->buf_count)) ||
1309 copy_to_user( &to->size, 1311 copy_to_user(&to->size,
1310 &from->buf_size, 1312 &from->buf_size,
1311 sizeof(from->buf_size) ) || 1313 sizeof(from->buf_size)) ||
1312 copy_to_user( &to->low_mark, 1314 copy_to_user(&to->low_mark,
1313 &list->low_mark, 1315 &list->low_mark,
1314 sizeof(list->low_mark) ) || 1316 sizeof(list->low_mark)) ||
1315 copy_to_user( &to->high_mark, 1317 copy_to_user(&to->high_mark,
1316 &list->high_mark, 1318 &list->high_mark,
1317 sizeof(list->high_mark) ) ) 1319 sizeof(list->high_mark)))
1318 return -EFAULT; 1320 return -EFAULT;
1319 1321
1320 DRM_DEBUG( "%d %d %d %d %d\n", 1322 DRM_DEBUG("%d %d %d %d %d\n",
1321 i, 1323 i,
1322 dma->bufs[i].buf_count, 1324 dma->bufs[i].buf_count,
1323 dma->bufs[i].buf_size, 1325 dma->bufs[i].buf_size,
1324 dma->bufs[i].freelist.low_mark, 1326 dma->bufs[i].freelist.low_mark,
1325 dma->bufs[i].freelist.high_mark ); 1327 dma->bufs[i].freelist.high_mark);
1326 ++count; 1328 ++count;
1327 } 1329 }
1328 } 1330 }
1329 } 1331 }
1330 request.count = count; 1332 request.count = count;
1331 1333
1332 if ( copy_to_user( argp, &request, sizeof(request) ) ) 1334 if (copy_to_user(argp, &request, sizeof(request)))
1333 return -EFAULT; 1335 return -EFAULT;
1334 1336
1335 return 0; 1337 return 0;
@@ -1349,8 +1351,8 @@ int drm_infobufs( struct inode *inode, struct file *filp,
1349 * 1351 *
1350 * \note This ioctl is deprecated and mostly never used. 1352 * \note This ioctl is deprecated and mostly never used.
1351 */ 1353 */
1352int drm_markbufs( struct inode *inode, struct file *filp, 1354int drm_markbufs(struct inode *inode, struct file *filp,
1353 unsigned int cmd, unsigned long arg ) 1355 unsigned int cmd, unsigned long arg)
1354{ 1356{
1355 drm_file_t *priv = filp->private_data; 1357 drm_file_t *priv = filp->private_data;
1356 drm_device_t *dev = priv->head->dev; 1358 drm_device_t *dev = priv->head->dev;
@@ -1362,44 +1364,45 @@ int drm_markbufs( struct inode *inode, struct file *filp,
1362 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1364 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1363 return -EINVAL; 1365 return -EINVAL;
1364 1366
1365 if ( !dma ) return -EINVAL; 1367 if (!dma)
1368 return -EINVAL;
1366 1369
1367 if ( copy_from_user( &request, 1370 if (copy_from_user(&request,
1368 (drm_buf_desc_t __user *)arg, 1371 (drm_buf_desc_t __user *) arg, sizeof(request)))
1369 sizeof(request) ) )
1370 return -EFAULT; 1372 return -EFAULT;
1371 1373
1372 DRM_DEBUG( "%d, %d, %d\n", 1374 DRM_DEBUG("%d, %d, %d\n",
1373 request.size, request.low_mark, request.high_mark ); 1375 request.size, request.low_mark, request.high_mark);
1374 order = drm_order( request.size ); 1376 order = drm_order(request.size);
1375 if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL; 1377 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1378 return -EINVAL;
1376 entry = &dma->bufs[order]; 1379 entry = &dma->bufs[order];
1377 1380
1378 if ( request.low_mark < 0 || request.low_mark > entry->buf_count ) 1381 if (request.low_mark < 0 || request.low_mark > entry->buf_count)
1379 return -EINVAL; 1382 return -EINVAL;
1380 if ( request.high_mark < 0 || request.high_mark > entry->buf_count ) 1383 if (request.high_mark < 0 || request.high_mark > entry->buf_count)
1381 return -EINVAL; 1384 return -EINVAL;
1382 1385
1383 entry->freelist.low_mark = request.low_mark; 1386 entry->freelist.low_mark = request.low_mark;
1384 entry->freelist.high_mark = request.high_mark; 1387 entry->freelist.high_mark = request.high_mark;
1385 1388
1386 return 0; 1389 return 0;
1387} 1390}
1388 1391
1389/** 1392/**
1390 * Unreserve the buffers in list, previously reserved using drmDMA. 1393 * Unreserve the buffers in list, previously reserved using drmDMA.
1391 * 1394 *
1392 * \param inode device inode. 1395 * \param inode device inode.
1393 * \param filp file pointer. 1396 * \param filp file pointer.
1394 * \param cmd command. 1397 * \param cmd command.
1395 * \param arg pointer to a drm_buf_free structure. 1398 * \param arg pointer to a drm_buf_free structure.
1396 * \return zero on success or a negative number on failure. 1399 * \return zero on success or a negative number on failure.
1397 * 1400 *
1398 * Calls free_buffer() for each used buffer. 1401 * Calls free_buffer() for each used buffer.
1399 * This function is primarily used for debugging. 1402 * This function is primarily used for debugging.
1400 */ 1403 */
1401int drm_freebufs( struct inode *inode, struct file *filp, 1404int drm_freebufs(struct inode *inode, struct file *filp,
1402 unsigned int cmd, unsigned long arg ) 1405 unsigned int cmd, unsigned long arg)
1403{ 1406{
1404 drm_file_t *priv = filp->private_data; 1407 drm_file_t *priv = filp->private_data;
1405 drm_device_t *dev = priv->head->dev; 1408 drm_device_t *dev = priv->head->dev;
@@ -1412,31 +1415,29 @@ int drm_freebufs( struct inode *inode, struct file *filp,
1412 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1415 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1413 return -EINVAL; 1416 return -EINVAL;
1414 1417
1415 if ( !dma ) return -EINVAL; 1418 if (!dma)
1419 return -EINVAL;
1416 1420
1417 if ( copy_from_user( &request, 1421 if (copy_from_user(&request,
1418 (drm_buf_free_t __user *)arg, 1422 (drm_buf_free_t __user *) arg, sizeof(request)))
1419 sizeof(request) ) )
1420 return -EFAULT; 1423 return -EFAULT;
1421 1424
1422 DRM_DEBUG( "%d\n", request.count ); 1425 DRM_DEBUG("%d\n", request.count);
1423 for ( i = 0 ; i < request.count ; i++ ) { 1426 for (i = 0; i < request.count; i++) {
1424 if ( copy_from_user( &idx, 1427 if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
1425 &request.list[i],
1426 sizeof(idx) ) )
1427 return -EFAULT; 1428 return -EFAULT;
1428 if ( idx < 0 || idx >= dma->buf_count ) { 1429 if (idx < 0 || idx >= dma->buf_count) {
1429 DRM_ERROR( "Index %d (of %d max)\n", 1430 DRM_ERROR("Index %d (of %d max)\n",
1430 idx, dma->buf_count - 1 ); 1431 idx, dma->buf_count - 1);
1431 return -EINVAL; 1432 return -EINVAL;
1432 } 1433 }
1433 buf = dma->buflist[idx]; 1434 buf = dma->buflist[idx];
1434 if ( buf->filp != filp ) { 1435 if (buf->filp != filp) {
1435 DRM_ERROR( "Process %d freeing buffer not owned\n", 1436 DRM_ERROR("Process %d freeing buffer not owned\n",
1436 current->pid ); 1437 current->pid);
1437 return -EINVAL; 1438 return -EINVAL;
1438 } 1439 }
1439 drm_free_buffer( dev, buf ); 1440 drm_free_buffer(dev, buf);
1440 } 1441 }
1441 1442
1442 return 0; 1443 return 0;
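Seen from user space this is the mirror image of reserving buffers with drmDMA: the caller passes an array of buffer indices, and the kernel reads them one copy_from_user() at a time, rejecting out-of-range indices and buffers owned by another filp before handing each one to drm_free_buffer(). A minimal sketch, assuming DRM_IOCTL_FREE_BUFS and struct drm_buf_free from <drm/drm.h>; free_two_bufs() is a hypothetical helper:

#include <sys/ioctl.h>
#include <drm/drm.h>

/* Hypothetical helper: return two reserved buffers (by index) to the pool. */
static int free_two_bufs(int fd, int idx0, int idx1)
{
        int list[2] = { idx0, idx1 };
        struct drm_buf_free req = {
                .count = 2,
                .list  = list,
        };

        return ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
}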
@@ -1455,8 +1456,8 @@ int drm_freebufs( struct inode *inode, struct file *filp,
1455 * about each buffer into user space. The PCI buffers are already mapped on the 1456 * about each buffer into user space. The PCI buffers are already mapped on the
1456 * addbufs_pci() call. 1457 * addbufs_pci() call.
1457 */ 1458 */
1458int drm_mapbufs( struct inode *inode, struct file *filp, 1459int drm_mapbufs(struct inode *inode, struct file *filp,
1459 unsigned int cmd, unsigned long arg ) 1460 unsigned int cmd, unsigned long arg)
1460{ 1461{
1461 drm_file_t *priv = filp->private_data; 1462 drm_file_t *priv = filp->private_data;
1462 drm_device_t *dev = priv->head->dev; 1463 drm_device_t *dev = priv->head->dev;
@@ -1472,86 +1473,84 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
1472 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1473 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1473 return -EINVAL; 1474 return -EINVAL;
1474 1475
1475 if ( !dma ) return -EINVAL; 1476 if (!dma)
1477 return -EINVAL;
1476 1478
1477 spin_lock( &dev->count_lock ); 1479 spin_lock(&dev->count_lock);
1478 if ( atomic_read( &dev->buf_alloc ) ) { 1480 if (atomic_read(&dev->buf_alloc)) {
1479 spin_unlock( &dev->count_lock ); 1481 spin_unlock(&dev->count_lock);
1480 return -EBUSY; 1482 return -EBUSY;
1481 } 1483 }
1482 dev->buf_use++; /* Can't allocate more after this call */ 1484 dev->buf_use++; /* Can't allocate more after this call */
1483 spin_unlock( &dev->count_lock ); 1485 spin_unlock(&dev->count_lock);
1484 1486
1485 if ( copy_from_user( &request, argp, sizeof(request) ) ) 1487 if (copy_from_user(&request, argp, sizeof(request)))
1486 return -EFAULT; 1488 return -EFAULT;
1487 1489
1488 if ( request.count >= dma->buf_count ) { 1490 if (request.count >= dma->buf_count) {
1489 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) 1491 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1490 || (drm_core_check_feature(dev, DRIVER_SG) 1492 || (drm_core_check_feature(dev, DRIVER_SG)
1491 && (dma->flags & _DRM_DMA_USE_SG)) 1493 && (dma->flags & _DRM_DMA_USE_SG))
1492 || (drm_core_check_feature(dev, DRIVER_FB_DMA) 1494 || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1493 && (dma->flags & _DRM_DMA_USE_FB))) { 1495 && (dma->flags & _DRM_DMA_USE_FB))) {
1494 drm_map_t *map = dev->agp_buffer_map; 1496 drm_map_t *map = dev->agp_buffer_map;
1495 unsigned long token = dev->agp_buffer_token; 1497 unsigned long token = dev->agp_buffer_token;
1496 1498
1497 if ( !map ) { 1499 if (!map) {
1498 retcode = -EINVAL; 1500 retcode = -EINVAL;
1499 goto done; 1501 goto done;
1500 } 1502 }
1501 1503
1502 down_write( &current->mm->mmap_sem ); 1504 down_write(&current->mm->mmap_sem);
1503 virtual = do_mmap( filp, 0, map->size, 1505 virtual = do_mmap(filp, 0, map->size,
1504 PROT_READ | PROT_WRITE, 1506 PROT_READ | PROT_WRITE,
1505 MAP_SHARED, 1507 MAP_SHARED, token);
1506 token ); 1508 up_write(&current->mm->mmap_sem);
1507 up_write( &current->mm->mmap_sem );
1508 } else { 1509 } else {
1509 down_write( &current->mm->mmap_sem ); 1510 down_write(&current->mm->mmap_sem);
1510 virtual = do_mmap( filp, 0, dma->byte_count, 1511 virtual = do_mmap(filp, 0, dma->byte_count,
1511 PROT_READ | PROT_WRITE, 1512 PROT_READ | PROT_WRITE,
1512 MAP_SHARED, 0 ); 1513 MAP_SHARED, 0);
1513 up_write( &current->mm->mmap_sem ); 1514 up_write(&current->mm->mmap_sem);
1514 } 1515 }
1515 if ( virtual > -1024UL ) { 1516 if (virtual > -1024UL) {
1516 /* Real error */ 1517 /* Real error */
1517 retcode = (signed long)virtual; 1518 retcode = (signed long)virtual;
1518 goto done; 1519 goto done;
1519 } 1520 }
1520 request.virtual = (void __user *)virtual; 1521 request.virtual = (void __user *)virtual;
1521 1522
1522 for ( i = 0 ; i < dma->buf_count ; i++ ) { 1523 for (i = 0; i < dma->buf_count; i++) {
1523 if ( copy_to_user( &request.list[i].idx, 1524 if (copy_to_user(&request.list[i].idx,
1524 &dma->buflist[i]->idx, 1525 &dma->buflist[i]->idx,
1525 sizeof(request.list[0].idx) ) ) { 1526 sizeof(request.list[0].idx))) {
1526 retcode = -EFAULT; 1527 retcode = -EFAULT;
1527 goto done; 1528 goto done;
1528 } 1529 }
1529 if ( copy_to_user( &request.list[i].total, 1530 if (copy_to_user(&request.list[i].total,
1530 &dma->buflist[i]->total, 1531 &dma->buflist[i]->total,
1531 sizeof(request.list[0].total) ) ) { 1532 sizeof(request.list[0].total))) {
1532 retcode = -EFAULT; 1533 retcode = -EFAULT;
1533 goto done; 1534 goto done;
1534 } 1535 }
1535 if ( copy_to_user( &request.list[i].used, 1536 if (copy_to_user(&request.list[i].used,
1536 &zero, 1537 &zero, sizeof(zero))) {
1537 sizeof(zero) ) ) {
1538 retcode = -EFAULT; 1538 retcode = -EFAULT;
1539 goto done; 1539 goto done;
1540 } 1540 }
1541 address = virtual + dma->buflist[i]->offset; /* *** */ 1541 address = virtual + dma->buflist[i]->offset; /* *** */
1542 if ( copy_to_user( &request.list[i].address, 1542 if (copy_to_user(&request.list[i].address,
1543 &address, 1543 &address, sizeof(address))) {
1544 sizeof(address) ) ) {
1545 retcode = -EFAULT; 1544 retcode = -EFAULT;
1546 goto done; 1545 goto done;
1547 } 1546 }
1548 } 1547 }
1549 } 1548 }
1550 done: 1549 done:
1551 request.count = dma->buf_count; 1550 request.count = dma->buf_count;
1552 DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode ); 1551 DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
1553 1552
1554 if ( copy_to_user( argp, &request, sizeof(request) ) ) 1553 if (copy_to_user(argp, &request, sizeof(request)))
1555 return -EFAULT; 1554 return -EFAULT;
1556 1555
1557 return retcode; 1556 return retcode;
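On the user-space side the whole pool is mapped with one call: the caller supplies a drm_buf_pub array at least dma->buf_count entries long (the count reported by the info ioctl), and the kernel mmap()s the AGP, SG or PCI region and then fills in each buffer's index, size and offset-derived address inside that single mapping. A sketch under the same assumptions (legacy DRM_IOCTL_MAP_BUFS and struct drm_buf_map from <drm/drm.h>, hypothetical fd and caller-supplied buffer count):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Hypothetical helper: map every DMA buffer and return the descriptor list.
 * nbufs must be >= the kernel's dma->buf_count or nothing gets mapped. */
static struct drm_buf_pub *map_bufs(int fd, int nbufs)
{
        struct drm_buf_pub *list = calloc(nbufs, sizeof(*list));
        struct drm_buf_map req;

        if (!list)
                return NULL;

        req.count = nbufs;
        req.virtual = NULL;     /* kernel reports the mmap()ed base here */
        req.list = list;

        if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &req) < 0) {
                free(list);
                return NULL;
        }
        /* list[i].idx, list[i].total and list[i].address are now valid. */
        return list;
}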
@@ -1560,23 +1559,23 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
1560/** 1559/**
 1561 * Compute size order. Returns the exponent of the smallest power of two which 1560 * Compute size order. Returns the exponent of the smallest power of two which
 1562 * is greater than or equal to the given number. 1561 * is greater than or equal to the given number.
1563 * 1562 *
1564 * \param size size. 1563 * \param size size.
1565 * \return order. 1564 * \return order.
1566 * 1565 *
1567 * \todo Can be made faster. 1566 * \todo Can be made faster.
1568 */ 1567 */
1569int drm_order( unsigned long size ) 1568int drm_order(unsigned long size)
1570{ 1569{
1571 int order; 1570 int order;
1572 unsigned long tmp; 1571 unsigned long tmp;
1573 1572
1574 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) 1573 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1575 ;
1576 1574
1577 if (size & (size - 1)) 1575 if (size & (size - 1))
1578 ++order; 1576 ++order;
1579 1577
1580 return order; 1578 return order;
1581} 1579}
1580
1582EXPORT_SYMBOL(drm_order); 1581EXPORT_SYMBOL(drm_order);
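drm_order() is ceil(log2(size)) computed with shifts: the loop finds floor(log2(size)) and the (size & (size - 1)) test bumps the result by one when size is not already a power of two, so the function returns the smallest exponent e with (1 << e) >= size. A standalone restatement with a few worked values, handy for checking which dma->bufs[] bucket a given request size lands in; order_of() here is just an illustration, not the kernel symbol:

#include <assert.h>

/* Standalone copy of the drm_order() logic for illustration:
 * smallest exponent e such that (1UL << e) >= size. */
static int order_of(unsigned long size)
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1)
                order++;                /* order = floor(log2(size)) */

        if (size & (size - 1))          /* not a power of two: round up */
                ++order;

        return order;
}

int main(void)
{
        assert(order_of(1) == 0);
        assert(order_of(1024) == 10);
        assert(order_of(1200) == 11);   /* 2^10 = 1024 < 1200 <= 2048 = 2^11 */
        assert(order_of(4096) == 12);
        return 0;
}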