author     Dave Airlie <airlied@starflyer.(none)>    2005-09-25 00:28:13 -0400
committer  Dave Airlie <airlied@linux.ie>            2005-09-25 00:28:13 -0400
commit     b5e89ed53ed8d24f83ba1941c07382af00ed238e
tree       747bae7a565f88a2e1d5974776eeb054a932c505  /drivers/char/drm/drm_bufs.c
parent     99a2657a29e2d623c3568cd86b27cac13fb63140
drm: lindent the drm directory.
I've been threatening this for a while, so no point hanging around.
This lindents the DRM code, which was always really bad in the tabbing
department. I've also fixed some misnamed file references in comments and
removed some trailing whitespace.
Signed-off-by: Dave Airlie <airlied@linux.ie>
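Lindent here refers to the kernel's scripts/Lindent helper: a thin shell
wrapper that runs GNU indent with the kernel coding-style flags (K&R braces,
8-column tabs, 80-column lines), which is what produces the purely mechanical
whitespace churn in the diff below. A minimal sketch of such a wrapper follows;
the flag set mirrors the historical script but is reproduced from memory, so
treat it as approximate rather than a verbatim copy of the tree:

    #!/bin/sh
    # Lindent-style wrapper around GNU indent (flags approximate):
    #   -npro     ignore any .indent.pro config file
    #   -kr       K&R brace style
    #   -i8 -ts8  8-column indents and tab stops
    #   -sob      swallow optional blank lines
    #   -l80      wrap lines at 80 columns
    #   -ss       space before semicolon on one-line for/while bodies
    #   -ncs      no space after casts
    indent -npro -kr -i8 -ts8 -sob -l80 -ss -ncs "$@"

Invoked as, e.g., "Lindent drivers/char/drm/*.[ch]", it rewrites the files in
place; committing the result yields a whitespace-only diff like the one below.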
Diffstat (limited to 'drivers/char/drm/drm_bufs.c')
-rw-r--r--  drivers/char/drm/drm_bufs.c | 883
1 file changed, 440 insertions(+), 443 deletions(-)
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index f28e70ae6606..bb989ccea5a0 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -1,7 +1,7 @@
 /**
- * \file drm_bufs.h
+ * \file drm_bufs.c
  * Generic buffer template
  *
  * \author Rickard E. (Rik) Faith <faith@valinux.com>
  * \author Gareth Hughes <gareth@valinux.com>
  */
@@ -36,20 +36,22 @@
 #include <linux/vmalloc.h>
 #include "drmP.h"
 
-unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
+unsigned long drm_get_resource_start(drm_device_t * dev, unsigned int resource)
 {
         return pci_resource_start(dev->pdev, resource);
 }
+
 EXPORT_SYMBOL(drm_get_resource_start);
 
-unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
+unsigned long drm_get_resource_len(drm_device_t * dev, unsigned int resource)
 {
         return pci_resource_len(dev->pdev, resource);
 }
+
 EXPORT_SYMBOL(drm_get_resource_len);
 
-static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
-                                             drm_local_map_t *map)
+static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
+                                             drm_local_map_t * map)
 {
         struct list_head *list;
 
@@ -71,7 +73,8 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
 #define END_RANGE 0x40000000
 
 #ifdef _LP64
-static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev)
+static __inline__ unsigned int HandleID(unsigned long lhandle,
+                                        drm_device_t * dev)
 {
         static unsigned int map32_handle = START_RANGE;
         unsigned int hash;
@@ -81,12 +84,12 @@ static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev
                 map32_handle += PAGE_SIZE;
                 if (map32_handle > END_RANGE)
                         map32_handle = START_RANGE;
         } else
                 hash = lhandle;
 
         while (1) {
                 drm_map_list_t *_entry;
-                list_for_each_entry(_entry, &dev->maplist->head,head) {
+                list_for_each_entry(_entry, &dev->maplist->head, head) {
                         if (_entry->user_token == hash)
                                 break;
                 }
@@ -116,14 +119,14 @@ static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev
  */
 int drm_addmap_core(drm_device_t * dev, unsigned int offset,
                     unsigned int size, drm_map_type_t type,
-                    drm_map_flags_t flags, drm_map_list_t **maplist)
+                    drm_map_flags_t flags, drm_map_list_t ** maplist)
 {
         drm_map_t *map;
         drm_map_list_t *list;
         drm_dma_handle_t *dmah;
 
-        map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
-        if ( !map )
+        map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
+        if (!map)
                 return -ENOMEM;
 
         map->offset = offset;
@@ -135,26 +138,26 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
          * book keeping information about shared memory to allow for removal
          * when processes fork.
          */
-        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
-                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
+        if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
+                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                 return -EINVAL;
         }
-        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
-                   map->offset, map->size, map->type );
-        if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
-                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
+        DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
+                  map->offset, map->size, map->type);
+        if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
+                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                 return -EINVAL;
         }
         map->mtrr = -1;
         map->handle = NULL;
 
-        switch ( map->type ) {
+        switch (map->type) {
         case _DRM_REGISTERS:
         case _DRM_FRAME_BUFFER:
 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
-                if ( map->offset + map->size < map->offset ||
-                     map->offset < virt_to_phys(high_memory) ) {
-                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
+                if (map->offset + map->size < map->offset ||
+                    map->offset < virt_to_phys(high_memory)) {
+                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                         return -EINVAL;
                 }
 #endif
@@ -169,8 +172,9 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
         if (list != NULL) {
                 if (list->map->size != map->size) {
                         DRM_DEBUG("Matching maps of type %d with "
                                   "mismatched sizes, (%ld vs %ld)\n",
-                                  map->type, map->size, list->map->size);
+                                  map->type, map->size,
+                                  list->map->size);
                         list->map->size = map->size;
                 }
 
@@ -180,35 +184,33 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
         }
 
         if (drm_core_has_MTRR(dev)) {
-                if ( map->type == _DRM_FRAME_BUFFER ||
-                     (map->flags & _DRM_WRITE_COMBINING) ) {
-                        map->mtrr = mtrr_add( map->offset, map->size,
-                                              MTRR_TYPE_WRCOMB, 1 );
+                if (map->type == _DRM_FRAME_BUFFER ||
+                    (map->flags & _DRM_WRITE_COMBINING)) {
+                        map->mtrr = mtrr_add(map->offset, map->size,
+                                             MTRR_TYPE_WRCOMB, 1);
                 }
         }
         if (map->type == _DRM_REGISTERS)
-                map->handle = drm_ioremap( map->offset, map->size,
-                                           dev );
+                map->handle = drm_ioremap(map->offset, map->size, dev);
         break;
 
         case _DRM_SHM:
                 map->handle = vmalloc_32(map->size);
-                DRM_DEBUG( "%lu %d %p\n",
-                           map->size, drm_order( map->size ), map->handle );
-                if ( !map->handle ) {
-                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
+                DRM_DEBUG("%lu %d %p\n",
+                          map->size, drm_order(map->size), map->handle);
+                if (!map->handle) {
+                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                         return -ENOMEM;
                 }
                 map->offset = (unsigned long)map->handle;
-                if ( map->flags & _DRM_CONTAINS_LOCK ) {
+                if (map->flags & _DRM_CONTAINS_LOCK) {
                         /* Prevent a 2nd X Server from creating a 2nd lock */
                         if (dev->lock.hw_lock != NULL) {
-                                vfree( map->handle );
-                                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
+                                vfree(map->handle);
+                                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                                 return -EBUSY;
                         }
-                        dev->sigdata.lock =
-                            dev->lock.hw_lock = map->handle; /* Pointer to lock */
+                        dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */
                 }
                 break;
         case _DRM_AGP:
@@ -217,7 +219,7 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
                         map->offset += dev->hose->mem_space->start;
 #endif
                         map->offset += dev->agp->base;
-                        map->mtrr = dev->agp->agp_mtrr; /* for getmap */
+                        map->mtrr = dev->agp->agp_mtrr;  /* for getmap */
                 }
                 break;
         case _DRM_SCATTER_GATHER:
@@ -227,7 +229,7 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
                 }
                 map->offset += (unsigned long)dev->sg->virtual;
                 break;
-        case _DRM_CONSISTENT: 
+        case _DRM_CONSISTENT:
                 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
                  * As we're limiting the address to 2^32-1 (or less),
                  * casting it down to 32 bits is no problem, but we
@@ -242,12 +244,12 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
                 kfree(dmah);
                 break;
         default:
-                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
+                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                 return -EINVAL;
         }
 
         list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
-        if(!list) {
+        if (!list) {
                 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                 return -EINVAL;
         }
@@ -258,18 +260,18 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
         list_add(&list->head, &dev->maplist->head);
         /* Assign a 32-bit handle */
         /* We do it here so that dev->struct_sem protects the increment */
-        list->user_token = HandleID(map->type==_DRM_SHM
+        list->user_token = HandleID(map->type == _DRM_SHM
                                     ? (unsigned long)map->handle
                                     : map->offset, dev);
         up(&dev->struct_sem);
 
         *maplist = list;
         return 0;
 }
 
-int drm_addmap(drm_device_t *dev, unsigned int offset,
+int drm_addmap(drm_device_t * dev, unsigned int offset,
                unsigned int size, drm_map_type_t type,
-               drm_map_flags_t flags, drm_local_map_t **map_ptr)
+               drm_map_flags_t flags, drm_local_map_t ** map_ptr)
 {
         drm_map_list_t *list;
         int rc;
@@ -279,6 +281,7 @@ int drm_addmap(drm_device_t *dev, unsigned int offset,
         *map_ptr = list->map;
         return rc;
 }
+
 EXPORT_SYMBOL(drm_addmap);
 
 int drm_addmap_ioctl(struct inode *inode, struct file *filp,
@@ -294,14 +297,14 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
         if (!(filp->f_mode & 3))
                 return -EACCES; /* Require read/write */
 
-        if (copy_from_user(& map, argp, sizeof(map))) {
+        if (copy_from_user(&map, argp, sizeof(map))) {
                 return -EFAULT;
         }
 
         err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
                               &maplist);
 
         if (err)
                 return err;
 
         if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
@@ -311,7 +314,6 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
         return 0;
 }
 
-
 /**
  * Remove a map private from list and deallocate resources if the mapping
  * isn't in use.
@@ -328,7 +330,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
  *
  * \sa drm_addmap
  */
-int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
+int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
 {
         struct list_head *list;
         drm_map_list_t *r_list = NULL;
@@ -359,9 +361,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
         case _DRM_FRAME_BUFFER:
                 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                         int retcode;
-                        retcode = mtrr_del(map->mtrr, map->offset,
-                                           map->size);
-                        DRM_DEBUG ("mtrr_del=%d\n", retcode);
+                        retcode = mtrr_del(map->mtrr, map->offset, map->size);
+                        DRM_DEBUG("mtrr_del=%d\n", retcode);
                 }
                 break;
         case _DRM_SHM:
@@ -381,9 +382,10 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
 
         return 0;
 }
+
 EXPORT_SYMBOL(drm_rmmap_locked);
 
-int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
+int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
 {
         int ret;
 
@@ -393,6 +395,7 @@ int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
 
         return ret;
 }
+
 EXPORT_SYMBOL(drm_rmmap);
 
 /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
@@ -414,7 +417,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
         struct list_head *list;
         int ret;
 
-        if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
+        if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
                 return -EFAULT;
         }
 
@@ -423,7 +426,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
                 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
 
                 if (r_list->map &&
-                    r_list->user_token == (unsigned long) request.handle &&
+                    r_list->user_token == (unsigned long)request.handle &&
                     r_list->map->flags & _DRM_REMOVABLE) {
                         map = r_list->map;
                         break;
@@ -462,7 +465,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
  *
  * Frees any pages and buffers associated with the given entry.
  */
-static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
+static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
 {
         int i;
 
@@ -470,30 +473,27 @@ static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
                 for (i = 0; i < entry->seg_count; i++) {
                         if (entry->seglist[i]) {
                                 drm_free_pages(entry->seglist[i],
-                                               entry->page_order,
-                                               DRM_MEM_DMA);
+                                               entry->page_order, DRM_MEM_DMA);
                         }
                 }
                 drm_free(entry->seglist,
                          entry->seg_count *
-                         sizeof(*entry->seglist),
-                         DRM_MEM_SEGS);
+                         sizeof(*entry->seglist), DRM_MEM_SEGS);
 
                 entry->seg_count = 0;
         }
 
         if (entry->buf_count) {
                 for (i = 0; i < entry->buf_count; i++) {
                         if (entry->buflist[i].dev_private) {
                                 drm_free(entry->buflist[i].dev_private,
                                          entry->buflist[i].dev_priv_size,
                                          DRM_MEM_BUFS);
                         }
                 }
                 drm_free(entry->buflist,
                          entry->buf_count *
-                         sizeof(*entry->buflist),
-                         DRM_MEM_BUFS);
+                         sizeof(*entry->buflist), DRM_MEM_BUFS);
 
                 entry->buf_count = 0;
         }
@@ -506,12 +506,12 @@ static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
  * \param dev drm_device_t to which the buffers are to be added.
  * \param request pointer to a drm_buf_desc_t describing the request.
  * \return zero on success or a negative number on failure.
  *
  * After some sanity checks creates a drm_buf structure for each buffer and
  * reallocates the buffer list of the same size order to accommodate the new
  * buffers.
  */
-int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
+int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 {
         drm_device_dma_t *dma = dev->dma;
         drm_buf_entry_t *entry;
@@ -528,144 +528,145 @@ int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
         int i;
         drm_buf_t **temp_buflist;
 
-        if ( !dma ) return -EINVAL;
+        if (!dma)
+                return -EINVAL;
 
         count = request->count;
         order = drm_order(request->size);
         size = 1 << order;
 
         alignment = (request->flags & _DRM_PAGE_ALIGN)
             ? PAGE_ALIGN(size) : size;
         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
         total = PAGE_SIZE << page_order;
 
         byte_count = 0;
         agp_offset = dev->agp->base + request->agp_start;
 
-        DRM_DEBUG( "count: %d\n", count );
-        DRM_DEBUG( "order: %d\n", order );
-        DRM_DEBUG( "size: %d\n", size );
-        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
-        DRM_DEBUG( "alignment: %d\n", alignment );
-        DRM_DEBUG( "page_order: %d\n", page_order );
-        DRM_DEBUG( "total: %d\n", total );
+        DRM_DEBUG("count: %d\n", count);
+        DRM_DEBUG("order: %d\n", order);
+        DRM_DEBUG("size: %d\n", size);
+        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+        DRM_DEBUG("alignment: %d\n", alignment);
+        DRM_DEBUG("page_order: %d\n", page_order);
+        DRM_DEBUG("total: %d\n", total);
 
-        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
-        if ( dev->queue_count ) return -EBUSY; /* Not while in use */
+        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+                return -EINVAL;
+        if (dev->queue_count)
+                return -EBUSY;  /* Not while in use */
 
-        spin_lock( &dev->count_lock );
-        if ( dev->buf_use ) {
-                spin_unlock( &dev->count_lock );
+        spin_lock(&dev->count_lock);
+        if (dev->buf_use) {
+                spin_unlock(&dev->count_lock);
                 return -EBUSY;
         }
-        atomic_inc( &dev->buf_alloc );
-        spin_unlock( &dev->count_lock );
+        atomic_inc(&dev->buf_alloc);
+        spin_unlock(&dev->count_lock);
 
-        down( &dev->struct_sem );
+        down(&dev->struct_sem);
         entry = &dma->bufs[order];
-        if ( entry->buf_count ) {
-                up( &dev->struct_sem );
-                atomic_dec( &dev->buf_alloc );
+        if (entry->buf_count) {
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
                 return -ENOMEM; /* May only call once for each order */
         }
 
         if (count < 0 || count > 4096) {
-                up( &dev->struct_sem );
-                atomic_dec( &dev->buf_alloc );
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
                 return -EINVAL;
         }
 
-        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
-                                    DRM_MEM_BUFS );
-        if ( !entry->buflist ) {
-                up( &dev->struct_sem );
-                atomic_dec( &dev->buf_alloc );
+        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+                                   DRM_MEM_BUFS);
+        if (!entry->buflist) {
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
                 return -ENOMEM;
         }
-        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
+        memset(entry->buflist, 0, count * sizeof(*entry->buflist));
 
         entry->buf_size = size;
         entry->page_order = page_order;
 
         offset = 0;
 
-        while ( entry->buf_count < count ) {
+        while (entry->buf_count < count) {
                 buf = &entry->buflist[entry->buf_count];
                 buf->idx = dma->buf_count + entry->buf_count;
                 buf->total = alignment;
                 buf->order = order;
                 buf->used = 0;
 
                 buf->offset = (dma->byte_count + offset);
                 buf->bus_address = agp_offset + offset;
                 buf->address = (void *)(agp_offset + offset);
                 buf->next = NULL;
                 buf->waiting = 0;
                 buf->pending = 0;
-                init_waitqueue_head( &buf->dma_wait );
+                init_waitqueue_head(&buf->dma_wait);
                 buf->filp = NULL;
 
                 buf->dev_priv_size = dev->driver->dev_priv_size;
-                buf->dev_private = drm_alloc( buf->dev_priv_size,
-                                              DRM_MEM_BUFS );
-                if(!buf->dev_private) {
+                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+                if (!buf->dev_private) {
                         /* Set count correctly so we free the proper amount. */
                         entry->buf_count = count;
-                        drm_cleanup_buf_error(dev,entry);
-                        up( &dev->struct_sem );
-                        atomic_dec( &dev->buf_alloc );
+                        drm_cleanup_buf_error(dev, entry);
+                        up(&dev->struct_sem);
+                        atomic_dec(&dev->buf_alloc);
                         return -ENOMEM;
                 }
-                memset( buf->dev_private, 0, buf->dev_priv_size );
+                memset(buf->dev_private, 0, buf->dev_priv_size);
 
-                DRM_DEBUG( "buffer %d @ %p\n",
-                           entry->buf_count, buf->address );
+                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
 
                 offset += alignment;
                 entry->buf_count++;
                 byte_count += PAGE_SIZE << page_order;
         }
 
-        DRM_DEBUG( "byte_count: %d\n", byte_count );
+        DRM_DEBUG("byte_count: %d\n", byte_count);
 
-        temp_buflist = drm_realloc( dma->buflist,
-                                    dma->buf_count * sizeof(*dma->buflist),
-                                    (dma->buf_count + entry->buf_count)
-                                    * sizeof(*dma->buflist),
-                                    DRM_MEM_BUFS );
-        if(!temp_buflist) {
+        temp_buflist = drm_realloc(dma->buflist,
+                                   dma->buf_count * sizeof(*dma->buflist),
+                                   (dma->buf_count + entry->buf_count)
+                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+        if (!temp_buflist) {
                 /* Free the entry because it isn't valid */
-                drm_cleanup_buf_error(dev,entry);
-                up( &dev->struct_sem );
-                atomic_dec( &dev->buf_alloc );
+                drm_cleanup_buf_error(dev, entry);
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
                 return -ENOMEM;
         }
         dma->buflist = temp_buflist;
 
-        for ( i = 0 ; i < entry->buf_count ; i++ ) {
+        for (i = 0; i < entry->buf_count; i++) {
                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
         }
 
         dma->buf_count += entry->buf_count;
         dma->byte_count += byte_count;
 
-        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
-        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
+        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-        up( &dev->struct_sem );
+        up(&dev->struct_sem);
 
         request->count = entry->buf_count;
         request->size = size;
 
         dma->flags = _DRM_DMA_USE_AGP;
 
-        atomic_dec( &dev->buf_alloc );
+        atomic_dec(&dev->buf_alloc);
         return 0;
 }
+
 EXPORT_SYMBOL(drm_addbufs_agp);
 #endif /* __OS_HAS_AGP */
 
-int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
+int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 {
         drm_device_dma_t *dma = dev->dma;
         int count;
@@ -684,178 +685,174 @@ int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
         unsigned long *temp_pagelist;
         drm_buf_t **temp_buflist;
 
-        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
-        if ( !dma ) return -EINVAL;
+        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
+                return -EINVAL;
+        if (!dma)
+                return -EINVAL;
 
         count = request->count;
         order = drm_order(request->size);
         size = 1 << order;
 
-        DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
-                   request->count, request->size, size,
-                   order, dev->queue_count );
+        DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
+                  request->count, request->size, size, order, dev->queue_count);
 
-        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
-        if ( dev->queue_count ) return -EBUSY; /* Not while in use */
+        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+                return -EINVAL;
+        if (dev->queue_count)
+                return -EBUSY;  /* Not while in use */
 
         alignment = (request->flags & _DRM_PAGE_ALIGN)
             ? PAGE_ALIGN(size) : size;
         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
         total = PAGE_SIZE << page_order;
 
-        spin_lock( &dev->count_lock );
-        if ( dev->buf_use ) {
-                spin_unlock( &dev->count_lock );
+        spin_lock(&dev->count_lock);
+        if (dev->buf_use) {
+                spin_unlock(&dev->count_lock);
                 return -EBUSY;
         }
-        atomic_inc( &dev->buf_alloc );
-        spin_unlock( &dev->count_lock );
+        atomic_inc(&dev->buf_alloc);
+        spin_unlock(&dev->count_lock);
 
-        down( &dev->struct_sem );
+        down(&dev->struct_sem);
         entry = &dma->bufs[order];
-        if ( entry->buf_count ) {
-                up( &dev->struct_sem );
-                atomic_dec( &dev->buf_alloc );
+        if (entry->buf_count) {
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
                 return -ENOMEM; /* May only call once for each order */
         }
 
         if (count < 0 || count > 4096) {
-                up( &dev->struct_sem );
-                atomic_dec( &dev->buf_alloc );
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
                 return -EINVAL;
         }
 
-        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
-                                    DRM_MEM_BUFS );
-        if ( !entry->buflist ) {
-                up( &dev->struct_sem );
-                atomic_dec( &dev->buf_alloc );
+        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+                                   DRM_MEM_BUFS);
+        if (!entry->buflist) {
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
                 return -ENOMEM;
         }
-        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
+        memset(entry->buflist, 0, count * sizeof(*entry->buflist));
 
-        entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
-                                    DRM_MEM_SEGS );
-        if ( !entry->seglist ) {
-                drm_free( entry->buflist,
-                          count * sizeof(*entry->buflist),
-                          DRM_MEM_BUFS );
-                up( &dev->struct_sem );
-                atomic_dec( &dev->buf_alloc );
+        entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
+                                   DRM_MEM_SEGS);
+        if (!entry->seglist) {
+                drm_free(entry->buflist,
+                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
                 return -ENOMEM;
         }
-        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
+        memset(entry->seglist, 0, count * sizeof(*entry->seglist));
 
         /* Keep the original pagelist until we know all the allocations
          * have succeeded
          */
-        temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
-                                   * sizeof(*dma->pagelist),
-                                   DRM_MEM_PAGES );
+        temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
                                  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
         if (!temp_pagelist) {
-                drm_free( entry->buflist,
-                          count * sizeof(*entry->buflist),
-                          DRM_MEM_BUFS );
-                drm_free( entry->seglist,
-                          count * sizeof(*entry->seglist),
-                          DRM_MEM_SEGS );
-                up( &dev->struct_sem );
-                atomic_dec( &dev->buf_alloc );
+                drm_free(entry->buflist,
+                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
+                drm_free(entry->seglist,
+                         count * sizeof(*entry->seglist), DRM_MEM_SEGS);
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
                 return -ENOMEM;
         }
         memcpy(temp_pagelist,
-               dma->pagelist,
-               dma->page_count * sizeof(*dma->pagelist));
-        DRM_DEBUG( "pagelist: %d entries\n",
-                   dma->page_count + (count << page_order) );
+               dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
+        DRM_DEBUG("pagelist: %d entries\n",
+                  dma->page_count + (count << page_order));
 
         entry->buf_size = size;
         entry->page_order = page_order;
         byte_count = 0;
         page_count = 0;
 
-        while ( entry->buf_count < count ) {
-                page = drm_alloc_pages( page_order, DRM_MEM_DMA );
-                if ( !page ) {
+        while (entry->buf_count < count) {
+                page = drm_alloc_pages(page_order, DRM_MEM_DMA);
+                if (!page) {
                         /* Set count correctly so we free the proper amount. */
                         entry->buf_count = count;
                         entry->seg_count = count;
                         drm_cleanup_buf_error(dev, entry);
-                        drm_free( temp_pagelist,
-                                  (dma->page_count + (count << page_order))
-                                  * sizeof(*dma->pagelist),
-                                  DRM_MEM_PAGES );
-                        up( &dev->struct_sem );
-                        atomic_dec( &dev->buf_alloc );
+                        drm_free(temp_pagelist,
                                 (dma->page_count + (count << page_order))
                                 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
+                        up(&dev->struct_sem);
+                        atomic_dec(&dev->buf_alloc);
                         return -ENOMEM;
                 }
                 entry->seglist[entry->seg_count++] = page;
-                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
-                        DRM_DEBUG( "page %d @ 0x%08lx\n",
+                for (i = 0; i < (1 << page_order); i++) {
+                        DRM_DEBUG("page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
-                                   page + PAGE_SIZE * i );
+                                  page + PAGE_SIZE * i);
                         temp_pagelist[dma->page_count + page_count++]
                             = page + PAGE_SIZE * i;
                 }
-                for ( offset = 0 ;
-                      offset + size <= total && entry->buf_count < count ;
-                      offset += alignment, ++entry->buf_count ) {
+                for (offset = 0;
+                     offset + size <= total && entry->buf_count < count;
+                     offset += alignment, ++entry->buf_count) {
                         buf = &entry->buflist[entry->buf_count];
                         buf->idx = dma->buf_count + entry->buf_count;
                         buf->total = alignment;
                         buf->order = order;
                         buf->used = 0;
                         buf->offset = (dma->byte_count + byte_count + offset);
                         buf->address = (void *)(page + offset);
                         buf->next = NULL;
                         buf->waiting = 0;
                         buf->pending = 0;
-                        init_waitqueue_head( &buf->dma_wait );
+                        init_waitqueue_head(&buf->dma_wait);
                         buf->filp = NULL;
 
                         buf->dev_priv_size = dev->driver->dev_priv_size;
-                        buf->dev_private = drm_alloc( buf->dev_priv_size,
-                                                      DRM_MEM_BUFS );
-                        if(!buf->dev_private) {
+                        buf->dev_private = drm_alloc(buf->dev_priv_size,
                                                     DRM_MEM_BUFS);
+                        if (!buf->dev_private) {
                                 /* Set count correctly so we free the proper amount. */
                                 entry->buf_count = count;
                                 entry->seg_count = count;
-                                drm_cleanup_buf_error(dev,entry);
-                                drm_free( temp_pagelist,
-                                          (dma->page_count + (count << page_order))
-                                          * sizeof(*dma->pagelist),
-                                          DRM_MEM_PAGES );
-                                up( &dev->struct_sem );
-                                atomic_dec( &dev->buf_alloc );
+                                drm_cleanup_buf_error(dev, entry);
+                                drm_free(temp_pagelist,
                                         (dma->page_count +
                                          (count << page_order))
                                         * sizeof(*dma->pagelist),
                                         DRM_MEM_PAGES);
+                                up(&dev->struct_sem);
+                                atomic_dec(&dev->buf_alloc);
                                 return -ENOMEM;
                         }
-                        memset( buf->dev_private, 0, buf->dev_priv_size );
+                        memset(buf->dev_private, 0, buf->dev_priv_size);
 
-                        DRM_DEBUG( "buffer %d @ %p\n",
-                                   entry->buf_count, buf->address );
+                        DRM_DEBUG("buffer %d @ %p\n",
                                  entry->buf_count, buf->address);
                 }
                 byte_count += PAGE_SIZE << page_order;
         }
 
-        temp_buflist = drm_realloc( dma->buflist,
+        temp_buflist = drm_realloc(dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
-                                    * sizeof(*dma->buflist),
-                                    DRM_MEM_BUFS );
+                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
         if (!temp_buflist) {
                 /* Free the entry because it isn't valid */
-                drm_cleanup_buf_error(dev,entry);
-                drm_free( temp_pagelist,
+                drm_cleanup_buf_error(dev, entry);
+                drm_free(temp_pagelist,
                          (dma->page_count + (count << page_order))
-                          * sizeof(*dma->pagelist),
-                          DRM_MEM_PAGES );
-                up( &dev->struct_sem );
-                atomic_dec( &dev->buf_alloc );
+                         * sizeof(*dma->pagelist), DRM_MEM_PAGES);
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
                 return -ENOMEM;
         }
         dma->buflist = temp_buflist;
 
-        for ( i = 0 ; i < entry->buf_count ; i++ ) {
+        for (i = 0; i < entry->buf_count; i++) {
                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
         }
 
@@ -864,8 +861,8 @@ int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
          */
         if (dma->page_count) {
                 drm_free(dma->pagelist,
-                          dma->page_count * sizeof(*dma->pagelist),
-                          DRM_MEM_PAGES);
+                         dma->page_count * sizeof(*dma->pagelist),
+                         DRM_MEM_PAGES);
         }
         dma->pagelist = temp_pagelist;
 
@@ -874,18 +871,19 @@ int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
         dma->page_count += entry->seg_count << page_order;
         dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
 
-        up( &dev->struct_sem );
+        up(&dev->struct_sem);
 
         request->count = entry->buf_count;
         request->size = size;
 
-        atomic_dec( &dev->buf_alloc );
+        atomic_dec(&dev->buf_alloc);
         return 0;
 
 }
+
 EXPORT_SYMBOL(drm_addbufs_pci);
 
-static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
+static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
 {
         drm_device_dma_t *dma = dev->dma;
         drm_buf_entry_t *entry;
@@ -902,146 +900,147 @@ static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
         int i;
         drm_buf_t **temp_buflist;
 
-        if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;
-
-        if ( !dma ) return -EINVAL;
+        if (!drm_core_check_feature(dev, DRIVER_SG))
+                return -EINVAL;
+
+        if (!dma)
+                return -EINVAL;
 
         count = request->count;
         order = drm_order(request->size);
         size = 1 << order;
 
         alignment = (request->flags & _DRM_PAGE_ALIGN)
             ? PAGE_ALIGN(size) : size;
         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
         total = PAGE_SIZE << page_order;
 
         byte_count = 0;
         agp_offset = request->agp_start;
 
-        DRM_DEBUG( "count: %d\n", count );
-        DRM_DEBUG( "order: %d\n", order );
-        DRM_DEBUG( "size: %d\n", size );
-        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
-        DRM_DEBUG( "alignment: %d\n", alignment );
-        DRM_DEBUG( "page_order: %d\n", page_order );
-        DRM_DEBUG( "total: %d\n", total );
+        DRM_DEBUG("count: %d\n", count);
+        DRM_DEBUG("order: %d\n", order);
+        DRM_DEBUG("size: %d\n", size);
+        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+        DRM_DEBUG("alignment: %d\n", alignment);
+        DRM_DEBUG("page_order: %d\n", page_order);
+        DRM_DEBUG("total: %d\n", total);
 
-        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
-        if ( dev->queue_count ) return -EBUSY; /* Not while in use */
+        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+                return -EINVAL;
+        if (dev->queue_count)
+                return -EBUSY;  /* Not while in use */
 
-        spin_lock( &dev->count_lock );
-        if ( dev->buf_use ) {
-                spin_unlock( &dev->count_lock );
+        spin_lock(&dev->count_lock);
+        if (dev->buf_use) {
+                spin_unlock(&dev->count_lock);
                 return -EBUSY;
         }
-        atomic_inc( &dev->buf_alloc );
-        spin_unlock( &dev->count_lock );
+        atomic_inc(&dev->buf_alloc);
+        spin_unlock(&dev->count_lock);
 
-        down( &dev->struct_sem );
+        down(&dev->struct_sem);
         entry = &dma->bufs[order];
-        if ( entry->buf_count ) {
-                up( &dev->struct_sem );
-                atomic_dec( &dev->buf_alloc );
+        if (entry->buf_count) {
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
                 return -ENOMEM; /* May only call once for each order */
         }
 
         if (count < 0 || count > 4096) {
-                up( &dev->struct_sem );
-                atomic_dec( &dev->buf_alloc );
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
                 return -EINVAL;
         }
 
-        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
-                                    DRM_MEM_BUFS );
-        if ( !entry->buflist ) {
-                up( &dev->struct_sem );
-                atomic_dec( &dev->buf_alloc );
+        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+                                   DRM_MEM_BUFS);
+        if (!entry->buflist) {
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
                 return -ENOMEM;
         }
-        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
+        memset(entry->buflist, 0, count * sizeof(*entry->buflist));
 
         entry->buf_size = size;
         entry->page_order = page_order;
 
         offset = 0;
 
-        while ( entry->buf_count < count ) {
+        while (entry->buf_count < count) {
                 buf = &entry->buflist[entry->buf_count];
                 buf->idx = dma->buf_count + entry->buf_count;
                 buf->total = alignment;
                 buf->order = order;
                 buf->used = 0;
 
                 buf->offset = (dma->byte_count + offset);
                 buf->bus_address = agp_offset + offset;
                 buf->address = (void *)(agp_offset + offset
                                         + (unsigned long)dev->sg->virtual);
                 buf->next = NULL;
                 buf->waiting = 0;
                 buf->pending = 0;
-                init_waitqueue_head( &buf->dma_wait );
+                init_waitqueue_head(&buf->dma_wait);
                 buf->filp = NULL;
 
                 buf->dev_priv_size = dev->driver->dev_priv_size;
-                buf->dev_private = drm_alloc( buf->dev_priv_size,
-                                              DRM_MEM_BUFS );
-                if(!buf->dev_private) {
+                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+                if (!buf->dev_private) {
                         /* Set count correctly so we free the proper amount. */
                         entry->buf_count = count;
-                        drm_cleanup_buf_error(dev,entry);
-                        up( &dev->struct_sem );
-                        atomic_dec( &dev->buf_alloc );
+                        drm_cleanup_buf_error(dev, entry);
+                        up(&dev->struct_sem);
+                        atomic_dec(&dev->buf_alloc);
                         return -ENOMEM;
                 }
 
-                memset( buf->dev_private, 0, buf->dev_priv_size );
+                memset(buf->dev_private, 0, buf->dev_priv_size);
 
-                DRM_DEBUG( "buffer %d @ %p\n",
-                           entry->buf_count, buf->address );
+                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
 
                 offset += alignment;
                 entry->buf_count++;
                 byte_count += PAGE_SIZE << page_order;
         }
 
-        DRM_DEBUG( "byte_count: %d\n", byte_count );
+        DRM_DEBUG("byte_count: %d\n", byte_count);
 
-        temp_buflist = drm_realloc( dma->buflist,
+        temp_buflist = drm_realloc(dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
-                                    * sizeof(*dma->buflist),
-                                    DRM_MEM_BUFS );
-        if(!temp_buflist) {
+                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+        if (!temp_buflist) {
                 /* Free the entry because it isn't valid */
-                drm_cleanup_buf_error(dev,entry);
-                up( &dev->struct_sem );
-                atomic_dec( &dev->buf_alloc );
+                drm_cleanup_buf_error(dev, entry);
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
                 return -ENOMEM;
         }
         dma->buflist = temp_buflist;
 
-        for ( i = 0 ; i < entry->buf_count ; i++ ) {
+        for (i = 0; i < entry->buf_count; i++) {
                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
         }
 
         dma->buf_count += entry->buf_count;
         dma->byte_count += byte_count;
 
-        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
-        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
+        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-        up( &dev->struct_sem );
+        up(&dev->struct_sem);
 
         request->count = entry->buf_count;
         request->size = size;
 
         dma->flags = _DRM_DMA_USE_SG;
 
-        atomic_dec( &dev->buf_alloc );
+        atomic_dec(&dev->buf_alloc);
         return 0;
 }
 
-static int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
+static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 {
         drm_device_dma_t *dma = dev->dma;
         drm_buf_entry_t *entry;
@@ -1060,7 +1059,7 @@ static int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
 
         if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
                 return -EINVAL;
- 
+
         if (!dma)
                 return -EINVAL;
 
@@ -1210,43 +1209,41 @@ static int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request) | |||
1210 | * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent | 1209 | * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent |
1211 | * PCI memory respectively. | 1210 | * PCI memory respectively. |
1212 | */ | 1211 | */ |
1213 | int drm_addbufs( struct inode *inode, struct file *filp, | 1212 | int drm_addbufs(struct inode *inode, struct file *filp, |
1214 | unsigned int cmd, unsigned long arg ) | 1213 | unsigned int cmd, unsigned long arg) |
1215 | { | 1214 | { |
1216 | drm_buf_desc_t request; | 1215 | drm_buf_desc_t request; |
1217 | drm_file_t *priv = filp->private_data; | 1216 | drm_file_t *priv = filp->private_data; |
1218 | drm_device_t *dev = priv->head->dev; | 1217 | drm_device_t *dev = priv->head->dev; |
1219 | int ret; | 1218 | int ret; |
1220 | 1219 | ||
1221 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | 1220 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) |
1222 | return -EINVAL; | 1221 | return -EINVAL; |
1223 | 1222 | ||
1224 | if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg, | 1223 | if (copy_from_user(&request, (drm_buf_desc_t __user *) arg, |
1225 | sizeof(request) ) ) | 1224 | sizeof(request))) |
1226 | return -EFAULT; | 1225 | return -EFAULT; |
1227 | 1226 | ||
1228 | #if __OS_HAS_AGP | 1227 | #if __OS_HAS_AGP |
1229 | if ( request.flags & _DRM_AGP_BUFFER ) | 1228 | if (request.flags & _DRM_AGP_BUFFER) |
1230 | ret=drm_addbufs_agp(dev, &request); | 1229 | ret = drm_addbufs_agp(dev, &request); |
1231 | else | 1230 | else |
1232 | #endif | 1231 | #endif |
1233 | if ( request.flags & _DRM_SG_BUFFER ) | 1232 | if (request.flags & _DRM_SG_BUFFER) |
1234 | ret=drm_addbufs_sg(dev, &request); | 1233 | ret = drm_addbufs_sg(dev, &request); |
1235 | else if ( request.flags & _DRM_FB_BUFFER) | 1234 | else if (request.flags & _DRM_FB_BUFFER) |
1236 | ret=drm_addbufs_fb(dev, &request); | 1235 | ret = drm_addbufs_fb(dev, &request); |
1237 | else | 1236 | else |
1238 | ret=drm_addbufs_pci(dev, &request); | 1237 | ret = drm_addbufs_pci(dev, &request); |
1239 | 1238 | ||
1240 | if (ret==0) { | 1239 | if (ret == 0) { |
1241 | if (copy_to_user((void __user *)arg, &request, | 1240 | if (copy_to_user((void __user *)arg, &request, sizeof(request))) { |
1242 | sizeof(request))) { | ||
1243 | ret = -EFAULT; | 1241 | ret = -EFAULT; |
1244 | } | 1242 | } |
1245 | } | 1243 | } |
1246 | return ret; | 1244 | return ret; |
1247 | } | 1245 | } |
1248 | 1246 | ||
1249 | |||
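For reference, the request that round-trips through drm_addbufs() above is driven from user space roughly as follows. This is a minimal sketch, not part of the patch: the helper name, the buffer counts and sizes, and the <drm/drm.h> include path are illustrative, and it assumes the drm_buf_desc_t uapi layout of this era.

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>    /* uapi header path varies by era/distro */

    int request_dma_buffers(int fd)        /* hypothetical helper */
    {
            drm_buf_desc_t req = {
                    .count = 32,           /* buffers wanted (illustrative) */
                    .size  = 65536,        /* bytes per buffer (illustrative) */
                    .flags = _DRM_SG_BUFFER, /* routes to drm_addbufs_sg() */
            };

            if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &req) < 0) {
                    perror("DRM_IOCTL_ADD_BUFS");
                    return -1;
            }

            /* The kernel copied back what was actually allocated. */
            printf("got %d buffers of %d bytes\n", req.count, req.size);
            return 0;
    }

Whichever of _DRM_AGP_BUFFER, _DRM_SG_BUFFER or _DRM_FB_BUFFER is set in flags selects the allocator; with none set, the PCI path is taken, and the kernel writes the allocated count and size back into the same structure.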
1250 | /** | 1247 | /** |
1251 | * Get information about the buffer mappings. | 1248 | * Get information about the buffer mappings. |
1252 | * | 1249 | * |
@@ -1264,8 +1261,8 @@ int drm_addbufs( struct inode *inode, struct file *filp, | |||
1264 | * lock, preventing allocation of more buffers after this call. Information | 1261 | * lock, preventing allocation of more buffers after this call. Information |
1265 | * about each requested buffer is then copied into user space. | 1262 | * about each requested buffer is then copied into user space. |
1266 | */ | 1263 | */ |
1267 | int drm_infobufs( struct inode *inode, struct file *filp, | 1264 | int drm_infobufs(struct inode *inode, struct file *filp, |
1268 | unsigned int cmd, unsigned long arg ) | 1265 | unsigned int cmd, unsigned long arg) |
1269 | { | 1266 | { |
1270 | drm_file_t *priv = filp->private_data; | 1267 | drm_file_t *priv = filp->private_data; |
1271 | drm_device_t *dev = priv->head->dev; | 1268 | drm_device_t *dev = priv->head->dev; |
@@ -1278,58 +1275,61 @@ int drm_infobufs( struct inode *inode, struct file *filp, | |||
1278 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | 1275 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) |
1279 | return -EINVAL; | 1276 | return -EINVAL; |
1280 | 1277 | ||
1281 | if ( !dma ) return -EINVAL; | 1278 | if (!dma) |
1279 | return -EINVAL; | ||
1282 | 1280 | ||
1283 | spin_lock( &dev->count_lock ); | 1281 | spin_lock(&dev->count_lock); |
1284 | if ( atomic_read( &dev->buf_alloc ) ) { | 1282 | if (atomic_read(&dev->buf_alloc)) { |
1285 | spin_unlock( &dev->count_lock ); | 1283 | spin_unlock(&dev->count_lock); |
1286 | return -EBUSY; | 1284 | return -EBUSY; |
1287 | } | 1285 | } |
1288 | ++dev->buf_use; /* Can't allocate more after this call */ | 1286 | ++dev->buf_use; /* Can't allocate more after this call */ |
1289 | spin_unlock( &dev->count_lock ); | 1287 | spin_unlock(&dev->count_lock); |
1290 | 1288 | ||
1291 | if ( copy_from_user( &request, argp, sizeof(request) ) ) | 1289 | if (copy_from_user(&request, argp, sizeof(request))) |
1292 | return -EFAULT; | 1290 | return -EFAULT; |
1293 | 1291 | ||
1294 | for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) { | 1292 | for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { |
1295 | if ( dma->bufs[i].buf_count ) ++count; | 1293 | if (dma->bufs[i].buf_count) |
1294 | ++count; | ||
1296 | } | 1295 | } |
1297 | 1296 | ||
1298 | DRM_DEBUG( "count = %d\n", count ); | 1297 | DRM_DEBUG("count = %d\n", count); |
1299 | 1298 | ||
1300 | if ( request.count >= count ) { | 1299 | if (request.count >= count) { |
1301 | for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) { | 1300 | for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { |
1302 | if ( dma->bufs[i].buf_count ) { | 1301 | if (dma->bufs[i].buf_count) { |
1303 | drm_buf_desc_t __user *to = &request.list[count]; | 1302 | drm_buf_desc_t __user *to = |
1303 | &request.list[count]; | ||
1304 | drm_buf_entry_t *from = &dma->bufs[i]; | 1304 | drm_buf_entry_t *from = &dma->bufs[i]; |
1305 | drm_freelist_t *list = &dma->bufs[i].freelist; | 1305 | drm_freelist_t *list = &dma->bufs[i].freelist; |
1306 | if ( copy_to_user( &to->count, | 1306 | if (copy_to_user(&to->count, |
1307 | &from->buf_count, | 1307 | &from->buf_count, |
1308 | sizeof(from->buf_count) ) || | 1308 | sizeof(from->buf_count)) || |
1309 | copy_to_user( &to->size, | 1309 | copy_to_user(&to->size, |
1310 | &from->buf_size, | 1310 | &from->buf_size, |
1311 | sizeof(from->buf_size) ) || | 1311 | sizeof(from->buf_size)) || |
1312 | copy_to_user( &to->low_mark, | 1312 | copy_to_user(&to->low_mark, |
1313 | &list->low_mark, | 1313 | &list->low_mark, |
1314 | sizeof(list->low_mark) ) || | 1314 | sizeof(list->low_mark)) || |
1315 | copy_to_user( &to->high_mark, | 1315 | copy_to_user(&to->high_mark, |
1316 | &list->high_mark, | 1316 | &list->high_mark, |
1317 | sizeof(list->high_mark) ) ) | 1317 | sizeof(list->high_mark))) |
1318 | return -EFAULT; | 1318 | return -EFAULT; |
1319 | 1319 | ||
1320 | DRM_DEBUG( "%d %d %d %d %d\n", | 1320 | DRM_DEBUG("%d %d %d %d %d\n", |
1321 | i, | 1321 | i, |
1322 | dma->bufs[i].buf_count, | 1322 | dma->bufs[i].buf_count, |
1323 | dma->bufs[i].buf_size, | 1323 | dma->bufs[i].buf_size, |
1324 | dma->bufs[i].freelist.low_mark, | 1324 | dma->bufs[i].freelist.low_mark, |
1325 | dma->bufs[i].freelist.high_mark ); | 1325 | dma->bufs[i].freelist.high_mark); |
1326 | ++count; | 1326 | ++count; |
1327 | } | 1327 | } |
1328 | } | 1328 | } |
1329 | } | 1329 | } |
1330 | request.count = count; | 1330 | request.count = count; |
1331 | 1331 | ||
1332 | if ( copy_to_user( argp, &request, sizeof(request) ) ) | 1332 | if (copy_to_user(argp, &request, sizeof(request))) |
1333 | return -EFAULT; | 1333 | return -EFAULT; |
1334 | 1334 | ||
1335 | return 0; | 1335 | return 0; |
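Because drm_infobufs() only fills the list when request.count is at least the number of populated size orders, and always writes the real count back, user space typically calls it twice. A hedged sketch; dump_buffer_info() is a hypothetical name and the error handling is illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    int dump_buffer_info(int fd)           /* hypothetical helper */
    {
            drm_buf_info_t info = { .count = 0, .list = NULL };
            int i;

            /* Pass 1: count = 0 is below the populated-order count,
             * so the kernel only writes the count back. */
            if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) < 0)
                    return -1;
            if (info.count == 0)
                    return 0;

            info.list = calloc(info.count, sizeof(*info.list));
            if (!info.list)
                    return -1;

            /* Pass 2: request.count >= count, so entries are filled in. */
            if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) < 0) {
                    free(info.list);
                    return -1;
            }

            for (i = 0; i < info.count; i++)
                    printf("bucket %d: %d bufs of %d bytes, marks %d/%d\n",
                           i, info.list[i].count, info.list[i].size,
                           info.list[i].low_mark, info.list[i].high_mark);
            free(info.list);
            return 0;
    }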
@@ -1349,8 +1349,8 @@ int drm_infobufs( struct inode *inode, struct file *filp, | |||
1349 | * | 1349 | * |
1350 | * \note This ioctl is deprecated and rarely, if ever, used. | 1350 | * \note This ioctl is deprecated and rarely, if ever, used. |
1351 | */ | 1351 | */ |
1352 | int drm_markbufs( struct inode *inode, struct file *filp, | 1352 | int drm_markbufs(struct inode *inode, struct file *filp, |
1353 | unsigned int cmd, unsigned long arg ) | 1353 | unsigned int cmd, unsigned long arg) |
1354 | { | 1354 | { |
1355 | drm_file_t *priv = filp->private_data; | 1355 | drm_file_t *priv = filp->private_data; |
1356 | drm_device_t *dev = priv->head->dev; | 1356 | drm_device_t *dev = priv->head->dev; |
@@ -1362,44 +1362,45 @@ int drm_markbufs( struct inode *inode, struct file *filp, | |||
1362 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | 1362 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) |
1363 | return -EINVAL; | 1363 | return -EINVAL; |
1364 | 1364 | ||
1365 | if ( !dma ) return -EINVAL; | 1365 | if (!dma) |
1366 | return -EINVAL; | ||
1366 | 1367 | ||
1367 | if ( copy_from_user( &request, | 1368 | if (copy_from_user(&request, |
1368 | (drm_buf_desc_t __user *)arg, | 1369 | (drm_buf_desc_t __user *) arg, sizeof(request))) |
1369 | sizeof(request) ) ) | ||
1370 | return -EFAULT; | 1370 | return -EFAULT; |
1371 | 1371 | ||
1372 | DRM_DEBUG( "%d, %d, %d\n", | 1372 | DRM_DEBUG("%d, %d, %d\n", |
1373 | request.size, request.low_mark, request.high_mark ); | 1373 | request.size, request.low_mark, request.high_mark); |
1374 | order = drm_order( request.size ); | 1374 | order = drm_order(request.size); |
1375 | if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL; | 1375 | if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) |
1376 | return -EINVAL; | ||
1376 | entry = &dma->bufs[order]; | 1377 | entry = &dma->bufs[order]; |
1377 | 1378 | ||
1378 | if ( request.low_mark < 0 || request.low_mark > entry->buf_count ) | 1379 | if (request.low_mark < 0 || request.low_mark > entry->buf_count) |
1379 | return -EINVAL; | 1380 | return -EINVAL; |
1380 | if ( request.high_mark < 0 || request.high_mark > entry->buf_count ) | 1381 | if (request.high_mark < 0 || request.high_mark > entry->buf_count) |
1381 | return -EINVAL; | 1382 | return -EINVAL; |
1382 | 1383 | ||
1383 | entry->freelist.low_mark = request.low_mark; | 1384 | entry->freelist.low_mark = request.low_mark; |
1384 | entry->freelist.high_mark = request.high_mark; | 1385 | entry->freelist.high_mark = request.high_mark; |
1385 | 1386 | ||
1386 | return 0; | 1387 | return 0; |
1387 | } | 1388 | } |
1388 | 1389 | ||
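The marks set here feed the freelist logic: the kernel picks the bucket with drm_order(request.size) and stores the two watermarks. A sketch of the calling side, with a hypothetical helper name and illustrative values:

    #include <sys/ioctl.h>
    #include <drm/drm.h>

    int set_watermarks(int fd, int size, int low, int high) /* hypothetical */
    {
            drm_buf_desc_t req = {
                    .size      = size,  /* selects the bucket via drm_order(size) */
                    .low_mark  = low,
                    .high_mark = high,
            };

            return ioctl(fd, DRM_IOCTL_MARK_BUFS, &req);
    }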
1389 | /** | 1390 | /** |
1390 | * Unreserve the buffers in list, previously reserved using drmDMA. | 1391 | * Unreserve the buffers in list, previously reserved using drmDMA. |
1391 | * | 1392 | * |
1392 | * \param inode device inode. | 1393 | * \param inode device inode. |
1393 | * \param filp file pointer. | 1394 | * \param filp file pointer. |
1394 | * \param cmd command. | 1395 | * \param cmd command. |
1395 | * \param arg pointer to a drm_buf_free structure. | 1396 | * \param arg pointer to a drm_buf_free structure. |
1396 | * \return zero on success or a negative number on failure. | 1397 | * \return zero on success or a negative number on failure. |
1397 | * | 1398 | * |
1398 | * Calls free_buffer() for each used buffer. | 1399 | * Calls free_buffer() for each used buffer. |
1399 | * This function is primarily used for debugging. | 1400 | * This function is primarily used for debugging. |
1400 | */ | 1401 | */ |
1401 | int drm_freebufs( struct inode *inode, struct file *filp, | 1402 | int drm_freebufs(struct inode *inode, struct file *filp, |
1402 | unsigned int cmd, unsigned long arg ) | 1403 | unsigned int cmd, unsigned long arg) |
1403 | { | 1404 | { |
1404 | drm_file_t *priv = filp->private_data; | 1405 | drm_file_t *priv = filp->private_data; |
1405 | drm_device_t *dev = priv->head->dev; | 1406 | drm_device_t *dev = priv->head->dev; |
@@ -1412,31 +1413,29 @@ int drm_freebufs( struct inode *inode, struct file *filp, | |||
1412 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | 1413 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) |
1413 | return -EINVAL; | 1414 | return -EINVAL; |
1414 | 1415 | ||
1415 | if ( !dma ) return -EINVAL; | 1416 | if (!dma) |
1417 | return -EINVAL; | ||
1416 | 1418 | ||
1417 | if ( copy_from_user( &request, | 1419 | if (copy_from_user(&request, |
1418 | (drm_buf_free_t __user *)arg, | 1420 | (drm_buf_free_t __user *) arg, sizeof(request))) |
1419 | sizeof(request) ) ) | ||
1420 | return -EFAULT; | 1421 | return -EFAULT; |
1421 | 1422 | ||
1422 | DRM_DEBUG( "%d\n", request.count ); | 1423 | DRM_DEBUG("%d\n", request.count); |
1423 | for ( i = 0 ; i < request.count ; i++ ) { | 1424 | for (i = 0; i < request.count; i++) { |
1424 | if ( copy_from_user( &idx, | 1425 | if (copy_from_user(&idx, &request.list[i], sizeof(idx))) |
1425 | &request.list[i], | ||
1426 | sizeof(idx) ) ) | ||
1427 | return -EFAULT; | 1426 | return -EFAULT; |
1428 | if ( idx < 0 || idx >= dma->buf_count ) { | 1427 | if (idx < 0 || idx >= dma->buf_count) { |
1429 | DRM_ERROR( "Index %d (of %d max)\n", | 1428 | DRM_ERROR("Index %d (of %d max)\n", |
1430 | idx, dma->buf_count - 1 ); | 1429 | idx, dma->buf_count - 1); |
1431 | return -EINVAL; | 1430 | return -EINVAL; |
1432 | } | 1431 | } |
1433 | buf = dma->buflist[idx]; | 1432 | buf = dma->buflist[idx]; |
1434 | if ( buf->filp != filp ) { | 1433 | if (buf->filp != filp) { |
1435 | DRM_ERROR( "Process %d freeing buffer not owned\n", | 1434 | DRM_ERROR("Process %d freeing buffer not owned\n", |
1436 | current->pid ); | 1435 | current->pid); |
1437 | return -EINVAL; | 1436 | return -EINVAL; |
1438 | } | 1437 | } |
1439 | drm_free_buffer( dev, buf ); | 1438 | drm_free_buffer(dev, buf); |
1440 | } | 1439 | } |
1441 | 1440 | ||
1442 | return 0; | 1441 | return 0; |
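The matching user-space side, sketched under the same assumptions; release_buffers() is a hypothetical name, and the index list would come from an earlier DRM_IOCTL_MAP_BUFS call:

    #include <sys/ioctl.h>
    #include <drm/drm.h>

    int release_buffers(int fd, int *indices, int n)  /* hypothetical helper */
    {
            drm_buf_free_t req = {
                    .count = n,
                    .list  = indices, /* indices from DRM_IOCTL_MAP_BUFS */
            };

            /* The kernel rejects out-of-range indices and buffers owned
             * by another file handle with -EINVAL, as shown above. */
            return ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
    }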
@@ -1455,8 +1454,8 @@ int drm_freebufs( struct inode *inode, struct file *filp, | |||
1455 | * about each buffer into user space. The PCI buffers are already mapped on the | 1454 | * about each buffer into user space. The PCI buffers are already mapped on the |
1456 | * addbufs_pci() call. | 1455 | * addbufs_pci() call. |
1457 | */ | 1456 | */ |
1458 | int drm_mapbufs( struct inode *inode, struct file *filp, | 1457 | int drm_mapbufs(struct inode *inode, struct file *filp, |
1459 | unsigned int cmd, unsigned long arg ) | 1458 | unsigned int cmd, unsigned long arg) |
1460 | { | 1459 | { |
1461 | drm_file_t *priv = filp->private_data; | 1460 | drm_file_t *priv = filp->private_data; |
1462 | drm_device_t *dev = priv->head->dev; | 1461 | drm_device_t *dev = priv->head->dev; |
@@ -1472,86 +1471,84 @@ int drm_mapbufs( struct inode *inode, struct file *filp, | |||
1472 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | 1471 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) |
1473 | return -EINVAL; | 1472 | return -EINVAL; |
1474 | 1473 | ||
1475 | if ( !dma ) return -EINVAL; | 1474 | if (!dma) |
1475 | return -EINVAL; | ||
1476 | 1476 | ||
1477 | spin_lock( &dev->count_lock ); | 1477 | spin_lock(&dev->count_lock); |
1478 | if ( atomic_read( &dev->buf_alloc ) ) { | 1478 | if (atomic_read(&dev->buf_alloc)) { |
1479 | spin_unlock( &dev->count_lock ); | 1479 | spin_unlock(&dev->count_lock); |
1480 | return -EBUSY; | 1480 | return -EBUSY; |
1481 | } | 1481 | } |
1482 | dev->buf_use++; /* Can't allocate more after this call */ | 1482 | dev->buf_use++; /* Can't allocate more after this call */ |
1483 | spin_unlock( &dev->count_lock ); | 1483 | spin_unlock(&dev->count_lock); |
1484 | 1484 | ||
1485 | if ( copy_from_user( &request, argp, sizeof(request) ) ) | 1485 | if (copy_from_user(&request, argp, sizeof(request))) |
1486 | return -EFAULT; | 1486 | return -EFAULT; |
1487 | 1487 | ||
1488 | if ( request.count >= dma->buf_count ) { | 1488 | if (request.count >= dma->buf_count) { |
1489 | if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) | 1489 | if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) |
1490 | || (drm_core_check_feature(dev, DRIVER_SG) | 1490 | || (drm_core_check_feature(dev, DRIVER_SG) |
1491 | && (dma->flags & _DRM_DMA_USE_SG)) | 1491 | && (dma->flags & _DRM_DMA_USE_SG)) |
1492 | || (drm_core_check_feature(dev, DRIVER_FB_DMA) | 1492 | || (drm_core_check_feature(dev, DRIVER_FB_DMA) |
1493 | && (dma->flags & _DRM_DMA_USE_FB))) { | 1493 | && (dma->flags & _DRM_DMA_USE_FB))) { |
1494 | drm_map_t *map = dev->agp_buffer_map; | 1494 | drm_map_t *map = dev->agp_buffer_map; |
1495 | unsigned long token = dev->agp_buffer_token; | 1495 | unsigned long token = dev->agp_buffer_token; |
1496 | 1496 | ||
1497 | if ( !map ) { | 1497 | if (!map) { |
1498 | retcode = -EINVAL; | 1498 | retcode = -EINVAL; |
1499 | goto done; | 1499 | goto done; |
1500 | } | 1500 | } |
1501 | 1501 | ||
1502 | down_write( ¤t->mm->mmap_sem ); | 1502 | down_write(¤t->mm->mmap_sem); |
1503 | virtual = do_mmap( filp, 0, map->size, | 1503 | virtual = do_mmap(filp, 0, map->size, |
1504 | PROT_READ | PROT_WRITE, | 1504 | PROT_READ | PROT_WRITE, |
1505 | MAP_SHARED, | 1505 | MAP_SHARED, token); |
1506 | token ); | 1506 | up_write(¤t->mm->mmap_sem); |
1507 | up_write( ¤t->mm->mmap_sem ); | ||
1508 | } else { | 1507 | } else { |
1509 | down_write( ¤t->mm->mmap_sem ); | 1508 | down_write(¤t->mm->mmap_sem); |
1510 | virtual = do_mmap( filp, 0, dma->byte_count, | 1509 | virtual = do_mmap(filp, 0, dma->byte_count, |
1511 | PROT_READ | PROT_WRITE, | 1510 | PROT_READ | PROT_WRITE, |
1512 | MAP_SHARED, 0 ); | 1511 | MAP_SHARED, 0); |
1513 | up_write( ¤t->mm->mmap_sem ); | 1512 | up_write(¤t->mm->mmap_sem); |
1514 | } | 1513 | } |
1515 | if ( virtual > -1024UL ) { | 1514 | if (virtual > -1024UL) { |
1516 | /* Real error */ | 1515 | /* Real error */ |
1517 | retcode = (signed long)virtual; | 1516 | retcode = (signed long)virtual; |
1518 | goto done; | 1517 | goto done; |
1519 | } | 1518 | } |
1520 | request.virtual = (void __user *)virtual; | 1519 | request.virtual = (void __user *)virtual; |
1521 | 1520 | ||
1522 | for ( i = 0 ; i < dma->buf_count ; i++ ) { | 1521 | for (i = 0; i < dma->buf_count; i++) { |
1523 | if ( copy_to_user( &request.list[i].idx, | 1522 | if (copy_to_user(&request.list[i].idx, |
1524 | &dma->buflist[i]->idx, | 1523 | &dma->buflist[i]->idx, |
1525 | sizeof(request.list[0].idx) ) ) { | 1524 | sizeof(request.list[0].idx))) { |
1526 | retcode = -EFAULT; | 1525 | retcode = -EFAULT; |
1527 | goto done; | 1526 | goto done; |
1528 | } | 1527 | } |
1529 | if ( copy_to_user( &request.list[i].total, | 1528 | if (copy_to_user(&request.list[i].total, |
1530 | &dma->buflist[i]->total, | 1529 | &dma->buflist[i]->total, |
1531 | sizeof(request.list[0].total) ) ) { | 1530 | sizeof(request.list[0].total))) { |
1532 | retcode = -EFAULT; | 1531 | retcode = -EFAULT; |
1533 | goto done; | 1532 | goto done; |
1534 | } | 1533 | } |
1535 | if ( copy_to_user( &request.list[i].used, | 1534 | if (copy_to_user(&request.list[i].used, |
1536 | &zero, | 1535 | &zero, sizeof(zero))) { |
1537 | sizeof(zero) ) ) { | ||
1538 | retcode = -EFAULT; | 1536 | retcode = -EFAULT; |
1539 | goto done; | 1537 | goto done; |
1540 | } | 1538 | } |
1541 | address = virtual + dma->buflist[i]->offset; /* *** */ | 1539 | address = virtual + dma->buflist[i]->offset; /* *** */ |
1542 | if ( copy_to_user( &request.list[i].address, | 1540 | if (copy_to_user(&request.list[i].address, |
1543 | &address, | 1541 | &address, sizeof(address))) { |
1544 | sizeof(address) ) ) { | ||
1545 | retcode = -EFAULT; | 1542 | retcode = -EFAULT; |
1546 | goto done; | 1543 | goto done; |
1547 | } | 1544 | } |
1548 | } | 1545 | } |
1549 | } | 1546 | } |
1550 | done: | 1547 | done: |
1551 | request.count = dma->buf_count; | 1548 | request.count = dma->buf_count; |
1552 | DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode ); | 1549 | DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); |
1553 | 1550 | ||
1554 | if ( copy_to_user( argp, &request, sizeof(request) ) ) | 1551 | if (copy_to_user(argp, &request, sizeof(request))) |
1555 | return -EFAULT; | 1552 | return -EFAULT; |
1556 | 1553 | ||
1557 | return retcode; | 1554 | return retcode; |
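The ioctl maps the whole pool with one do_mmap() and then describes each buffer relative to that mapping, so a caller must size its list to the full buffer count up front. A sketch with a hypothetical helper name:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    int map_buffers(int fd, int total)     /* hypothetical helper */
    {
            drm_buf_map_t req;
            int i;

            /* 'total' must be >= the real buffer count (e.g. from
             * DRM_IOCTL_INFO_BUFS), or the kernel skips the mapping. */
            req.count   = total;
            req.virtual = NULL;
            req.list    = calloc(total, sizeof(*req.list));
            if (!req.list)
                    return -1;

            if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &req) < 0) {
                    free(req.list);
                    return -1;
            }

            /* req.virtual is the base of the single mapping; each
             * entry's address points inside it. */
            for (i = 0; i < req.count && i < total; i++)
                    printf("buf %d: %d bytes at %p\n",
                           req.list[i].idx, req.list[i].total,
                           req.list[i].address);

            free(req.list);
            return 0;
    }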
@@ -1560,23 +1557,23 @@ int drm_mapbufs( struct inode *inode, struct file *filp, | |||
1560 | /** | 1557 | /** |
1561 | * Compute size order. Returns the exponent of the smallest power of two which | 1558 | * Compute size order. Returns the exponent of the smallest power of two which |
1562 | * is greater than or equal to the given number. | 1559 | * is greater than or equal to the given number. |
1563 | * | 1560 | * |
1564 | * \param size size. | 1561 | * \param size size. |
1565 | * \return order. | 1562 | * \return order. |
1566 | * | 1563 | * |
1567 | * \todo Can be made faster. | 1564 | * \todo Can be made faster. |
1568 | */ | 1565 | */ |
1569 | int drm_order( unsigned long size ) | 1566 | int drm_order(unsigned long size) |
1570 | { | 1567 | { |
1571 | int order; | 1568 | int order; |
1572 | unsigned long tmp; | 1569 | unsigned long tmp; |
1573 | 1570 | ||
1574 | for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) | 1571 | for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ; |
1575 | ; | ||
1576 | 1572 | ||
1577 | if (size & (size - 1)) | 1573 | if (size & (size - 1)) |
1578 | ++order; | 1574 | ++order; |
1579 | 1575 | ||
1580 | return order; | 1576 | return order; |
1581 | } | 1577 | } |
1578 | |||
1582 | EXPORT_SYMBOL(drm_order); | 1579 | EXPORT_SYMBOL(drm_order); |
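drm_order() is just ceil(log2(size)): the loop finds the position of the highest set bit, and the (size & (size - 1)) test rounds up for non-powers-of-two. A standalone re-statement with a few checked values; order_of() is a hypothetical name, not kernel API:

    #include <assert.h>

    /* Reference re-statement of drm_order(): ceil(log2(size)). */
    static int order_of(unsigned long size)
    {
            int order = 0;
            unsigned long tmp;

            for (tmp = size >> 1; tmp; tmp >>= 1)
                    order++;            /* floor(log2(size)) */
            if (size & (size - 1))
                    ++order;            /* round up if not a power of two */
            return order;
    }

    int main(void)
    {
            assert(order_of(1)    == 0);
            assert(order_of(2)    == 1);
            assert(order_of(3)    == 2);
            assert(order_of(4096) == 12);  /* PAGE_SIZE on most systems */
            assert(order_of(4097) == 13);
            return 0;
    }

This is why drm_markbufs() above can index dma->bufs[] directly with the result: each bucket holds buffers of one power-of-two size class.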