path: root/drivers/gpu/drm/drm_bufs.c
author     Dave Airlie <airlied@redhat.com>   2008-05-28 20:09:59 -0400
committer  Dave Airlie <airlied@redhat.com>   2008-07-13 20:45:01 -0400
commit     c0e09200dc0813972442e550a5905a132768e56c (patch)
tree       d38e635a30ff8b0a2b98b9d7f97cab1501f8209e /drivers/gpu/drm/drm_bufs.c
parent     bce7f793daec3e65ec5c5705d2457b81fe7b5725 (diff)
drm: reorganise drm tree to be more future proof.
With the coming of kernel-based modesetting and the memory manager work, the everything-in-one-directory approach was getting very ugly and starting to be unmanageable. This restructures the DRM along the lines of other kernel components: it creates a drivers/gpu/drm directory and moves the hardware drivers into subdirectories, moves the includes into include/drm, and sets up unifdef for the userspace headers we should be exporting.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/drm_bufs.c')
-rw-r--r--   drivers/gpu/drm/drm_bufs.c   1601
1 file changed, 1601 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
new file mode 100644
index 000000000000..bde64b84166e
--- /dev/null
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -0,0 +1,1601 @@
1/**
2 * \file drm_bufs.c
3 * Generic buffer template
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
11 *
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include <linux/vmalloc.h>
37#include "drmP.h"
38
39unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
40{
41 return pci_resource_start(dev->pdev, resource);
42}
43EXPORT_SYMBOL(drm_get_resource_start);
44
45unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
46{
47 return pci_resource_len(dev->pdev, resource);
48}
49
50EXPORT_SYMBOL(drm_get_resource_len);
51
52static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
53 drm_local_map_t *map)
54{
55 struct drm_map_list *entry;
56 list_for_each_entry(entry, &dev->maplist, head) {
57 if (entry->map && map->type == entry->map->type &&
58 ((entry->map->offset == map->offset) ||
59 (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
60 return entry;
61 }
62 }
63
64 return NULL;
65}
66
67static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
68 unsigned long user_token, int hashed_handle)
69{
70 int use_hashed_handle;
71#if (BITS_PER_LONG == 64)
72 use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
73#elif (BITS_PER_LONG == 32)
74 use_hashed_handle = hashed_handle;
75#else
76#error Unsupported long size. Neither 64 nor 32 bits.
77#endif
78
79 if (!use_hashed_handle) {
80 int ret;
81 hash->key = user_token >> PAGE_SHIFT;
82 ret = drm_ht_insert_item(&dev->map_hash, hash);
83 if (ret != -EINVAL)
84 return ret;
85 }
86 return drm_ht_just_insert_please(&dev->map_hash, hash,
87 user_token, 32 - PAGE_SHIFT - 3,
88 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
89}
90
91/**
92 * Ioctl to specify a range of memory that is available for mapping by a non-root process.
93 *
94 * \param inode device inode.
95 * \param file_priv DRM file private.
96 * \param cmd command.
97 * \param arg pointer to a drm_map structure.
98 * \return zero on success or a negative value on error.
99 *
100 * Adjusts the memory offset to its absolute value according to the mapping
101 * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
102 * applicable and if supported by the kernel.
103 */
104static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
105 unsigned int size, enum drm_map_type type,
106 enum drm_map_flags flags,
107 struct drm_map_list ** maplist)
108{
109 struct drm_map *map;
110 struct drm_map_list *list;
111 drm_dma_handle_t *dmah;
112 unsigned long user_token;
113 int ret;
114
115 map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
116 if (!map)
117 return -ENOMEM;
118
119 map->offset = offset;
120 map->size = size;
121 map->flags = flags;
122 map->type = type;
123
124	/* Only allow shared memory to be removable since we only keep enough
125	 * bookkeeping information about shared memory to allow for removal
126 * when processes fork.
127 */
128 if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
129 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
130 return -EINVAL;
131 }
132 DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
133 map->offset, map->size, map->type);
134 if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
135 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
136 return -EINVAL;
137 }
138 map->mtrr = -1;
139 map->handle = NULL;
140
141 switch (map->type) {
142 case _DRM_REGISTERS:
143 case _DRM_FRAME_BUFFER:
144#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
145 if (map->offset + (map->size-1) < map->offset ||
146 map->offset < virt_to_phys(high_memory)) {
147 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
148 return -EINVAL;
149 }
150#endif
151#ifdef __alpha__
152 map->offset += dev->hose->mem_space->start;
153#endif
154 /* Some drivers preinitialize some maps, without the X Server
155 * needing to be aware of it. Therefore, we just return success
156 * when the server tries to create a duplicate map.
157 */
158 list = drm_find_matching_map(dev, map);
159 if (list != NULL) {
160 if (list->map->size != map->size) {
161 DRM_DEBUG("Matching maps of type %d with "
162 "mismatched sizes, (%ld vs %ld)\n",
163 map->type, map->size,
164 list->map->size);
165 list->map->size = map->size;
166 }
167
168 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
169 *maplist = list;
170 return 0;
171 }
172
173 if (drm_core_has_MTRR(dev)) {
174 if (map->type == _DRM_FRAME_BUFFER ||
175 (map->flags & _DRM_WRITE_COMBINING)) {
176 map->mtrr = mtrr_add(map->offset, map->size,
177 MTRR_TYPE_WRCOMB, 1);
178 }
179 }
180 if (map->type == _DRM_REGISTERS) {
181 map->handle = ioremap(map->offset, map->size);
182 if (!map->handle) {
183 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
184 return -ENOMEM;
185 }
186 }
187
188 break;
189 case _DRM_SHM:
190 list = drm_find_matching_map(dev, map);
191 if (list != NULL) {
192 if(list->map->size != map->size) {
193 DRM_DEBUG("Matching maps of type %d with "
194 "mismatched sizes, (%ld vs %ld)\n",
195 map->type, map->size, list->map->size);
196 list->map->size = map->size;
197 }
198
199 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
200 *maplist = list;
201 return 0;
202 }
203 map->handle = vmalloc_user(map->size);
204 DRM_DEBUG("%lu %d %p\n",
205 map->size, drm_order(map->size), map->handle);
206 if (!map->handle) {
207 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
208 return -ENOMEM;
209 }
210 map->offset = (unsigned long)map->handle;
211 if (map->flags & _DRM_CONTAINS_LOCK) {
212 /* Prevent a 2nd X Server from creating a 2nd lock */
213 if (dev->lock.hw_lock != NULL) {
214 vfree(map->handle);
215 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
216 return -EBUSY;
217 }
218 dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */
219 }
220 break;
221 case _DRM_AGP: {
222 struct drm_agp_mem *entry;
223 int valid = 0;
224
225 if (!drm_core_has_AGP(dev)) {
226 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
227 return -EINVAL;
228 }
229#ifdef __alpha__
230 map->offset += dev->hose->mem_space->start;
231#endif
232 /* In some cases (i810 driver), user space may have already
233 * added the AGP base itself, because dev->agp->base previously
234 * only got set during AGP enable. So, only add the base
235 * address if the map's offset isn't already within the
236 * aperture.
237 */
238 if (map->offset < dev->agp->base ||
239 map->offset > dev->agp->base +
240 dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
241 map->offset += dev->agp->base;
242 }
243 map->mtrr = dev->agp->agp_mtrr; /* for getmap */
244
245		/* This assumes the DRM is in total control of AGP space.
246		 * That is not always the case, as AGP can be in the control of
247		 * user space (e.g. the i810 driver). In that case this loop will
248		 * be skipped, and we double-check that dev->agp->memory is
249		 * actually set, as well as the offset being invalid, before
250		 * returning -EPERM. */
251 list_for_each_entry(entry, &dev->agp->memory, head) {
252 if ((map->offset >= entry->bound) &&
253 (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
254 valid = 1;
255 break;
256 }
257 }
258 if (!list_empty(&dev->agp->memory) && !valid) {
259 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
260 return -EPERM;
261 }
262 DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
263
264 break;
265 }
266 case _DRM_SCATTER_GATHER:
267 if (!dev->sg) {
268 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
269 return -EINVAL;
270 }
271 map->offset += (unsigned long)dev->sg->virtual;
272 break;
273 case _DRM_CONSISTENT:
274		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
275 * As we're limiting the address to 2^32-1 (or less),
276 * casting it down to 32 bits is no problem, but we
277 * need to point to a 64bit variable first. */
278 dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
279 if (!dmah) {
280 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
281 return -ENOMEM;
282 }
283 map->handle = dmah->vaddr;
284 map->offset = (unsigned long)dmah->busaddr;
285 kfree(dmah);
286 break;
287 default:
288 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
289 return -EINVAL;
290 }
291
292 list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
293 if (!list) {
294 if (map->type == _DRM_REGISTERS)
295 iounmap(map->handle);
296 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
297 return -EINVAL;
298 }
299 memset(list, 0, sizeof(*list));
300 list->map = map;
301
302 mutex_lock(&dev->struct_mutex);
303 list_add(&list->head, &dev->maplist);
304
305 /* Assign a 32-bit handle */
306 /* We do it here so that dev->struct_mutex protects the increment */
307 user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
308 map->offset;
309 ret = drm_map_handle(dev, &list->hash, user_token, 0);
310 if (ret) {
311 if (map->type == _DRM_REGISTERS)
312 iounmap(map->handle);
313 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
314 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
315 mutex_unlock(&dev->struct_mutex);
316 return ret;
317 }
318
319 list->user_token = list->hash.key << PAGE_SHIFT;
320 mutex_unlock(&dev->struct_mutex);
321
322 *maplist = list;
323 return 0;
324 }
325
326int drm_addmap(struct drm_device * dev, unsigned int offset,
327 unsigned int size, enum drm_map_type type,
328 enum drm_map_flags flags, drm_local_map_t ** map_ptr)
329{
330 struct drm_map_list *list;
331 int rc;
332
333 rc = drm_addmap_core(dev, offset, size, type, flags, &list);
334 if (!rc)
335 *map_ptr = list->map;
336 return rc;
337}
338
339EXPORT_SYMBOL(drm_addmap);
340
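Editor's note: a minimal usage sketch, assuming a hypothetical driver compiled in the same drmP.h context as this file (example_map_mmio(), the mmio pointer and the BAR index are illustrative, not part of this file). A driver resolves its register BAR with the resource helpers above and hands it to drm_addmap():

static drm_local_map_t *mmio;	/* hypothetical driver-private map pointer */

static int example_map_mmio(struct drm_device *dev)
{
	int ret;

	/* assume BAR 0 holds the MMIO registers for this sketch */
	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
			 drm_get_resource_len(dev, 0),
			 _DRM_REGISTERS, _DRM_READ_ONLY, &mmio);
	if (ret)
		return ret;

	/* mmio->handle now holds the ioremap()ed kernel virtual address */
	return 0;
}

The matching teardown via drm_rmmap() is sketched after that function below.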
341int drm_addmap_ioctl(struct drm_device *dev, void *data,
342 struct drm_file *file_priv)
343{
344 struct drm_map *map = data;
345 struct drm_map_list *maplist;
346 int err;
347
348 if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
349 return -EPERM;
350
351 err = drm_addmap_core(dev, map->offset, map->size, map->type,
352 map->flags, &maplist);
353
354 if (err)
355 return err;
356
357	/* avoid a warning on 64-bit; this cast isn't very nice, but the API is already set, so it's too late to change it */
358 map->handle = (void *)(unsigned long)maplist->user_token;
359 return 0;
360}
361
362/**
363 * Remove a map from the map list and deallocate its resources if the
364 * mapping isn't in use.
365 *
366 * \param inode device inode.
367 * \param file_priv DRM file private.
368 * \param cmd command.
369 * \param arg pointer to a struct drm_map structure.
370 * \return zero on success or a negative value on error.
371 *
372 * Searches for the map on drm_device::maplist, removes it from the list,
373 * checks whether it is being used, and frees any associated resources
374 * (such as MTRRs) if it is not in use.
375 *
376 * \sa drm_addmap
377 */
378int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
379{
380 struct drm_map_list *r_list = NULL, *list_t;
381 drm_dma_handle_t dmah;
382 int found = 0;
383
384 /* Find the list entry for the map and remove it */
385 list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
386 if (r_list->map == map) {
387 list_del(&r_list->head);
388 drm_ht_remove_key(&dev->map_hash,
389 r_list->user_token >> PAGE_SHIFT);
390 drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
391 found = 1;
392 break;
393 }
394 }
395
396 if (!found)
397 return -EINVAL;
398
399 switch (map->type) {
400 case _DRM_REGISTERS:
401 iounmap(map->handle);
402 /* FALLTHROUGH */
403 case _DRM_FRAME_BUFFER:
404 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
405 int retcode;
406 retcode = mtrr_del(map->mtrr, map->offset, map->size);
407 DRM_DEBUG("mtrr_del=%d\n", retcode);
408 }
409 break;
410 case _DRM_SHM:
411 vfree(map->handle);
412 break;
413 case _DRM_AGP:
414 case _DRM_SCATTER_GATHER:
415 break;
416 case _DRM_CONSISTENT:
417 dmah.vaddr = map->handle;
418 dmah.busaddr = map->offset;
419 dmah.size = map->size;
420 __drm_pci_free(dev, &dmah);
421 break;
422 }
423 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
424
425 return 0;
426}
427
428int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
429{
430 int ret;
431
432 mutex_lock(&dev->struct_mutex);
433 ret = drm_rmmap_locked(dev, map);
434 mutex_unlock(&dev->struct_mutex);
435
436 return ret;
437}
438EXPORT_SYMBOL(drm_rmmap);
439
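Editor's note: the matching teardown for the hypothetical mapping sketch above.

static void example_unmap_mmio(struct drm_device *dev)
{
	if (mmio) {
		drm_rmmap(dev, mmio);	/* iounmap()s the registers and releases any MTRR */
		mmio = NULL;
	}
}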
440/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
441 * the last close of the device, and this is necessary for cleanup when things
442 * exit uncleanly. Therefore, having userland manually remove mappings seems
443 * like a pointless exercise since they're going away anyway.
444 *
445 * One use case might be after addmap is allowed for normal users for SHM and
446 * gets used by drivers that the server doesn't need to care about. This seems
447 * unlikely.
448 */
449int drm_rmmap_ioctl(struct drm_device *dev, void *data,
450 struct drm_file *file_priv)
451{
452 struct drm_map *request = data;
453 drm_local_map_t *map = NULL;
454 struct drm_map_list *r_list;
455 int ret;
456
457 mutex_lock(&dev->struct_mutex);
458 list_for_each_entry(r_list, &dev->maplist, head) {
459 if (r_list->map &&
460 r_list->user_token == (unsigned long)request->handle &&
461 r_list->map->flags & _DRM_REMOVABLE) {
462 map = r_list->map;
463 break;
464 }
465 }
466
467	/* List has wrapped around to the head pointer, or it's empty and we
468	 * didn't find anything.
469	 */
470 if (list_empty(&dev->maplist) || !map) {
471 mutex_unlock(&dev->struct_mutex);
472 return -EINVAL;
473 }
474
475 /* Register and framebuffer maps are permanent */
476 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
477 mutex_unlock(&dev->struct_mutex);
478 return 0;
479 }
480
481 ret = drm_rmmap_locked(dev, map);
482
483 mutex_unlock(&dev->struct_mutex);
484
485 return ret;
486}
487
488/**
489 * Cleanup after an error on one of the addbufs() functions.
490 *
491 * \param dev DRM device.
492 * \param entry buffer entry where the error occurred.
493 *
494 * Frees any pages and buffers associated with the given entry.
495 */
496static void drm_cleanup_buf_error(struct drm_device * dev,
497 struct drm_buf_entry * entry)
498{
499 int i;
500
501 if (entry->seg_count) {
502 for (i = 0; i < entry->seg_count; i++) {
503 if (entry->seglist[i]) {
504 drm_pci_free(dev, entry->seglist[i]);
505 }
506 }
507 drm_free(entry->seglist,
508 entry->seg_count *
509 sizeof(*entry->seglist), DRM_MEM_SEGS);
510
511 entry->seg_count = 0;
512 }
513
514 if (entry->buf_count) {
515 for (i = 0; i < entry->buf_count; i++) {
516 if (entry->buflist[i].dev_private) {
517 drm_free(entry->buflist[i].dev_private,
518 entry->buflist[i].dev_priv_size,
519 DRM_MEM_BUFS);
520 }
521 }
522 drm_free(entry->buflist,
523 entry->buf_count *
524 sizeof(*entry->buflist), DRM_MEM_BUFS);
525
526 entry->buf_count = 0;
527 }
528}
529
530#if __OS_HAS_AGP
531/**
532 * Add AGP buffers for DMA transfers.
533 *
534 * \param dev struct drm_device to which the buffers are to be added.
535 * \param request pointer to a struct drm_buf_desc describing the request.
536 * \return zero on success or a negative number on failure.
537 *
538 * After some sanity checks creates a drm_buf structure for each buffer and
539 * reallocates the buffer list of the same size order to accommodate the new
540 * buffers.
541 */
542int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
543{
544 struct drm_device_dma *dma = dev->dma;
545 struct drm_buf_entry *entry;
546 struct drm_agp_mem *agp_entry;
547 struct drm_buf *buf;
548 unsigned long offset;
549 unsigned long agp_offset;
550 int count;
551 int order;
552 int size;
553 int alignment;
554 int page_order;
555 int total;
556 int byte_count;
557 int i, valid;
558 struct drm_buf **temp_buflist;
559
560 if (!dma)
561 return -EINVAL;
562
563 count = request->count;
564 order = drm_order(request->size);
565 size = 1 << order;
566
567 alignment = (request->flags & _DRM_PAGE_ALIGN)
568 ? PAGE_ALIGN(size) : size;
569 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
570 total = PAGE_SIZE << page_order;
571
572 byte_count = 0;
573 agp_offset = dev->agp->base + request->agp_start;
574
575 DRM_DEBUG("count: %d\n", count);
576 DRM_DEBUG("order: %d\n", order);
577 DRM_DEBUG("size: %d\n", size);
578 DRM_DEBUG("agp_offset: %lx\n", agp_offset);
579 DRM_DEBUG("alignment: %d\n", alignment);
580 DRM_DEBUG("page_order: %d\n", page_order);
581 DRM_DEBUG("total: %d\n", total);
582
583 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
584 return -EINVAL;
585 if (dev->queue_count)
586 return -EBUSY; /* Not while in use */
587
588 /* Make sure buffers are located in AGP memory that we own */
589 valid = 0;
590 list_for_each_entry(agp_entry, &dev->agp->memory, head) {
591 if ((agp_offset >= agp_entry->bound) &&
592 (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
593 valid = 1;
594 break;
595 }
596 }
597 if (!list_empty(&dev->agp->memory) && !valid) {
598 DRM_DEBUG("zone invalid\n");
599 return -EINVAL;
600 }
601 spin_lock(&dev->count_lock);
602 if (dev->buf_use) {
603 spin_unlock(&dev->count_lock);
604 return -EBUSY;
605 }
606 atomic_inc(&dev->buf_alloc);
607 spin_unlock(&dev->count_lock);
608
609 mutex_lock(&dev->struct_mutex);
610 entry = &dma->bufs[order];
611 if (entry->buf_count) {
612 mutex_unlock(&dev->struct_mutex);
613 atomic_dec(&dev->buf_alloc);
614 return -ENOMEM; /* May only call once for each order */
615 }
616
617 if (count < 0 || count > 4096) {
618 mutex_unlock(&dev->struct_mutex);
619 atomic_dec(&dev->buf_alloc);
620 return -EINVAL;
621 }
622
623 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
624 DRM_MEM_BUFS);
625 if (!entry->buflist) {
626 mutex_unlock(&dev->struct_mutex);
627 atomic_dec(&dev->buf_alloc);
628 return -ENOMEM;
629 }
630 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
631
632 entry->buf_size = size;
633 entry->page_order = page_order;
634
635 offset = 0;
636
637 while (entry->buf_count < count) {
638 buf = &entry->buflist[entry->buf_count];
639 buf->idx = dma->buf_count + entry->buf_count;
640 buf->total = alignment;
641 buf->order = order;
642 buf->used = 0;
643
644 buf->offset = (dma->byte_count + offset);
645 buf->bus_address = agp_offset + offset;
646 buf->address = (void *)(agp_offset + offset);
647 buf->next = NULL;
648 buf->waiting = 0;
649 buf->pending = 0;
650 init_waitqueue_head(&buf->dma_wait);
651 buf->file_priv = NULL;
652
653 buf->dev_priv_size = dev->driver->dev_priv_size;
654 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
655 if (!buf->dev_private) {
656 /* Set count correctly so we free the proper amount. */
657 entry->buf_count = count;
658 drm_cleanup_buf_error(dev, entry);
659 mutex_unlock(&dev->struct_mutex);
660 atomic_dec(&dev->buf_alloc);
661 return -ENOMEM;
662 }
663 memset(buf->dev_private, 0, buf->dev_priv_size);
664
665 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
666
667 offset += alignment;
668 entry->buf_count++;
669 byte_count += PAGE_SIZE << page_order;
670 }
671
672 DRM_DEBUG("byte_count: %d\n", byte_count);
673
674 temp_buflist = drm_realloc(dma->buflist,
675 dma->buf_count * sizeof(*dma->buflist),
676 (dma->buf_count + entry->buf_count)
677 * sizeof(*dma->buflist), DRM_MEM_BUFS);
678 if (!temp_buflist) {
679 /* Free the entry because it isn't valid */
680 drm_cleanup_buf_error(dev, entry);
681 mutex_unlock(&dev->struct_mutex);
682 atomic_dec(&dev->buf_alloc);
683 return -ENOMEM;
684 }
685 dma->buflist = temp_buflist;
686
687 for (i = 0; i < entry->buf_count; i++) {
688 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
689 }
690
691 dma->buf_count += entry->buf_count;
692 dma->seg_count += entry->seg_count;
693 dma->page_count += byte_count >> PAGE_SHIFT;
694 dma->byte_count += byte_count;
695
696 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
697 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
698
699 mutex_unlock(&dev->struct_mutex);
700
701 request->count = entry->buf_count;
702 request->size = size;
703
704 dma->flags = _DRM_DMA_USE_AGP;
705
706 atomic_dec(&dev->buf_alloc);
707 return 0;
708}
709EXPORT_SYMBOL(drm_addbufs_agp);
710#endif /* __OS_HAS_AGP */
711
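Editor's note: a hedged sketch of a driver carving a DMA buffer pool out of AGP space with the function above; example_add_agp_bufs() and the request values are assumptions, and AGP support (__OS_HAS_AGP) is taken as built in. On success the kernel writes the actual count and rounded size back into the request.

static int example_add_agp_bufs(struct drm_device *dev, unsigned long agp_start)
{
	struct drm_buf_desc req;

	memset(&req, 0, sizeof(req));
	req.count = 32;				/* 32 buffers... */
	req.size = 64 * 1024;			/* ...of 64 KiB each (drm_order() rounds up to a power of two) */
	req.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
	req.agp_start = agp_start;		/* offset of the pool within the AGP aperture */

	return drm_addbufs_agp(dev, &req);
}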
712int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
713{
714 struct drm_device_dma *dma = dev->dma;
715 int count;
716 int order;
717 int size;
718 int total;
719 int page_order;
720 struct drm_buf_entry *entry;
721 drm_dma_handle_t *dmah;
722 struct drm_buf *buf;
723 int alignment;
724 unsigned long offset;
725 int i;
726 int byte_count;
727 int page_count;
728 unsigned long *temp_pagelist;
729 struct drm_buf **temp_buflist;
730
731 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
732 return -EINVAL;
733
734 if (!dma)
735 return -EINVAL;
736
737 if (!capable(CAP_SYS_ADMIN))
738 return -EPERM;
739
740 count = request->count;
741 order = drm_order(request->size);
742 size = 1 << order;
743
744 DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
745 request->count, request->size, size, order, dev->queue_count);
746
747 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
748 return -EINVAL;
749 if (dev->queue_count)
750 return -EBUSY; /* Not while in use */
751
752 alignment = (request->flags & _DRM_PAGE_ALIGN)
753 ? PAGE_ALIGN(size) : size;
754 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
755 total = PAGE_SIZE << page_order;
756
757 spin_lock(&dev->count_lock);
758 if (dev->buf_use) {
759 spin_unlock(&dev->count_lock);
760 return -EBUSY;
761 }
762 atomic_inc(&dev->buf_alloc);
763 spin_unlock(&dev->count_lock);
764
765 mutex_lock(&dev->struct_mutex);
766 entry = &dma->bufs[order];
767 if (entry->buf_count) {
768 mutex_unlock(&dev->struct_mutex);
769 atomic_dec(&dev->buf_alloc);
770 return -ENOMEM; /* May only call once for each order */
771 }
772
773 if (count < 0 || count > 4096) {
774 mutex_unlock(&dev->struct_mutex);
775 atomic_dec(&dev->buf_alloc);
776 return -EINVAL;
777 }
778
779 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
780 DRM_MEM_BUFS);
781 if (!entry->buflist) {
782 mutex_unlock(&dev->struct_mutex);
783 atomic_dec(&dev->buf_alloc);
784 return -ENOMEM;
785 }
786 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
787
788 entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
789 DRM_MEM_SEGS);
790 if (!entry->seglist) {
791 drm_free(entry->buflist,
792 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
793 mutex_unlock(&dev->struct_mutex);
794 atomic_dec(&dev->buf_alloc);
795 return -ENOMEM;
796 }
797 memset(entry->seglist, 0, count * sizeof(*entry->seglist));
798
799 /* Keep the original pagelist until we know all the allocations
800 * have succeeded
801 */
802 temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
803 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
804 if (!temp_pagelist) {
805 drm_free(entry->buflist,
806 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
807 drm_free(entry->seglist,
808 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
809 mutex_unlock(&dev->struct_mutex);
810 atomic_dec(&dev->buf_alloc);
811 return -ENOMEM;
812 }
813 memcpy(temp_pagelist,
814 dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
815 DRM_DEBUG("pagelist: %d entries\n",
816 dma->page_count + (count << page_order));
817
818 entry->buf_size = size;
819 entry->page_order = page_order;
820 byte_count = 0;
821 page_count = 0;
822
823 while (entry->buf_count < count) {
824
825 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
826
827 if (!dmah) {
828 /* Set count correctly so we free the proper amount. */
829 entry->buf_count = count;
830 entry->seg_count = count;
831 drm_cleanup_buf_error(dev, entry);
832 drm_free(temp_pagelist,
833 (dma->page_count + (count << page_order))
834 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
835 mutex_unlock(&dev->struct_mutex);
836 atomic_dec(&dev->buf_alloc);
837 return -ENOMEM;
838 }
839 entry->seglist[entry->seg_count++] = dmah;
840 for (i = 0; i < (1 << page_order); i++) {
841 DRM_DEBUG("page %d @ 0x%08lx\n",
842 dma->page_count + page_count,
843 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
844 temp_pagelist[dma->page_count + page_count++]
845 = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
846 }
847 for (offset = 0;
848 offset + size <= total && entry->buf_count < count;
849 offset += alignment, ++entry->buf_count) {
850 buf = &entry->buflist[entry->buf_count];
851 buf->idx = dma->buf_count + entry->buf_count;
852 buf->total = alignment;
853 buf->order = order;
854 buf->used = 0;
855 buf->offset = (dma->byte_count + byte_count + offset);
856 buf->address = (void *)(dmah->vaddr + offset);
857 buf->bus_address = dmah->busaddr + offset;
858 buf->next = NULL;
859 buf->waiting = 0;
860 buf->pending = 0;
861 init_waitqueue_head(&buf->dma_wait);
862 buf->file_priv = NULL;
863
864 buf->dev_priv_size = dev->driver->dev_priv_size;
865 buf->dev_private = drm_alloc(buf->dev_priv_size,
866 DRM_MEM_BUFS);
867 if (!buf->dev_private) {
868 /* Set count correctly so we free the proper amount. */
869 entry->buf_count = count;
870 entry->seg_count = count;
871 drm_cleanup_buf_error(dev, entry);
872 drm_free(temp_pagelist,
873 (dma->page_count +
874 (count << page_order))
875 * sizeof(*dma->pagelist),
876 DRM_MEM_PAGES);
877 mutex_unlock(&dev->struct_mutex);
878 atomic_dec(&dev->buf_alloc);
879 return -ENOMEM;
880 }
881 memset(buf->dev_private, 0, buf->dev_priv_size);
882
883 DRM_DEBUG("buffer %d @ %p\n",
884 entry->buf_count, buf->address);
885 }
886 byte_count += PAGE_SIZE << page_order;
887 }
888
889 temp_buflist = drm_realloc(dma->buflist,
890 dma->buf_count * sizeof(*dma->buflist),
891 (dma->buf_count + entry->buf_count)
892 * sizeof(*dma->buflist), DRM_MEM_BUFS);
893 if (!temp_buflist) {
894 /* Free the entry because it isn't valid */
895 drm_cleanup_buf_error(dev, entry);
896 drm_free(temp_pagelist,
897 (dma->page_count + (count << page_order))
898 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
899 mutex_unlock(&dev->struct_mutex);
900 atomic_dec(&dev->buf_alloc);
901 return -ENOMEM;
902 }
903 dma->buflist = temp_buflist;
904
905 for (i = 0; i < entry->buf_count; i++) {
906 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
907 }
908
909	/* No allocations failed, so now we can replace the original pagelist
910 * with the new one.
911 */
912 if (dma->page_count) {
913 drm_free(dma->pagelist,
914 dma->page_count * sizeof(*dma->pagelist),
915 DRM_MEM_PAGES);
916 }
917 dma->pagelist = temp_pagelist;
918
919 dma->buf_count += entry->buf_count;
920 dma->seg_count += entry->seg_count;
921 dma->page_count += entry->seg_count << page_order;
922 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
923
924 mutex_unlock(&dev->struct_mutex);
925
926 request->count = entry->buf_count;
927 request->size = size;
928
929 if (request->flags & _DRM_PCI_BUFFER_RO)
930 dma->flags = _DRM_DMA_USE_PCI_RO;
931
932 atomic_dec(&dev->buf_alloc);
933 return 0;
934
935}
936EXPORT_SYMBOL(drm_addbufs_pci);
937
938static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
939{
940 struct drm_device_dma *dma = dev->dma;
941 struct drm_buf_entry *entry;
942 struct drm_buf *buf;
943 unsigned long offset;
944 unsigned long agp_offset;
945 int count;
946 int order;
947 int size;
948 int alignment;
949 int page_order;
950 int total;
951 int byte_count;
952 int i;
953 struct drm_buf **temp_buflist;
954
955 if (!drm_core_check_feature(dev, DRIVER_SG))
956 return -EINVAL;
957
958 if (!dma)
959 return -EINVAL;
960
961 if (!capable(CAP_SYS_ADMIN))
962 return -EPERM;
963
964 count = request->count;
965 order = drm_order(request->size);
966 size = 1 << order;
967
968 alignment = (request->flags & _DRM_PAGE_ALIGN)
969 ? PAGE_ALIGN(size) : size;
970 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
971 total = PAGE_SIZE << page_order;
972
973 byte_count = 0;
974 agp_offset = request->agp_start;
975
976 DRM_DEBUG("count: %d\n", count);
977 DRM_DEBUG("order: %d\n", order);
978 DRM_DEBUG("size: %d\n", size);
979 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
980 DRM_DEBUG("alignment: %d\n", alignment);
981 DRM_DEBUG("page_order: %d\n", page_order);
982 DRM_DEBUG("total: %d\n", total);
983
984 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
985 return -EINVAL;
986 if (dev->queue_count)
987 return -EBUSY; /* Not while in use */
988
989 spin_lock(&dev->count_lock);
990 if (dev->buf_use) {
991 spin_unlock(&dev->count_lock);
992 return -EBUSY;
993 }
994 atomic_inc(&dev->buf_alloc);
995 spin_unlock(&dev->count_lock);
996
997 mutex_lock(&dev->struct_mutex);
998 entry = &dma->bufs[order];
999 if (entry->buf_count) {
1000 mutex_unlock(&dev->struct_mutex);
1001 atomic_dec(&dev->buf_alloc);
1002 return -ENOMEM; /* May only call once for each order */
1003 }
1004
1005 if (count < 0 || count > 4096) {
1006 mutex_unlock(&dev->struct_mutex);
1007 atomic_dec(&dev->buf_alloc);
1008 return -EINVAL;
1009 }
1010
1011 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1012 DRM_MEM_BUFS);
1013 if (!entry->buflist) {
1014 mutex_unlock(&dev->struct_mutex);
1015 atomic_dec(&dev->buf_alloc);
1016 return -ENOMEM;
1017 }
1018 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
1019
1020 entry->buf_size = size;
1021 entry->page_order = page_order;
1022
1023 offset = 0;
1024
1025 while (entry->buf_count < count) {
1026 buf = &entry->buflist[entry->buf_count];
1027 buf->idx = dma->buf_count + entry->buf_count;
1028 buf->total = alignment;
1029 buf->order = order;
1030 buf->used = 0;
1031
1032 buf->offset = (dma->byte_count + offset);
1033 buf->bus_address = agp_offset + offset;
1034 buf->address = (void *)(agp_offset + offset
1035 + (unsigned long)dev->sg->virtual);
1036 buf->next = NULL;
1037 buf->waiting = 0;
1038 buf->pending = 0;
1039 init_waitqueue_head(&buf->dma_wait);
1040 buf->file_priv = NULL;
1041
1042 buf->dev_priv_size = dev->driver->dev_priv_size;
1043 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1044 if (!buf->dev_private) {
1045 /* Set count correctly so we free the proper amount. */
1046 entry->buf_count = count;
1047 drm_cleanup_buf_error(dev, entry);
1048 mutex_unlock(&dev->struct_mutex);
1049 atomic_dec(&dev->buf_alloc);
1050 return -ENOMEM;
1051 }
1052
1053 memset(buf->dev_private, 0, buf->dev_priv_size);
1054
1055 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1056
1057 offset += alignment;
1058 entry->buf_count++;
1059 byte_count += PAGE_SIZE << page_order;
1060 }
1061
1062 DRM_DEBUG("byte_count: %d\n", byte_count);
1063
1064 temp_buflist = drm_realloc(dma->buflist,
1065 dma->buf_count * sizeof(*dma->buflist),
1066 (dma->buf_count + entry->buf_count)
1067 * sizeof(*dma->buflist), DRM_MEM_BUFS);
1068 if (!temp_buflist) {
1069 /* Free the entry because it isn't valid */
1070 drm_cleanup_buf_error(dev, entry);
1071 mutex_unlock(&dev->struct_mutex);
1072 atomic_dec(&dev->buf_alloc);
1073 return -ENOMEM;
1074 }
1075 dma->buflist = temp_buflist;
1076
1077 for (i = 0; i < entry->buf_count; i++) {
1078 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1079 }
1080
1081 dma->buf_count += entry->buf_count;
1082 dma->seg_count += entry->seg_count;
1083 dma->page_count += byte_count >> PAGE_SHIFT;
1084 dma->byte_count += byte_count;
1085
1086 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1087 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1088
1089 mutex_unlock(&dev->struct_mutex);
1090
1091 request->count = entry->buf_count;
1092 request->size = size;
1093
1094 dma->flags = _DRM_DMA_USE_SG;
1095
1096 atomic_dec(&dev->buf_alloc);
1097 return 0;
1098}
1099
1100static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
1101{
1102 struct drm_device_dma *dma = dev->dma;
1103 struct drm_buf_entry *entry;
1104 struct drm_buf *buf;
1105 unsigned long offset;
1106 unsigned long agp_offset;
1107 int count;
1108 int order;
1109 int size;
1110 int alignment;
1111 int page_order;
1112 int total;
1113 int byte_count;
1114 int i;
1115 struct drm_buf **temp_buflist;
1116
1117 if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1118 return -EINVAL;
1119
1120 if (!dma)
1121 return -EINVAL;
1122
1123 if (!capable(CAP_SYS_ADMIN))
1124 return -EPERM;
1125
1126 count = request->count;
1127 order = drm_order(request->size);
1128 size = 1 << order;
1129
1130 alignment = (request->flags & _DRM_PAGE_ALIGN)
1131 ? PAGE_ALIGN(size) : size;
1132 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1133 total = PAGE_SIZE << page_order;
1134
1135 byte_count = 0;
1136 agp_offset = request->agp_start;
1137
1138 DRM_DEBUG("count: %d\n", count);
1139 DRM_DEBUG("order: %d\n", order);
1140 DRM_DEBUG("size: %d\n", size);
1141 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1142 DRM_DEBUG("alignment: %d\n", alignment);
1143 DRM_DEBUG("page_order: %d\n", page_order);
1144 DRM_DEBUG("total: %d\n", total);
1145
1146 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1147 return -EINVAL;
1148 if (dev->queue_count)
1149 return -EBUSY; /* Not while in use */
1150
1151 spin_lock(&dev->count_lock);
1152 if (dev->buf_use) {
1153 spin_unlock(&dev->count_lock);
1154 return -EBUSY;
1155 }
1156 atomic_inc(&dev->buf_alloc);
1157 spin_unlock(&dev->count_lock);
1158
1159 mutex_lock(&dev->struct_mutex);
1160 entry = &dma->bufs[order];
1161 if (entry->buf_count) {
1162 mutex_unlock(&dev->struct_mutex);
1163 atomic_dec(&dev->buf_alloc);
1164 return -ENOMEM; /* May only call once for each order */
1165 }
1166
1167 if (count < 0 || count > 4096) {
1168 mutex_unlock(&dev->struct_mutex);
1169 atomic_dec(&dev->buf_alloc);
1170 return -EINVAL;
1171 }
1172
1173 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1174 DRM_MEM_BUFS);
1175 if (!entry->buflist) {
1176 mutex_unlock(&dev->struct_mutex);
1177 atomic_dec(&dev->buf_alloc);
1178 return -ENOMEM;
1179 }
1180 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
1181
1182 entry->buf_size = size;
1183 entry->page_order = page_order;
1184
1185 offset = 0;
1186
1187 while (entry->buf_count < count) {
1188 buf = &entry->buflist[entry->buf_count];
1189 buf->idx = dma->buf_count + entry->buf_count;
1190 buf->total = alignment;
1191 buf->order = order;
1192 buf->used = 0;
1193
1194 buf->offset = (dma->byte_count + offset);
1195 buf->bus_address = agp_offset + offset;
1196 buf->address = (void *)(agp_offset + offset);
1197 buf->next = NULL;
1198 buf->waiting = 0;
1199 buf->pending = 0;
1200 init_waitqueue_head(&buf->dma_wait);
1201 buf->file_priv = NULL;
1202
1203 buf->dev_priv_size = dev->driver->dev_priv_size;
1204 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1205 if (!buf->dev_private) {
1206 /* Set count correctly so we free the proper amount. */
1207 entry->buf_count = count;
1208 drm_cleanup_buf_error(dev, entry);
1209 mutex_unlock(&dev->struct_mutex);
1210 atomic_dec(&dev->buf_alloc);
1211 return -ENOMEM;
1212 }
1213 memset(buf->dev_private, 0, buf->dev_priv_size);
1214
1215 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1216
1217 offset += alignment;
1218 entry->buf_count++;
1219 byte_count += PAGE_SIZE << page_order;
1220 }
1221
1222 DRM_DEBUG("byte_count: %d\n", byte_count);
1223
1224 temp_buflist = drm_realloc(dma->buflist,
1225 dma->buf_count * sizeof(*dma->buflist),
1226 (dma->buf_count + entry->buf_count)
1227 * sizeof(*dma->buflist), DRM_MEM_BUFS);
1228 if (!temp_buflist) {
1229 /* Free the entry because it isn't valid */
1230 drm_cleanup_buf_error(dev, entry);
1231 mutex_unlock(&dev->struct_mutex);
1232 atomic_dec(&dev->buf_alloc);
1233 return -ENOMEM;
1234 }
1235 dma->buflist = temp_buflist;
1236
1237 for (i = 0; i < entry->buf_count; i++) {
1238 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1239 }
1240
1241 dma->buf_count += entry->buf_count;
1242 dma->seg_count += entry->seg_count;
1243 dma->page_count += byte_count >> PAGE_SHIFT;
1244 dma->byte_count += byte_count;
1245
1246 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1247 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1248
1249 mutex_unlock(&dev->struct_mutex);
1250
1251 request->count = entry->buf_count;
1252 request->size = size;
1253
1254 dma->flags = _DRM_DMA_USE_FB;
1255
1256 atomic_dec(&dev->buf_alloc);
1257 return 0;
1258}
1259
1260
1261/**
1262 * Add buffers for DMA transfers (ioctl).
1263 *
1264 * \param inode device inode.
1265 * \param file_priv DRM file private.
1266 * \param cmd command.
1267 * \param arg pointer to a struct drm_buf_desc request.
1268 * \return zero on success or a negative number on failure.
1269 *
1270 * According to the memory type specified in drm_buf_desc::flags and the
1271 * build options, it dispatches the call either to addbufs_agp(),
1272 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1273 * PCI memory respectively.
1274 */
1275int drm_addbufs(struct drm_device *dev, void *data,
1276 struct drm_file *file_priv)
1277{
1278 struct drm_buf_desc *request = data;
1279 int ret;
1280
1281 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1282 return -EINVAL;
1283
1284#if __OS_HAS_AGP
1285 if (request->flags & _DRM_AGP_BUFFER)
1286 ret = drm_addbufs_agp(dev, request);
1287 else
1288#endif
1289 if (request->flags & _DRM_SG_BUFFER)
1290 ret = drm_addbufs_sg(dev, request);
1291 else if (request->flags & _DRM_FB_BUFFER)
1292 ret = drm_addbufs_fb(dev, request);
1293 else
1294 ret = drm_addbufs_pci(dev, request);
1295
1296 return ret;
1297}
1298
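Editor's note: a hedged userspace sketch of driving this dispatch through the ioctl interface; example_request_pci_bufs(), the counts and the header path are assumptions, not taken from this file. With none of the AGP/SG/FB flags set, the request falls through to drm_addbufs_pci() above (which requires CAP_SYS_ADMIN).

#include <sys/ioctl.h>
#include <string.h>
#include "drm.h"		/* exported userspace header (e.g. shipped with libdrm) */

int example_request_pci_bufs(int fd)
{
	struct drm_buf_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.count = 16;		/* ask for 16 buffers... */
	desc.size = 4096;		/* ...of one page each */
	desc.flags = _DRM_PAGE_ALIGN;	/* no _DRM_AGP_BUFFER/_DRM_SG_BUFFER/_DRM_FB_BUFFER => PCI path */

	return ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);	/* 0 on success */
}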
1299/**
1300 * Get information about the buffer mappings.
1301 *
1302 * This was originally meant for debugging purposes, or for use by a sophisticated
1303 * client library to determine how best to use the available buffers (e.g.,
1304 * large buffers can be used for image transfer).
1305 *
1306 * \param inode device inode.
1307 * \param file_priv DRM file private.
1308 * \param cmd command.
1309 * \param arg pointer to a drm_buf_info structure.
1310 * \return zero on success or a negative number on failure.
1311 *
1312 * Increments drm_device::buf_use while holding the drm_device::count_lock
1313 * lock, preventing allocation of more buffers after this call. Information
1314 * about each requested buffer is then copied into user space.
1315 */
1316int drm_infobufs(struct drm_device *dev, void *data,
1317 struct drm_file *file_priv)
1318{
1319 struct drm_device_dma *dma = dev->dma;
1320 struct drm_buf_info *request = data;
1321 int i;
1322 int count;
1323
1324 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1325 return -EINVAL;
1326
1327 if (!dma)
1328 return -EINVAL;
1329
1330 spin_lock(&dev->count_lock);
1331 if (atomic_read(&dev->buf_alloc)) {
1332 spin_unlock(&dev->count_lock);
1333 return -EBUSY;
1334 }
1335 ++dev->buf_use; /* Can't allocate more after this call */
1336 spin_unlock(&dev->count_lock);
1337
1338 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1339 if (dma->bufs[i].buf_count)
1340 ++count;
1341 }
1342
1343 DRM_DEBUG("count = %d\n", count);
1344
1345 if (request->count >= count) {
1346 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1347 if (dma->bufs[i].buf_count) {
1348 struct drm_buf_desc __user *to =
1349 &request->list[count];
1350 struct drm_buf_entry *from = &dma->bufs[i];
1351 struct drm_freelist *list = &dma->bufs[i].freelist;
1352 if (copy_to_user(&to->count,
1353 &from->buf_count,
1354 sizeof(from->buf_count)) ||
1355 copy_to_user(&to->size,
1356 &from->buf_size,
1357 sizeof(from->buf_size)) ||
1358 copy_to_user(&to->low_mark,
1359 &list->low_mark,
1360 sizeof(list->low_mark)) ||
1361 copy_to_user(&to->high_mark,
1362 &list->high_mark,
1363 sizeof(list->high_mark)))
1364 return -EFAULT;
1365
1366 DRM_DEBUG("%d %d %d %d %d\n",
1367 i,
1368 dma->bufs[i].buf_count,
1369 dma->bufs[i].buf_size,
1370 dma->bufs[i].freelist.low_mark,
1371 dma->bufs[i].freelist.high_mark);
1372 ++count;
1373 }
1374 }
1375 }
1376 request->count = count;
1377
1378 return 0;
1379}
1380
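Editor's note: a hedged sketch of the two-call pattern this ioctl expects from userspace (example_query_buf_pools() is hypothetical and reuses the includes from the addbufs sketch above). The first call, with count still zero, only reports how many buffer pools exist; the second call, with a large enough list, copies the descriptors.

int example_query_buf_pools(int fd, struct drm_buf_desc *descs, int max)
{
	struct drm_buf_info info;

	memset(&info, 0, sizeof(info));
	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
		return -1;		/* info.count now holds the number of pools */
	if (info.count > max)
		return -1;		/* caller's array is too small */

	info.list = descs;
	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
		return -1;		/* descs[0..info.count) are now filled in */

	return info.count;
}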
1381/**
1382 * Specifies a low and high water mark for buffer allocation
1383 *
1384 * \param inode device inode.
1385 * \param file_priv DRM file private.
1386 * \param cmd command.
1387 * \param arg a pointer to a drm_buf_desc structure.
1388 * \return zero on success or a negative number on failure.
1389 *
1390 * Verifies that the size order is within the admissible range and updates the
1391 * low and high water marks of the respective drm_device_dma::bufs entry.
1392 *
1393 * \note This ioctl is deprecated and mostly never used.
1394 */
1395int drm_markbufs(struct drm_device *dev, void *data,
1396 struct drm_file *file_priv)
1397{
1398 struct drm_device_dma *dma = dev->dma;
1399 struct drm_buf_desc *request = data;
1400 int order;
1401 struct drm_buf_entry *entry;
1402
1403 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1404 return -EINVAL;
1405
1406 if (!dma)
1407 return -EINVAL;
1408
1409 DRM_DEBUG("%d, %d, %d\n",
1410 request->size, request->low_mark, request->high_mark);
1411 order = drm_order(request->size);
1412 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1413 return -EINVAL;
1414 entry = &dma->bufs[order];
1415
1416 if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1417 return -EINVAL;
1418 if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1419 return -EINVAL;
1420
1421 entry->freelist.low_mark = request->low_mark;
1422 entry->freelist.high_mark = request->high_mark;
1423
1424 return 0;
1425}
1426
1427/**
1428 * Unreserve the buffers in list, previously reserved using drmDMA.
1429 *
1430 * \param inode device inode.
1431 * \param file_priv DRM file private.
1432 * \param cmd command.
1433 * \param arg pointer to a drm_buf_free structure.
1434 * \return zero on success or a negative number on failure.
1435 *
1436 * Calls free_buffer() for each used buffer.
1437 * This function is primarily used for debugging.
1438 */
1439int drm_freebufs(struct drm_device *dev, void *data,
1440 struct drm_file *file_priv)
1441{
1442 struct drm_device_dma *dma = dev->dma;
1443 struct drm_buf_free *request = data;
1444 int i;
1445 int idx;
1446 struct drm_buf *buf;
1447
1448 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1449 return -EINVAL;
1450
1451 if (!dma)
1452 return -EINVAL;
1453
1454 DRM_DEBUG("%d\n", request->count);
1455 for (i = 0; i < request->count; i++) {
1456 if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1457 return -EFAULT;
1458 if (idx < 0 || idx >= dma->buf_count) {
1459 DRM_ERROR("Index %d (of %d max)\n",
1460 idx, dma->buf_count - 1);
1461 return -EINVAL;
1462 }
1463 buf = dma->buflist[idx];
1464 if (buf->file_priv != file_priv) {
1465 DRM_ERROR("Process %d freeing buffer not owned\n",
1466 task_pid_nr(current));
1467 return -EINVAL;
1468 }
1469 drm_free_buffer(dev, buf);
1470 }
1471
1472 return 0;
1473}
1474
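Editor's note: a small hedged sketch of handing a buffer back from userspace (example_free_buf() is hypothetical and reuses the includes from the addbufs sketch); the list carries buffer indices, i.e. drm_buf::idx values.

int example_free_buf(int fd, int idx)
{
	struct drm_buf_free req;

	req.count = 1;		/* number of indices in the list */
	req.list = &idx;	/* buffers to hand back to the free list */

	return ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
}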
1475/**
1476 * Maps all of the DMA buffers into client-virtual space (ioctl).
1477 *
1478 * \param inode device inode.
1479 * \param file_priv DRM file private.
1480 * \param cmd command.
1481 * \param arg pointer to a drm_buf_map structure.
1482 * \return zero on success or a negative number on failure.
1483 *
1484 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
1485 * about each buffer into user space. For PCI buffers, it calls do_mmap() with
1486 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1487 * drm_mmap_dma().
1488 */
1489int drm_mapbufs(struct drm_device *dev, void *data,
1490 struct drm_file *file_priv)
1491{
1492 struct drm_device_dma *dma = dev->dma;
1493 int retcode = 0;
1494 const int zero = 0;
1495 unsigned long virtual;
1496 unsigned long address;
1497 struct drm_buf_map *request = data;
1498 int i;
1499
1500 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1501 return -EINVAL;
1502
1503 if (!dma)
1504 return -EINVAL;
1505
1506 spin_lock(&dev->count_lock);
1507 if (atomic_read(&dev->buf_alloc)) {
1508 spin_unlock(&dev->count_lock);
1509 return -EBUSY;
1510 }
1511 dev->buf_use++; /* Can't allocate more after this call */
1512 spin_unlock(&dev->count_lock);
1513
1514 if (request->count >= dma->buf_count) {
1515 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1516 || (drm_core_check_feature(dev, DRIVER_SG)
1517 && (dma->flags & _DRM_DMA_USE_SG))
1518 || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1519 && (dma->flags & _DRM_DMA_USE_FB))) {
1520 struct drm_map *map = dev->agp_buffer_map;
1521 unsigned long token = dev->agp_buffer_token;
1522
1523 if (!map) {
1524 retcode = -EINVAL;
1525 goto done;
1526 }
1527 down_write(&current->mm->mmap_sem);
1528 virtual = do_mmap(file_priv->filp, 0, map->size,
1529 PROT_READ | PROT_WRITE,
1530 MAP_SHARED,
1531 token);
1532 up_write(&current->mm->mmap_sem);
1533 } else {
1534 down_write(&current->mm->mmap_sem);
1535 virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
1536 PROT_READ | PROT_WRITE,
1537 MAP_SHARED, 0);
1538 up_write(&current->mm->mmap_sem);
1539 }
1540 if (virtual > -1024UL) {
1541 /* Real error */
1542 retcode = (signed long)virtual;
1543 goto done;
1544 }
1545 request->virtual = (void __user *)virtual;
1546
1547 for (i = 0; i < dma->buf_count; i++) {
1548 if (copy_to_user(&request->list[i].idx,
1549 &dma->buflist[i]->idx,
1550 sizeof(request->list[0].idx))) {
1551 retcode = -EFAULT;
1552 goto done;
1553 }
1554 if (copy_to_user(&request->list[i].total,
1555 &dma->buflist[i]->total,
1556 sizeof(request->list[0].total))) {
1557 retcode = -EFAULT;
1558 goto done;
1559 }
1560 if (copy_to_user(&request->list[i].used,
1561 &zero, sizeof(zero))) {
1562 retcode = -EFAULT;
1563 goto done;
1564 }
1565 address = virtual + dma->buflist[i]->offset; /* *** */
1566 if (copy_to_user(&request->list[i].address,
1567 &address, sizeof(address))) {
1568 retcode = -EFAULT;
1569 goto done;
1570 }
1571 }
1572 }
1573 done:
1574 request->count = dma->buf_count;
1575 DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1576
1577 return retcode;
1578}
1579
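Editor's note: a hedged userspace counterpart for the mapping ioctl above (example_map_all_bufs() and the caller-supplied array are assumptions; includes as in the addbufs sketch). If count is smaller than the kernel's total buffer count, nothing is mapped and only the count is reported back.

int example_map_all_bufs(int fd, struct drm_buf_pub *pubs, int max)
{
	struct drm_buf_map bm;

	memset(&bm, 0, sizeof(bm));
	bm.count = max;		/* must be >= the kernel's dma->buf_count */
	bm.list = pubs;

	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm))
		return -1;

	/* bm.virtual is the base of the new mapping; for each buffer i,
	 * pubs[i].address points into it and pubs[i].idx/.total describe it. */
	return bm.count;
}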
1580/**
1581 * Compute size order. Returns the exponent of the smallest power of two which
1582 * is greater than or equal to the given number.
1583 *
1584 * \param size size.
1585 * \return order.
1586 *
1587 * \todo Can be made faster.
1588 */
1589int drm_order(unsigned long size)
1590{
1591 int order;
1592 unsigned long tmp;
1593
1594 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1595
1596 if (size & (size - 1))
1597 ++order;
1598
1599 return order;
1600}
1601EXPORT_SYMBOL(drm_order);
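Editor's note: a few worked values for drm_order(), added as an illustration; it returns ceil(log2(size)), i.e. the exponent of the smallest power of two that is greater than or equal to size.

/*
 *   drm_order(1)     == 0
 *   drm_order(4096)  == 12   (one 4 KiB page)
 *   drm_order(4097)  == 13   (rounded up to 8192)
 *   drm_order(65536) == 16
 */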