author     Dave Airlie <airlied@starflyer.(none)>    2005-07-10 00:46:12 -0400
committer  Dave Airlie <airlied@linux.ie>            2005-07-10 00:46:12 -0400
commit     b84397d6390ef04e8080d66bf528418ab5e75dc0 (patch)
tree       39c78eb64f60eee701cf31663e37370f402603d3 /drivers/char/drm/drm_bufs.c
parent     2d0f9eaff8e1d08b9707f5d24fe6b0ac95d231e3 (diff)
drm: add framebuffer maps
This patch makes drmAddBufs/drmMapBufs able to handle buffers in video memory.
It adds a new buffer type, DRM_FB_BUFFER, which works like AGP memory but
uses video memory.
From: Austin Yuan <austinyuan@viatech.com.cn>
Signed-off-by: Dave Airlie <airlied@linux.ie>
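
For illustration only (not part of the patch), a userspace caller could request framebuffer-backed buffers through the existing addbufs ioctl by setting the new flag. This is a minimal sketch using struct drm_buf_desc and DRM_IOCTL_ADD_BUFS from drm.h; the buffer count/size and fb_area_offset are hypothetical values that a real driver or client would choose, and the header path may differ per installation:

#include <sys/ioctl.h>
#include <drm/drm.h>	/* struct drm_buf_desc, DRM_IOCTL_ADD_BUFS, _DRM_FB_BUFFER */

/* Sketch: ask the kernel for 32 page-aligned buffers of 64 KiB each,
 * carved out of video memory starting at fb_area_offset (hypothetical,
 * driver-specific).  This mirrors what drm_addbufs_fb() below expects:
 * _DRM_FB_BUFFER selects the new path and agp_start is reused as the
 * offset of the buffer region in the framebuffer. */
static int add_fb_buffers(int fd, unsigned long fb_area_offset)
{
	struct drm_buf_desc desc = {
		.count     = 32,
		.size      = 64 * 1024,
		.flags     = _DRM_FB_BUFFER | _DRM_PAGE_ALIGN,
		.agp_start = fb_area_offset,
	};

	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) < 0)
		return -1;

	/* On success the kernel writes back the count and size it granted. */
	return desc.count;
}
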
Diffstat (limited to 'drivers/char/drm/drm_bufs.c')
-rw-r--r--   drivers/char/drm/drm_bufs.c   175
1 files changed, 173 insertions, 2 deletions
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 89f301ffd97e..7c20dc344b99 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -935,6 +935,172 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
 	return 0;
 }
 
+int drm_addbufs_fb(struct inode *inode, struct file *filp,
+		   unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_desc_t request;
+	drm_buf_entry_t *entry;
+	drm_buf_t *buf;
+	unsigned long offset;
+	unsigned long agp_offset;
+	int count;
+	int order;
+	int size;
+	int alignment;
+	int page_order;
+	int total;
+	int byte_count;
+	int i;
+	drm_buf_t **temp_buflist;
+	drm_buf_desc_t __user *argp = (void __user *)arg;
+
+	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	if (copy_from_user(&request, argp, sizeof(request)))
+		return -EFAULT;
+
+	count = request.count;
+	order = drm_order(request.size);
+	size = 1 << order;
+
+	alignment = (request.flags & _DRM_PAGE_ALIGN)
+	    ? PAGE_ALIGN(size) : size;
+	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
+
+	byte_count = 0;
+	agp_offset = request.agp_start;
+
+	DRM_DEBUG("count: %d\n", count);
+	DRM_DEBUG("order: %d\n", order);
+	DRM_DEBUG("size: %d\n", size);
+	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+	DRM_DEBUG("alignment: %d\n", alignment);
+	DRM_DEBUG("page_order: %d\n", page_order);
+	DRM_DEBUG("total: %d\n", total);
+
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+	if (dev->queue_count)
+		return -EBUSY;	/* Not while in use */
+
+	spin_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	spin_unlock(&dev->count_lock);
+
+	down(&dev->struct_sem);
+	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
+	}
+
+	if (count < 0 || count > 4096) {
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
+	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+				   DRM_MEM_BUFS);
+	if (!entry->buflist) {
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+	entry->buf_size = size;
+	entry->page_order = page_order;
+
+	offset = 0;
+
+	while (entry->buf_count < count) {
+		buf = &entry->buflist[entry->buf_count];
+		buf->idx = dma->buf_count + entry->buf_count;
+		buf->total = alignment;
+		buf->order = order;
+		buf->used = 0;
+
+		buf->offset = (dma->byte_count + offset);
+		buf->bus_address = agp_offset + offset;
+		buf->address = (void *)(agp_offset + offset);
+		buf->next = NULL;
+		buf->waiting = 0;
+		buf->pending = 0;
+		init_waitqueue_head(&buf->dma_wait);
+		buf->filp = NULL;
+
+		buf->dev_priv_size = dev->driver->dev_priv_size;
+		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+		if (!buf->dev_private) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			drm_cleanup_buf_error(dev, entry);
+			up(&dev->struct_sem);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
+		}
+		memset(buf->dev_private, 0, buf->dev_priv_size);
+
+		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+		offset += alignment;
+		entry->buf_count++;
+		byte_count += PAGE_SIZE << page_order;
+	}
+
+	DRM_DEBUG("byte_count: %d\n", byte_count);
+
+	temp_buflist = drm_realloc(dma->buflist,
+				   dma->buf_count * sizeof(*dma->buflist),
+				   (dma->buf_count + entry->buf_count)
+				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+	if (!temp_buflist) {
+		/* Free the entry because it isn't valid */
+		drm_cleanup_buf_error(dev, entry);
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	dma->buflist = temp_buflist;
+
+	for (i = 0; i < entry->buf_count; i++) {
+		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+	}
+
+	dma->buf_count += entry->buf_count;
+	dma->byte_count += byte_count;
+
+	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+	up(&dev->struct_sem);
+
+	request.count = entry->buf_count;
+	request.size = size;
+
+	if (copy_to_user(argp, &request, sizeof(request)))
+		return -EFAULT;
+
+	dma->flags = _DRM_DMA_USE_FB;
+
+	atomic_dec(&dev->buf_alloc);
+	return 0;
+}
+
 /**
  * Add buffers for DMA transfers (ioctl).
  *
@@ -970,6 +1136,8 @@ int drm_addbufs( struct inode *inode, struct file *filp,
 #endif
 	if ( request.flags & _DRM_SG_BUFFER )
 		return drm_addbufs_sg( inode, filp, cmd, arg );
+	else if ( request.flags & _DRM_FB_BUFFER)
+		return drm_addbufs_fb( inode, filp, cmd, arg );
 	else
 		return drm_addbufs_pci( inode, filp, cmd, arg );
 }
@@ -1214,8 +1382,11 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
 		return -EFAULT;
 
 	if ( request.count >= dma->buf_count ) {
-		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
-		    (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) ) {
+		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+		    || (drm_core_check_feature(dev, DRIVER_SG)
+			&& (dma->flags & _DRM_DMA_USE_SG))
+		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
+			&& (dma->flags & _DRM_DMA_USE_FB))) {
 			drm_map_t *map = dev->agp_buffer_map;
 
 			if ( !map ) {
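
The hunk above lets drm_mapbufs() treat _DRM_DMA_USE_FB like the AGP and SG cases, exposing the buffers through dev->agp_buffer_map. For illustration only, once buffers have been added as in the earlier sketch, userspace could map and enumerate them with the usual libdrm calls (a minimal sketch assuming libdrm's xf86drm.h API and a DRM fd on which FB buffers were already added):

#include <stdio.h>
#include <xf86drm.h>	/* drmMapBufs()/drmUnmapBufs() from libdrm */

/* Sketch: map all previously added buffers (including FB-backed ones,
 * now that drm_mapbufs handles _DRM_DMA_USE_FB) and print where each
 * landed in the process address space. */
static int map_and_list_buffers(int fd)
{
	drmBufMapPtr bufs = drmMapBufs(fd);
	int i;

	if (!bufs)
		return -1;

	for (i = 0; i < bufs->count; i++)
		printf("buf %d: %d bytes at %p\n",
		       bufs->list[i].idx, bufs->list[i].total,
		       bufs->list[i].address);

	drmUnmapBufs(bufs);
	return 0;
}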