author     Jesse Barnes <jbarnes@virtuousgeek.org>	2008-11-05 13:31:53 -0500
committer  Dave Airlie <airlied@linux.ie>	2008-12-29 02:47:22 -0500
commit     a2c0a97b784f837300f7b0869c82ab712c600952
tree       aca1cdf3d32e1cfa7387350483f6a70c74a24ffd	/drivers/gpu
parent     a9587470f753d670d910293ecbf1c7b66c99de50
drm: GEM mmap support
Add core support for mapping of GEM objects. Drivers should provide a
vm_operations_struct if they want to support page faulting of objects.
The code for handling GEM object offsets was taken from TTM, which was
written by Thomas Hellström.
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
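For illustration only (this glue is not part of the patch; example_gem_fault, example_gem_vm_ops and example_driver are hypothetical names, though the shape matches how GEM drivers of this era wired things up): a driver that sets DRIVER_GEM points its file_operations.mmap at drm_gem_mmap() and supplies the vm_operations_struct mentioned above through the new gem_vm_ops hook, roughly like so:

/* Hypothetical driver glue for the interfaces added by this patch. */
static int example_gem_fault(struct vm_area_struct *vma,
			     struct vm_fault *vmf);	/* sketched after the drm_gem.c diff below */

static struct vm_operations_struct example_gem_vm_ops = {
	.fault = example_gem_fault,
};

static struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM /* | DRIVER_... */,
	.gem_vm_ops = &example_gem_vm_ops,
	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.mmap = drm_gem_mmap,	/* unknown offsets fall back to drm_mmap() */
	},
};

Offsets that are not found in the GEM offset hash fall through to the old drm_mmap() path, so legacy map types keep working on the same file descriptor.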
Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/drm_bufs.c    |   6
 drivers/gpu/drm/drm_drv.c     |   6
 drivers/gpu/drm/drm_fops.c    |  12
 drivers/gpu/drm/drm_gem.c     | 109
 drivers/gpu/drm/drm_hashtab.c |   2
 drivers/gpu/drm/drm_vm.c      |   7
 6 files changed, 139 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index dc3ce3e0a0a4..7fb690bcd492 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -262,6 +262,9 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
 		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
 
 		break;
+	case _DRM_GEM:
+		DRM_ERROR("tried to rmmap GEM object\n");
+		break;
 	}
 	case _DRM_SCATTER_GATHER:
 		if (!dev->sg) {
@@ -429,6 +432,9 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
 		dmah.size = map->size;
 		__drm_pci_free(dev, &dmah);
 		break;
+	case _DRM_GEM:
+		DRM_ERROR("tried to rmmap GEM object\n");
+		break;
 	}
 	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 9f04ca37df6d..98a781375f60 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -209,6 +209,7 @@ int drm_lastclose(struct drm_device * dev)
 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		drm_dma_takedown(dev);
 
+	dev->dev_mapping = NULL;
 	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG("lastclose completed\n");
@@ -273,6 +274,8 @@ EXPORT_SYMBOL(drm_init);
  */
 static void drm_cleanup(struct drm_device * dev)
 {
+	struct drm_driver *driver = dev->driver;
+
 	DRM_DEBUG("\n");
 
 	if (!dev) {
@@ -304,6 +307,9 @@ static void drm_cleanup(struct drm_device * dev)
 	drm_ht_remove(&dev->map_hash);
 	drm_ctxbitmap_cleanup(dev);
 
+	if (driver->driver_features & DRIVER_GEM)
+		drm_gem_destroy(dev);
+
 	drm_put_minor(&dev->primary);
 	if (drm_put_dev(dev))
 		DRM_ERROR("Cannot unload module\n");
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index f2285237df49..3a6c439652a5 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -133,11 +133,21 @@ int drm_open(struct inode *inode, struct file *filp)
 		spin_lock(&dev->count_lock);
 		if (!dev->open_count++) {
 			spin_unlock(&dev->count_lock);
-			return drm_setup(dev);
+			retcode = drm_setup(dev);
+			goto out;
 		}
 		spin_unlock(&dev->count_lock);
 	}
 
+out:
+	mutex_lock(&dev->struct_mutex);
+	if (dev->dev_mapping == NULL)
+		dev->dev_mapping = inode->i_mapping;
+	else if (dev->dev_mapping != inode->i_mapping)
+		WARN(1, "dev->dev_mapping not inode mapping (%p expected %p)\n",
+		     dev->dev_mapping, inode->i_mapping);
+	mutex_unlock(&dev->struct_mutex);
+
 	return retcode;
 }
 EXPORT_SYMBOL(drm_open);
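The drm_open() hunk above records the inode's address_space in dev->dev_mapping. One reason GEM wants this (a sketch of a later, driver-side use, not something this patch adds; example_gem_release_mmap and its mmap_offset parameter are assumptions): when an object is about to be moved or evicted, the driver can tear down any CPU mappings of the object's fake offset range so that the next access re-faults:

/* Hypothetical: invalidate userspace PTEs covering obj's fake mmap range.
 * mmap_offset is the byte offset the driver reserved for this object. */
static void example_gem_release_mmap(struct drm_gem_object *obj,
				     loff_t mmap_offset)
{
	struct drm_device *dev = obj->dev;

	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
				    mmap_offset, obj->size, 1);
}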
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ccd1afdede02..b3939de6affd 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -64,6 +64,13 @@
  * up at a later date, and as our interface with shmfs for memory allocation.
  */
 
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+
 /**
  * Initialize the GEM device fields
  */
@@ -71,6 +78,8 @@
 int
 drm_gem_init(struct drm_device *dev)
 {
+	struct drm_gem_mm *mm;
+
 	spin_lock_init(&dev->object_name_lock);
 	idr_init(&dev->object_name_idr);
 	atomic_set(&dev->object_count, 0);
@@ -79,9 +88,41 @@ drm_gem_init(struct drm_device *dev)
 	atomic_set(&dev->pin_memory, 0);
 	atomic_set(&dev->gtt_count, 0);
 	atomic_set(&dev->gtt_memory, 0);
+
+	mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+	if (!mm) {
+		DRM_ERROR("out of memory\n");
+		return -ENOMEM;
+	}
+
+	dev->mm_private = mm;
+
+	if (drm_ht_create(&mm->offset_hash, 19)) {
+		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+		return -ENOMEM;
+	}
+
+	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+			DRM_FILE_PAGE_OFFSET_SIZE)) {
+		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+		drm_ht_remove(&mm->offset_hash);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
+void
+drm_gem_destroy(struct drm_device *dev)
+{
+	struct drm_gem_mm *mm = dev->mm_private;
+
+	drm_mm_takedown(&mm->offset_manager);
+	drm_ht_remove(&mm->offset_hash);
+	drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+	dev->mm_private = NULL;
+}
+
 /**
  * Allocate a GEM object of the specified size with shmfs backing store
  */
@@ -419,3 +460,71 @@ drm_gem_object_handle_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_handle_free);
 
+/**
+ * drm_gem_mmap - memory map routine for GEM objects
+ * @filp: DRM file pointer
+ * @vma: VMA for the area to be mapped
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will end up here.
+ *
+ * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created when the GTT map ioctl was called on
+ * the object), we set up the driver fault handler so that any accesses
+ * to the object can be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring.
+ */
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map *map = NULL;
+	struct drm_gem_object *obj;
+	struct drm_hash_item *hash;
+	unsigned long prot;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+		mutex_unlock(&dev->struct_mutex);
+		return drm_mmap(filp, vma);
+	}
+
+	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+	if (!map ||
+	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
+		ret = -EPERM;
+		goto out_unlock;
+	}
+
+	/* Check for valid size. */
+	if (map->size < vma->vm_end - vma->vm_start) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	obj = map->handle;
+	if (!obj->dev->driver->gem_vm_ops) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+	vma->vm_ops = obj->dev->driver->gem_vm_ops;
+	vma->vm_private_data = map->handle;
+	/* FIXME: use pgprot_writecombine when available */
+	prot = pgprot_val(vma->vm_page_prot);
+	prot |= _PAGE_CACHE_WC;
+	vma->vm_page_prot = __pgprot(prot);
+
+	vma->vm_file = filp;	/* Needed for drm_vm_open() */
+	drm_vm_open_locked(vma);
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_mmap);
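drm_gem_mmap() above deliberately maps nothing itself: it validates the fake offset, installs the driver's gem_vm_ops, and leaves page faults to populate the range. A minimal sketch of such a fault handler follows (the binding and PFN-lookup steps are driver-specific; example_gem_bind and example_gem_pfn are hypothetical helpers standing in for them):

static int example_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;	/* set by drm_gem_mmap() */
	struct drm_device *dev = obj->dev;
	unsigned long page_offset, pfn;
	int ret;

	/* Which page of the object did the CPU touch? */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start)
			>> PAGE_SHIFT;

	mutex_lock(&dev->struct_mutex);

	ret = example_gem_bind(obj);		/* hypothetical: make obj resident, e.g. in the GTT */
	if (ret)
		goto unlock;

	pfn = example_gem_pfn(obj, page_offset);	/* hypothetical: PFN backing that page */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

Because drm_gem_mmap() marks the VMA with VM_PFNMAP, the handler inserts raw PFNs with vm_insert_pfn() rather than struct pages, which suits GTT apertures and other non-RAM backings.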
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 33160673a7b7..af539f7d87dd 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -127,6 +127,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_insert_item);
 
 /*
  * Just insert an item and return any "bits" bit key that hasn't been
@@ -188,6 +189,7 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 	ht->fill--;
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_remove_item);
 
 void drm_ht_remove(struct drm_open_hash *ht)
 {
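drm_ht_insert_item() and drm_ht_remove_item() are exported here so that drivers can publish and retire the fake offsets that drm_gem_mmap() looks up in mm->offset_hash. A sketch of the allocation side (not part of this patch; it assumes the caller keeps a struct drm_map_list with an allocated drm_map per object, and example_gem_create_mmap_offset is a hypothetical name):

/* Reserve a fake file offset for obj and hash it so that drm_gem_mmap()
 * can find the object from vma->vm_pgoff. */
static int example_gem_create_mmap_offset(struct drm_gem_object *obj,
					  struct drm_map_list *list)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_mm_node *node;
	int ret;

	list->map->type = _DRM_GEM;
	list->map->size = obj->size;
	list->map->handle = obj;

	/* Carve a page-granular range out of the fake-offset manager. */
	node = drm_mm_search_free(&mm->offset_manager,
				  obj->size / PAGE_SIZE, 0, 0);
	if (!node)
		return -ENOMEM;
	node = drm_mm_get_block(node, obj->size / PAGE_SIZE, 0);
	if (!node)
		return -ENOMEM;

	/* The hash key is the page offset userspace passes to mmap(). */
	list->hash.key = node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret)
		drm_mm_put_block(node);
	return ret;
}

Userspace would then mmap() the DRM fd at node->start << PAGE_SHIFT; that page offset lands in vma->vm_pgoff, where drm_gem_mmap() finds it.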
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index c234c6f24a8d..3ffae021d280 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -267,6 +267,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 				dmah.size = map->size;
 				__drm_pci_free(dev, &dmah);
 				break;
+			case _DRM_GEM:
+				DRM_ERROR("tried to rmmap GEM object\n");
+				break;
 			}
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 		}
@@ -399,7 +402,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
  * Create a new drm_vma_entry structure as the \p vma private data entry and
  * add it to drm_device::vmalist.
  */
-static void drm_vm_open_locked(struct vm_area_struct *vma)
+void drm_vm_open_locked(struct vm_area_struct *vma)
 {
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
@@ -540,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
  * according to the mapping type and remaps the pages. Finally sets the file
  * pointer and calls vm_open().
  */
-static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 {
	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;