 -rw-r--r--  drivers/char/drm/README.drm    |  16
 -rw-r--r--  drivers/char/drm/drm.h         |   4
 -rw-r--r--  drivers/char/drm/drmP.h        |  23
 -rw-r--r--  drivers/char/drm/drm_bufs.c    |  75
 -rw-r--r--  drivers/char/drm/drm_drv.c     |   9
 -rw-r--r--  drivers/char/drm/drm_fops.c    |  96
 -rw-r--r--  drivers/char/drm/drm_hashtab.c |  17
 -rw-r--r--  drivers/char/drm/drm_hashtab.h |   1
 -rw-r--r--  drivers/char/drm/drm_irq.c     |   4
 -rw-r--r--  drivers/char/drm/drm_lock.c    | 134
 -rw-r--r--  drivers/char/drm/drm_mm.c      |   2
 -rw-r--r--  drivers/char/drm/drm_pciids.h  |   3
 -rw-r--r--  drivers/char/drm/drm_proc.c    |   2
 -rw-r--r--  drivers/char/drm/drm_stub.c    |   1
 -rw-r--r--  drivers/char/drm/drm_vm.c      | 102
 -rw-r--r--  drivers/char/drm/i915_dma.c    |   3
 -rw-r--r--  drivers/char/drm/radeon_cp.c   |   8
 -rw-r--r--  drivers/char/drm/sis_drv.c     |   2
 -rw-r--r--  drivers/char/drm/via_drv.c     |   3
 -rw-r--r--  drivers/char/drm/via_mm.h      |  40
 20 files changed, 349 insertions(+), 196 deletions(-)
diff --git a/drivers/char/drm/README.drm b/drivers/char/drm/README.drm
index 6441e01e587c..af74cd79a279 100644
--- a/drivers/char/drm/README.drm
+++ b/drivers/char/drm/README.drm
@@ -1,6 +1,6 @@
 ************************************************************
 * For the very latest on DRI development, please see:      *
-* http://dri.sourceforge.net/                              *
+* http://dri.freedesktop.org/                              *
 ************************************************************
 
 The Direct Rendering Manager (drm) is a device-independent kernel-level
@@ -26,21 +26,19 @@ ways:
 
 
 Documentation on the DRI is available from:
-    http://precisioninsight.com/piinsights.html
+    http://dri.freedesktop.org/wiki/Documentation
+    http://sourceforge.net/project/showfiles.php?group_id=387
+    http://dri.sourceforge.net/doc/
 
 For specific information about kernel-level support, see:
 
     The Direct Rendering Manager, Kernel Support for the Direct Rendering
     Infrastructure
-    http://precisioninsight.com/dr/drm.html
+    http://dri.sourceforge.net/doc/drm_low_level.html
 
     Hardware Locking for the Direct Rendering Infrastructure
-    http://precisioninsight.com/dr/locking.html
+    http://dri.sourceforge.net/doc/hardware_locking_low_level.html
 
     A Security Analysis of the Direct Rendering Infrastructure
-    http://precisioninsight.com/dr/security.html
+    http://dri.sourceforge.net/doc/security_low_level.html
 
-************************************************************
-* For the very latest on DRI development, please see:      *
-* http://dri.sourceforge.net/                              *
-************************************************************
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 8db9041e306c..089198491f16 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -654,11 +654,13 @@ typedef struct drm_set_version {
 
 /**
  * Device specific ioctls should only be in their respective headers
- * The device specific ioctl range is from 0x40 to 0x79.
+ * The device specific ioctl range is from 0x40 to 0x99.
+ * Generic IOCTLS restart at 0xA0.
  *
  * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
  * drmCommandReadWrite().
  */
 #define DRM_COMMAND_BASE		0x40
+#define DRM_COMMAND_END			0xA0
 
 #endif
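The widened window matters because drm_ioctl() (see the drm_drv.c hunk further down) routes every number in [DRM_COMMAND_BASE, DRM_COMMAND_END) to the driver's private ioctl table. A minimal sketch of how a driver-private ioctl is numbered against that window; the "foo" names and payload struct are hypothetical, only DRM_COMMAND_BASE/DRM_COMMAND_END and DRM_IOW() come from drm.h:

/* Hypothetical example, not part of this patch. */
typedef struct drm_foo_flush {
	unsigned int context;
} drm_foo_flush_t;

#define DRM_FOO_FLUSH		0x02	/* index into the driver's ioctl table */
#define DRM_IOCTL_FOO_FLUSH	DRM_IOW(DRM_COMMAND_BASE + DRM_FOO_FLUSH, drm_foo_flush_t)

/* drm_ioctl() dispatches driver commands with:
 *	ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
 * so a driver now has room for DRM_COMMAND_END - DRM_COMMAND_BASE
 * (0xA0 - 0x40 = 96) private ioctls instead of the old 0x40..0x79 range.
 */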
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 85d99e21e188..80041d5b792d 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -414,6 +414,10 @@ typedef struct drm_lock_data {
 	struct file *filp;		/**< File descr of lock holder (0=kernel) */
 	wait_queue_head_t lock_queue;	/**< Queue of blocked processes */
 	unsigned long lock_time;	/**< Time of last lock in jiffies */
+	spinlock_t spinlock;
+	uint32_t kernel_waiters;
+	uint32_t user_waiters;
+	int idle_has_lock;
 } drm_lock_data_t;
 
 /**
@@ -590,6 +594,8 @@ struct drm_driver {
 	void (*reclaim_buffers) (struct drm_device * dev, struct file * filp);
 	void (*reclaim_buffers_locked) (struct drm_device *dev,
 					struct file *filp);
+	void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
+					    struct file * filp);
 	unsigned long (*get_map_ofs) (drm_map_t * map);
 	unsigned long (*get_reg_ofs) (struct drm_device * dev);
 	void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
@@ -764,7 +770,7 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev,
 }
 
 #ifdef __alpha__
-#define drm_get_pci_domain(dev) dev->hose->bus->number
+#define drm_get_pci_domain(dev) dev->hose->index
 #else
 #define drm_get_pci_domain(dev) 0
 #endif
@@ -915,9 +921,18 @@ extern int drm_lock(struct inode *inode, struct file *filp,
 		    unsigned int cmd, unsigned long arg);
 extern int drm_unlock(struct inode *inode, struct file *filp,
 		      unsigned int cmd, unsigned long arg);
-extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);
-extern int drm_lock_free(drm_device_t * dev,
-			 __volatile__ unsigned int *lock, unsigned int context);
+extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context);
+extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context);
+extern void drm_idlelock_take(drm_lock_data_t *lock_data);
+extern void drm_idlelock_release(drm_lock_data_t *lock_data);
+
+/*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
+ */
+
+extern int drm_i_have_hw_lock(struct file *filp);
+extern int drm_kernel_take_hw_lock(struct file *filp);
 
 /* Buffer management support (drm_bufs.h) */
 extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index a6828cc14e58..c11345856ffe 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -57,7 +57,8 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
 	list_for_each(list, &dev->maplist->head) {
 		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
 		if (entry->map && map->type == entry->map->type &&
-		    entry->map->offset == map->offset) {
+		    ((entry->map->offset == map->offset) ||
+		     (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
 			return entry;
 		}
 	}
@@ -180,8 +181,20 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
 		if (map->type == _DRM_REGISTERS)
 			map->handle = ioremap(map->offset, map->size);
 		break;
-
 	case _DRM_SHM:
+		list = drm_find_matching_map(dev, map);
+		if (list != NULL) {
+			if(list->map->size != map->size) {
+				DRM_DEBUG("Matching maps of type %d with "
+					  "mismatched sizes, (%ld vs %ld)\n",
+					  map->type, map->size, list->map->size);
+				list->map->size = map->size;
+			}
+
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			*maplist = list;
+			return 0;
+		}
 		map->handle = vmalloc_user(map->size);
 		DRM_DEBUG("%lu %d %p\n",
 			  map->size, drm_order(map->size), map->handle);
@@ -200,15 +213,45 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
 			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
 		}
 		break;
-	case _DRM_AGP:
-		if (drm_core_has_AGP(dev)) {
+	case _DRM_AGP: {
+		drm_agp_mem_t *entry;
+		int valid = 0;
+
+		if (!drm_core_has_AGP(dev)) {
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			return -EINVAL;
+		}
 #ifdef __alpha__
 		map->offset += dev->hose->mem_space->start;
 #endif
-			map->offset += dev->agp->base;
-			map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
+		/* Note: dev->agp->base may actually be 0 when the DRM
+		 * is not in control of AGP space. But if user space is
+		 * it should already have added the AGP base itself.
+		 */
+		map->offset += dev->agp->base;
+		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
+
+		/* This assumes the DRM is in total control of AGP space.
+		 * It's not always the case as AGP can be in the control
+		 * of user space (i.e. i810 driver). So this loop will get
+		 * skipped and we double check that dev->agp->memory is
+		 * actually set as well as being invalid before EPERM'ing
+		 */
+		for (entry = dev->agp->memory; entry; entry = entry->next) {
+			if ((map->offset >= entry->bound) &&
+			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
+				valid = 1;
+				break;
+			}
 		}
+		if (dev->agp->memory && !valid) {
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			return -EPERM;
+		}
+		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
+
 		break;
+	}
 	case _DRM_SCATTER_GATHER:
 		if (!dev->sg) {
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
@@ -267,7 +310,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
 
 	*maplist = list;
 	return 0;
-}
+}
 
 int drm_addmap(drm_device_t * dev, unsigned int offset,
 	       unsigned int size, drm_map_type_t type,
@@ -519,6 +562,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 {
 	drm_device_dma_t *dma = dev->dma;
 	drm_buf_entry_t *entry;
+	drm_agp_mem_t *agp_entry;
 	drm_buf_t *buf;
 	unsigned long offset;
 	unsigned long agp_offset;
@@ -529,7 +573,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 	int page_order;
 	int total;
 	int byte_count;
-	int i;
+	int i, valid;
 	drm_buf_t **temp_buflist;
 
 	if (!dma)
@@ -560,6 +604,19 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 	if (dev->queue_count)
 		return -EBUSY;	/* Not while in use */
 
+	/* Make sure buffers are located in AGP memory that we own */
+	valid = 0;
+	for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) {
+		if ((agp_offset >= agp_entry->bound) &&
+		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
+			valid = 1;
+			break;
+		}
+	}
+	if (dev->agp->memory && !valid) {
+		DRM_DEBUG("zone invalid\n");
+		return -EINVAL;
+	}
 	spin_lock(&dev->count_lock);
 	if (dev->buf_use) {
 		spin_unlock(&dev->count_lock);
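Both new checks in this file implement the same containment rule: an AGP address range is accepted only if it lies entirely inside a block that is actually bound into the AGP aperture, unless no AGP memory is registered with the DRM at all. A sketch of that rule factored into a helper; the helper name is hypothetical, the in-tree code keeps the loop inline as shown above:

/* Illustrative only: the containment test used above, factored out.
 * An AGP range is acceptable when it fits entirely inside one bound
 * AGP memory block known to the DRM.
 */
static int drm_agp_range_valid(drm_device_t *dev, unsigned long offset,
			       unsigned long size)
{
	drm_agp_mem_t *entry;

	for (entry = dev->agp->memory; entry; entry = entry->next) {
		if (offset >= entry->bound &&
		    offset + size <= entry->bound + entry->pages * PAGE_SIZE)
			return 1;
	}
	/* No AGP memory registered with the DRM at all: trust the caller,
	 * as the code above does for drivers (e.g. i810) that manage the
	 * AGP space from user space.
	 */
	return dev->agp->memory == NULL;
}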
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index f5b9b2480c14..26bec30ee86e 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -496,11 +496,14 @@ int drm_ioctl(struct inode *inode, struct file *filp,
 		  (long)old_encode_dev(priv->head->device),
 		  priv->authenticated);
 
-	if (nr < DRIVER_IOCTL_COUNT)
-		ioctl = &drm_ioctls[nr];
-	else if ((nr >= DRM_COMMAND_BASE)
+	if ((nr >= DRIVER_IOCTL_COUNT) &&
+	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+		goto err_i1;
+	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
 	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
 		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+	else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
+		ioctl = &drm_ioctls[nr];
 	else
 		goto err_i1;
 
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 898f47dafec0..3b159cab3bc8 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -46,6 +46,7 @@ static int drm_setup(drm_device_t * dev)
 	drm_local_map_t *map;
 	int i;
 	int ret;
+	u32 sareapage;
 
 	if (dev->driver->firstopen) {
 		ret = dev->driver->firstopen(dev);
@@ -56,7 +57,8 @@ static int drm_setup(drm_device_t * dev)
 	dev->magicfree.next = NULL;
 
 	/* prebuild the SAREA */
-	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
+	sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE);
+	i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
 	if (i != 0)
 		return i;
 
@@ -84,7 +86,7 @@ static int drm_setup(drm_device_t * dev)
 	INIT_LIST_HEAD(&dev->ctxlist->head);
 
 	dev->vmalist = NULL;
-	dev->sigdata.lock = dev->lock.hw_lock = NULL;
+	dev->sigdata.lock = NULL;
 	init_waitqueue_head(&dev->lock.lock_queue);
 	dev->queue_count = 0;
 	dev->queue_reserved = 0;
@@ -354,58 +356,56 @@ int drm_release(struct inode *inode, struct file *filp)
 		  current->pid, (long)old_encode_dev(priv->head->device),
 		  dev->open_count);
 
-	if (priv->lock_count && dev->lock.hw_lock &&
-	    _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
-	    dev->lock.filp == filp) {
-		DRM_DEBUG("File %p released, freeing lock for context %d\n",
-			  filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
-		if (dev->driver->reclaim_buffers_locked)
+	if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
+		if (drm_i_have_hw_lock(filp)) {
 			dev->driver->reclaim_buffers_locked(dev, filp);
-
-		drm_lock_free(dev, &dev->lock.hw_lock->lock,
-			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
-		/* FIXME: may require heavy-handed reset of
-		   hardware at this point, possibly
-		   processed via a callback to the X
-		   server. */
-	} else if (dev->driver->reclaim_buffers_locked && priv->lock_count
-		   && dev->lock.hw_lock) {
-		/* The lock is required to reclaim buffers */
-		DECLARE_WAITQUEUE(entry, current);
-
-		add_wait_queue(&dev->lock.lock_queue, &entry);
-		for (;;) {
-			__set_current_state(TASK_INTERRUPTIBLE);
-			if (!dev->lock.hw_lock) {
-				/* Device has been unregistered */
-				retcode = -EINTR;
-				break;
+		} else {
+			unsigned long _end=jiffies + 3*DRM_HZ;
+			int locked = 0;
+
+			drm_idlelock_take(&dev->lock);
+
+			/*
+			 * Wait for a while.
+			 */
+
+			do{
+				spin_lock(&dev->lock.spinlock);
+				locked = dev->lock.idle_has_lock;
+				spin_unlock(&dev->lock.spinlock);
+				if (locked)
+					break;
+				schedule();
+			} while (!time_after_eq(jiffies, _end));
+
+			if (!locked) {
+				DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+					  "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+					  "\tI will go on reclaiming the buffers anyway.\n");
 			}
-			if (drm_lock_take(&dev->lock.hw_lock->lock,
-					  DRM_KERNEL_CONTEXT)) {
-				dev->lock.filp = filp;
-				dev->lock.lock_time = jiffies;
-				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
-				break;	/* Got lock */
-			}
-			/* Contention */
-			schedule();
-			if (signal_pending(current)) {
-				retcode = -ERESTARTSYS;
-				break;
-			}
-		}
-		__set_current_state(TASK_RUNNING);
-		remove_wait_queue(&dev->lock.lock_queue, &entry);
-		if (!retcode) {
+
 			dev->driver->reclaim_buffers_locked(dev, filp);
-			drm_lock_free(dev, &dev->lock.hw_lock->lock,
-				      DRM_KERNEL_CONTEXT);
+			drm_idlelock_release(&dev->lock);
 		}
 	}
 
+	if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
+
+		drm_idlelock_take(&dev->lock);
+		dev->driver->reclaim_buffers_idlelocked(dev, filp);
+		drm_idlelock_release(&dev->lock);
+
+	}
+
+	if (drm_i_have_hw_lock(filp)) {
+		DRM_DEBUG("File %p released, freeing lock for context %d\n",
+			  filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+
+		drm_lock_free(&dev->lock,
+			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+	}
+
+
 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
 	    !dev->driver->reclaim_buffers_locked) {
 		dev->driver->reclaim_buffers(dev, filp);
diff --git a/drivers/char/drm/drm_hashtab.c b/drivers/char/drm/drm_hashtab.c
index a0b2d6802ae4..31acb621dcce 100644
--- a/drivers/char/drm/drm_hashtab.c
+++ b/drivers/char/drm/drm_hashtab.c
@@ -43,7 +43,16 @@ int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
 	ht->size = 1 << order;
 	ht->order = order;
 	ht->fill = 0;
-	ht->table = vmalloc(ht->size*sizeof(*ht->table));
+	ht->table = NULL;
+	ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
+	if (!ht->use_vmalloc) {
+		ht->table = drm_calloc(ht->size, sizeof(*ht->table),
+				       DRM_MEM_HASHTAB);
+	}
+	if (!ht->table) {
+		ht->use_vmalloc = 1;
+		ht->table = vmalloc(ht->size*sizeof(*ht->table));
+	}
 	if (!ht->table) {
 		DRM_ERROR("Out of memory for hash table\n");
 		return -ENOMEM;
@@ -183,7 +192,11 @@ int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item)
 void drm_ht_remove(drm_open_hash_t *ht)
 {
 	if (ht->table) {
-		vfree(ht->table);
+		if (ht->use_vmalloc)
+			vfree(ht->table);
+		else
+			drm_free(ht->table, ht->size * sizeof(*ht->table),
+				 DRM_MEM_HASHTAB);
 		ht->table = NULL;
 	}
 }
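The allocation strategy above boils down to: use the linear kernel allocator while the bucket array fits in a single page, fall back to vmalloc otherwise (or if the small allocation fails), and remember the choice so drm_ht_remove() can call the matching free. A standalone sketch of the same pattern, using the generic kernel allocators rather than the drm_calloc()/drm_free() wrappers; illustrative only:

/* Illustrative helper (not part of the patch): pick kcalloc for small
 * tables, vmalloc for large ones, and record which allocator was used
 * so the matching free can be issued later.
 */
static void *table_alloc(size_t nmemb, size_t size, int *use_vmalloc)
{
	void *table = NULL;

	*use_vmalloc = (nmemb * size > PAGE_SIZE);
	if (!*use_vmalloc)
		table = kcalloc(nmemb, size, GFP_KERNEL);
	if (!table) {			/* too big, or kcalloc failed */
		*use_vmalloc = 1;
		table = vmalloc(nmemb * size);
	}
	return table;
}

static void table_free(void *table, int use_vmalloc)
{
	if (use_vmalloc)
		vfree(table);
	else
		kfree(table);
}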
diff --git a/drivers/char/drm/drm_hashtab.h b/drivers/char/drm/drm_hashtab.h
index 40afec05bff8..613091c970af 100644
--- a/drivers/char/drm/drm_hashtab.h
+++ b/drivers/char/drm/drm_hashtab.h
@@ -47,6 +47,7 @@ typedef struct drm_open_hash{
 	unsigned int order;
 	unsigned int fill;
 	struct hlist_head *table;
+	int use_vmalloc;
 } drm_open_hash_t;
 
 
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index 9d00c51fe2c4..2e75331fd83e 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -424,7 +424,7 @@ static void drm_locked_tasklet_func(unsigned long data)
 	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
 
 	if (!dev->locked_tasklet_func ||
-	    !drm_lock_take(&dev->lock.hw_lock->lock,
+	    !drm_lock_take(&dev->lock,
 			   DRM_KERNEL_CONTEXT)) {
 		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
 		return;
@@ -435,7 +435,7 @@ static void drm_locked_tasklet_func(unsigned long data)
 
 	dev->locked_tasklet_func(dev);
 
-	drm_lock_free(dev, &dev->lock.hw_lock->lock,
+	drm_lock_free(&dev->lock,
 		      DRM_KERNEL_CONTEXT);
 
 	dev->locked_tasklet_func = NULL;
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c
index e9993ba461a2..befd1af19dfe 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/char/drm/drm_lock.c
@@ -35,9 +35,6 @@
 
 #include "drmP.h"
 
-static int drm_lock_transfer(drm_device_t * dev,
-			     __volatile__ unsigned int *lock,
-			     unsigned int context);
 static int drm_notifier(void *priv);
 
 /**
@@ -80,6 +77,9 @@ int drm_lock(struct inode *inode, struct file *filp,
 		return -EINVAL;
 
 	add_wait_queue(&dev->lock.lock_queue, &entry);
+	spin_lock(&dev->lock.spinlock);
+	dev->lock.user_waiters++;
+	spin_unlock(&dev->lock.spinlock);
 	for (;;) {
 		__set_current_state(TASK_INTERRUPTIBLE);
 		if (!dev->lock.hw_lock) {
@@ -87,7 +87,7 @@ int drm_lock(struct inode *inode, struct file *filp,
 			ret = -EINTR;
 			break;
 		}
-		if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
+		if (drm_lock_take(&dev->lock, lock.context)) {
 			dev->lock.filp = filp;
 			dev->lock.lock_time = jiffies;
 			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
@@ -101,12 +101,14 @@ int drm_lock(struct inode *inode, struct file *filp,
 			break;
 		}
 	}
+	spin_lock(&dev->lock.spinlock);
+	dev->lock.user_waiters--;
+	spin_unlock(&dev->lock.spinlock);
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&dev->lock.lock_queue, &entry);
 
-	DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
-	if (ret)
-		return ret;
+	DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
+	if (ret) return ret;
 
 	sigemptyset(&dev->sigmask);
 	sigaddset(&dev->sigmask, SIGSTOP);
@@ -127,14 +129,12 @@ int drm_lock(struct inode *inode, struct file *filp,
 		}
 	}
 
-	/* dev->driver->kernel_context_switch isn't used by any of the x86
-	 * drivers but is used by the Sparc driver.
-	 */
 	if (dev->driver->kernel_context_switch &&
 	    dev->last_context != lock.context) {
 		dev->driver->kernel_context_switch(dev, dev->last_context,
 						   lock.context);
 	}
+
 	return 0;
 }
 
@@ -184,12 +184,8 @@ int drm_unlock(struct inode *inode, struct file *filp,
 	if (dev->driver->kernel_context_switch_unlock)
 		dev->driver->kernel_context_switch_unlock(dev);
 	else {
-		drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT);
-
-		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT)) {
-			DRM_ERROR("\n");
+		if (drm_lock_free(&dev->lock,lock.context)) {
+			/* FIXME: Should really bail out here. */
 		}
 	}
 
@@ -206,18 +202,26 @@ int drm_unlock(struct inode *inode, struct file *filp,
  *
  * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
  */
-int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_take(drm_lock_data_t *lock_data,
+		  unsigned int context)
 {
 	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
 
+	spin_lock(&lock_data->spinlock);
 	do {
 		old = *lock;
 		if (old & _DRM_LOCK_HELD)
 			new = old | _DRM_LOCK_CONT;
-		else
-			new = context | _DRM_LOCK_HELD;
+		else {
+			new = context | _DRM_LOCK_HELD |
+				((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+				 _DRM_LOCK_CONT : 0);
+		}
 		prev = cmpxchg(lock, old, new);
 	} while (prev != old);
+	spin_unlock(&lock_data->spinlock);
+
 	if (_DRM_LOCKING_CONTEXT(old) == context) {
 		if (old & _DRM_LOCK_HELD) {
 			if (context != DRM_KERNEL_CONTEXT) {
@@ -227,7 +231,8 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
 				return 0;
 			}
 		}
-	if (new == (context | _DRM_LOCK_HELD)) {
+
+	if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
 		/* Have lock */
 		return 1;
 	}
@@ -246,13 +251,13 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
  * Resets the lock file pointer.
  * Marks the lock as held by the given context, via the \p cmpxchg instruction.
  */
-static int drm_lock_transfer(drm_device_t * dev,
-			     __volatile__ unsigned int *lock,
+static int drm_lock_transfer(drm_lock_data_t *lock_data,
 			     unsigned int context)
 {
 	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
 
-	dev->lock.filp = NULL;
+	lock_data->filp = NULL;
 	do {
 		old = *lock;
 		new = context | _DRM_LOCK_HELD;
@@ -272,23 +277,32 @@ static int drm_lock_transfer(drm_device_t * dev,
  * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
  * waiting on the lock queue.
  */
-int drm_lock_free(drm_device_t * dev,
-		  __volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
 {
 	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	spin_lock(&lock_data->spinlock);
+	if (lock_data->kernel_waiters != 0) {
+		drm_lock_transfer(lock_data, 0);
+		lock_data->idle_has_lock = 1;
+		spin_unlock(&lock_data->spinlock);
+		return 1;
+	}
+	spin_unlock(&lock_data->spinlock);
 
-	dev->lock.filp = NULL;
 	do {
 		old = *lock;
-		new = 0;
+		new = _DRM_LOCKING_CONTEXT(old);
 		prev = cmpxchg(lock, old, new);
 	} while (prev != old);
+
 	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
 		DRM_ERROR("%d freed heavyweight lock held by %d\n",
 			  context, _DRM_LOCKING_CONTEXT(old));
 		return 1;
 	}
-	wake_up_interruptible(&dev->lock.lock_queue);
+	wake_up_interruptible(&lock_data->lock_queue);
 	return 0;
 }
 
@@ -322,3 +336,67 @@ static int drm_notifier(void *priv)
 	} while (prev != old);
 	return 0;
 }
+
+/**
+ * This function returns immediately and takes the hw lock
+ * with the kernel context if it is free, otherwise it gets the highest priority when and if
+ * it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
+ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
+ * a deadlock, which is why the "idlelock" was invented).
+ *
+ * This should be sufficient to wait for GPU idle without
+ * having to worry about starvation.
+ */
+
+void drm_idlelock_take(drm_lock_data_t *lock_data)
+{
+	int ret = 0;
+
+	spin_lock(&lock_data->spinlock);
+	lock_data->kernel_waiters++;
+	if (!lock_data->idle_has_lock) {
+
+		spin_unlock(&lock_data->spinlock);
+		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+		spin_lock(&lock_data->spinlock);
+
+		if (ret == 1)
+			lock_data->idle_has_lock = 1;
+	}
+	spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_take);
+
+void drm_idlelock_release(drm_lock_data_t *lock_data)
+{
+	unsigned int old, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	spin_lock(&lock_data->spinlock);
+	if (--lock_data->kernel_waiters == 0) {
+		if (lock_data->idle_has_lock) {
+			do {
+				old = *lock;
+				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+			} while (prev != old);
+			wake_up_interruptible(&lock_data->lock_queue);
+			lock_data->idle_has_lock = 0;
+		}
+	}
+	spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_release);
+
+
+int drm_i_have_hw_lock(struct file *filp)
+{
+	DRM_DEVICE;
+
+	return (priv->lock_count && dev->lock.hw_lock &&
+		_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+		dev->lock.filp == filp);
+}
+
+EXPORT_SYMBOL(drm_i_have_hw_lock);
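Note that drm_idlelock_take() returns immediately: it only guarantees that the kernel context will inherit the heavyweight lock once the current holder releases it (or takes it at once if the lock is free). Code that needs the hardware quiescent therefore polls idle_has_lock, exactly as drm_release() does in the drm_fops.c hunk above. A hedged sketch of that pattern as a kernel-side helper; the "foo" names are hypothetical and the bail-out policy is only one possible choice:

/* Hypothetical helper, for illustration only; it mirrors the pattern
 * used by drm_release() above.
 */
static int drm_foo_take_hw_quiescent(drm_device_t *dev)
{
	unsigned long _end = jiffies + 3 * DRM_HZ;
	int locked = 0;

	drm_idlelock_take(&dev->lock);	/* returns immediately */

	/* Poll until the idlelock has actually inherited the HW lock:
	 * either it was free, or the user-space holder released it.
	 */
	do {
		spin_lock(&dev->lock.spinlock);
		locked = dev->lock.idle_has_lock;
		spin_unlock(&dev->lock.spinlock);
		if (locked)
			break;
		schedule();
	} while (!time_after_eq(jiffies, _end));

	if (!locked) {
		drm_idlelock_release(&dev->lock);
		return -EBUSY;		/* holder is blocked; give up */
	}

	/* ... the hardware can now be idled safely ... */

	drm_idlelock_release(&dev->lock);
	return 0;
}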
diff --git a/drivers/char/drm/drm_mm.c b/drivers/char/drm/drm_mm.c
index 9b46b85027d0..2ec1d9f26264 100644
--- a/drivers/char/drm/drm_mm.c
+++ b/drivers/char/drm/drm_mm.c
@@ -274,7 +274,6 @@ int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
 	return drm_mm_create_tail_node(mm, start, size);
 }
 
-EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(drm_mm_t * mm)
 {
@@ -295,4 +294,3 @@ void drm_mm_takedown(drm_mm_t * mm)
 	drm_free(entry, sizeof(*entry), DRM_MEM_MM);
 }
 
-EXPORT_SYMBOL(drm_mm_takedown);
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index ad54b845978b..01cf482d2d00 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -230,10 +230,10 @@
 	{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x3304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
+	{0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
 	{0, 0, 0}
 
 #define i810_PCI_IDS \
@@ -296,5 +296,6 @@
 	{0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0, 0, 0}
 
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 7fd0da712142..b204498d1a28 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -72,7 +72,7 @@ static struct drm_proc_list {
 #endif
 };
 
-#define DRM_PROC_ENTRIES (sizeof(drm_proc_list)/sizeof(drm_proc_list[0]))
+#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
 
 /**
  * Initialize the DRI proc filesystem for a device.
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 120d10256feb..19408adcc775 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -62,6 +62,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
 	spin_lock_init(&dev->count_lock);
 	spin_lock_init(&dev->drw_lock);
 	spin_lock_init(&dev->tasklet_lock);
+	spin_lock_init(&dev->lock.spinlock);
 	init_timer(&dev->timer);
 	mutex_init(&dev->struct_mutex);
 	mutex_init(&dev->ctxlist_mutex);
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 54a632848955..35540cfb43dd 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -41,6 +41,30 @@
 static void drm_vm_open(struct vm_area_struct *vma);
 static void drm_vm_close(struct vm_area_struct *vma);
 
+static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__i386__) || defined(__x86_64__)
+	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+		pgprot_val(tmp) |= _PAGE_PCD;
+		pgprot_val(tmp) &= ~_PAGE_PWT;
+	}
+#elif defined(__powerpc__)
+	pgprot_val(tmp) |= _PAGE_NO_CACHE;
+	if (map_type == _DRM_REGISTERS)
+		pgprot_val(tmp) |= _PAGE_GUARDED;
+#endif
+#if defined(__ia64__)
+	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+				     vma->vm_start))
+		tmp = pgprot_writecombine(tmp);
+	else
+		tmp = pgprot_noncached(tmp);
+#endif
+	return tmp;
+}
+
 /**
  * \c nopage method for AGP virtual memory.
  *
@@ -151,8 +175,7 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
 
 	offset = address - vma->vm_start;
 	i = (unsigned long)map->handle + offset;
-	page = (map->type == _DRM_CONSISTENT) ?
-	    virt_to_page((void *)i) : vmalloc_to_page((void *)i);
+	page = vmalloc_to_page((void *)i);
 	if (!page)
 		return NOPAGE_SIGBUS;
 	get_page(page);
@@ -389,7 +412,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
-static void drm_vm_open(struct vm_area_struct *vma)
+static void drm_vm_open_locked(struct vm_area_struct *vma)
 {
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
@@ -401,15 +424,23 @@ static void drm_vm_open(struct vm_area_struct *vma)
 
 	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
 	if (vma_entry) {
-		mutex_lock(&dev->struct_mutex);
 		vma_entry->vma = vma;
 		vma_entry->next = dev->vmalist;
 		vma_entry->pid = current->pid;
 		dev->vmalist = vma_entry;
-		mutex_unlock(&dev->struct_mutex);
 	}
 }
 
+static void drm_vm_open(struct vm_area_struct *vma)
+{
+	drm_file_t *priv = vma->vm_file->private_data;
+	drm_device_t *dev = priv->head->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_vm_open_locked(vma);
+	mutex_unlock(&dev->struct_mutex);
+}
+
 /**
  * \c close method for all virtual memory types.
  *
@@ -460,7 +491,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 	drm_device_dma_t *dma;
 	unsigned long length = vma->vm_end - vma->vm_start;
 
-	lock_kernel();
 	dev = priv->head->dev;
 	dma = dev->dma;
 	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
@@ -468,10 +498,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 
 	/* Length must match exact page count */
 	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
-		unlock_kernel();
 		return -EINVAL;
 	}
-	unlock_kernel();
 
 	if (!capable(CAP_SYS_ADMIN) &&
 	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
@@ -494,7 +522,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
 
 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
-	drm_vm_open(vma);
+	drm_vm_open_locked(vma);
 	return 0;
 }
 
@@ -529,7 +557,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
-int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 {
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
@@ -565,7 +593,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 		return -EPERM;
 
 	/* Check for valid size. */
-	if (map->size != vma->vm_end - vma->vm_start)
+	if (map->size < vma->vm_end - vma->vm_start)
 		return -EINVAL;
 
 	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
@@ -600,37 +628,16 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 		/* fall through to _DRM_FRAME_BUFFER... */
 	case _DRM_FRAME_BUFFER:
 	case _DRM_REGISTERS:
-#if defined(__i386__) || defined(__x86_64__)
-		if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
-			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
-			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
-		}
-#elif defined(__powerpc__)
-		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-		if (map->type == _DRM_REGISTERS)
-			pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
-#endif
-		vma->vm_flags |= VM_IO;	/* not in core dump */
-#if defined(__ia64__)
-		if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
-			vma->vm_page_prot =
-			    pgprot_writecombine(vma->vm_page_prot);
-		else
-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
 		offset = dev->driver->get_reg_ofs(dev);
+		vma->vm_flags |= VM_IO;	/* not in core dump */
+		vma->vm_page_prot = drm_io_prot(map->type, vma);
 #ifdef __sparc__
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
 				       vma->vm_page_prot))
-#else
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       (map->offset + offset) >> PAGE_SHIFT,
-				       vma->vm_end - vma->vm_start,
-				       vma->vm_page_prot))
-#endif
 			return -EAGAIN;
 		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
 			  " offset = 0x%lx\n",
@@ -638,10 +645,15 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 			  vma->vm_start, vma->vm_end, map->offset + offset);
 		vma->vm_ops = &drm_vm_ops;
 		break;
-	case _DRM_SHM:
 	case _DRM_CONSISTENT:
-		/* Consistent memory is really like shared memory. It's only
-		 * allocate in a different way */
+		/* Consistent memory is really like shared memory. But
+		 * it's allocated in a different way, so avoid nopage */
+		if (remap_pfn_range(vma, vma->vm_start,
+		    page_to_pfn(virt_to_page(map->handle)),
+		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
+			return -EAGAIN;
+	/* fall through to _DRM_SHM */
+	case _DRM_SHM:
 		vma->vm_ops = &drm_vm_shm_ops;
 		vma->vm_private_data = (void *)map;
 		/* Don't let this area swap.  Change when
@@ -659,8 +671,20 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
 
 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
-	drm_vm_open(vma);
+	drm_vm_open_locked(vma);
 	return 0;
 }
 
+int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_mmap_locked(filp, vma);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
 EXPORT_SYMBOL(drm_mmap);
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index 9354ce3b0093..1ba15d9a171a 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -34,7 +34,8 @@
 #define IS_I965G(dev) (dev->pci_device == 0x2972 || \
 		       dev->pci_device == 0x2982 || \
 		       dev->pci_device == 0x2992 || \
-		       dev->pci_device == 0x29A2)
+		       dev->pci_device == 0x29A2 || \
+		       dev->pci_device == 0x2A02)
 
 /* Really want an OS-independent resettable timer.  Would like to have
  * this loop run for (eg) 3 sec, but have the timer reset every time
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 5ed965688293..c1850ecac302 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -1560,8 +1560,8 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
 	if (dev_priv->flags & RADEON_IS_AGP) {
 		base = dev->agp->base;
 		/* Check if valid */
-		if ((base + dev_priv->gart_size) > dev_priv->fb_location &&
-		    base < (dev_priv->fb_location + dev_priv->fb_size)) {
+		if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
+		    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
 			DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
 				 dev->agp->base);
 			base = 0;
@@ -1571,8 +1571,8 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
 	/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
 	if (base == 0) {
 		base = dev_priv->fb_location + dev_priv->fb_size;
-		if (((base + dev_priv->gart_size) & 0xfffffffful)
-		    < base)
+		if (base < dev_priv->fb_location ||
+		    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
 			base = dev_priv->fb_location
 			    - dev_priv->gart_size;
 	}
diff --git a/drivers/char/drm/sis_drv.c b/drivers/char/drm/sis_drv.c
index 3d5b3218b6ff..690e0af8e7c2 100644
--- a/drivers/char/drm/sis_drv.c
+++ b/drivers/char/drm/sis_drv.c
@@ -71,7 +71,7 @@ static struct drm_driver driver = {
 	.context_dtor = NULL,
 	.dma_quiescent = sis_idle,
 	.reclaim_buffers = NULL,
-	.reclaim_buffers_locked = sis_reclaim_buffers_locked,
+	.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
 	.lastclose = sis_lastclose,
 	.get_map_ofs = drm_core_get_map_ofs,
 	.get_reg_ofs = drm_core_get_reg_ofs,
diff --git a/drivers/char/drm/via_drv.c b/drivers/char/drm/via_drv.c
index bb9dde8b1911..2d4957ab256a 100644
--- a/drivers/char/drm/via_drv.c
+++ b/drivers/char/drm/via_drv.c
@@ -52,7 +52,8 @@ static struct drm_driver driver = {
 	.dma_quiescent = via_driver_dma_quiescent,
 	.dri_library_name = dri_library_name,
 	.reclaim_buffers = drm_core_reclaim_buffers,
-	.reclaim_buffers_locked = via_reclaim_buffers_locked,
+	.reclaim_buffers_locked = NULL,
+	.reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
 	.lastclose = via_lastclose,
 	.get_map_ofs = drm_core_get_map_ofs,
 	.get_reg_ofs = drm_core_get_reg_ofs,
diff --git a/drivers/char/drm/via_mm.h b/drivers/char/drm/via_mm.h
deleted file mode 100644
index d57efda57c76..000000000000
--- a/drivers/char/drm/via_mm.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef _via_drm_mm_h_
-#define _via_drm_mm_h_
-
-typedef struct {
-	unsigned int context;
-	unsigned int size;
-	unsigned long offset;
-	unsigned long free;
-} drm_via_mm_t;
-
-typedef struct {
-	unsigned int size;
-	unsigned long handle;
-	void *virtual;
-} drm_via_dma_t;
-
-#endif
