Diffstat (limited to 'drivers/gpu'): 35 files changed, 347 insertions, 215 deletions
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 55d03ed05000..529a0dbe9fc6 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc); | |||
98 | * user_data: A pointer the data that is copied to the buffer. | 98 | * user_data: A pointer the data that is copied to the buffer. |
99 | * size: The Number of bytes to copy. | 99 | * size: The Number of bytes to copy. |
100 | */ | 100 | */ |
101 | extern int drm_buffer_copy_from_user(struct drm_buffer *buf, | 101 | int drm_buffer_copy_from_user(struct drm_buffer *buf, |
102 | void __user *user_data, int size) | 102 | void __user *user_data, int size) |
103 | { | 103 | { |
104 | int nr_pages = size / PAGE_SIZE + 1; | 104 | int nr_pages = size / PAGE_SIZE + 1; |
105 | int idx; | 105 | int idx; |
@@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf, | |||
163 | { | 163 | { |
164 | int idx = drm_buffer_index(buf); | 164 | int idx = drm_buffer_index(buf); |
165 | int page = drm_buffer_page(buf); | 165 | int page = drm_buffer_page(buf); |
166 | void *obj = 0; | 166 | void *obj = NULL; |
167 | 167 | ||
168 | if (idx + objsize <= PAGE_SIZE) { | 168 | if (idx + objsize <= PAGE_SIZE) { |
169 | obj = &buf->data[page][idx]; | 169 | obj = &buf->data[page][idx]; |
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index bf92d07510df..5663d2719063 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -148,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev, | |||
148 | return -ENOMEM; | 148 | return -ENOMEM; |
149 | 149 | ||
150 | kref_init(&obj->refcount); | 150 | kref_init(&obj->refcount); |
151 | kref_init(&obj->handlecount); | 151 | atomic_set(&obj->handle_count, 0); |
152 | obj->size = size; | 152 | obj->size = size; |
153 | 153 | ||
154 | atomic_inc(&dev->object_count); | 154 | atomic_inc(&dev->object_count); |
@@ -462,28 +462,6 @@ drm_gem_object_free(struct kref *kref) | |||
462 | } | 462 | } |
463 | EXPORT_SYMBOL(drm_gem_object_free); | 463 | EXPORT_SYMBOL(drm_gem_object_free); |
464 | 464 | ||
465 | /** | ||
466 | * Called after the last reference to the object has been lost. | ||
467 | * Must be called without holding struct_mutex | ||
468 | * | ||
469 | * Frees the object | ||
470 | */ | ||
471 | void | ||
472 | drm_gem_object_free_unlocked(struct kref *kref) | ||
473 | { | ||
474 | struct drm_gem_object *obj = (struct drm_gem_object *) kref; | ||
475 | struct drm_device *dev = obj->dev; | ||
476 | |||
477 | if (dev->driver->gem_free_object_unlocked != NULL) | ||
478 | dev->driver->gem_free_object_unlocked(obj); | ||
479 | else if (dev->driver->gem_free_object != NULL) { | ||
480 | mutex_lock(&dev->struct_mutex); | ||
481 | dev->driver->gem_free_object(obj); | ||
482 | mutex_unlock(&dev->struct_mutex); | ||
483 | } | ||
484 | } | ||
485 | EXPORT_SYMBOL(drm_gem_object_free_unlocked); | ||
486 | |||
487 | static void drm_gem_object_ref_bug(struct kref *list_kref) | 465 | static void drm_gem_object_ref_bug(struct kref *list_kref) |
488 | { | 466 | { |
489 | BUG(); | 467 | BUG(); |
@@ -496,12 +474,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref) | |||
496 | * called before drm_gem_object_free or we'll be touching | 474 | * called before drm_gem_object_free or we'll be touching |
497 | * freed memory | 475 | * freed memory |
498 | */ | 476 | */ |
499 | void | 477 | void drm_gem_object_handle_free(struct drm_gem_object *obj) |
500 | drm_gem_object_handle_free(struct kref *kref) | ||
501 | { | 478 | { |
502 | struct drm_gem_object *obj = container_of(kref, | ||
503 | struct drm_gem_object, | ||
504 | handlecount); | ||
505 | struct drm_device *dev = obj->dev; | 479 | struct drm_device *dev = obj->dev; |
506 | 480 | ||
507 | /* Remove any name for this object */ | 481 | /* Remove any name for this object */ |
@@ -528,6 +502,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma) | |||
528 | struct drm_gem_object *obj = vma->vm_private_data; | 502 | struct drm_gem_object *obj = vma->vm_private_data; |
529 | 503 | ||
530 | drm_gem_object_reference(obj); | 504 | drm_gem_object_reference(obj); |
505 | |||
506 | mutex_lock(&obj->dev->struct_mutex); | ||
507 | drm_vm_open_locked(vma); | ||
508 | mutex_unlock(&obj->dev->struct_mutex); | ||
531 | } | 509 | } |
532 | EXPORT_SYMBOL(drm_gem_vm_open); | 510 | EXPORT_SYMBOL(drm_gem_vm_open); |
533 | 511 | ||
@@ -535,7 +513,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma) | |||
535 | { | 513 | { |
536 | struct drm_gem_object *obj = vma->vm_private_data; | 514 | struct drm_gem_object *obj = vma->vm_private_data; |
537 | 515 | ||
538 | drm_gem_object_unreference_unlocked(obj); | 516 | mutex_lock(&obj->dev->struct_mutex); |
517 | drm_vm_close_locked(vma); | ||
518 | drm_gem_object_unreference(obj); | ||
519 | mutex_unlock(&obj->dev->struct_mutex); | ||
539 | } | 520 | } |
540 | EXPORT_SYMBOL(drm_gem_vm_close); | 521 | EXPORT_SYMBOL(drm_gem_vm_close); |
541 | 522 | ||
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 2ef2c7827243..974e970ce3f8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data) | |||
255 | 255 | ||
256 | seq_printf(m, "%6d %8zd %7d %8d\n", | 256 | seq_printf(m, "%6d %8zd %7d %8d\n", |
257 | obj->name, obj->size, | 257 | obj->name, obj->size, |
258 | atomic_read(&obj->handlecount.refcount), | 258 | atomic_read(&obj->handle_count), |
259 | atomic_read(&obj->refcount.refcount)); | 259 | atomic_read(&obj->refcount.refcount)); |
260 | return 0; | 260 | return 0; |
261 | } | 261 | } |
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index fda67468e603..5df450683aab 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma) | |||
433 | mutex_unlock(&dev->struct_mutex); | 433 | mutex_unlock(&dev->struct_mutex); |
434 | } | 434 | } |
435 | 435 | ||
436 | /** | 436 | void drm_vm_close_locked(struct vm_area_struct *vma) |
437 | * \c close method for all virtual memory types. | ||
438 | * | ||
439 | * \param vma virtual memory area. | ||
440 | * | ||
441 | * Search the \p vma private data entry in drm_device::vmalist, unlink it, and | ||
442 | * free it. | ||
443 | */ | ||
444 | static void drm_vm_close(struct vm_area_struct *vma) | ||
445 | { | 437 | { |
446 | struct drm_file *priv = vma->vm_file->private_data; | 438 | struct drm_file *priv = vma->vm_file->private_data; |
447 | struct drm_device *dev = priv->minor->dev; | 439 | struct drm_device *dev = priv->minor->dev; |
@@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma) | |||
451 | vma->vm_start, vma->vm_end - vma->vm_start); | 443 | vma->vm_start, vma->vm_end - vma->vm_start); |
452 | atomic_dec(&dev->vma_count); | 444 | atomic_dec(&dev->vma_count); |
453 | 445 | ||
454 | mutex_lock(&dev->struct_mutex); | ||
455 | list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { | 446 | list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { |
456 | if (pt->vma == vma) { | 447 | if (pt->vma == vma) { |
457 | list_del(&pt->head); | 448 | list_del(&pt->head); |
@@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma) | |||
459 | break; | 450 | break; |
460 | } | 451 | } |
461 | } | 452 | } |
453 | } | ||
454 | |||
455 | /** | ||
456 | * \c close method for all virtual memory types. | ||
457 | * | ||
458 | * \param vma virtual memory area. | ||
459 | * | ||
460 | * Search the \p vma private data entry in drm_device::vmalist, unlink it, and | ||
461 | * free it. | ||
462 | */ | ||
463 | static void drm_vm_close(struct vm_area_struct *vma) | ||
464 | { | ||
465 | struct drm_file *priv = vma->vm_file->private_data; | ||
466 | struct drm_device *dev = priv->minor->dev; | ||
467 | |||
468 | mutex_lock(&dev->struct_mutex); | ||
469 | drm_vm_close_locked(vma); | ||
462 | mutex_unlock(&dev->struct_mutex); | 470 | mutex_unlock(&dev->struct_mutex); |
463 | } | 471 | } |
464 | 472 | ||
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 61b4caf220fa..fb07e73581e8 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | |||
116 | static const struct file_operations i810_buffer_fops = { | 116 | static const struct file_operations i810_buffer_fops = { |
117 | .open = drm_open, | 117 | .open = drm_open, |
118 | .release = drm_release, | 118 | .release = drm_release, |
119 | .unlocked_ioctl = drm_ioctl, | 119 | .unlocked_ioctl = i810_ioctl, |
120 | .mmap = i810_mmap_buffers, | 120 | .mmap = i810_mmap_buffers, |
121 | .fasync = drm_fasync, | 121 | .fasync = drm_fasync, |
122 | }; | 122 | }; |
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 671aa18415ac..cc92c7e6236f 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | |||
118 | static const struct file_operations i830_buffer_fops = { | 118 | static const struct file_operations i830_buffer_fops = { |
119 | .open = drm_open, | 119 | .open = drm_open, |
120 | .release = drm_release, | 120 | .release = drm_release, |
121 | .unlocked_ioctl = drm_ioctl, | 121 | .unlocked_ioctl = i830_ioctl, |
122 | .mmap = i830_mmap_buffers, | 122 | .mmap = i830_mmap_buffers, |
123 | .fasync = drm_fasync, | 123 | .fasync = drm_fasync, |
124 | }; | 124 | }; |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9d67b4853030..c74e4e8006d4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1787,9 +1787,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) | |||
1787 | } | 1787 | } |
1788 | } | 1788 | } |
1789 | 1789 | ||
1790 | div_u64(diff, diff1); | 1790 | diff = div_u64(diff, diff1); |
1791 | ret = ((m * diff) + c); | 1791 | ret = ((m * diff) + c); |
1792 | div_u64(ret, 10); | 1792 | ret = div_u64(ret, 10); |
1793 | 1793 | ||
1794 | dev_priv->last_count1 = total_count; | 1794 | dev_priv->last_count1 = total_count; |
1795 | dev_priv->last_time1 = now; | 1795 | dev_priv->last_time1 = now; |
@@ -1858,7 +1858,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv) | |||
1858 | 1858 | ||
1859 | /* More magic constants... */ | 1859 | /* More magic constants... */ |
1860 | diff = diff * 1181; | 1860 | diff = diff * 1181; |
1861 | div_u64(diff, diffms * 10); | 1861 | diff = div_u64(diff, diffms * 10); |
1862 | dev_priv->gfx_power = diff; | 1862 | dev_priv->gfx_power = diff; |
1863 | } | 1863 | } |
1864 | 1864 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf4ffbee1c00..90b1d6753b9d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -136,14 +136,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, | |||
136 | return -ENOMEM; | 136 | return -ENOMEM; |
137 | 137 | ||
138 | ret = drm_gem_handle_create(file_priv, obj, &handle); | 138 | ret = drm_gem_handle_create(file_priv, obj, &handle); |
139 | /* drop reference from allocate - handle holds it now */ | ||
140 | drm_gem_object_unreference_unlocked(obj); | ||
139 | if (ret) { | 141 | if (ret) { |
140 | drm_gem_object_unreference_unlocked(obj); | ||
141 | return ret; | 142 | return ret; |
142 | } | 143 | } |
143 | 144 | ||
144 | /* Sink the floating reference from kref_init(handlecount) */ | ||
145 | drm_gem_object_handle_unreference_unlocked(obj); | ||
146 | |||
147 | args->handle = handle; | 145 | args->handle = handle; |
148 | return 0; | 146 | return 0; |
149 | } | 147 | } |
@@ -471,14 +469,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
471 | return -ENOENT; | 469 | return -ENOENT; |
472 | obj_priv = to_intel_bo(obj); | 470 | obj_priv = to_intel_bo(obj); |
473 | 471 | ||
474 | /* Bounds check source. | 472 | /* Bounds check source. */ |
475 | * | 473 | if (args->offset > obj->size || args->size > obj->size - args->offset) { |
476 | * XXX: This could use review for overflow issues... | 474 | ret = -EINVAL; |
477 | */ | 475 | goto err; |
478 | if (args->offset > obj->size || args->size > obj->size || | 476 | } |
479 | args->offset + args->size > obj->size) { | 477 | |
480 | drm_gem_object_unreference_unlocked(obj); | 478 | if (!access_ok(VERIFY_WRITE, |
481 | return -EINVAL; | 479 | (char __user *)(uintptr_t)args->data_ptr, |
480 | args->size)) { | ||
481 | ret = -EFAULT; | ||
482 | goto err; | ||
482 | } | 483 | } |
483 | 484 | ||
484 | if (i915_gem_object_needs_bit17_swizzle(obj)) { | 485 | if (i915_gem_object_needs_bit17_swizzle(obj)) { |
@@ -490,8 +491,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
490 | file_priv); | 491 | file_priv); |
491 | } | 492 | } |
492 | 493 | ||
494 | err: | ||
493 | drm_gem_object_unreference_unlocked(obj); | 495 | drm_gem_object_unreference_unlocked(obj); |
494 | |||
495 | return ret; | 496 | return ret; |
496 | } | 497 | } |
497 | 498 | ||
@@ -580,8 +581,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
580 | 581 | ||
581 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 582 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
582 | remain = args->size; | 583 | remain = args->size; |
583 | if (!access_ok(VERIFY_READ, user_data, remain)) | ||
584 | return -EFAULT; | ||
585 | 584 | ||
586 | 585 | ||
587 | mutex_lock(&dev->struct_mutex); | 586 | mutex_lock(&dev->struct_mutex); |
@@ -934,14 +933,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
934 | return -ENOENT; | 933 | return -ENOENT; |
935 | obj_priv = to_intel_bo(obj); | 934 | obj_priv = to_intel_bo(obj); |
936 | 935 | ||
937 | /* Bounds check destination. | 936 | /* Bounds check destination. */ |
938 | * | 937 | if (args->offset > obj->size || args->size > obj->size - args->offset) { |
939 | * XXX: This could use review for overflow issues... | 938 | ret = -EINVAL; |
940 | */ | 939 | goto err; |
941 | if (args->offset > obj->size || args->size > obj->size || | 940 | } |
942 | args->offset + args->size > obj->size) { | 941 | |
943 | drm_gem_object_unreference_unlocked(obj); | 942 | if (!access_ok(VERIFY_READ, |
944 | return -EINVAL; | 943 | (char __user *)(uintptr_t)args->data_ptr, |
944 | args->size)) { | ||
945 | ret = -EFAULT; | ||
946 | goto err; | ||
945 | } | 947 | } |
946 | 948 | ||
947 | /* We can only do the GTT pwrite on untiled buffers, as otherwise | 949 | /* We can only do the GTT pwrite on untiled buffers, as otherwise |
@@ -975,8 +977,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
975 | DRM_INFO("pwrite failed %d\n", ret); | 977 | DRM_INFO("pwrite failed %d\n", ret); |
976 | #endif | 978 | #endif |
977 | 979 | ||
980 | err: | ||
978 | drm_gem_object_unreference_unlocked(obj); | 981 | drm_gem_object_unreference_unlocked(obj); |
979 | |||
980 | return ret; | 982 | return ret; |
981 | } | 983 | } |
982 | 984 | ||
@@ -2400,7 +2402,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) | |||
2400 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); | 2402 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); |
2401 | break; | 2403 | break; |
2402 | case 3: | 2404 | case 3: |
2403 | if (obj_priv->fence_reg > 8) | 2405 | if (obj_priv->fence_reg >= 8) |
2404 | fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; | 2406 | fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; |
2405 | else | 2407 | else |
2406 | case 2: | 2408 | case 2: |
@@ -3258,6 +3260,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3258 | (int) reloc->offset, | 3260 | (int) reloc->offset, |
3259 | reloc->read_domains, | 3261 | reloc->read_domains, |
3260 | reloc->write_domain); | 3262 | reloc->write_domain); |
3263 | drm_gem_object_unreference(target_obj); | ||
3264 | i915_gem_object_unpin(obj); | ||
3261 | return -EINVAL; | 3265 | return -EINVAL; |
3262 | } | 3266 | } |
3263 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || | 3267 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e85246ef691c..5c428fa3e0b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -93,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen | |||
93 | { | 93 | { |
94 | drm_i915_private_t *dev_priv = dev->dev_private; | 94 | drm_i915_private_t *dev_priv = dev->dev_private; |
95 | struct list_head eviction_list, unwind_list; | 95 | struct list_head eviction_list, unwind_list; |
96 | struct drm_i915_gem_object *obj_priv, *tmp_obj_priv; | 96 | struct drm_i915_gem_object *obj_priv; |
97 | struct list_head *render_iter, *bsd_iter; | 97 | struct list_head *render_iter, *bsd_iter; |
98 | int ret = 0; | 98 | int ret = 0; |
99 | 99 | ||
@@ -175,39 +175,34 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen | |||
175 | return -ENOSPC; | 175 | return -ENOSPC; |
176 | 176 | ||
177 | found: | 177 | found: |
178 | /* drm_mm doesn't allow any other other operations while | ||
179 | * scanning, therefore store to be evicted objects on a | ||
180 | * temporary list. */ | ||
178 | INIT_LIST_HEAD(&eviction_list); | 181 | INIT_LIST_HEAD(&eviction_list); |
179 | list_for_each_entry_safe(obj_priv, tmp_obj_priv, | 182 | while (!list_empty(&unwind_list)) { |
180 | &unwind_list, evict_list) { | 183 | obj_priv = list_first_entry(&unwind_list, |
184 | struct drm_i915_gem_object, | ||
185 | evict_list); | ||
181 | if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { | 186 | if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { |
182 | /* drm_mm doesn't allow any other other operations while | ||
183 | * scanning, therefore store to be evicted objects on a | ||
184 | * temporary list. */ | ||
185 | list_move(&obj_priv->evict_list, &eviction_list); | 187 | list_move(&obj_priv->evict_list, &eviction_list); |
186 | } else | 188 | continue; |
187 | drm_gem_object_unreference(&obj_priv->base); | 189 | } |
190 | list_del(&obj_priv->evict_list); | ||
191 | drm_gem_object_unreference(&obj_priv->base); | ||
188 | } | 192 | } |
189 | 193 | ||
190 | /* Unbinding will emit any required flushes */ | 194 | /* Unbinding will emit any required flushes */ |
191 | list_for_each_entry_safe(obj_priv, tmp_obj_priv, | 195 | while (!list_empty(&eviction_list)) { |
192 | &eviction_list, evict_list) { | 196 | obj_priv = list_first_entry(&eviction_list, |
193 | #if WATCH_LRU | 197 | struct drm_i915_gem_object, |
194 | DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base); | 198 | evict_list); |
195 | #endif | 199 | if (ret == 0) |
196 | ret = i915_gem_object_unbind(&obj_priv->base); | 200 | ret = i915_gem_object_unbind(&obj_priv->base); |
197 | if (ret) | 201 | list_del(&obj_priv->evict_list); |
198 | return ret; | ||
199 | |||
200 | drm_gem_object_unreference(&obj_priv->base); | 202 | drm_gem_object_unreference(&obj_priv->base); |
201 | } | 203 | } |
202 | 204 | ||
203 | /* The just created free hole should be on the top of the free stack | 205 | return ret; |
204 | * maintained by drm_mm, so this BUG_ON actually executes in O(1). | ||
205 | * Furthermore all accessed data has just recently been used, so it | ||
206 | * should be really fast, too. */ | ||
207 | BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size, | ||
208 | alignment, 0)); | ||
209 | |||
210 | return 0; | ||
211 | } | 206 | } |
212 | 207 | ||
213 | int | 208 | int |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b5bf51a4502d..979228594599 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1013,8 +1013,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
1013 | DRM_DEBUG_KMS("vblank wait timed out\n"); | 1013 | DRM_DEBUG_KMS("vblank wait timed out\n"); |
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | /** | 1016 | /* |
1017 | * intel_wait_for_vblank_off - wait for vblank after disabling a pipe | 1017 | * intel_wait_for_pipe_off - wait for pipe to turn off |
1018 | * @dev: drm device | 1018 | * @dev: drm device |
1019 | * @pipe: pipe to wait for | 1019 | * @pipe: pipe to wait for |
1020 | * | 1020 | * |
@@ -1022,25 +1022,39 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
1022 | * spinning on the vblank interrupt status bit, since we won't actually | 1022 | * spinning on the vblank interrupt status bit, since we won't actually |
1023 | * see an interrupt when the pipe is disabled. | 1023 | * see an interrupt when the pipe is disabled. |
1024 | * | 1024 | * |
1025 | * So this function waits for the display line value to settle (it | 1025 | * On Gen4 and above: |
1026 | * usually ends up stopping at the start of the next frame). | 1026 | * wait for the pipe register state bit to turn off |
1027 | * | ||
1028 | * Otherwise: | ||
1029 | * wait for the display line value to settle (it usually | ||
1030 | * ends up stopping at the start of the next frame). | ||
1031 | * | ||
1027 | */ | 1032 | */ |
1028 | void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) | 1033 | static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) |
1029 | { | 1034 | { |
1030 | struct drm_i915_private *dev_priv = dev->dev_private; | 1035 | struct drm_i915_private *dev_priv = dev->dev_private; |
1031 | int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); | 1036 | |
1032 | unsigned long timeout = jiffies + msecs_to_jiffies(100); | 1037 | if (INTEL_INFO(dev)->gen >= 4) { |
1033 | u32 last_line; | 1038 | int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF); |
1034 | 1039 | ||
1035 | /* Wait for the display line to settle */ | 1040 | /* Wait for the Pipe State to go off */ |
1036 | do { | 1041 | if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, |
1037 | last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; | 1042 | 100, 0)) |
1038 | mdelay(5); | 1043 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); |
1039 | } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && | 1044 | } else { |
1040 | time_after(timeout, jiffies)); | 1045 | u32 last_line; |
1041 | 1046 | int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); | |
1042 | if (time_after(jiffies, timeout)) | 1047 | unsigned long timeout = jiffies + msecs_to_jiffies(100); |
1043 | DRM_DEBUG_KMS("vblank wait timed out\n"); | 1048 | |
1049 | /* Wait for the display line to settle */ | ||
1050 | do { | ||
1051 | last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; | ||
1052 | mdelay(5); | ||
1053 | } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && | ||
1054 | time_after(timeout, jiffies)); | ||
1055 | if (time_after(jiffies, timeout)) | ||
1056 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); | ||
1057 | } | ||
1044 | } | 1058 | } |
1045 | 1059 | ||
1046 | /* Parameters have changed, update FBC info */ | 1060 | /* Parameters have changed, update FBC info */ |
@@ -2328,13 +2342,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2328 | I915_READ(dspbase_reg); | 2342 | I915_READ(dspbase_reg); |
2329 | } | 2343 | } |
2330 | 2344 | ||
2331 | /* Wait for vblank for the disable to take effect */ | ||
2332 | intel_wait_for_vblank_off(dev, pipe); | ||
2333 | |||
2334 | /* Don't disable pipe A or pipe A PLLs if needed */ | 2345 | /* Don't disable pipe A or pipe A PLLs if needed */ |
2335 | if (pipeconf_reg == PIPEACONF && | 2346 | if (pipeconf_reg == PIPEACONF && |
2336 | (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | 2347 | (dev_priv->quirks & QUIRK_PIPEA_FORCE)) { |
2348 | /* Wait for vblank for the disable to take effect */ | ||
2349 | intel_wait_for_vblank(dev, pipe); | ||
2337 | goto skip_pipe_off; | 2350 | goto skip_pipe_off; |
2351 | } | ||
2338 | 2352 | ||
2339 | /* Next, disable display pipes */ | 2353 | /* Next, disable display pipes */ |
2340 | temp = I915_READ(pipeconf_reg); | 2354 | temp = I915_READ(pipeconf_reg); |
@@ -2343,8 +2357,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2343 | I915_READ(pipeconf_reg); | 2357 | I915_READ(pipeconf_reg); |
2344 | } | 2358 | } |
2345 | 2359 | ||
2346 | /* Wait for vblank for the disable to take effect. */ | 2360 | /* Wait for the pipe to turn off */ |
2347 | intel_wait_for_vblank_off(dev, pipe); | 2361 | intel_wait_for_pipe_off(dev, pipe); |
2348 | 2362 | ||
2349 | temp = I915_READ(dpll_reg); | 2363 | temp = I915_READ(dpll_reg); |
2350 | if ((temp & DPLL_VCO_ENABLE) != 0) { | 2364 | if ((temp & DPLL_VCO_ENABLE) != 0) { |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1a51ee07de3e..9ab8708ac6ba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1138,18 +1138,14 @@ static bool | |||
1138 | intel_dp_set_link_train(struct intel_dp *intel_dp, | 1138 | intel_dp_set_link_train(struct intel_dp *intel_dp, |
1139 | uint32_t dp_reg_value, | 1139 | uint32_t dp_reg_value, |
1140 | uint8_t dp_train_pat, | 1140 | uint8_t dp_train_pat, |
1141 | uint8_t train_set[4], | 1141 | uint8_t train_set[4]) |
1142 | bool first) | ||
1143 | { | 1142 | { |
1144 | struct drm_device *dev = intel_dp->base.enc.dev; | 1143 | struct drm_device *dev = intel_dp->base.enc.dev; |
1145 | struct drm_i915_private *dev_priv = dev->dev_private; | 1144 | struct drm_i915_private *dev_priv = dev->dev_private; |
1146 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); | ||
1147 | int ret; | 1145 | int ret; |
1148 | 1146 | ||
1149 | I915_WRITE(intel_dp->output_reg, dp_reg_value); | 1147 | I915_WRITE(intel_dp->output_reg, dp_reg_value); |
1150 | POSTING_READ(intel_dp->output_reg); | 1148 | POSTING_READ(intel_dp->output_reg); |
1151 | if (first) | ||
1152 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1153 | 1149 | ||
1154 | intel_dp_aux_native_write_1(intel_dp, | 1150 | intel_dp_aux_native_write_1(intel_dp, |
1155 | DP_TRAINING_PATTERN_SET, | 1151 | DP_TRAINING_PATTERN_SET, |
@@ -1174,10 +1170,15 @@ intel_dp_link_train(struct intel_dp *intel_dp) | |||
1174 | uint8_t voltage; | 1170 | uint8_t voltage; |
1175 | bool clock_recovery = false; | 1171 | bool clock_recovery = false; |
1176 | bool channel_eq = false; | 1172 | bool channel_eq = false; |
1177 | bool first = true; | ||
1178 | int tries; | 1173 | int tries; |
1179 | u32 reg; | 1174 | u32 reg; |
1180 | uint32_t DP = intel_dp->DP; | 1175 | uint32_t DP = intel_dp->DP; |
1176 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); | ||
1177 | |||
1178 | /* Enable output, wait for it to become active */ | ||
1179 | I915_WRITE(intel_dp->output_reg, intel_dp->DP); | ||
1180 | POSTING_READ(intel_dp->output_reg); | ||
1181 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1181 | 1182 | ||
1182 | /* Write the link configuration data */ | 1183 | /* Write the link configuration data */ |
1183 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, | 1184 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, |
@@ -1210,9 +1211,8 @@ intel_dp_link_train(struct intel_dp *intel_dp) | |||
1210 | reg = DP | DP_LINK_TRAIN_PAT_1; | 1211 | reg = DP | DP_LINK_TRAIN_PAT_1; |
1211 | 1212 | ||
1212 | if (!intel_dp_set_link_train(intel_dp, reg, | 1213 | if (!intel_dp_set_link_train(intel_dp, reg, |
1213 | DP_TRAINING_PATTERN_1, train_set, first)) | 1214 | DP_TRAINING_PATTERN_1, train_set)) |
1214 | break; | 1215 | break; |
1215 | first = false; | ||
1216 | /* Set training pattern 1 */ | 1216 | /* Set training pattern 1 */ |
1217 | 1217 | ||
1218 | udelay(100); | 1218 | udelay(100); |
@@ -1266,8 +1266,7 @@ intel_dp_link_train(struct intel_dp *intel_dp) | |||
1266 | 1266 | ||
1267 | /* channel eq pattern */ | 1267 | /* channel eq pattern */ |
1268 | if (!intel_dp_set_link_train(intel_dp, reg, | 1268 | if (!intel_dp_set_link_train(intel_dp, reg, |
1269 | DP_TRAINING_PATTERN_2, train_set, | 1269 | DP_TRAINING_PATTERN_2, train_set)) |
1270 | false)) | ||
1271 | break; | 1270 | break; |
1272 | 1271 | ||
1273 | udelay(400); | 1272 | udelay(400); |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ad312ca6b3e5..8828b3ac6414 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -229,7 +229,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
229 | struct drm_crtc *crtc); | 229 | struct drm_crtc *crtc); |
230 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 230 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
231 | struct drm_file *file_priv); | 231 | struct drm_file *file_priv); |
232 | extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe); | ||
233 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); | 232 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); |
234 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); | 233 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); |
235 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | 234 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7bdc96256bf5..56ad9df2ccb5 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -237,8 +237,10 @@ int intel_fbdev_destroy(struct drm_device *dev, | |||
237 | drm_fb_helper_fini(&ifbdev->helper); | 237 | drm_fb_helper_fini(&ifbdev->helper); |
238 | 238 | ||
239 | drm_framebuffer_cleanup(&ifb->base); | 239 | drm_framebuffer_cleanup(&ifb->base); |
240 | if (ifb->obj) | 240 | if (ifb->obj) { |
241 | drm_gem_object_handle_unreference(ifb->obj); | ||
241 | drm_gem_object_unreference(ifb->obj); | 242 | drm_gem_object_unreference(ifb->obj); |
243 | } | ||
242 | 244 | ||
243 | return 0; | 245 | return 0; |
244 | } | 246 | } |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e8e902d614ed..ee73e428a84a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2170,8 +2170,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) | |||
2170 | return true; | 2170 | return true; |
2171 | 2171 | ||
2172 | err: | 2172 | err: |
2173 | intel_sdvo_destroy_enhance_property(connector); | 2173 | intel_sdvo_destroy(connector); |
2174 | kfree(intel_sdvo_connector); | ||
2175 | return false; | 2174 | return false; |
2176 | } | 2175 | } |
2177 | 2176 | ||
@@ -2243,8 +2242,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) | |||
2243 | return true; | 2242 | return true; |
2244 | 2243 | ||
2245 | err: | 2244 | err: |
2246 | intel_sdvo_destroy_enhance_property(connector); | 2245 | intel_sdvo_destroy(connector); |
2247 | kfree(intel_sdvo_connector); | ||
2248 | return false; | 2246 | return false; |
2249 | } | 2247 | } |
2250 | 2248 | ||
@@ -2522,11 +2520,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, | |||
2522 | uint16_t response; | 2520 | uint16_t response; |
2523 | } enhancements; | 2521 | } enhancements; |
2524 | 2522 | ||
2525 | if (!intel_sdvo_get_value(intel_sdvo, | 2523 | enhancements.response = 0; |
2526 | SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, | 2524 | intel_sdvo_get_value(intel_sdvo, |
2527 | &enhancements, sizeof(enhancements))) | 2525 | SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, |
2528 | return false; | 2526 | &enhancements, sizeof(enhancements)); |
2529 | |||
2530 | if (enhancements.response == 0) { | 2527 | if (enhancements.response == 0) { |
2531 | DRM_DEBUG_KMS("No enhancement is supported\n"); | 2528 | DRM_DEBUG_KMS("No enhancement is supported\n"); |
2532 | return true; | 2529 | return true; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 87186a4bbf03..fc737037f751 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector) | |||
558 | if (nv_encoder->dcb->type == OUTPUT_LVDS && | 558 | if (nv_encoder->dcb->type == OUTPUT_LVDS && |
559 | (nv_encoder->dcb->lvdsconf.use_straps_for_mode || | 559 | (nv_encoder->dcb->lvdsconf.use_straps_for_mode || |
560 | dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { | 560 | dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { |
561 | nv_connector->native_mode = drm_mode_create(dev); | 561 | struct drm_display_mode mode; |
562 | nouveau_bios_fp_mode(dev, nv_connector->native_mode); | 562 | |
563 | nouveau_bios_fp_mode(dev, &mode); | ||
564 | nv_connector->native_mode = drm_mode_duplicate(dev, &mode); | ||
563 | } | 565 | } |
564 | 566 | ||
565 | /* Find the native mode if this is a digital panel, if we didn't | 567 | /* Find the native mode if this is a digital panel, if we didn't |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index dbd30b2e43fd..d2047713dc59 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -352,6 +352,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) | |||
352 | 352 | ||
353 | if (nouveau_fb->nvbo) { | 353 | if (nouveau_fb->nvbo) { |
354 | nouveau_bo_unmap(nouveau_fb->nvbo); | 354 | nouveau_bo_unmap(nouveau_fb->nvbo); |
355 | drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem); | ||
355 | drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); | 356 | drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); |
356 | nouveau_fb->nvbo = NULL; | 357 | nouveau_fb->nvbo = NULL; |
357 | } | 358 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index ead7b8fc53fc..19620a6709f5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
167 | goto out; | 167 | goto out; |
168 | 168 | ||
169 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); | 169 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); |
170 | /* drop reference from allocate - handle holds it now */ | ||
171 | drm_gem_object_unreference_unlocked(nvbo->gem); | ||
170 | out: | 172 | out: |
171 | drm_gem_object_handle_unreference_unlocked(nvbo->gem); | ||
172 | |||
173 | if (ret) | ||
174 | drm_gem_object_unreference_unlocked(nvbo->gem); | ||
175 | return ret; | 173 | return ret; |
176 | } | 174 | } |
177 | 175 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 3ec181ff50ce..3c9964a8fbad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -79,6 +79,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan) | |||
79 | mutex_lock(&dev->struct_mutex); | 79 | mutex_lock(&dev->struct_mutex); |
80 | nouveau_bo_unpin(chan->notifier_bo); | 80 | nouveau_bo_unpin(chan->notifier_bo); |
81 | mutex_unlock(&dev->struct_mutex); | 81 | mutex_unlock(&dev->struct_mutex); |
82 | drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem); | ||
82 | drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); | 83 | drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); |
83 | drm_mm_takedown(&chan->notifier_heap); | 84 | drm_mm_takedown(&chan->notifier_heap); |
84 | } | 85 | } |
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 1bc72c3190a9..fe359a239df3 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS | |||
4999 | #define SW_I2C_CNTL_WRITE1BIT 6 | 4999 | #define SW_I2C_CNTL_WRITE1BIT 6 |
5000 | 5000 | ||
5001 | //==============================VESA definition Portion=============================== | 5001 | //==============================VESA definition Portion=============================== |
5002 | #define VESA_OEM_PRODUCT_REV '01.00' | 5002 | #define VESA_OEM_PRODUCT_REV "01.00" |
5003 | #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support | 5003 | #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support |
5004 | #define VESA_MODE_WIN_ATTRIBUTE 7 | 5004 | #define VESA_MODE_WIN_ATTRIBUTE 7 |
5005 | #define VESA_WIN_SIZE 64 | 5005 | #define VESA_WIN_SIZE 64 |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index afc18d87fdca..7a04959ba0ee 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2729,7 +2729,7 @@ int r600_ib_test(struct radeon_device *rdev) | |||
2729 | if (i < rdev->usec_timeout) { | 2729 | if (i < rdev->usec_timeout) { |
2730 | DRM_INFO("ib test succeeded in %u usecs\n", i); | 2730 | DRM_INFO("ib test succeeded in %u usecs\n", i); |
2731 | } else { | 2731 | } else { |
2732 | DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", | 2732 | DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", |
2733 | scratch, tmp); | 2733 | scratch, tmp); |
2734 | r = -EINVAL; | 2734 | r = -EINVAL; |
2735 | } | 2735 | } |
@@ -3528,7 +3528,8 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) | |||
3528 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read | 3528 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read |
3529 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL | 3529 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL |
3530 | */ | 3530 | */ |
3531 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { | 3531 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && |
3532 | rdev->vram_scratch.ptr) { | ||
3532 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; | 3533 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
3533 | u32 tmp; | 3534 | u32 tmp; |
3534 | 3535 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index ebae14c4b768..68932ba7b8a4 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -317,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
317 | *connector_type = DRM_MODE_CONNECTOR_DVID; | 317 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
318 | } | 318 | } |
319 | 319 | ||
320 | /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ | ||
321 | if ((dev->pdev->device == 0x796e) && | ||
322 | (dev->pdev->subsystem_vendor == 0x1462) && | ||
323 | (dev->pdev->subsystem_device == 0x7302)) { | ||
324 | if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || | ||
325 | (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) | ||
326 | return false; | ||
327 | } | ||
328 | |||
320 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ | 329 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ |
321 | if ((dev->pdev->device == 0x7941) && | 330 | if ((dev->pdev->device == 0x7941) && |
322 | (dev->pdev->subsystem_vendor == 0x147b) && | 331 | (dev->pdev->subsystem_vendor == 0x147b) && |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 127a395f70fb..b92d2f2fcbed 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
349 | DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); | 349 | DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); |
350 | if (devices & ATOM_DEVICE_DFP5_SUPPORT) | 350 | if (devices & ATOM_DEVICE_DFP5_SUPPORT) |
351 | DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); | 351 | DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); |
352 | if (devices & ATOM_DEVICE_DFP6_SUPPORT) | ||
353 | DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]); | ||
352 | if (devices & ATOM_DEVICE_TV1_SUPPORT) | 354 | if (devices & ATOM_DEVICE_TV1_SUPPORT) |
353 | DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); | 355 | DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); |
354 | if (devices & ATOM_DEVICE_CV_SUPPORT) | 356 | if (devices & ATOM_DEVICE_CV_SUPPORT) |
@@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
841 | { | 843 | { |
842 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); | 844 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); |
843 | 845 | ||
844 | if (radeon_fb->obj) | 846 | if (radeon_fb->obj) { |
845 | drm_gem_object_unreference_unlocked(radeon_fb->obj); | 847 | drm_gem_object_unreference_unlocked(radeon_fb->obj); |
848 | } | ||
846 | drm_framebuffer_cleanup(fb); | 849 | drm_framebuffer_cleanup(fb); |
847 | kfree(radeon_fb); | 850 | kfree(radeon_fb); |
848 | } | 851 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index c74a8b20d941..9cdf6a35bc2c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -94,8 +94,10 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) | |||
94 | ret = radeon_bo_reserve(rbo, false); | 94 | ret = radeon_bo_reserve(rbo, false); |
95 | if (likely(ret == 0)) { | 95 | if (likely(ret == 0)) { |
96 | radeon_bo_kunmap(rbo); | 96 | radeon_bo_kunmap(rbo); |
97 | radeon_bo_unpin(rbo); | ||
97 | radeon_bo_unreserve(rbo); | 98 | radeon_bo_unreserve(rbo); |
98 | } | 99 | } |
100 | drm_gem_object_handle_unreference(gobj); | ||
99 | drm_gem_object_unreference_unlocked(gobj); | 101 | drm_gem_object_unreference_unlocked(gobj); |
100 | } | 102 | } |
101 | 103 | ||
@@ -325,8 +327,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb | |||
325 | { | 327 | { |
326 | struct fb_info *info; | 328 | struct fb_info *info; |
327 | struct radeon_framebuffer *rfb = &rfbdev->rfb; | 329 | struct radeon_framebuffer *rfb = &rfbdev->rfb; |
328 | struct radeon_bo *rbo; | ||
329 | int r; | ||
330 | 330 | ||
331 | if (rfbdev->helper.fbdev) { | 331 | if (rfbdev->helper.fbdev) { |
332 | info = rfbdev->helper.fbdev; | 332 | info = rfbdev->helper.fbdev; |
@@ -338,14 +338,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb | |||
338 | } | 338 | } |
339 | 339 | ||
340 | if (rfb->obj) { | 340 | if (rfb->obj) { |
341 | rbo = rfb->obj->driver_private; | 341 | radeonfb_destroy_pinned_object(rfb->obj); |
342 | r = radeon_bo_reserve(rbo, false); | 342 | rfb->obj = NULL; |
343 | if (likely(r == 0)) { | ||
344 | radeon_bo_kunmap(rbo); | ||
345 | radeon_bo_unpin(rbo); | ||
346 | radeon_bo_unreserve(rbo); | ||
347 | } | ||
348 | drm_gem_object_unreference_unlocked(rfb->obj); | ||
349 | } | 343 | } |
350 | drm_fb_helper_fini(&rfbdev->helper); | 344 | drm_fb_helper_fini(&rfbdev->helper); |
351 | drm_framebuffer_cleanup(&rfb->base); | 345 | drm_framebuffer_cleanup(&rfb->base); |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c578f265b24c..d1e595d91723 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, | |||
201 | return r; | 201 | return r; |
202 | } | 202 | } |
203 | r = drm_gem_handle_create(filp, gobj, &handle); | 203 | r = drm_gem_handle_create(filp, gobj, &handle); |
204 | /* drop reference from allocate - handle holds it now */ | ||
205 | drm_gem_object_unreference_unlocked(gobj); | ||
204 | if (r) { | 206 | if (r) { |
205 | drm_gem_object_unreference_unlocked(gobj); | ||
206 | return r; | 207 | return r; |
207 | } | 208 | } |
208 | drm_gem_object_handle_unreference_unlocked(gobj); | ||
209 | args->handle = handle; | 209 | args->handle = handle; |
210 | return 0; | 210 | return 0; |
211 | } | 211 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 5eee3c41d124..8fbbe1c6ebbd 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -203,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
203 | */ | 203 | */ |
204 | int radeon_driver_firstopen_kms(struct drm_device *dev) | 204 | int radeon_driver_firstopen_kms(struct drm_device *dev) |
205 | { | 205 | { |
206 | struct radeon_device *rdev = dev->dev_private; | ||
207 | |||
208 | if (rdev->powered_down) | ||
209 | return -EINVAL; | ||
206 | return 0; | 210 | return 0; |
207 | } | 211 | } |
208 | 212 | ||
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7cffb3e04232..3451a82adba7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, | |||
351 | INIT_LIST_HEAD(&fbo->lru); | 351 | INIT_LIST_HEAD(&fbo->lru); |
352 | INIT_LIST_HEAD(&fbo->swap); | 352 | INIT_LIST_HEAD(&fbo->swap); |
353 | fbo->vm_node = NULL; | 353 | fbo->vm_node = NULL; |
354 | atomic_set(&fbo->cpu_writers, 0); | ||
354 | 355 | ||
355 | fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); | 356 | fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); |
356 | kref_init(&fbo->list_kref); | 357 | kref_init(&fbo->list_kref); |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ca904799f018..b1e02fffd3cc 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -69,7 +69,7 @@ struct ttm_page_pool { | |||
69 | spinlock_t lock; | 69 | spinlock_t lock; |
70 | bool fill_lock; | 70 | bool fill_lock; |
71 | struct list_head list; | 71 | struct list_head list; |
72 | int gfp_flags; | 72 | gfp_t gfp_flags; |
73 | unsigned npages; | 73 | unsigned npages; |
74 | char *name; | 74 | char *name; |
75 | unsigned long nfrees; | 75 | unsigned long nfrees; |
@@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages, | |||
475 | * This function is reentrant if caller updates count depending on number of | 475 | * This function is reentrant if caller updates count depending on number of |
476 | * pages returned in pages array. | 476 | * pages returned in pages array. |
477 | */ | 477 | */ |
478 | static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, | 478 | static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, |
479 | int ttm_flags, enum ttm_caching_state cstate, unsigned count) | 479 | int ttm_flags, enum ttm_caching_state cstate, unsigned count) |
480 | { | 480 | { |
481 | struct page **caching_array; | 481 | struct page **caching_array; |
@@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
666 | { | 666 | { |
667 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); | 667 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); |
668 | struct page *p = NULL; | 668 | struct page *p = NULL; |
669 | int gfp_flags = GFP_USER; | 669 | gfp_t gfp_flags = GFP_USER; |
670 | int r; | 670 | int r; |
671 | 671 | ||
672 | /* set zero flag for page allocation if required */ | 672 | /* set zero flag for page allocation if required */ |
@@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) | |||
818 | return 0; | 818 | return 0; |
819 | } | 819 | } |
820 | 820 | ||
821 | void ttm_page_alloc_fini() | 821 | void ttm_page_alloc_fini(void) |
822 | { | 822 | { |
823 | int i; | 823 | int i; |
824 | 824 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 72ec2e2b6e97..a96ed6d9d010 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = { | |||
148 | {0, 0, 0} | 148 | {0, 0, 0} |
149 | }; | 149 | }; |
150 | 150 | ||
151 | static char *vmw_devname = "vmwgfx"; | 151 | static int enable_fbdev; |
152 | 152 | ||
153 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); | 153 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
154 | static void vmw_master_init(struct vmw_master *); | 154 | static void vmw_master_init(struct vmw_master *); |
155 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | 155 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, |
156 | void *ptr); | 156 | void *ptr); |
157 | 157 | ||
158 | MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); | ||
159 | module_param_named(enable_fbdev, enable_fbdev, int, 0600); | ||
160 | |||
158 | static void vmw_print_capabilities(uint32_t capabilities) | 161 | static void vmw_print_capabilities(uint32_t capabilities) |
159 | { | 162 | { |
160 | DRM_INFO("Capabilities:\n"); | 163 | DRM_INFO("Capabilities:\n"); |
@@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
192 | { | 195 | { |
193 | int ret; | 196 | int ret; |
194 | 197 | ||
195 | vmw_kms_save_vga(dev_priv); | ||
196 | |||
197 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); | 198 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); |
198 | if (unlikely(ret != 0)) { | 199 | if (unlikely(ret != 0)) { |
199 | DRM_ERROR("Unable to initialize FIFO.\n"); | 200 | DRM_ERROR("Unable to initialize FIFO.\n"); |
@@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
206 | static void vmw_release_device(struct vmw_private *dev_priv) | 207 | static void vmw_release_device(struct vmw_private *dev_priv) |
207 | { | 208 | { |
208 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | 209 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
209 | vmw_kms_restore_vga(dev_priv); | ||
210 | } | 210 | } |
211 | 211 | ||
212 | int vmw_3d_resource_inc(struct vmw_private *dev_priv) | ||
213 | { | ||
214 | int ret = 0; | ||
215 | |||
216 | mutex_lock(&dev_priv->release_mutex); | ||
217 | if (unlikely(dev_priv->num_3d_resources++ == 0)) { | ||
218 | ret = vmw_request_device(dev_priv); | ||
219 | if (unlikely(ret != 0)) | ||
220 | --dev_priv->num_3d_resources; | ||
221 | } | ||
222 | mutex_unlock(&dev_priv->release_mutex); | ||
223 | return ret; | ||
224 | } | ||
225 | |||
226 | |||
227 | void vmw_3d_resource_dec(struct vmw_private *dev_priv) | ||
228 | { | ||
229 | int32_t n3d; | ||
230 | |||
231 | mutex_lock(&dev_priv->release_mutex); | ||
232 | if (unlikely(--dev_priv->num_3d_resources == 0)) | ||
233 | vmw_release_device(dev_priv); | ||
234 | n3d = (int32_t) dev_priv->num_3d_resources; | ||
235 | mutex_unlock(&dev_priv->release_mutex); | ||
236 | |||
237 | BUG_ON(n3d < 0); | ||
238 | } | ||
212 | 239 | ||
213 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | 240 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
214 | { | 241 | { |
@@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
228 | dev_priv->last_read_sequence = (uint32_t) -100; | 255 | dev_priv->last_read_sequence = (uint32_t) -100; |
229 | mutex_init(&dev_priv->hw_mutex); | 256 | mutex_init(&dev_priv->hw_mutex); |
230 | mutex_init(&dev_priv->cmdbuf_mutex); | 257 | mutex_init(&dev_priv->cmdbuf_mutex); |
258 | mutex_init(&dev_priv->release_mutex); | ||
231 | rwlock_init(&dev_priv->resource_lock); | 259 | rwlock_init(&dev_priv->resource_lock); |
232 | idr_init(&dev_priv->context_idr); | 260 | idr_init(&dev_priv->context_idr); |
233 | idr_init(&dev_priv->surface_idr); | 261 | idr_init(&dev_priv->surface_idr); |
@@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
244 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); | 272 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); |
245 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); | 273 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); |
246 | 274 | ||
275 | dev_priv->enable_fb = enable_fbdev; | ||
276 | |||
247 | mutex_lock(&dev_priv->hw_mutex); | 277 | mutex_lock(&dev_priv->hw_mutex); |
248 | 278 | ||
249 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); | 279 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
@@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
343 | 373 | ||
344 | dev->dev_private = dev_priv; | 374 | dev->dev_private = dev_priv; |
345 | 375 | ||
346 | if (!dev->devname) | ||
347 | dev->devname = vmw_devname; | ||
348 | |||
349 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | ||
350 | ret = drm_irq_install(dev); | ||
351 | if (unlikely(ret != 0)) { | ||
352 | DRM_ERROR("Failed installing irq: %d\n", ret); | ||
353 | goto out_no_irq; | ||
354 | } | ||
355 | } | ||
356 | |||
357 | ret = pci_request_regions(dev->pdev, "vmwgfx probe"); | 376 | ret = pci_request_regions(dev->pdev, "vmwgfx probe"); |
358 | dev_priv->stealth = (ret != 0); | 377 | dev_priv->stealth = (ret != 0); |
359 | if (dev_priv->stealth) { | 378 | if (dev_priv->stealth) { |
@@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
369 | goto out_no_device; | 388 | goto out_no_device; |
370 | } | 389 | } |
371 | } | 390 | } |
372 | ret = vmw_request_device(dev_priv); | 391 | ret = vmw_kms_init(dev_priv); |
373 | if (unlikely(ret != 0)) | 392 | if (unlikely(ret != 0)) |
374 | goto out_no_device; | 393 | goto out_no_kms; |
375 | vmw_kms_init(dev_priv); | ||
376 | vmw_overlay_init(dev_priv); | 394 | vmw_overlay_init(dev_priv); |
377 | vmw_fb_init(dev_priv); | 395 | if (dev_priv->enable_fb) { |
396 | ret = vmw_3d_resource_inc(dev_priv); | ||
397 | if (unlikely(ret != 0)) | ||
398 | goto out_no_fifo; | ||
399 | vmw_kms_save_vga(dev_priv); | ||
400 | vmw_fb_init(dev_priv); | ||
401 | DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? | ||
402 | "Detected device 3D availability.\n" : | ||
403 | "Detected no device 3D availability.\n"); | ||
404 | } else { | ||
405 | DRM_INFO("Delayed 3D detection since we're not " | ||
406 | "running the device in SVGA mode yet.\n"); | ||
407 | } | ||
408 | |||
409 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | ||
410 | ret = drm_irq_install(dev); | ||
411 | if (unlikely(ret != 0)) { | ||
412 | DRM_ERROR("Failed installing irq: %d\n", ret); | ||
413 | goto out_no_irq; | ||
414 | } | ||
415 | } | ||
378 | 416 | ||
379 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; | 417 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; |
380 | register_pm_notifier(&dev_priv->pm_nb); | 418 | register_pm_notifier(&dev_priv->pm_nb); |
381 | 419 | ||
382 | DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n"); | ||
383 | |||
384 | return 0; | 420 | return 0; |
385 | 421 | ||
386 | out_no_device: | ||
387 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | ||
388 | drm_irq_uninstall(dev_priv->dev); | ||
389 | if (dev->devname == vmw_devname) | ||
390 | dev->devname = NULL; | ||
391 | out_no_irq: | 422 | out_no_irq: |
423 | if (dev_priv->enable_fb) { | ||
424 | vmw_fb_close(dev_priv); | ||
425 | vmw_kms_restore_vga(dev_priv); | ||
426 | vmw_3d_resource_dec(dev_priv); | ||
427 | } | ||
428 | out_no_fifo: | ||
429 | vmw_overlay_close(dev_priv); | ||
430 | vmw_kms_close(dev_priv); | ||
431 | out_no_kms: | ||
432 | if (dev_priv->stealth) | ||
433 | pci_release_region(dev->pdev, 2); | ||
434 | else | ||
435 | pci_release_regions(dev->pdev); | ||
436 | out_no_device: | ||
392 | ttm_object_device_release(&dev_priv->tdev); | 437 | ttm_object_device_release(&dev_priv->tdev); |
393 | out_err4: | 438 | out_err4: |
394 | iounmap(dev_priv->mmio_virt); | 439 | iounmap(dev_priv->mmio_virt); |
@@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
415 | 460 | ||
416 | unregister_pm_notifier(&dev_priv->pm_nb); | 461 | unregister_pm_notifier(&dev_priv->pm_nb); |
417 | 462 | ||
418 | vmw_fb_close(dev_priv); | 463 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
464 | drm_irq_uninstall(dev_priv->dev); | ||
465 | if (dev_priv->enable_fb) { | ||
466 | vmw_fb_close(dev_priv); | ||
467 | vmw_kms_restore_vga(dev_priv); | ||
468 | vmw_3d_resource_dec(dev_priv); | ||
469 | } | ||
419 | vmw_kms_close(dev_priv); | 470 | vmw_kms_close(dev_priv); |
420 | vmw_overlay_close(dev_priv); | 471 | vmw_overlay_close(dev_priv); |
421 | vmw_release_device(dev_priv); | ||
422 | if (dev_priv->stealth) | 472 | if (dev_priv->stealth) |
423 | pci_release_region(dev->pdev, 2); | 473 | pci_release_region(dev->pdev, 2); |
424 | else | 474 | else |
425 | pci_release_regions(dev->pdev); | 475 | pci_release_regions(dev->pdev); |
426 | 476 | ||
427 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | ||
428 | drm_irq_uninstall(dev_priv->dev); | ||
429 | if (dev->devname == vmw_devname) | ||
430 | dev->devname = NULL; | ||
431 | ttm_object_device_release(&dev_priv->tdev); | 477 | ttm_object_device_release(&dev_priv->tdev); |
432 | iounmap(dev_priv->mmio_virt); | 478 | iounmap(dev_priv->mmio_virt); |
433 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, | 479 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, |
@@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | |||
500 | struct drm_ioctl_desc *ioctl = | 546 | struct drm_ioctl_desc *ioctl = |
501 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; | 547 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; |
502 | 548 | ||
503 | if (unlikely(ioctl->cmd != cmd)) { | 549 | if (unlikely(ioctl->cmd_drv != cmd)) { |
504 | DRM_ERROR("Invalid command format, ioctl %d\n", | 550 | DRM_ERROR("Invalid command format, ioctl %d\n", |
505 | nr - DRM_COMMAND_BASE); | 551 | nr - DRM_COMMAND_BASE); |
506 | return -EINVAL; | 552 | return -EINVAL; |
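The cmd_drv comparison above checks the complete ioctl command word, not just the command number; the word also encodes the transfer direction and the size of the argument struct, so userspace built against a mismatched struct layout is rejected with -EINVAL. A small userspace illustration of that encoding, using invented ex_* names rather than any real vmwgfx ioctl:

/* Hypothetical illustration: same ioctl number, different payload
 * size -> different command words, so a full-word compare catches
 * the mismatch even though the numbers agree. */
#include <stdio.h>
#include <linux/ioctl.h>

struct ex_arg_v1 { unsigned int handle; };
struct ex_arg_v2 { unsigned int handle; unsigned int flags; };

#define EX_IOCTL_V1 _IOWR('e', 0x01, struct ex_arg_v1)
#define EX_IOCTL_V2 _IOWR('e', 0x01, struct ex_arg_v2)

int main(void)
{
	printf("nr equal:  %d\n", _IOC_NR(EX_IOCTL_V1) == _IOC_NR(EX_IOCTL_V2));
	printf("cmd equal: %d\n", EX_IOCTL_V1 == EX_IOCTL_V2);
	return 0;
}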
@@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev, | |||
589 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 635 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
590 | int ret = 0; | 636 | int ret = 0; |
591 | 637 | ||
638 | if (!dev_priv->enable_fb) { | ||
639 | ret = vmw_3d_resource_inc(dev_priv); | ||
640 | if (unlikely(ret != 0)) | ||
641 | return ret; | ||
642 | vmw_kms_save_vga(dev_priv); | ||
643 | mutex_lock(&dev_priv->hw_mutex); | ||
644 | vmw_write(dev_priv, SVGA_REG_TRACES, 0); | ||
645 | mutex_unlock(&dev_priv->hw_mutex); | ||
646 | } | ||
647 | |||
592 | if (active) { | 648 | if (active) { |
593 | BUG_ON(active != &dev_priv->fbdev_master); | 649 | BUG_ON(active != &dev_priv->fbdev_master); |
594 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); | 650 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); |
@@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev, | |||
617 | return 0; | 673 | return 0; |
618 | 674 | ||
619 | out_no_active_lock: | 675 | out_no_active_lock: |
620 | vmw_release_device(dev_priv); | 676 | if (!dev_priv->enable_fb) { |
677 | mutex_lock(&dev_priv->hw_mutex); | ||
678 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | ||
679 | mutex_unlock(&dev_priv->hw_mutex); | ||
680 | vmw_kms_restore_vga(dev_priv); | ||
681 | vmw_3d_resource_dec(dev_priv); | ||
682 | } | ||
621 | return ret; | 683 | return ret; |
622 | } | 684 | } |
623 | 685 | ||
@@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev, | |||
645 | 707 | ||
646 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); | 708 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
647 | 709 | ||
710 | if (!dev_priv->enable_fb) { | ||
711 | ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
712 | if (unlikely(ret != 0)) | ||
713 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); | ||
714 | mutex_lock(&dev_priv->hw_mutex); | ||
715 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | ||
716 | mutex_unlock(&dev_priv->hw_mutex); | ||
717 | vmw_kms_restore_vga(dev_priv); | ||
718 | vmw_3d_resource_dec(dev_priv); | ||
719 | } | ||
720 | |||
648 | dev_priv->active_master = &dev_priv->fbdev_master; | 721 | dev_priv->active_master = &dev_priv->fbdev_master; |
649 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | 722 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
650 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); | 723 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); |
651 | 724 | ||
652 | vmw_fb_on(dev_priv); | 725 | if (dev_priv->enable_fb) |
726 | vmw_fb_on(dev_priv); | ||
653 | } | 727 | } |
654 | 728 | ||
655 | 729 | ||
@@ -722,6 +796,7 @@ static struct drm_driver driver = { | |||
722 | .irq_postinstall = vmw_irq_postinstall, | 796 | .irq_postinstall = vmw_irq_postinstall, |
723 | .irq_uninstall = vmw_irq_uninstall, | 797 | .irq_uninstall = vmw_irq_uninstall, |
724 | .irq_handler = vmw_irq_handler, | 798 | .irq_handler = vmw_irq_handler, |
799 | .get_vblank_counter = vmw_get_vblank_counter, | ||
725 | .reclaim_buffers_locked = NULL, | 800 | .reclaim_buffers_locked = NULL, |
726 | .get_map_ofs = drm_core_get_map_ofs, | 801 | .get_map_ofs = drm_core_get_map_ofs, |
727 | .get_reg_ofs = drm_core_get_reg_ofs, | 802 | .get_reg_ofs = drm_core_get_reg_ofs, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 429f917b60bf..58de6393f611 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -277,6 +277,7 @@ struct vmw_private { | |||
277 | 277 | ||
278 | bool stealth; | 278 | bool stealth; |
279 | bool is_opened; | 279 | bool is_opened; |
280 | bool enable_fb; | ||
280 | 281 | ||
281 | /** | 282 | /** |
282 | * Master management. | 283 | * Master management. |
@@ -285,6 +286,9 @@ struct vmw_private { | |||
285 | struct vmw_master *active_master; | 286 | struct vmw_master *active_master; |
286 | struct vmw_master fbdev_master; | 287 | struct vmw_master fbdev_master; |
287 | struct notifier_block pm_nb; | 288 | struct notifier_block pm_nb; |
289 | |||
290 | struct mutex release_mutex; | ||
291 | uint32_t num_3d_resources; | ||
288 | }; | 292 | }; |
289 | 293 | ||
290 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) | 294 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) |
@@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv, | |||
319 | return val; | 323 | return val; |
320 | } | 324 | } |
321 | 325 | ||
326 | int vmw_3d_resource_inc(struct vmw_private *dev_priv); | ||
327 | void vmw_3d_resource_dec(struct vmw_private *dev_priv); | ||
328 | |||
322 | /** | 329 | /** |
323 | * GMR utilities - vmwgfx_gmr.c | 330 | * GMR utilities - vmwgfx_gmr.c |
324 | */ | 331 | */ |
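vmw_3d_resource_inc()/vmw_3d_resource_dec() are only declared in the hunk above; their bodies are not part of this excerpt. A plausible sketch of such a first-user/last-user gate, assuming it serializes on the release_mutex and num_3d_resources fields added to struct vmw_private and brings the device up and down via the vmw_request_device()/vmw_release_device() calls that this patch removes from the load/unload paths:

/* Plausible sketch only -- not the driver's actual bodies.  Assumes the
 * vmwgfx_drv.h declarations above. */
int vmw_3d_resource_inc(struct vmw_private *dev_priv)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (dev_priv->num_3d_resources++ == 0) {
		/* First user: bring up the device/FIFO. */
		ret = vmw_request_device(dev_priv);
		if (ret != 0)
			--dev_priv->num_3d_resources;
	}
	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}

void vmw_3d_resource_dec(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->release_mutex);
	if (--dev_priv->num_3d_resources == 0)
		/* Last user gone: release the device/FIFO. */
		vmw_release_device(dev_priv);
	mutex_unlock(&dev_priv->release_mutex);
}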
@@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv, | |||
511 | unsigned bbp, unsigned depth); | 518 | unsigned bbp, unsigned depth); |
512 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | 519 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, |
513 | struct drm_file *file_priv); | 520 | struct drm_file *file_priv); |
521 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); | ||
514 | 522 | ||
515 | /** | 523 | /** |
516 | * Overlay control - vmwgfx_overlay.c | 524 | * Overlay control - vmwgfx_overlay.c |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 870967a97c15..409e172f4abf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
@@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | |||
615 | if (unlikely(ret != 0)) | 615 | if (unlikely(ret != 0)) |
616 | goto err_unlock; | 616 | goto err_unlock; |
617 | 617 | ||
618 | if (bo->mem.mem_type == TTM_PL_VRAM && | ||
619 | bo->mem.mm_node->start < bo->num_pages) | ||
620 | (void) ttm_bo_validate(bo, &vmw_sys_placement, false, | ||
621 | false, false); | ||
622 | |||
618 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); | 623 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); |
619 | 624 | ||
620 | /* Could probably bug on */ | 625 | /* Could probably bug on */ |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index e6a1eb7ea954..0fe31766e4cf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
106 | mutex_lock(&dev_priv->hw_mutex); | 106 | mutex_lock(&dev_priv->hw_mutex); |
107 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); | 107 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); |
108 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); | 108 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); |
109 | dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); | ||
109 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); | 110 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); |
110 | 111 | ||
111 | min = 4; | 112 | min = 4; |
@@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
175 | dev_priv->config_done_state); | 176 | dev_priv->config_done_state); |
176 | vmw_write(dev_priv, SVGA_REG_ENABLE, | 177 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
177 | dev_priv->enable_state); | 178 | dev_priv->enable_state); |
179 | vmw_write(dev_priv, SVGA_REG_TRACES, | ||
180 | dev_priv->traces_state); | ||
178 | 181 | ||
179 | mutex_unlock(&dev_priv->hw_mutex); | 182 | mutex_unlock(&dev_priv->hw_mutex); |
180 | vmw_fence_queue_takedown(&fifo->fence_queue); | 183 | vmw_fence_queue_takedown(&fifo->fence_queue); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 64d7f47df868..e882ba099f0c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv) | |||
898 | save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); | 898 | save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); |
899 | save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); | 899 | save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); |
900 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | 900 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); |
901 | if (i == 0 && vmw_priv->num_displays == 1 && | ||
902 | save->width == 0 && save->height == 0) { | ||
903 | |||
904 | /* | ||
905 | * It should be fairly safe to assume that these | ||
906 | * values are uninitialized. | ||
907 | */ | ||
908 | |||
909 | save->width = vmw_priv->vga_width - save->pos_x; | ||
910 | save->height = vmw_priv->vga_height - save->pos_y; | ||
911 | } | ||
901 | } | 912 | } |
913 | |||
902 | return 0; | 914 | return 0; |
903 | } | 915 | } |
904 | 916 | ||
@@ -984,3 +996,8 @@ out_unlock: | |||
984 | ttm_read_unlock(&vmaster->lock); | 996 | ttm_read_unlock(&vmaster->lock); |
985 | return ret; | 997 | return ret; |
986 | } | 998 | } |
999 | |||
1000 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) | ||
1001 | { | ||
1002 | return 0; | ||
1003 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 7083b1a24df3..11cb39e3accb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -27,6 +27,8 @@ | |||
27 | 27 | ||
28 | #include "vmwgfx_kms.h" | 28 | #include "vmwgfx_kms.h" |
29 | 29 | ||
30 | #define VMWGFX_LDU_NUM_DU 8 | ||
31 | |||
30 | #define vmw_crtc_to_ldu(x) \ | 32 | #define vmw_crtc_to_ldu(x) \ |
31 | container_of(x, struct vmw_legacy_display_unit, base.crtc) | 33 | container_of(x, struct vmw_legacy_display_unit, base.crtc) |
32 | #define vmw_encoder_to_ldu(x) \ | 34 | #define vmw_encoder_to_ldu(x) \ |
@@ -536,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
536 | 538 | ||
537 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | 539 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) |
538 | { | 540 | { |
541 | struct drm_device *dev = dev_priv->dev; | ||
542 | int i; | ||
543 | int ret; | ||
544 | |||
539 | if (dev_priv->ldu_priv) { | 545 | if (dev_priv->ldu_priv) { |
540 | DRM_INFO("ldu system already on\n"); | 546 | DRM_INFO("ldu system already on\n"); |
541 | return -EINVAL; | 547 | return -EINVAL; |
@@ -553,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | |||
553 | 559 | ||
554 | drm_mode_create_dirty_info_property(dev_priv->dev); | 560 | drm_mode_create_dirty_info_property(dev_priv->dev); |
555 | 561 | ||
556 | vmw_ldu_init(dev_priv, 0); | ||
557 | /* for old hardware without multimon only enable one display */ | ||
558 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { | 562 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { |
559 | vmw_ldu_init(dev_priv, 1); | 563 | for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i) |
560 | vmw_ldu_init(dev_priv, 2); | 564 | vmw_ldu_init(dev_priv, i); |
561 | vmw_ldu_init(dev_priv, 3); | 565 | ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU); |
562 | vmw_ldu_init(dev_priv, 4); | 566 | } else { |
563 | vmw_ldu_init(dev_priv, 5); | 567 | /* for old hardware without multimon only enable one display */ |
564 | vmw_ldu_init(dev_priv, 6); | 568 | vmw_ldu_init(dev_priv, 0); |
565 | vmw_ldu_init(dev_priv, 7); | 569 | ret = drm_vblank_init(dev, 1); |
566 | } | 570 | } |
567 | 571 | ||
568 | return 0; | 572 | return ret; |
569 | } | 573 | } |
570 | 574 | ||
571 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) | 575 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) |
572 | { | 576 | { |
577 | struct drm_device *dev = dev_priv->dev; | ||
578 | |||
579 | drm_vblank_cleanup(dev); | ||
573 | if (!dev_priv->ldu_priv) | 580 | if (!dev_priv->ldu_priv) |
574 | return -ENOSYS; | 581 | return -ENOSYS; |
575 | 582 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 5f2d5df01e5c..c8c40e9979db 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
211 | cmd->body.cid = cpu_to_le32(res->id); | 211 | cmd->body.cid = cpu_to_le32(res->id); |
212 | 212 | ||
213 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 213 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
214 | vmw_3d_resource_dec(dev_priv); | ||
214 | } | 215 | } |
215 | 216 | ||
216 | static int vmw_context_init(struct vmw_private *dev_priv, | 217 | static int vmw_context_init(struct vmw_private *dev_priv, |
@@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv, | |||
247 | cmd->body.cid = cpu_to_le32(res->id); | 248 | cmd->body.cid = cpu_to_le32(res->id); |
248 | 249 | ||
249 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 250 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
251 | (void) vmw_3d_resource_inc(dev_priv); | ||
250 | vmw_resource_activate(res, vmw_hw_context_destroy); | 252 | vmw_resource_activate(res, vmw_hw_context_destroy); |
251 | return 0; | 253 | return 0; |
252 | } | 254 | } |
@@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) | |||
406 | cmd->body.sid = cpu_to_le32(res->id); | 408 | cmd->body.sid = cpu_to_le32(res->id); |
407 | 409 | ||
408 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 410 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
411 | vmw_3d_resource_dec(dev_priv); | ||
409 | } | 412 | } |
410 | 413 | ||
411 | void vmw_surface_res_free(struct vmw_resource *res) | 414 | void vmw_surface_res_free(struct vmw_resource *res) |
@@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv, | |||
473 | } | 476 | } |
474 | 477 | ||
475 | vmw_fifo_commit(dev_priv, submit_size); | 478 | vmw_fifo_commit(dev_priv, submit_size); |
479 | (void) vmw_3d_resource_inc(dev_priv); | ||
476 | vmw_resource_activate(res, vmw_hw_surface_destroy); | 480 | vmw_resource_activate(res, vmw_hw_surface_destroy); |
477 | return 0; | 481 | return 0; |
478 | } | 482 | } |
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index b87569e96b16..f366f968155a 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c | |||
@@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev, | |||
598 | pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); | 598 | pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); |
599 | } | 599 | } |
600 | 600 | ||
601 | void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) | 601 | static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) |
602 | { | 602 | { |
603 | struct vga_device *vgadev; | 603 | struct vga_device *vgadev; |
604 | unsigned long flags; | 604 | unsigned long flags; |