Diffstat (limited to 'drivers/gpu/drm')
67 files changed, 1323 insertions, 766 deletions
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 55d03ed05000..529a0dbe9fc6 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc);
  * user_data: A pointer the data that is copied to the buffer.
  * size: The Number of bytes to copy.
  */
-extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
+int drm_buffer_copy_from_user(struct drm_buffer *buf,
 		void __user *user_data, int size)
 {
 	int nr_pages = size / PAGE_SIZE + 1;
 	int idx;
@@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
 {
 	int idx = drm_buffer_index(buf);
 	int page = drm_buffer_page(buf);
-	void *obj = 0;
+	void *obj = NULL;
 
 	if (idx + objsize <= PAGE_SIZE) {
 		obj = &buf->data[page][idx];
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7e31d4348340..dcbeb98f195a 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,6 +34,9 @@
 #include "drm_crtc_helper.h"
 #include "drm_fb_helper.h"
 
+static bool drm_kms_helper_poll = true;
+module_param_named(poll, drm_kms_helper_poll, bool, 0600);
+
 static void drm_mode_validate_flag(struct drm_connector *connector,
 				   int flags)
 {
@@ -99,8 +102,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 			connector->status = connector_status_disconnected;
 		if (connector->funcs->force)
 			connector->funcs->force(connector);
-	} else
-		connector->status = connector->funcs->detect(connector);
+	} else {
+		connector->status = connector->funcs->detect(connector, true);
+		drm_kms_helper_poll_enable(dev);
+	}
 
 	if (connector->status == connector_status_disconnected) {
 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -110,11 +115,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	}
 
 	count = (*connector_funcs->get_modes)(connector);
-	if (!count) {
+	if (count == 0 && connector->status == connector_status_connected)
 		count = drm_add_modes_noedid(connector, 1024, 768);
-		if (!count)
-			return 0;
-	}
+	if (count == 0)
+		goto prune;
 
 	drm_mode_connector_list_update(connector);
 
@@ -633,13 +637,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		mode_changed = true;
 
 	if (mode_changed) {
-		old_fb = set->crtc->fb;
-		set->crtc->fb = set->fb;
 		set->crtc->enabled = (set->mode != NULL);
 		if (set->mode != NULL) {
 			DRM_DEBUG_KMS("attempting to set mode from"
 					" userspace\n");
 			drm_mode_debug_printmodeline(set->mode);
+			old_fb = set->crtc->fb;
+			set->crtc->fb = set->fb;
 			if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
 						      set->x, set->y,
 						      old_fb)) {
@@ -840,6 +844,9 @@ static void output_poll_execute(struct work_struct *work)
 	enum drm_connector_status old_status, status;
 	bool repoll = false, changed = false;
 
+	if (!drm_kms_helper_poll)
+		return;
+
 	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
@@ -859,7 +866,7 @@ static void output_poll_execute(struct work_struct *work)
 		    !(connector->polled & DRM_CONNECTOR_POLL_HPD))
 			continue;
 
-		status = connector->funcs->detect(connector);
+		status = connector->funcs->detect(connector, false);
 		if (old_status != status)
 			changed = true;
 	}
@@ -890,6 +897,9 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
 	bool poll = false;
 	struct drm_connector *connector;
 
+	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+		return;
+
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		if (connector->polled)
 			poll = true;
@@ -919,8 +929,10 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 {
 	if (!dev->mode_config.poll_enabled)
 		return;
+
 	/* kill timer and schedule immediate execution, this doesn't block */
 	cancel_delayed_work(&dev->mode_config.output_poll_work);
-	queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+	if (drm_kms_helper_poll)
+		queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index bf92d07510df..5663d2719063 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -148,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev,
 		return -ENOMEM;
 
 	kref_init(&obj->refcount);
-	kref_init(&obj->handlecount);
+	atomic_set(&obj->handle_count, 0);
 	obj->size = size;
 
 	atomic_inc(&dev->object_count);
@@ -462,28 +462,6 @@ drm_gem_object_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
-/**
- * Called after the last reference to the object has been lost.
- * Must be called without holding struct_mutex
- *
- * Frees the object
- */
-void
-drm_gem_object_free_unlocked(struct kref *kref)
-{
-	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
-	struct drm_device *dev = obj->dev;
-
-	if (dev->driver->gem_free_object_unlocked != NULL)
-		dev->driver->gem_free_object_unlocked(obj);
-	else if (dev->driver->gem_free_object != NULL) {
-		mutex_lock(&dev->struct_mutex);
-		dev->driver->gem_free_object(obj);
-		mutex_unlock(&dev->struct_mutex);
-	}
-}
-EXPORT_SYMBOL(drm_gem_object_free_unlocked);
-
 static void drm_gem_object_ref_bug(struct kref *list_kref)
 {
 	BUG();
@@ -496,12 +474,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref)
  * called before drm_gem_object_free or we'll be touching
  * freed memory
  */
-void
-drm_gem_object_handle_free(struct kref *kref)
+void drm_gem_object_handle_free(struct drm_gem_object *obj)
 {
-	struct drm_gem_object *obj = container_of(kref,
-						  struct drm_gem_object,
-						  handlecount);
 	struct drm_device *dev = obj->dev;
 
 	/* Remove any name for this object */
@@ -528,6 +502,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
 	struct drm_gem_object *obj = vma->vm_private_data;
 
 	drm_gem_object_reference(obj);
+
+	mutex_lock(&obj->dev->struct_mutex);
+	drm_vm_open_locked(vma);
+	mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
 
@@ -535,7 +513,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 
-	drm_gem_object_unreference_unlocked(obj);
+	mutex_lock(&obj->dev->struct_mutex);
+	drm_vm_close_locked(vma);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 2ef2c7827243..974e970ce3f8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data)
 
 	seq_printf(m, "%6d %8zd %7d %8d\n",
 		   obj->name, obj->size,
-		   atomic_read(&obj->handlecount.refcount),
+		   atomic_read(&obj->handle_count),
 		   atomic_read(&obj->refcount.refcount));
 	return 0;
 }
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index e20f78b542a7..f5bd9e590c80 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -164,6 +164,8 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 	dev->hose = pdev->sysdata;
 #endif
 
+	mutex_lock(&drm_global_mutex);
+
 	if ((ret = drm_fill_in_dev(dev, ent, driver))) {
 		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
 		goto err_g2;
@@ -199,6 +201,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
 		 driver->date, pci_name(pdev), dev->primary->index);
 
+	mutex_unlock(&drm_global_mutex);
 	return 0;
 
 err_g4:
@@ -210,6 +213,7 @@ err_g2:
 	pci_disable_device(pdev);
 err_g1:
 	kfree(dev);
+	mutex_unlock(&drm_global_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 460e9a3afa8d..92d1d0fb7b75 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -53,6 +53,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
 	dev->platformdev = platdev;
 	dev->dev = &platdev->dev;
 
+	mutex_lock(&drm_global_mutex);
+
 	ret = drm_fill_in_dev(dev, NULL, driver);
 
 	if (ret) {
@@ -87,6 +89,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
 
 	list_add_tail(&dev->driver_item, &driver->device_list);
 
+	mutex_unlock(&drm_global_mutex);
+
 	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
 		 driver->date, dev->primary->index);
@@ -100,6 +104,7 @@ err_g2:
 	drm_put_minor(&dev->control);
 err_g1:
 	kfree(dev);
+	mutex_unlock(&drm_global_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(drm_get_platform_dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 86118a742231..85da4c40694c 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -159,7 +159,7 @@ static ssize_t status_show(struct device *device,
 	struct drm_connector *connector = to_drm_connector(device);
 	enum drm_connector_status status;
 
-	status = connector->funcs->detect(connector);
+	status = connector->funcs->detect(connector, true);
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			drm_get_connector_status_name(status));
 }
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index fda67468e603..5df450683aab 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-/**
- * \c close method for all virtual memory types.
- *
- * \param vma virtual memory area.
- *
- * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
- * free it.
- */
-static void drm_vm_close(struct vm_area_struct *vma)
+void drm_vm_close_locked(struct vm_area_struct *vma)
 {
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
@@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
 		  vma->vm_start, vma->vm_end - vma->vm_start);
 	atomic_dec(&dev->vma_count);
 
-	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
 		if (pt->vma == vma) {
 			list_del(&pt->head);
@@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma)
 			break;
 		}
 	}
+}
+
+/**
+ * \c close method for all virtual memory types.
+ *
+ * \param vma virtual memory area.
+ *
+ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
+ * free it.
+ */
+static void drm_vm_close(struct vm_area_struct *vma)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_vm_close_locked(vma);
 	mutex_unlock(&dev->struct_mutex);
 }
 
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 61b4caf220fa..fb07e73581e8 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 static const struct file_operations i810_buffer_fops = {
 	.open = drm_open,
 	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
+	.unlocked_ioctl = i810_ioctl,
 	.mmap = i810_mmap_buffers,
 	.fasync = drm_fasync,
 };
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 671aa18415ac..cc92c7e6236f 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 static const struct file_operations i830_buffer_fops = {
 	.open = drm_open,
 	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
+	.unlocked_ioctl = i830_ioctl,
 	.mmap = i830_mmap_buffers,
 	.fasync = drm_fasync,
 };
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 92d5605a34d1..5e43d7076789 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -31,6 +31,7 @@
 #include <linux/slab.h>
 #include "drmP.h"
 #include "drm.h"
+#include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 
@@ -121,6 +122,54 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	unsigned long flags;
+	struct intel_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+		const char *pipe = crtc->pipe ? "B" : "A";
+		const char *plane = crtc->plane ? "B" : "A";
+		struct intel_unpin_work *work;
+
+		spin_lock_irqsave(&dev->event_lock, flags);
+		work = crtc->unpin_work;
+		if (work == NULL) {
+			seq_printf(m, "No flip due on pipe %s (plane %s)\n",
+				   pipe, plane);
+		} else {
+			if (!work->pending) {
+				seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
+					   pipe, plane);
+			} else {
+				seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
+					   pipe, plane);
+			}
+			if (work->enable_stall_check)
+				seq_printf(m, "Stall check enabled, ");
+			else
+				seq_printf(m, "Stall check waiting for page flip ioctl, ");
+			seq_printf(m, "%d prepares\n", work->pending);
+
+			if (work->old_fb_obj) {
+				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
+				if(obj_priv)
+					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+			}
+			if (work->pending_flip_obj) {
+				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
+				if(obj_priv)
+					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+			}
+		}
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+
+	return 0;
+}
+
 static int i915_gem_request_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -777,6 +826,7 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
 	{"i915_gem_request", i915_gem_request_info, 0},
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a7ec93e62f81..c74e4e8006d4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -620,8 +620,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 		ret = copy_from_user(cliprects, batch->cliprects,
 				     batch->num_cliprects *
 				     sizeof(struct drm_clip_rect));
-		if (ret != 0)
+		if (ret != 0) {
+			ret = -EFAULT;
 			goto fail_free;
+		}
 	}
 
 	mutex_lock(&dev->struct_mutex);
@@ -662,8 +664,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 		return -ENOMEM;
 
 	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
-	if (ret != 0)
+	if (ret != 0) {
+		ret = -EFAULT;
 		goto fail_batch_free;
+	}
 
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
@@ -676,8 +680,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 		ret = copy_from_user(cliprects, cmdbuf->cliprects,
 				     cmdbuf->num_cliprects *
 				     sizeof(struct drm_clip_rect));
-		if (ret != 0)
+		if (ret != 0) {
+			ret = -EFAULT;
 			goto fail_clip_free;
+		}
 	}
 
 	mutex_lock(&dev->struct_mutex);
@@ -885,7 +891,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp_lo, temp_hi = 0;
 	u64 mchbar_addr;
-	int ret = 0;
+	int ret;
 
 	if (IS_I965G(dev))
 		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
@@ -895,22 +901,23 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
 #ifdef CONFIG_PNP
 	if (mchbar_addr &&
-	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
-		ret = 0;
-		goto out;
-	}
+	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+		return 0;
#endif
 
 	/* Get some space for it */
-	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
+	dev_priv->mch_res.name = "i915 MCHBAR";
+	dev_priv->mch_res.flags = IORESOURCE_MEM;
+	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+				     &dev_priv->mch_res,
 				     MCHBAR_SIZE, MCHBAR_SIZE,
 				     PCIBIOS_MIN_MEM,
 				     0, pcibios_align_resource,
 				     dev_priv->bridge_dev);
 	if (ret) {
 		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
 		dev_priv->mch_res.start = 0;
-		goto out;
+		return ret;
 	}
 
 	if (IS_I965G(dev))
@@ -919,8 +926,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 
 	pci_write_config_dword(dev_priv->bridge_dev, reg,
 			       lower_32_bits(dev_priv->mch_res.start));
-out:
-	return ret;
+	return 0;
 }
 
 /* Setup MCHBAR if possible, return true if we should disable it again */
@@ -1781,9 +1787,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 		}
 	}
 
-	div_u64(diff, diff1);
+	diff = div_u64(diff, diff1);
 	ret = ((m * diff) + c);
-	div_u64(ret, 10);
+	ret = div_u64(ret, 10);
 
 	dev_priv->last_count1 = total_count;
 	dev_priv->last_time1 = now;
@@ -1852,7 +1858,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 
 	/* More magic constants... */
 	diff = diff * 1181;
-	div_u64(diff, diffms * 10);
+	diff = div_u64(diff, diffms * 10);
 	dev_priv->gfx_power = diff;
 }
 
@@ -2082,6 +2088,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto free_priv;
 	}
 
+	/* overlay on gen2 is broken and can't address above 1G */
+	if (IS_GEN2(dev))
+		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+
 	dev_priv->regs = ioremap(base, size);
 	if (!dev_priv->regs) {
 		DRM_ERROR("failed to map registers\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 00befce8fbb7..6dbe14cc4f74 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -61,91 +61,86 @@ extern int intel_agp_enabled;
 		.driver_data = (unsigned long) info }
 
 static const struct intel_device_info intel_i830_info = {
-	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+	.gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_845g_info = {
-	.is_i8xx = 1,
+	.gen = 2, .is_i8xx = 1,
 };
 
 static const struct intel_device_info intel_i85x_info = {
-	.is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+	.gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
 	.cursor_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i865g_info = {
-	.is_i8xx = 1,
+	.gen = 2, .is_i8xx = 1,
 };
 
 static const struct intel_device_info intel_i915g_info = {
-	.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+	.gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915gm_info = {
-	.is_i9xx = 1, .is_mobile = 1,
+	.gen = 3, .is_i9xx = 1, .is_mobile = 1,
 	.cursor_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945g_info = {
-	.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+	.gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945gm_info = {
-	.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+	.gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i965g_info = {
-	.is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+	.gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+	.has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-	.is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
-	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
-	.has_hotplug = 1,
+	.gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
-	.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-	.has_hotplug = 1,
+	.gen = 3, .is_g33 = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_g45_info = {
-	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-	.has_pipe_cxsr = 1,
-	.has_hotplug = 1,
+	.gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_pipe_cxsr = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_gm45_info = {
-	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
+	.gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
-	.has_pipe_cxsr = 1,
-	.has_hotplug = 1,
+	.has_pipe_cxsr = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_pineview_info = {
-	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
-	.need_gfx_hws = 1,
-	.has_hotplug = 1,
+	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_ironlake_d_info = {
-	.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-	.has_pipe_cxsr = 1,
-	.has_hotplug = 1,
+	.gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
-	.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
-	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
-	.has_hotplug = 1,
+	.gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
-	.is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-	.has_hotplug = 1, .is_gen6 = 1,
+	.gen = 6, .is_i965g = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
-	.is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-	.has_hotplug = 1, .is_gen6 = 1,
+	.gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {		/* aka */
@@ -175,13 +170,18 @@ static const struct pci_device_id pciidlist[] = {	/* aka */
 	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
 	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
 	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
+	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
 	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
 	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
 	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
 	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
 	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
+	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
 	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
+	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
 	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
+	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
 	{0, 0, 0}
 };
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 047cd7ce7e1b..af4a263cf257 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -191,6 +191,7 @@ struct drm_i915_display_funcs {
 };
 
 struct intel_device_info {
+	u8 gen;
 	u8 is_mobile : 1;
 	u8 is_i8xx : 1;
 	u8 is_i85x : 1;
@@ -206,7 +207,6 @@ struct intel_device_info {
 	u8 is_broadwater : 1;
 	u8 is_crestline : 1;
 	u8 is_ironlake : 1;
-	u8 is_gen6 : 1;
 	u8 has_fbc : 1;
 	u8 has_rc6 : 1;
 	u8 has_pipe_cxsr : 1;
@@ -1162,7 +1162,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_845G(dev)		((dev)->pci_device == 0x2562)
 #define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
 #define IS_I865G(dev)		((dev)->pci_device == 0x2572)
-#define IS_GEN2(dev)		(INTEL_INFO(dev)->is_i8xx)
 #define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
 #define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
 #define IS_I945G(dev)		((dev)->pci_device == 0x2772)
@@ -1181,27 +1180,13 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
 #define IS_IRONLAKE(dev)	(INTEL_INFO(dev)->is_ironlake)
 #define IS_I9XX(dev)		(INTEL_INFO(dev)->is_i9xx)
-#define IS_GEN6(dev)		(INTEL_INFO(dev)->is_gen6)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 
-#define IS_GEN3(dev)	(IS_I915G(dev) || \
-			 IS_I915GM(dev) || \
-			 IS_I945G(dev) || \
-			 IS_I945GM(dev) || \
-			 IS_G33(dev) || \
-			 IS_PINEVIEW(dev))
-#define IS_GEN4(dev)	((dev)->pci_device == 0x2972 || \
-			 (dev)->pci_device == 0x2982 || \
-			 (dev)->pci_device == 0x2992 || \
-			 (dev)->pci_device == 0x29A2 || \
-			 (dev)->pci_device == 0x2A02 || \
-			 (dev)->pci_device == 0x2A12 || \
-			 (dev)->pci_device == 0x2E02 || \
-			 (dev)->pci_device == 0x2E12 || \
-			 (dev)->pci_device == 0x2E22 || \
-			 (dev)->pci_device == 0x2E32 || \
-			 (dev)->pci_device == 0x2A42 || \
-			 (dev)->pci_device == 0x2E42)
+#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
 
 #define HAS_BSD(dev)		(IS_IRONLAKE(dev) || IS_G4X(dev))
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index df5a7135c261..90b1d6753b9d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
+#include <linux/intel-gtt.h>
 
 static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
 static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
@@ -135,12 +136,13 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 		return -ENOMEM;
 
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	/* drop reference from allocate - handle holds it now */
 	drm_gem_object_unreference_unlocked(obj);
-	if (ret)
+	if (ret) {
 		return ret;
+	}
 
 	args->handle = handle;
-
 	return 0;
 }
 
@@ -467,14 +469,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	obj_priv = to_intel_bo(obj);
 
-	/* Bounds check source.
-	 *
-	 * XXX: This could use review for overflow issues...
-	 */
-	if (args->offset > obj->size || args->size > obj->size ||
-	    args->offset + args->size > obj->size) {
-		drm_gem_object_unreference_unlocked(obj);
-		return -EINVAL;
+	/* Bounds check source. */
+	if (args->offset > obj->size || args->size > obj->size - args->offset) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (!access_ok(VERIFY_WRITE,
+		       (char __user *)(uintptr_t)args->data_ptr,
+		       args->size)) {
+		ret = -EFAULT;
+		goto err;
 	}
 
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -486,8 +491,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 						file_priv);
 	}
 
+err:
 	drm_gem_object_unreference_unlocked(obj);
-
 	return ret;
 }
 
@@ -576,8 +581,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
-	if (!access_ok(VERIFY_READ, user_data, remain))
-		return -EFAULT;
 
 
 	mutex_lock(&dev->struct_mutex);
@@ -930,14 +933,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	obj_priv = to_intel_bo(obj);
 
-	/* Bounds check destination.
-	 *
-	 * XXX: This could use review for overflow issues...
-	 */
-	if (args->offset > obj->size || args->size > obj->size ||
-	    args->offset + args->size > obj->size) {
-		drm_gem_object_unreference_unlocked(obj);
-		return -EINVAL;
+	/* Bounds check destination. */
+	if (args->offset > obj->size || args->size > obj->size - args->offset) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (!access_ok(VERIFY_READ,
+		       (char __user *)(uintptr_t)args->data_ptr,
+		       args->size)) {
+		ret = -EFAULT;
+		goto err;
 	}
 
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -971,8 +977,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		DRM_INFO("pwrite failed %d\n", ret);
 #endif
 
+err:
 	drm_gem_object_unreference_unlocked(obj);
-
 	return ret;
 }
 
@@ -2347,14 +2353,21 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 
 	reg->obj = obj;
 
-	if (IS_GEN6(dev))
+	switch (INTEL_INFO(dev)->gen) {
+	case 6:
 		sandybridge_write_fence_reg(reg);
-	else if (IS_I965G(dev))
+		break;
+	case 5:
+	case 4:
 		i965_write_fence_reg(reg);
-	else if (IS_I9XX(dev))
+		break;
+	case 3:
 		i915_write_fence_reg(reg);
-	else
+		break;
+	case 2:
 		i830_write_fence_reg(reg);
+		break;
+	}
 
 	trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
 			obj_priv->tiling_mode);
@@ -2377,22 +2390,26 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_i915_fence_reg *reg =
 		&dev_priv->fence_regs[obj_priv->fence_reg];
+	uint32_t fence_reg;
 
-	if (IS_GEN6(dev)) {
+	switch (INTEL_INFO(dev)->gen) {
+	case 6:
 		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
 			     (obj_priv->fence_reg * 8), 0);
-	} else if (IS_I965G(dev)) {
+		break;
+	case 5:
+	case 4:
 		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
-	} else {
-		uint32_t fence_reg;
-
-		if (obj_priv->fence_reg < 8)
-			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+		break;
+	case 3:
+		if (obj_priv->fence_reg >= 8)
+			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
 		else
-			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
-						       8) * 4;
+	case 2:
+			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
 
 		I915_WRITE(fence_reg, 0);
+		break;
 	}
 
 	reg->obj = NULL;
@@ -3243,6 +3260,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				  (int) reloc->offset,
 				  reloc->read_domains,
 				  reloc->write_domain);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
 			return -EINVAL;
 		}
 		if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
@@ -3585,6 +3604,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		if (ret != 0) {
 			DRM_ERROR("copy %d cliprects failed: %d\n",
 				  args->num_cliprects, ret);
+			ret = -EFAULT;
 			goto pre_mutex_err;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 72cae3cccad8..5c428fa3e0b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -79,6 +79,7 @@ mark_free(struct drm_i915_gem_object *obj_priv,
 	  struct list_head *unwind)
 {
 	list_add(&obj_priv->evict_list, unwind);
+	drm_gem_object_reference(&obj_priv->base);
 	return drm_mm_scan_add_block(obj_priv->gtt_space);
 }
 
@@ -92,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
-	struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
+	struct drm_i915_gem_object *obj_priv;
 	struct list_head *render_iter, *bsd_iter;
 	int ret = 0;
 
@@ -165,6 +166,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
 	list_for_each_entry(obj_priv, &unwind_list, evict_list) {
 		ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
 		BUG_ON(ret);
+		drm_gem_object_unreference(&obj_priv->base);
 	}
 
 	/* We expect the caller to unpin, evict all and try again, or give up.
@@ -173,36 +175,34 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
 	return -ENOSPC;
 
 found:
+	/* drm_mm doesn't allow any other other operations while
+	 * scanning, therefore store to be evicted objects on a
+	 * temporary list. */
 	INIT_LIST_HEAD(&eviction_list);
-	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
-				 &unwind_list, evict_list) {
+	while (!list_empty(&unwind_list)) {
+		obj_priv = list_first_entry(&unwind_list,
+					    struct drm_i915_gem_object,
+					    evict_list);
 		if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
-			/* drm_mm doesn't allow any other other operations while
-			 * scanning, therefore store to be evicted objects on a
-			 * temporary list. */
 			list_move(&obj_priv->evict_list, &eviction_list);
+			continue;
 		}
+		list_del(&obj_priv->evict_list);
+		drm_gem_object_unreference(&obj_priv->base);
 	}
 
 	/* Unbinding will emit any required flushes */
-	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
-				 &eviction_list, evict_list) {
-#if WATCH_LRU
-		DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
-		ret = i915_gem_object_unbind(&obj_priv->base);
-		if (ret)
-			return ret;
+	while (!list_empty(&eviction_list)) {
+		obj_priv = list_first_entry(&eviction_list,
+					    struct drm_i915_gem_object,
+					    evict_list);
+		if (ret == 0)
+			ret = i915_gem_object_unbind(&obj_priv->base);
+		list_del(&obj_priv->evict_list);
+		drm_gem_object_unreference(&obj_priv->base);
 	}
 
-	/* The just created free hole should be on the top of the free stack
-	 * maintained by drm_mm, so this BUG_ON actually executes in O(1).
-	 * Furthermore all accessed data has just recently been used, so it
-	 * should be really fast, too. */
-	BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
-				   alignment, 0));
-
-	return 0;
+	return ret;
 }
 
 int
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 16861b800fee..744225ebb4b2 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -887,6 +887,49 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) | |||
887 | queue_work(dev_priv->wq, &dev_priv->error_work); | 887 | queue_work(dev_priv->wq, &dev_priv->error_work); |
888 | } | 888 | } |
889 | 889 | ||
890 | static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) | ||
891 | { | ||
892 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
893 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
894 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
895 | struct drm_i915_gem_object *obj_priv; | ||
896 | struct intel_unpin_work *work; | ||
897 | unsigned long flags; | ||
898 | bool stall_detected; | ||
899 | |||
900 | /* Ignore early vblank irqs */ | ||
901 | if (intel_crtc == NULL) | ||
902 | return; | ||
903 | |||
904 | spin_lock_irqsave(&dev->event_lock, flags); | ||
905 | work = intel_crtc->unpin_work; | ||
906 | |||
907 | if (work == NULL || work->pending || !work->enable_stall_check) { | ||
908 | /* Either the pending flip IRQ arrived, or we're too early. Don't check */ | ||
909 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
910 | return; | ||
911 | } | ||
912 | |||
913 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ | ||
914 | obj_priv = to_intel_bo(work->pending_flip_obj); | ||
915 | if (IS_I965G(dev)) { | ||
916 | int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; | ||
917 | stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; | ||
918 | } else { | ||
919 | int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; | ||
920 | stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset + | ||
921 | crtc->y * crtc->fb->pitch + | ||
922 | crtc->x * crtc->fb->bits_per_pixel/8); | ||
923 | } | ||
924 | |||
925 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
926 | |||
927 | if (stall_detected) { | ||
928 | DRM_DEBUG_DRIVER("Pageflip stall detected\n"); | ||
929 | intel_prepare_page_flip(dev, intel_crtc->plane); | ||
930 | } | ||
931 | } | ||
932 | |||
890 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | 933 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) |
891 | { | 934 | { |
892 | struct drm_device *dev = (struct drm_device *) arg; | 935 | struct drm_device *dev = (struct drm_device *) arg; |
@@ -1004,15 +1047,19 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
1004 | if (pipea_stats & vblank_status) { | 1047 | if (pipea_stats & vblank_status) { |
1005 | vblank++; | 1048 | vblank++; |
1006 | drm_handle_vblank(dev, 0); | 1049 | drm_handle_vblank(dev, 0); |
1007 | if (!dev_priv->flip_pending_is_done) | 1050 | if (!dev_priv->flip_pending_is_done) { |
1051 | i915_pageflip_stall_check(dev, 0); | ||
1008 | intel_finish_page_flip(dev, 0); | 1052 | intel_finish_page_flip(dev, 0); |
1053 | } | ||
1009 | } | 1054 | } |
1010 | 1055 | ||
1011 | if (pipeb_stats & vblank_status) { | 1056 | if (pipeb_stats & vblank_status) { |
1012 | vblank++; | 1057 | vblank++; |
1013 | drm_handle_vblank(dev, 1); | 1058 | drm_handle_vblank(dev, 1); |
1014 | if (!dev_priv->flip_pending_is_done) | 1059 | if (!dev_priv->flip_pending_is_done) { |
1060 | i915_pageflip_stall_check(dev, 1); | ||
1015 | intel_finish_page_flip(dev, 1); | 1061 | intel_finish_page_flip(dev, 1); |
1062 | } | ||
1016 | } | 1063 | } |
1017 | 1064 | ||
1018 | if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || | 1065 | if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || |
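The new i915_pageflip_stall_check() above recovers from a lost flip-completion interrupt: on each vblank it compares the base address currently latched in the display plane register with the GTT offset of the buffer queued for the pending flip, and if they already match it replays the missed completion. A trimmed sketch of the comparison, using the register names from the hunk; the y/x arithmetic applies only to the pre-965 path, where the plane register holds a linear byte address:

        /* Stall test sketch; dev->event_lock is held and work, work->pending
         * and work->enable_stall_check have already been checked. */
        obj_priv = to_intel_bo(work->pending_flip_obj);
        if (IS_I965G(dev)) {
                /* 965+: the surface register is programmed with the GTT base. */
                int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
                stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
        } else {
                /* Older parts: linear address = base + y * pitch + x * cpp. */
                int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
                stall_detected = I915_READ(dspaddr) ==
                        (obj_priv->gtt_offset +
                         crtc->y * crtc->fb->pitch +
                         crtc->x * crtc->fb->bits_per_pixel / 8);
        }

        if (stall_detected)
                /* The flip already landed: replay the missed completion. */
                intel_prepare_page_flip(dev, intel_crtc->plane);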
@@ -1303,17 +1350,25 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1303 | i915_seqno_passed(i915_get_gem_seqno(dev, | 1350 | i915_seqno_passed(i915_get_gem_seqno(dev, |
1304 | &dev_priv->render_ring), | 1351 | &dev_priv->render_ring), |
1305 | i915_get_tail_request(dev)->seqno)) { | 1352 | i915_get_tail_request(dev)->seqno)) { |
1353 | bool missed_wakeup = false; | ||
1354 | |||
1306 | dev_priv->hangcheck_count = 0; | 1355 | dev_priv->hangcheck_count = 0; |
1307 | 1356 | ||
1308 | /* Issue a wake-up to catch stuck h/w. */ | 1357 | /* Issue a wake-up to catch stuck h/w. */ |
1309 | if (dev_priv->render_ring.waiting_gem_seqno | | 1358 | if (dev_priv->render_ring.waiting_gem_seqno && |
1310 | dev_priv->bsd_ring.waiting_gem_seqno) { | 1359 | waitqueue_active(&dev_priv->render_ring.irq_queue)) { |
1311 | DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); | 1360 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); |
1312 | if (dev_priv->render_ring.waiting_gem_seqno) | 1361 | missed_wakeup = true; |
1313 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); | ||
1314 | if (dev_priv->bsd_ring.waiting_gem_seqno) | ||
1315 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | ||
1316 | } | 1362 | } |
1363 | |||
1364 | if (dev_priv->bsd_ring.waiting_gem_seqno && | ||
1365 | waitqueue_active(&dev_priv->bsd_ring.irq_queue)) { | ||
1366 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | ||
1367 | missed_wakeup = true; | ||
1368 | } | ||
1369 | |||
1370 | if (missed_wakeup) | ||
1371 | DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); | ||
1317 | return; | 1372 | return; |
1318 | } | 1373 | } |
1319 | 1374 | ||
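The hangcheck rework above fixes two things at once: the old test used a bitwise `|` where a logical condition was intended, and it printed the "missed IRQ" error even when nobody was sleeping on either ring. The new form only wakes a ring whose wait queue is actually populated and only logs the error if at least one wakeup was issued. A condensed sketch of the per-ring test, assuming the ring fields shown in the hunk:

        bool missed_wakeup = false;

        /* Only poke rings that both expect a seqno and have a sleeper. */
        if (dev_priv->render_ring.waiting_gem_seqno &&
            waitqueue_active(&dev_priv->render_ring.irq_queue)) {
                DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
                missed_wakeup = true;
        }

        if (dev_priv->bsd_ring.waiting_gem_seqno &&
            waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
                DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
                missed_wakeup = true;
        }

        /* Complain only if a wakeup was genuinely missed. */
        if (missed_wakeup)
                DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");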
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 67e3ec1a6af9..4f5e15577e89 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -319,6 +319,7 @@ | |||
319 | 319 | ||
320 | #define MI_MODE 0x0209c | 320 | #define MI_MODE 0x0209c |
321 | # define VS_TIMER_DISPATCH (1 << 6) | 321 | # define VS_TIMER_DISPATCH (1 << 6) |
322 | # define MI_FLUSH_ENABLE (1 << 11) | ||
322 | 323 | ||
323 | #define SCPD0 0x0209c /* 915+ only */ | 324 | #define SCPD0 0x0209c /* 915+ only */ |
324 | #define IER 0x020a0 | 325 | #define IER 0x020a0 |
@@ -2205,9 +2206,17 @@ | |||
2205 | #define WM1_LP_SR_EN (1<<31) | 2206 | #define WM1_LP_SR_EN (1<<31) |
2206 | #define WM1_LP_LATENCY_SHIFT 24 | 2207 | #define WM1_LP_LATENCY_SHIFT 24 |
2207 | #define WM1_LP_LATENCY_MASK (0x7f<<24) | 2208 | #define WM1_LP_LATENCY_MASK (0x7f<<24) |
2209 | #define WM1_LP_FBC_LP1_MASK (0xf<<20) | ||
2210 | #define WM1_LP_FBC_LP1_SHIFT 20 | ||
2208 | #define WM1_LP_SR_MASK (0x1ff<<8) | 2211 | #define WM1_LP_SR_MASK (0x1ff<<8) |
2209 | #define WM1_LP_SR_SHIFT 8 | 2212 | #define WM1_LP_SR_SHIFT 8 |
2210 | #define WM1_LP_CURSOR_MASK (0x3f) | 2213 | #define WM1_LP_CURSOR_MASK (0x3f) |
2214 | #define WM2_LP_ILK 0x4510c | ||
2215 | #define WM2_LP_EN (1<<31) | ||
2216 | #define WM3_LP_ILK 0x45110 | ||
2217 | #define WM3_LP_EN (1<<31) | ||
2218 | #define WM1S_LP_ILK 0x45120 | ||
2219 | #define WM1S_LP_EN (1<<31) | ||
2211 | 2220 | ||
2212 | /* Memory latency timer register */ | 2221 | /* Memory latency timer register */ |
2213 | #define MLTR_ILK 0x11222 | 2222 | #define MLTR_ILK 0x11222 |
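The added WM1_LP_FBC_LP1_* bits and the WM2/WM3/WM1S register definitions follow the file's usual mask-and-shift convention: each low-power watermark level packs several fields into a single 32-bit register. The standalone snippet below shows how such a value would be assembled and decoded with these macros; the field values are made up purely for illustration and are not taken from any hardware documentation:

#include <stdint.h>
#include <stdio.h>

#define WM1_LP_SR_EN            (1u << 31)
#define WM1_LP_LATENCY_SHIFT    24
#define WM1_LP_LATENCY_MASK     (0x7fu << 24)
#define WM1_LP_FBC_LP1_SHIFT    20
#define WM1_LP_FBC_LP1_MASK     (0xfu << 20)
#define WM1_LP_SR_SHIFT         8
#define WM1_LP_SR_MASK          (0x1ffu << 8)
#define WM1_LP_CURSOR_MASK      0x3fu

int main(void)
{
        /* Illustrative field values only: latency 12, FBC 3, SR 64, cursor 6. */
        uint32_t reg = WM1_LP_SR_EN |
                       (12u << WM1_LP_LATENCY_SHIFT) |
                       (3u  << WM1_LP_FBC_LP1_SHIFT) |
                       (64u << WM1_LP_SR_SHIFT) |
                       6u;

        printf("WM1_LP_ILK = 0x%08x\n", reg);
        printf("latency=%u fbc=%u sr=%u cursor=%u\n",
               (reg & WM1_LP_LATENCY_MASK) >> WM1_LP_LATENCY_SHIFT,
               (reg & WM1_LP_FBC_LP1_MASK) >> WM1_LP_FBC_LP1_SHIFT,
               (reg & WM1_LP_SR_MASK) >> WM1_LP_SR_SHIFT,
               reg & WM1_LP_CURSOR_MASK);
        return 0;
}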
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 2c6b98f2440e..31f08581e93a 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -789,16 +789,25 @@ int i915_save_state(struct drm_device *dev) | |||
789 | dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); | 789 | dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); |
790 | 790 | ||
791 | /* Fences */ | 791 | /* Fences */ |
792 | if (IS_I965G(dev)) { | 792 | switch (INTEL_INFO(dev)->gen) { |
793 | case 6: | ||
794 | for (i = 0; i < 16; i++) | ||
795 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | ||
796 | break; | ||
797 | case 5: | ||
798 | case 4: | ||
793 | for (i = 0; i < 16; i++) | 799 | for (i = 0; i < 16; i++) |
794 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | 800 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); |
795 | } else { | 801 | break; |
796 | for (i = 0; i < 8; i++) | 802 | case 3: |
797 | dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
798 | |||
799 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 803 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
800 | for (i = 0; i < 8; i++) | 804 | for (i = 0; i < 8; i++) |
801 | dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | 805 | dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); |
806 | case 2: | ||
807 | for (i = 0; i < 8; i++) | ||
808 | dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
809 | break; | ||
810 | |||
802 | } | 811 | } |
803 | 812 | ||
804 | return 0; | 813 | return 0; |
@@ -815,15 +824,24 @@ int i915_restore_state(struct drm_device *dev) | |||
815 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | 824 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); |
816 | 825 | ||
817 | /* Fences */ | 826 | /* Fences */ |
818 | if (IS_I965G(dev)) { | 827 | switch (INTEL_INFO(dev)->gen) { |
828 | case 6: | ||
829 | for (i = 0; i < 16; i++) | ||
830 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); | ||
831 | break; | ||
832 | case 5: | ||
833 | case 4: | ||
819 | for (i = 0; i < 16; i++) | 834 | for (i = 0; i < 16; i++) |
820 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); | 835 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); |
821 | } else { | 836 | break; |
822 | for (i = 0; i < 8; i++) | 837 | case 3: |
823 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); | 838 | case 2: |
824 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 839 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
825 | for (i = 0; i < 8; i++) | 840 | for (i = 0; i < 8; i++) |
826 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); | 841 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); |
842 | for (i = 0; i < 8; i++) | ||
843 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); | ||
844 | break; | ||
827 | } | 845 | } |
828 | 846 | ||
829 | i915_restore_display(dev); | 847 | i915_restore_display(dev); |
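The suspend/resume rework replaces chained IS_I965G()/IS_I945*() checks with a single switch on INTEL_INFO(dev)->gen, deliberately letting gen 3 fall through to gen 2 so that i945/G33 parts save their extra eight fence registers and then the common eight. A trimmed sketch of the save side with the fallthrough called out explicitly (the restore hunk mirrors it):

        switch (INTEL_INFO(dev)->gen) {
        case 6:         /* Sandybridge: 16 fences at FENCE_REG_SANDYBRIDGE_0 */
                for (i = 0; i < 16; i++)
                        dev_priv->saveFENCE[i] =
                                I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
                break;
        case 5:
        case 4:         /* gen4/5: 16 fences at FENCE_REG_965_0 */
                for (i = 0; i < 16; i++)
                        dev_priv->saveFENCE[i] =
                                I915_READ64(FENCE_REG_965_0 + (i * 8));
                break;
        case 3:         /* i945/G33 have 8 extra fences... */
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
                                dev_priv->saveFENCE[i + 8] =
                                        I915_READ(FENCE_REG_945_8 + (i * 4));
                /* ...and fall through for the common 8. */
        case 2:
                for (i = 0; i < 8; i++)
                        dev_priv->saveFENCE[i] =
                                I915_READ(FENCE_REG_830_0 + (i * 4));
                break;
        }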
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 4b7735196cd5..197d4f32585a 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -188,7 +188,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | |||
188 | 188 | ||
189 | if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, | 189 | if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, |
190 | 1000, 1)) | 190 | 1000, 1)) |
191 | DRM_ERROR("timed out waiting for FORCE_TRIGGER"); | 191 | DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); |
192 | 192 | ||
193 | if (turn_off_dac) { | 193 | if (turn_off_dac) { |
194 | I915_WRITE(PCH_ADPA, temp); | 194 | I915_WRITE(PCH_ADPA, temp); |
@@ -245,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
245 | if (wait_for((I915_READ(PORT_HOTPLUG_EN) & | 245 | if (wait_for((I915_READ(PORT_HOTPLUG_EN) & |
246 | CRT_HOTPLUG_FORCE_DETECT) == 0, | 246 | CRT_HOTPLUG_FORCE_DETECT) == 0, |
247 | 1000, 1)) | 247 | 1000, 1)) |
248 | DRM_ERROR("timed out waiting for FORCE_DETECT to go off"); | 248 | DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); |
249 | } | 249 | } |
250 | 250 | ||
251 | stat = I915_READ(PORT_HOTPLUG_STAT); | 251 | stat = I915_READ(PORT_HOTPLUG_STAT); |
@@ -400,7 +400,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder | |||
400 | return status; | 400 | return status; |
401 | } | 401 | } |
402 | 402 | ||
403 | static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) | 403 | static enum drm_connector_status |
404 | intel_crt_detect(struct drm_connector *connector, bool force) | ||
404 | { | 405 | { |
405 | struct drm_device *dev = connector->dev; | 406 | struct drm_device *dev = connector->dev; |
406 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 407 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
@@ -419,6 +420,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto | |||
419 | if (intel_crt_detect_ddc(encoder)) | 420 | if (intel_crt_detect_ddc(encoder)) |
420 | return connector_status_connected; | 421 | return connector_status_connected; |
421 | 422 | ||
423 | if (!force) | ||
424 | return connector->status; | ||
425 | |||
422 | /* for pre-945g platforms use load detect */ | 426 | /* for pre-945g platforms use load detect */ |
423 | if (encoder->crtc && encoder->crtc->enabled) { | 427 | if (encoder->crtc && encoder->crtc->enabled) { |
424 | status = intel_crt_load_detect(encoder->crtc, intel_encoder); | 428 | status = intel_crt_load_detect(encoder->crtc, intel_encoder); |
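The ->detect() callback gains a force parameter throughout this series (DP, DVO, HDMI and LVDS below all follow the CRT change above): background output polling passes force=false so a connector may return its cached status instead of running an invasive probe such as CRT load detection, while user-initiated probes pass force=true. A minimal sketch of the resulting shape; cheap_probe_says_connected() and expensive_load_detect() are hypothetical stand-ins, not driver functions:

static enum drm_connector_status
example_detect(struct drm_connector *connector, bool force)
{
        /* Cheap checks (hotplug status, DDC/EDID) always run. */
        if (cheap_probe_says_connected(connector))      /* hypothetical */
                return connector_status_connected;

        /* Polled (force == false): don't disturb the pipes, reuse the
         * last known status instead of doing load detection. */
        if (!force)
                return connector->status;

        /* Forced probe: fall back to the expensive/invasive method. */
        return expensive_load_detect(connector);        /* hypothetical */
}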
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 11a3394f5fe1..979228594599 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -990,6 +990,22 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
990 | struct drm_i915_private *dev_priv = dev->dev_private; | 990 | struct drm_i915_private *dev_priv = dev->dev_private; |
991 | int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT); | 991 | int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT); |
992 | 992 | ||
993 | /* Clear existing vblank status. Note this will clear any other | ||
994 | * sticky status fields as well. | ||
995 | * | ||
996 | * This races with i915_driver_irq_handler() with the result | ||
997 | * that either function could miss a vblank event. Here it is not | ||
998 | * fatal, as we will either wait upon the next vblank interrupt or | ||
999 | * timeout. Generally speaking intel_wait_for_vblank() is only | ||
1000 | * called during modeset at which time the GPU should be idle and | ||
1001 | * should *not* be performing page flips and thus not waiting on | ||
1002 | * vblanks... | ||
1003 | * Currently, the result of us stealing a vblank from the irq | ||
1004 | * handler is that a single frame will be skipped during swapbuffers. | ||
1005 | */ | ||
1006 | I915_WRITE(pipestat_reg, | ||
1007 | I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); | ||
1008 | |||
993 | /* Wait for vblank interrupt bit to set */ | 1009 | /* Wait for vblank interrupt bit to set */ |
994 | if (wait_for((I915_READ(pipestat_reg) & | 1010 | if (wait_for((I915_READ(pipestat_reg) & |
995 | PIPE_VBLANK_INTERRUPT_STATUS), | 1011 | PIPE_VBLANK_INTERRUPT_STATUS), |
@@ -997,8 +1013,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
997 | DRM_DEBUG_KMS("vblank wait timed out\n"); | 1013 | DRM_DEBUG_KMS("vblank wait timed out\n"); |
998 | } | 1014 | } |
999 | 1015 | ||
1000 | /** | 1016 | /* |
1001 | * intel_wait_for_vblank_off - wait for vblank after disabling a pipe | 1017 | * intel_wait_for_pipe_off - wait for pipe to turn off |
1002 | * @dev: drm device | 1018 | * @dev: drm device |
1003 | * @pipe: pipe to wait for | 1019 | * @pipe: pipe to wait for |
1004 | * | 1020 | * |
@@ -1006,25 +1022,39 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
1006 | * spinning on the vblank interrupt status bit, since we won't actually | 1022 | * spinning on the vblank interrupt status bit, since we won't actually |
1007 | * see an interrupt when the pipe is disabled. | 1023 | * see an interrupt when the pipe is disabled. |
1008 | * | 1024 | * |
1009 | * So this function waits for the display line value to settle (it | 1025 | * On Gen4 and above: |
1010 | * usually ends up stopping at the start of the next frame). | 1026 | * wait for the pipe register state bit to turn off |
1027 | * | ||
1028 | * Otherwise: | ||
1029 | * wait for the display line value to settle (it usually | ||
1030 | * ends up stopping at the start of the next frame). | ||
1031 | * | ||
1011 | */ | 1032 | */ |
1012 | void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) | 1033 | static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) |
1013 | { | 1034 | { |
1014 | struct drm_i915_private *dev_priv = dev->dev_private; | 1035 | struct drm_i915_private *dev_priv = dev->dev_private; |
1015 | int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); | 1036 | |
1016 | unsigned long timeout = jiffies + msecs_to_jiffies(100); | 1037 | if (INTEL_INFO(dev)->gen >= 4) { |
1017 | u32 last_line; | 1038 | int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF); |
1018 | 1039 | ||
1019 | /* Wait for the display line to settle */ | 1040 | /* Wait for the Pipe State to go off */ |
1020 | do { | 1041 | if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, |
1021 | last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; | 1042 | 100, 0)) |
1022 | mdelay(5); | 1043 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); |
1023 | } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && | 1044 | } else { |
1024 | time_after(timeout, jiffies)); | 1045 | u32 last_line; |
1025 | 1046 | int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); | |
1026 | if (time_after(jiffies, timeout)) | 1047 | unsigned long timeout = jiffies + msecs_to_jiffies(100); |
1027 | DRM_DEBUG_KMS("vblank wait timed out\n"); | 1048 | |
1049 | /* Wait for the display line to settle */ | ||
1050 | do { | ||
1051 | last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; | ||
1052 | mdelay(5); | ||
1053 | } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && | ||
1054 | time_after(timeout, jiffies)); | ||
1055 | if (time_after(jiffies, timeout)) | ||
1056 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); | ||
1057 | } | ||
1028 | } | 1058 | } |
1029 | 1059 | ||
1030 | /* Parameters have changed, update FBC info */ | 1060 | /* Parameters have changed, update FBC info */ |
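intel_wait_for_pipe_off() above is a poll-until-deadline helper: gen4+ spins on the I965_PIPECONF_ACTIVE bit via wait_for(), while older parts watch the scanline register until it stops changing or 100 ms pass. The same shape in standalone, compilable C; pipe_still_active() is a stand-in for the register read and simply clears after a few polls:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for the hardware poll; in the driver this would be
 * I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE. */
static bool pipe_still_active(void)
{
        static int countdown = 5;
        return --countdown > 0;
}

/* Poll until the condition clears or timeout_ms elapses. */
static bool wait_for_clear(bool (*active)(void), long timeout_ms)
{
        struct timespec start, now;
        struct timespec tick = { .tv_nsec = 5 * 1000 * 1000 };  /* ~5 ms */

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (!active())
                        return true;
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000 +
                    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
                        return false;
                nanosleep(&tick, NULL);
        }
}

int main(void)
{
        if (!wait_for_clear(pipe_still_active, 100))
                fprintf(stderr, "pipe_off wait timed out\n");
        return 0;
}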
@@ -1486,7 +1516,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1486 | dspcntr &= ~DISPPLANE_TILED; | 1516 | dspcntr &= ~DISPPLANE_TILED; |
1487 | } | 1517 | } |
1488 | 1518 | ||
1489 | if (IS_IRONLAKE(dev)) | 1519 | if (HAS_PCH_SPLIT(dev)) |
1490 | /* must disable */ | 1520 | /* must disable */ |
1491 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; | 1521 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; |
1492 | 1522 | ||
@@ -1495,20 +1525,19 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1495 | Start = obj_priv->gtt_offset; | 1525 | Start = obj_priv->gtt_offset; |
1496 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); | 1526 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); |
1497 | 1527 | ||
1498 | DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); | 1528 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
1529 | Start, Offset, x, y, fb->pitch); | ||
1499 | I915_WRITE(dspstride, fb->pitch); | 1530 | I915_WRITE(dspstride, fb->pitch); |
1500 | if (IS_I965G(dev)) { | 1531 | if (IS_I965G(dev)) { |
1501 | I915_WRITE(dspbase, Offset); | ||
1502 | I915_READ(dspbase); | ||
1503 | I915_WRITE(dspsurf, Start); | 1532 | I915_WRITE(dspsurf, Start); |
1504 | I915_READ(dspsurf); | ||
1505 | I915_WRITE(dsptileoff, (y << 16) | x); | 1533 | I915_WRITE(dsptileoff, (y << 16) | x); |
1534 | I915_WRITE(dspbase, Offset); | ||
1506 | } else { | 1535 | } else { |
1507 | I915_WRITE(dspbase, Start + Offset); | 1536 | I915_WRITE(dspbase, Start + Offset); |
1508 | I915_READ(dspbase); | ||
1509 | } | 1537 | } |
1538 | POSTING_READ(dspbase); | ||
1510 | 1539 | ||
1511 | if ((IS_I965G(dev) || plane == 0)) | 1540 | if (IS_I965G(dev) || plane == 0) |
1512 | intel_update_fbc(crtc, &crtc->mode); | 1541 | intel_update_fbc(crtc, &crtc->mode); |
1513 | 1542 | ||
1514 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1543 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
@@ -1522,7 +1551,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1522 | struct drm_framebuffer *old_fb) | 1551 | struct drm_framebuffer *old_fb) |
1523 | { | 1552 | { |
1524 | struct drm_device *dev = crtc->dev; | 1553 | struct drm_device *dev = crtc->dev; |
1525 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1526 | struct drm_i915_master_private *master_priv; | 1554 | struct drm_i915_master_private *master_priv; |
1527 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1555 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1528 | struct intel_framebuffer *intel_fb; | 1556 | struct intel_framebuffer *intel_fb; |
@@ -1530,13 +1558,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1530 | struct drm_gem_object *obj; | 1558 | struct drm_gem_object *obj; |
1531 | int pipe = intel_crtc->pipe; | 1559 | int pipe = intel_crtc->pipe; |
1532 | int plane = intel_crtc->plane; | 1560 | int plane = intel_crtc->plane; |
1533 | unsigned long Start, Offset; | ||
1534 | int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR); | ||
1535 | int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF); | ||
1536 | int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; | ||
1537 | int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); | ||
1538 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; | ||
1539 | u32 dspcntr; | ||
1540 | int ret; | 1561 | int ret; |
1541 | 1562 | ||
1542 | /* no fb bound */ | 1563 | /* no fb bound */ |
@@ -1572,71 +1593,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1572 | return ret; | 1593 | return ret; |
1573 | } | 1594 | } |
1574 | 1595 | ||
1575 | dspcntr = I915_READ(dspcntr_reg); | 1596 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y); |
1576 | /* Mask out pixel format bits in case we change it */ | 1597 | if (ret) { |
1577 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; | ||
1578 | switch (crtc->fb->bits_per_pixel) { | ||
1579 | case 8: | ||
1580 | dspcntr |= DISPPLANE_8BPP; | ||
1581 | break; | ||
1582 | case 16: | ||
1583 | if (crtc->fb->depth == 15) | ||
1584 | dspcntr |= DISPPLANE_15_16BPP; | ||
1585 | else | ||
1586 | dspcntr |= DISPPLANE_16BPP; | ||
1587 | break; | ||
1588 | case 24: | ||
1589 | case 32: | ||
1590 | if (crtc->fb->depth == 30) | ||
1591 | dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA; | ||
1592 | else | ||
1593 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; | ||
1594 | break; | ||
1595 | default: | ||
1596 | DRM_ERROR("Unknown color depth\n"); | ||
1597 | i915_gem_object_unpin(obj); | 1598 | i915_gem_object_unpin(obj); |
1598 | mutex_unlock(&dev->struct_mutex); | 1599 | mutex_unlock(&dev->struct_mutex); |
1599 | return -EINVAL; | 1600 | return ret; |
1600 | } | ||
1601 | if (IS_I965G(dev)) { | ||
1602 | if (obj_priv->tiling_mode != I915_TILING_NONE) | ||
1603 | dspcntr |= DISPPLANE_TILED; | ||
1604 | else | ||
1605 | dspcntr &= ~DISPPLANE_TILED; | ||
1606 | } | ||
1607 | |||
1608 | if (HAS_PCH_SPLIT(dev)) | ||
1609 | /* must disable */ | ||
1610 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; | ||
1611 | |||
1612 | I915_WRITE(dspcntr_reg, dspcntr); | ||
1613 | |||
1614 | Start = obj_priv->gtt_offset; | ||
1615 | Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); | ||
1616 | |||
1617 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", | ||
1618 | Start, Offset, x, y, crtc->fb->pitch); | ||
1619 | I915_WRITE(dspstride, crtc->fb->pitch); | ||
1620 | if (IS_I965G(dev)) { | ||
1621 | I915_WRITE(dspsurf, Start); | ||
1622 | I915_WRITE(dsptileoff, (y << 16) | x); | ||
1623 | I915_WRITE(dspbase, Offset); | ||
1624 | } else { | ||
1625 | I915_WRITE(dspbase, Start + Offset); | ||
1626 | } | 1601 | } |
1627 | POSTING_READ(dspbase); | ||
1628 | |||
1629 | if ((IS_I965G(dev) || plane == 0)) | ||
1630 | intel_update_fbc(crtc, &crtc->mode); | ||
1631 | |||
1632 | intel_wait_for_vblank(dev, pipe); | ||
1633 | 1602 | ||
1634 | if (old_fb) { | 1603 | if (old_fb) { |
1635 | intel_fb = to_intel_framebuffer(old_fb); | 1604 | intel_fb = to_intel_framebuffer(old_fb); |
1636 | obj_priv = to_intel_bo(intel_fb->obj); | 1605 | obj_priv = to_intel_bo(intel_fb->obj); |
1637 | i915_gem_object_unpin(intel_fb->obj); | 1606 | i915_gem_object_unpin(intel_fb->obj); |
1638 | } | 1607 | } |
1639 | intel_increase_pllclock(crtc, true); | ||
1640 | 1608 | ||
1641 | mutex_unlock(&dev->struct_mutex); | 1609 | mutex_unlock(&dev->struct_mutex); |
1642 | 1610 | ||
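With the register programming moved into intel_pipe_set_base_atomic(), intel_pipe_set_base() is reduced to resource management: pin the new framebuffer, call the atomic helper, drop the pin again if that fails, and only unpin the old framebuffer once the scanout has switched over. A compressed sketch of that control flow, lifted from the hunk above with the pinning and flush steps elided:

        /* The new framebuffer object (obj) is already pinned here. */
        ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
        if (ret) {
                /* Programming failed: release the pin we just took. */
                i915_gem_object_unpin(obj);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        /* Scanout now references crtc->fb, so the previous framebuffer
         * can finally be unpinned. */
        if (old_fb) {
                intel_fb = to_intel_framebuffer(old_fb);
                i915_gem_object_unpin(intel_fb->obj);
        }

        mutex_unlock(&dev->struct_mutex);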
@@ -1911,9 +1879,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1911 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | 1879 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; |
1912 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | 1880 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; |
1913 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; | 1881 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; |
1914 | int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; | ||
1915 | int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; | ||
1916 | int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS; | ||
1917 | int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | 1882 | int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; |
1918 | int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | 1883 | int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; |
1919 | int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; | 1884 | int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; |
@@ -1982,15 +1947,19 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1982 | } | 1947 | } |
1983 | 1948 | ||
1984 | /* Enable panel fitting for LVDS */ | 1949 | /* Enable panel fitting for LVDS */ |
1985 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) | 1950 | if (dev_priv->pch_pf_size && |
1986 | || HAS_eDP || intel_pch_has_edp(crtc)) { | 1951 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) |
1987 | if (dev_priv->pch_pf_size) { | 1952 | || HAS_eDP || intel_pch_has_edp(crtc))) { |
1988 | temp = I915_READ(pf_ctl_reg); | 1953 | /* Force use of hard-coded filter coefficients |
1989 | I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); | 1954 | * as some pre-programmed values are broken, |
1990 | I915_WRITE(pf_win_pos, dev_priv->pch_pf_pos); | 1955 | * e.g. x201. |
1991 | I915_WRITE(pf_win_size, dev_priv->pch_pf_size); | 1956 | */ |
1992 | } else | 1957 | I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, |
1993 | I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); | 1958 | PF_ENABLE | PF_FILTER_MED_3x3); |
1959 | I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS, | ||
1960 | dev_priv->pch_pf_pos); | ||
1961 | I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, | ||
1962 | dev_priv->pch_pf_size); | ||
1994 | } | 1963 | } |
1995 | 1964 | ||
1996 | /* Enable CPU pipe */ | 1965 | /* Enable CPU pipe */ |
@@ -2115,7 +2084,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2115 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); | 2084 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); |
2116 | I915_READ(transconf_reg); | 2085 | I915_READ(transconf_reg); |
2117 | 2086 | ||
2118 | if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 10, 0)) | 2087 | if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1)) |
2119 | DRM_ERROR("failed to enable transcoder\n"); | 2088 | DRM_ERROR("failed to enable transcoder\n"); |
2120 | } | 2089 | } |
2121 | 2090 | ||
@@ -2155,14 +2124,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2155 | udelay(100); | 2124 | udelay(100); |
2156 | 2125 | ||
2157 | /* Disable PF */ | 2126 | /* Disable PF */ |
2158 | temp = I915_READ(pf_ctl_reg); | 2127 | I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); |
2159 | if ((temp & PF_ENABLE) != 0) { | 2128 | I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0); |
2160 | I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); | ||
2161 | I915_READ(pf_ctl_reg); | ||
2162 | } | ||
2163 | I915_WRITE(pf_win_size, 0); | ||
2164 | POSTING_READ(pf_win_size); | ||
2165 | |||
2166 | 2129 | ||
2167 | /* disable CPU FDI tx and PCH FDI rx */ | 2130 | /* disable CPU FDI tx and PCH FDI rx */ |
2168 | temp = I915_READ(fdi_tx_reg); | 2131 | temp = I915_READ(fdi_tx_reg); |
@@ -2379,13 +2342,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2379 | I915_READ(dspbase_reg); | 2342 | I915_READ(dspbase_reg); |
2380 | } | 2343 | } |
2381 | 2344 | ||
2382 | /* Wait for vblank for the disable to take effect */ | ||
2383 | intel_wait_for_vblank_off(dev, pipe); | ||
2384 | |||
2385 | /* Don't disable pipe A or pipe A PLLs if needed */ | 2345 | /* Don't disable pipe A or pipe A PLLs if needed */ |
2386 | if (pipeconf_reg == PIPEACONF && | 2346 | if (pipeconf_reg == PIPEACONF && |
2387 | (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | 2347 | (dev_priv->quirks & QUIRK_PIPEA_FORCE)) { |
2348 | /* Wait for vblank for the disable to take effect */ | ||
2349 | intel_wait_for_vblank(dev, pipe); | ||
2388 | goto skip_pipe_off; | 2350 | goto skip_pipe_off; |
2351 | } | ||
2389 | 2352 | ||
2390 | /* Next, disable display pipes */ | 2353 | /* Next, disable display pipes */ |
2391 | temp = I915_READ(pipeconf_reg); | 2354 | temp = I915_READ(pipeconf_reg); |
@@ -2394,8 +2357,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2394 | I915_READ(pipeconf_reg); | 2357 | I915_READ(pipeconf_reg); |
2395 | } | 2358 | } |
2396 | 2359 | ||
2397 | /* Wait for vblank for the disable to take effect. */ | 2360 | /* Wait for the pipe to turn off */ |
2398 | intel_wait_for_vblank_off(dev, pipe); | 2361 | intel_wait_for_pipe_off(dev, pipe); |
2399 | 2362 | ||
2400 | temp = I915_READ(dpll_reg); | 2363 | temp = I915_READ(dpll_reg); |
2401 | if ((temp & DPLL_VCO_ENABLE) != 0) { | 2364 | if ((temp & DPLL_VCO_ENABLE) != 0) { |
@@ -2421,6 +2384,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2421 | int pipe = intel_crtc->pipe; | 2384 | int pipe = intel_crtc->pipe; |
2422 | bool enabled; | 2385 | bool enabled; |
2423 | 2386 | ||
2387 | if (intel_crtc->dpms_mode == mode) | ||
2388 | return; | ||
2389 | |||
2424 | intel_crtc->dpms_mode = mode; | 2390 | intel_crtc->dpms_mode = mode; |
2425 | intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON; | 2391 | intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON; |
2426 | 2392 | ||
@@ -2511,11 +2477,19 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, | |||
2511 | struct drm_display_mode *adjusted_mode) | 2477 | struct drm_display_mode *adjusted_mode) |
2512 | { | 2478 | { |
2513 | struct drm_device *dev = crtc->dev; | 2479 | struct drm_device *dev = crtc->dev; |
2480 | |||
2514 | if (HAS_PCH_SPLIT(dev)) { | 2481 | if (HAS_PCH_SPLIT(dev)) { |
2515 | /* FDI link clock is fixed at 2.7G */ | 2482 | /* FDI link clock is fixed at 2.7G */ |
2516 | if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) | 2483 | if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) |
2517 | return false; | 2484 | return false; |
2518 | } | 2485 | } |
2486 | |||
2487 | /* XXX some encoders set the crtcinfo, others don't. | ||
2488 | * Obviously we need some form of conflict resolution here... | ||
2489 | */ | ||
2490 | if (adjusted_mode->crtc_htotal == 0) | ||
2491 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
2492 | |||
2519 | return true; | 2493 | return true; |
2520 | } | 2494 | } |
2521 | 2495 | ||
@@ -2815,14 +2789,8 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | |||
2815 | /* Don't promote wm_size to unsigned... */ | 2789 | /* Don't promote wm_size to unsigned... */ |
2816 | if (wm_size > (long)wm->max_wm) | 2790 | if (wm_size > (long)wm->max_wm) |
2817 | wm_size = wm->max_wm; | 2791 | wm_size = wm->max_wm; |
2818 | if (wm_size <= 0) { | 2792 | if (wm_size <= 0) |
2819 | wm_size = wm->default_wm; | 2793 | wm_size = wm->default_wm; |
2820 | DRM_ERROR("Insufficient FIFO for plane, expect flickering:" | ||
2821 | " entries required = %ld, available = %lu.\n", | ||
2822 | entries_required + wm->guard_size, | ||
2823 | wm->fifo_size); | ||
2824 | } | ||
2825 | |||
2826 | return wm_size; | 2794 | return wm_size; |
2827 | } | 2795 | } |
2828 | 2796 | ||
@@ -3436,8 +3404,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3436 | reg_value = I915_READ(WM1_LP_ILK); | 3404 | reg_value = I915_READ(WM1_LP_ILK); |
3437 | reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | | 3405 | reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | |
3438 | WM1_LP_CURSOR_MASK); | 3406 | WM1_LP_CURSOR_MASK); |
3439 | reg_value |= WM1_LP_SR_EN | | 3407 | reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | |
3440 | (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | | ||
3441 | (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; | 3408 | (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; |
3442 | 3409 | ||
3443 | I915_WRITE(WM1_LP_ILK, reg_value); | 3410 | I915_WRITE(WM1_LP_ILK, reg_value); |
@@ -3554,10 +3521,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3554 | u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; | 3521 | u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; |
3555 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; | 3522 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; |
3556 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; | 3523 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
3557 | bool is_edp = false; | 3524 | struct intel_encoder *has_edp_encoder = NULL; |
3558 | struct drm_mode_config *mode_config = &dev->mode_config; | 3525 | struct drm_mode_config *mode_config = &dev->mode_config; |
3559 | struct drm_encoder *encoder; | 3526 | struct drm_encoder *encoder; |
3560 | struct intel_encoder *intel_encoder = NULL; | ||
3561 | const intel_limit_t *limit; | 3527 | const intel_limit_t *limit; |
3562 | int ret; | 3528 | int ret; |
3563 | struct fdi_m_n m_n = {0}; | 3529 | struct fdi_m_n m_n = {0}; |
@@ -3578,12 +3544,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3578 | drm_vblank_pre_modeset(dev, pipe); | 3544 | drm_vblank_pre_modeset(dev, pipe); |
3579 | 3545 | ||
3580 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | 3546 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { |
3547 | struct intel_encoder *intel_encoder; | ||
3581 | 3548 | ||
3582 | if (!encoder || encoder->crtc != crtc) | 3549 | if (encoder->crtc != crtc) |
3583 | continue; | 3550 | continue; |
3584 | 3551 | ||
3585 | intel_encoder = enc_to_intel_encoder(encoder); | 3552 | intel_encoder = enc_to_intel_encoder(encoder); |
3586 | |||
3587 | switch (intel_encoder->type) { | 3553 | switch (intel_encoder->type) { |
3588 | case INTEL_OUTPUT_LVDS: | 3554 | case INTEL_OUTPUT_LVDS: |
3589 | is_lvds = true; | 3555 | is_lvds = true; |
@@ -3607,7 +3573,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3607 | is_dp = true; | 3573 | is_dp = true; |
3608 | break; | 3574 | break; |
3609 | case INTEL_OUTPUT_EDP: | 3575 | case INTEL_OUTPUT_EDP: |
3610 | is_edp = true; | 3576 | has_edp_encoder = intel_encoder; |
3611 | break; | 3577 | break; |
3612 | } | 3578 | } |
3613 | 3579 | ||
@@ -3685,10 +3651,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3685 | int lane = 0, link_bw, bpp; | 3651 | int lane = 0, link_bw, bpp; |
3686 | /* eDP doesn't require FDI link, so just set DP M/N | 3652 | /* eDP doesn't require FDI link, so just set DP M/N |
3687 | according to current link config */ | 3653 | according to current link config */ |
3688 | if (is_edp) { | 3654 | if (has_edp_encoder) { |
3689 | target_clock = mode->clock; | 3655 | target_clock = mode->clock; |
3690 | intel_edp_link_config(intel_encoder, | 3656 | intel_edp_link_config(has_edp_encoder, |
3691 | &lane, &link_bw); | 3657 | &lane, &link_bw); |
3692 | } else { | 3658 | } else { |
3693 | /* DP over FDI requires target mode clock | 3659 | /* DP over FDI requires target mode clock |
3694 | instead of link clock */ | 3660 | instead of link clock */ |
@@ -3709,7 +3675,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3709 | temp |= PIPE_8BPC; | 3675 | temp |= PIPE_8BPC; |
3710 | else | 3676 | else |
3711 | temp |= PIPE_6BPC; | 3677 | temp |= PIPE_6BPC; |
3712 | } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) { | 3678 | } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) { |
3713 | switch (dev_priv->edp_bpp/3) { | 3679 | switch (dev_priv->edp_bpp/3) { |
3714 | case 8: | 3680 | case 8: |
3715 | temp |= PIPE_8BPC; | 3681 | temp |= PIPE_8BPC; |
@@ -3782,7 +3748,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3782 | 3748 | ||
3783 | udelay(200); | 3749 | udelay(200); |
3784 | 3750 | ||
3785 | if (is_edp) { | 3751 | if (has_edp_encoder) { |
3786 | if (dev_priv->lvds_use_ssc) { | 3752 | if (dev_priv->lvds_use_ssc) { |
3787 | temp |= DREF_SSC1_ENABLE; | 3753 | temp |= DREF_SSC1_ENABLE; |
3788 | I915_WRITE(PCH_DREF_CONTROL, temp); | 3754 | I915_WRITE(PCH_DREF_CONTROL, temp); |
@@ -3931,7 +3897,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3931 | dpll_reg = pch_dpll_reg; | 3897 | dpll_reg = pch_dpll_reg; |
3932 | } | 3898 | } |
3933 | 3899 | ||
3934 | if (!is_edp) { | 3900 | if (!has_edp_encoder) { |
3935 | I915_WRITE(fp_reg, fp); | 3901 | I915_WRITE(fp_reg, fp); |
3936 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); | 3902 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); |
3937 | I915_READ(dpll_reg); | 3903 | I915_READ(dpll_reg); |
@@ -4026,7 +3992,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4026 | } | 3992 | } |
4027 | } | 3993 | } |
4028 | 3994 | ||
4029 | if (!is_edp) { | 3995 | if (!has_edp_encoder) { |
4030 | I915_WRITE(fp_reg, fp); | 3996 | I915_WRITE(fp_reg, fp); |
4031 | I915_WRITE(dpll_reg, dpll); | 3997 | I915_WRITE(dpll_reg, dpll); |
4032 | I915_READ(dpll_reg); | 3998 | I915_READ(dpll_reg); |
@@ -4105,7 +4071,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4105 | I915_WRITE(link_m1_reg, m_n.link_m); | 4071 | I915_WRITE(link_m1_reg, m_n.link_m); |
4106 | I915_WRITE(link_n1_reg, m_n.link_n); | 4072 | I915_WRITE(link_n1_reg, m_n.link_n); |
4107 | 4073 | ||
4108 | if (is_edp) { | 4074 | if (has_edp_encoder) { |
4109 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); | 4075 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
4110 | } else { | 4076 | } else { |
4111 | /* enable FDI RX PLL too */ | 4077 | /* enable FDI RX PLL too */ |
@@ -4911,15 +4877,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) | |||
4911 | kfree(intel_crtc); | 4877 | kfree(intel_crtc); |
4912 | } | 4878 | } |
4913 | 4879 | ||
4914 | struct intel_unpin_work { | ||
4915 | struct work_struct work; | ||
4916 | struct drm_device *dev; | ||
4917 | struct drm_gem_object *old_fb_obj; | ||
4918 | struct drm_gem_object *pending_flip_obj; | ||
4919 | struct drm_pending_vblank_event *event; | ||
4920 | int pending; | ||
4921 | }; | ||
4922 | |||
4923 | static void intel_unpin_work_fn(struct work_struct *__work) | 4880 | static void intel_unpin_work_fn(struct work_struct *__work) |
4924 | { | 4881 | { |
4925 | struct intel_unpin_work *work = | 4882 | struct intel_unpin_work *work = |
@@ -5007,7 +4964,8 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) | |||
5007 | 4964 | ||
5008 | spin_lock_irqsave(&dev->event_lock, flags); | 4965 | spin_lock_irqsave(&dev->event_lock, flags); |
5009 | if (intel_crtc->unpin_work) { | 4966 | if (intel_crtc->unpin_work) { |
5010 | intel_crtc->unpin_work->pending = 1; | 4967 | if ((++intel_crtc->unpin_work->pending) > 1) |
4968 | DRM_ERROR("Prepared flip multiple times\n"); | ||
5011 | } else { | 4969 | } else { |
5012 | DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); | 4970 | DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); |
5013 | } | 4971 | } |
@@ -5026,9 +4984,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5026 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4984 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5027 | struct intel_unpin_work *work; | 4985 | struct intel_unpin_work *work; |
5028 | unsigned long flags, offset; | 4986 | unsigned long flags, offset; |
5029 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; | 4987 | int pipe = intel_crtc->pipe; |
5030 | int ret, pipesrc; | 4988 | u32 pf, pipesrc; |
5031 | u32 flip_mask; | 4989 | int ret; |
5032 | 4990 | ||
5033 | work = kzalloc(sizeof *work, GFP_KERNEL); | 4991 | work = kzalloc(sizeof *work, GFP_KERNEL); |
5034 | if (work == NULL) | 4992 | if (work == NULL) |
@@ -5077,42 +5035,73 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5077 | atomic_inc(&obj_priv->pending_flip); | 5035 | atomic_inc(&obj_priv->pending_flip); |
5078 | work->pending_flip_obj = obj; | 5036 | work->pending_flip_obj = obj; |
5079 | 5037 | ||
5080 | if (intel_crtc->plane) | ||
5081 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
5082 | else | ||
5083 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
5084 | |||
5085 | if (IS_GEN3(dev) || IS_GEN2(dev)) { | 5038 | if (IS_GEN3(dev) || IS_GEN2(dev)) { |
5039 | u32 flip_mask; | ||
5040 | |||
5041 | if (intel_crtc->plane) | ||
5042 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
5043 | else | ||
5044 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
5045 | |||
5086 | BEGIN_LP_RING(2); | 5046 | BEGIN_LP_RING(2); |
5087 | OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); | 5047 | OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); |
5088 | OUT_RING(0); | 5048 | OUT_RING(0); |
5089 | ADVANCE_LP_RING(); | 5049 | ADVANCE_LP_RING(); |
5090 | } | 5050 | } |
5091 | 5051 | ||
5052 | work->enable_stall_check = true; | ||
5053 | |||
5092 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | 5054 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ |
5093 | offset = obj_priv->gtt_offset; | 5055 | offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; |
5094 | offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8); | ||
5095 | 5056 | ||
5096 | BEGIN_LP_RING(4); | 5057 | BEGIN_LP_RING(4); |
5097 | if (IS_I965G(dev)) { | 5058 | switch(INTEL_INFO(dev)->gen) { |
5059 | case 2: | ||
5098 | OUT_RING(MI_DISPLAY_FLIP | | 5060 | OUT_RING(MI_DISPLAY_FLIP | |
5099 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5061 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5100 | OUT_RING(fb->pitch); | 5062 | OUT_RING(fb->pitch); |
5101 | OUT_RING(offset | obj_priv->tiling_mode); | 5063 | OUT_RING(obj_priv->gtt_offset + offset); |
5102 | pipesrc = I915_READ(pipesrc_reg); | 5064 | OUT_RING(MI_NOOP); |
5103 | OUT_RING(pipesrc & 0x0fff0fff); | 5065 | break; |
5104 | } else if (IS_GEN3(dev)) { | 5066 | |
5067 | case 3: | ||
5105 | OUT_RING(MI_DISPLAY_FLIP_I915 | | 5068 | OUT_RING(MI_DISPLAY_FLIP_I915 | |
5106 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5069 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5107 | OUT_RING(fb->pitch); | 5070 | OUT_RING(fb->pitch); |
5108 | OUT_RING(offset); | 5071 | OUT_RING(obj_priv->gtt_offset + offset); |
5109 | OUT_RING(MI_NOOP); | 5072 | OUT_RING(MI_NOOP); |
5110 | } else { | 5073 | break; |
5074 | |||
5075 | case 4: | ||
5076 | case 5: | ||
5077 | /* i965+ uses the linear or tiled offsets from the | ||
5078 | * Display Registers (which do not change across a page-flip) | ||
5079 | * so we need only reprogram the base address. | ||
5080 | */ | ||
5111 | OUT_RING(MI_DISPLAY_FLIP | | 5081 | OUT_RING(MI_DISPLAY_FLIP | |
5112 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5082 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5113 | OUT_RING(fb->pitch); | 5083 | OUT_RING(fb->pitch); |
5114 | OUT_RING(offset); | 5084 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); |
5115 | OUT_RING(MI_NOOP); | 5085 | |
5086 | /* XXX Enabling the panel-fitter across page-flip is so far | ||
5087 | * untested on non-native modes, so ignore it for now. | ||
5088 | * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | ||
5089 | */ | ||
5090 | pf = 0; | ||
5091 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; | ||
5092 | OUT_RING(pf | pipesrc); | ||
5093 | break; | ||
5094 | |||
5095 | case 6: | ||
5096 | OUT_RING(MI_DISPLAY_FLIP | | ||
5097 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
5098 | OUT_RING(fb->pitch | obj_priv->tiling_mode); | ||
5099 | OUT_RING(obj_priv->gtt_offset); | ||
5100 | |||
5101 | pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | ||
5102 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; | ||
5103 | OUT_RING(pf | pipesrc); | ||
5104 | break; | ||
5116 | } | 5105 | } |
5117 | ADVANCE_LP_RING(); | 5106 | ADVANCE_LP_RING(); |
5118 | 5107 | ||
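The flip submission is now a single switch on INTEL_INFO(dev)->gen because each generation reads the MI_DISPLAY_FLIP packet differently: gen2/3 want a full linear address, gen4/5 want only the surface base (x/y and tiling come from the display registers), and gen6 carries the tiling mode in the pitch dword and the panel-fitter/pipe-source state in the last dword. A pared-down sketch of the gen4/5 and gen6 arms, using the macros from the hunk:

        BEGIN_LP_RING(4);
        switch (INTEL_INFO(dev)->gen) {
        case 4:
        case 5:
                /* Display regs already hold x/y and the tiled offset:
                 * only the base address needs reprogramming. */
                OUT_RING(MI_DISPLAY_FLIP |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch);
                OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
                pf = 0;         /* panel fitter untested across flips */
                pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
                OUT_RING(pf | pipesrc);
                break;

        case 6:
                /* Sandybridge: tiling rides along in the pitch dword. */
                OUT_RING(MI_DISPLAY_FLIP |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch | obj_priv->tiling_mode);
                OUT_RING(obj_priv->gtt_offset);
                pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
                pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
                OUT_RING(pf | pipesrc);
                break;
        }
        ADVANCE_LP_RING();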
@@ -5193,7 +5182,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5193 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; | 5182 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; |
5194 | 5183 | ||
5195 | intel_crtc->cursor_addr = 0; | 5184 | intel_crtc->cursor_addr = 0; |
5196 | intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; | 5185 | intel_crtc->dpms_mode = -1; |
5197 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); | 5186 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); |
5198 | 5187 | ||
5199 | intel_crtc->busy = false; | 5188 | intel_crtc->busy = false; |
@@ -5701,6 +5690,9 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5701 | I915_WRITE(DISP_ARB_CTL, | 5690 | I915_WRITE(DISP_ARB_CTL, |
5702 | (I915_READ(DISP_ARB_CTL) | | 5691 | (I915_READ(DISP_ARB_CTL) | |
5703 | DISP_FBC_WM_DIS)); | 5692 | DISP_FBC_WM_DIS)); |
5693 | I915_WRITE(WM3_LP_ILK, 0); | ||
5694 | I915_WRITE(WM2_LP_ILK, 0); | ||
5695 | I915_WRITE(WM1_LP_ILK, 0); | ||
5704 | } | 5696 | } |
5705 | /* | 5697 | /* |
5706 | * Based on the document from hardware guys the following bits | 5698 | * Based on the document from hardware guys the following bits |
@@ -5722,8 +5714,7 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5722 | ILK_DPFC_DIS2 | | 5714 | ILK_DPFC_DIS2 | |
5723 | ILK_CLK_FBC); | 5715 | ILK_CLK_FBC); |
5724 | } | 5716 | } |
5725 | if (IS_GEN6(dev)) | 5717 | return; |
5726 | return; | ||
5727 | } else if (IS_G4X(dev)) { | 5718 | } else if (IS_G4X(dev)) { |
5728 | uint32_t dspclk_gate; | 5719 | uint32_t dspclk_gate; |
5729 | I915_WRITE(RENCLK_GATE_D1, 0); | 5720 | I915_WRITE(RENCLK_GATE_D1, 0); |
@@ -5784,11 +5775,9 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5784 | OUT_RING(MI_FLUSH); | 5775 | OUT_RING(MI_FLUSH); |
5785 | ADVANCE_LP_RING(); | 5776 | ADVANCE_LP_RING(); |
5786 | } | 5777 | } |
5787 | } else { | 5778 | } else |
5788 | DRM_DEBUG_KMS("Failed to allocate render context." | 5779 | DRM_DEBUG_KMS("Failed to allocate render context." |
5789 | " Disable RC6\n"); | 5780 | " Disable RC6\n"); |
5790 | return; | ||
5791 | } | ||
5792 | } | 5781 | } |
5793 | 5782 | ||
5794 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { | 5783 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 9caccd03dccb..9ab8708ac6ba 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -239,7 +239,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
239 | uint32_t ch_data = ch_ctl + 4; | 239 | uint32_t ch_data = ch_ctl + 4; |
240 | int i; | 240 | int i; |
241 | int recv_bytes; | 241 | int recv_bytes; |
242 | uint32_t ctl; | ||
243 | uint32_t status; | 242 | uint32_t status; |
244 | uint32_t aux_clock_divider; | 243 | uint32_t aux_clock_divider; |
245 | int try, precharge; | 244 | int try, precharge; |
@@ -263,41 +262,43 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
263 | else | 262 | else |
264 | precharge = 5; | 263 | precharge = 5; |
265 | 264 | ||
265 | if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) { | ||
266 | DRM_ERROR("dp_aux_ch not started status 0x%08x\n", | ||
267 | I915_READ(ch_ctl)); | ||
268 | return -EBUSY; | ||
269 | } | ||
270 | |||
266 | /* Must try at least 3 times according to DP spec */ | 271 | /* Must try at least 3 times according to DP spec */ |
267 | for (try = 0; try < 5; try++) { | 272 | for (try = 0; try < 5; try++) { |
268 | /* Load the send data into the aux channel data registers */ | 273 | /* Load the send data into the aux channel data registers */ |
269 | for (i = 0; i < send_bytes; i += 4) { | 274 | for (i = 0; i < send_bytes; i += 4) |
270 | uint32_t d = pack_aux(send + i, send_bytes - i); | 275 | I915_WRITE(ch_data + i, |
271 | 276 | pack_aux(send + i, send_bytes - i)); | |
272 | I915_WRITE(ch_data + i, d); | ||
273 | } | ||
274 | |||
275 | ctl = (DP_AUX_CH_CTL_SEND_BUSY | | ||
276 | DP_AUX_CH_CTL_TIME_OUT_400us | | ||
277 | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | | ||
278 | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | | ||
279 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | | ||
280 | DP_AUX_CH_CTL_DONE | | ||
281 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | ||
282 | DP_AUX_CH_CTL_RECEIVE_ERROR); | ||
283 | 277 | ||
284 | /* Send the command and wait for it to complete */ | 278 | /* Send the command and wait for it to complete */ |
285 | I915_WRITE(ch_ctl, ctl); | 279 | I915_WRITE(ch_ctl, |
286 | (void) I915_READ(ch_ctl); | 280 | DP_AUX_CH_CTL_SEND_BUSY | |
281 | DP_AUX_CH_CTL_TIME_OUT_400us | | ||
282 | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | | ||
283 | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | | ||
284 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | | ||
285 | DP_AUX_CH_CTL_DONE | | ||
286 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | ||
287 | DP_AUX_CH_CTL_RECEIVE_ERROR); | ||
287 | for (;;) { | 288 | for (;;) { |
288 | udelay(100); | ||
289 | status = I915_READ(ch_ctl); | 289 | status = I915_READ(ch_ctl); |
290 | if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) | 290 | if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) |
291 | break; | 291 | break; |
292 | udelay(100); | ||
292 | } | 293 | } |
293 | 294 | ||
294 | /* Clear done status and any errors */ | 295 | /* Clear done status and any errors */ |
295 | I915_WRITE(ch_ctl, (status | | 296 | I915_WRITE(ch_ctl, |
296 | DP_AUX_CH_CTL_DONE | | 297 | status | |
297 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | 298 | DP_AUX_CH_CTL_DONE | |
298 | DP_AUX_CH_CTL_RECEIVE_ERROR)); | 299 | DP_AUX_CH_CTL_TIME_OUT_ERROR | |
299 | (void) I915_READ(ch_ctl); | 300 | DP_AUX_CH_CTL_RECEIVE_ERROR); |
300 | if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0) | 301 | if (status & DP_AUX_CH_CTL_DONE) |
301 | break; | 302 | break; |
302 | } | 303 | } |
303 | 304 | ||
@@ -324,15 +325,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
324 | /* Unload any bytes sent back from the other side */ | 325 | /* Unload any bytes sent back from the other side */ |
325 | recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> | 326 | recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> |
326 | DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); | 327 | DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); |
327 | |||
328 | if (recv_bytes > recv_size) | 328 | if (recv_bytes > recv_size) |
329 | recv_bytes = recv_size; | 329 | recv_bytes = recv_size; |
330 | 330 | ||
331 | for (i = 0; i < recv_bytes; i += 4) { | 331 | for (i = 0; i < recv_bytes; i += 4) |
332 | uint32_t d = I915_READ(ch_data + i); | 332 | unpack_aux(I915_READ(ch_data + i), |
333 | 333 | recv + i, recv_bytes - i); | |
334 | unpack_aux(d, recv + i, recv_bytes - i); | ||
335 | } | ||
336 | 334 | ||
337 | return recv_bytes; | 335 | return recv_bytes; |
338 | } | 336 | } |
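The reworked intel_dp_aux_ch() above first refuses to start while the channel still reports SEND_BUSY, then retries the whole transaction up to five times (the DP spec requires at least three attempts), treating a DONE status as success rather than merely the absence of a timeout. A condensed sketch of that outer loop; the timing and message-size fields of the control-register write are elided for brevity:

        if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
                DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
                          I915_READ(ch_ctl));
                return -EBUSY;          /* a previous transaction is stuck */
        }

        for (try = 0; try < 5; try++) {         /* DP spec: >= 3 attempts */
                /* Load the send data into the AUX data registers. */
                for (i = 0; i < send_bytes; i += 4)
                        I915_WRITE(ch_data + i,
                                   pack_aux(send + i, send_bytes - i));

                /* Kick the transaction (timing/size fields elided here). */
                I915_WRITE(ch_ctl,
                           DP_AUX_CH_CTL_SEND_BUSY |
                           DP_AUX_CH_CTL_DONE |
                           DP_AUX_CH_CTL_TIME_OUT_ERROR |
                           DP_AUX_CH_CTL_RECEIVE_ERROR);

                /* Wait for the hardware to drop SEND_BUSY. */
                for (;;) {
                        status = I915_READ(ch_ctl);
                        if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                                break;
                        udelay(100);
                }

                /* Ack done/error bits; stop retrying on completion. */
                I915_WRITE(ch_ctl,
                           status |
                           DP_AUX_CH_CTL_DONE |
                           DP_AUX_CH_CTL_TIME_OUT_ERROR |
                           DP_AUX_CH_CTL_RECEIVE_ERROR);
                if (status & DP_AUX_CH_CTL_DONE)
                        break;
        }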
@@ -1140,18 +1138,14 @@ static bool | |||
1140 | intel_dp_set_link_train(struct intel_dp *intel_dp, | 1138 | intel_dp_set_link_train(struct intel_dp *intel_dp, |
1141 | uint32_t dp_reg_value, | 1139 | uint32_t dp_reg_value, |
1142 | uint8_t dp_train_pat, | 1140 | uint8_t dp_train_pat, |
1143 | uint8_t train_set[4], | 1141 | uint8_t train_set[4]) |
1144 | bool first) | ||
1145 | { | 1142 | { |
1146 | struct drm_device *dev = intel_dp->base.enc.dev; | 1143 | struct drm_device *dev = intel_dp->base.enc.dev; |
1147 | struct drm_i915_private *dev_priv = dev->dev_private; | 1144 | struct drm_i915_private *dev_priv = dev->dev_private; |
1148 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); | ||
1149 | int ret; | 1145 | int ret; |
1150 | 1146 | ||
1151 | I915_WRITE(intel_dp->output_reg, dp_reg_value); | 1147 | I915_WRITE(intel_dp->output_reg, dp_reg_value); |
1152 | POSTING_READ(intel_dp->output_reg); | 1148 | POSTING_READ(intel_dp->output_reg); |
1153 | if (first) | ||
1154 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1155 | 1149 | ||
1156 | intel_dp_aux_native_write_1(intel_dp, | 1150 | intel_dp_aux_native_write_1(intel_dp, |
1157 | DP_TRAINING_PATTERN_SET, | 1151 | DP_TRAINING_PATTERN_SET, |
@@ -1176,10 +1170,15 @@ intel_dp_link_train(struct intel_dp *intel_dp) | |||
1176 | uint8_t voltage; | 1170 | uint8_t voltage; |
1177 | bool clock_recovery = false; | 1171 | bool clock_recovery = false; |
1178 | bool channel_eq = false; | 1172 | bool channel_eq = false; |
1179 | bool first = true; | ||
1180 | int tries; | 1173 | int tries; |
1181 | u32 reg; | 1174 | u32 reg; |
1182 | uint32_t DP = intel_dp->DP; | 1175 | uint32_t DP = intel_dp->DP; |
1176 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); | ||
1177 | |||
1178 | /* Enable output, wait for it to become active */ | ||
1179 | I915_WRITE(intel_dp->output_reg, intel_dp->DP); | ||
1180 | POSTING_READ(intel_dp->output_reg); | ||
1181 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1183 | 1182 | ||
1184 | /* Write the link configuration data */ | 1183 | /* Write the link configuration data */ |
1185 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, | 1184 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, |
@@ -1212,9 +1211,8 @@ intel_dp_link_train(struct intel_dp *intel_dp) | |||
1212 | reg = DP | DP_LINK_TRAIN_PAT_1; | 1211 | reg = DP | DP_LINK_TRAIN_PAT_1; |
1213 | 1212 | ||
1214 | if (!intel_dp_set_link_train(intel_dp, reg, | 1213 | if (!intel_dp_set_link_train(intel_dp, reg, |
1215 | DP_TRAINING_PATTERN_1, train_set, first)) | 1214 | DP_TRAINING_PATTERN_1, train_set)) |
1216 | break; | 1215 | break; |
1217 | first = false; | ||
1218 | /* Set training pattern 1 */ | 1216 | /* Set training pattern 1 */ |
1219 | 1217 | ||
1220 | udelay(100); | 1218 | udelay(100); |
@@ -1268,8 +1266,7 @@ intel_dp_link_train(struct intel_dp *intel_dp) | |||
1268 | 1266 | ||
1269 | /* channel eq pattern */ | 1267 | /* channel eq pattern */ |
1270 | if (!intel_dp_set_link_train(intel_dp, reg, | 1268 | if (!intel_dp_set_link_train(intel_dp, reg, |
1271 | DP_TRAINING_PATTERN_2, train_set, | 1269 | DP_TRAINING_PATTERN_2, train_set)) |
1272 | false)) | ||
1273 | break; | 1270 | break; |
1274 | 1271 | ||
1275 | udelay(400); | 1272 | udelay(400); |
@@ -1388,7 +1385,7 @@ ironlake_dp_detect(struct drm_connector *connector) | |||
1388 | * \return false if DP port is disconnected. | 1385 | * \return false if DP port is disconnected. |
1389 | */ | 1386 | */ |
1390 | static enum drm_connector_status | 1387 | static enum drm_connector_status |
1391 | intel_dp_detect(struct drm_connector *connector) | 1388 | intel_dp_detect(struct drm_connector *connector, bool force) |
1392 | { | 1389 | { |
1393 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1390 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1394 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1391 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0e92aa07b382..8828b3ac6414 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -176,6 +176,16 @@ struct intel_crtc { | |||
176 | #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) | 176 | #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) |
177 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) | 177 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) |
178 | 178 | ||
179 | struct intel_unpin_work { | ||
180 | struct work_struct work; | ||
181 | struct drm_device *dev; | ||
182 | struct drm_gem_object *old_fb_obj; | ||
183 | struct drm_gem_object *pending_flip_obj; | ||
184 | struct drm_pending_vblank_event *event; | ||
185 | int pending; | ||
186 | bool enable_stall_check; | ||
187 | }; | ||
188 | |||
179 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, | 189 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, |
180 | const char *name); | 190 | const char *name); |
181 | void intel_i2c_destroy(struct i2c_adapter *adapter); | 191 | void intel_i2c_destroy(struct i2c_adapter *adapter); |
@@ -219,7 +229,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
219 | struct drm_crtc *crtc); | 229 | struct drm_crtc *crtc); |
220 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 230 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
221 | struct drm_file *file_priv); | 231 | struct drm_file *file_priv); |
222 | extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe); | ||
223 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); | 232 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); |
224 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); | 233 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); |
225 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | 234 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
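Moving struct intel_unpin_work into intel_drv.h lets the IRQ code inspect it: the new enable_stall_check flag marks flips that were actually queued on the ring, and pending becomes a counter so a flip that gets prepared twice is reported instead of silently absorbed. A short sketch of how the two fields are consumed, following the intel_display.c and i915_irq.c hunks above:

        /* intel_prepare_page_flip(): count prepares so a duplicate is loud. */
        spin_lock_irqsave(&dev->event_lock, flags);
        if (intel_crtc->unpin_work) {
                if ((++intel_crtc->unpin_work->pending) > 1)
                        DRM_ERROR("Prepared flip multiple times\n");
        } else {
                DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);

        /* i915_pageflip_stall_check(): skip flips whose prepare IRQ already
         * arrived or that were queued without stall checking. */
        if (work == NULL || work->pending || !work->enable_stall_check) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
        }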
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index a399f4b2c1c5..7c9ec1472d46 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -221,7 +221,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
221 | * | 221 | * |
222 | * Unimplemented. | 222 | * Unimplemented. |
223 | */ | 223 | */ |
224 | static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) | 224 | static enum drm_connector_status |
225 | intel_dvo_detect(struct drm_connector *connector, bool force) | ||
225 | { | 226 | { |
226 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 227 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
227 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); | 228 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 7bdc96256bf5..b61966c126d3 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -237,8 +237,10 @@ int intel_fbdev_destroy(struct drm_device *dev, | |||
237 | drm_fb_helper_fini(&ifbdev->helper); | 237 | drm_fb_helper_fini(&ifbdev->helper); |
238 | 238 | ||
239 | drm_framebuffer_cleanup(&ifb->base); | 239 | drm_framebuffer_cleanup(&ifb->base); |
240 | if (ifb->obj) | 240 | if (ifb->obj) { |
241 | drm_gem_object_unreference(ifb->obj); | 241 | drm_gem_object_unreference(ifb->obj); |
242 | ifb->obj = NULL; | ||
243 | } | ||
242 | 244 | ||
243 | return 0; | 245 | return 0; |
244 | } | 246 | } |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ccd4c97e6524..926934a482ec 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -139,7 +139,7 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | |||
139 | } | 139 | } |
140 | 140 | ||
141 | static enum drm_connector_status | 141 | static enum drm_connector_status |
142 | intel_hdmi_detect(struct drm_connector *connector) | 142 | intel_hdmi_detect(struct drm_connector *connector, bool force) |
143 | { | 143 | { |
144 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 144 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
145 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 145 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b819c1081147..6ec39a86ed06 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -445,7 +445,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
445 | * connected and closed means disconnected. We also send hotplug events as | 445 | * connected and closed means disconnected. We also send hotplug events as |
446 | * needed, using lid status notification from the input layer. | 446 | * needed, using lid status notification from the input layer. |
447 | */ | 447 | */ |
448 | static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) | 448 | static enum drm_connector_status |
449 | intel_lvds_detect(struct drm_connector *connector, bool force) | ||
449 | { | 450 | { |
450 | struct drm_device *dev = connector->dev; | 451 | struct drm_device *dev = connector->dev; |
451 | enum drm_connector_status status = connector_status_connected; | 452 | enum drm_connector_status status = connector_status_connected; |
@@ -540,7 +541,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
540 | * the LID notification event. | 541 | * the LID notification event. |
541 | */ | 542 | */ |
542 | if (connector) | 543 | if (connector) |
543 | connector->status = connector->funcs->detect(connector); | 544 | connector->status = connector->funcs->detect(connector, |
545 | false); | ||
546 | |||
544 | /* Don't force modeset on machines where it causes a GPU lockup */ | 547 | /* Don't force modeset on machines where it causes a GPU lockup */ |
545 | if (dmi_check_system(intel_no_modeset_on_lid)) | 548 | if (dmi_check_system(intel_no_modeset_on_lid)) |
546 | return NOTIFY_OK; | 549 | return NOTIFY_OK; |
@@ -875,8 +878,6 @@ void intel_lvds_init(struct drm_device *dev) | |||
875 | 878 | ||
876 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); | 879 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); |
877 | intel_encoder->crtc_mask = (1 << 1); | 880 | intel_encoder->crtc_mask = (1 << 1); |
878 | if (IS_I965G(dev)) | ||
879 | intel_encoder->crtc_mask |= (1 << 0); | ||
880 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); | 881 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); |
881 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); | 882 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); |
882 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 883 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 4f00390d7c61..1d306a458be6 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -25,6 +25,8 @@ | |||
25 | * | 25 | * |
26 | * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c | 26 | * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c |
27 | */ | 27 | */ |
28 | |||
29 | #include <linux/seq_file.h> | ||
28 | #include "drmP.h" | 30 | #include "drmP.h" |
29 | #include "drm.h" | 31 | #include "drm.h" |
30 | #include "i915_drm.h" | 32 | #include "i915_drm.h" |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 51e9c9e718c4..cb3508f78bc3 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -220,9 +220,13 @@ static int init_render_ring(struct drm_device *dev, | |||
220 | { | 220 | { |
221 | drm_i915_private_t *dev_priv = dev->dev_private; | 221 | drm_i915_private_t *dev_priv = dev->dev_private; |
222 | int ret = init_ring_common(dev, ring); | 222 | int ret = init_ring_common(dev, ring); |
223 | int mode; | ||
224 | |||
223 | if (IS_I9XX(dev) && !IS_GEN3(dev)) { | 225 | if (IS_I9XX(dev) && !IS_GEN3(dev)) { |
224 | I915_WRITE(MI_MODE, | 226 | mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; |
225 | (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); | 227 | if (IS_GEN6(dev)) |
228 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; | ||
229 | I915_WRITE(MI_MODE, mode); | ||
226 | } | 230 | } |
227 | return ret; | 231 | return ret; |
228 | } | 232 | } |
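The MI_MODE write above follows the i915 masked-register convention: the upper 16 bits of the value select which bits the write is allowed to change, and the lower 16 bits carry the new values. A sketch of that pattern with illustrative helpers (the driver open-codes it; the flag is assumed to live in the low 16 bits):

static inline u32 masked_bit_enable(u32 bit)
{
        return (bit << 16) | bit;       /* unlock the bit and set it */
}

static inline u32 masked_bit_disable(u32 bit)
{
        return bit << 16;               /* unlock the bit, leave it cleared */
}

/* Matching the hunk above:
 *      mode = masked_bit_enable(VS_TIMER_DISPATCH);
 *      if (IS_GEN6(dev))
 *              mode |= masked_bit_enable(MI_FLUSH_ENABLE);
 *      I915_WRITE(MI_MODE, mode);
 */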
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 093e914e8a41..ee73e428a84a 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1061,8 +1061,9 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1061 | if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) | 1061 | if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) |
1062 | return false; | 1062 | return false; |
1063 | 1063 | ||
1064 | if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode)) | 1064 | (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, |
1065 | return false; | 1065 | mode, |
1066 | adjusted_mode); | ||
1066 | } else if (intel_sdvo->is_lvds) { | 1067 | } else if (intel_sdvo->is_lvds) { |
1067 | drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0); | 1068 | drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0); |
1068 | 1069 | ||
@@ -1070,8 +1071,9 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1070 | intel_sdvo->sdvo_lvds_fixed_mode)) | 1071 | intel_sdvo->sdvo_lvds_fixed_mode)) |
1071 | return false; | 1072 | return false; |
1072 | 1073 | ||
1073 | if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode)) | 1074 | (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, |
1074 | return false; | 1075 | mode, |
1076 | adjusted_mode); | ||
1075 | } | 1077 | } |
1076 | 1078 | ||
1077 | /* Make the CRTC code factor in the SDVO pixel multiplier. The | 1079 | /* Make the CRTC code factor in the SDVO pixel multiplier. The |
@@ -1108,10 +1110,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1108 | in_out.in0 = intel_sdvo->attached_output; | 1110 | in_out.in0 = intel_sdvo->attached_output; |
1109 | in_out.in1 = 0; | 1111 | in_out.in1 = 0; |
1110 | 1112 | ||
1111 | if (!intel_sdvo_set_value(intel_sdvo, | 1113 | intel_sdvo_set_value(intel_sdvo, |
1112 | SDVO_CMD_SET_IN_OUT_MAP, | 1114 | SDVO_CMD_SET_IN_OUT_MAP, |
1113 | &in_out, sizeof(in_out))) | 1115 | &in_out, sizeof(in_out)); |
1114 | return; | ||
1115 | 1116 | ||
1116 | if (intel_sdvo->is_hdmi) { | 1117 | if (intel_sdvo->is_hdmi) { |
1117 | if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) | 1118 | if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) |
@@ -1122,11 +1123,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1122 | 1123 | ||
1123 | /* We have tried to get input timing in mode_fixup, and filled it into | 1124 | /* We have tried to get input timing in mode_fixup, and filled it into |
1124 | adjusted_mode */ | 1125 | adjusted_mode */ |
1125 | if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { | 1126 | intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); |
1126 | intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); | 1127 | if (intel_sdvo->is_tv || intel_sdvo->is_lvds) |
1127 | input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags; | 1128 | input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags; |
1128 | } else | ||
1129 | intel_sdvo_get_dtd_from_mode(&input_dtd, mode); | ||
1130 | 1129 | ||
1131 | /* If it's a TV, we already set the output timing in mode_fixup. | 1130 | /* If it's a TV, we already set the output timing in mode_fixup. |
1132 | * Otherwise, the output timing is equal to the input timing. | 1131 | * Otherwise, the output timing is equal to the input timing. |
@@ -1137,8 +1136,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1137 | intel_sdvo->attached_output)) | 1136 | intel_sdvo->attached_output)) |
1138 | return; | 1137 | return; |
1139 | 1138 | ||
1140 | if (!intel_sdvo_set_output_timing(intel_sdvo, &input_dtd)) | 1139 | (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd); |
1141 | return; | ||
1142 | } | 1140 | } |
1143 | 1141 | ||
1144 | /* Set the input timing to the screen. Assume always input 0. */ | 1142 | /* Set the input timing to the screen. Assume always input 0. */ |
@@ -1165,8 +1163,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1165 | intel_sdvo_set_input_timing(encoder, &input_dtd); | 1163 | intel_sdvo_set_input_timing(encoder, &input_dtd); |
1166 | } | 1164 | } |
1167 | #else | 1165 | #else |
1168 | if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) | 1166 | (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); |
1169 | return; | ||
1170 | #endif | 1167 | #endif |
1171 | 1168 | ||
1172 | sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); | 1169 | sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); |
@@ -1420,7 +1417,7 @@ intel_analog_is_connected(struct drm_device *dev) | |||
1420 | if (!analog_connector) | 1417 | if (!analog_connector) |
1421 | return false; | 1418 | return false; |
1422 | 1419 | ||
1423 | if (analog_connector->funcs->detect(analog_connector) == | 1420 | if (analog_connector->funcs->detect(analog_connector, false) == |
1424 | connector_status_disconnected) | 1421 | connector_status_disconnected) |
1425 | return false; | 1422 | return false; |
1426 | 1423 | ||
@@ -1489,7 +1486,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | |||
1489 | return status; | 1486 | return status; |
1490 | } | 1487 | } |
1491 | 1488 | ||
1492 | static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) | 1489 | static enum drm_connector_status |
1490 | intel_sdvo_detect(struct drm_connector *connector, bool force) | ||
1493 | { | 1491 | { |
1494 | uint16_t response; | 1492 | uint16_t response; |
1495 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1493 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
@@ -1932,6 +1930,41 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { | |||
1932 | .destroy = intel_sdvo_enc_destroy, | 1930 | .destroy = intel_sdvo_enc_destroy, |
1933 | }; | 1931 | }; |
1934 | 1932 | ||
1933 | static void | ||
1934 | intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo) | ||
1935 | { | ||
1936 | uint16_t mask = 0; | ||
1937 | unsigned int num_bits; | ||
1938 | |||
1939 | /* Make a mask of outputs less than or equal to our own priority in the | ||
1940 | * list. | ||
1941 | */ | ||
1942 | switch (sdvo->controlled_output) { | ||
1943 | case SDVO_OUTPUT_LVDS1: | ||
1944 | mask |= SDVO_OUTPUT_LVDS1; | ||
1945 | case SDVO_OUTPUT_LVDS0: | ||
1946 | mask |= SDVO_OUTPUT_LVDS0; | ||
1947 | case SDVO_OUTPUT_TMDS1: | ||
1948 | mask |= SDVO_OUTPUT_TMDS1; | ||
1949 | case SDVO_OUTPUT_TMDS0: | ||
1950 | mask |= SDVO_OUTPUT_TMDS0; | ||
1951 | case SDVO_OUTPUT_RGB1: | ||
1952 | mask |= SDVO_OUTPUT_RGB1; | ||
1953 | case SDVO_OUTPUT_RGB0: | ||
1954 | mask |= SDVO_OUTPUT_RGB0; | ||
1955 | break; | ||
1956 | } | ||
1957 | |||
1958 | /* Count the set bits to find our position in the priority list. */ | ||
1959 | mask &= sdvo->caps.output_flags; | ||
1960 | num_bits = hweight16(mask); | ||
1961 | /* If more than 3 outputs, default to DDC bus 3 for now. */ | ||
1962 | if (num_bits > 3) | ||
1963 | num_bits = 3; | ||
1964 | |||
1965 | /* Corresponds to SDVO_CONTROL_BUS_DDCx */ | ||
1966 | sdvo->ddc_bus = 1 << num_bits; | ||
1967 | } | ||
1935 | 1968 | ||
1936 | /** | 1969 | /** |
1937 | * Choose the appropriate DDC bus for control bus switch command for this | 1970 | * Choose the appropriate DDC bus for control bus switch command for this |
@@ -1951,7 +1984,10 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, | |||
1951 | else | 1984 | else |
1952 | mapping = &(dev_priv->sdvo_mappings[1]); | 1985 | mapping = &(dev_priv->sdvo_mappings[1]); |
1953 | 1986 | ||
1954 | sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); | 1987 | if (mapping->initialized) |
1988 | sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); | ||
1989 | else | ||
1990 | intel_sdvo_guess_ddc_bus(sdvo); | ||
1955 | } | 1991 | } |
1956 | 1992 | ||
1957 | static bool | 1993 | static bool |
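When the VBT mapping is not initialized, intel_sdvo_guess_ddc_bus() counts how many outputs of equal or lower priority the device actually exposes and picks DDC bus 1 << n from that count. For example, with controlled_output = SDVO_OUTPUT_TMDS1 and caps advertising TMDS0 | TMDS1 | RGB0, the fall-through switch builds a mask covering TMDS1 down to RGB0, the AND leaves three bits set, and the guess becomes 1 << 3. A standalone sketch of the arithmetic (hypothetical values; hweight16() is from <linux/bitops.h>):

static unsigned int guess_ddc_bus(u16 priority_mask, u16 output_flags)
{
        unsigned int n = hweight16(priority_mask & output_flags);

        if (n > 3)              /* more than 3 outputs: default to DDC bus 3 */
                n = 3;
        return 1 << n;          /* corresponds to SDVO_CONTROL_BUS_DDCx */
}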
@@ -2134,8 +2170,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) | |||
2134 | return true; | 2170 | return true; |
2135 | 2171 | ||
2136 | err: | 2172 | err: |
2137 | intel_sdvo_destroy_enhance_property(connector); | 2173 | intel_sdvo_destroy(connector); |
2138 | kfree(intel_sdvo_connector); | ||
2139 | return false; | 2174 | return false; |
2140 | } | 2175 | } |
2141 | 2176 | ||
@@ -2207,8 +2242,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) | |||
2207 | return true; | 2242 | return true; |
2208 | 2243 | ||
2209 | err: | 2244 | err: |
2210 | intel_sdvo_destroy_enhance_property(connector); | 2245 | intel_sdvo_destroy(connector); |
2211 | kfree(intel_sdvo_connector); | ||
2212 | return false; | 2246 | return false; |
2213 | } | 2247 | } |
2214 | 2248 | ||
@@ -2486,11 +2520,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, | |||
2486 | uint16_t response; | 2520 | uint16_t response; |
2487 | } enhancements; | 2521 | } enhancements; |
2488 | 2522 | ||
2489 | if (!intel_sdvo_get_value(intel_sdvo, | 2523 | enhancements.response = 0; |
2490 | SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, | 2524 | intel_sdvo_get_value(intel_sdvo, |
2491 | &enhancements, sizeof(enhancements))) | 2525 | SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, |
2492 | return false; | 2526 | &enhancements, sizeof(enhancements)); |
2493 | |||
2494 | if (enhancements.response == 0) { | 2527 | if (enhancements.response == 0) { |
2495 | DRM_DEBUG_KMS("No enhancement is supported\n"); | 2528 | DRM_DEBUG_KMS("No enhancement is supported\n"); |
2496 | return true; | 2529 | return true; |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index d2029efee982..4a117e318a73 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1231,7 +1231,6 @@ intel_tv_detect_type (struct intel_tv *intel_tv) | |||
1231 | struct drm_encoder *encoder = &intel_tv->base.enc; | 1231 | struct drm_encoder *encoder = &intel_tv->base.enc; |
1232 | struct drm_device *dev = encoder->dev; | 1232 | struct drm_device *dev = encoder->dev; |
1233 | struct drm_i915_private *dev_priv = dev->dev_private; | 1233 | struct drm_i915_private *dev_priv = dev->dev_private; |
1234 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
1235 | unsigned long irqflags; | 1234 | unsigned long irqflags; |
1236 | u32 tv_ctl, save_tv_ctl; | 1235 | u32 tv_ctl, save_tv_ctl; |
1237 | u32 tv_dac, save_tv_dac; | 1236 | u32 tv_dac, save_tv_dac; |
@@ -1268,11 +1267,15 @@ intel_tv_detect_type (struct intel_tv *intel_tv) | |||
1268 | DAC_C_0_7_V); | 1267 | DAC_C_0_7_V); |
1269 | I915_WRITE(TV_CTL, tv_ctl); | 1268 | I915_WRITE(TV_CTL, tv_ctl); |
1270 | I915_WRITE(TV_DAC, tv_dac); | 1269 | I915_WRITE(TV_DAC, tv_dac); |
1271 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1270 | POSTING_READ(TV_DAC); |
1271 | msleep(20); | ||
1272 | |||
1272 | tv_dac = I915_READ(TV_DAC); | 1273 | tv_dac = I915_READ(TV_DAC); |
1273 | I915_WRITE(TV_DAC, save_tv_dac); | 1274 | I915_WRITE(TV_DAC, save_tv_dac); |
1274 | I915_WRITE(TV_CTL, save_tv_ctl); | 1275 | I915_WRITE(TV_CTL, save_tv_ctl); |
1275 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1276 | POSTING_READ(TV_CTL); |
1277 | msleep(20); | ||
1278 | |||
1276 | /* | 1279 | /* |
1277 | * A B C | 1280 | * A B C |
1278 | * 0 1 1 Composite | 1281 | * 0 1 1 Composite |
@@ -1338,7 +1341,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector) | |||
1338 | * we have a pipe programmed in order to probe the TV. | 1341 | * we have a pipe programmed in order to probe the TV. |
1339 | */ | 1342 | */ |
1340 | static enum drm_connector_status | 1343 | static enum drm_connector_status |
1341 | intel_tv_detect(struct drm_connector *connector) | 1344 | intel_tv_detect(struct drm_connector *connector, bool force) |
1342 | { | 1345 | { |
1343 | struct drm_display_mode mode; | 1346 | struct drm_display_mode mode; |
1344 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1347 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
@@ -1350,7 +1353,7 @@ intel_tv_detect(struct drm_connector *connector) | |||
1350 | 1353 | ||
1351 | if (encoder->crtc && encoder->crtc->enabled) { | 1354 | if (encoder->crtc && encoder->crtc->enabled) { |
1352 | type = intel_tv_detect_type(intel_tv); | 1355 | type = intel_tv_detect_type(intel_tv); |
1353 | } else { | 1356 | } else if (force) { |
1354 | struct drm_crtc *crtc; | 1357 | struct drm_crtc *crtc; |
1355 | int dpms_mode; | 1358 | int dpms_mode; |
1356 | 1359 | ||
@@ -1361,10 +1364,9 @@ intel_tv_detect(struct drm_connector *connector) | |||
1361 | intel_release_load_detect_pipe(&intel_tv->base, connector, | 1364 | intel_release_load_detect_pipe(&intel_tv->base, connector, |
1362 | dpms_mode); | 1365 | dpms_mode); |
1363 | } else | 1366 | } else |
1364 | type = -1; | 1367 | return connector_status_unknown; |
1365 | } | 1368 | } else |
1366 | 1369 | return connector->status; | |
1367 | intel_tv->type = type; | ||
1368 | 1370 | ||
1369 | if (type < 0) | 1371 | if (type < 0) |
1370 | return connector_status_disconnected; | 1372 | return connector_status_disconnected; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index a1473fff06ac..fc737037f751 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -168,7 +168,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector, | |||
168 | } | 168 | } |
169 | 169 | ||
170 | static enum drm_connector_status | 170 | static enum drm_connector_status |
171 | nouveau_connector_detect(struct drm_connector *connector) | 171 | nouveau_connector_detect(struct drm_connector *connector, bool force) |
172 | { | 172 | { |
173 | struct drm_device *dev = connector->dev; | 173 | struct drm_device *dev = connector->dev; |
174 | struct nouveau_connector *nv_connector = nouveau_connector(connector); | 174 | struct nouveau_connector *nv_connector = nouveau_connector(connector); |
@@ -246,7 +246,7 @@ detect_analog: | |||
246 | } | 246 | } |
247 | 247 | ||
248 | static enum drm_connector_status | 248 | static enum drm_connector_status |
249 | nouveau_connector_detect_lvds(struct drm_connector *connector) | 249 | nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) |
250 | { | 250 | { |
251 | struct drm_device *dev = connector->dev; | 251 | struct drm_device *dev = connector->dev; |
252 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 252 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
@@ -267,7 +267,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector) | |||
267 | 267 | ||
268 | /* Try retrieving EDID via DDC */ | 268 | /* Try retrieving EDID via DDC */ |
269 | if (!dev_priv->vbios.fp_no_ddc) { | 269 | if (!dev_priv->vbios.fp_no_ddc) { |
270 | status = nouveau_connector_detect(connector); | 270 | status = nouveau_connector_detect(connector, force); |
271 | if (status == connector_status_connected) | 271 | if (status == connector_status_connected) |
272 | goto out; | 272 | goto out; |
273 | } | 273 | } |
@@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector) | |||
558 | if (nv_encoder->dcb->type == OUTPUT_LVDS && | 558 | if (nv_encoder->dcb->type == OUTPUT_LVDS && |
559 | (nv_encoder->dcb->lvdsconf.use_straps_for_mode || | 559 | (nv_encoder->dcb->lvdsconf.use_straps_for_mode || |
560 | dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { | 560 | dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { |
561 | nv_connector->native_mode = drm_mode_create(dev); | 561 | struct drm_display_mode mode; |
562 | nouveau_bios_fp_mode(dev, nv_connector->native_mode); | 562 | |
563 | nouveau_bios_fp_mode(dev, &mode); | ||
564 | nv_connector->native_mode = drm_mode_duplicate(dev, &mode); | ||
563 | } | 565 | } |
564 | 566 | ||
565 | /* Find the native mode if this is a digital panel, if we didn't | 567 | /* Find the native mode if this is a digital panel, if we didn't |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 6b208ffafa8d..87ac21ec23d2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
@@ -64,16 +64,17 @@ nouveau_fence_update(struct nouveau_channel *chan) | |||
64 | struct nouveau_fence *fence; | 64 | struct nouveau_fence *fence; |
65 | uint32_t sequence; | 65 | uint32_t sequence; |
66 | 66 | ||
67 | spin_lock(&chan->fence.lock); | ||
68 | |||
67 | if (USE_REFCNT) | 69 | if (USE_REFCNT) |
68 | sequence = nvchan_rd32(chan, 0x48); | 70 | sequence = nvchan_rd32(chan, 0x48); |
69 | else | 71 | else |
70 | sequence = atomic_read(&chan->fence.last_sequence_irq); | 72 | sequence = atomic_read(&chan->fence.last_sequence_irq); |
71 | 73 | ||
72 | if (chan->fence.sequence_ack == sequence) | 74 | if (chan->fence.sequence_ack == sequence) |
73 | return; | 75 | goto out; |
74 | chan->fence.sequence_ack = sequence; | 76 | chan->fence.sequence_ack = sequence; |
75 | 77 | ||
76 | spin_lock(&chan->fence.lock); | ||
77 | list_for_each_safe(entry, tmp, &chan->fence.pending) { | 78 | list_for_each_safe(entry, tmp, &chan->fence.pending) { |
78 | fence = list_entry(entry, struct nouveau_fence, entry); | 79 | fence = list_entry(entry, struct nouveau_fence, entry); |
79 | 80 | ||
@@ -85,6 +86,7 @@ nouveau_fence_update(struct nouveau_channel *chan) | |||
85 | if (sequence == chan->fence.sequence_ack) | 86 | if (sequence == chan->fence.sequence_ack) |
86 | break; | 87 | break; |
87 | } | 88 | } |
89 | out: | ||
88 | spin_unlock(&chan->fence.lock); | 90 | spin_unlock(&chan->fence.lock); |
89 | } | 91 | } |
90 | 92 | ||
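The nouveau_fence_update() change widens the fence spinlock so that the sequence read, the early-out check, and the walk of the pending list all sit in one critical section; the early return becomes a goto so every path releases the lock. The general shape (sketch; read_current_sequence() stands in for the refcount/IRQ sequence read):

        spin_lock(&chan->fence.lock);

        sequence = read_current_sequence(chan);         /* hypothetical helper */
        if (sequence == chan->fence.sequence_ack)
                goto out;                               /* nothing new, still unlock */
        chan->fence.sequence_ack = sequence;

        /* ... walk and signal chan->fence.pending under the lock ... */
out:
        spin_unlock(&chan->fence.lock);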
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 581c67cd7b24..19620a6709f5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
167 | goto out; | 167 | goto out; |
168 | 168 | ||
169 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); | 169 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); |
170 | /* drop reference from allocate - handle holds it now */ | ||
171 | drm_gem_object_unreference_unlocked(nvbo->gem); | ||
170 | out: | 172 | out: |
171 | drm_gem_object_handle_unreference_unlocked(nvbo->gem); | ||
172 | |||
173 | if (ret) | ||
174 | drm_gem_object_unreference_unlocked(nvbo->gem); | ||
175 | return ret; | 173 | return ret; |
176 | } | 174 | } |
177 | 175 | ||
@@ -245,7 +243,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence) | |||
245 | list_del(&nvbo->entry); | 243 | list_del(&nvbo->entry); |
246 | nvbo->reserved_by = NULL; | 244 | nvbo->reserved_by = NULL; |
247 | ttm_bo_unreserve(&nvbo->bo); | 245 | ttm_bo_unreserve(&nvbo->bo); |
248 | drm_gem_object_unreference(nvbo->gem); | 246 | drm_gem_object_unreference_unlocked(nvbo->gem); |
249 | } | 247 | } |
250 | } | 248 | } |
251 | 249 | ||
@@ -300,7 +298,7 @@ retry: | |||
300 | validate_fini(op, NULL); | 298 | validate_fini(op, NULL); |
301 | if (ret == -EAGAIN) | 299 | if (ret == -EAGAIN) |
302 | ret = ttm_bo_wait_unreserved(&nvbo->bo, false); | 300 | ret = ttm_bo_wait_unreserved(&nvbo->bo, false); |
303 | drm_gem_object_unreference(gem); | 301 | drm_gem_object_unreference_unlocked(gem); |
304 | if (ret) { | 302 | if (ret) { |
305 | NV_ERROR(dev, "fail reserve\n"); | 303 | NV_ERROR(dev, "fail reserve\n"); |
306 | return ret; | 304 | return ret; |
@@ -616,8 +614,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
616 | return PTR_ERR(bo); | 614 | return PTR_ERR(bo); |
617 | } | 615 | } |
618 | 616 | ||
619 | mutex_lock(&dev->struct_mutex); | ||
620 | |||
621 | /* Mark push buffers as being used on PFIFO, the validation code | 617 | /* Mark push buffers as being used on PFIFO, the validation code |
622 | * will then make sure that if the pushbuf bo moves, that they | 618 | * will then make sure that if the pushbuf bo moves, that they |
623 | * happen on the kernel channel, which will in turn cause a sync | 619 | * happen on the kernel channel, which will in turn cause a sync |
@@ -731,7 +727,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
731 | out: | 727 | out: |
732 | validate_fini(&op, fence); | 728 | validate_fini(&op, fence); |
733 | nouveau_fence_unref((void**)&fence); | 729 | nouveau_fence_unref((void**)&fence); |
734 | mutex_unlock(&dev->struct_mutex); | ||
735 | kfree(bo); | 730 | kfree(bo); |
736 | kfree(push); | 731 | kfree(push); |
737 | 732 | ||
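The nouveau_gem_ioctl_new() change relies on the standard GEM reference ownership rule: drm_gem_handle_create() takes its own reference, so the reference held since allocation is dropped unconditionally once handle creation has succeeded or failed. A sketch of the resulting tail of such an ioctl:

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        /* Drop the allocation reference: on success the handle keeps the
         * object alive, on failure nobody does and the object is freed. */
        drm_gem_object_unreference_unlocked(obj);
        return ret;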
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index c95bf9b681dd..91ef93cf1f35 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
@@ -139,6 +139,8 @@ nv50_instmem_init(struct drm_device *dev) | |||
139 | chan->file_priv = (struct drm_file *)-2; | 139 | chan->file_priv = (struct drm_file *)-2; |
140 | dev_priv->fifos[0] = dev_priv->fifos[127] = chan; | 140 | dev_priv->fifos[0] = dev_priv->fifos[127] = chan; |
141 | 141 | ||
142 | INIT_LIST_HEAD(&chan->ramht_refs); | ||
143 | |||
142 | /* Channel's PRAMIN object + heap */ | 144 | /* Channel's PRAMIN object + heap */ |
143 | ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, | 145 | ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, |
144 | NULL, &chan->ramin); | 146 | NULL, &chan->ramin); |
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 1bc72c3190a9..fe359a239df3 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h | |||
@@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS | |||
4999 | #define SW_I2C_CNTL_WRITE1BIT 6 | 4999 | #define SW_I2C_CNTL_WRITE1BIT 6 |
5000 | 5000 | ||
5001 | //==============================VESA definition Portion=============================== | 5001 | //==============================VESA definition Portion=============================== |
5002 | #define VESA_OEM_PRODUCT_REV '01.00' | 5002 | #define VESA_OEM_PRODUCT_REV "01.00" |
5003 | #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support | 5003 | #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support |
5004 | #define VESA_MODE_WIN_ATTRIBUTE 7 | 5004 | #define VESA_MODE_WIN_ATTRIBUTE 7 |
5005 | #define VESA_WIN_SIZE 64 | 5005 | #define VESA_WIN_SIZE 64 |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 577239a24fd5..cd0290f946cf 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -332,6 +332,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, | |||
332 | args.usV_SyncWidth = | 332 | args.usV_SyncWidth = |
333 | cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); | 333 | cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); |
334 | 334 | ||
335 | args.ucOverscanRight = radeon_crtc->h_border; | ||
336 | args.ucOverscanLeft = radeon_crtc->h_border; | ||
337 | args.ucOverscanBottom = radeon_crtc->v_border; | ||
338 | args.ucOverscanTop = radeon_crtc->v_border; | ||
339 | |||
335 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | 340 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) |
336 | misc |= ATOM_VSYNC_POLARITY; | 341 | misc |= ATOM_VSYNC_POLARITY; |
337 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) | 342 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) |
@@ -534,6 +539,21 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
534 | pll->algo = PLL_ALGO_LEGACY; | 539 | pll->algo = PLL_ALGO_LEGACY; |
535 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; | 540 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; |
536 | } | 541 | } |
542 | /* There is some evidence (often anecdotal) that RV515/RV620 LVDS | ||
543 | * (on some boards at least) prefers the legacy algo. I'm not | ||
544 | * sure whether this should be handled generically or on a | ||
545 | * case-by-case quirk basis. Both algos should work fine in the | ||
546 | * majority of cases. | ||
547 | */ | ||
548 | if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) && | ||
549 | ((rdev->family == CHIP_RV515) || | ||
550 | (rdev->family == CHIP_RV620))) { | ||
551 | /* allow the user to override just in case */ | ||
552 | if (radeon_new_pll == 1) | ||
553 | pll->algo = PLL_ALGO_NEW; | ||
554 | else | ||
555 | pll->algo = PLL_ALGO_LEGACY; | ||
556 | } | ||
537 | } else { | 557 | } else { |
538 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 558 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
539 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | 559 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
@@ -1056,11 +1076,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
1056 | 1076 | ||
1057 | if (rdev->family >= CHIP_RV770) { | 1077 | if (rdev->family >= CHIP_RV770) { |
1058 | if (radeon_crtc->crtc_id) { | 1078 | if (radeon_crtc->crtc_id) { |
1059 | WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); | 1079 | WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); |
1060 | WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); | 1080 | WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); |
1061 | } else { | 1081 | } else { |
1062 | WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); | 1082 | WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); |
1063 | WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); | 1083 | WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); |
1064 | } | 1084 | } |
1065 | } | 1085 | } |
1066 | WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | 1086 | WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, |
@@ -1197,8 +1217,18 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
1197 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 1217 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
1198 | struct drm_device *dev = crtc->dev; | 1218 | struct drm_device *dev = crtc->dev; |
1199 | struct radeon_device *rdev = dev->dev_private; | 1219 | struct radeon_device *rdev = dev->dev_private; |
1220 | struct drm_encoder *encoder; | ||
1221 | bool is_tvcv = false; | ||
1200 | 1222 | ||
1201 | /* TODO color tiling */ | 1223 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
1224 | /* find tv std */ | ||
1225 | if (encoder->crtc == crtc) { | ||
1226 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1227 | if (radeon_encoder->active_device & | ||
1228 | (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) | ||
1229 | is_tvcv = true; | ||
1230 | } | ||
1231 | } | ||
1202 | 1232 | ||
1203 | atombios_disable_ss(crtc); | 1233 | atombios_disable_ss(crtc); |
1204 | /* always set DCPLL */ | 1234 | /* always set DCPLL */ |
@@ -1207,9 +1237,14 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
1207 | atombios_crtc_set_pll(crtc, adjusted_mode); | 1237 | atombios_crtc_set_pll(crtc, adjusted_mode); |
1208 | atombios_enable_ss(crtc); | 1238 | atombios_enable_ss(crtc); |
1209 | 1239 | ||
1210 | if (ASIC_IS_AVIVO(rdev)) | 1240 | if (ASIC_IS_DCE4(rdev)) |
1211 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | 1241 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); |
1212 | else { | 1242 | else if (ASIC_IS_AVIVO(rdev)) { |
1243 | if (is_tvcv) | ||
1244 | atombios_crtc_set_timing(crtc, adjusted_mode); | ||
1245 | else | ||
1246 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | ||
1247 | } else { | ||
1213 | atombios_crtc_set_timing(crtc, adjusted_mode); | 1248 | atombios_crtc_set_timing(crtc, adjusted_mode); |
1214 | if (radeon_crtc->crtc_id == 0) | 1249 | if (radeon_crtc->crtc_id == 0) |
1215 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | 1250 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 957d5067ad9c..79082d4398ae 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -675,6 +675,43 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev) | |||
675 | return 0; | 675 | return 0; |
676 | } | 676 | } |
677 | 677 | ||
678 | static int evergreen_cp_start(struct radeon_device *rdev) | ||
679 | { | ||
680 | int r; | ||
681 | uint32_t cp_me; | ||
682 | |||
683 | r = radeon_ring_lock(rdev, 7); | ||
684 | if (r) { | ||
685 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); | ||
686 | return r; | ||
687 | } | ||
688 | radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); | ||
689 | radeon_ring_write(rdev, 0x1); | ||
690 | radeon_ring_write(rdev, 0x0); | ||
691 | radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1); | ||
692 | radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); | ||
693 | radeon_ring_write(rdev, 0); | ||
694 | radeon_ring_write(rdev, 0); | ||
695 | radeon_ring_unlock_commit(rdev); | ||
696 | |||
697 | cp_me = 0xff; | ||
698 | WREG32(CP_ME_CNTL, cp_me); | ||
699 | |||
700 | r = radeon_ring_lock(rdev, 4); | ||
701 | if (r) { | ||
702 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); | ||
703 | return r; | ||
704 | } | ||
705 | /* init some VGT regs */ | ||
706 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | ||
707 | radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
708 | radeon_ring_write(rdev, 0xe); | ||
709 | radeon_ring_write(rdev, 0x10); | ||
710 | radeon_ring_unlock_commit(rdev); | ||
711 | |||
712 | return 0; | ||
713 | } | ||
714 | |||
678 | int evergreen_cp_resume(struct radeon_device *rdev) | 715 | int evergreen_cp_resume(struct radeon_device *rdev) |
679 | { | 716 | { |
680 | u32 tmp; | 717 | u32 tmp; |
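evergreen_cp_start() follows the usual radeon ring contract: radeon_ring_lock() reserves a fixed number of dwords, exactly that many radeon_ring_write() calls emit them, and radeon_ring_unlock_commit() publishes the new write pointer to the CP. A minimal sketch of one emission under that contract (PACKET3_EXAMPLE_OP is a made-up opcode for illustration):

static int emit_example_packet(struct radeon_device *rdev, u32 payload)
{
        int r;

        r = radeon_ring_lock(rdev, 2);          /* reserve 2 dwords */
        if (r)
                return r;
        radeon_ring_write(rdev, PACKET3(PACKET3_EXAMPLE_OP, 0));
        radeon_ring_write(rdev, payload);
        radeon_ring_unlock_commit(rdev);        /* hand the dwords to the CP */
        return 0;
}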
@@ -719,7 +756,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
719 | rdev->cp.rptr = RREG32(CP_RB_RPTR); | 756 | rdev->cp.rptr = RREG32(CP_RB_RPTR); |
720 | rdev->cp.wptr = RREG32(CP_RB_WPTR); | 757 | rdev->cp.wptr = RREG32(CP_RB_WPTR); |
721 | 758 | ||
722 | r600_cp_start(rdev); | 759 | evergreen_cp_start(rdev); |
723 | rdev->cp.ready = true; | 760 | rdev->cp.ready = true; |
724 | r = radeon_ring_test(rdev); | 761 | r = radeon_ring_test(rdev); |
725 | if (r) { | 762 | if (r) { |
@@ -1123,14 +1160,25 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1123 | EVERGREEN_MAX_BACKENDS_MASK)); | 1160 | EVERGREEN_MAX_BACKENDS_MASK)); |
1124 | break; | 1161 | break; |
1125 | } | 1162 | } |
1126 | } else | 1163 | } else { |
1127 | gb_backend_map = | 1164 | switch (rdev->family) { |
1128 | evergreen_get_tile_pipe_to_backend_map(rdev, | 1165 | case CHIP_CYPRESS: |
1129 | rdev->config.evergreen.max_tile_pipes, | 1166 | case CHIP_HEMLOCK: |
1130 | rdev->config.evergreen.max_backends, | 1167 | gb_backend_map = 0x66442200; |
1131 | ((EVERGREEN_MAX_BACKENDS_MASK << | 1168 | break; |
1132 | rdev->config.evergreen.max_backends) & | 1169 | case CHIP_JUNIPER: |
1133 | EVERGREEN_MAX_BACKENDS_MASK)); | 1170 | gb_backend_map = 0x00006420; |
1171 | break; | ||
1172 | default: | ||
1173 | gb_backend_map = | ||
1174 | evergreen_get_tile_pipe_to_backend_map(rdev, | ||
1175 | rdev->config.evergreen.max_tile_pipes, | ||
1176 | rdev->config.evergreen.max_backends, | ||
1177 | ((EVERGREEN_MAX_BACKENDS_MASK << | ||
1178 | rdev->config.evergreen.max_backends) & | ||
1179 | EVERGREEN_MAX_BACKENDS_MASK)); | ||
1180 | } | ||
1181 | } | ||
1134 | 1182 | ||
1135 | rdev->config.evergreen.tile_config = gb_addr_config; | 1183 | rdev->config.evergreen.tile_config = gb_addr_config; |
1136 | WREG32(GB_BACKEND_MAP, gb_backend_map); | 1184 | WREG32(GB_BACKEND_MAP, gb_backend_map); |
@@ -2054,11 +2102,6 @@ int evergreen_resume(struct radeon_device *rdev) | |||
2054 | */ | 2102 | */ |
2055 | /* post card */ | 2103 | /* post card */ |
2056 | atom_asic_init(rdev->mode_info.atom_context); | 2104 | atom_asic_init(rdev->mode_info.atom_context); |
2057 | /* Initialize clocks */ | ||
2058 | r = radeon_clocks_init(rdev); | ||
2059 | if (r) { | ||
2060 | return r; | ||
2061 | } | ||
2062 | 2105 | ||
2063 | r = evergreen_startup(rdev); | 2106 | r = evergreen_startup(rdev); |
2064 | if (r) { | 2107 | if (r) { |
@@ -2164,9 +2207,6 @@ int evergreen_init(struct radeon_device *rdev) | |||
2164 | radeon_surface_init(rdev); | 2207 | radeon_surface_init(rdev); |
2165 | /* Initialize clocks */ | 2208 | /* Initialize clocks */ |
2166 | radeon_get_clock_info(rdev->ddev); | 2209 | radeon_get_clock_info(rdev->ddev); |
2167 | r = radeon_clocks_init(rdev); | ||
2168 | if (r) | ||
2169 | return r; | ||
2170 | /* Fence driver */ | 2210 | /* Fence driver */ |
2171 | r = radeon_fence_driver_init(rdev); | 2211 | r = radeon_fence_driver_init(rdev); |
2172 | if (r) | 2212 | if (r) |
@@ -2236,7 +2276,6 @@ void evergreen_fini(struct radeon_device *rdev) | |||
2236 | evergreen_pcie_gart_fini(rdev); | 2276 | evergreen_pcie_gart_fini(rdev); |
2237 | radeon_gem_fini(rdev); | 2277 | radeon_gem_fini(rdev); |
2238 | radeon_fence_driver_fini(rdev); | 2278 | radeon_fence_driver_fini(rdev); |
2239 | radeon_clocks_fini(rdev); | ||
2240 | radeon_agp_fini(rdev); | 2279 | radeon_agp_fini(rdev); |
2241 | radeon_bo_fini(rdev); | 2280 | radeon_bo_fini(rdev); |
2242 | radeon_atombios_fini(rdev); | 2281 | radeon_atombios_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e817a0bb5eb4..e151f16a8f86 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -2020,18 +2020,7 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l | |||
2020 | return false; | 2020 | return false; |
2021 | } | 2021 | } |
2022 | elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies); | 2022 | elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies); |
2023 | if (elapsed >= 3000) { | 2023 | if (elapsed >= 10000) { |
2024 | /* very likely the improbable case where current | ||
2025 | * rptr is equal to last recorded, a while ago, rptr | ||
2026 | * this is more likely a false positive update tracking | ||
2027 | * information which should force us to be recall at | ||
2028 | * latter point | ||
2029 | */ | ||
2030 | lockup->last_cp_rptr = cp->rptr; | ||
2031 | lockup->last_jiffies = jiffies; | ||
2032 | return false; | ||
2033 | } | ||
2034 | if (elapsed >= 1000) { | ||
2035 | dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); | 2024 | dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); |
2036 | return true; | 2025 | return true; |
2037 | } | 2026 | } |
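The lockup heuristic above now only declares a CP stall after ten seconds without read-pointer progress, dropping the earlier 1s/3s special-casing. A generic sketch of that kind of elapsed-time check on jiffies timestamps (time_after() copes with wrap-around, jiffies_to_msecs() keeps the threshold independent of CONFIG_HZ):

#include <linux/jiffies.h>

static bool stuck_for(unsigned long since, unsigned int threshold_ms)
{
        if (!time_after(jiffies, since))
                return false;           /* timestamp not in the past yet */
        return jiffies_to_msecs(jiffies - since) >= threshold_ms;
}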
@@ -3308,13 +3297,14 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3308 | unsigned long size; | 3297 | unsigned long size; |
3309 | unsigned prim_walk; | 3298 | unsigned prim_walk; |
3310 | unsigned nverts; | 3299 | unsigned nverts; |
3300 | unsigned num_cb = track->num_cb; | ||
3311 | 3301 | ||
3312 | for (i = 0; i < track->num_cb; i++) { | 3302 | if (!track->zb_cb_clear && !track->color_channel_mask && |
3303 | !track->blend_read_enable) | ||
3304 | num_cb = 0; | ||
3305 | |||
3306 | for (i = 0; i < num_cb; i++) { | ||
3313 | if (track->cb[i].robj == NULL) { | 3307 | if (track->cb[i].robj == NULL) { |
3314 | if (!(track->zb_cb_clear || track->color_channel_mask || | ||
3315 | track->blend_read_enable)) { | ||
3316 | continue; | ||
3317 | } | ||
3318 | DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); | 3308 | DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); |
3319 | return -EINVAL; | 3309 | return -EINVAL; |
3320 | } | 3310 | } |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index d0ebae9dde25..7a04959ba0ee 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -2119,10 +2119,7 @@ int r600_cp_start(struct radeon_device *rdev) | |||
2119 | } | 2119 | } |
2120 | radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); | 2120 | radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); |
2121 | radeon_ring_write(rdev, 0x1); | 2121 | radeon_ring_write(rdev, 0x1); |
2122 | if (rdev->family >= CHIP_CEDAR) { | 2122 | if (rdev->family >= CHIP_RV770) { |
2123 | radeon_ring_write(rdev, 0x0); | ||
2124 | radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1); | ||
2125 | } else if (rdev->family >= CHIP_RV770) { | ||
2126 | radeon_ring_write(rdev, 0x0); | 2123 | radeon_ring_write(rdev, 0x0); |
2127 | radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); | 2124 | radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); |
2128 | } else { | 2125 | } else { |
@@ -2489,11 +2486,6 @@ int r600_resume(struct radeon_device *rdev) | |||
2489 | */ | 2486 | */ |
2490 | /* post card */ | 2487 | /* post card */ |
2491 | atom_asic_init(rdev->mode_info.atom_context); | 2488 | atom_asic_init(rdev->mode_info.atom_context); |
2492 | /* Initialize clocks */ | ||
2493 | r = radeon_clocks_init(rdev); | ||
2494 | if (r) { | ||
2495 | return r; | ||
2496 | } | ||
2497 | 2489 | ||
2498 | r = r600_startup(rdev); | 2490 | r = r600_startup(rdev); |
2499 | if (r) { | 2491 | if (r) { |
@@ -2586,9 +2578,6 @@ int r600_init(struct radeon_device *rdev) | |||
2586 | radeon_surface_init(rdev); | 2578 | radeon_surface_init(rdev); |
2587 | /* Initialize clocks */ | 2579 | /* Initialize clocks */ |
2588 | radeon_get_clock_info(rdev->ddev); | 2580 | radeon_get_clock_info(rdev->ddev); |
2589 | r = radeon_clocks_init(rdev); | ||
2590 | if (r) | ||
2591 | return r; | ||
2592 | /* Fence driver */ | 2581 | /* Fence driver */ |
2593 | r = radeon_fence_driver_init(rdev); | 2582 | r = radeon_fence_driver_init(rdev); |
2594 | if (r) | 2583 | if (r) |
@@ -2663,7 +2652,6 @@ void r600_fini(struct radeon_device *rdev) | |||
2663 | radeon_agp_fini(rdev); | 2652 | radeon_agp_fini(rdev); |
2664 | radeon_gem_fini(rdev); | 2653 | radeon_gem_fini(rdev); |
2665 | radeon_fence_driver_fini(rdev); | 2654 | radeon_fence_driver_fini(rdev); |
2666 | radeon_clocks_fini(rdev); | ||
2667 | radeon_bo_fini(rdev); | 2655 | radeon_bo_fini(rdev); |
2668 | radeon_atombios_fini(rdev); | 2656 | radeon_atombios_fini(rdev); |
2669 | kfree(rdev->bios); | 2657 | kfree(rdev->bios); |
@@ -2741,7 +2729,7 @@ int r600_ib_test(struct radeon_device *rdev) | |||
2741 | if (i < rdev->usec_timeout) { | 2729 | if (i < rdev->usec_timeout) { |
2742 | DRM_INFO("ib test succeeded in %u usecs\n", i); | 2730 | DRM_INFO("ib test succeeded in %u usecs\n", i); |
2743 | } else { | 2731 | } else { |
2744 | DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", | 2732 | DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", |
2745 | scratch, tmp); | 2733 | scratch, tmp); |
2746 | r = -EINVAL; | 2734 | r = -EINVAL; |
2747 | } | 2735 | } |
@@ -3540,8 +3528,9 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) | |||
3540 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read | 3528 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read |
3541 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL | 3529 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL |
3542 | */ | 3530 | */ |
3543 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { | 3531 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && |
3544 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; | 3532 | rdev->vram_scratch.ptr) { |
3533 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; | ||
3545 | u32 tmp; | 3534 | u32 tmp; |
3546 | 3535 | ||
3547 | WREG32(HDP_DEBUG1, 0); | 3536 | WREG32(HDP_DEBUG1, 0); |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index d13622ae74e9..9ceb2a1ce799 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -1,3 +1,28 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2009 Red Hat Inc. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | */ | ||
25 | |||
1 | #include "drmP.h" | 26 | #include "drmP.h" |
2 | #include "drm.h" | 27 | #include "drm.h" |
3 | #include "radeon_drm.h" | 28 | #include "radeon_drm.h" |
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h index fdc3b378cbb0..f437d36dd98c 100644 --- a/drivers/gpu/drm/radeon/r600_blit_shaders.h +++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h | |||
@@ -1,3 +1,27 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2009 Red Hat Inc. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | */ | ||
1 | 25 | ||
2 | #ifndef R600_BLIT_SHADERS_H | 26 | #ifndef R600_BLIT_SHADERS_H |
3 | #define R600_BLIT_SHADERS_H | 27 | #define R600_BLIT_SHADERS_H |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index d8864949e387..250a3a918193 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -1170,9 +1170,8 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i | |||
1170 | /* using get ib will give us the offset into the mipmap bo */ | 1170 | /* using get ib will give us the offset into the mipmap bo */ |
1171 | word0 = radeon_get_ib_value(p, idx + 3) << 8; | 1171 | word0 = radeon_get_ib_value(p, idx + 3) << 8; |
1172 | if ((mipmap_size + word0) > radeon_bo_size(mipmap)) { | 1172 | if ((mipmap_size + word0) > radeon_bo_size(mipmap)) { |
1173 | dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", | 1173 | /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", |
1174 | w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture)); | 1174 | w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/ |
1175 | return -EINVAL; | ||
1176 | } | 1175 | } |
1177 | return 0; | 1176 | return 0; |
1178 | } | 1177 | } |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3dfcfa3ca425..a168d644bf9e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -1013,6 +1013,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | |||
1013 | int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | 1013 | int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, |
1014 | struct drm_file *filp); | 1014 | struct drm_file *filp); |
1015 | 1015 | ||
1016 | /* VRAM scratch page for HDP bug */ | ||
1017 | struct r700_vram_scratch { | ||
1018 | struct radeon_bo *robj; | ||
1019 | volatile uint32_t *ptr; | ||
1020 | }; | ||
1016 | 1021 | ||
1017 | /* | 1022 | /* |
1018 | * Core structure, functions and helpers. | 1023 | * Core structure, functions and helpers. |
@@ -1079,6 +1084,7 @@ struct radeon_device { | |||
1079 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ | 1084 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ |
1080 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ | 1085 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ |
1081 | struct r600_blit r600_blit; | 1086 | struct r600_blit r600_blit; |
1087 | struct r700_vram_scratch vram_scratch; | ||
1082 | int msi_enabled; /* msi enabled */ | 1088 | int msi_enabled; /* msi enabled */ |
1083 | struct r600_ih ih; /* r6/700 interrupt ring */ | 1089 | struct r600_ih ih; /* r6/700 interrupt ring */ |
1084 | struct workqueue_struct *wq; | 1090 | struct workqueue_struct *wq; |
@@ -1333,8 +1339,6 @@ extern bool radeon_card_posted(struct radeon_device *rdev); | |||
1333 | extern void radeon_update_bandwidth_info(struct radeon_device *rdev); | 1339 | extern void radeon_update_bandwidth_info(struct radeon_device *rdev); |
1334 | extern void radeon_update_display_priority(struct radeon_device *rdev); | 1340 | extern void radeon_update_display_priority(struct radeon_device *rdev); |
1335 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); | 1341 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); |
1336 | extern int radeon_clocks_init(struct radeon_device *rdev); | ||
1337 | extern void radeon_clocks_fini(struct radeon_device *rdev); | ||
1338 | extern void radeon_scratch_init(struct radeon_device *rdev); | 1342 | extern void radeon_scratch_init(struct radeon_device *rdev); |
1339 | extern void radeon_surface_init(struct radeon_device *rdev); | 1343 | extern void radeon_surface_init(struct radeon_device *rdev); |
1340 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); | 1344 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index a21bf88e8c2d..25e1dd197791 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -858,21 +858,3 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
858 | return 0; | 858 | return 0; |
859 | } | 859 | } |
860 | 860 | ||
861 | /* | ||
862 | * Wrapper around modesetting bits. Move to radeon_clocks.c? | ||
863 | */ | ||
864 | int radeon_clocks_init(struct radeon_device *rdev) | ||
865 | { | ||
866 | int r; | ||
867 | |||
868 | r = radeon_static_clocks_init(rdev->ddev); | ||
869 | if (r) { | ||
870 | return r; | ||
871 | } | ||
872 | DRM_INFO("Clocks initialized !\n"); | ||
873 | return 0; | ||
874 | } | ||
875 | |||
876 | void radeon_clocks_fini(struct radeon_device *rdev) | ||
877 | { | ||
878 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 61141981880d..68932ba7b8a4 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -85,6 +85,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
85 | for (i = 0; i < num_indices; i++) { | 85 | for (i = 0; i < num_indices; i++) { |
86 | gpio = &i2c_info->asGPIO_Info[i]; | 86 | gpio = &i2c_info->asGPIO_Info[i]; |
87 | 87 | ||
88 | /* some evergreen boards have bad data for this entry */ | ||
89 | if (ASIC_IS_DCE4(rdev)) { | ||
90 | if ((i == 7) && | ||
91 | (gpio->usClkMaskRegisterIndex == 0x1936) && | ||
92 | (gpio->sucI2cId.ucAccess == 0)) { | ||
93 | gpio->sucI2cId.ucAccess = 0x97; | ||
94 | gpio->ucDataMaskShift = 8; | ||
95 | gpio->ucDataEnShift = 8; | ||
96 | gpio->ucDataY_Shift = 8; | ||
97 | gpio->ucDataA_Shift = 8; | ||
98 | } | ||
99 | } | ||
100 | |||
88 | if (gpio->sucI2cId.ucAccess == id) { | 101 | if (gpio->sucI2cId.ucAccess == id) { |
89 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | 102 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; |
90 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | 103 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; |
@@ -147,6 +160,20 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) | |||
147 | for (i = 0; i < num_indices; i++) { | 160 | for (i = 0; i < num_indices; i++) { |
148 | gpio = &i2c_info->asGPIO_Info[i]; | 161 | gpio = &i2c_info->asGPIO_Info[i]; |
149 | i2c.valid = false; | 162 | i2c.valid = false; |
163 | |||
164 | /* some evergreen boards have bad data for this entry */ | ||
165 | if (ASIC_IS_DCE4(rdev)) { | ||
166 | if ((i == 7) && | ||
167 | (gpio->usClkMaskRegisterIndex == 0x1936) && | ||
168 | (gpio->sucI2cId.ucAccess == 0)) { | ||
169 | gpio->sucI2cId.ucAccess = 0x97; | ||
170 | gpio->ucDataMaskShift = 8; | ||
171 | gpio->ucDataEnShift = 8; | ||
172 | gpio->ucDataY_Shift = 8; | ||
173 | gpio->ucDataA_Shift = 8; | ||
174 | } | ||
175 | } | ||
176 | |||
150 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | 177 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; |
151 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | 178 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; |
152 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; | 179 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; |
@@ -290,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
290 | *connector_type = DRM_MODE_CONNECTOR_DVID; | 317 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
291 | } | 318 | } |
292 | 319 | ||
320 | /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ | ||
321 | if ((dev->pdev->device == 0x796e) && | ||
322 | (dev->pdev->subsystem_vendor == 0x1462) && | ||
323 | (dev->pdev->subsystem_device == 0x7302)) { | ||
324 | if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || | ||
325 | (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) | ||
326 | return false; | ||
327 | } | ||
328 | |||
293 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ | 329 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ |
294 | if ((dev->pdev->device == 0x7941) && | 330 | if ((dev->pdev->device == 0x7941) && |
295 | (dev->pdev->subsystem_vendor == 0x147b) && | 331 | (dev->pdev->subsystem_vendor == 0x147b) && |
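The two radeon_atombios.c hunks above are board-specific quirks: the first patches a known-bad I2C GPIO table entry reported on some DCE4 (Evergreen) boards, the second drops the phantom DFP2/DFP3 entries the MSI K9A2GM BIOS advertises, keyed on the PCI device and subsystem IDs. A rough, self-contained sketch of that ID-matching pattern (the struct, helper name and the numeric device constants are illustrative, not taken from the driver):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for the PCI identity fields the quirk keys on. */
struct pci_ids {
	uint16_t device;
	uint16_t subsystem_vendor;
	uint16_t subsystem_device;
};

/* Return false to drop a BIOS-reported device the board cannot drive. */
static bool board_supports_device(const struct pci_ids *id, unsigned supported_device)
{
	/* MSI K9A2GM V2/V3 (device 796e, subsystem 1462:7302) has no HDMI
	 * or DVI, so reject the DFP2/DFP3 entries its BIOS still lists. */
	if (id->device == 0x796e &&
	    id->subsystem_vendor == 0x1462 &&
	    id->subsystem_device == 0x7302 &&
	    (supported_device == 1 /* placeholder for ATOM_DEVICE_DFP2_SUPPORT */ ||
	     supported_device == 2 /* placeholder for ATOM_DEVICE_DFP3_SUPPORT */))
		return false;

	return true;
}

int main(void)
{
	struct pci_ids k9a2gm = { 0x796e, 0x1462, 0x7302 };
	printf("DFP2 kept: %d\n", board_supports_device(&k9a2gm, 1));
	return 0;
}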
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 14448a740ba6..5249af8931e6 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
@@ -327,6 +327,14 @@ void radeon_get_clock_info(struct drm_device *dev) | |||
327 | mpll->max_feedback_div = 0xff; | 327 | mpll->max_feedback_div = 0xff; |
328 | mpll->best_vco = 0; | 328 | mpll->best_vco = 0; |
329 | 329 | ||
330 | if (!rdev->clock.default_sclk) | ||
331 | rdev->clock.default_sclk = radeon_get_engine_clock(rdev); | ||
332 | if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock) | ||
333 | rdev->clock.default_mclk = radeon_get_memory_clock(rdev); | ||
334 | |||
335 | rdev->pm.current_sclk = rdev->clock.default_sclk; | ||
336 | rdev->pm.current_mclk = rdev->clock.default_mclk; | ||
337 | |||
330 | } | 338 | } |
331 | 339 | ||
332 | /* 10 khz */ | 340 | /* 10 khz */ |
@@ -897,53 +905,3 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
897 | } | 905 | } |
898 | } | 906 | } |
899 | 907 | ||
900 | static void radeon_apply_clock_quirks(struct radeon_device *rdev) | ||
901 | { | ||
902 | uint32_t tmp; | ||
903 | |||
904 | /* XXX make sure engine is idle */ | ||
905 | |||
906 | if (rdev->family < CHIP_RS600) { | ||
907 | tmp = RREG32_PLL(RADEON_SCLK_CNTL); | ||
908 | if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev)) | ||
909 | tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP; | ||
910 | if ((rdev->family == CHIP_RV250) | ||
911 | || (rdev->family == CHIP_RV280)) | ||
912 | tmp |= | ||
913 | RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2; | ||
914 | if ((rdev->family == CHIP_RV350) | ||
915 | || (rdev->family == CHIP_RV380)) | ||
916 | tmp |= R300_SCLK_FORCE_VAP; | ||
917 | if (rdev->family == CHIP_R420) | ||
918 | tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX; | ||
919 | WREG32_PLL(RADEON_SCLK_CNTL, tmp); | ||
920 | } else if (rdev->family < CHIP_R600) { | ||
921 | tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL); | ||
922 | tmp |= AVIVO_CP_FORCEON; | ||
923 | WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp); | ||
924 | |||
925 | tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL); | ||
926 | tmp |= AVIVO_E2_FORCEON; | ||
927 | WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp); | ||
928 | |||
929 | tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL); | ||
930 | tmp |= AVIVO_IDCT_FORCEON; | ||
931 | WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp); | ||
932 | } | ||
933 | } | ||
934 | |||
935 | int radeon_static_clocks_init(struct drm_device *dev) | ||
936 | { | ||
937 | struct radeon_device *rdev = dev->dev_private; | ||
938 | |||
939 | /* XXX make sure engine is idle */ | ||
940 | |||
941 | if (radeon_dynclks != -1) { | ||
942 | if (radeon_dynclks) { | ||
943 | if (rdev->asic->set_clock_gating) | ||
944 | radeon_set_clock_gating(rdev, 1); | ||
945 | } | ||
946 | } | ||
947 | radeon_apply_clock_quirks(rdev); | ||
948 | return 0; | ||
949 | } | ||
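The radeon_clocks.c hunk seeds missing default clocks from a hardware readback and records them as the current values, which later consumers (the bandwidth and i2c-prescale hunks below) can use without touching registers again. A hedged userspace sketch of that fallback-and-cache step, with stubbed readback helpers standing in for radeon_get_engine_clock()/radeon_get_memory_clock():

#include <stdint.h>
#include <stdio.h>

struct clocks {
	uint32_t default_sclk;   /* engine clock, 10 kHz units */
	uint32_t default_mclk;   /* memory clock, 10 kHz units */
	uint32_t current_sclk;
	uint32_t current_mclk;
};

/* Stand-ins for the hardware readback helpers. */
static uint32_t read_engine_clock(void) { return 68000; }  /* 680 MHz */
static uint32_t read_memory_clock(void) { return 80000; }  /* 800 MHz */

static void get_clock_info(struct clocks *c)
{
	/* If the BIOS tables did not provide defaults, fall back to what
	 * the hardware is currently running at. */
	if (!c->default_sclk)
		c->default_sclk = read_engine_clock();
	if (!c->default_mclk)
		c->default_mclk = read_memory_clock();

	/* Cache them as the current clocks so later code can avoid
	 * re-reading registers. */
	c->current_sclk = c->default_sclk;
	c->current_mclk = c->default_mclk;
}

int main(void)
{
	struct clocks c = { 0 };
	get_clock_info(&c);
	printf("sclk %u, mclk %u (10 kHz units)\n", c.current_sclk, c.current_mclk);
	return 0;
}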
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index bd74e428bd14..a04b7a6ad95f 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -1485,6 +1485,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1485 | /* PowerMac8,1 ? */ | 1485 | /* PowerMac8,1 ? */ |
1486 | /* imac g5 isight */ | 1486 | /* imac g5 isight */ |
1487 | rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; | 1487 | rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; |
1488 | } else if ((rdev->pdev->device == 0x4a48) && | ||
1489 | (rdev->pdev->subsystem_vendor == 0x1002) && | ||
1490 | (rdev->pdev->subsystem_device == 0x4a48)) { | ||
1491 | /* Mac X800 */ | ||
1492 | rdev->mode_info.connector_table = CT_MAC_X800; | ||
1488 | } else | 1493 | } else |
1489 | #endif /* CONFIG_PPC_PMAC */ | 1494 | #endif /* CONFIG_PPC_PMAC */ |
1490 | #ifdef CONFIG_PPC64 | 1495 | #ifdef CONFIG_PPC64 |
@@ -1961,6 +1966,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1961 | CONNECTOR_OBJECT_ID_VGA, | 1966 | CONNECTOR_OBJECT_ID_VGA, |
1962 | &hpd); | 1967 | &hpd); |
1963 | break; | 1968 | break; |
1969 | case CT_MAC_X800: | ||
1970 | DRM_INFO("Connector Table: %d (mac x800)\n", | ||
1971 | rdev->mode_info.connector_table); | ||
1972 | /* DVI - primary dac, internal tmds */ | ||
1973 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | ||
1974 | hpd.hpd = RADEON_HPD_1; /* ??? */ | ||
1975 | radeon_add_legacy_encoder(dev, | ||
1976 | radeon_get_encoder_enum(dev, | ||
1977 | ATOM_DEVICE_DFP1_SUPPORT, | ||
1978 | 0), | ||
1979 | ATOM_DEVICE_DFP1_SUPPORT); | ||
1980 | radeon_add_legacy_encoder(dev, | ||
1981 | radeon_get_encoder_enum(dev, | ||
1982 | ATOM_DEVICE_CRT1_SUPPORT, | ||
1983 | 1), | ||
1984 | ATOM_DEVICE_CRT1_SUPPORT); | ||
1985 | radeon_add_legacy_connector(dev, 0, | ||
1986 | ATOM_DEVICE_DFP1_SUPPORT | | ||
1987 | ATOM_DEVICE_CRT1_SUPPORT, | ||
1988 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, | ||
1989 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, | ||
1990 | &hpd); | ||
1991 | /* DVI - tv dac, dvo */ | ||
1992 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); | ||
1993 | hpd.hpd = RADEON_HPD_2; /* ??? */ | ||
1994 | radeon_add_legacy_encoder(dev, | ||
1995 | radeon_get_encoder_enum(dev, | ||
1996 | ATOM_DEVICE_DFP2_SUPPORT, | ||
1997 | 0), | ||
1998 | ATOM_DEVICE_DFP2_SUPPORT); | ||
1999 | radeon_add_legacy_encoder(dev, | ||
2000 | radeon_get_encoder_enum(dev, | ||
2001 | ATOM_DEVICE_CRT2_SUPPORT, | ||
2002 | 2), | ||
2003 | ATOM_DEVICE_CRT2_SUPPORT); | ||
2004 | radeon_add_legacy_connector(dev, 1, | ||
2005 | ATOM_DEVICE_DFP2_SUPPORT | | ||
2006 | ATOM_DEVICE_CRT2_SUPPORT, | ||
2007 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, | ||
2008 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, | ||
2009 | &hpd); | ||
2010 | break; | ||
1964 | default: | 2011 | default: |
1965 | DRM_INFO("Connector table: %d (invalid)\n", | 2012 | DRM_INFO("Connector table: %d (invalid)\n", |
1966 | rdev->mode_info.connector_table); | 2013 | rdev->mode_info.connector_table); |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 1a5ee392e9c7..ecc1a8fafbfd 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -481,7 +481,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector, | |||
481 | return MODE_OK; | 481 | return MODE_OK; |
482 | } | 482 | } |
483 | 483 | ||
484 | static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector) | 484 | static enum drm_connector_status |
485 | radeon_lvds_detect(struct drm_connector *connector, bool force) | ||
485 | { | 486 | { |
486 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 487 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
487 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); | 488 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); |
@@ -594,7 +595,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector, | |||
594 | return MODE_OK; | 595 | return MODE_OK; |
595 | } | 596 | } |
596 | 597 | ||
597 | static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector) | 598 | static enum drm_connector_status |
599 | radeon_vga_detect(struct drm_connector *connector, bool force) | ||
598 | { | 600 | { |
599 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 601 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
600 | struct drm_encoder *encoder; | 602 | struct drm_encoder *encoder; |
@@ -691,7 +693,8 @@ static int radeon_tv_mode_valid(struct drm_connector *connector, | |||
691 | return MODE_OK; | 693 | return MODE_OK; |
692 | } | 694 | } |
693 | 695 | ||
694 | static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector) | 696 | static enum drm_connector_status |
697 | radeon_tv_detect(struct drm_connector *connector, bool force) | ||
695 | { | 698 | { |
696 | struct drm_encoder *encoder; | 699 | struct drm_encoder *encoder; |
697 | struct drm_encoder_helper_funcs *encoder_funcs; | 700 | struct drm_encoder_helper_funcs *encoder_funcs; |
@@ -748,7 +751,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector) | |||
748 | * we have to check if this analog encoder is shared with anyone else (TV) | 751 | * we have to check if this analog encoder is shared with anyone else (TV) |
749 | * if its shared we have to set the other connector to disconnected. | 752 | * if its shared we have to set the other connector to disconnected. |
750 | */ | 753 | */ |
751 | static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector) | 754 | static enum drm_connector_status |
755 | radeon_dvi_detect(struct drm_connector *connector, bool force) | ||
752 | { | 756 | { |
753 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 757 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
754 | struct drm_encoder *encoder = NULL; | 758 | struct drm_encoder *encoder = NULL; |
@@ -972,7 +976,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector) | |||
972 | return ret; | 976 | return ret; |
973 | } | 977 | } |
974 | 978 | ||
975 | static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector) | 979 | static enum drm_connector_status |
980 | radeon_dp_detect(struct drm_connector *connector, bool force) | ||
976 | { | 981 | { |
977 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 982 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
978 | enum drm_connector_status ret = connector_status_disconnected; | 983 | enum drm_connector_status ret = connector_status_disconnected; |
@@ -1051,10 +1056,16 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1051 | uint32_t subpixel_order = SubPixelNone; | 1056 | uint32_t subpixel_order = SubPixelNone; |
1052 | bool shared_ddc = false; | 1057 | bool shared_ddc = false; |
1053 | 1058 | ||
1054 | /* fixme - tv/cv/din */ | ||
1055 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | 1059 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
1056 | return; | 1060 | return; |
1057 | 1061 | ||
1062 | /* if the user selected tv=0 don't try and add the connector */ | ||
1063 | if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) || | ||
1064 | (connector_type == DRM_MODE_CONNECTOR_Composite) || | ||
1065 | (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) && | ||
1066 | (radeon_tv == 0)) | ||
1067 | return; | ||
1068 | |||
1058 | /* see if we already added it */ | 1069 | /* see if we already added it */ |
1059 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1070 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1060 | radeon_connector = to_radeon_connector(connector); | 1071 | radeon_connector = to_radeon_connector(connector); |
@@ -1209,19 +1220,17 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1209 | case DRM_MODE_CONNECTOR_SVIDEO: | 1220 | case DRM_MODE_CONNECTOR_SVIDEO: |
1210 | case DRM_MODE_CONNECTOR_Composite: | 1221 | case DRM_MODE_CONNECTOR_Composite: |
1211 | case DRM_MODE_CONNECTOR_9PinDIN: | 1222 | case DRM_MODE_CONNECTOR_9PinDIN: |
1212 | if (radeon_tv == 1) { | 1223 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); |
1213 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); | 1224 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); |
1214 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); | 1225 | radeon_connector->dac_load_detect = true; |
1215 | radeon_connector->dac_load_detect = true; | 1226 | drm_connector_attach_property(&radeon_connector->base, |
1216 | drm_connector_attach_property(&radeon_connector->base, | 1227 | rdev->mode_info.load_detect_property, |
1217 | rdev->mode_info.load_detect_property, | 1228 | 1); |
1218 | 1); | 1229 | drm_connector_attach_property(&radeon_connector->base, |
1219 | drm_connector_attach_property(&radeon_connector->base, | 1230 | rdev->mode_info.tv_std_property, |
1220 | rdev->mode_info.tv_std_property, | 1231 | radeon_atombios_get_tv_info(rdev)); |
1221 | radeon_atombios_get_tv_info(rdev)); | 1232 | /* no HPD on analog connectors */ |
1222 | /* no HPD on analog connectors */ | 1233 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1223 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | ||
1224 | } | ||
1225 | break; | 1234 | break; |
1226 | case DRM_MODE_CONNECTOR_LVDS: | 1235 | case DRM_MODE_CONNECTOR_LVDS: |
1227 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1236 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
@@ -1272,10 +1281,16 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1272 | struct radeon_connector *radeon_connector; | 1281 | struct radeon_connector *radeon_connector; |
1273 | uint32_t subpixel_order = SubPixelNone; | 1282 | uint32_t subpixel_order = SubPixelNone; |
1274 | 1283 | ||
1275 | /* fixme - tv/cv/din */ | ||
1276 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | 1284 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
1277 | return; | 1285 | return; |
1278 | 1286 | ||
1287 | /* if the user selected tv=0 don't try and add the connector */ | ||
1288 | if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) || | ||
1289 | (connector_type == DRM_MODE_CONNECTOR_Composite) || | ||
1290 | (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) && | ||
1291 | (radeon_tv == 0)) | ||
1292 | return; | ||
1293 | |||
1279 | /* see if we already added it */ | 1294 | /* see if we already added it */ |
1280 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1295 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1281 | radeon_connector = to_radeon_connector(connector); | 1296 | radeon_connector = to_radeon_connector(connector); |
@@ -1347,26 +1362,24 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1347 | case DRM_MODE_CONNECTOR_SVIDEO: | 1362 | case DRM_MODE_CONNECTOR_SVIDEO: |
1348 | case DRM_MODE_CONNECTOR_Composite: | 1363 | case DRM_MODE_CONNECTOR_Composite: |
1349 | case DRM_MODE_CONNECTOR_9PinDIN: | 1364 | case DRM_MODE_CONNECTOR_9PinDIN: |
1350 | if (radeon_tv == 1) { | 1365 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); |
1351 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); | 1366 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); |
1352 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); | 1367 | radeon_connector->dac_load_detect = true; |
1353 | radeon_connector->dac_load_detect = true; | 1368 | /* RS400,RC410,RS480 chipset seems to report a lot |
1354 | /* RS400,RC410,RS480 chipset seems to report a lot | 1369 | * of false positive on load detect, we haven't yet |
1355 | * of false positive on load detect, we haven't yet | 1370 | * found a way to make load detect reliable on those |
1356 | * found a way to make load detect reliable on those | 1371 | * chipset, thus just disable it for TV. |
1357 | * chipset, thus just disable it for TV. | 1372 | */ |
1358 | */ | 1373 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) |
1359 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) | 1374 | radeon_connector->dac_load_detect = false; |
1360 | radeon_connector->dac_load_detect = false; | 1375 | drm_connector_attach_property(&radeon_connector->base, |
1361 | drm_connector_attach_property(&radeon_connector->base, | 1376 | rdev->mode_info.load_detect_property, |
1362 | rdev->mode_info.load_detect_property, | 1377 | radeon_connector->dac_load_detect); |
1363 | radeon_connector->dac_load_detect); | 1378 | drm_connector_attach_property(&radeon_connector->base, |
1364 | drm_connector_attach_property(&radeon_connector->base, | 1379 | rdev->mode_info.tv_std_property, |
1365 | rdev->mode_info.tv_std_property, | 1380 | radeon_combios_get_tv_info(rdev)); |
1366 | radeon_combios_get_tv_info(rdev)); | 1381 | /* no HPD on analog connectors */ |
1367 | /* no HPD on analog connectors */ | 1382 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1368 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | ||
1369 | } | ||
1370 | break; | 1383 | break; |
1371 | case DRM_MODE_CONNECTOR_LVDS: | 1384 | case DRM_MODE_CONNECTOR_LVDS: |
1372 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); | 1385 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); |
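Two independent changes run through radeon_connectors.c: every ->detect() hook grows a bool force argument to match the updated drm_connector_funcs prototype, and TV/composite/DIN connectors are now rejected up front when the radeon_tv module parameter is 0, rather than being filtered inside the switch. A small sketch of the shape of the new callback and the early-out, using simplified stand-in types rather than the DRM ones:

#include <stdbool.h>

enum connector_status { STATUS_CONNECTED, STATUS_DISCONNECTED, STATUS_UNKNOWN };
enum connector_type   { CONN_VGA, CONN_SVIDEO, CONN_COMPOSITE, CONN_9PIN_DIN };

struct connector { enum connector_type type; };

static int tv_param = 1;	/* stand-in for the radeon_tv module parameter */

/* New-style detect: 'force' tells the hook it may do expensive probing
 * (e.g. DAC load detection) because userspace explicitly asked for it. */
static enum connector_status
tv_detect(struct connector *conn, bool force)
{
	(void)conn;
	if (!force)
		return STATUS_UNKNOWN;	/* cheap path: skip load detection */
	return STATUS_DISCONNECTED;	/* placeholder result */
}

/* Skip analog TV outputs completely when the user disabled TV support. */
static bool should_add_connector(enum connector_type type)
{
	if ((type == CONN_SVIDEO || type == CONN_COMPOSITE ||
	     type == CONN_9PIN_DIN) && tv_param == 0)
		return false;
	return true;
}

int main(void)
{
	struct connector c = { CONN_SVIDEO };
	return should_add_connector(c.type) ? (int)tv_detect(&c, true) : 0;
}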
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 69b3c2291e92..256d204a6d24 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -293,30 +293,20 @@ bool radeon_card_posted(struct radeon_device *rdev) | |||
293 | void radeon_update_bandwidth_info(struct radeon_device *rdev) | 293 | void radeon_update_bandwidth_info(struct radeon_device *rdev) |
294 | { | 294 | { |
295 | fixed20_12 a; | 295 | fixed20_12 a; |
296 | u32 sclk, mclk; | 296 | u32 sclk = rdev->pm.current_sclk; |
297 | u32 mclk = rdev->pm.current_mclk; | ||
297 | 298 | ||
298 | if (rdev->flags & RADEON_IS_IGP) { | 299 | /* sclk/mclk in Mhz */ |
299 | sclk = radeon_get_engine_clock(rdev); | 300 | a.full = dfixed_const(100); |
300 | mclk = rdev->clock.default_mclk; | 301 | rdev->pm.sclk.full = dfixed_const(sclk); |
301 | 302 | rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); | |
302 | a.full = dfixed_const(100); | 303 | rdev->pm.mclk.full = dfixed_const(mclk); |
303 | rdev->pm.sclk.full = dfixed_const(sclk); | 304 | rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); |
304 | rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); | ||
305 | rdev->pm.mclk.full = dfixed_const(mclk); | ||
306 | rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); | ||
307 | 305 | ||
306 | if (rdev->flags & RADEON_IS_IGP) { | ||
308 | a.full = dfixed_const(16); | 307 | a.full = dfixed_const(16); |
309 | /* core_bandwidth = sclk(Mhz) * 16 */ | 308 | /* core_bandwidth = sclk(Mhz) * 16 */ |
310 | rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); | 309 | rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); |
311 | } else { | ||
312 | sclk = radeon_get_engine_clock(rdev); | ||
313 | mclk = radeon_get_memory_clock(rdev); | ||
314 | |||
315 | a.full = dfixed_const(100); | ||
316 | rdev->pm.sclk.full = dfixed_const(sclk); | ||
317 | rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); | ||
318 | rdev->pm.mclk.full = dfixed_const(mclk); | ||
319 | rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); | ||
320 | } | 310 | } |
321 | } | 311 | } |
322 | 312 | ||
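radeon_update_bandwidth_info() now always converts the cached current_sclk/current_mclk (kept in 10 kHz units) into MHz in 20.12 fixed point by dividing by 100, instead of re-reading the clocks and special-casing IGP versus discrete parts. A self-contained sketch of that arithmetic, with dfixed-like helpers reimplemented here purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* 20.12 fixed point, like the kernel's fixed20_12. */
typedef struct { uint32_t full; } fixed20_12;

static fixed20_12 fx_const(uint32_t v)
{
	fixed20_12 f = { v << 12 };
	return f;
}

/* a / b in fixed point: widen to 64 bits to keep the 12 fraction bits. */
static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
{
	fixed20_12 f = { (uint32_t)(((uint64_t)a.full << 12) / b.full) };
	return f;
}

int main(void)
{
	uint32_t sclk_10khz = 68000;	/* 680.00 MHz engine clock */
	fixed20_12 hundred = fx_const(100);
	fixed20_12 sclk_mhz = fx_div(fx_const(sclk_10khz), hundred);

	/* 68000 / 100 = 680 MHz; print the integer part. */
	printf("sclk = %u MHz\n", sclk_mhz.full >> 12);
	return 0;
}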
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 6dd434ad2429..b92d2f2fcbed 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
349 | DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); | 349 | DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); |
350 | if (devices & ATOM_DEVICE_DFP5_SUPPORT) | 350 | if (devices & ATOM_DEVICE_DFP5_SUPPORT) |
351 | DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); | 351 | DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); |
352 | if (devices & ATOM_DEVICE_DFP6_SUPPORT) | ||
353 | DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]); | ||
352 | if (devices & ATOM_DEVICE_TV1_SUPPORT) | 354 | if (devices & ATOM_DEVICE_TV1_SUPPORT) |
353 | DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); | 355 | DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); |
354 | if (devices & ATOM_DEVICE_CV_SUPPORT) | 356 | if (devices & ATOM_DEVICE_CV_SUPPORT) |
@@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
841 | { | 843 | { |
842 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); | 844 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); |
843 | 845 | ||
844 | if (radeon_fb->obj) | 846 | if (radeon_fb->obj) { |
845 | drm_gem_object_unreference_unlocked(radeon_fb->obj); | 847 | drm_gem_object_unreference_unlocked(radeon_fb->obj); |
848 | } | ||
846 | drm_framebuffer_cleanup(fb); | 849 | drm_framebuffer_cleanup(fb); |
847 | kfree(radeon_fb); | 850 | kfree(radeon_fb); |
848 | } | 851 | } |
@@ -1140,17 +1143,18 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
1140 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; | 1143 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; |
1141 | else | 1144 | else |
1142 | radeon_crtc->rmx_type = RMX_OFF; | 1145 | radeon_crtc->rmx_type = RMX_OFF; |
1143 | src_v = crtc->mode.vdisplay; | ||
1144 | dst_v = radeon_crtc->native_mode.vdisplay; | ||
1145 | src_h = crtc->mode.hdisplay; | ||
1146 | dst_h = radeon_crtc->native_mode.vdisplay; | ||
1147 | /* copy native mode */ | 1146 | /* copy native mode */ |
1148 | memcpy(&radeon_crtc->native_mode, | 1147 | memcpy(&radeon_crtc->native_mode, |
1149 | &radeon_encoder->native_mode, | 1148 | &radeon_encoder->native_mode, |
1150 | sizeof(struct drm_display_mode)); | 1149 | sizeof(struct drm_display_mode)); |
1150 | src_v = crtc->mode.vdisplay; | ||
1151 | dst_v = radeon_crtc->native_mode.vdisplay; | ||
1152 | src_h = crtc->mode.hdisplay; | ||
1153 | dst_h = radeon_crtc->native_mode.hdisplay; | ||
1151 | 1154 | ||
1152 | /* fix up for overscan on hdmi */ | 1155 | /* fix up for overscan on hdmi */ |
1153 | if (ASIC_IS_AVIVO(rdev) && | 1156 | if (ASIC_IS_AVIVO(rdev) && |
1157 | (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && | ||
1154 | ((radeon_encoder->underscan_type == UNDERSCAN_ON) || | 1158 | ((radeon_encoder->underscan_type == UNDERSCAN_ON) || |
1155 | ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && | 1159 | ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && |
1156 | drm_detect_hdmi_monitor(radeon_connector->edid) && | 1160 | drm_detect_hdmi_monitor(radeon_connector->edid) && |
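The scaling-fixup hunk above fixes an ordering bug: the src/dst sizes used to be sampled from radeon_crtc->native_mode before the encoder's native mode was memcpy'd over it, and dst_h even read vdisplay instead of hdisplay. The sizes are now read after the copy. A minimal illustration of why the order matters, with simplified structs:

#include <stdio.h>
#include <string.h>

struct display_mode { int hdisplay, vdisplay; };

int main(void)
{
	struct display_mode crtc_native    = { 0, 0 };	       /* stale/empty */
	struct display_mode encoder_native = { 1920, 1200 };  /* panel's real mode */

	/* Wrong order: read first, copy second -> destination size is garbage. */
	int dst_h_bad = crtc_native.hdisplay;
	memcpy(&crtc_native, &encoder_native, sizeof(crtc_native));

	/* Fixed order: copy the native mode, then read the destination size. */
	int dst_h_good = crtc_native.hdisplay;

	printf("before copy: %d, after copy: %d\n", dst_h_bad, dst_h_good);
	return 0;
}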
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index c74a8b20d941..40b0c087b592 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -94,6 +94,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) | |||
94 | ret = radeon_bo_reserve(rbo, false); | 94 | ret = radeon_bo_reserve(rbo, false); |
95 | if (likely(ret == 0)) { | 95 | if (likely(ret == 0)) { |
96 | radeon_bo_kunmap(rbo); | 96 | radeon_bo_kunmap(rbo); |
97 | radeon_bo_unpin(rbo); | ||
97 | radeon_bo_unreserve(rbo); | 98 | radeon_bo_unreserve(rbo); |
98 | } | 99 | } |
99 | drm_gem_object_unreference_unlocked(gobj); | 100 | drm_gem_object_unreference_unlocked(gobj); |
@@ -325,8 +326,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb | |||
325 | { | 326 | { |
326 | struct fb_info *info; | 327 | struct fb_info *info; |
327 | struct radeon_framebuffer *rfb = &rfbdev->rfb; | 328 | struct radeon_framebuffer *rfb = &rfbdev->rfb; |
328 | struct radeon_bo *rbo; | ||
329 | int r; | ||
330 | 329 | ||
331 | if (rfbdev->helper.fbdev) { | 330 | if (rfbdev->helper.fbdev) { |
332 | info = rfbdev->helper.fbdev; | 331 | info = rfbdev->helper.fbdev; |
@@ -338,14 +337,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb | |||
338 | } | 337 | } |
339 | 338 | ||
340 | if (rfb->obj) { | 339 | if (rfb->obj) { |
341 | rbo = rfb->obj->driver_private; | 340 | radeonfb_destroy_pinned_object(rfb->obj); |
342 | r = radeon_bo_reserve(rbo, false); | 341 | rfb->obj = NULL; |
343 | if (likely(r == 0)) { | ||
344 | radeon_bo_kunmap(rbo); | ||
345 | radeon_bo_unpin(rbo); | ||
346 | radeon_bo_unreserve(rbo); | ||
347 | } | ||
348 | drm_gem_object_unreference_unlocked(rfb->obj); | ||
349 | } | 342 | } |
350 | drm_fb_helper_fini(&rfbdev->helper); | 343 | drm_fb_helper_fini(&rfbdev->helper); |
351 | drm_framebuffer_cleanup(&rfb->base); | 344 | drm_framebuffer_cleanup(&rfb->base); |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index c578f265b24c..d1e595d91723 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, | |||
201 | return r; | 201 | return r; |
202 | } | 202 | } |
203 | r = drm_gem_handle_create(filp, gobj, &handle); | 203 | r = drm_gem_handle_create(filp, gobj, &handle); |
204 | /* drop reference from allocate - handle holds it now */ | ||
205 | drm_gem_object_unreference_unlocked(gobj); | ||
204 | if (r) { | 206 | if (r) { |
205 | drm_gem_object_unreference_unlocked(gobj); | ||
206 | return r; | 207 | return r; |
207 | } | 208 | } |
208 | drm_gem_object_handle_unreference_unlocked(gobj); | ||
209 | args->handle = handle; | 209 | args->handle = handle; |
210 | return 0; | 210 | return 0; |
211 | } | 211 | } |
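The radeon_gem.c change adopts the usual GEM reference discipline: drm_gem_handle_create() takes its own reference for the handle, so the reference held since allocation is dropped unconditionally right afterwards, on success (the handle keeps the object alive) and on failure (the drop is what frees it). A toy refcount model of that ownership transfer, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct object { int refcount; };

static struct object *object_create(void)	/* caller starts with one ref */
{
	struct object *obj = calloc(1, sizeof(*obj));
	obj->refcount = 1;
	return obj;
}

static void object_unref(struct object *obj)
{
	if (--obj->refcount == 0) {
		printf("object freed\n");
		free(obj);
	}
}

/* A handle table entry takes its own reference on success. */
static int handle_create(struct object *obj, int fail)
{
	if (fail)
		return -1;
	obj->refcount++;
	return 0;
}

int main(void)
{
	struct object *obj = object_create();
	int r = handle_create(obj, 0);

	/* Drop the allocation reference no matter what: on success the
	 * handle's reference keeps the object alive, on failure this
	 * drop frees it. */
	object_unref(obj);

	return r;
}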
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 0416804d8f30..6a13ee38a5b9 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -213,7 +213,7 @@ static void post_xfer(struct i2c_adapter *i2c_adap) | |||
213 | 213 | ||
214 | static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) | 214 | static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) |
215 | { | 215 | { |
216 | u32 sclk = radeon_get_engine_clock(rdev); | 216 | u32 sclk = rdev->pm.current_sclk; |
217 | u32 prescale = 0; | 217 | u32 prescale = 0; |
218 | u32 nm; | 218 | u32 nm; |
219 | u8 n, m, loop; | 219 | u8 n, m, loop; |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 5eee3c41d124..8fbbe1c6ebbd 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -203,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
203 | */ | 203 | */ |
204 | int radeon_driver_firstopen_kms(struct drm_device *dev) | 204 | int radeon_driver_firstopen_kms(struct drm_device *dev) |
205 | { | 205 | { |
206 | struct radeon_device *rdev = dev->dev_private; | ||
207 | |||
208 | if (rdev->powered_down) | ||
209 | return -EINVAL; | ||
206 | return 0; | 210 | return 0; |
207 | } | 211 | } |
208 | 212 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 8f93e2b4b0c8..17a6602b5885 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -204,7 +204,7 @@ struct radeon_i2c_chan { | |||
204 | 204 | ||
205 | /* mostly for macs, but really any system without connector tables */ | 205 | /* mostly for macs, but really any system without connector tables */ |
206 | enum radeon_connector_table { | 206 | enum radeon_connector_table { |
207 | CT_NONE, | 207 | CT_NONE = 0, |
208 | CT_GENERIC, | 208 | CT_GENERIC, |
209 | CT_IBOOK, | 209 | CT_IBOOK, |
210 | CT_POWERBOOK_EXTERNAL, | 210 | CT_POWERBOOK_EXTERNAL, |
@@ -215,6 +215,7 @@ enum radeon_connector_table { | |||
215 | CT_IMAC_G5_ISIGHT, | 215 | CT_IMAC_G5_ISIGHT, |
216 | CT_EMAC, | 216 | CT_EMAC, |
217 | CT_RN50_POWER, | 217 | CT_RN50_POWER, |
218 | CT_MAC_X800, | ||
218 | }; | 219 | }; |
219 | 220 | ||
220 | enum radeon_dvo_chip { | 221 | enum radeon_dvo_chip { |
@@ -600,7 +601,6 @@ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct d | |||
600 | void radeon_enc_destroy(struct drm_encoder *encoder); | 601 | void radeon_enc_destroy(struct drm_encoder *encoder); |
601 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); | 602 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); |
602 | void radeon_combios_asic_init(struct drm_device *dev); | 603 | void radeon_combios_asic_init(struct drm_device *dev); |
603 | extern int radeon_static_clocks_init(struct drm_device *dev); | ||
604 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | 604 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
605 | struct drm_display_mode *mode, | 605 | struct drm_display_mode *mode, |
606 | struct drm_display_mode *adjusted_mode); | 606 | struct drm_display_mode *adjusted_mode); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index f1c796810117..bfa59db374d2 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -905,6 +905,54 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
905 | 905 | ||
906 | } | 906 | } |
907 | 907 | ||
908 | static int rv770_vram_scratch_init(struct radeon_device *rdev) | ||
909 | { | ||
910 | int r; | ||
911 | u64 gpu_addr; | ||
912 | |||
913 | if (rdev->vram_scratch.robj == NULL) { | ||
914 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, | ||
915 | true, RADEON_GEM_DOMAIN_VRAM, | ||
916 | &rdev->vram_scratch.robj); | ||
917 | if (r) { | ||
918 | return r; | ||
919 | } | ||
920 | } | ||
921 | |||
922 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); | ||
923 | if (unlikely(r != 0)) | ||
924 | return r; | ||
925 | r = radeon_bo_pin(rdev->vram_scratch.robj, | ||
926 | RADEON_GEM_DOMAIN_VRAM, &gpu_addr); | ||
927 | if (r) { | ||
928 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
929 | return r; | ||
930 | } | ||
931 | r = radeon_bo_kmap(rdev->vram_scratch.robj, | ||
932 | (void **)&rdev->vram_scratch.ptr); | ||
933 | if (r) | ||
934 | radeon_bo_unpin(rdev->vram_scratch.robj); | ||
935 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
936 | |||
937 | return r; | ||
938 | } | ||
939 | |||
940 | static void rv770_vram_scratch_fini(struct radeon_device *rdev) | ||
941 | { | ||
942 | int r; | ||
943 | |||
944 | if (rdev->vram_scratch.robj == NULL) { | ||
945 | return; | ||
946 | } | ||
947 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); | ||
948 | if (likely(r == 0)) { | ||
949 | radeon_bo_kunmap(rdev->vram_scratch.robj); | ||
950 | radeon_bo_unpin(rdev->vram_scratch.robj); | ||
951 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
952 | } | ||
953 | radeon_bo_unref(&rdev->vram_scratch.robj); | ||
954 | } | ||
955 | |||
908 | int rv770_mc_init(struct radeon_device *rdev) | 956 | int rv770_mc_init(struct radeon_device *rdev) |
909 | { | 957 | { |
910 | u32 tmp; | 958 | u32 tmp; |
@@ -970,6 +1018,9 @@ static int rv770_startup(struct radeon_device *rdev) | |||
970 | if (r) | 1018 | if (r) |
971 | return r; | 1019 | return r; |
972 | } | 1020 | } |
1021 | r = rv770_vram_scratch_init(rdev); | ||
1022 | if (r) | ||
1023 | return r; | ||
973 | rv770_gpu_init(rdev); | 1024 | rv770_gpu_init(rdev); |
974 | r = r600_blit_init(rdev); | 1025 | r = r600_blit_init(rdev); |
975 | if (r) { | 1026 | if (r) { |
@@ -1023,11 +1074,6 @@ int rv770_resume(struct radeon_device *rdev) | |||
1023 | */ | 1074 | */ |
1024 | /* post card */ | 1075 | /* post card */ |
1025 | atom_asic_init(rdev->mode_info.atom_context); | 1076 | atom_asic_init(rdev->mode_info.atom_context); |
1026 | /* Initialize clocks */ | ||
1027 | r = radeon_clocks_init(rdev); | ||
1028 | if (r) { | ||
1029 | return r; | ||
1030 | } | ||
1031 | 1077 | ||
1032 | r = rv770_startup(rdev); | 1078 | r = rv770_startup(rdev); |
1033 | if (r) { | 1079 | if (r) { |
@@ -1118,9 +1164,6 @@ int rv770_init(struct radeon_device *rdev) | |||
1118 | radeon_surface_init(rdev); | 1164 | radeon_surface_init(rdev); |
1119 | /* Initialize clocks */ | 1165 | /* Initialize clocks */ |
1120 | radeon_get_clock_info(rdev->ddev); | 1166 | radeon_get_clock_info(rdev->ddev); |
1121 | r = radeon_clocks_init(rdev); | ||
1122 | if (r) | ||
1123 | return r; | ||
1124 | /* Fence driver */ | 1167 | /* Fence driver */ |
1125 | r = radeon_fence_driver_init(rdev); | 1168 | r = radeon_fence_driver_init(rdev); |
1126 | if (r) | 1169 | if (r) |
@@ -1195,9 +1238,9 @@ void rv770_fini(struct radeon_device *rdev) | |||
1195 | r600_irq_fini(rdev); | 1238 | r600_irq_fini(rdev); |
1196 | radeon_irq_kms_fini(rdev); | 1239 | radeon_irq_kms_fini(rdev); |
1197 | rv770_pcie_gart_fini(rdev); | 1240 | rv770_pcie_gart_fini(rdev); |
1241 | rv770_vram_scratch_fini(rdev); | ||
1198 | radeon_gem_fini(rdev); | 1242 | radeon_gem_fini(rdev); |
1199 | radeon_fence_driver_fini(rdev); | 1243 | radeon_fence_driver_fini(rdev); |
1200 | radeon_clocks_fini(rdev); | ||
1201 | radeon_agp_fini(rdev); | 1244 | radeon_agp_fini(rdev); |
1202 | radeon_bo_fini(rdev); | 1245 | radeon_bo_fini(rdev); |
1203 | radeon_atombios_fini(rdev); | 1246 | radeon_atombios_fini(rdev); |
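The new rv770_vram_scratch_init()/_fini() pair follows the usual radeon buffer-object life cycle: create, reserve, pin to VRAM, kmap, then tear down in reverse order (kunmap, unpin, unreserve, unref), unwinding the pin if the kmap fails. A schematic sketch of that acquire/release ordering with stubbed helpers (the names here are placeholders, not the driver API):

#include <stdio.h>

/* Stubbed resource steps; each init step returns 0 on success. */
static int  bo_create(void)    { puts("create");    return 0; }
static int  bo_reserve(void)   { puts("reserve");   return 0; }
static int  bo_pin(void)       { puts("pin");       return 0; }
static int  bo_kmap(void)      { puts("kmap");      return 0; }
static void bo_kunmap(void)    { puts("kunmap"); }
static void bo_unpin(void)     { puts("unpin"); }
static void bo_unreserve(void) { puts("unreserve"); }
static void bo_unref(void)     { puts("unref"); }

static int scratch_init(void)
{
	int r;

	if ((r = bo_create()))
		return r;
	if ((r = bo_reserve()))
		return r;
	if ((r = bo_pin())) {
		bo_unreserve();
		return r;
	}
	r = bo_kmap();
	if (r)
		bo_unpin();	/* undo the pin if mapping failed */
	bo_unreserve();		/* reservation is dropped either way */
	return r;
}

static void scratch_fini(void)
{
	/* Reverse order of init. */
	if (bo_reserve() == 0) {
		bo_kunmap();
		bo_unpin();
		bo_unreserve();
	}
	bo_unref();
}

int main(void)
{
	if (scratch_init() == 0)
		scratch_fini();
	return 0;
}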
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cb4cf7ef4d1e..db809e034cc4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -442,6 +442,43 @@ out_err: | |||
442 | } | 442 | } |
443 | 443 | ||
444 | /** | 444 | /** |
445 | * Call with bo::reserved and the lru lock held. | ||
446 | * Will release GPU memory type usage on destruction. | ||
447 | * This is the place to put in driver specific hooks. | ||
448 | * Will release the bo::reserved lock and the | ||
449 | * lru lock on exit. | ||
450 | */ | ||
451 | |||
452 | static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) | ||
453 | { | ||
454 | struct ttm_bo_global *glob = bo->glob; | ||
455 | |||
456 | if (bo->ttm) { | ||
457 | |||
458 | /** | ||
459 | * Release the lru_lock, since we don't want to have | ||
460 | * an atomic requirement on ttm_tt[unbind|destroy]. | ||
461 | */ | ||
462 | |||
463 | spin_unlock(&glob->lru_lock); | ||
464 | ttm_tt_unbind(bo->ttm); | ||
465 | ttm_tt_destroy(bo->ttm); | ||
466 | bo->ttm = NULL; | ||
467 | spin_lock(&glob->lru_lock); | ||
468 | } | ||
469 | |||
470 | if (bo->mem.mm_node) { | ||
471 | drm_mm_put_block(bo->mem.mm_node); | ||
472 | bo->mem.mm_node = NULL; | ||
473 | } | ||
474 | |||
475 | atomic_set(&bo->reserved, 0); | ||
476 | wake_up_all(&bo->event_queue); | ||
477 | spin_unlock(&glob->lru_lock); | ||
478 | } | ||
479 | |||
480 | |||
481 | /** | ||
445 | * If bo idle, remove from delayed- and lru lists, and unref. | 482 | * If bo idle, remove from delayed- and lru lists, and unref. |
446 | * If not idle, and already on delayed list, do nothing. | 483 | * If not idle, and already on delayed list, do nothing. |
447 | * If not idle, and not on delayed list, put on delayed list, | 484 | * If not idle, and not on delayed list, put on delayed list, |
@@ -456,6 +493,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) | |||
456 | int ret; | 493 | int ret; |
457 | 494 | ||
458 | spin_lock(&bo->lock); | 495 | spin_lock(&bo->lock); |
496 | retry: | ||
459 | (void) ttm_bo_wait(bo, false, false, !remove_all); | 497 | (void) ttm_bo_wait(bo, false, false, !remove_all); |
460 | 498 | ||
461 | if (!bo->sync_obj) { | 499 | if (!bo->sync_obj) { |
@@ -464,31 +502,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) | |||
464 | spin_unlock(&bo->lock); | 502 | spin_unlock(&bo->lock); |
465 | 503 | ||
466 | spin_lock(&glob->lru_lock); | 504 | spin_lock(&glob->lru_lock); |
467 | put_count = ttm_bo_del_from_lru(bo); | 505 | ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0); |
506 | |||
507 | /** | ||
508 | * Someone else has the object reserved. Bail and retry. | ||
509 | */ | ||
468 | 510 | ||
469 | ret = ttm_bo_reserve_locked(bo, false, false, false, 0); | 511 | if (unlikely(ret == -EBUSY)) { |
470 | BUG_ON(ret); | 512 | spin_unlock(&glob->lru_lock); |
471 | if (bo->ttm) | 513 | spin_lock(&bo->lock); |
472 | ttm_tt_unbind(bo->ttm); | 514 | goto requeue; |
515 | } | ||
516 | |||
517 | /** | ||
518 | * We can re-check for sync object without taking | ||
519 | * the bo::lock since setting the sync object requires | ||
520 | * also bo::reserved. A busy object at this point may | ||
521 | * be caused by another thread starting an accelerated | ||
522 | * eviction. | ||
523 | */ | ||
524 | |||
525 | if (unlikely(bo->sync_obj)) { | ||
526 | atomic_set(&bo->reserved, 0); | ||
527 | wake_up_all(&bo->event_queue); | ||
528 | spin_unlock(&glob->lru_lock); | ||
529 | spin_lock(&bo->lock); | ||
530 | if (remove_all) | ||
531 | goto retry; | ||
532 | else | ||
533 | goto requeue; | ||
534 | } | ||
535 | |||
536 | put_count = ttm_bo_del_from_lru(bo); | ||
473 | 537 | ||
474 | if (!list_empty(&bo->ddestroy)) { | 538 | if (!list_empty(&bo->ddestroy)) { |
475 | list_del_init(&bo->ddestroy); | 539 | list_del_init(&bo->ddestroy); |
476 | ++put_count; | 540 | ++put_count; |
477 | } | 541 | } |
478 | if (bo->mem.mm_node) { | ||
479 | drm_mm_put_block(bo->mem.mm_node); | ||
480 | bo->mem.mm_node = NULL; | ||
481 | } | ||
482 | spin_unlock(&glob->lru_lock); | ||
483 | 542 | ||
484 | atomic_set(&bo->reserved, 0); | 543 | ttm_bo_cleanup_memtype_use(bo); |
485 | 544 | ||
486 | while (put_count--) | 545 | while (put_count--) |
487 | kref_put(&bo->list_kref, ttm_bo_ref_bug); | 546 | kref_put(&bo->list_kref, ttm_bo_ref_bug); |
488 | 547 | ||
489 | return 0; | 548 | return 0; |
490 | } | 549 | } |
491 | 550 | requeue: | |
492 | spin_lock(&glob->lru_lock); | 551 | spin_lock(&glob->lru_lock); |
493 | if (list_empty(&bo->ddestroy)) { | 552 | if (list_empty(&bo->ddestroy)) { |
494 | void *sync_obj = bo->sync_obj; | 553 | void *sync_obj = bo->sync_obj; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 7cffb3e04232..3451a82adba7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, | |||
351 | INIT_LIST_HEAD(&fbo->lru); | 351 | INIT_LIST_HEAD(&fbo->lru); |
352 | INIT_LIST_HEAD(&fbo->swap); | 352 | INIT_LIST_HEAD(&fbo->swap); |
353 | fbo->vm_node = NULL; | 353 | fbo->vm_node = NULL; |
354 | atomic_set(&fbo->cpu_writers, 0); | ||
354 | 355 | ||
355 | fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); | 356 | fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); |
356 | kref_init(&fbo->list_kref); | 357 | kref_init(&fbo->list_kref); |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index ca904799f018..b1e02fffd3cc 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
@@ -69,7 +69,7 @@ struct ttm_page_pool { | |||
69 | spinlock_t lock; | 69 | spinlock_t lock; |
70 | bool fill_lock; | 70 | bool fill_lock; |
71 | struct list_head list; | 71 | struct list_head list; |
72 | int gfp_flags; | 72 | gfp_t gfp_flags; |
73 | unsigned npages; | 73 | unsigned npages; |
74 | char *name; | 74 | char *name; |
75 | unsigned long nfrees; | 75 | unsigned long nfrees; |
@@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages, | |||
475 | * This function is reentrant if caller updates count depending on number of | 475 | * This function is reentrant if caller updates count depending on number of |
476 | * pages returned in pages array. | 476 | * pages returned in pages array. |
477 | */ | 477 | */ |
478 | static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, | 478 | static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, |
479 | int ttm_flags, enum ttm_caching_state cstate, unsigned count) | 479 | int ttm_flags, enum ttm_caching_state cstate, unsigned count) |
480 | { | 480 | { |
481 | struct page **caching_array; | 481 | struct page **caching_array; |
@@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
666 | { | 666 | { |
667 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); | 667 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); |
668 | struct page *p = NULL; | 668 | struct page *p = NULL; |
669 | int gfp_flags = GFP_USER; | 669 | gfp_t gfp_flags = GFP_USER; |
670 | int r; | 670 | int r; |
671 | 671 | ||
672 | /* set zero flag for page allocation if required */ | 672 | /* set zero flag for page allocation if required */ |
@@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) | |||
818 | return 0; | 818 | return 0; |
819 | } | 819 | } |
820 | 820 | ||
821 | void ttm_page_alloc_fini() | 821 | void ttm_page_alloc_fini(void) |
822 | { | 822 | { |
823 | int i; | 823 | int i; |
824 | 824 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 72ec2e2b6e97..a96ed6d9d010 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = { | |||
148 | {0, 0, 0} | 148 | {0, 0, 0} |
149 | }; | 149 | }; |
150 | 150 | ||
151 | static char *vmw_devname = "vmwgfx"; | 151 | static int enable_fbdev; |
152 | 152 | ||
153 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); | 153 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
154 | static void vmw_master_init(struct vmw_master *); | 154 | static void vmw_master_init(struct vmw_master *); |
155 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | 155 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, |
156 | void *ptr); | 156 | void *ptr); |
157 | 157 | ||
158 | MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); | ||
159 | module_param_named(enable_fbdev, enable_fbdev, int, 0600); | ||
160 | |||
158 | static void vmw_print_capabilities(uint32_t capabilities) | 161 | static void vmw_print_capabilities(uint32_t capabilities) |
159 | { | 162 | { |
160 | DRM_INFO("Capabilities:\n"); | 163 | DRM_INFO("Capabilities:\n"); |
@@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
192 | { | 195 | { |
193 | int ret; | 196 | int ret; |
194 | 197 | ||
195 | vmw_kms_save_vga(dev_priv); | ||
196 | |||
197 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); | 198 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); |
198 | if (unlikely(ret != 0)) { | 199 | if (unlikely(ret != 0)) { |
199 | DRM_ERROR("Unable to initialize FIFO.\n"); | 200 | DRM_ERROR("Unable to initialize FIFO.\n"); |
@@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
206 | static void vmw_release_device(struct vmw_private *dev_priv) | 207 | static void vmw_release_device(struct vmw_private *dev_priv) |
207 | { | 208 | { |
208 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | 209 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
209 | vmw_kms_restore_vga(dev_priv); | ||
210 | } | 210 | } |
211 | 211 | ||
212 | int vmw_3d_resource_inc(struct vmw_private *dev_priv) | ||
213 | { | ||
214 | int ret = 0; | ||
215 | |||
216 | mutex_lock(&dev_priv->release_mutex); | ||
217 | if (unlikely(dev_priv->num_3d_resources++ == 0)) { | ||
218 | ret = vmw_request_device(dev_priv); | ||
219 | if (unlikely(ret != 0)) | ||
220 | --dev_priv->num_3d_resources; | ||
221 | } | ||
222 | mutex_unlock(&dev_priv->release_mutex); | ||
223 | return ret; | ||
224 | } | ||
225 | |||
226 | |||
227 | void vmw_3d_resource_dec(struct vmw_private *dev_priv) | ||
228 | { | ||
229 | int32_t n3d; | ||
230 | |||
231 | mutex_lock(&dev_priv->release_mutex); | ||
232 | if (unlikely(--dev_priv->num_3d_resources == 0)) | ||
233 | vmw_release_device(dev_priv); | ||
234 | n3d = (int32_t) dev_priv->num_3d_resources; | ||
235 | mutex_unlock(&dev_priv->release_mutex); | ||
236 | |||
237 | BUG_ON(n3d < 0); | ||
238 | } | ||
212 | 239 | ||
213 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | 240 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
214 | { | 241 | { |
@@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
228 | dev_priv->last_read_sequence = (uint32_t) -100; | 255 | dev_priv->last_read_sequence = (uint32_t) -100; |
229 | mutex_init(&dev_priv->hw_mutex); | 256 | mutex_init(&dev_priv->hw_mutex); |
230 | mutex_init(&dev_priv->cmdbuf_mutex); | 257 | mutex_init(&dev_priv->cmdbuf_mutex); |
258 | mutex_init(&dev_priv->release_mutex); | ||
231 | rwlock_init(&dev_priv->resource_lock); | 259 | rwlock_init(&dev_priv->resource_lock); |
232 | idr_init(&dev_priv->context_idr); | 260 | idr_init(&dev_priv->context_idr); |
233 | idr_init(&dev_priv->surface_idr); | 261 | idr_init(&dev_priv->surface_idr); |
@@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
244 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); | 272 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); |
245 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); | 273 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); |
246 | 274 | ||
275 | dev_priv->enable_fb = enable_fbdev; | ||
276 | |||
247 | mutex_lock(&dev_priv->hw_mutex); | 277 | mutex_lock(&dev_priv->hw_mutex); |
248 | 278 | ||
249 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); | 279 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
@@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
343 | 373 | ||
344 | dev->dev_private = dev_priv; | 374 | dev->dev_private = dev_priv; |
345 | 375 | ||
346 | if (!dev->devname) | ||
347 | dev->devname = vmw_devname; | ||
348 | |||
349 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | ||
350 | ret = drm_irq_install(dev); | ||
351 | if (unlikely(ret != 0)) { | ||
352 | DRM_ERROR("Failed installing irq: %d\n", ret); | ||
353 | goto out_no_irq; | ||
354 | } | ||
355 | } | ||
356 | |||
357 | ret = pci_request_regions(dev->pdev, "vmwgfx probe"); | 376 | ret = pci_request_regions(dev->pdev, "vmwgfx probe"); |
358 | dev_priv->stealth = (ret != 0); | 377 | dev_priv->stealth = (ret != 0); |
359 | if (dev_priv->stealth) { | 378 | if (dev_priv->stealth) { |
@@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
369 | goto out_no_device; | 388 | goto out_no_device; |
370 | } | 389 | } |
371 | } | 390 | } |
372 | ret = vmw_request_device(dev_priv); | 391 | ret = vmw_kms_init(dev_priv); |
373 | if (unlikely(ret != 0)) | 392 | if (unlikely(ret != 0)) |
374 | goto out_no_device; | 393 | goto out_no_kms; |
375 | vmw_kms_init(dev_priv); | ||
376 | vmw_overlay_init(dev_priv); | 394 | vmw_overlay_init(dev_priv); |
377 | vmw_fb_init(dev_priv); | 395 | if (dev_priv->enable_fb) { |
396 | ret = vmw_3d_resource_inc(dev_priv); | ||
397 | if (unlikely(ret != 0)) | ||
398 | goto out_no_fifo; | ||
399 | vmw_kms_save_vga(dev_priv); | ||
400 | vmw_fb_init(dev_priv); | ||
401 | DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? | ||
402 | "Detected device 3D availability.\n" : | ||
403 | "Detected no device 3D availability.\n"); | ||
404 | } else { | ||
405 | DRM_INFO("Delayed 3D detection since we're not " | ||
406 | "running the device in SVGA mode yet.\n"); | ||
407 | } | ||
408 | |||
409 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | ||
410 | ret = drm_irq_install(dev); | ||
411 | if (unlikely(ret != 0)) { | ||
412 | DRM_ERROR("Failed installing irq: %d\n", ret); | ||
413 | goto out_no_irq; | ||
414 | } | ||
415 | } | ||
378 | 416 | ||
379 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; | 417 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; |
380 | register_pm_notifier(&dev_priv->pm_nb); | 418 | register_pm_notifier(&dev_priv->pm_nb); |
381 | 419 | ||
382 | DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n"); | ||
383 | |||
384 | return 0; | 420 | return 0; |
385 | 421 | ||
386 | out_no_device: | ||
387 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | ||
388 | drm_irq_uninstall(dev_priv->dev); | ||
389 | if (dev->devname == vmw_devname) | ||
390 | dev->devname = NULL; | ||
391 | out_no_irq: | 422 | out_no_irq: |
423 | if (dev_priv->enable_fb) { | ||
424 | vmw_fb_close(dev_priv); | ||
425 | vmw_kms_restore_vga(dev_priv); | ||
426 | vmw_3d_resource_dec(dev_priv); | ||
427 | } | ||
428 | out_no_fifo: | ||
429 | vmw_overlay_close(dev_priv); | ||
430 | vmw_kms_close(dev_priv); | ||
431 | out_no_kms: | ||
432 | if (dev_priv->stealth) | ||
433 | pci_release_region(dev->pdev, 2); | ||
434 | else | ||
435 | pci_release_regions(dev->pdev); | ||
436 | out_no_device: | ||
392 | ttm_object_device_release(&dev_priv->tdev); | 437 | ttm_object_device_release(&dev_priv->tdev); |
393 | out_err4: | 438 | out_err4: |
394 | iounmap(dev_priv->mmio_virt); | 439 | iounmap(dev_priv->mmio_virt); |
@@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
415 | 460 | ||
416 | unregister_pm_notifier(&dev_priv->pm_nb); | 461 | unregister_pm_notifier(&dev_priv->pm_nb); |
417 | 462 | ||
418 | vmw_fb_close(dev_priv); | 463 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
464 | drm_irq_uninstall(dev_priv->dev); | ||
465 | if (dev_priv->enable_fb) { | ||
466 | vmw_fb_close(dev_priv); | ||
467 | vmw_kms_restore_vga(dev_priv); | ||
468 | vmw_3d_resource_dec(dev_priv); | ||
469 | } | ||
419 | vmw_kms_close(dev_priv); | 470 | vmw_kms_close(dev_priv); |
420 | vmw_overlay_close(dev_priv); | 471 | vmw_overlay_close(dev_priv); |
421 | vmw_release_device(dev_priv); | ||
422 | if (dev_priv->stealth) | 472 | if (dev_priv->stealth) |
423 | pci_release_region(dev->pdev, 2); | 473 | pci_release_region(dev->pdev, 2); |
424 | else | 474 | else |
425 | pci_release_regions(dev->pdev); | 475 | pci_release_regions(dev->pdev); |
426 | 476 | ||
427 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | ||
428 | drm_irq_uninstall(dev_priv->dev); | ||
429 | if (dev->devname == vmw_devname) | ||
430 | dev->devname = NULL; | ||
431 | ttm_object_device_release(&dev_priv->tdev); | 477 | ttm_object_device_release(&dev_priv->tdev); |
432 | iounmap(dev_priv->mmio_virt); | 478 | iounmap(dev_priv->mmio_virt); |
433 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, | 479 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, |
@@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | |||
500 | struct drm_ioctl_desc *ioctl = | 546 | struct drm_ioctl_desc *ioctl = |
501 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; | 547 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; |
502 | 548 | ||
503 | if (unlikely(ioctl->cmd != cmd)) { | 549 | if (unlikely(ioctl->cmd_drv != cmd)) { |
504 | DRM_ERROR("Invalid command format, ioctl %d\n", | 550 | DRM_ERROR("Invalid command format, ioctl %d\n", |
505 | nr - DRM_COMMAND_BASE); | 551 | nr - DRM_COMMAND_BASE); |
506 | return -EINVAL; | 552 | return -EINVAL; |
@@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev, | |||
589 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 635 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
590 | int ret = 0; | 636 | int ret = 0; |
591 | 637 | ||
638 | if (!dev_priv->enable_fb) { | ||
639 | ret = vmw_3d_resource_inc(dev_priv); | ||
640 | if (unlikely(ret != 0)) | ||
641 | return ret; | ||
642 | vmw_kms_save_vga(dev_priv); | ||
643 | mutex_lock(&dev_priv->hw_mutex); | ||
644 | vmw_write(dev_priv, SVGA_REG_TRACES, 0); | ||
645 | mutex_unlock(&dev_priv->hw_mutex); | ||
646 | } | ||
647 | |||
592 | if (active) { | 648 | if (active) { |
593 | BUG_ON(active != &dev_priv->fbdev_master); | 649 | BUG_ON(active != &dev_priv->fbdev_master); |
594 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); | 650 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); |
@@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev, | |||
617 | return 0; | 673 | return 0; |
618 | 674 | ||
619 | out_no_active_lock: | 675 | out_no_active_lock: |
620 | vmw_release_device(dev_priv); | 676 | if (!dev_priv->enable_fb) { |
677 | mutex_lock(&dev_priv->hw_mutex); | ||
678 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | ||
679 | mutex_unlock(&dev_priv->hw_mutex); | ||
680 | vmw_kms_restore_vga(dev_priv); | ||
681 | vmw_3d_resource_dec(dev_priv); | ||
682 | } | ||
621 | return ret; | 683 | return ret; |
622 | } | 684 | } |
623 | 685 | ||
@@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev, | |||
645 | 707 | ||
646 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); | 708 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
647 | 709 | ||
710 | if (!dev_priv->enable_fb) { | ||
711 | ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
712 | if (unlikely(ret != 0)) | ||
713 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); | ||
714 | mutex_lock(&dev_priv->hw_mutex); | ||
715 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | ||
716 | mutex_unlock(&dev_priv->hw_mutex); | ||
717 | vmw_kms_restore_vga(dev_priv); | ||
718 | vmw_3d_resource_dec(dev_priv); | ||
719 | } | ||
720 | |||
648 | dev_priv->active_master = &dev_priv->fbdev_master; | 721 | dev_priv->active_master = &dev_priv->fbdev_master; |
649 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | 722 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
650 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); | 723 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); |
651 | 724 | ||
652 | vmw_fb_on(dev_priv); | 725 | if (dev_priv->enable_fb) |
726 | vmw_fb_on(dev_priv); | ||
653 | } | 727 | } |
654 | 728 | ||
655 | 729 | ||
@@ -722,6 +796,7 @@ static struct drm_driver driver = { | |||
722 | .irq_postinstall = vmw_irq_postinstall, | 796 | .irq_postinstall = vmw_irq_postinstall, |
723 | .irq_uninstall = vmw_irq_uninstall, | 797 | .irq_uninstall = vmw_irq_uninstall, |
724 | .irq_handler = vmw_irq_handler, | 798 | .irq_handler = vmw_irq_handler, |
799 | .get_vblank_counter = vmw_get_vblank_counter, | ||
725 | .reclaim_buffers_locked = NULL, | 800 | .reclaim_buffers_locked = NULL, |
726 | .get_map_ofs = drm_core_get_map_ofs, | 801 | .get_map_ofs = drm_core_get_map_ofs, |
727 | .get_reg_ofs = drm_core_get_reg_ofs, | 802 | .get_reg_ofs = drm_core_get_reg_ofs, |
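The vmwgfx rework above makes SVGA/FIFO bring-up reference counted: vmw_3d_resource_inc() requests the device only for the first user and vmw_3d_resource_dec() releases it when the last user goes away, both under the new release_mutex, so fbdev becomes optional (enable_fbdev) and masters can bring 3D up and down on demand. A small standalone sketch of that first-user/last-user pattern, with a pthread mutex standing in for release_mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t release_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int num_3d_resources;

static int  request_device(void) { puts("device up");   return 0; }
static void release_device(void) { puts("device down"); }

static int resource_inc(void)
{
	int ret = 0;

	pthread_mutex_lock(&release_mutex);
	if (num_3d_resources++ == 0) {		/* first user brings it up */
		ret = request_device();
		if (ret)
			--num_3d_resources;	/* undo the count on failure */
	}
	pthread_mutex_unlock(&release_mutex);
	return ret;
}

static void resource_dec(void)
{
	pthread_mutex_lock(&release_mutex);
	if (--num_3d_resources == 0)		/* last user tears it down */
		release_device();
	pthread_mutex_unlock(&release_mutex);
}

int main(void)
{
	if (resource_inc() == 0) {	/* e.g. fbdev enabled at load */
		resource_inc();		/* e.g. a master opening 3D */
		resource_dec();
		resource_dec();		/* device released here */
	}
	return 0;
}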
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 429f917b60bf..58de6393f611 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -277,6 +277,7 @@ struct vmw_private { | |||
277 | 277 | ||
278 | bool stealth; | 278 | bool stealth; |
279 | bool is_opened; | 279 | bool is_opened; |
280 | bool enable_fb; | ||
280 | 281 | ||
281 | /** | 282 | /** |
282 | * Master management. | 283 | * Master management. |
@@ -285,6 +286,9 @@ struct vmw_private { | |||
285 | struct vmw_master *active_master; | 286 | struct vmw_master *active_master; |
286 | struct vmw_master fbdev_master; | 287 | struct vmw_master fbdev_master; |
287 | struct notifier_block pm_nb; | 288 | struct notifier_block pm_nb; |
289 | |||
290 | struct mutex release_mutex; | ||
291 | uint32_t num_3d_resources; | ||
288 | }; | 292 | }; |
289 | 293 | ||
290 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) | 294 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) |
@@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv, | |||
319 | return val; | 323 | return val; |
320 | } | 324 | } |
321 | 325 | ||
326 | int vmw_3d_resource_inc(struct vmw_private *dev_priv); | ||
327 | void vmw_3d_resource_dec(struct vmw_private *dev_priv); | ||
328 | |||
322 | /** | 329 | /** |
323 | * GMR utilities - vmwgfx_gmr.c | 330 | * GMR utilities - vmwgfx_gmr.c |
324 | */ | 331 | */ |
@@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv, | |||
511 | unsigned bbp, unsigned depth); | 518 | unsigned bbp, unsigned depth); |
512 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | 519 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, |
513 | struct drm_file *file_priv); | 520 | struct drm_file *file_priv); |
521 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); | ||
514 | 522 | ||
515 | /** | 523 | /** |
516 | * Overlay control - vmwgfx_overlay.c | 524 | * Overlay control - vmwgfx_overlay.c |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 870967a97c15..409e172f4abf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
@@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | |||
615 | if (unlikely(ret != 0)) | 615 | if (unlikely(ret != 0)) |
616 | goto err_unlock; | 616 | goto err_unlock; |
617 | 617 | ||
618 | if (bo->mem.mem_type == TTM_PL_VRAM && | ||
619 | bo->mem.mm_node->start < bo->num_pages) | ||
620 | (void) ttm_bo_validate(bo, &vmw_sys_placement, false, | ||
621 | false, false); | ||
622 | |||
618 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); | 623 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); |
619 | 624 | ||
620 | /* Could probably bug on */ | 625 | /* Could probably bug on */ |
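Note: the check added to vmw_dmabuf_to_start_of_vram() covers a buffer that already sits in VRAM but overlaps the range at the start of the aperture it is about to be pinned into: it is first bounced to system memory so the following validate against ne_placement can land it at offset 0. The same two steps, condensed into an illustrative wrapper (placements and ttm_bo_validate() arguments as in the hunk; the wrapper itself is not part of the patch):

    static int vmw_move_to_vram_start_sketch(struct ttm_buffer_object *bo)
    {
    	/*
    	 * If the buffer is already in VRAM and its current offset falls
    	 * inside the first num_pages pages it is about to occupy, evict
    	 * it to system memory first ...
    	 */
    	if (bo->mem.mem_type == TTM_PL_VRAM &&
    	    bo->mem.mm_node->start < bo->num_pages)
    		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
    				       false, false);

    	/* ... then validate it against the start-of-VRAM placement. */
    	return ttm_bo_validate(bo, &ne_placement, false, false, false);
    }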
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index e6a1eb7ea954..0fe31766e4cf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
106 | mutex_lock(&dev_priv->hw_mutex); | 106 | mutex_lock(&dev_priv->hw_mutex); |
107 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); | 107 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); |
108 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); | 108 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); |
109 | dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); | ||
109 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); | 110 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); |
110 | 111 | ||
111 | min = 4; | 112 | min = 4; |
@@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
175 | dev_priv->config_done_state); | 176 | dev_priv->config_done_state); |
176 | vmw_write(dev_priv, SVGA_REG_ENABLE, | 177 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
177 | dev_priv->enable_state); | 178 | dev_priv->enable_state); |
179 | vmw_write(dev_priv, SVGA_REG_TRACES, | ||
180 | dev_priv->traces_state); | ||
178 | 181 | ||
179 | mutex_unlock(&dev_priv->hw_mutex); | 182 | mutex_unlock(&dev_priv->hw_mutex); |
180 | vmw_fence_queue_takedown(&fifo->fence_queue); | 183 | vmw_fence_queue_takedown(&fifo->fence_queue); |
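Note: vmw_fifo_init() now also snapshots SVGA_REG_TRACES alongside SVGA_REG_ENABLE and SVGA_REG_CONFIG_DONE, and vmw_fifo_release() writes it back, restoring whatever tracing state the device had before the FIFO was claimed. The matching struct member is not in the hunks shown here; presumably struct vmw_private grows something like:

    /* Assumed addition next to the existing saved-register members: */
    uint32_t enable_state;
    uint32_t config_done_state;
    uint32_t traces_state;	/* saved SVGA_REG_TRACES value */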
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 64d7f47df868..e882ba099f0c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv) | |||
898 | save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); | 898 | save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); |
899 | save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); | 899 | save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); |
900 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | 900 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); |
901 | if (i == 0 && vmw_priv->num_displays == 1 && | ||
902 | save->width == 0 && save->height == 0) { | ||
903 | |||
904 | /* | ||
905 | * It should be fairly safe to assume that these | ||
906 | * values are uninitialized. | ||
907 | */ | ||
908 | |||
909 | save->width = vmw_priv->vga_width - save->pos_x; | ||
910 | save->height = vmw_priv->vga_height - save->pos_y; | ||
911 | } | ||
901 | } | 912 | } |
913 | |||
902 | return 0; | 914 | return 0; |
903 | } | 915 | } |
904 | 916 | ||
@@ -984,3 +996,8 @@ out_unlock: | |||
984 | ttm_read_unlock(&vmaster->lock); | 996 | ttm_read_unlock(&vmaster->lock); |
985 | return ret; | 997 | return ret; |
986 | } | 998 | } |
999 | |||
1000 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) | ||
1001 | { | ||
1002 | return 0; | ||
1003 | } | ||
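Note: two additions to vmwgfx_kms.c. First, vmw_kms_save_vga() now falls back to the VGA geometry when the single configured display reads back as 0x0. Second, vmw_get_vblank_counter() is a stub returning 0, the usual choice for hardware without a readable vblank counter, leaving the count to software. A worked example of the fallback, assuming a 1024x768 VGA mode:

    /*
     * With one display whose saved geometry reads back as all zeroes
     * (pos_x = pos_y = width = height = 0) and vga_width x vga_height
     * of 1024x768:
     *
     *   save->width  = vga_width  - pos_x = 1024 - 0 = 1024
     *   save->height = vga_height - pos_y =  768 - 0 =  768
     *
     * i.e. the lone display is assumed to span the whole VGA screen.
     */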
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 2ff5cf78235f..11cb39e3accb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -27,6 +27,8 @@ | |||
27 | 27 | ||
28 | #include "vmwgfx_kms.h" | 28 | #include "vmwgfx_kms.h" |
29 | 29 | ||
30 | #define VMWGFX_LDU_NUM_DU 8 | ||
31 | |||
30 | #define vmw_crtc_to_ldu(x) \ | 32 | #define vmw_crtc_to_ldu(x) \ |
31 | container_of(x, struct vmw_legacy_display_unit, base.crtc) | 33 | container_of(x, struct vmw_legacy_display_unit, base.crtc) |
32 | #define vmw_encoder_to_ldu(x) \ | 34 | #define vmw_encoder_to_ldu(x) \ |
@@ -335,7 +337,8 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector) | |||
335 | } | 337 | } |
336 | 338 | ||
337 | static enum drm_connector_status | 339 | static enum drm_connector_status |
338 | vmw_ldu_connector_detect(struct drm_connector *connector) | 340 | vmw_ldu_connector_detect(struct drm_connector *connector, |
341 | bool force) | ||
339 | { | 342 | { |
340 | if (vmw_connector_to_ldu(connector)->pref_active) | 343 | if (vmw_connector_to_ldu(connector)->pref_active) |
341 | return connector_status_connected; | 344 | return connector_status_connected; |
@@ -516,7 +519,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
516 | 519 | ||
517 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, | 520 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, |
518 | DRM_MODE_CONNECTOR_LVDS); | 521 | DRM_MODE_CONNECTOR_LVDS); |
519 | connector->status = vmw_ldu_connector_detect(connector); | 522 | connector->status = vmw_ldu_connector_detect(connector, true); |
520 | 523 | ||
521 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, | 524 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, |
522 | DRM_MODE_ENCODER_LVDS); | 525 | DRM_MODE_ENCODER_LVDS); |
@@ -535,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
535 | 538 | ||
536 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | 539 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) |
537 | { | 540 | { |
541 | struct drm_device *dev = dev_priv->dev; | ||
542 | int i; | ||
543 | int ret; | ||
544 | |||
538 | if (dev_priv->ldu_priv) { | 545 | if (dev_priv->ldu_priv) { |
539 | DRM_INFO("ldu system already on\n"); | 546 | DRM_INFO("ldu system already on\n"); |
540 | return -EINVAL; | 547 | return -EINVAL; |
@@ -552,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | |||
552 | 559 | ||
553 | drm_mode_create_dirty_info_property(dev_priv->dev); | 560 | drm_mode_create_dirty_info_property(dev_priv->dev); |
554 | 561 | ||
555 | vmw_ldu_init(dev_priv, 0); | ||
556 | /* for old hardware without multimon only enable one display */ | ||
557 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { | 562 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { |
558 | vmw_ldu_init(dev_priv, 1); | 563 | for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i) |
559 | vmw_ldu_init(dev_priv, 2); | 564 | vmw_ldu_init(dev_priv, i); |
560 | vmw_ldu_init(dev_priv, 3); | 565 | ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU); |
561 | vmw_ldu_init(dev_priv, 4); | 566 | } else { |
562 | vmw_ldu_init(dev_priv, 5); | 567 | /* for old hardware without multimon only enable one display */ |
563 | vmw_ldu_init(dev_priv, 6); | 568 | vmw_ldu_init(dev_priv, 0); |
564 | vmw_ldu_init(dev_priv, 7); | 569 | ret = drm_vblank_init(dev, 1); |
565 | } | 570 | } |
566 | 571 | ||
567 | return 0; | 572 | return ret; |
568 | } | 573 | } |
569 | 574 | ||
570 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) | 575 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) |
571 | { | 576 | { |
577 | struct drm_device *dev = dev_priv->dev; | ||
578 | |||
579 | drm_vblank_cleanup(dev); | ||
572 | if (!dev_priv->ldu_priv) | 580 | if (!dev_priv->ldu_priv) |
573 | return -ENOSYS; | 581 | return -ENOSYS; |
574 | 582 | ||
@@ -610,7 +618,7 @@ int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, | |||
610 | ldu->pref_height = 600; | 618 | ldu->pref_height = 600; |
611 | ldu->pref_active = false; | 619 | ldu->pref_active = false; |
612 | } | 620 | } |
613 | con->status = vmw_ldu_connector_detect(con); | 621 | con->status = vmw_ldu_connector_detect(con, true); |
614 | } | 622 | } |
615 | 623 | ||
616 | mutex_unlock(&dev->mode_config.mutex); | 624 | mutex_unlock(&dev->mode_config.mutex); |
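Note: in vmwgfx_ldu.c the connector detect() callback gains a force parameter (callers here pass true), the eight explicit vmw_ldu_init() calls become a loop bounded by the new VMWGFX_LDU_NUM_DU, and vblank state is set up with drm_vblank_init() for one counter per display unit (one on single-display hardware) and torn down with drm_vblank_cleanup() on close. Shape of the updated callback; the connected branch is from the hunk, the disconnected fall-through is assumed since it lies outside the shown context:

    static enum drm_connector_status
    vmw_ldu_connector_detect(struct drm_connector *connector, bool force)
    {
    	/* force is accepted to match the new prototype but unused here. */
    	if (vmw_connector_to_ldu(connector)->pref_active)
    		return connector_status_connected;
    	return connector_status_disconnected;	/* assumed fall-through */
    }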
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 5f2d5df01e5c..c8c40e9979db 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
211 | cmd->body.cid = cpu_to_le32(res->id); | 211 | cmd->body.cid = cpu_to_le32(res->id); |
212 | 212 | ||
213 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 213 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
214 | vmw_3d_resource_dec(dev_priv); | ||
214 | } | 215 | } |
215 | 216 | ||
216 | static int vmw_context_init(struct vmw_private *dev_priv, | 217 | static int vmw_context_init(struct vmw_private *dev_priv, |
@@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv, | |||
247 | cmd->body.cid = cpu_to_le32(res->id); | 248 | cmd->body.cid = cpu_to_le32(res->id); |
248 | 249 | ||
249 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 250 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
251 | (void) vmw_3d_resource_inc(dev_priv); | ||
250 | vmw_resource_activate(res, vmw_hw_context_destroy); | 252 | vmw_resource_activate(res, vmw_hw_context_destroy); |
251 | return 0; | 253 | return 0; |
252 | } | 254 | } |
@@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) | |||
406 | cmd->body.sid = cpu_to_le32(res->id); | 408 | cmd->body.sid = cpu_to_le32(res->id); |
407 | 409 | ||
408 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 410 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
411 | vmw_3d_resource_dec(dev_priv); | ||
409 | } | 412 | } |
410 | 413 | ||
411 | void vmw_surface_res_free(struct vmw_resource *res) | 414 | void vmw_surface_res_free(struct vmw_resource *res) |
@@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv, | |||
473 | } | 476 | } |
474 | 477 | ||
475 | vmw_fifo_commit(dev_priv, submit_size); | 478 | vmw_fifo_commit(dev_priv, submit_size); |
479 | (void) vmw_3d_resource_inc(dev_priv); | ||
476 | vmw_resource_activate(res, vmw_hw_surface_destroy); | 480 | vmw_resource_activate(res, vmw_hw_surface_destroy); |
477 | return 0; | 481 | return 0; |
478 | } | 482 | } |
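Note: the vmwgfx_resource.c hunks pair each hardware create with a 3D-resource reference: context and surface creation call vmw_3d_resource_inc() right after committing the define command, and the corresponding hardware destroy paths call vmw_3d_resource_dec() after the destroy command, so the count tracks live 3D objects. Condensed to the inc/dec placement only (FIFO command emission elided; the real destroy callbacks take just the resource, the explicit dev_priv parameter below is a simplification):

    static int vmw_context_init_sketch(struct vmw_private *dev_priv,
    				   struct vmw_resource *res)
    {
    	/* ... build and commit the context-define FIFO command ... */
    	(void) vmw_3d_resource_inc(dev_priv);	/* new: count the context */
    	vmw_resource_activate(res, vmw_hw_context_destroy);
    	return 0;
    }

    static void vmw_hw_context_destroy_sketch(struct vmw_private *dev_priv,
    					  struct vmw_resource *res)
    {
    	/* ... build and commit the context-destroy FIFO command ... */
    	vmw_3d_resource_dec(dev_priv);		/* new: drop the reference */
    }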