author     Linus Torvalds <torvalds@linux-foundation.org>  2009-02-20 21:04:53 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-02-20 21:04:53 -0500
commit     2ec77fc93ca8731368fbe8e71f805c0569d4bcee (patch)
tree       22bfee5226f29860c3529cf31b377e27e9422d6b /drivers
parent     be71cb5b526709b8e42c707dc9e8c5b034ac8d1c (diff)
parent     3d16118dc825a654043dfe3e14371fdf2976994d (diff)
Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (26 commits)
  drm/radeon: update sarea copies of last_ variables on resume.
  drm/i915: Keep refs on the object over the lifetime of vmas for GTT mmap.
  drm/i915: take struct mutex around fb unref
  drm: Use spread spectrum when the bios tells us it's ok.
  drm: Collapse identical i8xx_clock() and i9xx_clock().
  drm: Bring PLL limits in sync with DDX values.
  drm: Add locking around cursor gem operations.
  drm: Propagate failure from setting crtc base.
  drm: Check for a NULL encoder when reverting on error path
  drm/i915: Cleanup the hws on ringbuffer constrution failure.
  drm/i915: Don't add panel_fixed_mode to the probed modes list at LVDS init.
  drm: Release user fbs in drm_release
  drm/i915: Unpin the fb on error during construction.
  drm/i915: Unpin the hws if we fail to kmap.
  drm/i915: Unpin the ringbuffer if we fail to ioremap it.
  drm/i915: unpin for an invalid memory domain.
  drm/i915: Release and unlock on mmap_gtt error path.
  drm/i915: Set framebuffer alignment based upon the fence constraints.
  drm: Do not leak a new reference for flink() on an existing name
  drm/i915: Fix potential AB-BA deadlock in i915_gem_execbuffer()
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/drm_crtc.c              |   3
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c       |  21
-rw-r--r--  drivers/gpu/drm/drm_fops.c              |   3
-rw-r--r--  drivers/gpu/drm/drm_gem.c               |  79
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c         |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         | 119
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  |   6
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c       |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c    | 160
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c         |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c       |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c       |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c         |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c      |  21
15 files changed, 271 insertions, 167 deletions
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bfce0992fefb..94a768871734 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1741,9 +1741,8 @@ out:
  * RETURNS:
  * Zero on success, errno on failure.
  */
-void drm_fb_release(struct file *filp)
+void drm_fb_release(struct drm_file *priv)
 {
-	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_framebuffer *fb, *tfb;
 
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 964c5eb1fada..733028b4d45e 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -512,8 +512,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 		if (drm_mode_equal(&saved_mode, &crtc->mode)) {
 			if (saved_x != crtc->x || saved_y != crtc->y ||
 			    depth_changed || bpp_changed) {
-				crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y,
+				ret = !crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y,
 							  old_fb);
 				goto done;
 			}
 		}
@@ -552,7 +552,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	/* Set up the DPLL and any encoders state that needs to adjust or depend
 	 * on the DPLL.
 	 */
-	crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+	ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+	if (!ret)
+		goto done;
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 
@@ -752,6 +754,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
 					      set->x, set->y,
 					      old_fb)) {
+			DRM_ERROR("failed to set mode on crtc %p\n",
+				  set->crtc);
 			ret = -EINVAL;
 			goto fail_set_mode;
 		}
@@ -765,7 +769,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		old_fb = set->crtc->fb;
 		if (set->crtc->fb != set->fb)
 			set->crtc->fb = set->fb;
-		crtc_funcs->mode_set_base(set->crtc, set->x, set->y, old_fb);
+		ret = crtc_funcs->mode_set_base(set->crtc,
+						set->x, set->y, old_fb);
+		if (ret != 0)
+			goto fail_set_mode;
 	}
 
 	kfree(save_encoders);
@@ -775,8 +782,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 fail_set_mode:
 	set->crtc->enabled = save_enabled;
 	count = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (!connector->encoder)
+			continue;
+
 		connector->encoder->crtc = save_crtcs[count++];
+	}
 fail_no_encoder:
 	kfree(save_crtcs);
 	count = 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index b06a53715853..6c020fe5431c 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -457,6 +457,9 @@ int drm_release(struct inode *inode, struct file *filp)
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_release(dev, file_priv);
 
+	if (dev->driver->driver_features & DRIVER_MODESET)
+		drm_fb_release(file_priv);
+
 	mutex_lock(&dev->ctxlist_mutex);
 	if (!list_empty(&dev->ctxlist)) {
 		struct drm_ctx_list *pos, *n;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 6915fb82d0b0..88d3368ffddd 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -104,8 +104,8 @@ drm_gem_init(struct drm_device *dev)
 
 	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
 			DRM_FILE_PAGE_OFFSET_SIZE)) {
-		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
 		drm_ht_remove(&mm->offset_hash);
+		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
 		return -ENOMEM;
 	}
 
@@ -295,35 +295,37 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 		return -EBADF;
 
 again:
-	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
-		return -ENOMEM;
+	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
+		ret = -ENOMEM;
+		goto err;
+	}
 
 	spin_lock(&dev->object_name_lock);
-	if (obj->name) {
-		args->name = obj->name;
+	if (!obj->name) {
+		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+					&obj->name);
+		args->name = (uint64_t) obj->name;
 		spin_unlock(&dev->object_name_lock);
-		return 0;
-	}
-	ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
-				&obj->name);
-	spin_unlock(&dev->object_name_lock);
-	if (ret == -EAGAIN)
-		goto again;
 
-	if (ret != 0) {
-		mutex_lock(&dev->struct_mutex);
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
+		if (ret == -EAGAIN)
+			goto again;
 
-	/*
-	 * Leave the reference from the lookup around as the
-	 * name table now holds one
-	 */
-	args->name = (uint64_t) obj->name;
+		if (ret != 0)
+			goto err;
 
-	return 0;
+		/* Allocate a reference for the name table. */
+		drm_gem_object_reference(obj);
+	} else {
+		args->name = (uint64_t) obj->name;
+		spin_unlock(&dev->object_name_lock);
+		ret = 0;
+	}
+
+err:
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
 }
 
 /**
@@ -448,6 +450,7 @@ drm_gem_object_handle_free(struct kref *kref)
 	spin_lock(&dev->object_name_lock);
 	if (obj->name) {
 		idr_remove(&dev->object_name_idr, obj->name);
+		obj->name = 0;
 		spin_unlock(&dev->object_name_lock);
 		/*
 		 * The object name held a reference to this object, drop
@@ -460,6 +463,26 @@ drm_gem_object_handle_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_handle_free);
 
+void drm_gem_vm_open(struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+
+	drm_gem_object_reference(obj);
+}
+EXPORT_SYMBOL(drm_gem_vm_open);
+
+void drm_gem_vm_close(struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_gem_vm_close);
+
+
 /**
  * drm_gem_mmap - memory map routine for GEM objects
  * @filp: DRM file pointer
@@ -521,6 +544,14 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 #endif
 	vma->vm_page_prot = __pgprot(prot);
 
+	/* Take a ref for this mapping of the object, so that the fault
+	 * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call, or
+	 * by a vm_open due to mremap or partial unmap or whatever).
+	 */
+	drm_gem_object_reference(obj);
+
 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
 	drm_vm_open_locked(vma);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index aac12ee31a46..a31cbdbc3c54 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -94,6 +94,8 @@ static int i915_resume(struct drm_device *dev)
 
 static struct vm_operations_struct i915_gem_vm_ops = {
 	.fault = i915_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
 };
 
 static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7325363164f8..135a08f615cd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -184,6 +184,8 @@ typedef struct drm_i915_private {
 	unsigned int lvds_dither:1;
 	unsigned int lvds_vbt:1;
 	unsigned int int_crt_support:1;
+	unsigned int lvds_use_ssc:1;
+	int lvds_ssc_freq;
 
 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 818576654092..ac534c9a2f81 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -607,8 +607,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	case -EAGAIN:
 		return VM_FAULT_OOM;
 	case -EFAULT:
-	case -EBUSY:
-		DRM_ERROR("can't insert pfn?? fault or busy...\n");
 		return VM_FAULT_SIGBUS;
 	default:
 		return VM_FAULT_NOPAGE;
@@ -684,6 +682,30 @@ out_free_list:
 	return ret;
 }
 
+static void
+i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list;
+
+	list = &obj->map_list;
+	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+	if (list->file_offset_node) {
+		drm_mm_put_block(list->file_offset_node);
+		list->file_offset_node = NULL;
+	}
+
+	if (list->map) {
+		drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
+		list->map = NULL;
+	}
+
+	obj_priv->mmap_offset = 0;
+}
+
 /**
  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  * @obj: object to check
@@ -758,8 +780,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
 	if (!obj_priv->mmap_offset) {
 		ret = i915_gem_create_mmap_offset(obj);
-		if (ret)
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
 			return ret;
+		}
 	}
 
 	args->offset = obj_priv->mmap_offset;
@@ -2251,6 +2276,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			  (int) reloc.offset,
 			  reloc.read_domains,
 			  reloc.write_domain);
+		drm_gem_object_unreference(target_obj);
+		i915_gem_object_unpin(obj);
 		return -EINVAL;
 	}
 
@@ -2480,13 +2507,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	if (dev_priv->mm.wedged) {
 		DRM_ERROR("Execbuf while wedged\n");
 		mutex_unlock(&dev->struct_mutex);
-		return -EIO;
+		ret = -EIO;
+		goto pre_mutex_err;
 	}
 
 	if (dev_priv->mm.suspended) {
 		DRM_ERROR("Execbuf while VT-switched.\n");
 		mutex_unlock(&dev->struct_mutex);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto pre_mutex_err;
 	}
 
 	/* Look up object handles */
@@ -2632,15 +2661,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
-	/* Copy the new buffer offsets back to the user's exec list. */
-	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-			   (uintptr_t) args->buffers_ptr,
-			   exec_list,
-			   sizeof(*exec_list) * args->buffer_count);
-	if (ret)
-		DRM_ERROR("failed to copy %d exec entries "
-			  "back to user (%d)\n",
-			  args->buffer_count, ret);
 err:
 	for (i = 0; i < pinned; i++)
 		i915_gem_object_unpin(object_list[i]);
@@ -2650,6 +2670,18 @@ err:
 
 	mutex_unlock(&dev->struct_mutex);
 
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec_list,
+				   sizeof(*exec_list) * args->buffer_count);
+		if (ret)
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+	}
+
 pre_mutex_err:
 	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
 		 DRM_MEM_DRIVER);
@@ -2753,6 +2785,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
 		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
+		drm_gem_object_unreference(obj);
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
@@ -2885,9 +2918,6 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list;
-	struct drm_map *map;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
 	while (obj_priv->pin_count > 0)
@@ -2898,19 +2928,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
 	i915_gem_object_unbind(obj);
 
-	list = &obj->map_list;
-	drm_ht_remove_item(&mm->offset_hash, &list->hash);
-
-	if (list->file_offset_node) {
-		drm_mm_put_block(list->file_offset_node);
-		list->file_offset_node = NULL;
-	}
-
-	map = list->map;
-	if (map) {
-		drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
-		list->map = NULL;
-	}
+	i915_gem_free_mmap_offset(obj);
 
 	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
@@ -3095,6 +3113,7 @@ i915_gem_init_hws(struct drm_device *dev)
 	if (dev_priv->hw_status_page == NULL) {
 		DRM_ERROR("Failed to map status page.\n");
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+		i915_gem_object_unpin(obj);
 		drm_gem_object_unreference(obj);
 		return -EINVAL;
 	}
@@ -3107,6 +3126,27 @@ i915_gem_init_hws(struct drm_device *dev)
 	return 0;
 }
 
+static void
+i915_gem_cleanup_hws(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj = dev_priv->hws_obj;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	if (dev_priv->hws_obj == NULL)
+		return;
+
+	kunmap(obj_priv->page_list[0]);
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(obj);
+	dev_priv->hws_obj = NULL;
+	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+	dev_priv->hw_status_page = NULL;
+
+	/* Write high address into HWS_PGA when disabling. */
+	I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
@@ -3124,6 +3164,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	obj = drm_gem_object_alloc(dev, 128 * 1024);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate ringbuffer\n");
+		i915_gem_cleanup_hws(dev);
 		return -ENOMEM;
 	}
 	obj_priv = obj->driver_private;
@@ -3131,6 +3172,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	ret = i915_gem_object_pin(obj, 4096);
 	if (ret != 0) {
 		drm_gem_object_unreference(obj);
+		i915_gem_cleanup_hws(dev);
 		return ret;
 	}
 
@@ -3148,7 +3190,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	if (ring->map.handle == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
 		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+		i915_gem_object_unpin(obj);
 		drm_gem_object_unreference(obj);
+		i915_gem_cleanup_hws(dev);
 		return -EINVAL;
 	}
 	ring->ring_obj = obj;
@@ -3228,20 +3272,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 	dev_priv->ring.ring_obj = NULL;
 	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
 
-	if (dev_priv->hws_obj != NULL) {
-		struct drm_gem_object *obj = dev_priv->hws_obj;
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
-		kunmap(obj_priv->page_list[0]);
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(obj);
-		dev_priv->hws_obj = NULL;
-		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-		dev_priv->hw_status_page = NULL;
-
-		/* Write high address into HWS_PGA when disabling. */
-		I915_WRITE(HWS_PGA, 0x1ffff000);
-	}
+	i915_gem_cleanup_hws(dev);
 }
 
 int
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index fa1685cba840..7fb4191ef934 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -299,9 +299,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	}
 	obj_priv->stride = args->stride;
 
-	mutex_unlock(&dev->struct_mutex);
-
 	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -340,9 +339,8 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
340 DRM_ERROR("unknown tiling mode\n"); 339 DRM_ERROR("unknown tiling mode\n");
341 } 340 }
342 341
343 mutex_unlock(&dev->struct_mutex);
344
345 drm_gem_object_unreference(obj); 342 drm_gem_object_unreference(obj);
343 mutex_unlock(&dev->struct_mutex);
346 344
347 return 0; 345 return 0;
348} 346}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4ca82a025525..65be30dccc77 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -135,6 +135,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
 	if (general) {
 		dev_priv->int_tv_support = general->int_tv_support;
 		dev_priv->int_crt_support = general->int_crt_support;
+		dev_priv->lvds_use_ssc = general->enable_ssc;
+
+		if (dev_priv->lvds_use_ssc) {
+			if (IS_I855(dev_priv->dev))
+				dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
+			else
+				dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
+		}
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index bbdd72909a11..4d2baf7b00be 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -90,12 +90,12 @@ typedef struct {
 #define I9XX_DOT_MAX 400000
 #define I9XX_VCO_MIN 1400000
 #define I9XX_VCO_MAX 2800000
-#define I9XX_N_MIN 3
-#define I9XX_N_MAX 8
+#define I9XX_N_MIN 1
+#define I9XX_N_MAX 6
 #define I9XX_M_MIN 70
 #define I9XX_M_MAX 120
 #define I9XX_M1_MIN 10
-#define I9XX_M1_MAX 20
+#define I9XX_M1_MAX 22
 #define I9XX_M2_MIN 5
 #define I9XX_M2_MAX 9
 #define I9XX_P_SDVO_DAC_MIN 5
@@ -189,9 +189,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
 	return limit;
 }
 
-/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-
-static void i8xx_clock(int refclk, intel_clock_t *clock)
+static void intel_clock(int refclk, intel_clock_t *clock)
 {
 	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
 	clock->p = clock->p1 * clock->p2;
@@ -199,25 +197,6 @@ static void i8xx_clock(int refclk, intel_clock_t *clock)
 	clock->dot = clock->vco / clock->p;
 }
 
-/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
-
-static void i9xx_clock(int refclk, intel_clock_t *clock)
-{
-	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
-	clock->p = clock->p1 * clock->p2;
-	clock->vco = refclk * clock->m / (clock->n + 2);
-	clock->dot = clock->vco / clock->p;
-}
-
-static void intel_clock(struct drm_device *dev, int refclk,
-			intel_clock_t *clock)
-{
-	if (IS_I9XX(dev))
-		i9xx_clock (refclk, clock);
-	else
-		i8xx_clock (refclk, clock);
-}
-
 /**
  * Returns whether any output on the specified pipe is of the specified type
  */
@@ -238,7 +217,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
 	return false;
 }
 
-#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
+#define INTELPllInvalid(s)   do { DRM_DEBUG(s); return false; } while (0)
 /**
  * Returns whether the given set of divisors are valid for a given refclk with
  * the given connectors.
@@ -318,7 +297,7 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
 		     clock.p1 <= limit->p1.max; clock.p1++) {
 			int this_err;
 
-			intel_clock(dev, refclk, &clock);
+			intel_clock(refclk, &clock);
 
 			if (!intel_PLL_is_valid(crtc, &clock))
 				continue;
@@ -343,7 +322,7 @@ intel_wait_for_vblank(struct drm_device *dev)
 	udelay(20000);
 }
 
-static void
+static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		    struct drm_framebuffer *old_fb)
 {
@@ -361,11 +340,21 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
 	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
 	u32 dspcntr, alignment;
+	int ret;
 
 	/* no fb bound */
 	if (!crtc->fb) {
 		DRM_DEBUG("No FB bound\n");
-		return;
+		return 0;
+	}
+
+	switch (pipe) {
+	case 0:
+	case 1:
+		break;
+	default:
+		DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+		return -EINVAL;
 	}
 
 	intel_fb = to_intel_framebuffer(crtc->fb);
@@ -377,28 +366,30 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		alignment = 64 * 1024;
 		break;
 	case I915_TILING_X:
-		if (IS_I9XX(dev))
-			alignment = 1024 * 1024;
-		else
-			alignment = 512 * 1024;
+		/* pin() will align the object as required by fence */
+		alignment = 0;
 		break;
 	case I915_TILING_Y:
 		/* FIXME: Is this true? */
 		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
-		return;
+		return -EINVAL;
 	default:
 		BUG();
 	}
 
-	if (i915_gem_object_pin(intel_fb->obj, alignment))
-		return;
-
-	i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
-
-	Start = obj_priv->gtt_offset;
-	Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_object_pin(intel_fb->obj, alignment);
+	if (ret != 0) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
 
-	I915_WRITE(dspstride, crtc->fb->pitch);
+	ret = i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
+	if (ret != 0) {
+		i915_gem_object_unpin(intel_fb->obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
 
 	dspcntr = I915_READ(dspcntr_reg);
 	/* Mask out pixel format bits in case we change it */
@@ -419,11 +410,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		break;
 	default:
 		DRM_ERROR("Unknown color depth\n");
-		return;
+		i915_gem_object_unpin(intel_fb->obj);
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
 	}
 	I915_WRITE(dspcntr_reg, dspcntr);
 
+	Start = obj_priv->gtt_offset;
+	Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+
 	DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+	I915_WRITE(dspstride, crtc->fb->pitch);
 	if (IS_I965G(dev)) {
 		I915_WRITE(dspbase, Offset);
 		I915_READ(dspbase);
@@ -440,27 +437,24 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		intel_fb = to_intel_framebuffer(old_fb);
 		i915_gem_object_unpin(intel_fb->obj);
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	if (!dev->primary->master)
-		return;
+		return 0;
 
 	master_priv = dev->primary->master->driver_priv;
 	if (!master_priv->sarea_priv)
-		return;
+		return 0;
 
-	switch (pipe) {
-	case 0:
-		master_priv->sarea_priv->pipeA_x = x;
-		master_priv->sarea_priv->pipeA_y = y;
-		break;
-	case 1:
+	if (pipe) {
 		master_priv->sarea_priv->pipeB_x = x;
 		master_priv->sarea_priv->pipeB_y = y;
-		break;
-	default:
-		DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
-		break;
+	} else {
+		master_priv->sarea_priv->pipeA_x = x;
+		master_priv->sarea_priv->pipeA_y = y;
 	}
+
+	return 0;
 }
 
 
@@ -708,11 +702,11 @@ static int intel_panel_fitter_pipe (struct drm_device *dev)
 	return 1;
 }
 
-static void intel_crtc_mode_set(struct drm_crtc *crtc,
+static int intel_crtc_mode_set(struct drm_crtc *crtc,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode,
 				int x, int y,
 				struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -732,13 +726,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
 	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
 	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
-	int refclk;
+	int refclk, num_outputs = 0;
 	intel_clock_t clock;
 	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
 	bool ok, is_sdvo = false, is_dvo = false;
 	bool is_crt = false, is_lvds = false, is_tv = false;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
+	int ret;
 
 	drm_vblank_pre_modeset(dev, pipe);
 
@@ -768,9 +763,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 			is_crt = true;
 			break;
 		}
+
+		num_outputs++;
 	}
 
-	if (IS_I9XX(dev)) {
+	if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
+		refclk = dev_priv->lvds_ssc_freq * 1000;
+		DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000);
+	} else if (IS_I9XX(dev)) {
 		refclk = 96000;
 	} else {
 		refclk = 48000;
@@ -779,7 +779,7 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 	ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
 	if (!ok) {
 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
-		return;
+		return -EINVAL;
 	}
 
 	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
@@ -829,11 +829,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}
 
-	if (is_tv) {
+	if (is_sdvo && is_tv)
+		dpll |= PLL_REF_INPUT_TVCLKINBC;
+	else if (is_tv)
 		/* XXX: just matching BIOS for now */
 		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
 		dpll |= 3;
-	}
+	else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2)
+		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
 
@@ -950,9 +953,13 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 	I915_WRITE(dspcntr_reg, dspcntr);
 
 	/* Flush the plane changes */
-	intel_pipe_set_base(crtc, x, y, old_fb);
+	ret = intel_pipe_set_base(crtc, x, y, old_fb);
+	if (ret != 0)
+		return ret;
 
 	drm_vblank_post_modeset(dev, pipe);
+
+	return 0;
 }
 
 /** Loads the palette/gamma unit for the CRTC with the prepared values */
@@ -1023,18 +1030,19 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	}
 
 	/* we only need to pin inside GTT if cursor is non-phy */
+	mutex_lock(&dev->struct_mutex);
 	if (!dev_priv->cursor_needs_physical) {
 		ret = i915_gem_object_pin(bo, PAGE_SIZE);
 		if (ret) {
 			DRM_ERROR("failed to pin cursor bo\n");
-			goto fail;
+			goto fail_locked;
 		}
 		addr = obj_priv->gtt_offset;
 	} else {
 		ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
 		if (ret) {
 			DRM_ERROR("failed to attach phys object\n");
-			goto fail;
+			goto fail_locked;
 		}
 		addr = obj_priv->phys_obj->handle->busaddr;
 	}
@@ -1054,10 +1062,9 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 			i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
 		} else
 			i915_gem_object_unpin(intel_crtc->cursor_bo);
-		mutex_lock(&dev->struct_mutex);
 		drm_gem_object_unreference(intel_crtc->cursor_bo);
-		mutex_unlock(&dev->struct_mutex);
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	intel_crtc->cursor_addr = addr;
 	intel_crtc->cursor_bo = bo;
@@ -1065,6 +1072,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	return 0;
 fail:
 	mutex_lock(&dev->struct_mutex);
+fail_locked:
 	drm_gem_object_unreference(bo);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -1292,7 +1300,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 		}
 
 		/* XXX: Handle the 100Mhz refclk */
-		i9xx_clock(96000, &clock);
+		intel_clock(96000, &clock);
 	} else {
 		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
 
@@ -1304,9 +1312,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 		if ((dpll & PLL_REF_INPUT_MASK) ==
 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
 			/* XXX: might not be 66MHz */
-			i8xx_clock(66000, &clock);
+			intel_clock(66000, &clock);
 		} else
-			i8xx_clock(48000, &clock);
+			intel_clock(48000, &clock);
 	} else {
 		if (dpll & PLL_P1_DIVIDE_BY_TWO)
 			clock.p1 = 2;
@@ -1319,7 +1327,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 		else
 			clock.p2 = 2;
 
-		i8xx_clock(48000, &clock);
+		intel_clock(48000, &clock);
 		}
 	}
 
@@ -1598,7 +1606,9 @@ intel_user_framebuffer_create(struct drm_device *dev,
 
 	ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
 	if (ret) {
+		mutex_lock(&dev->struct_mutex);
 		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
 		return NULL;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index afd1217b8a02..b7f0ebe9f810 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -473,7 +473,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 	ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
 	if (ret) {
 		DRM_ERROR("failed to allocate fb.\n");
-		goto out_unref;
+		goto out_unpin;
 	}
 
 	list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
@@ -484,7 +484,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 	info = framebuffer_alloc(sizeof(struct intelfb_par), device);
 	if (!info) {
 		ret = -ENOMEM;
-		goto out_unref;
+		goto out_unpin;
 	}
 
 	par = info->par;
@@ -513,7 +513,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 			   size);
 	if (!info->screen_base) {
 		ret = -ENOSPC;
-		goto out_unref;
+		goto out_unpin;
 	}
 	info->screen_size = size;
 
@@ -608,6 +608,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 	mutex_unlock(&dev->struct_mutex);
 	return 0;
 
+out_unpin:
+	i915_gem_object_unpin(fbo);
 out_unref:
 	drm_gem_object_unreference(fbo);
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 6d4f91265354..0d211af98854 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -481,8 +481,6 @@ void intel_lvds_init(struct drm_device *dev)
 		if (dev_priv->panel_fixed_mode) {
 			dev_priv->panel_fixed_mode->type |=
 				DRM_MODE_TYPE_PREFERRED;
-			drm_mode_probed_add(connector,
-					    dev_priv->panel_fixed_mode);
 			goto out;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a30508b639ba..fbe6f3931b1b 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -193,7 +193,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
 
 #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
 /** Mapping of command numbers to names, for debug output */
-const static struct _sdvo_cmd_name {
+static const struct _sdvo_cmd_name {
 	u8 cmd;
 	char *name;
 } sdvo_cmd_names[] = {
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index fbb35dc56f5c..56485d67369b 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -411,7 +411,7 @@ struct tv_mode {
  * These values account for -1s required.
  */
 
-const static struct tv_mode tv_modes[] = {
+static const struct tv_mode tv_modes[] = {
 	{
 		.name = "NTSC-M",
 		.clock = 107520,
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index df4cf97e5d97..92965dbb3c14 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -557,8 +557,10 @@ static int radeon_do_engine_reset(struct drm_device * dev)
 }
 
 static void radeon_cp_init_ring_buffer(struct drm_device * dev,
-				       drm_radeon_private_t * dev_priv)
+				       drm_radeon_private_t *dev_priv,
+				       struct drm_file *file_priv)
 {
+	struct drm_radeon_master_private *master_priv;
 	u32 ring_start, cur_read_ptr;
 	u32 tmp;
 
@@ -677,6 +679,14 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
 	dev_priv->scratch[2] = 0;
 	RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0);
 
+	/* reset sarea copies of these */
+	master_priv = file_priv->master->driver_priv;
+	if (master_priv->sarea_priv) {
+		master_priv->sarea_priv->last_frame = 0;
+		master_priv->sarea_priv->last_dispatch = 0;
+		master_priv->sarea_priv->last_clear = 0;
+	}
+
 	radeon_do_wait_for_idle(dev_priv);
 
 	/* Sync everything up */
@@ -1215,7 +1225,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
 	}
 
 	radeon_cp_load_microcode(dev_priv);
-	radeon_cp_init_ring_buffer(dev, dev_priv);
+	radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
 
 	dev_priv->last_buf = 0;
 
@@ -1281,7 +1291,7 @@ static int radeon_do_cleanup_cp(struct drm_device * dev)
  *
  * Charl P. Botha <http://cpbotha.net>
  */
-static int radeon_do_resume_cp(struct drm_device * dev)
+static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 
@@ -1304,7 +1314,7 @@ static int radeon_do_resume_cp(struct drm_device * dev)
 	}
 
 	radeon_cp_load_microcode(dev_priv);
-	radeon_cp_init_ring_buffer(dev, dev_priv);
+	radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
 
 	radeon_do_engine_reset(dev);
 	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
@@ -1479,8 +1489,7 @@ int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_pri
  */
 int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
-
-	return radeon_do_resume_cp(dev);
+	return radeon_do_resume_cp(dev, file_priv);
 }
 
 int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)