author    Ingo Molnar <mingo@elte.hu>    2009-02-26 07:22:13 -0500
committer Ingo Molnar <mingo@elte.hu>    2009-02-26 07:22:13 -0500
commit    4434e5156409eb3ec98f5ad7f0a0c07ebafe970d (patch)
tree      c7f148cdae7a6cf4013253c6c2dbd1c49e837e32 /drivers/gpu/drm/i915
parent    2b8f836fb196acede88b6cc772e9057e0a9c0223 (diff)
parent    694593e3374a67d95ece6a275a1f181644c2c4d8 (diff)
Merge branches 'sched/cleanups', 'sched/urgent' and 'linus' into sched/core
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c        |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c        |  29
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h        |   3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c        | 185
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c |   6
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c      |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c   | 161
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c        |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c      |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c      |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c        |   2
11 files changed, 253 insertions, 155 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 81f1cff56fd5..2d797ffe8137 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -202,7 +202,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	dev_priv->ring.map.flags = 0;
 	dev_priv->ring.map.mtrr = 0;
 
-	drm_core_ioremap(&dev_priv->ring.map, dev);
+	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
 
 	if (dev_priv->ring.map.handle == NULL) {
 		i915_dma_cleanup(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index aac12ee31a46..b293ef0bae71 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -27,6 +27,7 @@
  *
  */
 
+#include <linux/device.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -66,6 +67,14 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 	i915_save_state(dev);
 
+	/* If KMS is active, we do the leavevt stuff here */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		if (i915_gem_idle(dev))
+			dev_err(&dev->pdev->dev,
+				"GEM idle failed, resume may fail\n");
+		drm_irq_uninstall(dev);
+	}
+
 	intel_opregion_free(dev);
 
 	if (state.event == PM_EVENT_SUSPEND) {
@@ -79,6 +88,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 static int i915_resume(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
 	pci_set_power_state(dev->pdev, PCI_D0);
 	pci_restore_state(dev->pdev);
 	if (pci_enable_device(dev->pdev))
@@ -89,11 +101,26 @@ static int i915_resume(struct drm_device *dev)
 
 	intel_opregion_init(dev);
 
-	return 0;
+	/* KMS EnterVT equivalent */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		mutex_lock(&dev->struct_mutex);
+		dev_priv->mm.suspended = 0;
+
+		ret = i915_gem_init_ringbuffer(dev);
+		if (ret != 0)
+			ret = -1;
+		mutex_unlock(&dev->struct_mutex);
+
+		drm_irq_install(dev);
+	}
+
+	return ret;
 }
 
 static struct vm_operations_struct i915_gem_vm_ops = {
 	.fault = i915_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
 };
 
 static struct drm_driver driver = {
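Note on the vm_ops change: .open and .close are wired to drm_gem_vm_open/drm_gem_vm_close so the GEM object's reference count tracks the number of VMAs mapping it (fork() duplicates a mapping, munmap() drops one). A minimal userspace sketch of that pairing idea, with hypothetical names, not kernel code:

    /* Hypothetical sketch: pair open/close hooks with a refcount, the way
     * drm_gem_vm_open/drm_gem_vm_close pair once per VMA. */
    #include <assert.h>
    #include <stdio.h>

    struct object { int refcount; };

    static void vm_open(struct object *obj)  { obj->refcount++; }
    static void vm_close(struct object *obj) { obj->refcount--; }

    int main(void)
    {
        struct object obj = { .refcount = 1 };   /* creator's reference */
        vm_open(&obj);                           /* first mapping */
        vm_open(&obj);                           /* fork() duplicates the VMA */
        vm_close(&obj);
        vm_close(&obj);
        assert(obj.refcount == 1);               /* only creator's ref remains */
        printf("refcount = %d\n", obj.refcount);
        return 0;
    }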
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7325363164f8..17fa40858d26 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -184,6 +184,8 @@ typedef struct drm_i915_private {
 	unsigned int lvds_dither:1;
 	unsigned int lvds_vbt:1;
 	unsigned int int_crt_support:1;
+	unsigned int lvds_use_ssc:1;
+	int lvds_ssc_freq;
 
 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
@@ -616,6 +618,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
+int i915_gem_idle(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 818576654092..28b726d07a0c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,10 +34,6 @@
 
 #define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  uint32_t read_domains,
-				  uint32_t write_domain);
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -607,8 +603,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	case -EAGAIN:
 		return VM_FAULT_OOM;
 	case -EFAULT:
-	case -EBUSY:
-		DRM_ERROR("can't insert pfn?? fault or busy...\n");
 		return VM_FAULT_SIGBUS;
 	default:
 		return VM_FAULT_NOPAGE;
@@ -684,6 +678,30 @@ out_free_list:
 	return ret;
 }
 
+static void
+i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list;
+
+	list = &obj->map_list;
+	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+	if (list->file_offset_node) {
+		drm_mm_put_block(list->file_offset_node);
+		list->file_offset_node = NULL;
+	}
+
+	if (list->map) {
+		drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
+		list->map = NULL;
+	}
+
+	obj_priv->mmap_offset = 0;
+}
+
 /**
  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  * @obj: object to check
@@ -758,8 +776,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
 	if (!obj_priv->mmap_offset) {
 		ret = i915_gem_create_mmap_offset(obj);
-		if (ret)
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
 			return ret;
+		}
 	}
 
 	args->offset = obj_priv->mmap_offset;
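Note: the fix above illustrates the general ioctl rule this series enforces in several places: whatever the path acquired before the failure point (here the object reference from the handle lookup and struct_mutex) must be released on every return. A standalone pthreads sketch of the pattern, all names hypothetical:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    struct obj { int refs; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* simulate the offset-creation step failing */
    static int create_offset(struct obj *o) { (void)o; return -ENOMEM; }

    static int mmap_gtt(struct obj *o)
    {
        int ret;

        pthread_mutex_lock(&lock);
        o->refs++;                          /* handle lookup takes a reference */

        ret = create_offset(o);
        if (ret) {
            o->refs--;                      /* drop the reference ... */
            pthread_mutex_unlock(&lock);    /* ... and the lock, then fail */
            return ret;
        }

        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        struct obj o = { 1 };
        mmap_gtt(&o);
        printf("refs after failed ioctl: %d\n", o.refs);   /* 1, as before */
        return 0;
    }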
@@ -1030,6 +1051,9 @@ i915_gem_retire_requests(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 
+	if (!dev_priv->hw_status_page)
+		return;
+
 	seqno = i915_get_gem_seqno(dev);
 
 	while (!list_empty(&dev_priv->mm.request_list)) {
@@ -1996,30 +2020,28 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  * drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  uint32_t read_domains,
-				  uint32_t write_domain)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
 
-	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
-	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
 	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
 		 __func__, obj,
-		 obj->read_domains, read_domains,
-		 obj->write_domain, write_domain);
+		 obj->read_domains, obj->pending_read_domains,
+		 obj->write_domain, obj->pending_write_domain);
 #endif
 	/*
 	 * If the object isn't moving to a new write domain,
 	 * let the object stay in multiple read domains
 	 */
-	if (write_domain == 0)
-		read_domains |= obj->read_domains;
+	if (obj->pending_write_domain == 0)
+		obj->pending_read_domains |= obj->read_domains;
 	else
 		obj_priv->dirty = 1;
 
@@ -2029,15 +2051,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 	 * any read domains which differ from the old
 	 * write domain
 	 */
-	if (obj->write_domain && obj->write_domain != read_domains) {
+	if (obj->write_domain &&
+	    obj->write_domain != obj->pending_read_domains) {
 		flush_domains |= obj->write_domain;
-		invalidate_domains |= read_domains & ~obj->write_domain;
+		invalidate_domains |=
+			obj->pending_read_domains & ~obj->write_domain;
 	}
 	/*
 	 * Invalidate any read caches which may have
 	 * stale data. That is, any new read domains.
 	 */
-	invalidate_domains |= read_domains & ~obj->read_domains;
+	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
 	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 #if WATCH_BUF
 		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
@@ -2046,9 +2070,15 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 		i915_gem_clflush_object(obj);
 	}
 
-	if ((write_domain | flush_domains) != 0)
-		obj->write_domain = write_domain;
-	obj->read_domains = read_domains;
+	/* The actual obj->write_domain will be updated with
+	 * pending_write_domain after we emit the accumulated flush for all
+	 * of our domain changes in execbuffers (which clears objects'
+	 * write_domains).  So if we have a current write domain that we
+	 * aren't changing, set pending_write_domain to that.
+	 */
+	if (flush_domains == 0 && obj->pending_write_domain == 0)
+		obj->pending_write_domain = obj->write_domain;
+	obj->read_domains = obj->pending_read_domains;
 
 	dev->invalidate_domains |= invalidate_domains;
 	dev->flush_domains |= flush_domains;
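Note: the rewrite above moves the read/write domain arguments into obj->pending_read_domains/pending_write_domain but keeps the same set arithmetic. A standalone restatement of that arithmetic (domain bit values are illustrative, not the real I915_GEM_DOMAIN_* constants):

    #include <stdint.h>
    #include <stdio.h>

    #define DOMAIN_RENDER   0x02
    #define DOMAIN_SAMPLER  0x04

    int main(void)
    {
        uint32_t read_domains  = DOMAIN_RENDER;   /* current state */
        uint32_t write_domain  = DOMAIN_RENDER;
        uint32_t pending_read  = DOMAIN_SAMPLER;  /* wanted by the new batch */
        uint32_t pending_write = 0;

        uint32_t invalidate = 0, flush = 0;

        /* keep old read domains when no new write domain is requested */
        if (pending_write == 0)
            pending_read |= read_domains;

        /* flush an old write domain that the new readers don't match */
        if (write_domain && write_domain != pending_read) {
            flush |= write_domain;
            invalidate |= pending_read & ~write_domain;
        }
        /* invalidate any brand-new read domains */
        invalidate |= pending_read & ~read_domains;

        printf("flush %#x invalidate %#x\n", flush, invalidate);
        return 0;
    }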
@@ -2251,6 +2281,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			  (int) reloc.offset,
 			  reloc.read_domains,
 			  reloc.write_domain);
+		drm_gem_object_unreference(target_obj);
+		i915_gem_object_unpin(obj);
 		return -EINVAL;
 	}
 
@@ -2480,13 +2512,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	if (dev_priv->mm.wedged) {
 		DRM_ERROR("Execbuf while wedged\n");
 		mutex_unlock(&dev->struct_mutex);
-		return -EIO;
+		ret = -EIO;
+		goto pre_mutex_err;
 	}
 
 	if (dev_priv->mm.suspended) {
 		DRM_ERROR("Execbuf while VT-switched.\n");
 		mutex_unlock(&dev->struct_mutex);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto pre_mutex_err;
 	}
 
 	/* Look up object handles */
@@ -2554,9 +2588,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		struct drm_gem_object *obj = object_list[i];
 
 		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj,
-						  obj->pending_read_domains,
-						  obj->pending_write_domain);
+		i915_gem_object_set_to_gpu_domain(obj);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2575,6 +2607,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		(void)i915_add_request(dev, dev->flush_domains);
 	}
 
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_gem_object *obj = object_list[i];
+
+		obj->write_domain = obj->pending_write_domain;
+	}
+
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
 #if WATCH_COHERENCY
@@ -2632,15 +2670,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
-	/* Copy the new buffer offsets back to the user's exec list. */
-	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-			   (uintptr_t) args->buffers_ptr,
-			   exec_list,
-			   sizeof(*exec_list) * args->buffer_count);
-	if (ret)
-		DRM_ERROR("failed to copy %d exec entries "
-			  "back to user (%d)\n",
-			  args->buffer_count, ret);
 err:
 	for (i = 0; i < pinned; i++)
 		i915_gem_object_unpin(object_list[i]);
@@ -2650,6 +2679,18 @@ err:
 
 	mutex_unlock(&dev->struct_mutex);
 
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec_list,
+				   sizeof(*exec_list) * args->buffer_count);
+		if (ret)
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+	}
+
 pre_mutex_err:
 	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
 		 DRM_MEM_DRIVER);
@@ -2753,6 +2794,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
 		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
+		drm_gem_object_unreference(obj);
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
@@ -2833,6 +2875,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		return -EBADF;
 	}
 
+	/* Update the active list for the hardware's current position.
+	 * Otherwise this only updates on a delayed timer or when irqs are
+	 * actually unmasked, and our working set ends up being larger than
+	 * required.
+	 */
+	i915_gem_retire_requests(dev);
+
 	obj_priv = obj->driver_private;
 	/* Don't count being on the flushing list against the object being
 	 * done.  Otherwise, a buffer left on the flushing list but not getting
@@ -2885,9 +2934,6 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list;
-	struct drm_map *map;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
 	while (obj_priv->pin_count > 0)
@@ -2898,19 +2944,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
 	i915_gem_object_unbind(obj);
 
-	list = &obj->map_list;
-	drm_ht_remove_item(&mm->offset_hash, &list->hash);
-
-	if (list->file_offset_node) {
-		drm_mm_put_block(list->file_offset_node);
-		list->file_offset_node = NULL;
-	}
-
-	map = list->map;
-	if (map) {
-		drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
-		list->map = NULL;
-	}
+	i915_gem_free_mmap_offset(obj);
 
 	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
@@ -2949,7 +2983,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
 	return 0;
 }
 
-static int
+int
 i915_gem_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3095,6 +3129,7 @@ i915_gem_init_hws(struct drm_device *dev)
 	if (dev_priv->hw_status_page == NULL) {
 		DRM_ERROR("Failed to map status page.\n");
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+		i915_gem_object_unpin(obj);
 		drm_gem_object_unreference(obj);
 		return -EINVAL;
 	}
@@ -3107,6 +3142,31 @@ i915_gem_init_hws(struct drm_device *dev)
 	return 0;
 }
 
+static void
+i915_gem_cleanup_hws(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	if (dev_priv->hws_obj == NULL)
+		return;
+
+	obj = dev_priv->hws_obj;
+	obj_priv = obj->driver_private;
+
+	kunmap(obj_priv->page_list[0]);
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(obj);
+	dev_priv->hws_obj = NULL;
+
+	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+	dev_priv->hw_status_page = NULL;
+
+	/* Write high address into HWS_PGA when disabling. */
+	I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
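Note: i915_gem_cleanup_hws() early-returns when hws_obj is NULL, which is what lets the hunks below call it from every failure point in i915_gem_init_ringbuffer() as well as from the normal teardown path. A toy version of that idempotent-teardown pattern, names hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    static char *status_page;

    static void cleanup_status_page(void)
    {
        if (status_page == NULL)    /* nothing to do: safe from any path */
            return;
        free(status_page);
        status_page = NULL;
    }

    static int init_ring(int ring_alloc_fails)
    {
        status_page = malloc(4096);
        if (status_page == NULL)
            return -1;
        if (ring_alloc_fails) {
            cleanup_status_page();  /* unwind the step that did succeed */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        init_ring(1);               /* fails; status page already cleaned */
        cleanup_status_page();      /* second call is a harmless no-op */
        puts("done");
        return 0;
    }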
@@ -3124,6 +3184,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	obj = drm_gem_object_alloc(dev, 128 * 1024);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate ringbuffer\n");
+		i915_gem_cleanup_hws(dev);
 		return -ENOMEM;
 	}
 	obj_priv = obj->driver_private;
@@ -3131,6 +3192,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	ret = i915_gem_object_pin(obj, 4096);
 	if (ret != 0) {
 		drm_gem_object_unreference(obj);
+		i915_gem_cleanup_hws(dev);
 		return ret;
 	}
 
@@ -3148,7 +3210,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	if (ring->map.handle == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
 		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+		i915_gem_object_unpin(obj);
 		drm_gem_object_unreference(obj);
+		i915_gem_cleanup_hws(dev);
 		return -EINVAL;
 	}
 	ring->ring_obj = obj;
@@ -3228,20 +3292,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 	dev_priv->ring.ring_obj = NULL;
 	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
 
-	if (dev_priv->hws_obj != NULL) {
-		struct drm_gem_object *obj = dev_priv->hws_obj;
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
-		kunmap(obj_priv->page_list[0]);
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(obj);
-		dev_priv->hws_obj = NULL;
-		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-		dev_priv->hw_status_page = NULL;
-
-		/* Write high address into HWS_PGA when disabling. */
-		I915_WRITE(HWS_PGA, 0x1ffff000);
-	}
+	i915_gem_cleanup_hws(dev);
 }
 
 int
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index fa1685cba840..7fb4191ef934 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -299,9 +299,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	}
 	obj_priv->stride = args->stride;
 
-	mutex_unlock(&dev->struct_mutex);
-
 	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -340,9 +339,8 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 		DRM_ERROR("unknown tiling mode\n");
 	}
 
-	mutex_unlock(&dev->struct_mutex);
-
 	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
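Note: both tiling ioctls now drop the object reference before releasing struct_mutex, so a final unreference (and free) can never race with another thread that is about to take the lock. The same ordering in a runnable pthreads sketch, names hypothetical:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int refs = 1;

    static void unreference(void)
    {
        /* must be called with struct_mutex held */
        if (--refs == 0)
            puts("object freed under the lock");
    }

    int main(void)
    {
        pthread_mutex_lock(&struct_mutex);
        /* ... modify object state (stride, tiling mode, ...) ... */
        unreference();                      /* before, not after, unlock */
        pthread_mutex_unlock(&struct_mutex);
        return 0;
    }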
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4ca82a025525..65be30dccc77 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -135,6 +135,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
 	if (general) {
 		dev_priv->int_tv_support = general->int_tv_support;
 		dev_priv->int_crt_support = general->int_crt_support;
+		dev_priv->lvds_use_ssc = general->enable_ssc;
+
+		if (dev_priv->lvds_use_ssc) {
+			if (IS_I855(dev_priv->dev))
+				dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
+			else
+				dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
+		}
 	}
 }
 
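Note: restated as a standalone function, the SSC reference-clock table added above is (values copied from the hunk; IS_I855() reduced to a plain flag for illustration):

    #include <stdio.h>

    static int lvds_ssc_freq_mhz(int is_i855, int ssc_freq_bit)
    {
        if (is_i855)
            return ssc_freq_bit ? 66 : 48;
        return ssc_freq_bit ? 100 : 96;
    }

    int main(void)
    {
        printf("i855, bit set:   %d MHz\n", lvds_ssc_freq_mhz(1, 1)); /* 66 */
        printf("9xx,  bit clear: %d MHz\n", lvds_ssc_freq_mhz(0, 0)); /* 96 */
        return 0;
    }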
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index bbdd72909a11..65b635ce28c8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -90,12 +90,12 @@ typedef struct {
 #define I9XX_DOT_MAX		 400000
 #define I9XX_VCO_MIN		1400000
 #define I9XX_VCO_MAX		2800000
-#define I9XX_N_MIN		      3
-#define I9XX_N_MAX		      8
+#define I9XX_N_MIN		      1
+#define I9XX_N_MAX		      6
 #define I9XX_M_MIN		     70
 #define I9XX_M_MAX		    120
 #define I9XX_M1_MIN		     10
-#define I9XX_M1_MAX		     20
+#define I9XX_M1_MAX		     22
 #define I9XX_M2_MIN		      5
 #define I9XX_M2_MAX		      9
 #define I9XX_P_SDVO_DAC_MIN	      5
@@ -189,9 +189,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
 	return limit;
 }
 
-/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-
-static void i8xx_clock(int refclk, intel_clock_t *clock)
+static void intel_clock(int refclk, intel_clock_t *clock)
 {
 	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
 	clock->p = clock->p1 * clock->p2;
@@ -199,25 +197,6 @@ static void i8xx_clock(int refclk, intel_clock_t *clock)
 	clock->dot = clock->vco / clock->p;
 }
 
-/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
-
-static void i9xx_clock(int refclk, intel_clock_t *clock)
-{
-	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
-	clock->p = clock->p1 * clock->p2;
-	clock->vco = refclk * clock->m / (clock->n + 2);
-	clock->dot = clock->vco / clock->p;
-}
-
-static void intel_clock(struct drm_device *dev, int refclk,
-			intel_clock_t *clock)
-{
-	if (IS_I9XX(dev))
-		i9xx_clock (refclk, clock);
-	else
-		i8xx_clock (refclk, clock);
-}
-
 /**
  * Returns whether any output on the specified pipe is of the specified type
  */
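Note: the consolidation works because the i8xx and i9xx helpers computed identical values; the surviving intel_clock() is m = 5*(m1+2) + (m2+2), p = p1*p2, vco = refclk*m/(n+2), dot = vco/p. A runnable worked example with illustrative divisors:

    #include <stdio.h>

    struct clock { int n, m1, m2, p1, p2, m, p, vco, dot; };

    static void intel_clock(int refclk, struct clock *c)
    {
        c->m   = 5 * (c->m1 + 2) + (c->m2 + 2);
        c->p   = c->p1 * c->p2;
        c->vco = refclk * c->m / (c->n + 2);
        c->dot = c->vco / c->p;
    }

    int main(void)
    {
        struct clock c = { .n = 4, .m1 = 18, .m2 = 7, .p1 = 2, .p2 = 5 };
        intel_clock(96000, &c);      /* 96 MHz refclk, expressed in kHz */
        /* m = 5*20 + 9 = 109; vco = 96000*109/6 = 1744000 kHz;
         * dot = 1744000/10 = 174400 kHz */
        printf("vco %d kHz, dot clock %d kHz\n", c.vco, c.dot);
        return 0;
    }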
@@ -238,7 +217,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
 	return false;
 }
 
-#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
+#define INTELPllInvalid(s)   do { DRM_DEBUG(s); return false; } while (0)
 /**
  * Returns whether the given set of divisors are valid for a given refclk with
  * the given connectors.
@@ -318,7 +297,7 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
 	     clock.p1 <= limit->p1.max; clock.p1++) {
 		int this_err;
 
-		intel_clock(dev, refclk, &clock);
+		intel_clock(refclk, &clock);
 
 		if (!intel_PLL_is_valid(crtc, &clock))
 			continue;
@@ -343,7 +322,7 @@ intel_wait_for_vblank(struct drm_device *dev)
 	udelay(20000);
 }
 
-static void
+static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		    struct drm_framebuffer *old_fb)
 {
@@ -361,11 +340,21 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
 	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
 	u32 dspcntr, alignment;
+	int ret;
 
 	/* no fb bound */
 	if (!crtc->fb) {
 		DRM_DEBUG("No FB bound\n");
-		return;
+		return 0;
+	}
+
+	switch (pipe) {
+	case 0:
+	case 1:
+		break;
+	default:
+		DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+		return -EINVAL;
 	}
 
 	intel_fb = to_intel_framebuffer(crtc->fb);
@@ -377,28 +366,30 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		alignment = 64 * 1024;
 		break;
 	case I915_TILING_X:
-		if (IS_I9XX(dev))
-			alignment = 1024 * 1024;
-		else
-			alignment = 512 * 1024;
+		/* pin() will align the object as required by fence */
+		alignment = 0;
 		break;
 	case I915_TILING_Y:
 		/* FIXME: Is this true? */
 		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
-		return;
+		return -EINVAL;
 	default:
 		BUG();
 	}
 
-	if (i915_gem_object_pin(intel_fb->obj, alignment))
-		return;
-
-	i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
-
-	Start = obj_priv->gtt_offset;
-	Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
-
-	I915_WRITE(dspstride, crtc->fb->pitch);
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_object_pin(intel_fb->obj, alignment);
+	if (ret != 0) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	ret = i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
+	if (ret != 0) {
+		i915_gem_object_unpin(intel_fb->obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
 
 	dspcntr = I915_READ(dspcntr_reg);
 	/* Mask out pixel format bits in case we change it */
@@ -419,11 +410,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		break;
 	default:
 		DRM_ERROR("Unknown color depth\n");
-		return;
+		i915_gem_object_unpin(intel_fb->obj);
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
 	}
 	I915_WRITE(dspcntr_reg, dspcntr);
 
+	Start = obj_priv->gtt_offset;
+	Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+
 	DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+	I915_WRITE(dspstride, crtc->fb->pitch);
 	if (IS_I965G(dev)) {
 		I915_WRITE(dspbase, Offset);
 		I915_READ(dspbase);
@@ -440,27 +437,24 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		intel_fb = to_intel_framebuffer(old_fb);
 		i915_gem_object_unpin(intel_fb->obj);
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	if (!dev->primary->master)
-		return;
+		return 0;
 
 	master_priv = dev->primary->master->driver_priv;
 	if (!master_priv->sarea_priv)
-		return;
+		return 0;
 
-	switch (pipe) {
-	case 0:
-		master_priv->sarea_priv->pipeA_x = x;
-		master_priv->sarea_priv->pipeA_y = y;
-		break;
-	case 1:
+	if (pipe) {
 		master_priv->sarea_priv->pipeB_x = x;
 		master_priv->sarea_priv->pipeB_y = y;
-		break;
-	default:
-		DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
-		break;
+	} else {
+		master_priv->sarea_priv->pipeA_x = x;
+		master_priv->sarea_priv->pipeA_y = y;
 	}
+
+	return 0;
 }
 
 
@@ -708,11 +702,11 @@ static int intel_panel_fitter_pipe (struct drm_device *dev)
 	return 1;
 }
 
-static void intel_crtc_mode_set(struct drm_crtc *crtc,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode,
-				int x, int y,
-				struct drm_framebuffer *old_fb)
+static int intel_crtc_mode_set(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode,
+			       int x, int y,
+			       struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -732,13 +726,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
 	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
 	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
-	int refclk;
+	int refclk, num_outputs = 0;
 	intel_clock_t clock;
 	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
 	bool ok, is_sdvo = false, is_dvo = false;
 	bool is_crt = false, is_lvds = false, is_tv = false;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
+	int ret;
 
 	drm_vblank_pre_modeset(dev, pipe);
 
@@ -768,9 +763,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 			is_crt = true;
 			break;
 		}
+
+		num_outputs++;
 	}
 
-	if (IS_I9XX(dev)) {
+	if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
+		refclk = dev_priv->lvds_ssc_freq * 1000;
+		DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000);
+	} else if (IS_I9XX(dev)) {
 		refclk = 96000;
 	} else {
 		refclk = 48000;
@@ -779,7 +779,7 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 	ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
 	if (!ok) {
 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
-		return;
+		return -EINVAL;
 	}
 
 	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
@@ -829,11 +829,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}
 
-	if (is_tv) {
+	if (is_sdvo && is_tv)
+		dpll |= PLL_REF_INPUT_TVCLKINBC;
+	else if (is_tv)
 		/* XXX: just matching BIOS for now */
 /*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
 		dpll |= 3;
-	}
+	else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2)
+		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
 
@@ -950,9 +953,13 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 	I915_WRITE(dspcntr_reg, dspcntr);
 
 	/* Flush the plane changes */
-	intel_pipe_set_base(crtc, x, y, old_fb);
+	ret = intel_pipe_set_base(crtc, x, y, old_fb);
+	if (ret != 0)
+		return ret;
 
 	drm_vblank_post_modeset(dev, pipe);
+
+	return 0;
 }
 
 /** Loads the palette/gamma unit for the CRTC with the prepared values */
@@ -1001,6 +1008,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 		temp = CURSOR_MODE_DISABLE;
 		addr = 0;
 		bo = NULL;
+		mutex_lock(&dev->struct_mutex);
 		goto finish;
 	}
 
@@ -1023,18 +1031,19 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	}
 
 	/* we only need to pin inside GTT if cursor is non-phy */
+	mutex_lock(&dev->struct_mutex);
 	if (!dev_priv->cursor_needs_physical) {
 		ret = i915_gem_object_pin(bo, PAGE_SIZE);
 		if (ret) {
 			DRM_ERROR("failed to pin cursor bo\n");
-			goto fail;
+			goto fail_locked;
 		}
 		addr = obj_priv->gtt_offset;
 	} else {
 		ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
 		if (ret) {
 			DRM_ERROR("failed to attach phys object\n");
-			goto fail;
+			goto fail_locked;
 		}
 		addr = obj_priv->phys_obj->handle->busaddr;
 	}
@@ -1054,10 +1063,9 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 			i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
 		} else
 			i915_gem_object_unpin(intel_crtc->cursor_bo);
-		mutex_lock(&dev->struct_mutex);
 		drm_gem_object_unreference(intel_crtc->cursor_bo);
-		mutex_unlock(&dev->struct_mutex);
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	intel_crtc->cursor_addr = addr;
 	intel_crtc->cursor_bo = bo;
@@ -1065,6 +1073,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	return 0;
 fail:
 	mutex_lock(&dev->struct_mutex);
+fail_locked:
 	drm_gem_object_unreference(bo);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
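Note: the fail/fail_locked pair is the standard two-label unwind: paths that do not yet hold struct_mutex jump to fail (which takes it), paths that already hold it jump to fail_locked, and both converge on the same cleanup. A compilable pthreads sketch of that shape, names hypothetical:

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int cursor_set(int bad_size, int pin_fails)
    {
        int ret;

        if (bad_size) {
            ret = -ENOMEM;
            goto fail;               /* lock not held on this path */
        }

        pthread_mutex_lock(&lock);
        if (pin_fails) {
            ret = -EINVAL;
            goto fail_locked;        /* lock already held here */
        }
        pthread_mutex_unlock(&lock);
        return 0;

    fail:
        pthread_mutex_lock(&lock);
    fail_locked:
        /* drop the buffer reference here, under the lock */
        pthread_mutex_unlock(&lock);
        return ret;
    }

    int main(void) { return cursor_set(0, 1) == -EINVAL ? 0 : 1; }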
@@ -1292,7 +1301,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 		}
 
 		/* XXX: Handle the 100Mhz refclk */
-		i9xx_clock(96000, &clock);
+		intel_clock(96000, &clock);
 	} else {
 		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
 
@@ -1304,9 +1313,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 		if ((dpll & PLL_REF_INPUT_MASK) ==
 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
 			/* XXX: might not be 66MHz */
-			i8xx_clock(66000, &clock);
+			intel_clock(66000, &clock);
 		} else
-			i8xx_clock(48000, &clock);
+			intel_clock(48000, &clock);
 	} else {
 		if (dpll & PLL_P1_DIVIDE_BY_TWO)
 			clock.p1 = 2;
@@ -1319,7 +1328,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 			else
 				clock.p2 = 2;
 
-			i8xx_clock(48000, &clock);
+			intel_clock(48000, &clock);
 		}
 	}
 
@@ -1598,7 +1607,9 @@ intel_user_framebuffer_create(struct drm_device *dev,
 
 	ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
 	if (ret) {
+		mutex_lock(&dev->struct_mutex);
 		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
 		return NULL;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index afd1217b8a02..b7f0ebe9f810 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -473,7 +473,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 	ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
 	if (ret) {
 		DRM_ERROR("failed to allocate fb.\n");
-		goto out_unref;
+		goto out_unpin;
 	}
 
 	list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
@@ -484,7 +484,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 	info = framebuffer_alloc(sizeof(struct intelfb_par), device);
 	if (!info) {
 		ret = -ENOMEM;
-		goto out_unref;
+		goto out_unpin;
 	}
 
 	par = info->par;
@@ -513,7 +513,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 			   size);
 	if (!info->screen_base) {
 		ret = -ENOSPC;
-		goto out_unref;
+		goto out_unpin;
 	}
 	info->screen_size = size;
 
@@ -608,6 +608,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 	mutex_unlock(&dev->struct_mutex);
 	return 0;
 
+out_unpin:
+	i915_gem_object_unpin(fbo);
 out_unref:
 	drm_gem_object_unreference(fbo);
 	mutex_unlock(&dev->struct_mutex);
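Note: redirecting these error paths from out_unref to the new out_unpin label keeps the unwind ladder in reverse order of acquisition: failures after the pin both unpin and unreference, while failures before it fall through to unreference only. A toy version of the ladder:

    #include <stdio.h>

    static int pinned, refs;

    static int fb_create(int fail_early, int fail_late)
    {
        refs = 1;                    /* allocate + reference the object */
        if (fail_early)
            goto out_unref;          /* nothing pinned yet */
        pinned = 1;                  /* pin it */
        if (fail_late)
            goto out_unpin;          /* must undo the pin first */
        return 0;

    out_unpin:
        pinned = 0;
    out_unref:
        refs = 0;
        return -1;
    }

    int main(void)
    {
        fb_create(0, 1);
        printf("pinned=%d refs=%d\n", pinned, refs);   /* both back to 0 */
        return 0;
    }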
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 6d4f91265354..0d211af98854 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -481,8 +481,6 @@ void intel_lvds_init(struct drm_device *dev)
 		if (dev_priv->panel_fixed_mode) {
 			dev_priv->panel_fixed_mode->type |=
 				DRM_MODE_TYPE_PREFERRED;
-			drm_mode_probed_add(connector,
-					    dev_priv->panel_fixed_mode);
 			goto out;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a30508b639ba..fbe6f3931b1b 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -193,7 +193,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
 
 #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
 /** Mapping of command numbers to names, for debug output */
-const static struct _sdvo_cmd_name {
+static const struct _sdvo_cmd_name {
 	u8 cmd;
 	char *name;
 } sdvo_cmd_names[] = {
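Note: "const static" is legal C (declaration specifiers may appear in any order), but gcc warns about a storage-class specifier that is not first (-Wold-style-declaration, enabled by -Wextra); the same fix is applied in intel_tv.c below. A minimal contrast:

    const static int bad  = 1;   /* legal C89/C99, but warned against */
    static const int good = 1;   /* canonical specifier order */

    int main(void) { return bad - good; }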
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index fbb35dc56f5c..56485d67369b 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -411,7 +411,7 @@ struct tv_mode {
  * These values account for -1s required.
  */
 
-const static struct tv_mode tv_modes[] = {
+static const struct tv_mode tv_modes[] = {
 	{
 		.name = "NTSC-M",
 		.clock = 107520,