Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c          |   8
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c          |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          |  15
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          | 191
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debugfs.c  |  93
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c   | 112
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c     |  15
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h          |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c     |  11
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c          |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c        |  23
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c        |  22
13 files changed, 443 insertions(+), 56 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a000cf02882..051134c56ae 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -713,18 +713,18 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
 		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
-		goto fail_batch_free;
+		goto fail_clip_free;
 	}
 
 	if (sarea_priv)
 		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
-fail_batch_free:
-	drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
 fail_clip_free:
 	drm_free(cliprects,
 		 cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
 		 DRM_MEM_DRIVER);
+fail_batch_free:
+	drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
 
 	return ret;
 }
@@ -1232,7 +1232,7 @@ int i915_driver_unload(struct drm_device *dev)
 	if (dev_priv->regs != NULL)
 		iounmap(dev_priv->regs);
 
-	intel_opregion_free(dev);
+	intel_opregion_free(dev, 0);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_modeset_cleanup(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6503e2210f6..98560e1e899 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -77,7 +77,7 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 		drm_irq_uninstall(dev);
 	}
 
-	intel_opregion_free(dev);
+	intel_opregion_free(dev, 1);
 
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3750d800304..25065923b8a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -446,6 +446,9 @@ struct drm_i915_gem_object {
 	uint32_t tiling_mode;
 	uint32_t stride;
 
+	/** Record of address bit 17 of each page at last unbind. */
+	long *bit_17;
+
 	/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
 	uint32_t agp_type;
 
@@ -635,9 +638,13 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_object_get_pages(struct drm_gem_object *obj);
+void i915_gem_object_put_pages(struct drm_gem_object *obj);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
 
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -667,12 +674,12 @@ extern int i915_restore_state(struct drm_device *dev);
 #ifdef CONFIG_ACPI
 /* i915_opregion.c */
 extern int intel_opregion_init(struct drm_device *dev, int resume);
-extern void intel_opregion_free(struct drm_device *dev);
+extern void intel_opregion_free(struct drm_device *dev, int suspend);
 extern void opregion_asle_intr(struct drm_device *dev);
 extern void opregion_enable_asle(struct drm_device *dev);
 #else
 static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
-static inline void intel_opregion_free(struct drm_device *dev) { return; }
+static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
 static inline void opregion_asle_intr(struct drm_device *dev) { return; }
 static inline void opregion_enable_asle(struct drm_device *dev) { return; }
 #endif
@@ -780,7 +787,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 		       (dev)->pci_device == 0x2A42 || \
 		       (dev)->pci_device == 0x2E02 || \
 		       (dev)->pci_device == 0x2E12 || \
-		       (dev)->pci_device == 0x2E22)
+		       (dev)->pci_device == 0x2E22 || \
+		       (dev)->pci_device == 0x2E32)
 
 #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
 
@@ -789,6 +797,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
 		     (dev)->pci_device == 0x2E12 || \
 		     (dev)->pci_device == 0x2E22 || \
+		     (dev)->pci_device == 0x2E32 || \
 		     IS_GM45(dev))
 
 #define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1449b452cc6..ee896d91c5b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,8 +43,6 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 						     uint64_t offset,
 						     uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_pages(struct drm_gem_object *obj);
-static void i915_gem_object_put_pages(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 				       unsigned alignment);
@@ -143,15 +141,27 @@ fast_shmem_read(struct page **pages,
 		int length)
 {
 	char __iomem *vaddr;
-	int ret;
+	int unwritten;
 
 	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
 	if (vaddr == NULL)
 		return -ENOMEM;
-	ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
 	kunmap_atomic(vaddr, KM_USER0);
 
-	return ret;
+	if (unwritten)
+		return -EFAULT;
+
+	return 0;
+}
+
+static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+{
+	drm_i915_private_t *dev_priv = obj->dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+		obj_priv->tiling_mode != I915_TILING_NONE;
 }
 
 static inline int
@@ -181,6 +191,64 @@ slow_shmem_copy(struct page *dst_page,
 	return 0;
 }
 
+static inline int
+slow_shmem_bit17_copy(struct page *gpu_page,
+		      int gpu_offset,
+		      struct page *cpu_page,
+		      int cpu_offset,
+		      int length,
+		      int is_read)
+{
+	char *gpu_vaddr, *cpu_vaddr;
+
+	/* Use the unswizzled path if this page isn't affected. */
+	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
+		if (is_read)
+			return slow_shmem_copy(cpu_page, cpu_offset,
+					       gpu_page, gpu_offset, length);
+		else
+			return slow_shmem_copy(gpu_page, gpu_offset,
+					       cpu_page, cpu_offset, length);
+	}
+
+	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
+	if (gpu_vaddr == NULL)
+		return -ENOMEM;
+
+	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
+	if (cpu_vaddr == NULL) {
+		kunmap_atomic(gpu_vaddr, KM_USER0);
+		return -ENOMEM;
+	}
+
+	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
+	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
+	 */
+	while (length > 0) {
+		int cacheline_end = ALIGN(gpu_offset + 1, 64);
+		int this_length = min(cacheline_end - gpu_offset, length);
+		int swizzled_gpu_offset = gpu_offset ^ 64;
+
+		if (is_read) {
+			memcpy(cpu_vaddr + cpu_offset,
+			       gpu_vaddr + swizzled_gpu_offset,
+			       this_length);
+		} else {
+			memcpy(gpu_vaddr + swizzled_gpu_offset,
+			       cpu_vaddr + cpu_offset,
+			       this_length);
+		}
+		cpu_offset += this_length;
+		gpu_offset += this_length;
+		length -= this_length;
+	}
+
+	kunmap_atomic(cpu_vaddr, KM_USER1);
+	kunmap_atomic(gpu_vaddr, KM_USER0);
+
+	return 0;
+}
+
 /**
  * This is the fast shmem pread path, which attempts to copy_from_user directly
  * from the backing pages of the object to the user's address space. On a
@@ -269,6 +337,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	int page_length;
 	int ret;
 	uint64_t data_ptr = args->data_ptr;
+	int do_bit17_swizzling;
 
 	remain = args->size;
 
@@ -286,13 +355,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
 	down_read(&mm->mmap_sem);
 	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
-				      num_pages, 0, 0, user_pages, NULL);
+				      num_pages, 1, 0, user_pages, NULL);
 	up_read(&mm->mmap_sem);
 	if (pinned_pages < num_pages) {
 		ret = -EFAULT;
 		goto fail_put_user_pages;
 	}
 
+	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
 	mutex_lock(&dev->struct_mutex);
 
 	ret = i915_gem_object_get_pages(obj);
@@ -327,11 +398,20 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
-		ret = slow_shmem_copy(user_pages[data_page_index],
-				      data_page_offset,
-				      obj_priv->pages[shmem_page_index],
-				      shmem_page_offset,
-				      page_length);
+		if (do_bit17_swizzling) {
+			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+						    shmem_page_offset,
+						    user_pages[data_page_index],
+						    data_page_offset,
+						    page_length,
+						    1);
+		} else {
+			ret = slow_shmem_copy(user_pages[data_page_index],
+					      data_page_offset,
+					      obj_priv->pages[shmem_page_index],
+					      shmem_page_offset,
+					      page_length);
+		}
 		if (ret)
 			goto fail_put_pages;
 
@@ -383,9 +463,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
-	if (ret != 0)
-		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+	if (i915_gem_object_needs_bit17_swizzle(obj)) {
+		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+	} else {
+		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+		if (ret != 0)
+			ret = i915_gem_shmem_pread_slow(dev, obj, args,
+							file_priv);
+	}
 
 	drm_gem_object_unreference(obj);
 
@@ -727,6 +812,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	int page_length;
 	int ret;
 	uint64_t data_ptr = args->data_ptr;
+	int do_bit17_swizzling;
 
 	remain = args->size;
 
@@ -751,6 +837,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		goto fail_put_user_pages;
 	}
 
+	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
 	mutex_lock(&dev->struct_mutex);
 
 	ret = i915_gem_object_get_pages(obj);
@@ -785,11 +873,20 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
-		ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
-				      shmem_page_offset,
-				      user_pages[data_page_index],
-				      data_page_offset,
-				      page_length);
+		if (do_bit17_swizzling) {
+			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+						    shmem_page_offset,
+						    user_pages[data_page_index],
+						    data_page_offset,
+						    page_length,
+						    0);
+		} else {
+			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+					      shmem_page_offset,
+					      user_pages[data_page_index],
+					      data_page_offset,
+					      page_length);
+		}
 		if (ret)
 			goto fail_put_pages;
 
@@ -854,6 +951,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
 						       file_priv);
 		}
+	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
+		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
 	} else {
 		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
 		if (ret == -EFAULT) {
@@ -1285,7 +1384,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-static void
+void
 i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1297,6 +1396,9 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 	if (--obj_priv->pages_refcount != 0)
 		return;
 
+	if (obj_priv->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_save_bit_17_swizzle(obj);
+
 	for (i = 0; i < page_count; i++)
 		if (obj_priv->pages[i] != NULL) {
 			if (obj_priv->dirty)
@@ -1494,8 +1596,19 @@ i915_gem_retire_request(struct drm_device *dev,
 
 		if (obj->write_domain != 0)
 			i915_gem_object_move_to_flushing(obj);
-		else
+		else {
+			/* Take a reference on the object so it won't be
+			 * freed while the spinlock is held.  The list
+			 * protection for this spinlock is safe when breaking
+			 * the lock like this since the next thing we do
+			 * is just get the head of the list again.
+			 */
+			drm_gem_object_reference(obj);
 			i915_gem_object_move_to_inactive(obj);
+			spin_unlock(&dev_priv->mm.active_list_lock);
+			drm_gem_object_unreference(obj);
+			spin_lock(&dev_priv->mm.active_list_lock);
+		}
 	}
 out:
 	spin_unlock(&dev_priv->mm.active_list_lock);
@@ -1884,7 +1997,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 	return ret;
 }
 
-static int
+int
 i915_gem_object_get_pages(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1922,6 +2035,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 		}
 		obj_priv->pages[i] = page;
 	}
+
+	if (obj_priv->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_do_bit_17_swizzle(obj);
+
 	return 0;
 }
 
@@ -3002,13 +3119,13 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 			drm_free(*relocs, reloc_count * sizeof(**relocs),
 				 DRM_MEM_DRIVER);
 			*relocs = NULL;
-			return ret;
+			return -EFAULT;
 		}
 
 		reloc_index += exec_list[i].relocation_count;
 	}
 
-	return ret;
+	return 0;
 }
 
 static int
@@ -3017,23 +3134,28 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
 			    struct drm_i915_gem_relocation_entry *relocs)
 {
 	uint32_t reloc_count = 0, i;
-	int ret;
+	int ret = 0;
 
 	for (i = 0; i < buffer_count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
+		int unwritten;
 
 		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
 
-		if (ret == 0) {
-			ret = copy_to_user(user_relocs,
-					   &relocs[reloc_count],
-					   exec_list[i].relocation_count *
-					   sizeof(*relocs));
+		unwritten = copy_to_user(user_relocs,
+					 &relocs[reloc_count],
+					 exec_list[i].relocation_count *
+					 sizeof(*relocs));
+
+		if (unwritten) {
+			ret = -EFAULT;
+			goto err;
 		}
 
 		reloc_count += exec_list[i].relocation_count;
 	}
 
+err:
 	drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
 
 	return ret;
@@ -3243,7 +3365,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	exec_offset = exec_list[args->buffer_count - 1].offset;
 
 #if WATCH_EXEC
-	i915_gem_dump_object(object_list[args->buffer_count - 1],
+	i915_gem_dump_object(batch_obj,
 			      args->batch_len,
 			      __func__,
 			      ~0);
@@ -3308,10 +3430,12 @@ err:
 				   (uintptr_t) args->buffers_ptr,
 				   exec_list,
 				   sizeof(*exec_list) * args->buffer_count);
-		if (ret)
+		if (ret) {
+			ret = -EFAULT;
 			DRM_ERROR("failed to copy %d exec entries "
 				  "back to user (%d)\n",
 				  args->buffer_count, ret);
+		}
 	}
 
 	/* Copy the updated relocations out regardless of current error
@@ -3593,6 +3717,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	i915_gem_free_mmap_offset(obj);
 
 	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+	kfree(obj_priv->bit_17);
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
 
@@ -3962,8 +4087,10 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	dev_priv->mm.suspended = 0;
 
 	ret = i915_gem_init_ringbuffer(dev);
-	if (ret != 0)
+	if (ret != 0) {
+		mutex_unlock(&dev->struct_mutex);
 		return ret;
+	}
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index a1ac0c5e730..986f1082c59 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -234,6 +234,96 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+{
+	int page, i;
+	uint32_t *mem;
+
+	for (page = 0; page < page_count; page++) {
+		mem = kmap(pages[page]);
+		for (i = 0; i < PAGE_SIZE; i += 4)
+			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
+		kunmap(pages[page]);
+	}
+}
+
+static int i915_batchbuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+		obj = obj_priv->obj;
+		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
+			ret = i915_gem_object_get_pages(obj);
+			if (ret) {
+				DRM_ERROR("Failed to get pages: %d\n", ret);
+				spin_unlock(&dev_priv->mm.active_list_lock);
+				return ret;
+			}
+
+			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
+			i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
+
+			i915_gem_object_put_pages(obj);
+		}
+	}
+
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
+	return 0;
+}
+
+static int i915_ringbuffer_data(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u8 *virt;
+	uint32_t *ptr, off;
+
+	if (!dev_priv->ring.ring_obj) {
+		seq_printf(m, "No ringbuffer setup\n");
+		return 0;
+	}
+
+	virt = dev_priv->ring.virtual_start;
+
+	for (off = 0; off < dev_priv->ring.Size; off += 4) {
+		ptr = (uint32_t *)(virt + off);
+		seq_printf(m, "%08x :  %08x\n", off, *ptr);
+	}
+
+	return 0;
+}
+
+static int i915_ringbuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned int head, tail, mask;
+
+	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+	tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+	mask = dev_priv->ring.tail_mask;
+
+	seq_printf(m, "RingHead :  %08x\n", head);
+	seq_printf(m, "RingTail :  %08x\n", tail);
+	seq_printf(m, "RingMask :  %08x\n", mask);
+	seq_printf(m, "RingSize :  %08lx\n", dev_priv->ring.Size);
+	seq_printf(m, "Acthd :  %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+
+	return 0;
+}
+
+
 static struct drm_info_list i915_gem_debugfs_list[] = {
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
@@ -243,6 +333,9 @@ static struct drm_info_list i915_gem_debugfs_list[] = {
243 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 333 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
244 {"i915_gem_interrupt", i915_interrupt_info, 0}, 334 {"i915_gem_interrupt", i915_interrupt_info, 0},
245 {"i915_gem_hws", i915_hws_info, 0}, 335 {"i915_gem_hws", i915_hws_info, 0},
336 {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
337 {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
338 {"i915_batchbuffers", i915_batchbuffer_info, 0},
246}; 339};
247#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) 340#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
248 341
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 6be3f927c86..52a059354e8 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,6 +25,8 @@
  *
  */
 
+#include "linux/string.h"
+#include "linux/bitops.h"
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -127,8 +129,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 			swizzle_y = I915_BIT_6_SWIZZLE_9_11;
 		} else {
 			/* Bit 17 swizzling by the CPU in addition. */
-			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+			swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+			swizzle_y = I915_BIT_6_SWIZZLE_9_17;
 		}
 		break;
 	}
@@ -281,13 +283,25 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	mutex_lock(&dev->struct_mutex);
 
 	if (args->tiling_mode == I915_TILING_NONE) {
-		obj_priv->tiling_mode = I915_TILING_NONE;
 		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
 	} else {
 		if (args->tiling_mode == I915_TILING_X)
 			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
 		else
 			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+
+		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
+		 * from aborting the application on sw fallbacks to bit 17,
+		 * and we use the pread/pwrite bit17 paths to swizzle for it.
+		 * If there was a user that was relying on the swizzle
+		 * information for drm_intel_bo_map()ed reads/writes this would
+		 * break it, but we don't have any of those.
+		 */
+		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
 		/* If we can't handle the swizzling, make it untiled. */
 		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
 			args->tiling_mode = I915_TILING_NONE;
@@ -354,8 +368,100 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 		DRM_ERROR("unknown tiling mode\n");
 	}
 
+	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
+
+/**
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static int
+i915_gem_swizzle_page(struct page *page)
+{
+	char *vaddr;
+	int i;
+	char temp[64];
+
+	vaddr = kmap(page);
+	if (vaddr == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < PAGE_SIZE; i += 128) {
+		memcpy(temp, &vaddr[i], 64);
+		memcpy(&vaddr[i], &vaddr[i + 64], 64);
+		memcpy(&vaddr[i + 64], temp, 64);
+	}
+
+	kunmap(page);
+
+	return 0;
+}
+
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int page_count = obj->size >> PAGE_SHIFT;
+	int i;
+
+	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+		return;
+
+	if (obj_priv->bit_17 == NULL)
+		return;
+
+	for (i = 0; i < page_count; i++) {
+		char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+		if ((new_bit_17 & 0x1) !=
+		    (test_bit(i, obj_priv->bit_17) != 0)) {
+			int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
+			if (ret != 0) {
+				DRM_ERROR("Failed to swizzle page\n");
+				return;
+			}
+			set_page_dirty(obj_priv->pages[i]);
+		}
+	}
+}
+
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int page_count = obj->size >> PAGE_SHIFT;
+	int i;
+
+	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+		return;
+
+	if (obj_priv->bit_17 == NULL) {
+		obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+					   sizeof(long), GFP_KERNEL);
+		if (obj_priv->bit_17 == NULL) {
+			DRM_ERROR("Failed to allocate memory for bit 17 "
+				  "record\n");
+			return;
+		}
+	}
+
+	for (i = 0; i < page_count; i++) {
+		if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
+			__set_bit(i, obj_priv->bit_17);
+		else
+			__clear_bit(i, obj_priv->bit_17);
+	}
+}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ee7ce7b78cf..98bb4c878c4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -406,7 +406,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
 	drm_i915_irq_emit_t *emit = data;
 	int result;
 
-	if (!dev_priv) {
+	if (!dev_priv || !dev_priv->ring.virtual_start) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 69427722d20..dc425e74a26 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -370,11 +370,8 @@ int intel_opregion_init(struct drm_device *dev, int resume)
 	if (mboxes & MBOX_ACPI) {
 		DRM_DEBUG("Public ACPI methods supported\n");
 		opregion->acpi = base + OPREGION_ACPI_OFFSET;
-		if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		if (drm_core_check_feature(dev, DRIVER_MODESET))
 			intel_didl_outputs(dev);
-			if (!resume)
-				acpi_video_register();
-		}
 	} else {
 		DRM_DEBUG("Public ACPI methods not supported\n");
 		err = -ENOTSUPP;
@@ -389,8 +386,13 @@ int intel_opregion_init(struct drm_device *dev, int resume)
 	if (mboxes & MBOX_ASLE) {
 		DRM_DEBUG("ASLE supported\n");
 		opregion->asle = base + OPREGION_ASLE_OFFSET;
+		opregion_enable_asle(dev);
 	}
 
+	if (!resume)
+		acpi_video_register();
+
+
 	/* Notify BIOS we are ready to handle ACPI video ext notifs.
 	 * Right now, all the events are handled by the ACPI video module.
 	 * We don't actually need to do anything with them. */
@@ -408,7 +410,7 @@ err_out:
 	return err;
 }
 
-void intel_opregion_free(struct drm_device *dev)
+void intel_opregion_free(struct drm_device *dev, int suspend)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
@@ -416,6 +418,9 @@ void intel_opregion_free(struct drm_device *dev)
 	if (!opregion->enabled)
 		return;
 
+	if (!suspend)
+		acpi_video_exit();
+
 	opregion->acpi->drdy = 0;
 
 	system_opregion = NULL;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e805b590ae7..52119473226 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1446,6 +1446,7 @@
 #define   DISPPLANE_NO_LINE_DOUBLE		0
 #define   DISPPLANE_STEREO_POLARITY_FIRST	0
 #define   DISPPLANE_STEREO_POLARITY_SECOND	(1<<18)
+#define   DISPPLANE_TILED			(1<<10)
 #define DSPAADDR			0x70184
 #define DSPASTRIDE			0x70188
 #define DSPAPOS				0x7018C /* reserved */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 64773ce5296..bdcda36953b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -367,6 +367,7 @@ static const intel_limit_t intel_limits[] = {
 	.p1  = { .min = I9XX_P1_MIN,		.max = I9XX_P1_MAX },
 	.p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
 		 .p2_slow = I9XX_P2_SDVO_DAC_SLOW,	.p2_fast = I9XX_P2_SDVO_DAC_FAST },
+	.find_pll = intel_find_best_PLL,
     },
     { /* INTEL_LIMIT_IGD_LVDS */
 	.dot = { .min = I9XX_DOT_MIN,		.max = I9XX_DOT_MAX },
@@ -380,6 +381,7 @@ static const intel_limit_t intel_limits[] = {
 	/* IGD only supports single-channel mode. */
 	.p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
 		 .p2_slow = I9XX_P2_LVDS_SLOW,	.p2_fast = I9XX_P2_LVDS_SLOW },
+	.find_pll = intel_find_best_PLL,
     },
 
 };
@@ -655,6 +657,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR);
 	int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
 	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+	int dsptileoff = (pipe == 0 ? DSPATILEOFF : DSPBTILEOFF);
 	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
 	u32 dspcntr, alignment;
 	int ret;
@@ -731,6 +734,13 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
+	if (IS_I965G(dev)) {
+		if (obj_priv->tiling_mode != I915_TILING_NONE)
+			dspcntr |= DISPPLANE_TILED;
+		else
+			dspcntr &= ~DISPPLANE_TILED;
+	}
+
 	I915_WRITE(dspcntr_reg, dspcntr);
 
 	Start = obj_priv->gtt_offset;
@@ -743,6 +753,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		I915_READ(dspbase);
 		I915_WRITE(dspsurf, Start);
 		I915_READ(dspsurf);
+		I915_WRITE(dsptileoff, (y << 16) | x);
 	} else {
 		I915_WRITE(dspbase, Start + Offset);
 		I915_READ(dspbase);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index b7f0ebe9f81..3e094beecb9 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -864,8 +864,8 @@ static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
 
 static struct sysrq_key_op sysrq_intelfb_restore_op = {
 	.handler = intelfb_sysrq,
-	.help_msg = "force fb",
-	.action_msg = "force restore of fb console",
+	.help_msg = "force-fb(G)",
+	.action_msg = "Restore framebuffer console",
 };
 
 int intelfb_probe(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index b06a4a3ff08..55037422538 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -38,7 +38,7 @@
 struct intel_hdmi_priv {
 	u32 sdvox_reg;
 	u32 save_SDVOX;
-	int has_hdmi_sink;
+	bool has_hdmi_sink;
 };
 
 static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -128,6 +128,22 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
 	return true;
 }
 
+static void
+intel_hdmi_sink_detect(struct drm_connector *connector)
+{
+	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+	struct edid *edid = NULL;
+
+	edid = drm_get_edid(&intel_output->base,
+			    &intel_output->ddc_bus->adapter);
+	if (edid != NULL) {
+		hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+		kfree(edid);
+		intel_output->base.display_info.raw_edid = NULL;
+	}
+}
+
 static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector)
 {
@@ -158,9 +174,10 @@ intel_hdmi_detect(struct drm_connector *connector)
 		return connector_status_unknown;
 	}
 
-	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0)
+	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) {
+		intel_hdmi_sink_detect(connector);
 		return connector_status_connected;
-	else
+	} else
 		return connector_status_disconnected;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 7b31f55f55c..9913651c1e1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1357,6 +1357,23 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
 	intel_sdvo_read_response(intel_output, &response, 2);
 }
 
+static void
+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+{
+	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct edid *edid = NULL;
+
+	intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
+	edid = drm_get_edid(&intel_output->base,
+			    &intel_output->ddc_bus->adapter);
+	if (edid != NULL) {
+		sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+		kfree(edid);
+		intel_output->base.display_info.raw_edid = NULL;
+	}
+}
+
 static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
 {
 	u8 response[2];
@@ -1371,9 +1388,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return connector_status_unknown;
 
-	if ((response[0] != 0) || (response[1] != 0))
+	if ((response[0] != 0) || (response[1] != 0)) {
+		intel_sdvo_hdmi_sink_detect(connector);
 		return connector_status_connected;
-	else
+	} else
 		return connector_status_disconnected;
 }
 