author	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-14 16:16:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-14 16:16:40 -0400
commit	b897e6fbc49dd84b2634bca664344d503b907ce9 (patch)
tree	d3d55c07e0122fe42d88b01033454666b2325002
parent	6f66cbc63081fd70e3191b4dbb796746780e5ae1 (diff)
parent	68c84342171034120c8a1f6dfb8ef51b14250f11 (diff)
Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel
* 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel:
  drm/i915: fix scheduling while holding the new active list spinlock
  drm/i915: Allow tiling of objects with bit 17 swizzling by the CPU.
  drm/i915: Correctly set the write flag for get_user_pages in pread.
  drm/i915: Fix use of uninitialized var in 40a5f0de
  drm/i915: indicate framebuffer restore key in SysRq help message
  drm/i915: sync hdmi detection by hdmi identifier with 2D
  drm/i915: Fix a mismerge of the IGD patch (new .find_pll hooks missed)
  drm/i915: Implement batch and ring buffer dumping
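Much of this pull revolves around bit-17 swizzling: on the affected chipsets the GPU folds bit 17 of a page's physical address into the bit-6 swizzle, so any CPU copy of a tiled object has to compensate per page. As a rough orientation only (this helper is hypothetical and not part of the patch), the addressing rule implied by the I915_BIT_6_SWIZZLE_9_10_17 name is:

static unsigned long
swizzle_offset_9_10_17(unsigned long offset, unsigned long page_phys)
{
	/* Bit 6 of the linear offset is XORed with bits 9 and 10 of the
	 * offset and, on these chipsets, additionally with bit 17 of the
	 * page's physical address; the new pread/pwrite paths below
	 * compensate for the bit-17 term when copying with the CPU. */
	unsigned long bit6 = ((offset >> 9) ^ (offset >> 10) ^
			      (page_phys >> 17)) & 1;

	return offset ^ (bit6 << 6);
}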
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	7
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	187
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_debugfs.c	93
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_tiling.c	111
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	2
-rw-r--r--	drivers/gpu/drm/i915/intel_fb.c	4
-rw-r--r--	drivers/gpu/drm/i915/intel_hdmi.c	23
-rw-r--r--	drivers/gpu/drm/i915/intel_sdvo.c	22
-rw-r--r--	include/drm/i915_drm.h	3
9 files changed, 412 insertions, 40 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3750d8003048..473a8f7fbdb5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -446,6 +446,9 @@ struct drm_i915_gem_object {
 	uint32_t tiling_mode;
 	uint32_t stride;
 
+	/** Record of address bit 17 of each page at last unbind. */
+	long *bit_17;
+
 	/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
 	uint32_t agp_type;
 
@@ -635,9 +638,13 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_object_get_pages(struct drm_gem_object *obj);
+void i915_gem_object_put_pages(struct drm_gem_object *obj);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
 
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1449b452cc63..4642115902d6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,8 +43,6 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 							  uint64_t offset,
 							  uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_pages(struct drm_gem_object *obj);
-static void i915_gem_object_put_pages(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 					   unsigned alignment);
@@ -143,15 +141,27 @@ fast_shmem_read(struct page **pages,
 		 int length)
 {
 	char __iomem *vaddr;
-	int ret;
+	int unwritten;
 
 	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
 	if (vaddr == NULL)
 		return -ENOMEM;
-	ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
 	kunmap_atomic(vaddr, KM_USER0);
 
-	return ret;
+	if (unwritten)
+		return -EFAULT;
+
+	return 0;
+}
+
+static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+{
+	drm_i915_private_t *dev_priv = obj->dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+		obj_priv->tiling_mode != I915_TILING_NONE;
 }
 
 static inline int
@@ -181,6 +191,64 @@ slow_shmem_copy(struct page *dst_page,
 	return 0;
 }
 
+static inline int
+slow_shmem_bit17_copy(struct page *gpu_page,
+		      int gpu_offset,
+		      struct page *cpu_page,
+		      int cpu_offset,
+		      int length,
+		      int is_read)
+{
+	char *gpu_vaddr, *cpu_vaddr;
+
+	/* Use the unswizzled path if this page isn't affected. */
+	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
+		if (is_read)
+			return slow_shmem_copy(cpu_page, cpu_offset,
+					       gpu_page, gpu_offset, length);
+		else
+			return slow_shmem_copy(gpu_page, gpu_offset,
+					       cpu_page, cpu_offset, length);
+	}
+
+	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
+	if (gpu_vaddr == NULL)
+		return -ENOMEM;
+
+	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
+	if (cpu_vaddr == NULL) {
+		kunmap_atomic(gpu_vaddr, KM_USER0);
+		return -ENOMEM;
+	}
+
+	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
+	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
+	 */
+	while (length > 0) {
+		int cacheline_end = ALIGN(gpu_offset + 1, 64);
+		int this_length = min(cacheline_end - gpu_offset, length);
+		int swizzled_gpu_offset = gpu_offset ^ 64;
+
+		if (is_read) {
+			memcpy(cpu_vaddr + cpu_offset,
+			       gpu_vaddr + swizzled_gpu_offset,
+			       this_length);
+		} else {
+			memcpy(gpu_vaddr + swizzled_gpu_offset,
+			       cpu_vaddr + cpu_offset,
+			       this_length);
+		}
+		cpu_offset += this_length;
+		gpu_offset += this_length;
+		length -= this_length;
+	}
+
+	kunmap_atomic(cpu_vaddr, KM_USER1);
+	kunmap_atomic(gpu_vaddr, KM_USER0);
+
+	return 0;
+}
+
 /**
  * This is the fast shmem pread path, which attempts to copy_from_user directly
  * from the backing pages of the object to the user's address space. On a
@@ -269,6 +337,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	int page_length;
 	int ret;
 	uint64_t data_ptr = args->data_ptr;
+	int do_bit17_swizzling;
 
 	remain = args->size;
 
@@ -286,13 +355,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
 	down_read(&mm->mmap_sem);
 	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
-				      num_pages, 0, 0, user_pages, NULL);
+				      num_pages, 1, 0, user_pages, NULL);
 	up_read(&mm->mmap_sem);
 	if (pinned_pages < num_pages) {
 		ret = -EFAULT;
 		goto fail_put_user_pages;
 	}
 
+	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
 	mutex_lock(&dev->struct_mutex);
 
 	ret = i915_gem_object_get_pages(obj);
@@ -327,11 +398,20 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
-		ret = slow_shmem_copy(user_pages[data_page_index],
-				      data_page_offset,
-				      obj_priv->pages[shmem_page_index],
-				      shmem_page_offset,
-				      page_length);
+		if (do_bit17_swizzling) {
+			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+						    shmem_page_offset,
+						    user_pages[data_page_index],
+						    data_page_offset,
+						    page_length,
+						    1);
+		} else {
+			ret = slow_shmem_copy(user_pages[data_page_index],
+					      data_page_offset,
+					      obj_priv->pages[shmem_page_index],
+					      shmem_page_offset,
+					      page_length);
+		}
 		if (ret)
 			goto fail_put_pages;
 
@@ -383,9 +463,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
-	if (ret != 0)
+	if (i915_gem_object_needs_bit17_swizzle(obj)) {
 		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+	} else {
+		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+		if (ret != 0)
+			ret = i915_gem_shmem_pread_slow(dev, obj, args,
+							file_priv);
+	}
 
 	drm_gem_object_unreference(obj);
 
@@ -727,6 +812,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	int page_length;
 	int ret;
 	uint64_t data_ptr = args->data_ptr;
+	int do_bit17_swizzling;
 
 	remain = args->size;
 
@@ -751,6 +837,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		goto fail_put_user_pages;
 	}
 
+	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
 	mutex_lock(&dev->struct_mutex);
 
 	ret = i915_gem_object_get_pages(obj);
@@ -785,11 +873,20 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
-		ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
-				      shmem_page_offset,
-				      user_pages[data_page_index],
-				      data_page_offset,
-				      page_length);
+		if (do_bit17_swizzling) {
+			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+						    shmem_page_offset,
+						    user_pages[data_page_index],
+						    data_page_offset,
+						    page_length,
+						    0);
+		} else {
+			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+					      shmem_page_offset,
+					      user_pages[data_page_index],
+					      data_page_offset,
+					      page_length);
+		}
 		if (ret)
 			goto fail_put_pages;
 
@@ -854,6 +951,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
 						       file_priv);
 		}
+	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
+		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
 	} else {
 		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
 		if (ret == -EFAULT) {
@@ -1285,7 +1384,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-static void
+void
 i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1297,6 +1396,9 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 	if (--obj_priv->pages_refcount != 0)
 		return;
 
+	if (obj_priv->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_save_bit_17_swizzle(obj);
+
 	for (i = 0; i < page_count; i++)
 		if (obj_priv->pages[i] != NULL) {
 			if (obj_priv->dirty)
@@ -1494,8 +1596,19 @@ i915_gem_retire_request(struct drm_device *dev,
 
 		if (obj->write_domain != 0)
 			i915_gem_object_move_to_flushing(obj);
-		else
+		else {
+			/* Take a reference on the object so it won't be
+			 * freed while the spinlock is held. The list
+			 * protection for this spinlock is safe when breaking
+			 * the lock like this since the next thing we do
+			 * is just get the head of the list again.
+			 */
+			drm_gem_object_reference(obj);
 			i915_gem_object_move_to_inactive(obj);
+			spin_unlock(&dev_priv->mm.active_list_lock);
+			drm_gem_object_unreference(obj);
+			spin_lock(&dev_priv->mm.active_list_lock);
+		}
 	}
 out:
 	spin_unlock(&dev_priv->mm.active_list_lock);
@@ -1884,7 +1997,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 	return ret;
 }
 
-static int
+int
 i915_gem_object_get_pages(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1922,6 +2035,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 		}
 		obj_priv->pages[i] = page;
 	}
+
+	if (obj_priv->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_do_bit_17_swizzle(obj);
+
 	return 0;
 }
 
@@ -3002,13 +3119,13 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 			drm_free(*relocs, reloc_count * sizeof(**relocs),
 				 DRM_MEM_DRIVER);
 			*relocs = NULL;
-			return ret;
+			return -EFAULT;
 		}
 
 		reloc_index += exec_list[i].relocation_count;
 	}
 
-	return ret;
+	return 0;
 }
 
 static int
@@ -3017,23 +3134,28 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
 			    struct drm_i915_gem_relocation_entry *relocs)
 {
 	uint32_t reloc_count = 0, i;
-	int ret;
+	int ret = 0;
 
 	for (i = 0; i < buffer_count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
+		int unwritten;
 
 		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
 
-		if (ret == 0) {
-			ret = copy_to_user(user_relocs,
-					   &relocs[reloc_count],
-					   exec_list[i].relocation_count *
-					   sizeof(*relocs));
+		unwritten = copy_to_user(user_relocs,
+					 &relocs[reloc_count],
+					 exec_list[i].relocation_count *
+					 sizeof(*relocs));
+
+		if (unwritten) {
+			ret = -EFAULT;
+			goto err;
 		}
 
 		reloc_count += exec_list[i].relocation_count;
 	}
 
+err:
 	drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
 
 	return ret;
@@ -3243,7 +3365,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	exec_offset = exec_list[args->buffer_count - 1].offset;
 
 #if WATCH_EXEC
-	i915_gem_dump_object(object_list[args->buffer_count - 1],
+	i915_gem_dump_object(batch_obj,
 			      args->batch_len,
 			      __func__,
 			      ~0);
@@ -3308,10 +3430,12 @@ err:
 				   (uintptr_t) args->buffers_ptr,
 				   exec_list,
 				   sizeof(*exec_list) * args->buffer_count);
-		if (ret)
+		if (ret) {
+			ret = -EFAULT;
 			DRM_ERROR("failed to copy %d exec entries "
 				  "back to user (%d)\n",
 				  args->buffer_count, ret);
+		}
 	}
 
 	/* Copy the updated relocations out regardless of current error
@@ -3593,6 +3717,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	i915_gem_free_mmap_offset(obj);
 
 	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+	kfree(obj_priv->bit_17);
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index a1ac0c5e7307..986f1082c596 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -234,6 +234,96 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+{
+	int page, i;
+	uint32_t *mem;
+
+	for (page = 0; page < page_count; page++) {
+		mem = kmap(pages[page]);
+		for (i = 0; i < PAGE_SIZE; i += 4)
+			seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
+		kunmap(pages[page]);
+	}
+}
+
+static int i915_batchbuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+		obj = obj_priv->obj;
+		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
+			ret = i915_gem_object_get_pages(obj);
+			if (ret) {
+				DRM_ERROR("Failed to get pages: %d\n", ret);
+				spin_unlock(&dev_priv->mm.active_list_lock);
+				return ret;
+			}
+
+			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
+			i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
+
+			i915_gem_object_put_pages(obj);
+		}
+	}
+
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
+	return 0;
+}
+
+static int i915_ringbuffer_data(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u8 *virt;
+	uint32_t *ptr, off;
+
+	if (!dev_priv->ring.ring_obj) {
+		seq_printf(m, "No ringbuffer setup\n");
+		return 0;
+	}
+
+	virt = dev_priv->ring.virtual_start;
+
+	for (off = 0; off < dev_priv->ring.Size; off += 4) {
+		ptr = (uint32_t *)(virt + off);
+		seq_printf(m, "%08x : %08x\n", off, *ptr);
+	}
+
+	return 0;
+}
+
+static int i915_ringbuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned int head, tail, mask;
+
+	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+	tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+	mask = dev_priv->ring.tail_mask;
+
+	seq_printf(m, "RingHead : %08x\n", head);
+	seq_printf(m, "RingTail : %08x\n", tail);
+	seq_printf(m, "RingMask : %08x\n", mask);
+	seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
+	seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+
+	return 0;
+}
+
+
 static struct drm_info_list i915_gem_debugfs_list[] = {
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
@@ -243,6 +333,9 @@ static struct drm_info_list i915_gem_debugfs_list[] = {
243 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 333 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
244 {"i915_gem_interrupt", i915_interrupt_info, 0}, 334 {"i915_gem_interrupt", i915_interrupt_info, 0},
245 {"i915_gem_hws", i915_hws_info, 0}, 335 {"i915_gem_hws", i915_hws_info, 0},
336 {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
337 {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
338 {"i915_batchbuffers", i915_batchbuffer_info, 0},
246}; 339};
247#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) 340#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
248 341
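The three new entries are ordinary seq_file nodes, so they can be read like regular files once debugfs is mounted. A minimal reader sketch, assuming debugfs is mounted at /sys/kernel/debug and the card is DRM minor 0 (both assumptions about the local setup, not something this patch guarantees):

#include <stdio.h>

static void dump_i915_ring_info(void)
{
	char line[256];
	/* Path is an assumption: the debugfs mount point and card minor
	 * differ from system to system. */
	FILE *f = fopen("/sys/kernel/debug/dri/0/i915_ringbuffer_info", "r");

	if (f == NULL)
		return;
	while (fgets(line, sizeof(line), f) != NULL)
		fputs(line, stdout);
	fclose(f);
}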
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 6be3f927c86a..f27e523c764f 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,6 +25,8 @@
  *
  */
 
+#include "linux/string.h"
+#include "linux/bitops.h"
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -127,8 +129,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 			swizzle_y = I915_BIT_6_SWIZZLE_9_11;
 		} else {
 			/* Bit 17 swizzling by the CPU in addition. */
-			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+			swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+			swizzle_y = I915_BIT_6_SWIZZLE_9_17;
 		}
 		break;
 	}
@@ -288,6 +290,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
 	else
 		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+
+	/* Hide bit 17 swizzling from the user. This prevents old Mesa
+	 * from aborting the application on sw fallbacks to bit 17,
+	 * and we use the pread/pwrite bit17 paths to swizzle for it.
+	 * If there was a user that was relying on the swizzle
+	 * information for drm_intel_bo_map()ed reads/writes this would
+	 * break it, but we don't have any of those.
+	 */
+	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
 	/* If we can't handle the swizzling, make it untiled. */
 	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
 		args->tiling_mode = I915_TILING_NONE;
@@ -354,8 +369,100 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 		DRM_ERROR("unknown tiling mode\n");
 	}
 
+	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
+
+/**
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static int
+i915_gem_swizzle_page(struct page *page)
+{
+	char *vaddr;
+	int i;
+	char temp[64];
+
+	vaddr = kmap(page);
+	if (vaddr == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < PAGE_SIZE; i += 128) {
+		memcpy(temp, &vaddr[i], 64);
+		memcpy(&vaddr[i], &vaddr[i + 64], 64);
+		memcpy(&vaddr[i + 64], temp, 64);
+	}
+
+	kunmap(page);
+
+	return 0;
+}
+
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int page_count = obj->size >> PAGE_SHIFT;
+	int i;
+
+	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+		return;
+
+	if (obj_priv->bit_17 == NULL)
+		return;
+
+	for (i = 0; i < page_count; i++) {
+		char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+		if ((new_bit_17 & 0x1) !=
+		    (test_bit(i, obj_priv->bit_17) != 0)) {
+			int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
+			if (ret != 0) {
+				DRM_ERROR("Failed to swizzle page\n");
+				return;
+			}
+			set_page_dirty(obj_priv->pages[i]);
+		}
+	}
+}
+
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int page_count = obj->size >> PAGE_SHIFT;
+	int i;
+
+	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+		return;
+
+	if (obj_priv->bit_17 == NULL) {
+		obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+					   sizeof(long), GFP_KERNEL);
+		if (obj_priv->bit_17 == NULL) {
+			DRM_ERROR("Failed to allocate memory for bit 17 "
+				  "record\n");
+			return;
+		}
+	}
+
+	for (i = 0; i < page_count; i++) {
+		if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
+			__set_bit(i, obj_priv->bit_17);
+		else
+			__clear_bit(i, obj_priv->bit_17);
+	}
+}
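The user-visible effect of the "hide bit 17" hunks above is that the get_tiling ioctl keeps reporting the familiar SWIZZLE_9 / SWIZZLE_9_10 values even on machines that also swizzle on bit 17. A hedged userspace sketch of what a caller would now observe (not part of the patch; assumes an open DRM fd, a valid GEM handle, and libdrm's i915_drm.h):

#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void print_swizzle(int drm_fd, unsigned int handle)
{
	struct drm_i915_gem_get_tiling get = { .handle = handle };

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_GET_TILING, &get) != 0)
		return;

	/* With this merge the kernel reports SWIZZLE_9 or SWIZZLE_9_10 here
	 * even when bit 17 is also swizzled; the bit-17 half is handled by
	 * the kernel's pread/pwrite paths instead. */
	printf("tiling_mode=%u swizzle_mode=%u\n",
	       get.tiling_mode, get.swizzle_mode);
}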
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 64773ce52964..c2c8e95ff14d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -367,6 +367,7 @@ static const intel_limit_t intel_limits[] = {
 	.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
 	.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
 		.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
+	.find_pll = intel_find_best_PLL,
     },
     { /* INTEL_LIMIT_IGD_LVDS */
 	.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -380,6 +381,7 @@ static const intel_limit_t intel_limits[] = {
 	/* IGD only supports single-channel mode. */
 	.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
 		.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
+	.find_pll = intel_find_best_PLL,
     },
 
 };
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index b7f0ebe9f810..3e094beecb99 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -864,8 +864,8 @@ static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
 
 static struct sysrq_key_op sysrq_intelfb_restore_op = {
 	.handler = intelfb_sysrq,
-	.help_msg = "force fb",
-	.action_msg = "force restore of fb console",
+	.help_msg = "force-fb(G)",
+	.action_msg = "Restore framebuffer console",
 };
 
 int intelfb_probe(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index b06a4a3ff08d..550374225388 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -38,7 +38,7 @@
 struct intel_hdmi_priv {
 	u32 sdvox_reg;
 	u32 save_SDVOX;
-	int has_hdmi_sink;
+	bool has_hdmi_sink;
 };
 
 static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -128,6 +128,22 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
 	return true;
 }
 
+static void
+intel_hdmi_sink_detect(struct drm_connector *connector)
+{
+	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+	struct edid *edid = NULL;
+
+	edid = drm_get_edid(&intel_output->base,
+			    &intel_output->ddc_bus->adapter);
+	if (edid != NULL) {
+		hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+		kfree(edid);
+		intel_output->base.display_info.raw_edid = NULL;
+	}
+}
+
 static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector)
 {
@@ -158,9 +174,10 @@ intel_hdmi_detect(struct drm_connector *connector)
 		return connector_status_unknown;
 	}
 
-	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0)
+	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) {
+		intel_hdmi_sink_detect(connector);
 		return connector_status_connected;
-	else
+	} else
 		return connector_status_disconnected;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 7b31f55f55c8..9913651c1e17 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1357,6 +1357,23 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
 	intel_sdvo_read_response(intel_output, &response, 2);
 }
 
+static void
+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+{
+	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct edid *edid = NULL;
+
+	intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
+	edid = drm_get_edid(&intel_output->base,
+			    &intel_output->ddc_bus->adapter);
+	if (edid != NULL) {
+		sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+		kfree(edid);
+		intel_output->base.display_info.raw_edid = NULL;
+	}
+}
+
 static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
 {
 	u8 response[2];
@@ -1371,9 +1388,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return connector_status_unknown;
 
-	if ((response[0] != 0) || (response[1] != 0))
+	if ((response[0] != 0) || (response[1] != 0)) {
+		intel_sdvo_hdmi_sink_detect(connector);
 		return connector_status_connected;
-	else
+	} else
 		return connector_status_disconnected;
 }
 
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 67e3353a56d6..95962fa8398a 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -594,6 +594,9 @@ struct drm_i915_gem_busy {
 #define I915_BIT_6_SWIZZLE_9_10_11	4
 /* Not seen by userland */
 #define I915_BIT_6_SWIZZLE_UNKNOWN	5
+/* Seen by userland. */
+#define I915_BIT_6_SWIZZLE_9_17		6
+#define I915_BIT_6_SWIZZLE_9_10_17	7
 
 struct drm_i915_gem_set_tiling {
 	/** Handle of the buffer to have its tiling state updated */