author    Eric Anholt <eric@anholt.net>    2009-03-12 19:56:27 -0400
committer Eric Anholt <eric@anholt.net>    2009-04-08 13:50:57 -0400
commit    280b713b5b0fd84cf2469098aee88acbb5de859c (patch)
tree      42a891344f7999da87adb8f6c6092e9adae3f7d4
parent    e5e9ecde63ba365b510df0f4a9cb3b048a0ad785 (diff)
drm/i915: Allow tiling of objects with bit 17 swizzling by the CPU.
Save the bit 17 state of the pages when freeing the page list, and
reswizzle them if necessary when rebinding the pages (in case they were
swapped out).

Since we have userland with expectations that the swizzle enums let it
pread and pwrite contents accurately, we can't expose a new swizzle enum
for bit 17 (which it would have to GTT map to handle), so we handle it
down in pread and pwrite by swizzling the copy when bit 17 of the page
address is set.

Signed-off-by: Eric Anholt <eric@anholt.net>
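[For reference, the effect of the bit 17 fixup on a copy can be sketched
in a few lines of userspace-style C. This is an illustrative model only:
bit17_read and its layout are made up here, not part of the patch; the
in-kernel implementation is slow_shmem_bit17_copy() in the diff below.]

    #include <stddef.h>
    #include <string.h>

    #define CACHELINE 64

    /* Read 'length' bytes starting at 'offset' from a tiled page whose
     * physical address has bit 17 set.  The GPU sees A6 flipped for such
     * pages, so the CPU compensates by XORing bit 6 of the byte offset,
     * i.e. reading from the other half of each 128-byte pair. */
    static void bit17_read(char *dst, const char *gpu_page,
                           size_t offset, size_t length)
    {
            while (length > 0) {
                    /* Never cross a 64-byte cacheline boundary, so that
                     * offset ^ 64 stays contiguous within the memcpy. */
                    size_t line_end = (offset + CACHELINE) &
                                      ~(size_t)(CACHELINE - 1);
                    size_t n = line_end - offset;
                    if (n > length)
                            n = length;
                    memcpy(dst, gpu_page + (offset ^ 64), n);
                    dst += n;
                    offset += n;
                    length -= n;
            }
    }

[Because the swizzle only exchanges the two 64-byte halves of each
128-byte pair, chopping the copy at cacheline boundaries keeps every
memcpy contiguous.]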
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |   5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         | 130
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  | 111
-rw-r--r--  include/drm/i915_drm.h                  |   3
4 files changed, 235 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index efcd610d4fca..bccd4146d55c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -446,6 +446,9 @@ struct drm_i915_gem_object {
         uint32_t tiling_mode;
         uint32_t stride;
 
+        /** Record of address bit 17 of each page at last unbind. */
+        long *bit_17;
+
         /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
         uint32_t agp_type;
 
@@ -640,6 +643,8 @@ void i915_gem_object_put_pages(struct drm_gem_object *obj);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
 
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3a1189d94a9a..6dca9fc7c1db 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -155,6 +155,15 @@ fast_shmem_read(struct page **pages,
         return 0;
 }
 
+static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+{
+        drm_i915_private_t *dev_priv = obj->dev->dev_private;
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+               obj_priv->tiling_mode != I915_TILING_NONE;
+}
+
 static inline int
 slow_shmem_copy(struct page *dst_page,
                 int dst_offset,
@@ -182,6 +191,64 @@ slow_shmem_copy(struct page *dst_page,
         return 0;
 }
 
+static inline int
+slow_shmem_bit17_copy(struct page *gpu_page,
+                      int gpu_offset,
+                      struct page *cpu_page,
+                      int cpu_offset,
+                      int length,
+                      int is_read)
+{
+        char *gpu_vaddr, *cpu_vaddr;
+
+        /* Use the unswizzled path if this page isn't affected. */
+        if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
+                if (is_read)
+                        return slow_shmem_copy(cpu_page, cpu_offset,
+                                               gpu_page, gpu_offset, length);
+                else
+                        return slow_shmem_copy(gpu_page, gpu_offset,
+                                               cpu_page, cpu_offset, length);
+        }
+
+        gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
+        if (gpu_vaddr == NULL)
+                return -ENOMEM;
+
+        cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
+        if (cpu_vaddr == NULL) {
+                kunmap_atomic(gpu_vaddr, KM_USER0);
+                return -ENOMEM;
+        }
+
+        /* Copy the data, XORing A6 with A17 (1). The user already knows he's
+         * XORing with the other bits (A9 for Y, A9 and A10 for X)
+         */
+        while (length > 0) {
+                int cacheline_end = ALIGN(gpu_offset + 1, 64);
+                int this_length = min(cacheline_end - gpu_offset, length);
+                int swizzled_gpu_offset = gpu_offset ^ 64;
+
+                if (is_read) {
+                        memcpy(cpu_vaddr + cpu_offset,
+                               gpu_vaddr + swizzled_gpu_offset,
+                               this_length);
+                } else {
+                        memcpy(gpu_vaddr + swizzled_gpu_offset,
+                               cpu_vaddr + cpu_offset,
+                               this_length);
+                }
+                cpu_offset += this_length;
+                gpu_offset += this_length;
+                length -= this_length;
+        }
+
+        kunmap_atomic(cpu_vaddr, KM_USER1);
+        kunmap_atomic(gpu_vaddr, KM_USER0);
+
+        return 0;
+}
+
 /**
  * This is the fast shmem pread path, which attempts to copy_from_user directly
  * from the backing pages of the object to the user's address space. On a
@@ -270,6 +337,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
         int page_length;
         int ret;
         uint64_t data_ptr = args->data_ptr;
+        int do_bit17_swizzling;
 
         remain = args->size;
 
@@ -294,6 +362,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                 goto fail_put_user_pages;
         }
 
+        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
         mutex_lock(&dev->struct_mutex);
 
         ret = i915_gem_object_get_pages(obj);
@@ -328,11 +398,20 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                 if ((data_page_offset + page_length) > PAGE_SIZE)
                         page_length = PAGE_SIZE - data_page_offset;
 
-                ret = slow_shmem_copy(user_pages[data_page_index],
-                                      data_page_offset,
-                                      obj_priv->pages[shmem_page_index],
-                                      shmem_page_offset,
-                                      page_length);
+                if (do_bit17_swizzling) {
+                        ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                                                    shmem_page_offset,
+                                                    user_pages[data_page_index],
+                                                    data_page_offset,
+                                                    page_length,
+                                                    1);
+                } else {
+                        ret = slow_shmem_copy(user_pages[data_page_index],
+                                              data_page_offset,
+                                              obj_priv->pages[shmem_page_index],
+                                              shmem_page_offset,
+                                              page_length);
+                }
                 if (ret)
                         goto fail_put_pages;
 
@@ -384,9 +463,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                 return -EINVAL;
         }
 
-        ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
-        if (ret != 0)
-                ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+        if (i915_gem_object_needs_bit17_swizzle(obj)) {
+                ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+        } else {
+                ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+                if (ret != 0)
+                        ret = i915_gem_shmem_pread_slow(dev, obj, args,
+                                                        file_priv);
+        }
 
         drm_gem_object_unreference(obj);
 
@@ -728,6 +812,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
         int page_length;
         int ret;
         uint64_t data_ptr = args->data_ptr;
+        int do_bit17_swizzling;
 
         remain = args->size;
 
@@ -752,6 +837,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                 goto fail_put_user_pages;
         }
 
+        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
         mutex_lock(&dev->struct_mutex);
 
         ret = i915_gem_object_get_pages(obj);
@@ -786,11 +873,20 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                 if ((data_page_offset + page_length) > PAGE_SIZE)
                         page_length = PAGE_SIZE - data_page_offset;
 
-                ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
-                                      shmem_page_offset,
-                                      user_pages[data_page_index],
-                                      data_page_offset,
-                                      page_length);
+                if (do_bit17_swizzling) {
+                        ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                                                    shmem_page_offset,
+                                                    user_pages[data_page_index],
+                                                    data_page_offset,
+                                                    page_length,
+                                                    0);
+                } else {
+                        ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                                              shmem_page_offset,
+                                              user_pages[data_page_index],
+                                              data_page_offset,
+                                              page_length);
+                }
                 if (ret)
                         goto fail_put_pages;
 
@@ -855,6 +951,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                         ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
                                                        file_priv);
                 }
+        } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
+                ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
         } else {
                 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
                 if (ret == -EFAULT) {
@@ -1298,6 +1396,9 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
         if (--obj_priv->pages_refcount != 0)
                 return;
 
+        if (obj_priv->tiling_mode != I915_TILING_NONE)
+                i915_gem_object_save_bit_17_swizzle(obj);
+
         for (i = 0; i < page_count; i++)
                 if (obj_priv->pages[i] != NULL) {
                         if (obj_priv->dirty)
@@ -1923,6 +2024,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
                 }
                 obj_priv->pages[i] = page;
         }
+
+        if (obj_priv->tiling_mode != I915_TILING_NONE)
+                i915_gem_object_do_bit_17_swizzle(obj);
+
         return 0;
 }
 
@@ -3601,6 +3706,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
         i915_gem_free_mmap_offset(obj);
 
         drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+        kfree(obj_priv->bit_17);
         drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 6be3f927c86a..f27e523c764f 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,6 +25,8 @@
  *
  */
 
+#include "linux/string.h"
+#include "linux/bitops.h"
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -127,8 +129,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
                         swizzle_y = I915_BIT_6_SWIZZLE_9_11;
                 } else {
                         /* Bit 17 swizzling by the CPU in addition. */
-                        swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-                        swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+                        swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+                        swizzle_y = I915_BIT_6_SWIZZLE_9_17;
                 }
                 break;
         }
@@ -288,6 +290,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
         else
                 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+
+        /* Hide bit 17 swizzling from the user.  This prevents old Mesa
+         * from aborting the application on sw fallbacks to bit 17,
+         * and we use the pread/pwrite bit17 paths to swizzle for it.
+         * If there was a user that was relying on the swizzle
+         * information for drm_intel_bo_map()ed reads/writes this would
+         * break it, but we don't have any of those.
+         */
+        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+                args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+                args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
         /* If we can't handle the swizzling, make it untiled. */
         if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
                 args->tiling_mode = I915_TILING_NONE;
@@ -354,8 +369,100 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
                 DRM_ERROR("unknown tiling mode\n");
         }
 
+        /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+                args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+                args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
         drm_gem_object_unreference(obj);
         mutex_unlock(&dev->struct_mutex);
 
         return 0;
 }
+
+/**
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static int
+i915_gem_swizzle_page(struct page *page)
+{
+        char *vaddr;
+        int i;
+        char temp[64];
+
+        vaddr = kmap(page);
+        if (vaddr == NULL)
+                return -ENOMEM;
+
+        for (i = 0; i < PAGE_SIZE; i += 128) {
+                memcpy(temp, &vaddr[i], 64);
+                memcpy(&vaddr[i], &vaddr[i + 64], 64);
+                memcpy(&vaddr[i + 64], temp, 64);
+        }
+
+        kunmap(page);
+
+        return 0;
+}
+
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+        int page_count = obj->size >> PAGE_SHIFT;
+        int i;
+
+        if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+                return;
+
+        if (obj_priv->bit_17 == NULL)
+                return;
+
+        for (i = 0; i < page_count; i++) {
+                char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+                if ((new_bit_17 & 0x1) !=
+                    (test_bit(i, obj_priv->bit_17) != 0)) {
+                        int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
+                        if (ret != 0) {
+                                DRM_ERROR("Failed to swizzle page\n");
+                                return;
+                        }
+                        set_page_dirty(obj_priv->pages[i]);
+                }
+        }
+}
+
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+        int page_count = obj->size >> PAGE_SHIFT;
+        int i;
+
+        if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+                return;
+
+        if (obj_priv->bit_17 == NULL) {
+                obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+                                           sizeof(long), GFP_KERNEL);
+                if (obj_priv->bit_17 == NULL) {
+                        DRM_ERROR("Failed to allocate memory for bit 17 "
+                                  "record\n");
+                        return;
+                }
+        }
+
+        for (i = 0; i < page_count; i++) {
+                if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
+                        __set_bit(i, obj_priv->bit_17);
+                else
+                        __clear_bit(i, obj_priv->bit_17);
+        }
+}
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 67e3353a56d6..95962fa8398a 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -594,6 +594,9 @@ struct drm_i915_gem_busy {
 #define I915_BIT_6_SWIZZLE_9_10_11      4
 /* Not seen by userland */
 #define I915_BIT_6_SWIZZLE_UNKNOWN      5
+/* Seen by userland. */
+#define I915_BIT_6_SWIZZLE_9_17         6
+#define I915_BIT_6_SWIZZLE_9_10_17      7
 
 struct drm_i915_gem_set_tiling {
         /** Handle of the buffer to have its tiling state updated */