author     Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /drivers/gpu/drm/i915/i915_gem.c
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  1215
1 file changed, 793 insertions, 422 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index abfc27b0c2ea..ef3d91dda71a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@ | |||
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | #include "i915_trace.h" | 32 | #include "i915_trace.h" |
33 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
34 | #include <linux/slab.h> | ||
34 | #include <linux/swap.h> | 35 | #include <linux/swap.h> |
35 | #include <linux/pci.h> | 36 | #include <linux/pci.h> |
36 | 37 | ||
@@ -128,9 +129,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, | |||
128 | return -ENOMEM; | 129 | return -ENOMEM; |
129 | 130 | ||
130 | ret = drm_gem_handle_create(file_priv, obj, &handle); | 131 | ret = drm_gem_handle_create(file_priv, obj, &handle); |
131 | mutex_lock(&dev->struct_mutex); | 132 | drm_gem_object_handle_unreference_unlocked(obj); |
132 | drm_gem_object_handle_unreference(obj); | ||
133 | mutex_unlock(&dev->struct_mutex); | ||
134 | 133 | ||
135 | if (ret) | 134 | if (ret) |
136 | return ret; | 135 | return ret; |
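
This and several later hunks convert to the unlocked GEM helpers. Since the side-by-side rendering interleaves the two versions, here is the pattern in isolation (distilled from the hunk, not new code):

    /* Before: callers took struct_mutex just to drop the handle ref. */
    mutex_lock(&dev->struct_mutex);
    drm_gem_object_handle_unreference(obj);
    mutex_unlock(&dev->struct_mutex);

    /* After: the _unlocked variant manages the locking internally. */
    drm_gem_object_handle_unreference_unlocked(obj);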
@@ -164,7 +163,7 @@ fast_shmem_read(struct page **pages, | |||
164 | static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) | 163 | static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) |
165 | { | 164 | { |
166 | drm_i915_private_t *dev_priv = obj->dev->dev_private; | 165 | drm_i915_private_t *dev_priv = obj->dev->dev_private; |
167 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 166 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
168 | 167 | ||
169 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && | 168 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
170 | obj_priv->tiling_mode != I915_TILING_NONE; | 169 | obj_priv->tiling_mode != I915_TILING_NONE; |
@@ -265,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
265 | struct drm_i915_gem_pread *args, | 264 | struct drm_i915_gem_pread *args, |
266 | struct drm_file *file_priv) | 265 | struct drm_file *file_priv) |
267 | { | 266 | { |
268 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 267 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
269 | ssize_t remain; | 268 | ssize_t remain; |
270 | loff_t offset, page_base; | 269 | loff_t offset, page_base; |
271 | char __user *user_data; | 270 | char __user *user_data; |
@@ -277,7 +276,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
277 | 276 | ||
278 | mutex_lock(&dev->struct_mutex); | 277 | mutex_lock(&dev->struct_mutex); |
279 | 278 | ||
280 | ret = i915_gem_object_get_pages(obj); | 279 | ret = i915_gem_object_get_pages(obj, 0); |
281 | if (ret != 0) | 280 | if (ret != 0) |
282 | goto fail_unlock; | 281 | goto fail_unlock; |
283 | 282 | ||
@@ -286,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
286 | if (ret != 0) | 285 | if (ret != 0) |
287 | goto fail_put_pages; | 286 | goto fail_put_pages; |
288 | 287 | ||
289 | obj_priv = obj->driver_private; | 288 | obj_priv = to_intel_bo(obj); |
290 | offset = args->offset; | 289 | offset = args->offset; |
291 | 290 | ||
292 | while (remain > 0) { | 291 | while (remain > 0) { |
@@ -321,40 +320,24 @@ fail_unlock: | |||
321 | return ret; | 320 | return ret; |
322 | } | 321 | } |
323 | 322 | ||
324 | static inline gfp_t | ||
325 | i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj) | ||
326 | { | ||
327 | return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping); | ||
328 | } | ||
329 | |||
330 | static inline void | ||
331 | i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp) | ||
332 | { | ||
333 | mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp); | ||
334 | } | ||
335 | |||
336 | static int | 323 | static int |
337 | i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) | 324 | i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) |
338 | { | 325 | { |
339 | int ret; | 326 | int ret; |
340 | 327 | ||
341 | ret = i915_gem_object_get_pages(obj); | 328 | ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN); |
342 | 329 | ||
343 | /* If we've insufficient memory to map in the pages, attempt | 330 | /* If we've insufficient memory to map in the pages, attempt |
344 | * to make some space by throwing out some old buffers. | 331 | * to make some space by throwing out some old buffers. |
345 | */ | 332 | */ |
346 | if (ret == -ENOMEM) { | 333 | if (ret == -ENOMEM) { |
347 | struct drm_device *dev = obj->dev; | 334 | struct drm_device *dev = obj->dev; |
348 | gfp_t gfp; | ||
349 | 335 | ||
350 | ret = i915_gem_evict_something(dev, obj->size); | 336 | ret = i915_gem_evict_something(dev, obj->size); |
351 | if (ret) | 337 | if (ret) |
352 | return ret; | 338 | return ret; |
353 | 339 | ||
354 | gfp = i915_gem_object_get_page_gfp_mask(obj); | 340 | ret = i915_gem_object_get_pages(obj, 0); |
355 | i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY); | ||
356 | ret = i915_gem_object_get_pages(obj); | ||
357 | i915_gem_object_set_page_gfp_mask (obj, gfp); | ||
358 | } | 341 | } |
359 | 342 | ||
360 | return ret; | 343 | return ret; |
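
Distilled from the hunk above (prototype simplified, locking elided), the new allocate-or-evict flow is a fail-fast first attempt followed by one blocking retry, replacing the old per-mapping gfp-mask fiddling:

    static int i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
    {
        /* First attempt: __GFP_NORETRY fails fast under memory
         * pressure instead of invoking the OOM killer. */
        int ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

        if (ret == -ENOMEM) {
            /* Make room by evicting old buffers, then retry with
             * the default (blocking) allocation mask. */
            ret = i915_gem_evict_something(obj->dev, obj->size);
            if (ret)
                return ret;
            ret = i915_gem_object_get_pages(obj, 0);
        }
        return ret;
    }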
@@ -371,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
371 | struct drm_i915_gem_pread *args, | 354 | struct drm_i915_gem_pread *args, |
372 | struct drm_file *file_priv) | 355 | struct drm_file *file_priv) |
373 | { | 356 | { |
374 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 357 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
375 | struct mm_struct *mm = current->mm; | 358 | struct mm_struct *mm = current->mm; |
376 | struct page **user_pages; | 359 | struct page **user_pages; |
377 | ssize_t remain; | 360 | ssize_t remain; |
@@ -420,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
420 | if (ret != 0) | 403 | if (ret != 0) |
421 | goto fail_put_pages; | 404 | goto fail_put_pages; |
422 | 405 | ||
423 | obj_priv = obj->driver_private; | 406 | obj_priv = to_intel_bo(obj); |
424 | offset = args->offset; | 407 | offset = args->offset; |
425 | 408 | ||
426 | while (remain > 0) { | 409 | while (remain > 0) { |
@@ -496,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
496 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 479 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
497 | if (obj == NULL) | 480 | if (obj == NULL) |
498 | return -EBADF; | 481 | return -EBADF; |
499 | obj_priv = obj->driver_private; | 482 | obj_priv = to_intel_bo(obj); |
500 | 483 | ||
501 | /* Bounds check source. | 484 | /* Bounds check source. |
502 | * | 485 | * |
@@ -504,7 +487,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
504 | */ | 487 | */ |
505 | if (args->offset > obj->size || args->size > obj->size || | 488 | if (args->offset > obj->size || args->size > obj->size || |
506 | args->offset + args->size > obj->size) { | 489 | args->offset + args->size > obj->size) { |
507 | drm_gem_object_unreference(obj); | 490 | drm_gem_object_unreference_unlocked(obj); |
508 | return -EINVAL; | 491 | return -EINVAL; |
509 | } | 492 | } |
510 | 493 | ||
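
The three-clause bounds check retained by these pread hunks is written to survive unsigned wrap-around: testing offset + size > obj->size alone is unsafe because the addition can overflow. A hypothetical 32-bit illustration:

    /* Hypothetical 32-bit values; the real ioctl fields may be wider. */
    uint32_t size = 0x1000;       /* 4 KiB object */
    uint32_t off  = 0xfffff000;
    uint32_t len  = 0x2000;

    /* off + len wraps to 0x1000, so "off + len > size" is false even
     * though the range is wildly out of bounds; the separate
     * "off > size" and "len > size" clauses catch it first. */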
@@ -517,7 +500,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
517 | file_priv); | 500 | file_priv); |
518 | } | 501 | } |
519 | 502 | ||
520 | drm_gem_object_unreference(obj); | 503 | drm_gem_object_unreference_unlocked(obj); |
521 | 504 | ||
522 | return ret; | 505 | return ret; |
523 | } | 506 | } |
@@ -598,7 +581,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
598 | struct drm_i915_gem_pwrite *args, | 581 | struct drm_i915_gem_pwrite *args, |
599 | struct drm_file *file_priv) | 582 | struct drm_file *file_priv) |
600 | { | 583 | { |
601 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 584 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
602 | drm_i915_private_t *dev_priv = dev->dev_private; | 585 | drm_i915_private_t *dev_priv = dev->dev_private; |
603 | ssize_t remain; | 586 | ssize_t remain; |
604 | loff_t offset, page_base; | 587 | loff_t offset, page_base; |
@@ -622,7 +605,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
622 | if (ret) | 605 | if (ret) |
623 | goto fail; | 606 | goto fail; |
624 | 607 | ||
625 | obj_priv = obj->driver_private; | 608 | obj_priv = to_intel_bo(obj); |
626 | offset = obj_priv->gtt_offset + args->offset; | 609 | offset = obj_priv->gtt_offset + args->offset; |
627 | 610 | ||
628 | while (remain > 0) { | 611 | while (remain > 0) { |
@@ -672,7 +655,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
672 | struct drm_i915_gem_pwrite *args, | 655 | struct drm_i915_gem_pwrite *args, |
673 | struct drm_file *file_priv) | 656 | struct drm_file *file_priv) |
674 | { | 657 | { |
675 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 658 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
676 | drm_i915_private_t *dev_priv = dev->dev_private; | 659 | drm_i915_private_t *dev_priv = dev->dev_private; |
677 | ssize_t remain; | 660 | ssize_t remain; |
678 | loff_t gtt_page_base, offset; | 661 | loff_t gtt_page_base, offset; |
@@ -716,7 +699,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
716 | if (ret) | 699 | if (ret) |
717 | goto out_unpin_object; | 700 | goto out_unpin_object; |
718 | 701 | ||
719 | obj_priv = obj->driver_private; | 702 | obj_priv = to_intel_bo(obj); |
720 | offset = obj_priv->gtt_offset + args->offset; | 703 | offset = obj_priv->gtt_offset + args->offset; |
721 | 704 | ||
722 | while (remain > 0) { | 705 | while (remain > 0) { |
@@ -778,7 +761,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
778 | struct drm_i915_gem_pwrite *args, | 761 | struct drm_i915_gem_pwrite *args, |
779 | struct drm_file *file_priv) | 762 | struct drm_file *file_priv) |
780 | { | 763 | { |
781 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 764 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
782 | ssize_t remain; | 765 | ssize_t remain; |
783 | loff_t offset, page_base; | 766 | loff_t offset, page_base; |
784 | char __user *user_data; | 767 | char __user *user_data; |
@@ -790,7 +773,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
790 | 773 | ||
791 | mutex_lock(&dev->struct_mutex); | 774 | mutex_lock(&dev->struct_mutex); |
792 | 775 | ||
793 | ret = i915_gem_object_get_pages(obj); | 776 | ret = i915_gem_object_get_pages(obj, 0); |
794 | if (ret != 0) | 777 | if (ret != 0) |
795 | goto fail_unlock; | 778 | goto fail_unlock; |
796 | 779 | ||
@@ -798,7 +781,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
798 | if (ret != 0) | 781 | if (ret != 0) |
799 | goto fail_put_pages; | 782 | goto fail_put_pages; |
800 | 783 | ||
801 | obj_priv = obj->driver_private; | 784 | obj_priv = to_intel_bo(obj); |
802 | offset = args->offset; | 785 | offset = args->offset; |
803 | obj_priv->dirty = 1; | 786 | obj_priv->dirty = 1; |
804 | 787 | ||
@@ -846,7 +829,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
846 | struct drm_i915_gem_pwrite *args, | 829 | struct drm_i915_gem_pwrite *args, |
847 | struct drm_file *file_priv) | 830 | struct drm_file *file_priv) |
848 | { | 831 | { |
849 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 832 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
850 | struct mm_struct *mm = current->mm; | 833 | struct mm_struct *mm = current->mm; |
851 | struct page **user_pages; | 834 | struct page **user_pages; |
852 | ssize_t remain; | 835 | ssize_t remain; |
@@ -894,7 +877,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
894 | if (ret != 0) | 877 | if (ret != 0) |
895 | goto fail_put_pages; | 878 | goto fail_put_pages; |
896 | 879 | ||
897 | obj_priv = obj->driver_private; | 880 | obj_priv = to_intel_bo(obj); |
898 | offset = args->offset; | 881 | offset = args->offset; |
899 | obj_priv->dirty = 1; | 882 | obj_priv->dirty = 1; |
900 | 883 | ||
@@ -969,7 +952,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
969 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 952 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
970 | if (obj == NULL) | 953 | if (obj == NULL) |
971 | return -EBADF; | 954 | return -EBADF; |
972 | obj_priv = obj->driver_private; | 955 | obj_priv = to_intel_bo(obj); |
973 | 956 | ||
974 | /* Bounds check destination. | 957 | /* Bounds check destination. |
975 | * | 958 | * |
@@ -977,7 +960,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
977 | */ | 960 | */ |
978 | if (args->offset > obj->size || args->size > obj->size || | 961 | if (args->offset > obj->size || args->size > obj->size || |
979 | args->offset + args->size > obj->size) { | 962 | args->offset + args->size > obj->size) { |
980 | drm_gem_object_unreference(obj); | 963 | drm_gem_object_unreference_unlocked(obj); |
981 | return -EINVAL; | 964 | return -EINVAL; |
982 | } | 965 | } |
983 | 966 | ||
@@ -1011,7 +994,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1011 | DRM_INFO("pwrite failed %d\n", ret); | 994 | DRM_INFO("pwrite failed %d\n", ret); |
1012 | #endif | 995 | #endif |
1013 | 996 | ||
1014 | drm_gem_object_unreference(obj); | 997 | drm_gem_object_unreference_unlocked(obj); |
1015 | 998 | ||
1016 | return ret; | 999 | return ret; |
1017 | } | 1000 | } |
@@ -1051,7 +1034,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1051 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1034 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
1052 | if (obj == NULL) | 1035 | if (obj == NULL) |
1053 | return -EBADF; | 1036 | return -EBADF; |
1054 | obj_priv = obj->driver_private; | 1037 | obj_priv = to_intel_bo(obj); |
1055 | 1038 | ||
1056 | mutex_lock(&dev->struct_mutex); | 1039 | mutex_lock(&dev->struct_mutex); |
1057 | 1040 | ||
@@ -1113,7 +1096,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1113 | DRM_INFO("%s: sw_finish %d (%p %zd)\n", | 1096 | DRM_INFO("%s: sw_finish %d (%p %zd)\n", |
1114 | __func__, args->handle, obj, obj->size); | 1097 | __func__, args->handle, obj, obj->size); |
1115 | #endif | 1098 | #endif |
1116 | obj_priv = obj->driver_private; | 1099 | obj_priv = to_intel_bo(obj); |
1117 | 1100 | ||
1118 | /* Pinned buffers may be scanout, so flush the cache */ | 1101 | /* Pinned buffers may be scanout, so flush the cache */ |
1119 | if (obj_priv->pin_count) | 1102 | if (obj_priv->pin_count) |
@@ -1154,9 +1137,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1154 | PROT_READ | PROT_WRITE, MAP_SHARED, | 1137 | PROT_READ | PROT_WRITE, MAP_SHARED, |
1155 | args->offset); | 1138 | args->offset); |
1156 | up_write(&current->mm->mmap_sem); | 1139 | drm_gem_object_unreference_unlocked(obj); |

1157 | mutex_lock(&dev->struct_mutex); | 1140 | drm_gem_object_unreference_unlocked(obj); |
1158 | drm_gem_object_unreference(obj); | ||
1159 | mutex_unlock(&dev->struct_mutex); | ||
1160 | if (IS_ERR((void *)addr)) | 1141 | if (IS_ERR((void *)addr)) |
1161 | return addr; | 1142 | return addr; |
1162 | 1143 | ||
@@ -1186,7 +1167,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1186 | struct drm_gem_object *obj = vma->vm_private_data; | 1167 | struct drm_gem_object *obj = vma->vm_private_data; |
1187 | struct drm_device *dev = obj->dev; | 1168 | struct drm_device *dev = obj->dev; |
1188 | struct drm_i915_private *dev_priv = dev->dev_private; | 1169 | struct drm_i915_private *dev_priv = dev->dev_private; |
1189 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1170 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1190 | pgoff_t page_offset; | 1171 | pgoff_t page_offset; |
1191 | unsigned long pfn; | 1172 | unsigned long pfn; |
1192 | int ret = 0; | 1173 | int ret = 0; |
@@ -1253,7 +1234,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) | |||
1253 | { | 1234 | { |
1254 | struct drm_device *dev = obj->dev; | 1235 | struct drm_device *dev = obj->dev; |
1255 | struct drm_gem_mm *mm = dev->mm_private; | 1236 | struct drm_gem_mm *mm = dev->mm_private; |
1256 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1237 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1257 | struct drm_map_list *list; | 1238 | struct drm_map_list *list; |
1258 | struct drm_local_map *map; | 1239 | struct drm_local_map *map; |
1259 | int ret = 0; | 1240 | int ret = 0; |
@@ -1288,6 +1269,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) | |||
1288 | list->hash.key = list->file_offset_node->start; | 1269 | list->hash.key = list->file_offset_node->start; |
1289 | if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { | 1270 | if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { |
1290 | DRM_ERROR("failed to add to map hash\n"); | 1271 | DRM_ERROR("failed to add to map hash\n"); |
1272 | ret = -ENOMEM; | ||
1291 | goto out_free_mm; | 1273 | goto out_free_mm; |
1292 | } | 1274 | } |
1293 | 1275 | ||
@@ -1309,7 +1291,7 @@ out_free_list: | |||
1309 | * i915_gem_release_mmap - remove physical page mappings | 1291 | * i915_gem_release_mmap - remove physical page mappings |
1310 | * @obj: obj in question | 1292 | * @obj: obj in question |
1311 | * | 1293 | * |
1312 | * Preserve the reservation of the mmaping with the DRM core code, but | 1294 | * Preserve the reservation of the mmapping with the DRM core code, but |
1313 | * relinquish ownership of the pages back to the system. | 1295 | * relinquish ownership of the pages back to the system. |
1314 | * | 1296 | * |
1315 | * It is vital that we remove the page mapping if we have mapped a tiled | 1297 | * It is vital that we remove the page mapping if we have mapped a tiled |
@@ -1323,7 +1305,7 @@ void | |||
1323 | i915_gem_release_mmap(struct drm_gem_object *obj) | 1305 | i915_gem_release_mmap(struct drm_gem_object *obj) |
1324 | { | 1306 | { |
1325 | struct drm_device *dev = obj->dev; | 1307 | struct drm_device *dev = obj->dev; |
1326 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1308 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1327 | 1309 | ||
1328 | if (dev->dev_mapping) | 1310 | if (dev->dev_mapping) |
1329 | unmap_mapping_range(dev->dev_mapping, | 1311 | unmap_mapping_range(dev->dev_mapping, |
@@ -1334,7 +1316,7 @@ static void | |||
1334 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) | 1316 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) |
1335 | { | 1317 | { |
1336 | struct drm_device *dev = obj->dev; | 1318 | struct drm_device *dev = obj->dev; |
1337 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1319 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1338 | struct drm_gem_mm *mm = dev->mm_private; | 1320 | struct drm_gem_mm *mm = dev->mm_private; |
1339 | struct drm_map_list *list; | 1321 | struct drm_map_list *list; |
1340 | 1322 | ||
@@ -1365,7 +1347,7 @@ static uint32_t | |||
1365 | i915_gem_get_gtt_alignment(struct drm_gem_object *obj) | 1347 | i915_gem_get_gtt_alignment(struct drm_gem_object *obj) |
1366 | { | 1348 | { |
1367 | struct drm_device *dev = obj->dev; | 1349 | struct drm_device *dev = obj->dev; |
1368 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1350 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1369 | int start, i; | 1351 | int start, i; |
1370 | 1352 | ||
1371 | /* | 1353 | /* |
@@ -1424,7 +1406,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1424 | 1406 | ||
1425 | mutex_lock(&dev->struct_mutex); | 1407 | mutex_lock(&dev->struct_mutex); |
1426 | 1408 | ||
1427 | obj_priv = obj->driver_private; | 1409 | obj_priv = to_intel_bo(obj); |
1428 | 1410 | ||
1429 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 1411 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
1430 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | 1412 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); |
@@ -1468,7 +1450,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1468 | void | 1450 | void |
1469 | i915_gem_object_put_pages(struct drm_gem_object *obj) | 1451 | i915_gem_object_put_pages(struct drm_gem_object *obj) |
1470 | { | 1452 | { |
1471 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1453 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1472 | int page_count = obj->size / PAGE_SIZE; | 1454 | int page_count = obj->size / PAGE_SIZE; |
1473 | int i; | 1455 | int i; |
1474 | 1456 | ||
@@ -1485,9 +1467,6 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) | |||
1485 | obj_priv->dirty = 0; | 1467 | obj_priv->dirty = 0; |
1486 | 1468 | ||
1487 | for (i = 0; i < page_count; i++) { | 1469 | for (i = 0; i < page_count; i++) { |
1488 | if (obj_priv->pages[i] == NULL) | ||
1489 | break; | ||
1490 | |||
1491 | if (obj_priv->dirty) | 1470 | if (obj_priv->dirty) |
1492 | set_page_dirty(obj_priv->pages[i]); | 1471 | set_page_dirty(obj_priv->pages[i]); |
1493 | 1472 | ||
@@ -1507,7 +1486,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) | |||
1507 | { | 1486 | { |
1508 | struct drm_device *dev = obj->dev; | 1487 | struct drm_device *dev = obj->dev; |
1509 | drm_i915_private_t *dev_priv = dev->dev_private; | 1488 | drm_i915_private_t *dev_priv = dev->dev_private; |
1510 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1489 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1511 | 1490 | ||
1512 | /* Add a reference if we're newly entering the active list. */ | 1491 | /* Add a reference if we're newly entering the active list. */ |
1513 | if (!obj_priv->active) { | 1492 | if (!obj_priv->active) { |
@@ -1527,7 +1506,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | |||
1527 | { | 1506 | { |
1528 | struct drm_device *dev = obj->dev; | 1507 | struct drm_device *dev = obj->dev; |
1529 | drm_i915_private_t *dev_priv = dev->dev_private; | 1508 | drm_i915_private_t *dev_priv = dev->dev_private; |
1530 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1509 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1531 | 1510 | ||
1532 | BUG_ON(!obj_priv->active); | 1511 | BUG_ON(!obj_priv->active); |
1533 | list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); | 1512 | list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); |
@@ -1538,7 +1517,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | |||
1538 | static void | 1517 | static void |
1539 | i915_gem_object_truncate(struct drm_gem_object *obj) | 1518 | i915_gem_object_truncate(struct drm_gem_object *obj) |
1540 | { | 1519 | { |
1541 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1520 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1542 | struct inode *inode; | 1521 | struct inode *inode; |
1543 | 1522 | ||
1544 | inode = obj->filp->f_path.dentry->d_inode; | 1523 | inode = obj->filp->f_path.dentry->d_inode; |
@@ -1559,7 +1538,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
1559 | { | 1538 | { |
1560 | struct drm_device *dev = obj->dev; | 1539 | struct drm_device *dev = obj->dev; |
1561 | drm_i915_private_t *dev_priv = dev->dev_private; | 1540 | drm_i915_private_t *dev_priv = dev->dev_private; |
1562 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1541 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1563 | 1542 | ||
1564 | i915_verify_inactive(dev, __FILE__, __LINE__); | 1543 | i915_verify_inactive(dev, __FILE__, __LINE__); |
1565 | if (obj_priv->pin_count != 0) | 1544 | if (obj_priv->pin_count != 0) |
@@ -1567,6 +1546,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
1567 | else | 1546 | else |
1568 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 1547 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); |
1569 | 1548 | ||
1549 | BUG_ON(!list_empty(&obj_priv->gpu_write_list)); | ||
1550 | |||
1570 | obj_priv->last_rendering_seqno = 0; | 1551 | obj_priv->last_rendering_seqno = 0; |
1571 | if (obj_priv->active) { | 1552 | if (obj_priv->active) { |
1572 | obj_priv->active = 0; | 1553 | obj_priv->active = 0; |
@@ -1575,6 +1556,45 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
1575 | i915_verify_inactive(dev, __FILE__, __LINE__); | 1556 | i915_verify_inactive(dev, __FILE__, __LINE__); |
1576 | } | 1557 | } |
1577 | 1558 | ||
1559 | static void | ||
1560 | i915_gem_process_flushing_list(struct drm_device *dev, | ||
1561 | uint32_t flush_domains, uint32_t seqno) | ||
1562 | { | ||
1563 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1564 | struct drm_i915_gem_object *obj_priv, *next; | ||
1565 | |||
1566 | list_for_each_entry_safe(obj_priv, next, | ||
1567 | &dev_priv->mm.gpu_write_list, | ||
1568 | gpu_write_list) { | ||
1569 | struct drm_gem_object *obj = obj_priv->obj; | ||
1570 | |||
1571 | if ((obj->write_domain & flush_domains) == | ||
1572 | obj->write_domain) { | ||
1573 | uint32_t old_write_domain = obj->write_domain; | ||
1574 | |||
1575 | obj->write_domain = 0; | ||
1576 | list_del_init(&obj_priv->gpu_write_list); | ||
1577 | i915_gem_object_move_to_active(obj, seqno); | ||
1578 | |||
1579 | /* update the fence lru list */ | ||
1580 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
1581 | list_move_tail(&obj_priv->fence_list, | ||
1582 | &dev_priv->mm.fence_list); | ||
1583 | |||
1584 | trace_i915_gem_object_change_domain(obj, | ||
1585 | obj->read_domains, | ||
1586 | old_write_domain); | ||
1587 | } | ||
1588 | } | ||
1589 | } | ||
1590 | |||
1591 | #define PIPE_CONTROL_FLUSH(addr) \ | ||
1592 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ | ||
1593 | PIPE_CONTROL_DEPTH_STALL); \ | ||
1594 | OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ | ||
1595 | OUT_RING(0); \ | ||
1596 | OUT_RING(0); \ | ||
1597 | |||
1578 | /** | 1598 | /** |
1579 | * Creates a new sequence number, emitting a write of it to the status page | 1599 | * Creates a new sequence number, emitting a write of it to the status page |
1580 | * plus an interrupt, which will trigger i915_user_interrupt_handler. | 1600 | * plus an interrupt, which will trigger i915_user_interrupt_handler. |
@@ -1583,7 +1603,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
1583 | * | 1603 | * |
1584 | * Returned sequence numbers are nonzero on success. | 1604 | * Returned sequence numbers are nonzero on success. |
1585 | */ | 1605 | */ |
1586 | static uint32_t | 1606 | uint32_t |
1587 | i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | 1607 | i915_add_request(struct drm_device *dev, struct drm_file *file_priv, |
1588 | uint32_t flush_domains) | 1608 | uint32_t flush_domains) |
1589 | { | 1609 | { |
@@ -1609,15 +1629,49 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1609 | if (dev_priv->mm.next_gem_seqno == 0) | 1629 | if (dev_priv->mm.next_gem_seqno == 0) |
1610 | dev_priv->mm.next_gem_seqno++; | 1630 | dev_priv->mm.next_gem_seqno++; |
1611 | 1631 | ||
1612 | BEGIN_LP_RING(4); | 1632 | if (HAS_PIPE_CONTROL(dev)) { |
1613 | OUT_RING(MI_STORE_DWORD_INDEX); | 1633 | u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; |
1614 | OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
1615 | OUT_RING(seqno); | ||
1616 | 1634 | ||
1617 | OUT_RING(MI_USER_INTERRUPT); | 1635 | /* |
1618 | ADVANCE_LP_RING(); | 1636 | * Workaround qword write incoherence by flushing the |
1637 | * PIPE_NOTIFY buffers out to memory before requesting | ||
1638 | * an interrupt. | ||
1639 | */ | ||
1640 | BEGIN_LP_RING(32); | ||
1641 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
1642 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); | ||
1643 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
1644 | OUT_RING(seqno); | ||
1645 | OUT_RING(0); | ||
1646 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1647 | scratch_addr += 128; /* write to separate cachelines */ | ||
1648 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1649 | scratch_addr += 128; | ||
1650 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1651 | scratch_addr += 128; | ||
1652 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1653 | scratch_addr += 128; | ||
1654 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1655 | scratch_addr += 128; | ||
1656 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1657 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
1658 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | | ||
1659 | PIPE_CONTROL_NOTIFY); | ||
1660 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
1661 | OUT_RING(seqno); | ||
1662 | OUT_RING(0); | ||
1663 | ADVANCE_LP_RING(); | ||
1664 | } else { | ||
1665 | BEGIN_LP_RING(4); | ||
1666 | OUT_RING(MI_STORE_DWORD_INDEX); | ||
1667 | OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
1668 | OUT_RING(seqno); | ||
1669 | |||
1670 | OUT_RING(MI_USER_INTERRUPT); | ||
1671 | ADVANCE_LP_RING(); | ||
1672 | } | ||
1619 | 1673 | ||
1620 | DRM_DEBUG("%d\n", seqno); | 1674 | DRM_DEBUG_DRIVER("%d\n", seqno); |
1621 | 1675 | ||
1622 | request->seqno = seqno; | 1676 | request->seqno = seqno; |
1623 | request->emitted_jiffies = jiffies; | 1677 | request->emitted_jiffies = jiffies; |
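
The six unrolled PIPE_CONTROL_FLUSH writes in this hunk are equivalent to the loop below (a condensed sketch; the macro and scratch address come straight from the diff):

    u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
    int i;

    /* Dirty six distinct 128-byte cachelines so that the buffered
     * PIPE_NOTIFY qword writes are forced out to memory before the
     * final, interrupt-raising PIPE_CONTROL is emitted. */
    for (i = 0; i < 6; i++) {
        PIPE_CONTROL_FLUSH(scratch_addr);
        scratch_addr += 128;    /* next cacheline */
    }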
@@ -1633,27 +1687,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1633 | /* Associate any objects on the flushing list matching the write | 1687 | /* Associate any objects on the flushing list matching the write |
1634 | * domain we're flushing with our flush. | 1688 | * domain we're flushing with our flush. |
1635 | */ | 1689 | */ |
1636 | if (flush_domains != 0) { | 1690 | if (flush_domains != 0) |
1637 | struct drm_i915_gem_object *obj_priv, *next; | 1691 | i915_gem_process_flushing_list(dev, flush_domains, seqno); |
1638 | |||
1639 | list_for_each_entry_safe(obj_priv, next, | ||
1640 | &dev_priv->mm.flushing_list, list) { | ||
1641 | struct drm_gem_object *obj = obj_priv->obj; | ||
1642 | |||
1643 | if ((obj->write_domain & flush_domains) == | ||
1644 | obj->write_domain) { | ||
1645 | uint32_t old_write_domain = obj->write_domain; | ||
1646 | |||
1647 | obj->write_domain = 0; | ||
1648 | i915_gem_object_move_to_active(obj, seqno); | ||
1649 | |||
1650 | trace_i915_gem_object_change_domain(obj, | ||
1651 | obj->read_domains, | ||
1652 | old_write_domain); | ||
1653 | } | ||
1654 | } | ||
1655 | |||
1656 | } | ||
1657 | 1692 | ||
1658 | if (!dev_priv->mm.suspended) { | 1693 | if (!dev_priv->mm.suspended) { |
1659 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | 1694 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); |
@@ -1758,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *dev) | |||
1758 | { | 1793 | { |
1759 | drm_i915_private_t *dev_priv = dev->dev_private; | 1794 | drm_i915_private_t *dev_priv = dev->dev_private; |
1760 | 1795 | ||
1761 | return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); | 1796 | if (HAS_PIPE_CONTROL(dev)) |
1797 | return ((volatile u32 *)(dev_priv->seqno_page))[0]; | ||
1798 | else | ||
1799 | return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); | ||
1762 | } | 1800 | } |
1763 | 1801 | ||
1764 | /** | 1802 | /** |
@@ -1820,12 +1858,8 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
1820 | mutex_unlock(&dev->struct_mutex); | 1858 | mutex_unlock(&dev->struct_mutex); |
1821 | } | 1859 | } |
1822 | 1860 | ||
1823 | /** | 1861 | int |
1824 | * Waits for a sequence number to be signaled, and cleans up the | 1862 | i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) |
1825 | * request and object lists appropriately for that event. | ||
1826 | */ | ||
1827 | static int | ||
1828 | i915_wait_request(struct drm_device *dev, uint32_t seqno) | ||
1829 | { | 1863 | { |
1830 | drm_i915_private_t *dev_priv = dev->dev_private; | 1864 | drm_i915_private_t *dev_priv = dev->dev_private; |
1831 | u32 ier; | 1865 | u32 ier; |
@@ -1837,7 +1871,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) | |||
1837 | return -EIO; | 1871 | return -EIO; |
1838 | 1872 | ||
1839 | if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { | 1873 | if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { |
1840 | if (IS_IGDNG(dev)) | 1874 | if (HAS_PCH_SPLIT(dev)) |
1841 | ier = I915_READ(DEIER) | I915_READ(GTIER); | 1875 | ier = I915_READ(DEIER) | I915_READ(GTIER); |
1842 | else | 1876 | else |
1843 | ier = I915_READ(IER); | 1877 | ier = I915_READ(IER); |
@@ -1852,10 +1886,15 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) | |||
1852 | 1886 | ||
1853 | dev_priv->mm.waiting_gem_seqno = seqno; | 1887 | dev_priv->mm.waiting_gem_seqno = seqno; |
1854 | i915_user_irq_get(dev); | 1888 | i915_user_irq_get(dev); |
1855 | ret = wait_event_interruptible(dev_priv->irq_queue, | 1889 | if (interruptible) |
1856 | i915_seqno_passed(i915_get_gem_seqno(dev), | 1890 | ret = wait_event_interruptible(dev_priv->irq_queue, |
1857 | seqno) || | 1891 | i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || |
1858 | atomic_read(&dev_priv->mm.wedged)); | 1892 | atomic_read(&dev_priv->mm.wedged)); |
1893 | else | ||
1894 | wait_event(dev_priv->irq_queue, | ||
1895 | i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || | ||
1896 | atomic_read(&dev_priv->mm.wedged)); | ||
1897 | |||
1859 | i915_user_irq_put(dev); | 1898 | i915_user_irq_put(dev); |
1860 | dev_priv->mm.waiting_gem_seqno = 0; | 1899 | dev_priv->mm.waiting_gem_seqno = 0; |
1861 | 1900 | ||
@@ -1879,6 +1918,16 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) | |||
1879 | return ret; | 1918 | return ret; |
1880 | } | 1919 | } |
1881 | 1920 | ||
1921 | /** | ||
1922 | * Waits for a sequence number to be signaled, and cleans up the | ||
1923 | * request and object lists appropriately for that event. | ||
1924 | */ | ||
1925 | static int | ||
1926 | i915_wait_request(struct drm_device *dev, uint32_t seqno) | ||
1927 | { | ||
1928 | return i915_do_wait_request(dev, seqno, 1); | ||
1929 | } | ||
1930 | |||
1882 | static void | 1931 | static void |
1883 | i915_gem_flush(struct drm_device *dev, | 1932 | i915_gem_flush(struct drm_device *dev, |
1884 | uint32_t invalidate_domains, | 1933 | uint32_t invalidate_domains, |
@@ -1947,7 +1996,7 @@ i915_gem_flush(struct drm_device *dev, | |||
1947 | #endif | 1996 | #endif |
1948 | BEGIN_LP_RING(2); | 1997 | BEGIN_LP_RING(2); |
1949 | OUT_RING(cmd); | 1998 | OUT_RING(cmd); |
1950 | OUT_RING(0); /* noop */ | 1999 | OUT_RING(MI_NOOP); |
1951 | ADVANCE_LP_RING(); | 2000 | ADVANCE_LP_RING(); |
1952 | } | 2001 | } |
1953 | } | 2002 | } |
@@ -1960,7 +2009,7 @@ static int | |||
1960 | i915_gem_object_wait_rendering(struct drm_gem_object *obj) | 2009 | i915_gem_object_wait_rendering(struct drm_gem_object *obj) |
1961 | { | 2010 | { |
1962 | struct drm_device *dev = obj->dev; | 2011 | struct drm_device *dev = obj->dev; |
1963 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2012 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1964 | int ret; | 2013 | int ret; |
1965 | 2014 | ||
1966 | /* This function only exists to support waiting for existing rendering, | 2015 | /* This function only exists to support waiting for existing rendering, |
@@ -1991,7 +2040,8 @@ int | |||
1991 | i915_gem_object_unbind(struct drm_gem_object *obj) | 2040 | i915_gem_object_unbind(struct drm_gem_object *obj) |
1992 | { | 2041 | { |
1993 | struct drm_device *dev = obj->dev; | 2042 | struct drm_device *dev = obj->dev; |
1994 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2043 | drm_i915_private_t *dev_priv = dev->dev_private; |
2044 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1995 | int ret = 0; | 2045 | int ret = 0; |
1996 | 2046 | ||
1997 | #if WATCH_BUF | 2047 | #if WATCH_BUF |
@@ -2009,9 +2059,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2009 | /* blow away mappings if mapped through GTT */ | 2059 | /* blow away mappings if mapped through GTT */ |
2010 | i915_gem_release_mmap(obj); | 2060 | i915_gem_release_mmap(obj); |
2011 | 2061 | ||
2012 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
2013 | i915_gem_clear_fence_reg(obj); | ||
2014 | |||
2015 | /* Move the object to the CPU domain to ensure that | 2062 | /* Move the object to the CPU domain to ensure that |
2016 | * any possible CPU writes while it's not in the GTT | 2063 | * any possible CPU writes while it's not in the GTT |
2017 | * are flushed when we go to remap it. This will | 2064 | * are flushed when we go to remap it. This will |
@@ -2027,6 +2074,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2027 | 2074 | ||
2028 | BUG_ON(obj_priv->active); | 2075 | BUG_ON(obj_priv->active); |
2029 | 2076 | ||
2077 | /* release the fence reg _after_ flushing */ | ||
2078 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
2079 | i915_gem_clear_fence_reg(obj); | ||
2080 | |||
2030 | if (obj_priv->agp_mem != NULL) { | 2081 | if (obj_priv->agp_mem != NULL) { |
2031 | drm_unbind_agp(obj_priv->agp_mem); | 2082 | drm_unbind_agp(obj_priv->agp_mem); |
2032 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); | 2083 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); |
@@ -2045,8 +2096,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2045 | } | 2096 | } |
2046 | 2097 | ||
2047 | /* Remove ourselves from the LRU list if present. */ | 2098 | /* Remove ourselves from the LRU list if present. */ |
2099 | spin_lock(&dev_priv->mm.active_list_lock); | ||
2048 | if (!list_empty(&obj_priv->list)) | 2100 | if (!list_empty(&obj_priv->list)) |
2049 | list_del_init(&obj_priv->list); | 2101 | list_del_init(&obj_priv->list); |
2102 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
2050 | 2103 | ||
2051 | if (i915_gem_object_is_purgeable(obj_priv)) | 2104 | if (i915_gem_object_is_purgeable(obj_priv)) |
2052 | i915_gem_object_truncate(obj); | 2105 | i915_gem_object_truncate(obj); |
@@ -2084,10 +2137,33 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size) | |||
2084 | } | 2137 | } |
2085 | 2138 | ||
2086 | static int | 2139 | static int |
2087 | i915_gem_evict_everything(struct drm_device *dev) | 2140 | i915_gpu_idle(struct drm_device *dev) |
2088 | { | 2141 | { |
2089 | drm_i915_private_t *dev_priv = dev->dev_private; | 2142 | drm_i915_private_t *dev_priv = dev->dev_private; |
2143 | bool lists_empty; | ||
2090 | uint32_t seqno; | 2144 | uint32_t seqno; |
2145 | |||
2146 | spin_lock(&dev_priv->mm.active_list_lock); | ||
2147 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && | ||
2148 | list_empty(&dev_priv->mm.active_list); | ||
2149 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
2150 | |||
2151 | if (lists_empty) | ||
2152 | return 0; | ||
2153 | |||
2154 | /* Flush everything onto the inactive list. */ | ||
2155 | i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | ||
2156 | seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); | ||
2157 | if (seqno == 0) | ||
2158 | return -ENOMEM; | ||
2159 | |||
2160 | return i915_wait_request(dev, seqno); | ||
2161 | } | ||
2162 | |||
2163 | static int | ||
2164 | i915_gem_evict_everything(struct drm_device *dev) | ||
2165 | { | ||
2166 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2091 | int ret; | 2167 | int ret; |
2092 | bool lists_empty; | 2168 | bool lists_empty; |
2093 | 2169 | ||
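
Stripped of its list-empty fast path, the new i915_gpu_idle() packages a recurring idiom from this file (calls exactly as in the hunk):

    /* Flush both GPU domains, tag the flush with a request, then
     * block until that request's seqno passes.  i915_add_request()
     * returns 0 when it cannot allocate the request structure,
     * hence the -ENOMEM mapping. */
    i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
    seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
    if (seqno == 0)
        return -ENOMEM;
    return i915_wait_request(dev, seqno);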
@@ -2101,15 +2177,12 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
2101 | return -ENOSPC; | 2177 | return -ENOSPC; |
2102 | 2178 | ||
2103 | /* Flush everything (on to the inactive lists) and evict */ | 2179 | /* Flush everything (on to the inactive lists) and evict */ |
2104 | i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 2180 | ret = i915_gpu_idle(dev); |
2105 | seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); | ||
2106 | if (seqno == 0) | ||
2107 | return -ENOMEM; | ||
2108 | |||
2109 | ret = i915_wait_request(dev, seqno); | ||
2110 | if (ret) | 2181 | if (ret) |
2111 | return ret; | 2182 | return ret; |
2112 | 2183 | ||
2184 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
2185 | |||
2113 | ret = i915_gem_evict_from_inactive_list(dev); | 2186 | ret = i915_gem_evict_from_inactive_list(dev); |
2114 | if (ret) | 2187 | if (ret) |
2115 | return ret; | 2188 | return ret; |
@@ -2144,7 +2217,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
2144 | #if WATCH_LRU | 2217 | #if WATCH_LRU |
2145 | DRM_INFO("%s: evicting %p\n", __func__, obj); | 2218 | DRM_INFO("%s: evicting %p\n", __func__, obj); |
2146 | #endif | 2219 | #endif |
2147 | obj_priv = obj->driver_private; | 2220 | obj_priv = to_intel_bo(obj); |
2148 | BUG_ON(obj_priv->pin_count != 0); | 2221 | BUG_ON(obj_priv->pin_count != 0); |
2149 | BUG_ON(obj_priv->active); | 2222 | BUG_ON(obj_priv->active); |
2150 | 2223 | ||
@@ -2196,11 +2269,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
2196 | seqno = i915_add_request(dev, NULL, obj->write_domain); | 2269 | seqno = i915_add_request(dev, NULL, obj->write_domain); |
2197 | if (seqno == 0) | 2270 | if (seqno == 0) |
2198 | return -ENOMEM; | 2271 | return -ENOMEM; |
2199 | |||
2200 | ret = i915_wait_request(dev, seqno); | ||
2201 | if (ret) | ||
2202 | return ret; | ||
2203 | |||
2204 | continue; | 2272 | continue; |
2205 | } | 2273 | } |
2206 | } | 2274 | } |
@@ -2217,14 +2285,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
2217 | } | 2285 | } |
2218 | 2286 | ||
2219 | int | 2287 | int |
2220 | i915_gem_object_get_pages(struct drm_gem_object *obj) | 2288 | i915_gem_object_get_pages(struct drm_gem_object *obj, |
2289 | gfp_t gfpmask) | ||
2221 | { | 2290 | { |
2222 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2291 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2223 | int page_count, i; | 2292 | int page_count, i; |
2224 | struct address_space *mapping; | 2293 | struct address_space *mapping; |
2225 | struct inode *inode; | 2294 | struct inode *inode; |
2226 | struct page *page; | 2295 | struct page *page; |
2227 | int ret; | ||
2228 | 2296 | ||
2229 | if (obj_priv->pages_refcount++ != 0) | 2297 | if (obj_priv->pages_refcount++ != 0) |
2230 | return 0; | 2298 | return 0; |
@@ -2243,12 +2311,13 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) | |||
2243 | inode = obj->filp->f_path.dentry->d_inode; | 2311 | inode = obj->filp->f_path.dentry->d_inode; |
2244 | mapping = inode->i_mapping; | 2312 | mapping = inode->i_mapping; |
2245 | for (i = 0; i < page_count; i++) { | 2313 | for (i = 0; i < page_count; i++) { |
2246 | page = read_mapping_page(mapping, i, NULL); | 2314 | page = read_cache_page_gfp(mapping, i, |
2247 | if (IS_ERR(page)) { | 2315 | mapping_gfp_mask (mapping) | |
2248 | ret = PTR_ERR(page); | 2316 | __GFP_COLD | |
2249 | i915_gem_object_put_pages(obj); | 2317 | gfpmask); |
2250 | return ret; | 2318 | if (IS_ERR(page)) |
2251 | } | 2319 | goto err_pages; |
2320 | |||
2252 | obj_priv->pages[i] = page; | 2321 | obj_priv->pages[i] = page; |
2253 | } | 2322 | } |
2254 | 2323 | ||
@@ -2256,6 +2325,37 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) | |||
2256 | i915_gem_object_do_bit_17_swizzle(obj); | 2325 | i915_gem_object_do_bit_17_swizzle(obj); |
2257 | 2326 | ||
2258 | return 0; | 2327 | return 0; |
2328 | |||
2329 | err_pages: | ||
2330 | while (i--) | ||
2331 | page_cache_release(obj_priv->pages[i]); | ||
2332 | |||
2333 | drm_free_large(obj_priv->pages); | ||
2334 | obj_priv->pages = NULL; | ||
2335 | obj_priv->pages_refcount--; | ||
2336 | return PTR_ERR(page); | ||
2337 | } | ||
2338 | |||
2339 | static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) | ||
2340 | { | ||
2341 | struct drm_gem_object *obj = reg->obj; | ||
2342 | struct drm_device *dev = obj->dev; | ||
2343 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2344 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2345 | int regnum = obj_priv->fence_reg; | ||
2346 | uint64_t val; | ||
2347 | |||
2348 | val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & | ||
2349 | 0xfffff000) << 32; | ||
2350 | val |= obj_priv->gtt_offset & 0xfffff000; | ||
2351 | val |= (uint64_t)((obj_priv->stride / 128) - 1) << | ||
2352 | SANDYBRIDGE_FENCE_PITCH_SHIFT; | ||
2353 | |||
2354 | if (obj_priv->tiling_mode == I915_TILING_Y) | ||
2355 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | ||
2356 | val |= I965_FENCE_REG_VALID; | ||
2357 | |||
2358 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); | ||
2259 | } | 2359 | } |
2260 | 2360 | ||
2261 | static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) | 2361 | static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) |
@@ -2263,7 +2363,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2263 | struct drm_gem_object *obj = reg->obj; | 2363 | struct drm_gem_object *obj = reg->obj; |
2264 | struct drm_device *dev = obj->dev; | 2364 | struct drm_device *dev = obj->dev; |
2265 | drm_i915_private_t *dev_priv = dev->dev_private; | 2365 | drm_i915_private_t *dev_priv = dev->dev_private; |
2266 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2366 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2267 | int regnum = obj_priv->fence_reg; | 2367 | int regnum = obj_priv->fence_reg; |
2268 | uint64_t val; | 2368 | uint64_t val; |
2269 | 2369 | ||
@@ -2283,7 +2383,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2283 | struct drm_gem_object *obj = reg->obj; | 2383 | struct drm_gem_object *obj = reg->obj; |
2284 | struct drm_device *dev = obj->dev; | 2384 | struct drm_device *dev = obj->dev; |
2285 | drm_i915_private_t *dev_priv = dev->dev_private; | 2385 | drm_i915_private_t *dev_priv = dev->dev_private; |
2286 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2386 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2287 | int regnum = obj_priv->fence_reg; | 2387 | int regnum = obj_priv->fence_reg; |
2288 | int tile_width; | 2388 | int tile_width; |
2289 | uint32_t fence_reg, val; | 2389 | uint32_t fence_reg, val; |
@@ -2306,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2306 | pitch_val = obj_priv->stride / tile_width; | 2406 | pitch_val = obj_priv->stride / tile_width; |
2307 | pitch_val = ffs(pitch_val) - 1; | 2407 | pitch_val = ffs(pitch_val) - 1; |
2308 | 2408 | ||
2409 | if (obj_priv->tiling_mode == I915_TILING_Y && | ||
2410 | HAS_128_BYTE_Y_TILING(dev)) | ||
2411 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | ||
2412 | else | ||
2413 | WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); | ||
2414 | |||
2309 | val = obj_priv->gtt_offset; | 2415 | val = obj_priv->gtt_offset; |
2310 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2416 | if (obj_priv->tiling_mode == I915_TILING_Y) |
2311 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 2417 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
@@ -2325,7 +2431,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2325 | struct drm_gem_object *obj = reg->obj; | 2431 | struct drm_gem_object *obj = reg->obj; |
2326 | struct drm_device *dev = obj->dev; | 2432 | struct drm_device *dev = obj->dev; |
2327 | drm_i915_private_t *dev_priv = dev->dev_private; | 2433 | drm_i915_private_t *dev_priv = dev->dev_private; |
2328 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2434 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2329 | int regnum = obj_priv->fence_reg; | 2435 | int regnum = obj_priv->fence_reg; |
2330 | uint32_t val; | 2436 | uint32_t val; |
2331 | uint32_t pitch_val; | 2437 | uint32_t pitch_val; |
@@ -2354,6 +2460,58 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2354 | I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); | 2460 | I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); |
2355 | } | 2461 | } |
2356 | 2462 | ||
2463 | static int i915_find_fence_reg(struct drm_device *dev) | ||
2464 | { | ||
2465 | struct drm_i915_fence_reg *reg = NULL; | ||
2466 | struct drm_i915_gem_object *obj_priv = NULL; | ||
2467 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2468 | struct drm_gem_object *obj = NULL; | ||
2469 | int i, avail, ret; | ||
2470 | |||
2471 | /* First try to find a free reg */ | ||
2472 | avail = 0; | ||
2473 | for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { | ||
2474 | reg = &dev_priv->fence_regs[i]; | ||
2475 | if (!reg->obj) | ||
2476 | return i; | ||
2477 | |||
2478 | obj_priv = to_intel_bo(reg->obj); | ||
2479 | if (!obj_priv->pin_count) | ||
2480 | avail++; | ||
2481 | } | ||
2482 | |||
2483 | if (avail == 0) | ||
2484 | return -ENOSPC; | ||
2485 | |||
2486 | /* None available, try to steal one or wait for a user to finish */ | ||
2487 | i = I915_FENCE_REG_NONE; | ||
2488 | list_for_each_entry(obj_priv, &dev_priv->mm.fence_list, | ||
2489 | fence_list) { | ||
2490 | obj = obj_priv->obj; | ||
2491 | |||
2492 | if (obj_priv->pin_count) | ||
2493 | continue; | ||
2494 | |||
2495 | /* found one! */ | ||
2496 | i = obj_priv->fence_reg; | ||
2497 | break; | ||
2498 | } | ||
2499 | |||
2500 | BUG_ON(i == I915_FENCE_REG_NONE); | ||
2501 | |||
2502 | /* We only have a reference on obj from the active list. put_fence_reg | ||
2503 | * might drop that one, causing a use-after-free in it. So hold a | ||
2504 | * private reference to obj like the other callers of put_fence_reg | ||
2505 | * (set_tiling ioctl) do. */ | ||
2506 | drm_gem_object_reference(obj); | ||
2507 | ret = i915_gem_object_put_fence_reg(obj); | ||
2508 | drm_gem_object_unreference(obj); | ||
2509 | if (ret != 0) | ||
2510 | return ret; | ||
2511 | |||
2512 | return i; | ||
2513 | } | ||
2514 | |||
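
The temporary reference at the end of i915_find_fence_reg() is a general GEM lifetime pattern worth calling out (calls as in the hunk):

    /* The only reference held here belongs to the active list, and
     * put_fence_reg may wait for rendering and drop it, freeing the
     * object underneath us.  Pin it with a private reference for
     * the duration of the call. */
    drm_gem_object_reference(obj);
    ret = i915_gem_object_put_fence_reg(obj);
    drm_gem_object_unreference(obj);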
2357 | /** | 2515 | /** |
2358 | * i915_gem_object_get_fence_reg - set up a fence reg for an object | 2516 | * i915_gem_object_get_fence_reg - set up a fence reg for an object |
2359 | * @obj: object to map through a fence reg | 2517 | * @obj: object to map through a fence reg |
@@ -2372,10 +2530,9 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | |||
2372 | { | 2530 | { |
2373 | struct drm_device *dev = obj->dev; | 2531 | struct drm_device *dev = obj->dev; |
2374 | struct drm_i915_private *dev_priv = dev->dev_private; | 2532 | struct drm_i915_private *dev_priv = dev->dev_private; |
2375 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2533 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2376 | struct drm_i915_fence_reg *reg = NULL; | 2534 | struct drm_i915_fence_reg *reg = NULL; |
2377 | struct drm_i915_gem_object *old_obj_priv = NULL; | 2535 | int ret; |
2378 | int i, ret, avail; | ||
2379 | 2536 | ||
2380 | /* Just update our place in the LRU if our fence is getting used. */ | 2537 | /* Just update our place in the LRU if our fence is getting used. */ |
2381 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 2538 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { |
@@ -2403,86 +2560,27 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | |||
2403 | break; | 2560 | break; |
2404 | } | 2561 | } |
2405 | 2562 | ||
2406 | /* First try to find a free reg */ | 2563 | ret = i915_find_fence_reg(dev); |
2407 | avail = 0; | 2564 | if (ret < 0) |
2408 | for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { | 2565 | return ret; |
2409 | reg = &dev_priv->fence_regs[i]; | ||
2410 | if (!reg->obj) | ||
2411 | break; | ||
2412 | |||
2413 | old_obj_priv = reg->obj->driver_private; | ||
2414 | if (!old_obj_priv->pin_count) | ||
2415 | avail++; | ||
2416 | } | ||
2417 | |||
2418 | /* None available, try to steal one or wait for a user to finish */ | ||
2419 | if (i == dev_priv->num_fence_regs) { | ||
2420 | struct drm_gem_object *old_obj = NULL; | ||
2421 | |||
2422 | if (avail == 0) | ||
2423 | return -ENOSPC; | ||
2424 | |||
2425 | list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list, | ||
2426 | fence_list) { | ||
2427 | old_obj = old_obj_priv->obj; | ||
2428 | |||
2429 | if (old_obj_priv->pin_count) | ||
2430 | continue; | ||
2431 | |||
2432 | /* Take a reference, as otherwise the wait_rendering | ||
2433 | * below may cause the object to get freed out from | ||
2434 | * under us. | ||
2435 | */ | ||
2436 | drm_gem_object_reference(old_obj); | ||
2437 | |||
2438 | /* i915 uses fences for GPU access to tiled buffers */ | ||
2439 | if (IS_I965G(dev) || !old_obj_priv->active) | ||
2440 | break; | ||
2441 | |||
2442 | /* This brings the object to the head of the LRU if it | ||
2443 | * had been written to. The only way this should | ||
2444 | * result in us waiting longer than the expected | ||
2445 | * optimal amount of time is if there was a | ||
2446 | * fence-using buffer later that was read-only. | ||
2447 | */ | ||
2448 | i915_gem_object_flush_gpu_write_domain(old_obj); | ||
2449 | ret = i915_gem_object_wait_rendering(old_obj); | ||
2450 | if (ret != 0) { | ||
2451 | drm_gem_object_unreference(old_obj); | ||
2452 | return ret; | ||
2453 | } | ||
2454 | |||
2455 | break; | ||
2456 | } | ||
2457 | |||
2458 | /* | ||
2459 | * Zap this virtual mapping so we can set up a fence again | ||
2460 | * for this object next time we need it. | ||
2461 | */ | ||
2462 | i915_gem_release_mmap(old_obj); | ||
2463 | |||
2464 | i = old_obj_priv->fence_reg; | ||
2465 | reg = &dev_priv->fence_regs[i]; | ||
2466 | |||
2467 | old_obj_priv->fence_reg = I915_FENCE_REG_NONE; | ||
2468 | list_del_init(&old_obj_priv->fence_list); | ||
2469 | |||
2470 | drm_gem_object_unreference(old_obj); | ||
2471 | } | ||
2472 | 2566 | ||
2473 | obj_priv->fence_reg = i; | 2567 | obj_priv->fence_reg = ret; |
2568 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | ||
2474 | list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); | 2569 | list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); |
2475 | 2570 | ||
2476 | reg->obj = obj; | 2571 | reg->obj = obj; |
2477 | 2572 | ||
2478 | if (IS_I965G(dev)) | 2573 | if (IS_GEN6(dev)) |
2574 | sandybridge_write_fence_reg(reg); | ||
2575 | else if (IS_I965G(dev)) | ||
2479 | i965_write_fence_reg(reg); | 2576 | i965_write_fence_reg(reg); |
2480 | else if (IS_I9XX(dev)) | 2577 | else if (IS_I9XX(dev)) |
2481 | i915_write_fence_reg(reg); | 2578 | i915_write_fence_reg(reg); |
2482 | else | 2579 | else |
2483 | i830_write_fence_reg(reg); | 2580 | i830_write_fence_reg(reg); |
2484 | 2581 | ||
2485 | trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode); | 2582 | trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, |
2583 | obj_priv->tiling_mode); | ||
2486 | 2584 | ||
2487 | return 0; | 2585 | return 0; |
2488 | } | 2586 | } |
@@ -2499,11 +2597,14 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) | |||
2499 | { | 2597 | { |
2500 | struct drm_device *dev = obj->dev; | 2598 | struct drm_device *dev = obj->dev; |
2501 | drm_i915_private_t *dev_priv = dev->dev_private; | 2599 | drm_i915_private_t *dev_priv = dev->dev_private; |
2502 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2600 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2503 | 2601 | ||
2504 | if (IS_I965G(dev)) | 2602 | if (IS_GEN6(dev)) { |
2603 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + | ||
2604 | (obj_priv->fence_reg * 8), 0); | ||
2605 | } else if (IS_I965G(dev)) { | ||
2505 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); | 2606 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); |
2506 | else { | 2607 | } else { |
2507 | uint32_t fence_reg; | 2608 | uint32_t fence_reg; |
2508 | 2609 | ||
2509 | if (obj_priv->fence_reg < 8) | 2610 | if (obj_priv->fence_reg < 8) |
@@ -2532,11 +2633,17 @@ int | |||
2532 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj) | 2633 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj) |
2533 | { | 2634 | { |
2534 | struct drm_device *dev = obj->dev; | 2635 | struct drm_device *dev = obj->dev; |
2535 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2636 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2536 | 2637 | ||
2537 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) | 2638 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) |
2538 | return 0; | 2639 | return 0; |
2539 | 2640 | ||
2641 | /* If we've changed tiling, GTT-mappings of the object | ||
2642 | * need to re-fault to ensure that the correct fence register | ||
2643 | * setup is in place. | ||
2644 | */ | ||
2645 | i915_gem_release_mmap(obj); | ||
2646 | |||
2540 | /* On the i915, GPU access to tiled buffers is via a fence, | 2647 | /* On the i915, GPU access to tiled buffers is via a fence, |
2541 | * therefore we must wait for any outstanding access to complete | 2648 | * therefore we must wait for any outstanding access to complete |
2542 | * before clearing the fence. | 2649 | * before clearing the fence. |
@@ -2545,12 +2652,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj) | |||
2545 | int ret; | 2652 | int ret; |
2546 | 2653 | ||
2547 | i915_gem_object_flush_gpu_write_domain(obj); | 2654 | i915_gem_object_flush_gpu_write_domain(obj); |
2548 | i915_gem_object_flush_gtt_write_domain(obj); | ||
2549 | ret = i915_gem_object_wait_rendering(obj); | 2655 | ret = i915_gem_object_wait_rendering(obj); |
2550 | if (ret != 0) | 2656 | if (ret != 0) |
2551 | return ret; | 2657 | return ret; |
2552 | } | 2658 | } |
2553 | 2659 | ||
2660 | i915_gem_object_flush_gtt_write_domain(obj); | ||
2554 | i915_gem_clear_fence_reg (obj); | 2661 | i915_gem_clear_fence_reg (obj); |
2555 | 2662 | ||
2556 | return 0; | 2663 | return 0; |
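Two ordering changes land in this hunk: the new i915_gem_release_mmap() call makes GTT mappings re-fault so they pick up the post-tiling-change fence state, and the GTT write-domain flush moves out of the active-only branch so it runs unconditionally after the rendering wait. A trivial standalone sketch of the resulting sequence -- the four calls are stubs mirroring the driver's function names, not the driver API itself:

    #include <stdio.h>

    static void flush_gpu_write_domain(void) { puts("1: queue GPU cache flush"); }
    static void wait_rendering(void)         { puts("2: wait for rendering"); }
    static void flush_gtt_write_domain(void) { puts("3: flush GTT writes"); }
    static void clear_fence_reg(void)        { puts("4: tear down the fence"); }

    int main(void)
    {
        /* New order: the GTT flush runs unconditionally after the wait,
         * so writes that land while we sleep are still flushed before
         * the fence register is cleared. */
        flush_gpu_write_domain();
        wait_rendering();
        flush_gtt_write_domain();
        clear_fence_reg();
        return 0;
    }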
@@ -2564,14 +2671,11 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2564 | { | 2671 | { |
2565 | struct drm_device *dev = obj->dev; | 2672 | struct drm_device *dev = obj->dev; |
2566 | drm_i915_private_t *dev_priv = dev->dev_private; | 2673 | drm_i915_private_t *dev_priv = dev->dev_private; |
2567 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2674 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2568 | struct drm_mm_node *free_space; | 2675 | struct drm_mm_node *free_space; |
2569 | bool retry_alloc = false; | 2676 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; |
2570 | int ret; | 2677 | int ret; |
2571 | 2678 | ||
2572 | if (dev_priv->mm.suspended) | ||
2573 | return -EBUSY; | ||
2574 | |||
2575 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 2679 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
2576 | DRM_ERROR("Attempting to bind a purgeable object\n"); | 2680 | DRM_ERROR("Attempting to bind a purgeable object\n"); |
2577 | return -EINVAL; | 2681 | return -EINVAL; |
@@ -2613,15 +2717,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2613 | DRM_INFO("Binding object of size %zd at 0x%08x\n", | 2717 | DRM_INFO("Binding object of size %zd at 0x%08x\n", |
2614 | obj->size, obj_priv->gtt_offset); | 2718 | obj->size, obj_priv->gtt_offset); |
2615 | #endif | 2719 | #endif |
2616 | if (retry_alloc) { | 2720 | ret = i915_gem_object_get_pages(obj, gfpmask); |
2617 | i915_gem_object_set_page_gfp_mask (obj, | ||
2618 | i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY); | ||
2619 | } | ||
2620 | ret = i915_gem_object_get_pages(obj); | ||
2621 | if (retry_alloc) { | ||
2622 | i915_gem_object_set_page_gfp_mask (obj, | ||
2623 | i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY); | ||
2624 | } | ||
2625 | if (ret) { | 2721 | if (ret) { |
2626 | drm_mm_put_block(obj_priv->gtt_space); | 2722 | drm_mm_put_block(obj_priv->gtt_space); |
2627 | obj_priv->gtt_space = NULL; | 2723 | obj_priv->gtt_space = NULL; |
@@ -2631,9 +2727,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2631 | ret = i915_gem_evict_something(dev, obj->size); | 2727 | ret = i915_gem_evict_something(dev, obj->size); |
2632 | if (ret) { | 2728 | if (ret) { |
2633 | /* now try to shrink everyone else */ | 2729 | /* now try to shrink everyone else */ |
2634 | if (! retry_alloc) { | 2730 | if (gfpmask) { |
2635 | retry_alloc = true; | 2731 | gfpmask = 0; |
2636 | goto search_free; | 2732 | goto search_free; |
2637 | } | 2733 | } |
2638 | 2734 | ||
2639 | return ret; | 2735 | return ret; |
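The retry_alloc bool becomes a gfpmask threaded into get_pages: the first pass uses __GFP_NORETRY | __GFP_NOWARN so the page allocator fails fast and quietly, and only when eviction can find nothing left to free does the bind path clear the mask and let the allocator reclaim aggressively. A userspace model of that control flow; the flag values and both stubs are placeholders:

    #include <stdio.h>

    #define GFP_NORETRY 0x1   /* placeholder for __GFP_NORETRY */
    #define GFP_NOWARN  0x2   /* placeholder for __GFP_NOWARN  */

    /* Stub: the cheap, non-reclaiming attempt fails; mask 0 succeeds. */
    static int get_pages(unsigned gfp) { return (gfp & GFP_NORETRY) ? -1 : 0; }

    /* Stub: pretend there is nothing left to evict. */
    static int evict_something(void) { return -1; }

    int main(void)
    {
        unsigned gfpmask = GFP_NORETRY | GFP_NOWARN;
        int ret;

    search_free:
        ret = get_pages(gfpmask);
        if (ret != 0) {
            if (evict_something() != 0) {
                if (gfpmask) {
                    /* Nothing evictable: retry and let the
                     * allocator work hard on the second pass. */
                    gfpmask = 0;
                    goto search_free;
                }
                return 1;   /* truly out of memory */
            }
            goto search_free;
        }
        puts("bound with pages");
        return 0;
    }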
@@ -2682,7 +2778,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2682 | void | 2778 | void |
2683 | i915_gem_clflush_object(struct drm_gem_object *obj) | 2779 | i915_gem_clflush_object(struct drm_gem_object *obj) |
2684 | { | 2780 | { |
2685 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2781 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2686 | 2782 | ||
2687 | /* If we don't have a page list set up, then we're not pinned | 2783 | /* If we don't have a page list set up, then we're not pinned |
2688 | * to GPU, and we can ignore the cache flush because it'll happen | 2784 | * to GPU, and we can ignore the cache flush because it'll happen |
@@ -2701,7 +2797,6 @@ static void | |||
2701 | i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | 2797 | i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) |
2702 | { | 2798 | { |
2703 | struct drm_device *dev = obj->dev; | 2799 | struct drm_device *dev = obj->dev; |
2704 | uint32_t seqno; | ||
2705 | uint32_t old_write_domain; | 2800 | uint32_t old_write_domain; |
2706 | 2801 | ||
2707 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | 2802 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) |
@@ -2710,9 +2805,8 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | |||
2710 | /* Queue the GPU write cache flushing we need. */ | 2805 | /* Queue the GPU write cache flushing we need. */ |
2711 | old_write_domain = obj->write_domain; | 2806 | old_write_domain = obj->write_domain; |
2712 | i915_gem_flush(dev, 0, obj->write_domain); | 2807 | i915_gem_flush(dev, 0, obj->write_domain); |
2713 | seqno = i915_add_request(dev, NULL, obj->write_domain); | 2808 | (void) i915_add_request(dev, NULL, obj->write_domain); |
2714 | obj->write_domain = 0; | 2809 | BUG_ON(obj->write_domain); |
2715 | i915_gem_object_move_to_active(obj, seqno); | ||
2716 | 2810 | ||
2717 | trace_i915_gem_object_change_domain(obj, | 2811 | trace_i915_gem_object_change_domain(obj, |
2718 | obj->read_domains, | 2812 | obj->read_domains, |
@@ -2760,6 +2854,22 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | |||
2760 | old_write_domain); | 2854 | old_write_domain); |
2761 | } | 2855 | } |
2762 | 2856 | ||
2857 | void | ||
2858 | i915_gem_object_flush_write_domain(struct drm_gem_object *obj) | ||
2859 | { | ||
2860 | switch (obj->write_domain) { | ||
2861 | case I915_GEM_DOMAIN_GTT: | ||
2862 | i915_gem_object_flush_gtt_write_domain(obj); | ||
2863 | break; | ||
2864 | case I915_GEM_DOMAIN_CPU: | ||
2865 | i915_gem_object_flush_cpu_write_domain(obj); | ||
2866 | break; | ||
2867 | default: | ||
2868 | i915_gem_object_flush_gpu_write_domain(obj); | ||
2869 | break; | ||
2870 | } | ||
2871 | } | ||
2872 | |||
2763 | /** | 2873 | /** |
2764 | * Moves a single object to the GTT read, and possibly write domain. | 2874 | * Moves a single object to the GTT read, and possibly write domain. |
2765 | * | 2875 | * |
@@ -2769,7 +2879,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | |||
2769 | int | 2879 | int |
2770 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | 2880 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) |
2771 | { | 2881 | { |
2772 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2882 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2773 | uint32_t old_write_domain, old_read_domains; | 2883 | uint32_t old_write_domain, old_read_domains; |
2774 | int ret; | 2884 | int ret; |
2775 | 2885 | ||
@@ -2811,6 +2921,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2811 | return 0; | 2921 | return 0; |
2812 | } | 2922 | } |
2813 | 2923 | ||
2924 | /* | ||
2925 | * Prepare buffer for display plane. Use uninterruptible for a possible flush | ||
2926 | * wait, since during modesetting we're not supposed to be interrupted. | ||
2927 | */ | ||
2928 | int | ||
2929 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) | ||
2930 | { | ||
2931 | struct drm_device *dev = obj->dev; | ||
2932 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2933 | uint32_t old_write_domain, old_read_domains; | ||
2934 | int ret; | ||
2935 | |||
2936 | /* Not valid to be called on unbound objects. */ | ||
2937 | if (obj_priv->gtt_space == NULL) | ||
2938 | return -EINVAL; | ||
2939 | |||
2940 | i915_gem_object_flush_gpu_write_domain(obj); | ||
2941 | |||
2942 | /* Wait on any GPU rendering and flushing to occur. */ | ||
2943 | if (obj_priv->active) { | ||
2944 | #if WATCH_BUF | ||
2945 | DRM_INFO("%s: object %p wait for seqno %08x\n", | ||
2946 | __func__, obj, obj_priv->last_rendering_seqno); | ||
2947 | #endif | ||
2948 | ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0); | ||
2949 | if (ret != 0) | ||
2950 | return ret; | ||
2951 | } | ||
2952 | |||
2953 | old_write_domain = obj->write_domain; | ||
2954 | old_read_domains = obj->read_domains; | ||
2955 | |||
2956 | obj->read_domains &= I915_GEM_DOMAIN_GTT; | ||
2957 | |||
2958 | i915_gem_object_flush_cpu_write_domain(obj); | ||
2959 | |||
2960 | /* It should now be out of any other write domains, and we can update | ||
2961 | * the domain values for our changes. | ||
2962 | */ | ||
2963 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | ||
2964 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | ||
2965 | obj->write_domain = I915_GEM_DOMAIN_GTT; | ||
2966 | obj_priv->dirty = 1; | ||
2967 | |||
2968 | trace_i915_gem_object_change_domain(obj, | ||
2969 | old_read_domains, | ||
2970 | old_write_domain); | ||
2971 | |||
2972 | return 0; | ||
2973 | } | ||
2974 | |||
2814 | /** | 2975 | /** |
2815 | * Moves a single object to the CPU read, and possibly write domain. | 2976 | * Moves a single object to the CPU read, and possibly write domain. |
2816 | * | 2977 | * |
@@ -2981,7 +3142,7 @@ static void | |||
2981 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | 3142 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) |
2982 | { | 3143 | { |
2983 | struct drm_device *dev = obj->dev; | 3144 | struct drm_device *dev = obj->dev; |
2984 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3145 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2985 | uint32_t invalidate_domains = 0; | 3146 | uint32_t invalidate_domains = 0; |
2986 | uint32_t flush_domains = 0; | 3147 | uint32_t flush_domains = 0; |
2987 | uint32_t old_read_domains; | 3148 | uint32_t old_read_domains; |
@@ -3066,7 +3227,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
3066 | static void | 3227 | static void |
3067 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | 3228 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) |
3068 | { | 3229 | { |
3069 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3230 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3070 | 3231 | ||
3071 | if (!obj_priv->page_cpu_valid) | 3232 | if (!obj_priv->page_cpu_valid) |
3072 | return; | 3233 | return; |
@@ -3106,7 +3267,7 @@ static int | |||
3106 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 3267 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, |
3107 | uint64_t offset, uint64_t size) | 3268 | uint64_t offset, uint64_t size) |
3108 | { | 3269 | { |
3109 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3270 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3110 | uint32_t old_read_domains; | 3271 | uint32_t old_read_domains; |
3111 | int i, ret; | 3272 | int i, ret; |
3112 | 3273 | ||
@@ -3170,20 +3331,44 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
3170 | static int | 3331 | static int |
3171 | i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | 3332 | i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, |
3172 | struct drm_file *file_priv, | 3333 | struct drm_file *file_priv, |
3173 | struct drm_i915_gem_exec_object *entry, | 3334 | struct drm_i915_gem_exec_object2 *entry, |
3174 | struct drm_i915_gem_relocation_entry *relocs) | 3335 | struct drm_i915_gem_relocation_entry *relocs) |
3175 | { | 3336 | { |
3176 | struct drm_device *dev = obj->dev; | 3337 | struct drm_device *dev = obj->dev; |
3177 | drm_i915_private_t *dev_priv = dev->dev_private; | 3338 | drm_i915_private_t *dev_priv = dev->dev_private; |
3178 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3339 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3179 | int i, ret; | 3340 | int i, ret; |
3180 | void __iomem *reloc_page; | 3341 | void __iomem *reloc_page; |
3342 | bool need_fence; | ||
3343 | |||
3344 | need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
3345 | obj_priv->tiling_mode != I915_TILING_NONE; | ||
3346 | |||
3347 | /* Check fence reg constraints and rebind if necessary */ | ||
3348 | if (need_fence && !i915_gem_object_fence_offset_ok(obj, | ||
3349 | obj_priv->tiling_mode)) | ||
3350 | i915_gem_object_unbind(obj); | ||
3181 | 3351 | ||
3182 | /* Choose the GTT offset for our buffer and put it there. */ | 3352 | /* Choose the GTT offset for our buffer and put it there. */ |
3183 | ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); | 3353 | ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); |
3184 | if (ret) | 3354 | if (ret) |
3185 | return ret; | 3355 | return ret; |
3186 | 3356 | ||
3357 | /* | ||
3358 | * Pre-965 chips need a fence register set up in order to | ||
3359 | * properly handle blits to/from tiled surfaces. | ||
3360 | */ | ||
3361 | if (need_fence) { | ||
3362 | ret = i915_gem_object_get_fence_reg(obj); | ||
3363 | if (ret != 0) { | ||
3364 | if (ret != -EBUSY && ret != -ERESTARTSYS) | ||
3365 | DRM_ERROR("Failure to install fence: %d\n", | ||
3366 | ret); | ||
3367 | i915_gem_object_unpin(obj); | ||
3368 | return ret; | ||
3369 | } | ||
3370 | } | ||
3371 | |||
3187 | entry->offset = obj_priv->gtt_offset; | 3372 | entry->offset = obj_priv->gtt_offset; |
3188 | 3373 | ||
3189 | /* Apply the relocations, using the GTT aperture to avoid cache | 3374 | /* Apply the relocations, using the GTT aperture to avoid cache |
@@ -3202,7 +3387,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3202 | i915_gem_object_unpin(obj); | 3387 | i915_gem_object_unpin(obj); |
3203 | return -EBADF; | 3388 | return -EBADF; |
3204 | } | 3389 | } |
3205 | target_obj_priv = target_obj->driver_private; | 3390 | target_obj_priv = to_intel_bo(target_obj); |
3206 | 3391 | ||
3207 | #if WATCH_RELOC | 3392 | #if WATCH_RELOC |
3208 | DRM_INFO("%s: obj %p offset %08x target %d " | 3393 | DRM_INFO("%s: obj %p offset %08x target %d " |
@@ -3231,6 +3416,16 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3231 | } | 3416 | } |
3232 | 3417 | ||
3233 | /* Validate that the target is in a valid r/w GPU domain */ | 3418 | /* Validate that the target is in a valid r/w GPU domain */ |
3419 | if (reloc->write_domain & (reloc->write_domain - 1)) { | ||
3420 | DRM_ERROR("reloc with multiple write domains: " | ||
3421 | "obj %p target %d offset %d " | ||
3422 | "read %08x write %08x", | ||
3423 | obj, reloc->target_handle, | ||
3424 | (int) reloc->offset, | ||
3425 | reloc->read_domains, | ||
3426 | reloc->write_domain); | ||
3427 | return -EINVAL; | ||
3428 | } | ||
3234 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || | 3429 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || |
3235 | reloc->read_domains & I915_GEM_DOMAIN_CPU) { | 3430 | reloc->read_domains & I915_GEM_DOMAIN_CPU) { |
3236 | DRM_ERROR("reloc with read/write CPU domains: " | 3431 | DRM_ERROR("reloc with read/write CPU domains: " |
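The new validation rejects relocations that name more than one write domain using the standard `x & (x - 1)` trick: the expression is zero exactly when at most one bit is set. A self-contained check of that property (the domain bit values here are arbitrary):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Nonzero iff more than one bit -- more than one write domain -- is set. */
    static uint32_t multiple_bits(uint32_t x)
    {
        return x & (x - 1);
    }

    int main(void)
    {
        assert(multiple_bits(0x00) == 0);          /* no write domain: ok */
        assert(multiple_bits(0x40) == 0);          /* one domain: ok      */
        assert(multiple_bits(0x40 | 0x02) != 0);   /* two domains: reject */
        puts("write-domain mask checks pass");
        return 0;
    }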
@@ -3345,7 +3540,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3345 | */ | 3540 | */ |
3346 | static int | 3541 | static int |
3347 | i915_dispatch_gem_execbuffer(struct drm_device *dev, | 3542 | i915_dispatch_gem_execbuffer(struct drm_device *dev, |
3348 | struct drm_i915_gem_execbuffer *exec, | 3543 | struct drm_i915_gem_execbuffer2 *exec, |
3349 | struct drm_clip_rect *cliprects, | 3544 | struct drm_clip_rect *cliprects, |
3350 | uint64_t exec_offset) | 3545 | uint64_t exec_offset) |
3351 | { | 3546 | { |
@@ -3435,7 +3630,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) | |||
3435 | } | 3630 | } |
3436 | 3631 | ||
3437 | static int | 3632 | static int |
3438 | i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | 3633 | i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list, |
3439 | uint32_t buffer_count, | 3634 | uint32_t buffer_count, |
3440 | struct drm_i915_gem_relocation_entry **relocs) | 3635 | struct drm_i915_gem_relocation_entry **relocs) |
3441 | { | 3636 | { |
@@ -3450,8 +3645,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | |||
3450 | } | 3645 | } |
3451 | 3646 | ||
3452 | *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); | 3647 | *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); |
3453 | if (*relocs == NULL) | 3648 | if (*relocs == NULL) { |
3649 | DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count); | ||
3454 | return -ENOMEM; | 3650 | return -ENOMEM; |
3651 | } | ||
3455 | 3652 | ||
3456 | for (i = 0; i < buffer_count; i++) { | 3653 | for (i = 0; i < buffer_count; i++) { |
3457 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 3654 | struct drm_i915_gem_relocation_entry __user *user_relocs; |
@@ -3475,13 +3672,16 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | |||
3475 | } | 3672 | } |
3476 | 3673 | ||
3477 | static int | 3674 | static int |
3478 | i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, | 3675 | i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, |
3479 | uint32_t buffer_count, | 3676 | uint32_t buffer_count, |
3480 | struct drm_i915_gem_relocation_entry *relocs) | 3677 | struct drm_i915_gem_relocation_entry *relocs) |
3481 | { | 3678 | { |
3482 | uint32_t reloc_count = 0, i; | 3679 | uint32_t reloc_count = 0, i; |
3483 | int ret = 0; | 3680 | int ret = 0; |
3484 | 3681 | ||
3682 | if (relocs == NULL) | ||
3683 | return 0; | ||
3684 | |||
3485 | for (i = 0; i < buffer_count; i++) { | 3685 | for (i = 0; i < buffer_count; i++) { |
3486 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 3686 | struct drm_i915_gem_relocation_entry __user *user_relocs; |
3487 | int unwritten; | 3687 | int unwritten; |
@@ -3508,7 +3708,7 @@ err: | |||
3508 | } | 3708 | } |
3509 | 3709 | ||
3510 | static int | 3710 | static int |
3511 | i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec, | 3711 | i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, |
3512 | uint64_t exec_offset) | 3712 | uint64_t exec_offset) |
3513 | { | 3713 | { |
3514 | uint32_t exec_start, exec_len; | 3714 | uint32_t exec_start, exec_len; |
@@ -3525,22 +3725,57 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec, | |||
3525 | return 0; | 3725 | return 0; |
3526 | } | 3726 | } |
3527 | 3727 | ||
3728 | static int | ||
3729 | i915_gem_wait_for_pending_flip(struct drm_device *dev, | ||
3730 | struct drm_gem_object **object_list, | ||
3731 | int count) | ||
3732 | { | ||
3733 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3734 | struct drm_i915_gem_object *obj_priv; | ||
3735 | DEFINE_WAIT(wait); | ||
3736 | int i, ret = 0; | ||
3737 | |||
3738 | for (;;) { | ||
3739 | prepare_to_wait(&dev_priv->pending_flip_queue, | ||
3740 | &wait, TASK_INTERRUPTIBLE); | ||
3741 | for (i = 0; i < count; i++) { | ||
3742 | obj_priv = to_intel_bo(object_list[i]); | ||
3743 | if (atomic_read(&obj_priv->pending_flip) > 0) | ||
3744 | break; | ||
3745 | } | ||
3746 | if (i == count) | ||
3747 | break; | ||
3748 | |||
3749 | if (!signal_pending(current)) { | ||
3750 | mutex_unlock(&dev->struct_mutex); | ||
3751 | schedule(); | ||
3752 | mutex_lock(&dev->struct_mutex); | ||
3753 | continue; | ||
3754 | } | ||
3755 | ret = -ERESTARTSYS; | ||
3756 | break; | ||
3757 | } | ||
3758 | finish_wait(&dev_priv->pending_flip_queue, &wait); | ||
3759 | |||
3760 | return ret; | ||
3761 | } | ||
3762 | |||
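The helper above open-codes the kernel wait-queue idiom: prepare_to_wait() marks the task before the predicate is re-tested, struct_mutex is dropped across schedule() so the flip completion handler can make progress, and a pending signal aborts with -ERESTARTSYS. A rough pthread analogue of the same shape -- the names are illustrative, and a condition variable hides the prepare/finish steps that the kernel spells out:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  flip_done = PTHREAD_COND_INITIALIZER;
    static int pending_flips = 1;

    static void *flipper(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        pending_flips = 0;                /* the "flip" completes */
        pthread_cond_signal(&flip_done);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, flipper, NULL);

        pthread_mutex_lock(&lock);
        /* As in the kernel loop: recheck the predicate after every
         * wakeup; cond_wait drops the lock while sleeping, just as the
         * driver drops struct_mutex around schedule(). */
        while (pending_flips > 0)
            pthread_cond_wait(&flip_done, &lock);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        puts("all pending flips drained");
        return 0;
    }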
3528 | int | 3763 | int |
3529 | i915_gem_execbuffer(struct drm_device *dev, void *data, | 3764 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, |
3530 | struct drm_file *file_priv) | 3765 | struct drm_file *file_priv, |
3766 | struct drm_i915_gem_execbuffer2 *args, | ||
3767 | struct drm_i915_gem_exec_object2 *exec_list) | ||
3531 | { | 3768 | { |
3532 | drm_i915_private_t *dev_priv = dev->dev_private; | 3769 | drm_i915_private_t *dev_priv = dev->dev_private; |
3533 | struct drm_i915_gem_execbuffer *args = data; | ||
3534 | struct drm_i915_gem_exec_object *exec_list = NULL; | ||
3535 | struct drm_gem_object **object_list = NULL; | 3770 | struct drm_gem_object **object_list = NULL; |
3536 | struct drm_gem_object *batch_obj; | 3771 | struct drm_gem_object *batch_obj; |
3537 | struct drm_i915_gem_object *obj_priv; | 3772 | struct drm_i915_gem_object *obj_priv; |
3538 | struct drm_clip_rect *cliprects = NULL; | 3773 | struct drm_clip_rect *cliprects = NULL; |
3539 | struct drm_i915_gem_relocation_entry *relocs; | 3774 | struct drm_i915_gem_relocation_entry *relocs = NULL; |
3540 | int ret, ret2, i, pinned = 0; | 3775 | int ret = 0, ret2, i, pinned = 0; |
3541 | uint64_t exec_offset; | 3776 | uint64_t exec_offset; |
3542 | uint32_t seqno, flush_domains, reloc_index; | 3777 | uint32_t seqno, flush_domains, reloc_index; |
3543 | int pin_tries; | 3778 | int pin_tries, flips; |
3544 | 3779 | ||
3545 | #if WATCH_EXEC | 3780 | #if WATCH_EXEC |
3546 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | 3781 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", |
@@ -3551,31 +3786,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3551 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | 3786 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); |
3552 | return -EINVAL; | 3787 | return -EINVAL; |
3553 | } | 3788 | } |
3554 | /* Copy in the exec list from userland */ | 3789 | object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); |
3555 | exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count); | 3790 | if (object_list == NULL) { |
3556 | object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count); | 3791 | DRM_ERROR("Failed to allocate object list for %d buffers\n", |
3557 | if (exec_list == NULL || object_list == NULL) { | ||
3558 | DRM_ERROR("Failed to allocate exec or object list " | ||
3559 | "for %d buffers\n", | ||
3560 | args->buffer_count); | 3792 | args->buffer_count); |
3561 | ret = -ENOMEM; | 3793 | ret = -ENOMEM; |
3562 | goto pre_mutex_err; | 3794 | goto pre_mutex_err; |
3563 | } | 3795 | } |
3564 | ret = copy_from_user(exec_list, | ||
3565 | (struct drm_i915_relocation_entry __user *) | ||
3566 | (uintptr_t) args->buffers_ptr, | ||
3567 | sizeof(*exec_list) * args->buffer_count); | ||
3568 | if (ret != 0) { | ||
3569 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
3570 | args->buffer_count, ret); | ||
3571 | goto pre_mutex_err; | ||
3572 | } | ||
3573 | 3796 | ||
3574 | if (args->num_cliprects != 0) { | 3797 | if (args->num_cliprects != 0) { |
3575 | cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), | 3798 | cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), |
3576 | GFP_KERNEL); | 3799 | GFP_KERNEL); |
3577 | if (cliprects == NULL) | 3800 | if (cliprects == NULL) { |
3801 | ret = -ENOMEM; | ||
3578 | goto pre_mutex_err; | 3802 | goto pre_mutex_err; |
3803 | } | ||
3579 | 3804 | ||
3580 | ret = copy_from_user(cliprects, | 3805 | ret = copy_from_user(cliprects, |
3581 | (struct drm_clip_rect __user *) | 3806 | (struct drm_clip_rect __user *) |
@@ -3598,38 +3823,49 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3598 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3823 | i915_verify_inactive(dev, __FILE__, __LINE__); |
3599 | 3824 | ||
3600 | if (atomic_read(&dev_priv->mm.wedged)) { | 3825 | if (atomic_read(&dev_priv->mm.wedged)) { |
3601 | DRM_ERROR("Execbuf while wedged\n"); | ||
3602 | mutex_unlock(&dev->struct_mutex); | 3826 | mutex_unlock(&dev->struct_mutex); |
3603 | ret = -EIO; | 3827 | ret = -EIO; |
3604 | goto pre_mutex_err; | 3828 | goto pre_mutex_err; |
3605 | } | 3829 | } |
3606 | 3830 | ||
3607 | if (dev_priv->mm.suspended) { | 3831 | if (dev_priv->mm.suspended) { |
3608 | DRM_ERROR("Execbuf while VT-switched.\n"); | ||
3609 | mutex_unlock(&dev->struct_mutex); | 3832 | mutex_unlock(&dev->struct_mutex); |
3610 | ret = -EBUSY; | 3833 | ret = -EBUSY; |
3611 | goto pre_mutex_err; | 3834 | goto pre_mutex_err; |
3612 | } | 3835 | } |
3613 | 3836 | ||
3614 | /* Look up object handles */ | 3837 | /* Look up object handles */ |
3838 | flips = 0; | ||
3615 | for (i = 0; i < args->buffer_count; i++) { | 3839 | for (i = 0; i < args->buffer_count; i++) { |
3616 | object_list[i] = drm_gem_object_lookup(dev, file_priv, | 3840 | object_list[i] = drm_gem_object_lookup(dev, file_priv, |
3617 | exec_list[i].handle); | 3841 | exec_list[i].handle); |
3618 | if (object_list[i] == NULL) { | 3842 | if (object_list[i] == NULL) { |
3619 | DRM_ERROR("Invalid object handle %d at index %d\n", | 3843 | DRM_ERROR("Invalid object handle %d at index %d\n", |
3620 | exec_list[i].handle, i); | 3844 | exec_list[i].handle, i); |
3845 | /* prevent error path from reading uninitialized data */ | ||
3846 | args->buffer_count = i + 1; | ||
3621 | ret = -EBADF; | 3847 | ret = -EBADF; |
3622 | goto err; | 3848 | goto err; |
3623 | } | 3849 | } |
3624 | 3850 | ||
3625 | obj_priv = object_list[i]->driver_private; | 3851 | obj_priv = to_intel_bo(object_list[i]); |
3626 | if (obj_priv->in_execbuffer) { | 3852 | if (obj_priv->in_execbuffer) { |
3627 | DRM_ERROR("Object %p appears more than once in object list\n", | 3853 | DRM_ERROR("Object %p appears more than once in object list\n", |
3628 | object_list[i]); | 3854 | object_list[i]); |
3855 | /* prevent error path from reading uninitialized data */ | ||
3856 | args->buffer_count = i + 1; | ||
3629 | ret = -EBADF; | 3857 | ret = -EBADF; |
3630 | goto err; | 3858 | goto err; |
3631 | } | 3859 | } |
3632 | obj_priv->in_execbuffer = true; | 3860 | obj_priv->in_execbuffer = true; |
3861 | flips += atomic_read(&obj_priv->pending_flip); | ||
3862 | } | ||
3863 | |||
3864 | if (flips > 0) { | ||
3865 | ret = i915_gem_wait_for_pending_flip(dev, object_list, | ||
3866 | args->buffer_count); | ||
3867 | if (ret) | ||
3868 | goto err; | ||
3633 | } | 3869 | } |
3634 | 3870 | ||
3635 | /* Pin and relocate */ | 3871 | /* Pin and relocate */ |
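Each looked-up object sets in_execbuffer, so a second occurrence of the same handle is caught immediately; clamping args->buffer_count to i + 1 keeps the error path from walking entries that were never filled in. A small standalone model of the same bookkeeping, with a hypothetical handle-to-object table:

    #include <stdio.h>

    struct obj { int in_execbuffer; };

    static struct obj table[3];   /* hypothetical handle -> object map */

    int main(void)
    {
        int handles[] = { 0, 0, 2 };   /* handle 0 appears twice */
        int count = 3, i, ret = 0;

        for (i = 0; i < count; i++) {
            struct obj *o = &table[handles[i]];
            if (o->in_execbuffer) {
                fprintf(stderr, "handle %d duplicated at index %d\n",
                        handles[i], i);
                count = i + 1;   /* error path touches only 0..i */
                ret = -1;
                break;
            }
            o->in_execbuffer = 1;
        }

        /* Cleanup mirrors the driver's err: path. */
        for (i = 0; i < count; i++)
            table[handles[i]].in_execbuffer = 0;

        return ret ? 1 : 0;
    }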
@@ -3731,16 +3967,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3731 | i915_gem_flush(dev, | 3967 | i915_gem_flush(dev, |
3732 | dev->invalidate_domains, | 3968 | dev->invalidate_domains, |
3733 | dev->flush_domains); | 3969 | dev->flush_domains); |
3734 | if (dev->flush_domains) | 3970 | if (dev->flush_domains & I915_GEM_GPU_DOMAINS) |
3735 | (void)i915_add_request(dev, file_priv, | 3971 | (void)i915_add_request(dev, file_priv, |
3736 | dev->flush_domains); | 3972 | dev->flush_domains); |
3737 | } | 3973 | } |
3738 | 3974 | ||
3739 | for (i = 0; i < args->buffer_count; i++) { | 3975 | for (i = 0; i < args->buffer_count; i++) { |
3740 | struct drm_gem_object *obj = object_list[i]; | 3976 | struct drm_gem_object *obj = object_list[i]; |
3977 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
3741 | uint32_t old_write_domain = obj->write_domain; | 3978 | uint32_t old_write_domain = obj->write_domain; |
3742 | 3979 | ||
3743 | obj->write_domain = obj->pending_write_domain; | 3980 | obj->write_domain = obj->pending_write_domain; |
3981 | if (obj->write_domain) | ||
3982 | list_move_tail(&obj_priv->gpu_write_list, | ||
3983 | &dev_priv->mm.gpu_write_list); | ||
3984 | else | ||
3985 | list_del_init(&obj_priv->gpu_write_list); | ||
3986 | |||
3744 | trace_i915_gem_object_change_domain(obj, | 3987 | trace_i915_gem_object_change_domain(obj, |
3745 | obj->read_domains, | 3988 | obj->read_domains, |
3746 | old_write_domain); | 3989 | old_write_domain); |
@@ -3806,7 +4049,7 @@ err: | |||
3806 | 4049 | ||
3807 | for (i = 0; i < args->buffer_count; i++) { | 4050 | for (i = 0; i < args->buffer_count; i++) { |
3808 | if (object_list[i]) { | 4051 | if (object_list[i]) { |
3809 | obj_priv = object_list[i]->driver_private; | 4052 | obj_priv = to_intel_bo(object_list[i]); |
3810 | obj_priv->in_execbuffer = false; | 4053 | obj_priv->in_execbuffer = false; |
3811 | } | 4054 | } |
3812 | drm_gem_object_unreference(object_list[i]); | 4055 | drm_gem_object_unreference(object_list[i]); |
@@ -3814,8 +4057,101 @@ err: | |||
3814 | 4057 | ||
3815 | mutex_unlock(&dev->struct_mutex); | 4058 | mutex_unlock(&dev->struct_mutex); |
3816 | 4059 | ||
4060 | pre_mutex_err: | ||
4061 | /* Copy the updated relocations out regardless of current error | ||
4062 | * state. Failure to update the relocs would mean that the next | ||
4063 | * time userland calls execbuf, it would do so with presumed offset | ||
4064 | * state that didn't match the actual object state. | ||
4065 | */ | ||
4066 | ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, | ||
4067 | relocs); | ||
4068 | if (ret2 != 0) { | ||
4069 | DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); | ||
4070 | |||
4071 | if (ret == 0) | ||
4072 | ret = ret2; | ||
4073 | } | ||
4074 | |||
4075 | drm_free_large(object_list); | ||
4076 | kfree(cliprects); | ||
4077 | |||
4078 | return ret; | ||
4079 | } | ||
4080 | |||
4081 | /* | ||
4082 | * Legacy execbuffer just creates an exec2 list from the original exec object | ||
4083 | * list array and passes it to the real function. | ||
4084 | */ | ||
4085 | int | ||
4086 | i915_gem_execbuffer(struct drm_device *dev, void *data, | ||
4087 | struct drm_file *file_priv) | ||
4088 | { | ||
4089 | struct drm_i915_gem_execbuffer *args = data; | ||
4090 | struct drm_i915_gem_execbuffer2 exec2; | ||
4091 | struct drm_i915_gem_exec_object *exec_list = NULL; | ||
4092 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
4093 | int ret, i; | ||
4094 | |||
4095 | #if WATCH_EXEC | ||
4096 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
4097 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
4098 | #endif | ||
4099 | |||
4100 | if (args->buffer_count < 1) { | ||
4101 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | ||
4102 | return -EINVAL; | ||
4103 | } | ||
4104 | |||
4105 | /* Copy in the exec list from userland */ | ||
4106 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); | ||
4107 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | ||
4108 | if (exec_list == NULL || exec2_list == NULL) { | ||
4109 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | ||
4110 | args->buffer_count); | ||
4111 | drm_free_large(exec_list); | ||
4112 | drm_free_large(exec2_list); | ||
4113 | return -ENOMEM; | ||
4114 | } | ||
4115 | ret = copy_from_user(exec_list, | ||
4116 | (struct drm_i915_relocation_entry __user *) | ||
4117 | (uintptr_t) args->buffers_ptr, | ||
4118 | sizeof(*exec_list) * args->buffer_count); | ||
4119 | if (ret != 0) { | ||
4120 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
4121 | args->buffer_count, ret); | ||
4122 | drm_free_large(exec_list); | ||
4123 | drm_free_large(exec2_list); | ||
4124 | return -EFAULT; | ||
4125 | } | ||
4126 | |||
4127 | for (i = 0; i < args->buffer_count; i++) { | ||
4128 | exec2_list[i].handle = exec_list[i].handle; | ||
4129 | exec2_list[i].relocation_count = exec_list[i].relocation_count; | ||
4130 | exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; | ||
4131 | exec2_list[i].alignment = exec_list[i].alignment; | ||
4132 | exec2_list[i].offset = exec_list[i].offset; | ||
4133 | if (!IS_I965G(dev)) | ||
4134 | exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; | ||
4135 | else | ||
4136 | exec2_list[i].flags = 0; | ||
4137 | } | ||
4138 | |||
4139 | exec2.buffers_ptr = args->buffers_ptr; | ||
4140 | exec2.buffer_count = args->buffer_count; | ||
4141 | exec2.batch_start_offset = args->batch_start_offset; | ||
4142 | exec2.batch_len = args->batch_len; | ||
4143 | exec2.DR1 = args->DR1; | ||
4144 | exec2.DR4 = args->DR4; | ||
4145 | exec2.num_cliprects = args->num_cliprects; | ||
4146 | exec2.cliprects_ptr = args->cliprects_ptr; | ||
4147 | exec2.flags = 0; | ||
4148 | |||
4149 | ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); | ||
3817 | if (!ret) { | 4150 | if (!ret) { |
3818 | /* Copy the new buffer offsets back to the user's exec list. */ | 4151 | /* Copy the new buffer offsets back to the user's exec list. */ |
4152 | for (i = 0; i < args->buffer_count; i++) | ||
4153 | exec_list[i].offset = exec2_list[i].offset; | ||
4154 | /* ... and back out to userspace */ | ||
3819 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | 4155 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) |
3820 | (uintptr_t) args->buffers_ptr, | 4156 | (uintptr_t) args->buffers_ptr, |
3821 | exec_list, | 4157 | exec_list, |
@@ -3828,25 +4164,62 @@ err: | |||
3828 | } | 4164 | } |
3829 | } | 4165 | } |
3830 | 4166 | ||
3831 | /* Copy the updated relocations out regardless of current error | 4167 | drm_free_large(exec_list); |
3832 | * state. Failure to update the relocs would mean that the next | 4168 | drm_free_large(exec2_list); |
3833 | * time userland calls execbuf, it would do so with presumed offset | 4169 | return ret; |
3834 | * state that didn't match the actual object state. | 4170 | } |
3835 | */ | ||
3836 | ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, | ||
3837 | relocs); | ||
3838 | if (ret2 != 0) { | ||
3839 | DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); | ||
3840 | 4171 | ||
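The legacy ioctl now only widens each entry into the exec2 layout before calling the shared i915_gem_do_execbuffer(); since the old ABI carried no flags word, pre-965 entries get EXEC_OBJECT_NEEDS_FENCE forced on. A standalone sketch of that conversion, with struct layouts abbreviated from the i915_drm.h definitions:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define EXEC_OBJECT_NEEDS_FENCE (1 << 0)   /* as in i915_drm.h */

    struct exec_object {          /* legacy entry (abbreviated) */
        uint32_t handle;
        uint32_t relocation_count;
        uint64_t relocs_ptr;
        uint64_t alignment;
        uint64_t offset;
    };

    struct exec_object2 {         /* new entry: adds a flags word */
        uint32_t handle;
        uint32_t relocation_count;
        uint64_t relocs_ptr;
        uint64_t alignment;
        uint64_t offset;
        uint64_t flags;
    };

    static void widen(const struct exec_object *in, struct exec_object2 *out,
                      int is_i965g)
    {
        memset(out, 0, sizeof(*out));
        out->handle           = in->handle;
        out->relocation_count = in->relocation_count;
        out->relocs_ptr       = in->relocs_ptr;
        out->alignment        = in->alignment;
        out->offset           = in->offset;
        /* Pre-965 blits to tiled buffers always need a fence register. */
        out->flags = is_i965g ? 0 : EXEC_OBJECT_NEEDS_FENCE;
    }

    int main(void)
    {
        struct exec_object legacy = { .handle = 7, .alignment = 4096 };
        struct exec_object2 e2;

        widen(&legacy, &e2, /*is_i965g=*/0);
        printf("handle %u flags %#llx\n",
               e2.handle, (unsigned long long)e2.flags);
        return 0;
    }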
3841 | if (ret == 0) | 4172 | int |
3842 | ret = ret2; | 4173 | i915_gem_execbuffer2(struct drm_device *dev, void *data, |
4174 | struct drm_file *file_priv) | ||
4175 | { | ||
4176 | struct drm_i915_gem_execbuffer2 *args = data; | ||
4177 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
4178 | int ret; | ||
4179 | |||
4180 | #if WATCH_EXEC | ||
4181 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
4182 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
4183 | #endif | ||
4184 | |||
4185 | if (args->buffer_count < 1) { | ||
4186 | DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); | ||
4187 | return -EINVAL; | ||
3843 | } | 4188 | } |
3844 | 4189 | ||
3845 | pre_mutex_err: | 4190 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); |
3846 | drm_free_large(object_list); | 4191 | if (exec2_list == NULL) { |
3847 | drm_free_large(exec_list); | 4192 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", |
3848 | kfree(cliprects); | 4193 | args->buffer_count); |
4194 | return -ENOMEM; | ||
4195 | } | ||
4196 | ret = copy_from_user(exec2_list, | ||
4197 | (struct drm_i915_relocation_entry __user *) | ||
4198 | (uintptr_t) args->buffers_ptr, | ||
4199 | sizeof(*exec2_list) * args->buffer_count); | ||
4200 | if (ret != 0) { | ||
4201 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
4202 | args->buffer_count, ret); | ||
4203 | drm_free_large(exec2_list); | ||
4204 | return -EFAULT; | ||
4205 | } | ||
3849 | 4206 | ||
4207 | ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list); | ||
4208 | if (!ret) { | ||
4209 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
4210 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
4211 | (uintptr_t) args->buffers_ptr, | ||
4212 | exec2_list, | ||
4213 | sizeof(*exec2_list) * args->buffer_count); | ||
4214 | if (ret) { | ||
4215 | ret = -EFAULT; | ||
4216 | DRM_ERROR("failed to copy %d exec entries " | ||
4217 | "back to user (%d)\n", | ||
4218 | args->buffer_count, ret); | ||
4219 | } | ||
4220 | } | ||
4221 | |||
4222 | drm_free_large(exec2_list); | ||
3850 | return ret; | 4223 | return ret; |
3851 | } | 4224 | } |
3852 | 4225 | ||
@@ -3854,7 +4227,7 @@ int | |||
3854 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | 4227 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) |
3855 | { | 4228 | { |
3856 | struct drm_device *dev = obj->dev; | 4229 | struct drm_device *dev = obj->dev; |
3857 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 4230 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3858 | int ret; | 4231 | int ret; |
3859 | 4232 | ||
3860 | i915_verify_inactive(dev, __FILE__, __LINE__); | 4233 | i915_verify_inactive(dev, __FILE__, __LINE__); |
@@ -3863,19 +4236,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
3863 | if (ret) | 4236 | if (ret) |
3864 | return ret; | 4237 | return ret; |
3865 | } | 4238 | } |
3866 | /* | 4239 | |
3867 | * Pre-965 chips need a fence register set up in order to | ||
3868 | * properly handle tiled surfaces. | ||
3869 | */ | ||
3870 | if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) { | ||
3871 | ret = i915_gem_object_get_fence_reg(obj); | ||
3872 | if (ret != 0) { | ||
3873 | if (ret != -EBUSY && ret != -ERESTARTSYS) | ||
3874 | DRM_ERROR("Failure to install fence: %d\n", | ||
3875 | ret); | ||
3876 | return ret; | ||
3877 | } | ||
3878 | } | ||
3879 | obj_priv->pin_count++; | 4240 | obj_priv->pin_count++; |
3880 | 4241 | ||
3881 | /* If the object is not active and not pending a flush, | 4242 | /* If the object is not active and not pending a flush, |
@@ -3899,7 +4260,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) | |||
3899 | { | 4260 | { |
3900 | struct drm_device *dev = obj->dev; | 4261 | struct drm_device *dev = obj->dev; |
3901 | drm_i915_private_t *dev_priv = dev->dev_private; | 4262 | drm_i915_private_t *dev_priv = dev->dev_private; |
3902 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 4263 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3903 | 4264 | ||
3904 | i915_verify_inactive(dev, __FILE__, __LINE__); | 4265 | i915_verify_inactive(dev, __FILE__, __LINE__); |
3905 | obj_priv->pin_count--; | 4266 | obj_priv->pin_count--; |
@@ -3939,7 +4300,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
3939 | mutex_unlock(&dev->struct_mutex); | 4300 | mutex_unlock(&dev->struct_mutex); |
3940 | return -EBADF; | 4301 | return -EBADF; |
3941 | } | 4302 | } |
3942 | obj_priv = obj->driver_private; | 4303 | obj_priv = to_intel_bo(obj); |
3943 | 4304 | ||
3944 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 4305 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
3945 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); | 4306 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); |
@@ -3996,7 +4357,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | |||
3996 | return -EBADF; | 4357 | return -EBADF; |
3997 | } | 4358 | } |
3998 | 4359 | ||
3999 | obj_priv = obj->driver_private; | 4360 | obj_priv = to_intel_bo(obj); |
4000 | if (obj_priv->pin_filp != file_priv) { | 4361 | if (obj_priv->pin_filp != file_priv) { |
4001 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", | 4362 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", |
4002 | args->handle); | 4363 | args->handle); |
@@ -4038,7 +4399,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
4038 | */ | 4399 | */ |
4039 | i915_gem_retire_requests(dev); | 4400 | i915_gem_retire_requests(dev); |
4040 | 4401 | ||
4041 | obj_priv = obj->driver_private; | 4402 | obj_priv = to_intel_bo(obj); |
4042 | /* Don't count being on the flushing list against the object being | 4403 | /* Don't count being on the flushing list against the object being |
4043 | * done. Otherwise, a buffer left on the flushing list but not getting | 4404 | * done. Otherwise, a buffer left on the flushing list but not getting |
4044 | * flushed (because nobody's flushing that domain) won't ever return | 4405 | * flushed (because nobody's flushing that domain) won't ever return |
@@ -4084,7 +4445,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4084 | } | 4445 | } |
4085 | 4446 | ||
4086 | mutex_lock(&dev->struct_mutex); | 4447 | mutex_lock(&dev->struct_mutex); |
4087 | obj_priv = obj->driver_private; | 4448 | obj_priv = to_intel_bo(obj); |
4088 | 4449 | ||
4089 | if (obj_priv->pin_count) { | 4450 | if (obj_priv->pin_count) { |
4090 | drm_gem_object_unreference(obj); | 4451 | drm_gem_object_unreference(obj); |
@@ -4133,6 +4494,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
4133 | obj_priv->obj = obj; | 4494 | obj_priv->obj = obj; |
4134 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | 4495 | obj_priv->fence_reg = I915_FENCE_REG_NONE; |
4135 | INIT_LIST_HEAD(&obj_priv->list); | 4496 | INIT_LIST_HEAD(&obj_priv->list); |
4497 | INIT_LIST_HEAD(&obj_priv->gpu_write_list); | ||
4136 | INIT_LIST_HEAD(&obj_priv->fence_list); | 4498 | INIT_LIST_HEAD(&obj_priv->fence_list); |
4137 | obj_priv->madv = I915_MADV_WILLNEED; | 4499 | obj_priv->madv = I915_MADV_WILLNEED; |
4138 | 4500 | ||
@@ -4144,7 +4506,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
4144 | void i915_gem_free_object(struct drm_gem_object *obj) | 4506 | void i915_gem_free_object(struct drm_gem_object *obj) |
4145 | { | 4507 | { |
4146 | struct drm_device *dev = obj->dev; | 4508 | struct drm_device *dev = obj->dev; |
4147 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 4509 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
4148 | 4510 | ||
4149 | trace_i915_gem_object_destroy(obj); | 4511 | trace_i915_gem_object_destroy(obj); |
4150 | 4512 | ||
@@ -4192,8 +4554,7 @@ int | |||
4192 | i915_gem_idle(struct drm_device *dev) | 4554 | i915_gem_idle(struct drm_device *dev) |
4193 | { | 4555 | { |
4194 | drm_i915_private_t *dev_priv = dev->dev_private; | 4556 | drm_i915_private_t *dev_priv = dev->dev_private; |
4195 | uint32_t seqno, cur_seqno, last_seqno; | 4557 | int ret; |
4196 | int stuck, ret; | ||
4197 | 4558 | ||
4198 | mutex_lock(&dev->struct_mutex); | 4559 | mutex_lock(&dev->struct_mutex); |
4199 | 4560 | ||
@@ -4202,116 +4563,80 @@ i915_gem_idle(struct drm_device *dev) | |||
4202 | return 0; | 4563 | return 0; |
4203 | } | 4564 | } |
4204 | 4565 | ||
4205 | /* Hack! Don't let anybody do execbuf while we don't control the chip. | 4566 | ret = i915_gpu_idle(dev); |
4206 | * We need to replace this with a semaphore, or something. | 4567 | if (ret) { |
4207 | */ | ||
4208 | dev_priv->mm.suspended = 1; | ||
4209 | del_timer(&dev_priv->hangcheck_timer); | ||
4210 | |||
4211 | /* Cancel the retire work handler, wait for it to finish if running | ||
4212 | */ | ||
4213 | mutex_unlock(&dev->struct_mutex); | ||
4214 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); | ||
4215 | mutex_lock(&dev->struct_mutex); | ||
4216 | |||
4217 | i915_kernel_lost_context(dev); | ||
4218 | |||
4219 | /* Flush the GPU along with all non-CPU write domains | ||
4220 | */ | ||
4221 | i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | ||
4222 | seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); | ||
4223 | |||
4224 | if (seqno == 0) { | ||
4225 | mutex_unlock(&dev->struct_mutex); | 4568 | mutex_unlock(&dev->struct_mutex); |
4226 | return -ENOMEM; | 4569 | return ret; |
4227 | } | 4570 | } |
4228 | 4571 | ||
4229 | dev_priv->mm.waiting_gem_seqno = seqno; | 4572 | /* Under UMS, be paranoid and evict. */ |
4230 | last_seqno = 0; | 4573 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { |
4231 | stuck = 0; | 4574 | ret = i915_gem_evict_from_inactive_list(dev); |
4232 | for (;;) { | 4575 | if (ret) { |
4233 | cur_seqno = i915_get_gem_seqno(dev); | 4576 | mutex_unlock(&dev->struct_mutex); |
4234 | if (i915_seqno_passed(cur_seqno, seqno)) | 4577 | return ret; |
4235 | break; | ||
4236 | if (last_seqno == cur_seqno) { | ||
4237 | if (stuck++ > 100) { | ||
4238 | DRM_ERROR("hardware wedged\n"); | ||
4239 | atomic_set(&dev_priv->mm.wedged, 1); | ||
4240 | DRM_WAKEUP(&dev_priv->irq_queue); | ||
4241 | break; | ||
4242 | } | ||
4243 | } | 4578 | } |
4244 | msleep(10); | ||
4245 | last_seqno = cur_seqno; | ||
4246 | } | ||
4247 | dev_priv->mm.waiting_gem_seqno = 0; | ||
4248 | |||
4249 | i915_gem_retire_requests(dev); | ||
4250 | |||
4251 | spin_lock(&dev_priv->mm.active_list_lock); | ||
4252 | if (!atomic_read(&dev_priv->mm.wedged)) { | ||
4253 | /* Active and flushing should now be empty as we've | ||
4254 | * waited for a sequence higher than any pending execbuffer | ||
4255 | */ | ||
4256 | WARN_ON(!list_empty(&dev_priv->mm.active_list)); | ||
4257 | WARN_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
4258 | /* Request should now be empty as we've also waited | ||
4259 | * for the last request in the list | ||
4260 | */ | ||
4261 | WARN_ON(!list_empty(&dev_priv->mm.request_list)); | ||
4262 | } | 4579 | } |
4263 | 4580 | ||
4264 | /* Empty the active and flushing lists to inactive. If there's | 4581 | /* Hack! Don't let anybody do execbuf while we don't control the chip. |
4265 | * anything left at this point, it means that we're wedged and | 4582 | * We need to replace this with a semaphore, or something. |
4266 | * nothing good's going to happen by leaving them there. So strip | 4583 | * And not confound mm.suspended! |
4267 | * the GPU domains and just stuff them onto inactive. | ||
4268 | */ | 4584 | */ |
4269 | while (!list_empty(&dev_priv->mm.active_list)) { | 4585 | dev_priv->mm.suspended = 1; |
4270 | struct drm_gem_object *obj; | 4586 | del_timer(&dev_priv->hangcheck_timer); |
4271 | uint32_t old_write_domain; | ||
4272 | 4587 | ||
4273 | obj = list_first_entry(&dev_priv->mm.active_list, | 4588 | i915_kernel_lost_context(dev); |
4274 | struct drm_i915_gem_object, | 4589 | i915_gem_cleanup_ringbuffer(dev); |
4275 | list)->obj; | ||
4276 | old_write_domain = obj->write_domain; | ||
4277 | obj->write_domain &= ~I915_GEM_GPU_DOMAINS; | ||
4278 | i915_gem_object_move_to_inactive(obj); | ||
4279 | 4590 | ||
4280 | trace_i915_gem_object_change_domain(obj, | 4591 | mutex_unlock(&dev->struct_mutex); |
4281 | obj->read_domains, | ||
4282 | old_write_domain); | ||
4283 | } | ||
4284 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
4285 | 4592 | ||
4286 | while (!list_empty(&dev_priv->mm.flushing_list)) { | 4593 | /* Cancel the retire work handler, which should be idle now. */ |
4287 | struct drm_gem_object *obj; | 4594 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); |
4288 | uint32_t old_write_domain; | ||
4289 | 4595 | ||
4290 | obj = list_first_entry(&dev_priv->mm.flushing_list, | 4596 | return 0; |
4291 | struct drm_i915_gem_object, | 4597 | } |
4292 | list)->obj; | ||
4293 | old_write_domain = obj->write_domain; | ||
4294 | obj->write_domain &= ~I915_GEM_GPU_DOMAINS; | ||
4295 | i915_gem_object_move_to_inactive(obj); | ||
4296 | 4598 | ||
4297 | trace_i915_gem_object_change_domain(obj, | 4599 | /* |
4298 | obj->read_domains, | 4600 | * 965+ parts support PIPE_CONTROL commands, which provide finer-grained control |
4299 | old_write_domain); | 4601 | * over cache flushing. |
4602 | */ | ||
4603 | static int | ||
4604 | i915_gem_init_pipe_control(struct drm_device *dev) | ||
4605 | { | ||
4606 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4607 | struct drm_gem_object *obj; | ||
4608 | struct drm_i915_gem_object *obj_priv; | ||
4609 | int ret; | ||
4610 | |||
4611 | obj = drm_gem_object_alloc(dev, 4096); | ||
4612 | if (obj == NULL) { | ||
4613 | DRM_ERROR("Failed to allocate seqno page\n"); | ||
4614 | ret = -ENOMEM; | ||
4615 | goto err; | ||
4300 | } | 4616 | } |
4617 | obj_priv = to_intel_bo(obj); | ||
4618 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | ||
4301 | 4619 | ||
4620 | ret = i915_gem_object_pin(obj, 4096); | ||
4621 | if (ret) | ||
4622 | goto err_unref; | ||
4302 | 4623 | ||
4303 | /* Move all inactive buffers out of the GTT. */ | 4624 | dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; |
4304 | ret = i915_gem_evict_from_inactive_list(dev); | 4625 | dev_priv->seqno_page = kmap(obj_priv->pages[0]); |
4305 | WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); | 4626 | if (dev_priv->seqno_page == NULL) |
4306 | if (ret) { | 4627 | goto err_unpin; |
4307 | mutex_unlock(&dev->struct_mutex); | ||
4308 | return ret; | ||
4309 | } | ||
4310 | 4628 | ||
4311 | i915_gem_cleanup_ringbuffer(dev); | 4629 | dev_priv->seqno_obj = obj; |
4312 | mutex_unlock(&dev->struct_mutex); | 4630 | memset(dev_priv->seqno_page, 0, PAGE_SIZE); |
4313 | 4631 | ||
4314 | return 0; | 4632 | return 0; |
4633 | |||
4634 | err_unpin: | ||
4635 | i915_gem_object_unpin(obj); | ||
4636 | err_unref: | ||
4637 | drm_gem_object_unreference(obj); | ||
4638 | err: | ||
4639 | return ret; | ||
4315 | } | 4640 | } |
4316 | 4641 | ||
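i915_gem_init_pipe_control() follows the usual kernel unwind ladder: each successfully acquired resource adds a label, and a failure jumps to the label that releases everything obtained so far, in reverse order of acquisition. A compilable skeleton of the idiom -- the three acquire stubs are hypothetical, and map_page() is made to fail so the unwind actually runs:

    #include <stdio.h>

    static int alloc_obj(void)  { puts("alloc");  return 0; }
    static int pin_obj(void)    { puts("pin");    return 0; }
    static int map_page(void)   { puts("kmap");   return -1; }  /* fails */

    static void unpin_obj(void) { puts("unpin"); }
    static void free_obj(void)  { puts("unref"); }

    static int init_pipe_control(void)
    {
        int ret;

        ret = alloc_obj();
        if (ret)
            goto err;
        ret = pin_obj();
        if (ret)
            goto err_unref;
        ret = map_page();
        if (ret)
            goto err_unpin;
        return 0;

        /* Unwind in strict reverse order of acquisition. */
    err_unpin:
        unpin_obj();
    err_unref:
        free_obj();
    err:
        return ret;
    }

    int main(void)
    {
        printf("init_pipe_control -> %d\n", init_pipe_control());
        return 0;
    }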
4317 | static int | 4642 | static int |
@@ -4331,15 +4656,16 @@ i915_gem_init_hws(struct drm_device *dev) | |||
4331 | obj = drm_gem_object_alloc(dev, 4096); | 4656 | obj = drm_gem_object_alloc(dev, 4096); |
4332 | if (obj == NULL) { | 4657 | if (obj == NULL) { |
4333 | DRM_ERROR("Failed to allocate status page\n"); | 4658 | DRM_ERROR("Failed to allocate status page\n"); |
4334 | return -ENOMEM; | 4659 | ret = -ENOMEM; |
4660 | goto err; | ||
4335 | } | 4661 | } |
4336 | obj_priv = obj->driver_private; | 4662 | obj_priv = to_intel_bo(obj); |
4337 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | 4663 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; |
4338 | 4664 | ||
4339 | ret = i915_gem_object_pin(obj, 4096); | 4665 | ret = i915_gem_object_pin(obj, 4096); |
4340 | if (ret != 0) { | 4666 | if (ret != 0) { |
4341 | drm_gem_object_unreference(obj); | 4667 | |
4342 | return ret; | 4668 | goto err_unref; |
4343 | } | 4669 | } |
4344 | 4670 | ||
4345 | dev_priv->status_gfx_addr = obj_priv->gtt_offset; | 4671 | dev_priv->status_gfx_addr = obj_priv->gtt_offset; |
@@ -4348,17 +4674,52 @@ i915_gem_init_hws(struct drm_device *dev) | |||
4348 | if (dev_priv->hw_status_page == NULL) { | 4674 | if (dev_priv->hw_status_page == NULL) { |
4349 | DRM_ERROR("Failed to map status page.\n"); | 4675 | DRM_ERROR("Failed to map status page.\n"); |
4350 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 4676 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
4351 | i915_gem_object_unpin(obj); | 4677 | ret = -EINVAL; |
4352 | drm_gem_object_unreference(obj); | 4678 | goto err_unpin; |
4353 | return -EINVAL; | 4679 | } |
4680 | |||
4681 | if (HAS_PIPE_CONTROL(dev)) { | ||
4682 | ret = i915_gem_init_pipe_control(dev); | ||
4683 | if (ret) | ||
4684 | goto err_unpin; | ||
4354 | } | 4685 | } |
4686 | |||
4355 | dev_priv->hws_obj = obj; | 4687 | dev_priv->hws_obj = obj; |
4356 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | 4688 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); |
4357 | I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); | 4689 | if (IS_GEN6(dev)) { |
4358 | I915_READ(HWS_PGA); /* posting read */ | 4690 | I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr); |
4359 | DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); | 4691 | I915_READ(HWS_PGA_GEN6); /* posting read */ |
4692 | } else { | ||
4693 | I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); | ||
4694 | I915_READ(HWS_PGA); /* posting read */ | ||
4695 | } | ||
4696 | DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); | ||
4360 | 4697 | ||
4361 | return 0; | 4698 | return 0; |
4699 | |||
4700 | err_unpin: | ||
4701 | i915_gem_object_unpin(obj); | ||
4702 | err_unref: | ||
4703 | drm_gem_object_unreference(obj); | ||
4704 | err: | ||
4705 | return ret; | ||
4706 | } | ||
4707 | |||
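Both HWS_PGA branches end by reading back the register they just wrote -- a posting read that flushes the write out of any posted-write buffers before execution continues. A simulated-MMIO sketch in which a plain volatile array stands in for the ioremap()ed register file:

    #include <stdint.h>
    #include <stdio.h>

    /* Simulated register file; real hardware would be ioremap()ed. */
    static volatile uint32_t regs[4];

    enum { HWS_PGA = 0 };   /* placeholder index, not the real offset */

    static void write_reg(int r, uint32_t v) { regs[r] = v; }
    static uint32_t read_reg(int r)          { return regs[r]; }

    int main(void)
    {
        uint32_t status_gfx_addr = 0x12340000u;

        write_reg(HWS_PGA, status_gfx_addr);
        (void)read_reg(HWS_PGA);   /* posting read: force the write out */

        printf("hws offset: 0x%08x\n", (unsigned)regs[HWS_PGA]);
        return 0;
    }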
4708 | static void | ||
4709 | i915_gem_cleanup_pipe_control(struct drm_device *dev) | ||
4710 | { | ||
4711 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4712 | struct drm_gem_object *obj; | ||
4713 | struct drm_i915_gem_object *obj_priv; | ||
4714 | |||
4715 | obj = dev_priv->seqno_obj; | ||
4716 | obj_priv = to_intel_bo(obj); | ||
4717 | kunmap(obj_priv->pages[0]); | ||
4718 | i915_gem_object_unpin(obj); | ||
4719 | drm_gem_object_unreference(obj); | ||
4720 | dev_priv->seqno_obj = NULL; | ||
4721 | |||
4722 | dev_priv->seqno_page = NULL; | ||
4362 | } | 4723 | } |
4363 | 4724 | ||
4364 | static void | 4725 | static void |
@@ -4372,7 +4733,7 @@ i915_gem_cleanup_hws(struct drm_device *dev) | |||
4372 | return; | 4733 | return; |
4373 | 4734 | ||
4374 | obj = dev_priv->hws_obj; | 4735 | obj = dev_priv->hws_obj; |
4375 | obj_priv = obj->driver_private; | 4736 | obj_priv = to_intel_bo(obj); |
4376 | 4737 | ||
4377 | kunmap(obj_priv->pages[0]); | 4738 | kunmap(obj_priv->pages[0]); |
4378 | i915_gem_object_unpin(obj); | 4739 | i915_gem_object_unpin(obj); |
@@ -4382,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev) | |||
4382 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 4743 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
4383 | dev_priv->hw_status_page = NULL; | 4744 | dev_priv->hw_status_page = NULL; |
4384 | 4745 | ||
4746 | if (HAS_PIPE_CONTROL(dev)) | ||
4747 | i915_gem_cleanup_pipe_control(dev); | ||
4748 | |||
4385 | /* Write high address into HWS_PGA when disabling. */ | 4749 | /* Write high address into HWS_PGA when disabling. */ |
4386 | I915_WRITE(HWS_PGA, 0x1ffff000); | 4750 | I915_WRITE(HWS_PGA, 0x1ffff000); |
4387 | } | 4751 | } |
@@ -4406,7 +4770,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
4406 | i915_gem_cleanup_hws(dev); | 4770 | i915_gem_cleanup_hws(dev); |
4407 | return -ENOMEM; | 4771 | return -ENOMEM; |
4408 | } | 4772 | } |
4409 | obj_priv = obj->driver_private; | 4773 | obj_priv = to_intel_bo(obj); |
4410 | 4774 | ||
4411 | ret = i915_gem_object_pin(obj, 4096); | 4775 | ret = i915_gem_object_pin(obj, 4096); |
4412 | if (ret != 0) { | 4776 | if (ret != 0) { |
@@ -4492,6 +4856,11 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
4492 | ring->space += ring->Size; | 4856 | ring->space += ring->Size; |
4493 | } | 4857 | } |
4494 | 4858 | ||
4859 | if (IS_I9XX(dev) && !IS_GEN3(dev)) { | ||
4860 | I915_WRITE(MI_MODE, | ||
4861 | (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); | ||
4862 | } | ||
4863 | |||
4495 | return 0; | 4864 | return 0; |
4496 | } | 4865 | } |
4497 | 4866 | ||
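The MI_MODE write above uses the masked-bit convention common on these parts: the high 16 bits select which low bits the hardware will actually update, so `(bit << 16) | bit` sets a bit and `bit << 16` alone clears it, with no read-modify-write cycle. A standalone model of the semantics; the VS_TIMER_DISPATCH bit position is a placeholder:

    #include <stdint.h>
    #include <stdio.h>

    #define VS_TIMER_DISPATCH (1 << 6)   /* placeholder bit position */

    /* Masked write: high 16 bits choose which low bits may change. */
    static uint16_t masked_write(uint16_t reg, uint32_t val)
    {
        uint16_t mask = (uint16_t)(val >> 16);
        return (uint16_t)((reg & ~mask) | (val & mask));
    }

    int main(void)
    {
        uint16_t mi_mode = 0;

        /* (VS_TIMER_DISPATCH << 16) | VS_TIMER_DISPATCH sets the bit... */
        mi_mode = masked_write(mi_mode,
                ((uint32_t)VS_TIMER_DISPATCH << 16) | VS_TIMER_DISPATCH);
        printf("after set:   0x%04x\n", mi_mode);

        /* ...while the mask without the value bit clears it. */
        mi_mode = masked_write(mi_mode, (uint32_t)VS_TIMER_DISPATCH << 16);
        printf("after clear: 0x%04x\n", mi_mode);
        return 0;
    }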
@@ -4584,6 +4953,7 @@ i915_gem_load(struct drm_device *dev) | |||
4584 | spin_lock_init(&dev_priv->mm.active_list_lock); | 4953 | spin_lock_init(&dev_priv->mm.active_list_lock); |
4585 | INIT_LIST_HEAD(&dev_priv->mm.active_list); | 4954 | INIT_LIST_HEAD(&dev_priv->mm.active_list); |
4586 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | 4955 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); |
4956 | INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); | ||
4587 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 4957 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
4588 | INIT_LIST_HEAD(&dev_priv->mm.request_list); | 4958 | INIT_LIST_HEAD(&dev_priv->mm.request_list); |
4589 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 4959 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
@@ -4596,7 +4966,8 @@ i915_gem_load(struct drm_device *dev) | |||
4596 | spin_unlock(&shrink_list_lock); | 4966 | spin_unlock(&shrink_list_lock); |
4597 | 4967 | ||
4598 | /* Old X drivers will take 0-2 for front, back, depth buffers */ | 4968 | /* Old X drivers will take 0-2 for front, back, depth buffers */ |
4599 | dev_priv->fence_reg_start = 3; | 4969 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
4970 | dev_priv->fence_reg_start = 3; | ||
4600 | 4971 | ||
4601 | if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 4972 | if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
4602 | dev_priv->num_fence_regs = 16; | 4973 | dev_priv->num_fence_regs = 16; |
@@ -4614,8 +4985,8 @@ i915_gem_load(struct drm_device *dev) | |||
4614 | for (i = 0; i < 8; i++) | 4985 | for (i = 0; i < 8; i++) |
4615 | I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); | 4986 | I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); |
4616 | } | 4987 | } |
4617 | |||
4618 | i915_gem_detect_bit_6_swizzle(dev); | 4988 | i915_gem_detect_bit_6_swizzle(dev); |
4989 | init_waitqueue_head(&dev_priv->pending_flip_queue); | ||
4619 | } | 4990 | } |
4620 | 4991 | ||
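Fence bookkeeping at load time changes in two ways: registers 0-2 are reserved for old X servers only under UMS now, and the usable count depends on the chip family. A tiny sketch of the count rule as the hunk states it, with a hypothetical descriptor standing in for the IS_*() macros:

    #include <stdio.h>

    /* Hypothetical stand-in for the IS_I965G()/IS_I945G()/... tests. */
    struct chip { int i965g, i945g, i945gm, g33; };

    static int num_fence_regs(const struct chip *c)
    {
        if (c->i965g || c->i945g || c->i945gm || c->g33)
            return 16;   /* these families expose 16 fence registers */
        return 8;        /* older parts have only 8 */
    }

    int main(void)
    {
        struct chip i945 = { .i945g = 1 };
        struct chip i915 = { 0 };

        printf("i945: %d fences, i915: %d fences\n",
               num_fence_regs(&i945), num_fence_regs(&i915));
        return 0;
    }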
4621 | /* | 4992 | /* |
@@ -4638,7 +5009,7 @@ int i915_gem_init_phys_object(struct drm_device *dev, | |||
4638 | 5009 | ||
4639 | phys_obj->id = id; | 5010 | phys_obj->id = id; |
4640 | 5011 | ||
4641 | phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff); | 5012 | phys_obj->handle = drm_pci_alloc(dev, size, 0); |
4642 | if (!phys_obj->handle) { | 5013 | if (!phys_obj->handle) { |
4643 | ret = -ENOMEM; | 5014 | ret = -ENOMEM; |
4644 | goto kfree_obj; | 5015 | goto kfree_obj; |
@@ -4692,11 +5063,11 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
4692 | int ret; | 5063 | int ret; |
4693 | int page_count; | 5064 | int page_count; |
4694 | 5065 | ||
4695 | obj_priv = obj->driver_private; | 5066 | obj_priv = to_intel_bo(obj); |
4696 | if (!obj_priv->phys_obj) | 5067 | if (!obj_priv->phys_obj) |
4697 | return; | 5068 | return; |
4698 | 5069 | ||
4699 | ret = i915_gem_object_get_pages(obj); | 5070 | ret = i915_gem_object_get_pages(obj, 0); |
4700 | if (ret) | 5071 | if (ret) |
4701 | goto out; | 5072 | goto out; |
4702 | 5073 | ||
@@ -4731,7 +5102,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
4731 | if (id > I915_MAX_PHYS_OBJECT) | 5102 | if (id > I915_MAX_PHYS_OBJECT) |
4732 | return -EINVAL; | 5103 | return -EINVAL; |
4733 | 5104 | ||
4734 | obj_priv = obj->driver_private; | 5105 | obj_priv = to_intel_bo(obj); |
4735 | 5106 | ||
4736 | if (obj_priv->phys_obj) { | 5107 | if (obj_priv->phys_obj) { |
4737 | if (obj_priv->phys_obj->id == id) | 5108 | if (obj_priv->phys_obj->id == id) |
@@ -4754,7 +5125,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
4754 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | 5125 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; |
4755 | obj_priv->phys_obj->cur_obj = obj; | 5126 | obj_priv->phys_obj->cur_obj = obj; |
4756 | 5127 | ||
4757 | ret = i915_gem_object_get_pages(obj); | 5128 | ret = i915_gem_object_get_pages(obj, 0); |
4758 | if (ret) { | 5129 | if (ret) { |
4759 | DRM_ERROR("failed to get page list\n"); | 5130 | DRM_ERROR("failed to get page list\n"); |
4760 | goto out; | 5131 | goto out; |
@@ -4782,7 +5153,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | |||
4782 | struct drm_i915_gem_pwrite *args, | 5153 | struct drm_i915_gem_pwrite *args, |
4783 | struct drm_file *file_priv) | 5154 | struct drm_file *file_priv) |
4784 | { | 5155 | { |
4785 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 5156 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
4786 | void *obj_addr; | 5157 | void *obj_addr; |
4787 | int ret; | 5158 | int ret; |
4788 | char __user *user_data; | 5159 | char __user *user_data; |
@@ -4790,7 +5161,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | |||
4790 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 5161 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
4791 | obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; | 5162 | obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; |
4792 | 5163 | ||
4793 | DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size); | 5164 | DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size); |
4794 | ret = copy_from_user(obj_addr, user_data, args->size); | 5165 | ret = copy_from_user(obj_addr, user_data, args->size); |
4795 | if (ret) | 5166 | if (ret) |
4796 | return -EFAULT; | 5167 | return -EFAULT; |