author    | Chris Wilson <chris@chris-wilson.co.uk> | 2010-11-08 14:18:58 -0500
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2010-11-23 15:19:10 -0500
commit    | 05394f3975dceb107a5e1393e2244946e5b43660 (patch)
tree      | 2af73b6efec503ed4cd9c932018619bd28a1fe60 /drivers/gpu/drm/i915/i915_gem.c
parent    | 185cbcb304ba4dee55e39593fd86dcd7813f62ec (diff)
drm/i915: Use drm_i915_gem_object as the preferred type
A glorified s/obj_priv/obj/ with a net reduction of over 100 lines and
many characters!
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 1268
1 file changed, 595 insertions(+), 673 deletions(-)
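The diff below is easier to follow with the underlying idiom in mind: struct drm_i915_gem_object embeds its struct drm_gem_object as a member named base, so helpers can take the driver type directly and reach core GEM state through obj->base, rather than taking the core type and re-deriving the private struct via to_intel_bo() in every function. Here is a minimal sketch of that pattern, using trimmed-down stand-in structs (the fields shown and the mark_dirty_* helpers are illustrative, not the real i915 definitions):

#include <stddef.h>

/* Stand-ins for the real structs: only the 'base' embedding and one
 * driver-private field are shown. */
struct drm_gem_object {
	size_t size;			/* core GEM state */
};

struct drm_i915_gem_object {
	struct drm_gem_object base;	/* embedded core object */
	unsigned int dirty : 1;		/* driver-private state */
};

/* to_intel_bo() is the container_of-style downcast the driver uses to
 * get from the embedded base pointer back to the containing object. */
static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	return (struct drm_i915_gem_object *)
		((char *)gem - offsetof(struct drm_i915_gem_object, base));
}

/* Before: helpers took the core type and re-derived the private struct. */
static void mark_dirty_old(struct drm_gem_object *gem)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(gem);

	obj_priv->dirty = 1;
}

/* After: helpers take the driver type; core fields sit one '.base' away. */
static void mark_dirty_new(struct drm_i915_gem_object *obj)
{
	obj->dirty = 1;		/* driver field: direct access */
	(void)obj->base.size;	/* core field: through obj->base */
}

In the diff itself the same motion shows up at the ioctl boundary: the result of drm_gem_object_lookup() is wrapped in a single to_intel_bo() call, and core GEM helpers such as drm_gem_object_unreference() are then handed &obj->base.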
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3cac366b3053..d196895527a6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -41,29 +41,30 @@ struct change_domains { | |||
41 | uint32_t flush_rings; | 41 | uint32_t flush_rings; |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv); | 44 | static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj); |
45 | static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv); | 45 | static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj); |
46 | 46 | ||
47 | static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, | 47 | static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj, |
48 | bool pipelined); | 48 | bool pipelined); |
49 | static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); | 49 | static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); |
50 | static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); | 50 | static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); |
51 | static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, | 51 | static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, |
52 | int write); | 52 | int write); |
53 | static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 53 | static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, |
54 | uint64_t offset, | 54 | uint64_t offset, |
55 | uint64_t size); | 55 | uint64_t size); |
56 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); | 56 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj); |
57 | static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, | 57 | static int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, |
58 | bool interruptible); | 58 | bool interruptible); |
59 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | 59 | static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, |
60 | unsigned alignment, | 60 | unsigned alignment, |
61 | bool map_and_fenceable); | 61 | bool map_and_fenceable); |
62 | static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); | 62 | static void i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj); |
63 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 63 | static int i915_gem_phys_pwrite(struct drm_device *dev, |
64 | struct drm_i915_gem_object *obj, | ||
64 | struct drm_i915_gem_pwrite *args, | 65 | struct drm_i915_gem_pwrite *args, |
65 | struct drm_file *file_priv); | 66 | struct drm_file *file); |
66 | static void i915_gem_free_object_tail(struct drm_gem_object *obj); | 67 | static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); |
67 | 68 | ||
68 | static int i915_gem_inactive_shrink(struct shrinker *shrinker, | 69 | static int i915_gem_inactive_shrink(struct shrinker *shrinker, |
69 | int nr_to_scan, | 70 | int nr_to_scan, |
@@ -212,11 +213,9 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev) | |||
212 | } | 213 | } |
213 | 214 | ||
214 | static inline bool | 215 | static inline bool |
215 | i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) | 216 | i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) |
216 | { | 217 | { |
217 | return obj_priv->gtt_space && | 218 | return obj->gtt_space && !obj->active && obj->pin_count == 0; |
218 | !obj_priv->active && | ||
219 | obj_priv->pin_count == 0; | ||
220 | } | 219 | } |
221 | 220 | ||
222 | int i915_gem_do_init(struct drm_device *dev, | 221 | int i915_gem_do_init(struct drm_device *dev, |
@@ -244,7 +243,7 @@ int i915_gem_do_init(struct drm_device *dev, | |||
244 | 243 | ||
245 | int | 244 | int |
246 | i915_gem_init_ioctl(struct drm_device *dev, void *data, | 245 | i915_gem_init_ioctl(struct drm_device *dev, void *data, |
247 | struct drm_file *file_priv) | 246 | struct drm_file *file) |
248 | { | 247 | { |
249 | struct drm_i915_gem_init *args = data; | 248 | struct drm_i915_gem_init *args = data; |
250 | int ret; | 249 | int ret; |
@@ -258,7 +257,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data, | |||
258 | 257 | ||
259 | int | 258 | int |
260 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | 259 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, |
261 | struct drm_file *file_priv) | 260 | struct drm_file *file) |
262 | { | 261 | { |
263 | struct drm_i915_private *dev_priv = dev->dev_private; | 262 | struct drm_i915_private *dev_priv = dev->dev_private; |
264 | struct drm_i915_gem_get_aperture *args = data; | 263 | struct drm_i915_gem_get_aperture *args = data; |
@@ -280,10 +279,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
280 | */ | 279 | */ |
281 | int | 280 | int |
282 | i915_gem_create_ioctl(struct drm_device *dev, void *data, | 281 | i915_gem_create_ioctl(struct drm_device *dev, void *data, |
283 | struct drm_file *file_priv) | 282 | struct drm_file *file) |
284 | { | 283 | { |
285 | struct drm_i915_gem_create *args = data; | 284 | struct drm_i915_gem_create *args = data; |
286 | struct drm_gem_object *obj; | 285 | struct drm_i915_gem_object *obj; |
287 | int ret; | 286 | int ret; |
288 | u32 handle; | 287 | u32 handle; |
289 | 288 | ||
@@ -294,29 +293,28 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, | |||
294 | if (obj == NULL) | 293 | if (obj == NULL) |
295 | return -ENOMEM; | 294 | return -ENOMEM; |
296 | 295 | ||
297 | ret = drm_gem_handle_create(file_priv, obj, &handle); | 296 | ret = drm_gem_handle_create(file, &obj->base, &handle); |
298 | if (ret) { | 297 | if (ret) { |
299 | drm_gem_object_release(obj); | 298 | drm_gem_object_release(&obj->base); |
300 | i915_gem_info_remove_obj(dev->dev_private, obj->size); | 299 | i915_gem_info_remove_obj(dev->dev_private, obj->base.size); |
301 | kfree(obj); | 300 | kfree(obj); |
302 | return ret; | 301 | return ret; |
303 | } | 302 | } |
304 | 303 | ||
305 | /* drop reference from allocate - handle holds it now */ | 304 | /* drop reference from allocate - handle holds it now */ |
306 | drm_gem_object_unreference(obj); | 305 | drm_gem_object_unreference(&obj->base); |
307 | trace_i915_gem_object_create(obj); | 306 | trace_i915_gem_object_create(obj); |
308 | 307 | ||
309 | args->handle = handle; | 308 | args->handle = handle; |
310 | return 0; | 309 | return 0; |
311 | } | 310 | } |
312 | 311 | ||
313 | static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) | 312 | static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) |
314 | { | 313 | { |
315 | drm_i915_private_t *dev_priv = obj->dev->dev_private; | 314 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
316 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
317 | 315 | ||
318 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && | 316 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
319 | obj_priv->tiling_mode != I915_TILING_NONE; | 317 | obj->tiling_mode != I915_TILING_NONE; |
320 | } | 318 | } |
321 | 319 | ||
322 | static inline void | 320 | static inline void |
@@ -392,12 +390,12 @@ slow_shmem_bit17_copy(struct page *gpu_page, | |||
392 | * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). | 390 | * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). |
393 | */ | 391 | */ |
394 | static int | 392 | static int |
395 | i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | 393 | i915_gem_shmem_pread_fast(struct drm_device *dev, |
394 | struct drm_i915_gem_object *obj, | ||
396 | struct drm_i915_gem_pread *args, | 395 | struct drm_i915_gem_pread *args, |
397 | struct drm_file *file_priv) | 396 | struct drm_file *file) |
398 | { | 397 | { |
399 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 398 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
400 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | ||
401 | ssize_t remain; | 399 | ssize_t remain; |
402 | loff_t offset; | 400 | loff_t offset; |
403 | char __user *user_data; | 401 | char __user *user_data; |
@@ -406,7 +404,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
406 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 404 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
407 | remain = args->size; | 405 | remain = args->size; |
408 | 406 | ||
409 | obj_priv = to_intel_bo(obj); | ||
410 | offset = args->offset; | 407 | offset = args->offset; |
411 | 408 | ||
412 | while (remain > 0) { | 409 | while (remain > 0) { |
@@ -455,12 +452,12 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
455 | * and not take page faults. | 452 | * and not take page faults. |
456 | */ | 453 | */ |
457 | static int | 454 | static int |
458 | i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | 455 | i915_gem_shmem_pread_slow(struct drm_device *dev, |
456 | struct drm_i915_gem_object *obj, | ||
459 | struct drm_i915_gem_pread *args, | 457 | struct drm_i915_gem_pread *args, |
460 | struct drm_file *file_priv) | 458 | struct drm_file *file) |
461 | { | 459 | { |
462 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 460 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
463 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
464 | struct mm_struct *mm = current->mm; | 461 | struct mm_struct *mm = current->mm; |
465 | struct page **user_pages; | 462 | struct page **user_pages; |
466 | ssize_t remain; | 463 | ssize_t remain; |
@@ -506,7 +503,6 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
506 | 503 | ||
507 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | 504 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); |
508 | 505 | ||
509 | obj_priv = to_intel_bo(obj); | ||
510 | offset = args->offset; | 506 | offset = args->offset; |
511 | 507 | ||
512 | while (remain > 0) { | 508 | while (remain > 0) { |
@@ -575,11 +571,10 @@ out: | |||
575 | */ | 571 | */ |
576 | int | 572 | int |
577 | i915_gem_pread_ioctl(struct drm_device *dev, void *data, | 573 | i915_gem_pread_ioctl(struct drm_device *dev, void *data, |
578 | struct drm_file *file_priv) | 574 | struct drm_file *file) |
579 | { | 575 | { |
580 | struct drm_i915_gem_pread *args = data; | 576 | struct drm_i915_gem_pread *args = data; |
581 | struct drm_gem_object *obj; | 577 | struct drm_i915_gem_object *obj; |
582 | struct drm_i915_gem_object *obj_priv; | ||
583 | int ret = 0; | 578 | int ret = 0; |
584 | 579 | ||
585 | if (args->size == 0) | 580 | if (args->size == 0) |
@@ -599,15 +594,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
599 | if (ret) | 594 | if (ret) |
600 | return ret; | 595 | return ret; |
601 | 596 | ||
602 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 597 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
603 | if (obj == NULL) { | 598 | if (obj == NULL) { |
604 | ret = -ENOENT; | 599 | ret = -ENOENT; |
605 | goto unlock; | 600 | goto unlock; |
606 | } | 601 | } |
607 | obj_priv = to_intel_bo(obj); | ||
608 | 602 | ||
609 | /* Bounds check source. */ | 603 | /* Bounds check source. */ |
610 | if (args->offset > obj->size || args->size > obj->size - args->offset) { | 604 | if (args->offset > obj->base.size || |
605 | args->size > obj->base.size - args->offset) { | ||
611 | ret = -EINVAL; | 606 | ret = -EINVAL; |
612 | goto out; | 607 | goto out; |
613 | } | 608 | } |
@@ -620,12 +615,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
620 | 615 | ||
621 | ret = -EFAULT; | 616 | ret = -EFAULT; |
622 | if (!i915_gem_object_needs_bit17_swizzle(obj)) | 617 | if (!i915_gem_object_needs_bit17_swizzle(obj)) |
623 | ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); | 618 | ret = i915_gem_shmem_pread_fast(dev, obj, args, file); |
624 | if (ret == -EFAULT) | 619 | if (ret == -EFAULT) |
625 | ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); | 620 | ret = i915_gem_shmem_pread_slow(dev, obj, args, file); |
626 | 621 | ||
627 | out: | 622 | out: |
628 | drm_gem_object_unreference(obj); | 623 | drm_gem_object_unreference(&obj->base); |
629 | unlock: | 624 | unlock: |
630 | mutex_unlock(&dev->struct_mutex); | 625 | mutex_unlock(&dev->struct_mutex); |
631 | return ret; | 626 | return ret; |
@@ -680,11 +675,11 @@ slow_kernel_write(struct io_mapping *mapping, | |||
680 | * user into the GTT, uncached. | 675 | * user into the GTT, uncached. |
681 | */ | 676 | */ |
682 | static int | 677 | static int |
683 | i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | 678 | i915_gem_gtt_pwrite_fast(struct drm_device *dev, |
679 | struct drm_i915_gem_object *obj, | ||
684 | struct drm_i915_gem_pwrite *args, | 680 | struct drm_i915_gem_pwrite *args, |
685 | struct drm_file *file_priv) | 681 | struct drm_file *file) |
686 | { | 682 | { |
687 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
688 | drm_i915_private_t *dev_priv = dev->dev_private; | 683 | drm_i915_private_t *dev_priv = dev->dev_private; |
689 | ssize_t remain; | 684 | ssize_t remain; |
690 | loff_t offset, page_base; | 685 | loff_t offset, page_base; |
@@ -694,8 +689,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
694 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 689 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
695 | remain = args->size; | 690 | remain = args->size; |
696 | 691 | ||
697 | obj_priv = to_intel_bo(obj); | 692 | offset = obj->gtt_offset + args->offset; |
698 | offset = obj_priv->gtt_offset + args->offset; | ||
699 | 693 | ||
700 | while (remain > 0) { | 694 | while (remain > 0) { |
701 | /* Operation in this page | 695 | /* Operation in this page |
@@ -735,11 +729,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
735 | * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). | 729 | * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). |
736 | */ | 730 | */ |
737 | static int | 731 | static int |
738 | i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | 732 | i915_gem_gtt_pwrite_slow(struct drm_device *dev, |
733 | struct drm_i915_gem_object *obj, | ||
739 | struct drm_i915_gem_pwrite *args, | 734 | struct drm_i915_gem_pwrite *args, |
740 | struct drm_file *file_priv) | 735 | struct drm_file *file) |
741 | { | 736 | { |
742 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
743 | drm_i915_private_t *dev_priv = dev->dev_private; | 737 | drm_i915_private_t *dev_priv = dev->dev_private; |
744 | ssize_t remain; | 738 | ssize_t remain; |
745 | loff_t gtt_page_base, offset; | 739 | loff_t gtt_page_base, offset; |
@@ -780,8 +774,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
780 | if (ret) | 774 | if (ret) |
781 | goto out_unpin_pages; | 775 | goto out_unpin_pages; |
782 | 776 | ||
783 | obj_priv = to_intel_bo(obj); | 777 | offset = obj->gtt_offset + args->offset; |
784 | offset = obj_priv->gtt_offset + args->offset; | ||
785 | 778 | ||
786 | while (remain > 0) { | 779 | while (remain > 0) { |
787 | /* Operation in this page | 780 | /* Operation in this page |
@@ -827,12 +820,12 @@ out_unpin_pages: | |||
827 | * copy_from_user into the kmapped pages backing the object. | 820 | * copy_from_user into the kmapped pages backing the object. |
828 | */ | 821 | */ |
829 | static int | 822 | static int |
830 | i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | 823 | i915_gem_shmem_pwrite_fast(struct drm_device *dev, |
824 | struct drm_i915_gem_object *obj, | ||
831 | struct drm_i915_gem_pwrite *args, | 825 | struct drm_i915_gem_pwrite *args, |
832 | struct drm_file *file_priv) | 826 | struct drm_file *file) |
833 | { | 827 | { |
834 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 828 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
835 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
836 | ssize_t remain; | 829 | ssize_t remain; |
837 | loff_t offset; | 830 | loff_t offset; |
838 | char __user *user_data; | 831 | char __user *user_data; |
@@ -841,9 +834,8 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
841 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 834 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
842 | remain = args->size; | 835 | remain = args->size; |
843 | 836 | ||
844 | obj_priv = to_intel_bo(obj); | ||
845 | offset = args->offset; | 837 | offset = args->offset; |
846 | obj_priv->dirty = 1; | 838 | obj->dirty = 1; |
847 | 839 | ||
848 | while (remain > 0) { | 840 | while (remain > 0) { |
849 | struct page *page; | 841 | struct page *page; |
@@ -898,12 +890,12 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
898 | * struct_mutex is held. | 890 | * struct_mutex is held. |
899 | */ | 891 | */ |
900 | static int | 892 | static int |
901 | i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | 893 | i915_gem_shmem_pwrite_slow(struct drm_device *dev, |
894 | struct drm_i915_gem_object *obj, | ||
902 | struct drm_i915_gem_pwrite *args, | 895 | struct drm_i915_gem_pwrite *args, |
903 | struct drm_file *file_priv) | 896 | struct drm_file *file) |
904 | { | 897 | { |
905 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 898 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
906 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
907 | struct mm_struct *mm = current->mm; | 899 | struct mm_struct *mm = current->mm; |
908 | struct page **user_pages; | 900 | struct page **user_pages; |
909 | ssize_t remain; | 901 | ssize_t remain; |
@@ -947,9 +939,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
947 | 939 | ||
948 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | 940 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); |
949 | 941 | ||
950 | obj_priv = to_intel_bo(obj); | ||
951 | offset = args->offset; | 942 | offset = args->offset; |
952 | obj_priv->dirty = 1; | 943 | obj->dirty = 1; |
953 | 944 | ||
954 | while (remain > 0) { | 945 | while (remain > 0) { |
955 | struct page *page; | 946 | struct page *page; |
@@ -1020,8 +1011,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1020 | struct drm_file *file) | 1011 | struct drm_file *file) |
1021 | { | 1012 | { |
1022 | struct drm_i915_gem_pwrite *args = data; | 1013 | struct drm_i915_gem_pwrite *args = data; |
1023 | struct drm_gem_object *obj; | 1014 | struct drm_i915_gem_object *obj; |
1024 | struct drm_i915_gem_object *obj_priv; | ||
1025 | int ret; | 1015 | int ret; |
1026 | 1016 | ||
1027 | if (args->size == 0) | 1017 | if (args->size == 0) |
@@ -1041,15 +1031,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1041 | if (ret) | 1031 | if (ret) |
1042 | return ret; | 1032 | return ret; |
1043 | 1033 | ||
1044 | obj = drm_gem_object_lookup(dev, file, args->handle); | 1034 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1045 | if (obj == NULL) { | 1035 | if (obj == NULL) { |
1046 | ret = -ENOENT; | 1036 | ret = -ENOENT; |
1047 | goto unlock; | 1037 | goto unlock; |
1048 | } | 1038 | } |
1049 | obj_priv = to_intel_bo(obj); | ||
1050 | 1039 | ||
1051 | /* Bounds check destination. */ | 1040 | /* Bounds check destination. */ |
1052 | if (args->offset > obj->size || args->size > obj->size - args->offset) { | 1041 | if (args->offset > obj->base.size || |
1042 | args->size > obj->base.size - args->offset) { | ||
1053 | ret = -EINVAL; | 1043 | ret = -EINVAL; |
1054 | goto out; | 1044 | goto out; |
1055 | } | 1045 | } |
@@ -1060,11 +1050,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1060 | * pread/pwrite currently are reading and writing from the CPU | 1050 | * pread/pwrite currently are reading and writing from the CPU |
1061 | * perspective, requiring manual detiling by the client. | 1051 | * perspective, requiring manual detiling by the client. |
1062 | */ | 1052 | */ |
1063 | if (obj_priv->phys_obj) | 1053 | if (obj->phys_obj) |
1064 | ret = i915_gem_phys_pwrite(dev, obj, args, file); | 1054 | ret = i915_gem_phys_pwrite(dev, obj, args, file); |
1065 | else if (obj_priv->tiling_mode == I915_TILING_NONE && | 1055 | else if (obj->tiling_mode == I915_TILING_NONE && |
1066 | obj_priv->gtt_space && | 1056 | obj->gtt_space && |
1067 | obj->write_domain != I915_GEM_DOMAIN_CPU) { | 1057 | obj->base.write_domain != I915_GEM_DOMAIN_CPU) { |
1068 | ret = i915_gem_object_pin(obj, 0, true); | 1058 | ret = i915_gem_object_pin(obj, 0, true); |
1069 | if (ret) | 1059 | if (ret) |
1070 | goto out; | 1060 | goto out; |
@@ -1092,7 +1082,7 @@ out_unpin: | |||
1092 | } | 1082 | } |
1093 | 1083 | ||
1094 | out: | 1084 | out: |
1095 | drm_gem_object_unreference(obj); | 1085 | drm_gem_object_unreference(&obj->base); |
1096 | unlock: | 1086 | unlock: |
1097 | mutex_unlock(&dev->struct_mutex); | 1087 | mutex_unlock(&dev->struct_mutex); |
1098 | return ret; | 1088 | return ret; |
@@ -1104,12 +1094,11 @@ unlock: | |||
1104 | */ | 1094 | */ |
1105 | int | 1095 | int |
1106 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | 1096 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
1107 | struct drm_file *file_priv) | 1097 | struct drm_file *file) |
1108 | { | 1098 | { |
1109 | struct drm_i915_private *dev_priv = dev->dev_private; | 1099 | struct drm_i915_private *dev_priv = dev->dev_private; |
1110 | struct drm_i915_gem_set_domain *args = data; | 1100 | struct drm_i915_gem_set_domain *args = data; |
1111 | struct drm_gem_object *obj; | 1101 | struct drm_i915_gem_object *obj; |
1112 | struct drm_i915_gem_object *obj_priv; | ||
1113 | uint32_t read_domains = args->read_domains; | 1102 | uint32_t read_domains = args->read_domains; |
1114 | uint32_t write_domain = args->write_domain; | 1103 | uint32_t write_domain = args->write_domain; |
1115 | int ret; | 1104 | int ret; |
@@ -1134,12 +1123,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1134 | if (ret) | 1123 | if (ret) |
1135 | return ret; | 1124 | return ret; |
1136 | 1125 | ||
1137 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1126 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1138 | if (obj == NULL) { | 1127 | if (obj == NULL) { |
1139 | ret = -ENOENT; | 1128 | ret = -ENOENT; |
1140 | goto unlock; | 1129 | goto unlock; |
1141 | } | 1130 | } |
1142 | obj_priv = to_intel_bo(obj); | ||
1143 | 1131 | ||
1144 | intel_mark_busy(dev, obj); | 1132 | intel_mark_busy(dev, obj); |
1145 | 1133 | ||
@@ -1149,9 +1137,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1149 | /* Update the LRU on the fence for the CPU access that's | 1137 | /* Update the LRU on the fence for the CPU access that's |
1150 | * about to occur. | 1138 | * about to occur. |
1151 | */ | 1139 | */ |
1152 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 1140 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
1153 | struct drm_i915_fence_reg *reg = | 1141 | struct drm_i915_fence_reg *reg = |
1154 | &dev_priv->fence_regs[obj_priv->fence_reg]; | 1142 | &dev_priv->fence_regs[obj->fence_reg]; |
1155 | list_move_tail(®->lru_list, | 1143 | list_move_tail(®->lru_list, |
1156 | &dev_priv->mm.fence_list); | 1144 | &dev_priv->mm.fence_list); |
1157 | } | 1145 | } |
@@ -1167,10 +1155,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1167 | } | 1155 | } |
1168 | 1156 | ||
1169 | /* Maintain LRU order of "inactive" objects */ | 1157 | /* Maintain LRU order of "inactive" objects */ |
1170 | if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) | 1158 | if (ret == 0 && i915_gem_object_is_inactive(obj)) |
1171 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | 1159 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
1172 | 1160 | ||
1173 | drm_gem_object_unreference(obj); | 1161 | drm_gem_object_unreference(&obj->base); |
1174 | unlock: | 1162 | unlock: |
1175 | mutex_unlock(&dev->struct_mutex); | 1163 | mutex_unlock(&dev->struct_mutex); |
1176 | return ret; | 1164 | return ret; |
@@ -1181,10 +1169,10 @@ unlock: | |||
1181 | */ | 1169 | */ |
1182 | int | 1170 | int |
1183 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | 1171 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
1184 | struct drm_file *file_priv) | 1172 | struct drm_file *file) |
1185 | { | 1173 | { |
1186 | struct drm_i915_gem_sw_finish *args = data; | 1174 | struct drm_i915_gem_sw_finish *args = data; |
1187 | struct drm_gem_object *obj; | 1175 | struct drm_i915_gem_object *obj; |
1188 | int ret = 0; | 1176 | int ret = 0; |
1189 | 1177 | ||
1190 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1178 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
@@ -1194,17 +1182,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1194 | if (ret) | 1182 | if (ret) |
1195 | return ret; | 1183 | return ret; |
1196 | 1184 | ||
1197 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1185 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1198 | if (obj == NULL) { | 1186 | if (obj == NULL) { |
1199 | ret = -ENOENT; | 1187 | ret = -ENOENT; |
1200 | goto unlock; | 1188 | goto unlock; |
1201 | } | 1189 | } |
1202 | 1190 | ||
1203 | /* Pinned buffers may be scanout, so flush the cache */ | 1191 | /* Pinned buffers may be scanout, so flush the cache */ |
1204 | if (to_intel_bo(obj)->pin_count) | 1192 | if (obj->pin_count) |
1205 | i915_gem_object_flush_cpu_write_domain(obj); | 1193 | i915_gem_object_flush_cpu_write_domain(obj); |
1206 | 1194 | ||
1207 | drm_gem_object_unreference(obj); | 1195 | drm_gem_object_unreference(&obj->base); |
1208 | unlock: | 1196 | unlock: |
1209 | mutex_unlock(&dev->struct_mutex); | 1197 | mutex_unlock(&dev->struct_mutex); |
1210 | return ret; | 1198 | return ret; |
@@ -1219,7 +1207,7 @@ unlock: | |||
1219 | */ | 1207 | */ |
1220 | int | 1208 | int |
1221 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | 1209 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
1222 | struct drm_file *file_priv) | 1210 | struct drm_file *file) |
1223 | { | 1211 | { |
1224 | struct drm_i915_private *dev_priv = dev->dev_private; | 1212 | struct drm_i915_private *dev_priv = dev->dev_private; |
1225 | struct drm_i915_gem_mmap *args = data; | 1213 | struct drm_i915_gem_mmap *args = data; |
@@ -1230,7 +1218,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1230 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1218 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
1231 | return -ENODEV; | 1219 | return -ENODEV; |
1232 | 1220 | ||
1233 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1221 | obj = drm_gem_object_lookup(dev, file, args->handle); |
1234 | if (obj == NULL) | 1222 | if (obj == NULL) |
1235 | return -ENOENT; | 1223 | return -ENOENT; |
1236 | 1224 | ||
@@ -1273,10 +1261,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1273 | */ | 1261 | */ |
1274 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1262 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
1275 | { | 1263 | { |
1276 | struct drm_gem_object *obj = vma->vm_private_data; | 1264 | struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); |
1277 | struct drm_device *dev = obj->dev; | 1265 | struct drm_device *dev = obj->base.dev; |
1278 | drm_i915_private_t *dev_priv = dev->dev_private; | 1266 | drm_i915_private_t *dev_priv = dev->dev_private; |
1279 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1280 | pgoff_t page_offset; | 1267 | pgoff_t page_offset; |
1281 | unsigned long pfn; | 1268 | unsigned long pfn; |
1282 | int ret = 0; | 1269 | int ret = 0; |
@@ -1288,17 +1275,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1288 | 1275 | ||
1289 | /* Now bind it into the GTT if needed */ | 1276 | /* Now bind it into the GTT if needed */ |
1290 | mutex_lock(&dev->struct_mutex); | 1277 | mutex_lock(&dev->struct_mutex); |
1291 | BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable); | 1278 | BUG_ON(obj->pin_count && !obj->pin_mappable); |
1292 | 1279 | ||
1293 | if (obj_priv->gtt_space) { | 1280 | if (obj->gtt_space) { |
1294 | if (!obj_priv->map_and_fenceable) { | 1281 | if (!obj->map_and_fenceable) { |
1295 | ret = i915_gem_object_unbind(obj); | 1282 | ret = i915_gem_object_unbind(obj); |
1296 | if (ret) | 1283 | if (ret) |
1297 | goto unlock; | 1284 | goto unlock; |
1298 | } | 1285 | } |
1299 | } | 1286 | } |
1300 | 1287 | ||
1301 | if (!obj_priv->gtt_space) { | 1288 | if (!obj->gtt_space) { |
1302 | ret = i915_gem_object_bind_to_gtt(obj, 0, true); | 1289 | ret = i915_gem_object_bind_to_gtt(obj, 0, true); |
1303 | if (ret) | 1290 | if (ret) |
1304 | goto unlock; | 1291 | goto unlock; |
@@ -1308,22 +1295,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1308 | if (ret) | 1295 | if (ret) |
1309 | goto unlock; | 1296 | goto unlock; |
1310 | 1297 | ||
1311 | if (!obj_priv->fault_mappable) { | 1298 | if (!obj->fault_mappable) { |
1312 | obj_priv->fault_mappable = true; | 1299 | obj->fault_mappable = true; |
1313 | i915_gem_info_update_mappable(dev_priv, obj_priv, true); | 1300 | i915_gem_info_update_mappable(dev_priv, obj, true); |
1314 | } | 1301 | } |
1315 | 1302 | ||
1316 | /* Need a new fence register? */ | 1303 | /* Need a new fence register? */ |
1317 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1304 | if (obj->tiling_mode != I915_TILING_NONE) { |
1318 | ret = i915_gem_object_get_fence_reg(obj, true); | 1305 | ret = i915_gem_object_get_fence_reg(obj, true); |
1319 | if (ret) | 1306 | if (ret) |
1320 | goto unlock; | 1307 | goto unlock; |
1321 | } | 1308 | } |
1322 | 1309 | ||
1323 | if (i915_gem_object_is_inactive(obj_priv)) | 1310 | if (i915_gem_object_is_inactive(obj)) |
1324 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | 1311 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
1325 | 1312 | ||
1326 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + | 1313 | pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) + |
1327 | page_offset; | 1314 | page_offset; |
1328 | 1315 | ||
1329 | /* Finally, remap it using the new GTT offset */ | 1316 | /* Finally, remap it using the new GTT offset */ |
@@ -1356,36 +1343,39 @@ unlock: | |||
1356 | * This routine allocates and attaches a fake offset for @obj. | 1343 | * This routine allocates and attaches a fake offset for @obj. |
1357 | */ | 1344 | */ |
1358 | static int | 1345 | static int |
1359 | i915_gem_create_mmap_offset(struct drm_gem_object *obj) | 1346 | i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj) |
1360 | { | 1347 | { |
1361 | struct drm_device *dev = obj->dev; | 1348 | struct drm_device *dev = obj->base.dev; |
1362 | struct drm_gem_mm *mm = dev->mm_private; | 1349 | struct drm_gem_mm *mm = dev->mm_private; |
1363 | struct drm_map_list *list; | 1350 | struct drm_map_list *list; |
1364 | struct drm_local_map *map; | 1351 | struct drm_local_map *map; |
1365 | int ret = 0; | 1352 | int ret = 0; |
1366 | 1353 | ||
1367 | /* Set the object up for mmap'ing */ | 1354 | /* Set the object up for mmap'ing */ |
1368 | list = &obj->map_list; | 1355 | list = &obj->base.map_list; |
1369 | list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); | 1356 | list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); |
1370 | if (!list->map) | 1357 | if (!list->map) |
1371 | return -ENOMEM; | 1358 | return -ENOMEM; |
1372 | 1359 | ||
1373 | map = list->map; | 1360 | map = list->map; |
1374 | map->type = _DRM_GEM; | 1361 | map->type = _DRM_GEM; |
1375 | map->size = obj->size; | 1362 | map->size = obj->base.size; |
1376 | map->handle = obj; | 1363 | map->handle = obj; |
1377 | 1364 | ||
1378 | /* Get a DRM GEM mmap offset allocated... */ | 1365 | /* Get a DRM GEM mmap offset allocated... */ |
1379 | list->file_offset_node = drm_mm_search_free(&mm->offset_manager, | 1366 | list->file_offset_node = drm_mm_search_free(&mm->offset_manager, |
1380 | obj->size / PAGE_SIZE, 0, 0); | 1367 | obj->base.size / PAGE_SIZE, |
1368 | 0, 0); | ||
1381 | if (!list->file_offset_node) { | 1369 | if (!list->file_offset_node) { |
1382 | DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); | 1370 | DRM_ERROR("failed to allocate offset for bo %d\n", |
1371 | obj->base.name); | ||
1383 | ret = -ENOSPC; | 1372 | ret = -ENOSPC; |
1384 | goto out_free_list; | 1373 | goto out_free_list; |
1385 | } | 1374 | } |
1386 | 1375 | ||
1387 | list->file_offset_node = drm_mm_get_block(list->file_offset_node, | 1376 | list->file_offset_node = drm_mm_get_block(list->file_offset_node, |
1388 | obj->size / PAGE_SIZE, 0); | 1377 | obj->base.size / PAGE_SIZE, |
1378 | 0); | ||
1389 | if (!list->file_offset_node) { | 1379 | if (!list->file_offset_node) { |
1390 | ret = -ENOMEM; | 1380 | ret = -ENOMEM; |
1391 | goto out_free_list; | 1381 | goto out_free_list; |
@@ -1424,29 +1414,28 @@ out_free_list: | |||
1424 | * fixup by i915_gem_fault(). | 1414 | * fixup by i915_gem_fault(). |
1425 | */ | 1415 | */ |
1426 | void | 1416 | void |
1427 | i915_gem_release_mmap(struct drm_gem_object *obj) | 1417 | i915_gem_release_mmap(struct drm_i915_gem_object *obj) |
1428 | { | 1418 | { |
1429 | struct drm_device *dev = obj->dev; | 1419 | struct drm_device *dev = obj->base.dev; |
1430 | struct drm_i915_private *dev_priv = dev->dev_private; | 1420 | struct drm_i915_private *dev_priv = dev->dev_private; |
1431 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1432 | 1421 | ||
1433 | if (unlikely(obj->map_list.map && dev->dev_mapping)) | 1422 | if (unlikely(obj->base.map_list.map && dev->dev_mapping)) |
1434 | unmap_mapping_range(dev->dev_mapping, | 1423 | unmap_mapping_range(dev->dev_mapping, |
1435 | (loff_t)obj->map_list.hash.key<<PAGE_SHIFT, | 1424 | (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, |
1436 | obj->size, 1); | 1425 | obj->base.size, 1); |
1437 | 1426 | ||
1438 | if (obj_priv->fault_mappable) { | 1427 | if (obj->fault_mappable) { |
1439 | obj_priv->fault_mappable = false; | 1428 | obj->fault_mappable = false; |
1440 | i915_gem_info_update_mappable(dev_priv, obj_priv, false); | 1429 | i915_gem_info_update_mappable(dev_priv, obj, false); |
1441 | } | 1430 | } |
1442 | } | 1431 | } |
1443 | 1432 | ||
1444 | static void | 1433 | static void |
1445 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) | 1434 | i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj) |
1446 | { | 1435 | { |
1447 | struct drm_device *dev = obj->dev; | 1436 | struct drm_device *dev = obj->base.dev; |
1448 | struct drm_gem_mm *mm = dev->mm_private; | 1437 | struct drm_gem_mm *mm = dev->mm_private; |
1449 | struct drm_map_list *list = &obj->map_list; | 1438 | struct drm_map_list *list = &obj->base.map_list; |
1450 | 1439 | ||
1451 | drm_ht_remove_item(&mm->offset_hash, &list->hash); | 1440 | drm_ht_remove_item(&mm->offset_hash, &list->hash); |
1452 | drm_mm_put_block(list->file_offset_node); | 1441 | drm_mm_put_block(list->file_offset_node); |
@@ -1462,23 +1451,23 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj) | |||
1462 | * potential fence register mapping. | 1451 | * potential fence register mapping. |
1463 | */ | 1452 | */ |
1464 | static uint32_t | 1453 | static uint32_t |
1465 | i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv) | 1454 | i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj) |
1466 | { | 1455 | { |
1467 | struct drm_device *dev = obj_priv->base.dev; | 1456 | struct drm_device *dev = obj->base.dev; |
1468 | 1457 | ||
1469 | /* | 1458 | /* |
1470 | * Minimum alignment is 4k (GTT page size), but might be greater | 1459 | * Minimum alignment is 4k (GTT page size), but might be greater |
1471 | * if a fence register is needed for the object. | 1460 | * if a fence register is needed for the object. |
1472 | */ | 1461 | */ |
1473 | if (INTEL_INFO(dev)->gen >= 4 || | 1462 | if (INTEL_INFO(dev)->gen >= 4 || |
1474 | obj_priv->tiling_mode == I915_TILING_NONE) | 1463 | obj->tiling_mode == I915_TILING_NONE) |
1475 | return 4096; | 1464 | return 4096; |
1476 | 1465 | ||
1477 | /* | 1466 | /* |
1478 | * Previous chips need to be aligned to the size of the smallest | 1467 | * Previous chips need to be aligned to the size of the smallest |
1479 | * fence register that can contain the object. | 1468 | * fence register that can contain the object. |
1480 | */ | 1469 | */ |
1481 | return i915_gem_get_gtt_size(obj_priv); | 1470 | return i915_gem_get_gtt_size(obj); |
1482 | } | 1471 | } |
1483 | 1472 | ||
1484 | /** | 1473 | /** |
@@ -1490,16 +1479,16 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv) | |||
1490 | * unfenced tiled surface requirements. | 1479 | * unfenced tiled surface requirements. |
1491 | */ | 1480 | */ |
1492 | static uint32_t | 1481 | static uint32_t |
1493 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv) | 1482 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) |
1494 | { | 1483 | { |
1495 | struct drm_device *dev = obj_priv->base.dev; | 1484 | struct drm_device *dev = obj->base.dev; |
1496 | int tile_height; | 1485 | int tile_height; |
1497 | 1486 | ||
1498 | /* | 1487 | /* |
1499 | * Minimum alignment is 4k (GTT page size) for sane hw. | 1488 | * Minimum alignment is 4k (GTT page size) for sane hw. |
1500 | */ | 1489 | */ |
1501 | if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || | 1490 | if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || |
1502 | obj_priv->tiling_mode == I915_TILING_NONE) | 1491 | obj->tiling_mode == I915_TILING_NONE) |
1503 | return 4096; | 1492 | return 4096; |
1504 | 1493 | ||
1505 | /* | 1494 | /* |
@@ -1508,18 +1497,18 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv) | |||
1508 | * placed in a fenced gtt region). | 1497 | * placed in a fenced gtt region). |
1509 | */ | 1498 | */ |
1510 | if (IS_GEN2(dev) || | 1499 | if (IS_GEN2(dev) || |
1511 | (obj_priv->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) | 1500 | (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) |
1512 | tile_height = 32; | 1501 | tile_height = 32; |
1513 | else | 1502 | else |
1514 | tile_height = 8; | 1503 | tile_height = 8; |
1515 | 1504 | ||
1516 | return tile_height * obj_priv->stride * 2; | 1505 | return tile_height * obj->stride * 2; |
1517 | } | 1506 | } |
1518 | 1507 | ||
1519 | static uint32_t | 1508 | static uint32_t |
1520 | i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) | 1509 | i915_gem_get_gtt_size(struct drm_i915_gem_object *obj) |
1521 | { | 1510 | { |
1522 | struct drm_device *dev = obj_priv->base.dev; | 1511 | struct drm_device *dev = obj->base.dev; |
1523 | uint32_t size; | 1512 | uint32_t size; |
1524 | 1513 | ||
1525 | /* | 1514 | /* |
@@ -1527,7 +1516,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) | |||
1527 | * if a fence register is needed for the object. | 1516 | * if a fence register is needed for the object. |
1528 | */ | 1517 | */ |
1529 | if (INTEL_INFO(dev)->gen >= 4) | 1518 | if (INTEL_INFO(dev)->gen >= 4) |
1530 | return obj_priv->base.size; | 1519 | return obj->base.size; |
1531 | 1520 | ||
1532 | /* | 1521 | /* |
1533 | * Previous chips need to be aligned to the size of the smallest | 1522 | * Previous chips need to be aligned to the size of the smallest |
@@ -1538,7 +1527,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) | |||
1538 | else | 1527 | else |
1539 | size = 512*1024; | 1528 | size = 512*1024; |
1540 | 1529 | ||
1541 | while (size < obj_priv->base.size) | 1530 | while (size < obj->base.size) |
1542 | size <<= 1; | 1531 | size <<= 1; |
1543 | 1532 | ||
1544 | return size; | 1533 | return size; |
@@ -1548,7 +1537,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) | |||
1548 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing | 1537 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing |
1549 | * @dev: DRM device | 1538 | * @dev: DRM device |
1550 | * @data: GTT mapping ioctl data | 1539 | * @data: GTT mapping ioctl data |
1551 | * @file_priv: GEM object info | 1540 | * @file: GEM object info |
1552 | * | 1541 | * |
1553 | * Simply returns the fake offset to userspace so it can mmap it. | 1542 | * Simply returns the fake offset to userspace so it can mmap it. |
1554 | * The mmap call will end up in drm_gem_mmap(), which will set things | 1543 | * The mmap call will end up in drm_gem_mmap(), which will set things |
@@ -1561,12 +1550,11 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) | |||
1561 | */ | 1550 | */ |
1562 | int | 1551 | int |
1563 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | 1552 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, |
1564 | struct drm_file *file_priv) | 1553 | struct drm_file *file) |
1565 | { | 1554 | { |
1566 | struct drm_i915_private *dev_priv = dev->dev_private; | 1555 | struct drm_i915_private *dev_priv = dev->dev_private; |
1567 | struct drm_i915_gem_mmap_gtt *args = data; | 1556 | struct drm_i915_gem_mmap_gtt *args = data; |
1568 | struct drm_gem_object *obj; | 1557 | struct drm_i915_gem_object *obj; |
1569 | struct drm_i915_gem_object *obj_priv; | ||
1570 | int ret; | 1558 | int ret; |
1571 | 1559 | ||
1572 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1560 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
@@ -1576,44 +1564,42 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1576 | if (ret) | 1564 | if (ret) |
1577 | return ret; | 1565 | return ret; |
1578 | 1566 | ||
1579 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1567 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1580 | if (obj == NULL) { | 1568 | if (obj == NULL) { |
1581 | ret = -ENOENT; | 1569 | ret = -ENOENT; |
1582 | goto unlock; | 1570 | goto unlock; |
1583 | } | 1571 | } |
1584 | obj_priv = to_intel_bo(obj); | ||
1585 | 1572 | ||
1586 | if (obj->size > dev_priv->mm.gtt_mappable_end) { | 1573 | if (obj->base.size > dev_priv->mm.gtt_mappable_end) { |
1587 | ret = -E2BIG; | 1574 | ret = -E2BIG; |
1588 | goto unlock; | 1575 | goto unlock; |
1589 | } | 1576 | } |
1590 | 1577 | ||
1591 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 1578 | if (obj->madv != I915_MADV_WILLNEED) { |
1592 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | 1579 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); |
1593 | ret = -EINVAL; | 1580 | ret = -EINVAL; |
1594 | goto out; | 1581 | goto out; |
1595 | } | 1582 | } |
1596 | 1583 | ||
1597 | if (!obj->map_list.map) { | 1584 | if (!obj->base.map_list.map) { |
1598 | ret = i915_gem_create_mmap_offset(obj); | 1585 | ret = i915_gem_create_mmap_offset(obj); |
1599 | if (ret) | 1586 | if (ret) |
1600 | goto out; | 1587 | goto out; |
1601 | } | 1588 | } |
1602 | 1589 | ||
1603 | args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; | 1590 | args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; |
1604 | 1591 | ||
1605 | out: | 1592 | out: |
1606 | drm_gem_object_unreference(obj); | 1593 | drm_gem_object_unreference(&obj->base); |
1607 | unlock: | 1594 | unlock: |
1608 | mutex_unlock(&dev->struct_mutex); | 1595 | mutex_unlock(&dev->struct_mutex); |
1609 | return ret; | 1596 | return ret; |
1610 | } | 1597 | } |
1611 | 1598 | ||
1612 | static int | 1599 | static int |
1613 | i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, | 1600 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, |
1614 | gfp_t gfpmask) | 1601 | gfp_t gfpmask) |
1615 | { | 1602 | { |
1616 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1617 | int page_count, i; | 1603 | int page_count, i; |
1618 | struct address_space *mapping; | 1604 | struct address_space *mapping; |
1619 | struct inode *inode; | 1605 | struct inode *inode; |
@@ -1622,13 +1608,13 @@ i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, | |||
1622 | /* Get the list of pages out of our struct file. They'll be pinned | 1608 | /* Get the list of pages out of our struct file. They'll be pinned |
1623 | * at this point until we release them. | 1609 | * at this point until we release them. |
1624 | */ | 1610 | */ |
1625 | page_count = obj->size / PAGE_SIZE; | 1611 | page_count = obj->base.size / PAGE_SIZE; |
1626 | BUG_ON(obj_priv->pages != NULL); | 1612 | BUG_ON(obj->pages != NULL); |
1627 | obj_priv->pages = drm_malloc_ab(page_count, sizeof(struct page *)); | 1613 | obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); |
1628 | if (obj_priv->pages == NULL) | 1614 | if (obj->pages == NULL) |
1629 | return -ENOMEM; | 1615 | return -ENOMEM; |
1630 | 1616 | ||
1631 | inode = obj->filp->f_path.dentry->d_inode; | 1617 | inode = obj->base.filp->f_path.dentry->d_inode; |
1632 | mapping = inode->i_mapping; | 1618 | mapping = inode->i_mapping; |
1633 | for (i = 0; i < page_count; i++) { | 1619 | for (i = 0; i < page_count; i++) { |
1634 | page = read_cache_page_gfp(mapping, i, | 1620 | page = read_cache_page_gfp(mapping, i, |
@@ -1639,51 +1625,50 @@ i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, | |||
1639 | if (IS_ERR(page)) | 1625 | if (IS_ERR(page)) |
1640 | goto err_pages; | 1626 | goto err_pages; |
1641 | 1627 | ||
1642 | obj_priv->pages[i] = page; | 1628 | obj->pages[i] = page; |
1643 | } | 1629 | } |
1644 | 1630 | ||
1645 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1631 | if (obj->tiling_mode != I915_TILING_NONE) |
1646 | i915_gem_object_do_bit_17_swizzle(obj); | 1632 | i915_gem_object_do_bit_17_swizzle(obj); |
1647 | 1633 | ||
1648 | return 0; | 1634 | return 0; |
1649 | 1635 | ||
1650 | err_pages: | 1636 | err_pages: |
1651 | while (i--) | 1637 | while (i--) |
1652 | page_cache_release(obj_priv->pages[i]); | 1638 | page_cache_release(obj->pages[i]); |
1653 | 1639 | ||
1654 | drm_free_large(obj_priv->pages); | 1640 | drm_free_large(obj->pages); |
1655 | obj_priv->pages = NULL; | 1641 | obj->pages = NULL; |
1656 | return PTR_ERR(page); | 1642 | return PTR_ERR(page); |
1657 | } | 1643 | } |
1658 | 1644 | ||
1659 | static void | 1645 | static void |
1660 | i915_gem_object_put_pages_gtt(struct drm_gem_object *obj) | 1646 | i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) |
1661 | { | 1647 | { |
1662 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1648 | int page_count = obj->base.size / PAGE_SIZE; |
1663 | int page_count = obj->size / PAGE_SIZE; | ||
1664 | int i; | 1649 | int i; |
1665 | 1650 | ||
1666 | BUG_ON(obj_priv->madv == __I915_MADV_PURGED); | 1651 | BUG_ON(obj->madv == __I915_MADV_PURGED); |
1667 | 1652 | ||
1668 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1653 | if (obj->tiling_mode != I915_TILING_NONE) |
1669 | i915_gem_object_save_bit_17_swizzle(obj); | 1654 | i915_gem_object_save_bit_17_swizzle(obj); |
1670 | 1655 | ||
1671 | if (obj_priv->madv == I915_MADV_DONTNEED) | 1656 | if (obj->madv == I915_MADV_DONTNEED) |
1672 | obj_priv->dirty = 0; | 1657 | obj->dirty = 0; |
1673 | 1658 | ||
1674 | for (i = 0; i < page_count; i++) { | 1659 | for (i = 0; i < page_count; i++) { |
1675 | if (obj_priv->dirty) | 1660 | if (obj->dirty) |
1676 | set_page_dirty(obj_priv->pages[i]); | 1661 | set_page_dirty(obj->pages[i]); |
1677 | 1662 | ||
1678 | if (obj_priv->madv == I915_MADV_WILLNEED) | 1663 | if (obj->madv == I915_MADV_WILLNEED) |
1679 | mark_page_accessed(obj_priv->pages[i]); | 1664 | mark_page_accessed(obj->pages[i]); |
1680 | 1665 | ||
1681 | page_cache_release(obj_priv->pages[i]); | 1666 | page_cache_release(obj->pages[i]); |
1682 | } | 1667 | } |
1683 | obj_priv->dirty = 0; | 1668 | obj->dirty = 0; |
1684 | 1669 | ||
1685 | drm_free_large(obj_priv->pages); | 1670 | drm_free_large(obj->pages); |
1686 | obj_priv->pages = NULL; | 1671 | obj->pages = NULL; |
1687 | } | 1672 | } |
1688 | 1673 | ||
1689 | static uint32_t | 1674 | static uint32_t |
@@ -1695,47 +1680,44 @@ i915_gem_next_request_seqno(struct drm_device *dev, | |||
1695 | } | 1680 | } |
1696 | 1681 | ||
1697 | static void | 1682 | static void |
1698 | i915_gem_object_move_to_active(struct drm_gem_object *obj, | 1683 | i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
1699 | struct intel_ring_buffer *ring) | 1684 | struct intel_ring_buffer *ring) |
1700 | { | 1685 | { |
1701 | struct drm_device *dev = obj->dev; | 1686 | struct drm_device *dev = obj->base.dev; |
1702 | struct drm_i915_private *dev_priv = dev->dev_private; | 1687 | struct drm_i915_private *dev_priv = dev->dev_private; |
1703 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1704 | uint32_t seqno = i915_gem_next_request_seqno(dev, ring); | 1688 | uint32_t seqno = i915_gem_next_request_seqno(dev, ring); |
1705 | 1689 | ||
1706 | BUG_ON(ring == NULL); | 1690 | BUG_ON(ring == NULL); |
1707 | obj_priv->ring = ring; | 1691 | obj->ring = ring; |
1708 | 1692 | ||
1709 | /* Add a reference if we're newly entering the active list. */ | 1693 | /* Add a reference if we're newly entering the active list. */ |
1710 | if (!obj_priv->active) { | 1694 | if (!obj->active) { |
1711 | drm_gem_object_reference(obj); | 1695 | drm_gem_object_reference(&obj->base); |
1712 | obj_priv->active = 1; | 1696 | obj->active = 1; |
1713 | } | 1697 | } |
1714 | 1698 | ||
1715 | /* Move from whatever list we were on to the tail of execution. */ | 1699 | /* Move from whatever list we were on to the tail of execution. */ |
1716 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list); | 1700 | list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); |
1717 | list_move_tail(&obj_priv->ring_list, &ring->active_list); | 1701 | list_move_tail(&obj->ring_list, &ring->active_list); |
1718 | obj_priv->last_rendering_seqno = seqno; | 1702 | obj->last_rendering_seqno = seqno; |
1719 | } | 1703 | } |
1720 | 1704 | ||
1721 | static void | 1705 | static void |
1722 | i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | 1706 | i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) |
1723 | { | 1707 | { |
1724 | struct drm_device *dev = obj->dev; | 1708 | struct drm_device *dev = obj->base.dev; |
1725 | drm_i915_private_t *dev_priv = dev->dev_private; | 1709 | drm_i915_private_t *dev_priv = dev->dev_private; |
1726 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1727 | 1710 | ||
1728 | BUG_ON(!obj_priv->active); | 1711 | BUG_ON(!obj->active); |
1729 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list); | 1712 | list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); |
1730 | list_del_init(&obj_priv->ring_list); | 1713 | list_del_init(&obj->ring_list); |
1731 | obj_priv->last_rendering_seqno = 0; | 1714 | obj->last_rendering_seqno = 0; |
1732 | } | 1715 | } |
1733 | 1716 | ||
1734 | /* Immediately discard the backing storage */ | 1717 | /* Immediately discard the backing storage */ |
1735 | static void | 1718 | static void |
1736 | i915_gem_object_truncate(struct drm_gem_object *obj) | 1719 | i915_gem_object_truncate(struct drm_i915_gem_object *obj) |
1737 | { | 1720 | { |
1738 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1739 | struct inode *inode; | 1721 | struct inode *inode; |
1740 | 1722 | ||
1741 | /* Our goal here is to return as much of the memory as | 1723 | /* Our goal here is to return as much of the memory as |
@@ -1744,40 +1726,39 @@ i915_gem_object_truncate(struct drm_gem_object *obj) | |||
1744 | * backing pages, *now*. Here we mirror the actions taken | 1726 | * backing pages, *now*. Here we mirror the actions taken |
1745 | * when by shmem_delete_inode() to release the backing store. | 1727 | * when by shmem_delete_inode() to release the backing store. |
1746 | */ | 1728 | */ |
1747 | inode = obj->filp->f_path.dentry->d_inode; | 1729 | inode = obj->base.filp->f_path.dentry->d_inode; |
1748 | truncate_inode_pages(inode->i_mapping, 0); | 1730 | truncate_inode_pages(inode->i_mapping, 0); |
1749 | if (inode->i_op->truncate_range) | 1731 | if (inode->i_op->truncate_range) |
1750 | inode->i_op->truncate_range(inode, 0, (loff_t)-1); | 1732 | inode->i_op->truncate_range(inode, 0, (loff_t)-1); |
1751 | 1733 | ||
1752 | obj_priv->madv = __I915_MADV_PURGED; | 1734 | obj->madv = __I915_MADV_PURGED; |
1753 | } | 1735 | } |
1754 | 1736 | ||
1755 | static inline int | 1737 | static inline int |
1756 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) | 1738 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) |
1757 | { | 1739 | { |
1758 | return obj_priv->madv == I915_MADV_DONTNEED; | 1740 | return obj->madv == I915_MADV_DONTNEED; |
1759 | } | 1741 | } |
1760 | 1742 | ||
1761 | static void | 1743 | static void |
1762 | i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | 1744 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) |
1763 | { | 1745 | { |
1764 | struct drm_device *dev = obj->dev; | 1746 | struct drm_device *dev = obj->base.dev; |
1765 | drm_i915_private_t *dev_priv = dev->dev_private; | 1747 | drm_i915_private_t *dev_priv = dev->dev_private; |
1766 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1767 | 1748 | ||
1768 | if (obj_priv->pin_count != 0) | 1749 | if (obj->pin_count != 0) |
1769 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); | 1750 | list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); |
1770 | else | 1751 | else |
1771 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | 1752 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
1772 | list_del_init(&obj_priv->ring_list); | 1753 | list_del_init(&obj->ring_list); |
1773 | 1754 | ||
1774 | BUG_ON(!list_empty(&obj_priv->gpu_write_list)); | 1755 | BUG_ON(!list_empty(&obj->gpu_write_list)); |
1775 | 1756 | ||
1776 | obj_priv->last_rendering_seqno = 0; | 1757 | obj->last_rendering_seqno = 0; |
1777 | obj_priv->ring = NULL; | 1758 | obj->ring = NULL; |
1778 | if (obj_priv->active) { | 1759 | if (obj->active) { |
1779 | obj_priv->active = 0; | 1760 | obj->active = 0; |
1780 | drm_gem_object_unreference(obj); | 1761 | drm_gem_object_unreference(&obj->base); |
1781 | } | 1762 | } |
1782 | WARN_ON(i915_verify_lists(dev)); | 1763 | WARN_ON(i915_verify_lists(dev)); |
1783 | } | 1764 | } |
@@ -1788,30 +1769,28 @@ i915_gem_process_flushing_list(struct drm_device *dev, | |||
1788 | struct intel_ring_buffer *ring) | 1769 | struct intel_ring_buffer *ring) |
1789 | { | 1770 | { |
1790 | drm_i915_private_t *dev_priv = dev->dev_private; | 1771 | drm_i915_private_t *dev_priv = dev->dev_private; |
1791 | struct drm_i915_gem_object *obj_priv, *next; | 1772 | struct drm_i915_gem_object *obj, *next; |
1792 | 1773 | ||
1793 | list_for_each_entry_safe(obj_priv, next, | 1774 | list_for_each_entry_safe(obj, next, |
1794 | &ring->gpu_write_list, | 1775 | &ring->gpu_write_list, |
1795 | gpu_write_list) { | 1776 | gpu_write_list) { |
1796 | struct drm_gem_object *obj = &obj_priv->base; | 1777 | if (obj->base.write_domain & flush_domains) { |
1778 | uint32_t old_write_domain = obj->base.write_domain; | ||
1797 | 1779 | ||
1798 | if (obj->write_domain & flush_domains) { | 1780 | obj->base.write_domain = 0; |
1799 | uint32_t old_write_domain = obj->write_domain; | 1781 | list_del_init(&obj->gpu_write_list); |
1800 | |||
1801 | obj->write_domain = 0; | ||
1802 | list_del_init(&obj_priv->gpu_write_list); | ||
1803 | i915_gem_object_move_to_active(obj, ring); | 1782 | i915_gem_object_move_to_active(obj, ring); |
1804 | 1783 | ||
1805 | /* update the fence lru list */ | 1784 | /* update the fence lru list */ |
1806 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 1785 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
1807 | struct drm_i915_fence_reg *reg = | 1786 | struct drm_i915_fence_reg *reg = |
1808 | &dev_priv->fence_regs[obj_priv->fence_reg]; | 1787 | &dev_priv->fence_regs[obj->fence_reg]; |
1809 | list_move_tail(®->lru_list, | 1788 | list_move_tail(®->lru_list, |
1810 | &dev_priv->mm.fence_list); | 1789 | &dev_priv->mm.fence_list); |
1811 | } | 1790 | } |
1812 | 1791 | ||
1813 | trace_i915_gem_object_change_domain(obj, | 1792 | trace_i915_gem_object_change_domain(obj, |
1814 | obj->read_domains, | 1793 | obj->base.read_domains, |
1815 | old_write_domain); | 1794 | old_write_domain); |
1816 | } | 1795 | } |
1817 | } | 1796 | } |
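The per-object test driving the loop above is a plain domain intersection: an object parked on a ring's gpu_write_list is affected by a flush exactly when its write domain is among the flushed domains. As a sketch:

  #include <stdint.h>
  #include <stdbool.h>

  /* True iff a flush of 'flush_domains' retires an object whose
   * pending GPU write lives in 'write_domain'. */
  static bool write_flushed(uint32_t write_domain, uint32_t flush_domains)
  {
          return (write_domain & flush_domains) != 0;
  }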
@@ -1912,22 +1891,22 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, | |||
1912 | } | 1891 | } |
1913 | 1892 | ||
1914 | while (!list_empty(&ring->active_list)) { | 1893 | while (!list_empty(&ring->active_list)) { |
1915 | struct drm_i915_gem_object *obj_priv; | 1894 | struct drm_i915_gem_object *obj; |
1916 | 1895 | ||
1917 | obj_priv = list_first_entry(&ring->active_list, | 1896 | obj = list_first_entry(&ring->active_list, |
1918 | struct drm_i915_gem_object, | 1897 | struct drm_i915_gem_object, |
1919 | ring_list); | 1898 | ring_list); |
1920 | 1899 | ||
1921 | obj_priv->base.write_domain = 0; | 1900 | obj->base.write_domain = 0; |
1922 | list_del_init(&obj_priv->gpu_write_list); | 1901 | list_del_init(&obj->gpu_write_list); |
1923 | i915_gem_object_move_to_inactive(&obj_priv->base); | 1902 | i915_gem_object_move_to_inactive(obj); |
1924 | } | 1903 | } |
1925 | } | 1904 | } |
1926 | 1905 | ||
1927 | void i915_gem_reset(struct drm_device *dev) | 1906 | void i915_gem_reset(struct drm_device *dev) |
1928 | { | 1907 | { |
1929 | struct drm_i915_private *dev_priv = dev->dev_private; | 1908 | struct drm_i915_private *dev_priv = dev->dev_private; |
1930 | struct drm_i915_gem_object *obj_priv; | 1909 | struct drm_i915_gem_object *obj; |
1931 | int i; | 1910 | int i; |
1932 | 1911 | ||
1933 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); | 1912 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); |
@@ -1939,23 +1918,23 @@ void i915_gem_reset(struct drm_device *dev) | |||
1939 | * lost bo to the inactive list. | 1918 | * lost bo to the inactive list. |
1940 | */ | 1919 | */ |
1941 | while (!list_empty(&dev_priv->mm.flushing_list)) { | 1920 | while (!list_empty(&dev_priv->mm.flushing_list)) { |
1942 | obj_priv = list_first_entry(&dev_priv->mm.flushing_list, | 1921 | obj = list_first_entry(&dev_priv->mm.flushing_list,
1943 | struct drm_i915_gem_object, | 1922 | struct drm_i915_gem_object, |
1944 | mm_list); | 1923 | mm_list); |
1945 | 1924 | ||
1946 | obj_priv->base.write_domain = 0; | 1925 | obj->base.write_domain = 0; |
1947 | list_del_init(&obj_priv->gpu_write_list); | 1926 | list_del_init(&obj->gpu_write_list); |
1948 | i915_gem_object_move_to_inactive(&obj_priv->base); | 1927 | i915_gem_object_move_to_inactive(obj); |
1949 | } | 1928 | } |
1950 | 1929 | ||
1951 | /* Move everything out of the GPU domains to ensure we do any | 1930 | /* Move everything out of the GPU domains to ensure we do any |
1952 | * necessary invalidation upon reuse. | 1931 | * necessary invalidation upon reuse. |
1953 | */ | 1932 | */ |
1954 | list_for_each_entry(obj_priv, | 1933 | list_for_each_entry(obj, |
1955 | &dev_priv->mm.inactive_list, | 1934 | &dev_priv->mm.inactive_list, |
1956 | mm_list) | 1935 | mm_list) |
1957 | { | 1936 | { |
1958 | obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; | 1937 | obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; |
1959 | } | 1938 | } |
1960 | 1939 | ||
1961 | /* The fence registers are invalidated so clear them out */ | 1940 | /* The fence registers are invalidated so clear them out */ |
@@ -2008,18 +1987,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev, | |||
2008 | * by the ringbuffer to the flushing/inactive lists as appropriate. | 1987 | * by the ringbuffer to the flushing/inactive lists as appropriate. |
2009 | */ | 1988 | */ |
2010 | while (!list_empty(&ring->active_list)) { | 1989 | while (!list_empty(&ring->active_list)) { |
2011 | struct drm_gem_object *obj; | 1990 | struct drm_i915_gem_object *obj; |
2012 | struct drm_i915_gem_object *obj_priv; | ||
2013 | 1991 | ||
2014 | obj_priv = list_first_entry(&ring->active_list, | 1992 | obj = list_first_entry(&ring->active_list,
2015 | struct drm_i915_gem_object, | 1993 | struct drm_i915_gem_object, |
2016 | ring_list); | 1994 | ring_list); |
2017 | 1995 | ||
2018 | if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) | 1996 | if (!i915_seqno_passed(seqno, obj->last_rendering_seqno)) |
2019 | break; | 1997 | break; |
2020 | 1998 | ||
2021 | obj = &obj_priv->base; | 1999 | if (obj->base.write_domain != 0) |
2022 | if (obj->write_domain != 0) | ||
2023 | i915_gem_object_move_to_flushing(obj); | 2000 | i915_gem_object_move_to_flushing(obj); |
2024 | else | 2001 | else |
2025 | i915_gem_object_move_to_inactive(obj); | 2002 | i915_gem_object_move_to_inactive(obj); |
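The retire loop stops at the first object whose last_rendering_seqno the ring has not yet reached. Seqnos are 32-bit counters that wrap, so i915_seqno_passed() must compare modulo 2^32; a sketch of the usual signed-difference idiom, assuming that is what the driver uses:

  #include <stdint.h>
  #include <stdbool.h>

  /* Wrap-safe "seq1 is at or after seq2": the unsigned difference is
   * reinterpreted as signed, so passed(0x1, 0xffffffff) is true. */
  static bool seqno_passed(uint32_t seq1, uint32_t seq2)
  {
          return (int32_t)(seq1 - seq2) >= 0;
  }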
@@ -2040,17 +2017,17 @@ i915_gem_retire_requests(struct drm_device *dev) | |||
2040 | drm_i915_private_t *dev_priv = dev->dev_private; | 2017 | drm_i915_private_t *dev_priv = dev->dev_private; |
2041 | 2018 | ||
2042 | if (!list_empty(&dev_priv->mm.deferred_free_list)) { | 2019 | if (!list_empty(&dev_priv->mm.deferred_free_list)) { |
2043 | struct drm_i915_gem_object *obj_priv, *tmp; | 2020 | struct drm_i915_gem_object *obj, *next; |
2044 | 2021 | ||
2045 | /* We must be careful that during unbind() we do not | 2022 | /* We must be careful that during unbind() we do not |
2046 | * accidentally infinitely recurse into retire requests. | 2023 | * accidentally infinitely recurse into retire requests. |
2047 | * Currently: | 2024 | * Currently: |
2048 | * retire -> free -> unbind -> wait -> retire_ring | 2025 | * retire -> free -> unbind -> wait -> retire_ring |
2049 | */ | 2026 | */ |
2050 | list_for_each_entry_safe(obj_priv, tmp, | 2027 | list_for_each_entry_safe(obj, next, |
2051 | &dev_priv->mm.deferred_free_list, | 2028 | &dev_priv->mm.deferred_free_list, |
2052 | mm_list) | 2029 | mm_list) |
2053 | i915_gem_free_object_tail(&obj_priv->base); | 2030 | i915_gem_free_object_tail(obj); |
2054 | } | 2031 | } |
2055 | 2032 | ||
2056 | i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); | 2033 | i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); |
@@ -2175,7 +2152,6 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno, | |||
2175 | 2152 | ||
2176 | static void | 2153 | static void |
2177 | i915_gem_flush_ring(struct drm_device *dev, | 2154 | i915_gem_flush_ring(struct drm_device *dev, |
2178 | struct drm_file *file_priv, | ||
2179 | struct intel_ring_buffer *ring, | 2155 | struct intel_ring_buffer *ring, |
2180 | uint32_t invalidate_domains, | 2156 | uint32_t invalidate_domains, |
2181 | uint32_t flush_domains) | 2157 | uint32_t flush_domains) |
@@ -2186,7 +2162,6 @@ i915_gem_flush_ring(struct drm_device *dev, | |||
2186 | 2162 | ||
2187 | static void | 2163 | static void |
2188 | i915_gem_flush(struct drm_device *dev, | 2164 | i915_gem_flush(struct drm_device *dev, |
2189 | struct drm_file *file_priv, | ||
2190 | uint32_t invalidate_domains, | 2165 | uint32_t invalidate_domains, |
2191 | uint32_t flush_domains, | 2166 | uint32_t flush_domains, |
2192 | uint32_t flush_rings) | 2167 | uint32_t flush_rings) |
@@ -2198,16 +2173,13 @@ i915_gem_flush(struct drm_device *dev, | |||
2198 | 2173 | ||
2199 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { | 2174 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { |
2200 | if (flush_rings & RING_RENDER) | 2175 | if (flush_rings & RING_RENDER) |
2201 | i915_gem_flush_ring(dev, file_priv, | 2176 | i915_gem_flush_ring(dev, &dev_priv->render_ring, |
2202 | &dev_priv->render_ring, | ||
2203 | invalidate_domains, flush_domains); | 2177 | invalidate_domains, flush_domains); |
2204 | if (flush_rings & RING_BSD) | 2178 | if (flush_rings & RING_BSD) |
2205 | i915_gem_flush_ring(dev, file_priv, | 2179 | i915_gem_flush_ring(dev, &dev_priv->bsd_ring, |
2206 | &dev_priv->bsd_ring, | ||
2207 | invalidate_domains, flush_domains); | 2180 | invalidate_domains, flush_domains); |
2208 | if (flush_rings & RING_BLT) | 2181 | if (flush_rings & RING_BLT) |
2209 | i915_gem_flush_ring(dev, file_priv, | 2182 | i915_gem_flush_ring(dev, &dev_priv->blt_ring, |
2210 | &dev_priv->blt_ring, | ||
2211 | invalidate_domains, flush_domains); | 2183 | invalidate_domains, flush_domains); |
2212 | } | 2184 | } |
2213 | } | 2185 | } |
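With the unused file_priv argument gone, i915_gem_flush() is a plain bitmask dispatch: each ring whose bit is set in flush_rings gets the same invalidate/flush domains. The shape of that dispatch, with hypothetical ring bits (the real RING_* values live in the driver headers):

  #include <stdint.h>

  #define RING_RENDER (1u << 0)   /* hypothetical bit assignments */
  #define RING_BSD    (1u << 1)
  #define RING_BLT    (1u << 2)

  static void flush_selected_rings(uint32_t flush_rings,
                                   void (*flush_one)(uint32_t ring_bit))
  {
          for (uint32_t bit = RING_RENDER; bit <= RING_BLT; bit <<= 1)
                  if (flush_rings & bit)
                          flush_one(bit);
  }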
@@ -2217,26 +2189,25 @@ i915_gem_flush(struct drm_device *dev, | |||
2217 | * safe to unbind from the GTT or access from the CPU. | 2189 | * safe to unbind from the GTT or access from the CPU. |
2218 | */ | 2190 | */ |
2219 | static int | 2191 | static int |
2220 | i915_gem_object_wait_rendering(struct drm_gem_object *obj, | 2192 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, |
2221 | bool interruptible) | 2193 | bool interruptible) |
2222 | { | 2194 | { |
2223 | struct drm_device *dev = obj->dev; | 2195 | struct drm_device *dev = obj->base.dev; |
2224 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2225 | int ret; | 2196 | int ret; |
2226 | 2197 | ||
2227 | /* This function only exists to support waiting for existing rendering, | 2198 | /* This function only exists to support waiting for existing rendering, |
2228 | * not for emitting required flushes. | 2199 | * not for emitting required flushes. |
2229 | */ | 2200 | */ |
2230 | BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); | 2201 | BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0); |
2231 | 2202 | ||
2232 | /* If there is rendering queued on the buffer being evicted, wait for | 2203 | /* If there is rendering queued on the buffer being evicted, wait for |
2233 | * it. | 2204 | * it. |
2234 | */ | 2205 | */ |
2235 | if (obj_priv->active) { | 2206 | if (obj->active) { |
2236 | ret = i915_do_wait_request(dev, | 2207 | ret = i915_do_wait_request(dev, |
2237 | obj_priv->last_rendering_seqno, | 2208 | obj->last_rendering_seqno, |
2238 | interruptible, | 2209 | interruptible, |
2239 | obj_priv->ring); | 2210 | obj->ring); |
2240 | if (ret) | 2211 | if (ret) |
2241 | return ret; | 2212 | return ret; |
2242 | } | 2213 | } |
@@ -2248,17 +2219,16 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj, | |||
2248 | * Unbinds an object from the GTT aperture. | 2219 | * Unbinds an object from the GTT aperture. |
2249 | */ | 2220 | */ |
2250 | int | 2221 | int |
2251 | i915_gem_object_unbind(struct drm_gem_object *obj) | 2222 | i915_gem_object_unbind(struct drm_i915_gem_object *obj) |
2252 | { | 2223 | { |
2253 | struct drm_device *dev = obj->dev; | 2224 | struct drm_device *dev = obj->base.dev; |
2254 | struct drm_i915_private *dev_priv = dev->dev_private; | 2225 | struct drm_i915_private *dev_priv = dev->dev_private; |
2255 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2256 | int ret = 0; | 2226 | int ret = 0; |
2257 | 2227 | ||
2258 | if (obj_priv->gtt_space == NULL) | 2228 | if (obj->gtt_space == NULL) |
2259 | return 0; | 2229 | return 0; |
2260 | 2230 | ||
2261 | if (obj_priv->pin_count != 0) { | 2231 | if (obj->pin_count != 0) { |
2262 | DRM_ERROR("Attempting to unbind pinned buffer\n"); | 2232 | DRM_ERROR("Attempting to unbind pinned buffer\n"); |
2263 | return -EINVAL; | 2233 | return -EINVAL; |
2264 | } | 2234 | } |
@@ -2281,27 +2251,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2281 | */ | 2251 | */ |
2282 | if (ret) { | 2252 | if (ret) { |
2283 | i915_gem_clflush_object(obj); | 2253 | i915_gem_clflush_object(obj); |
2284 | obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU; | 2254 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
2285 | } | 2255 | } |
2286 | 2256 | ||
2287 | /* release the fence reg _after_ flushing */ | 2257 | /* release the fence reg _after_ flushing */ |
2288 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | 2258 | if (obj->fence_reg != I915_FENCE_REG_NONE) |
2289 | i915_gem_clear_fence_reg(obj); | 2259 | i915_gem_clear_fence_reg(obj); |
2290 | 2260 | ||
2291 | i915_gem_gtt_unbind_object(obj); | 2261 | i915_gem_gtt_unbind_object(obj); |
2292 | 2262 | ||
2293 | i915_gem_object_put_pages_gtt(obj); | 2263 | i915_gem_object_put_pages_gtt(obj); |
2294 | 2264 | ||
2295 | i915_gem_info_remove_gtt(dev_priv, obj_priv); | 2265 | i915_gem_info_remove_gtt(dev_priv, obj); |
2296 | list_del_init(&obj_priv->mm_list); | 2266 | list_del_init(&obj->mm_list); |
2297 | /* Avoid an unnecessary call to unbind on rebind. */ | 2267 | /* Avoid an unnecessary call to unbind on rebind. */ |
2298 | obj_priv->map_and_fenceable = true; | 2268 | obj->map_and_fenceable = true; |
2299 | 2269 | ||
2300 | drm_mm_put_block(obj_priv->gtt_space); | 2270 | drm_mm_put_block(obj->gtt_space); |
2301 | obj_priv->gtt_space = NULL; | 2271 | obj->gtt_space = NULL; |
2302 | obj_priv->gtt_offset = 0; | 2272 | obj->gtt_offset = 0; |
2303 | 2273 | ||
2304 | if (i915_gem_object_is_purgeable(obj_priv)) | 2274 | if (i915_gem_object_is_purgeable(obj)) |
2305 | i915_gem_object_truncate(obj); | 2275 | i915_gem_object_truncate(obj); |
2306 | 2276 | ||
2307 | trace_i915_gem_object_unbind(obj); | 2277 | trace_i915_gem_object_unbind(obj); |
@@ -2315,7 +2285,7 @@ static int i915_ring_idle(struct drm_device *dev, | |||
2315 | if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) | 2285 | if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) |
2316 | return 0; | 2286 | return 0; |
2317 | 2287 | ||
2318 | i915_gem_flush_ring(dev, NULL, ring, | 2288 | i915_gem_flush_ring(dev, ring, |
2319 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 2289 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
2320 | return i915_wait_request(dev, | 2290 | return i915_wait_request(dev, |
2321 | i915_gem_next_request_seqno(dev, ring), | 2291 | i915_gem_next_request_seqno(dev, ring), |
@@ -2350,89 +2320,86 @@ i915_gpu_idle(struct drm_device *dev) | |||
2350 | return 0; | 2320 | return 0; |
2351 | } | 2321 | } |
2352 | 2322 | ||
2353 | static void sandybridge_write_fence_reg(struct drm_gem_object *obj) | 2323 | static void sandybridge_write_fence_reg(struct drm_i915_gem_object *obj) |
2354 | { | 2324 | { |
2355 | struct drm_device *dev = obj->dev; | 2325 | struct drm_device *dev = obj->base.dev; |
2356 | drm_i915_private_t *dev_priv = dev->dev_private; | 2326 | drm_i915_private_t *dev_priv = dev->dev_private; |
2357 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2327 | u32 size = obj->gtt_space->size; |
2358 | u32 size = i915_gem_get_gtt_size(obj_priv); | 2328 | int regnum = obj->fence_reg; |
2359 | int regnum = obj_priv->fence_reg; | ||
2360 | uint64_t val; | 2329 | uint64_t val; |
2361 | 2330 | ||
2362 | val = (uint64_t)((obj_priv->gtt_offset + size - 4096) & | 2331 | val = (uint64_t)((obj->gtt_offset + size - 4096) & |
2363 | 0xfffff000) << 32; | 2332 | 0xfffff000) << 32; |
2364 | val |= obj_priv->gtt_offset & 0xfffff000; | 2333 | val |= obj->gtt_offset & 0xfffff000; |
2365 | val |= (uint64_t)((obj_priv->stride / 128) - 1) << | 2334 | val |= (uint64_t)((obj->stride / 128) - 1) << |
2366 | SANDYBRIDGE_FENCE_PITCH_SHIFT; | 2335 | SANDYBRIDGE_FENCE_PITCH_SHIFT; |
2367 | 2336 | ||
2368 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2337 | if (obj->tiling_mode == I915_TILING_Y) |
2369 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | 2338 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; |
2370 | val |= I965_FENCE_REG_VALID; | 2339 | val |= I965_FENCE_REG_VALID; |
2371 | 2340 | ||
2372 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); | 2341 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); |
2373 | } | 2342 | } |
2374 | 2343 | ||
2375 | static void i965_write_fence_reg(struct drm_gem_object *obj) | 2344 | static void i965_write_fence_reg(struct drm_i915_gem_object *obj) |
2376 | { | 2345 | { |
2377 | struct drm_device *dev = obj->dev; | 2346 | struct drm_device *dev = obj->base.dev; |
2378 | drm_i915_private_t *dev_priv = dev->dev_private; | 2347 | drm_i915_private_t *dev_priv = dev->dev_private; |
2379 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2348 | u32 size = obj->gtt_space->size; |
2380 | u32 size = i915_gem_get_gtt_size(obj_priv); | 2349 | int regnum = obj->fence_reg; |
2381 | int regnum = obj_priv->fence_reg; | ||
2382 | uint64_t val; | 2350 | uint64_t val; |
2383 | 2351 | ||
2384 | val = (uint64_t)((obj_priv->gtt_offset + size - 4096) & | 2352 | val = (uint64_t)((obj->gtt_offset + size - 4096) & |
2385 | 0xfffff000) << 32; | 2353 | 0xfffff000) << 32; |
2386 | val |= obj_priv->gtt_offset & 0xfffff000; | 2354 | val |= obj->gtt_offset & 0xfffff000; |
2387 | val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; | 2355 | val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; |
2388 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2356 | if (obj->tiling_mode == I915_TILING_Y) |
2389 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | 2357 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; |
2390 | val |= I965_FENCE_REG_VALID; | 2358 | val |= I965_FENCE_REG_VALID; |
2391 | 2359 | ||
2392 | I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); | 2360 | I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); |
2393 | } | 2361 | } |
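The two 64-bit fence writers above share one layout: the upper dword carries the 4 KiB-aligned end address, the lower dword the start address, plus the pitch in 128-byte units, a Y-tiling bit, and a valid bit; only the pitch shift differs between the gen6 and i965 variants. A standalone sketch of the packing, where the shift and flag constants are stand-ins rather than the real register definitions:

  #include <stdint.h>

  #define FENCE_PITCH_SHIFT 2          /* hypothetical; per-generation */
  #define FENCE_TILING_Y    (1u << 1)  /* hypothetical                 */
  #define FENCE_VALID       (1u << 0)  /* hypothetical                 */

  static uint64_t pack_fence(uint32_t gtt_offset, uint32_t size,
                             uint32_t stride, int tiling_y)
  {
          uint64_t val;

          /* End address (last page) in the high dword... */
          val  = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
          /* ...start address in the low dword. */
          val |= gtt_offset & 0xfffff000;
          val |= (uint64_t)(stride / 128 - 1) << FENCE_PITCH_SHIFT;
          if (tiling_y)
                  val |= FENCE_TILING_Y;
          return val | FENCE_VALID;
  }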
2394 | 2362 | ||
2395 | static void i915_write_fence_reg(struct drm_gem_object *obj) | 2363 | static void i915_write_fence_reg(struct drm_i915_gem_object *obj) |
2396 | { | 2364 | { |
2397 | struct drm_device *dev = obj->dev; | 2365 | struct drm_device *dev = obj->base.dev; |
2398 | drm_i915_private_t *dev_priv = dev->dev_private; | 2366 | drm_i915_private_t *dev_priv = dev->dev_private; |
2399 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2367 | u32 size = obj->gtt_space->size; |
2400 | u32 size = i915_gem_get_gtt_size(obj_priv); | ||
2401 | uint32_t fence_reg, val, pitch_val; | 2368 | uint32_t fence_reg, val, pitch_val; |
2402 | int tile_width; | 2369 | int tile_width; |
2403 | 2370 | ||
2404 | if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || | 2371 | if ((obj->gtt_offset & ~I915_FENCE_START_MASK) || |
2405 | (obj_priv->gtt_offset & (size - 1))) { | 2372 | (obj->gtt_offset & (size - 1))) { |
2406 | WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n", | 2373 | WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n", |
2407 | __func__, obj_priv->gtt_offset, obj_priv->map_and_fenceable, size, | 2374 | __func__, obj->gtt_offset, obj->map_and_fenceable, size, |
2408 | obj_priv->gtt_space->start, obj_priv->gtt_space->size); | 2375 | obj->gtt_space->start, obj->gtt_space->size); |
2409 | return; | 2376 | return; |
2410 | } | 2377 | } |
2411 | 2378 | ||
2412 | if (obj_priv->tiling_mode == I915_TILING_Y && | 2379 | if (obj->tiling_mode == I915_TILING_Y && |
2413 | HAS_128_BYTE_Y_TILING(dev)) | 2380 | HAS_128_BYTE_Y_TILING(dev)) |
2414 | tile_width = 128; | 2381 | tile_width = 128; |
2415 | else | 2382 | else |
2416 | tile_width = 512; | 2383 | tile_width = 512; |
2417 | 2384 | ||
2418 | /* Note: pitch better be a power of two tile widths */ | 2385 | /* Note: pitch better be a power of two tile widths */ |
2419 | pitch_val = obj_priv->stride / tile_width; | 2386 | pitch_val = obj->stride / tile_width; |
2420 | pitch_val = ffs(pitch_val) - 1; | 2387 | pitch_val = ffs(pitch_val) - 1; |
2421 | 2388 | ||
2422 | if (obj_priv->tiling_mode == I915_TILING_Y && | 2389 | if (obj->tiling_mode == I915_TILING_Y && |
2423 | HAS_128_BYTE_Y_TILING(dev)) | 2390 | HAS_128_BYTE_Y_TILING(dev)) |
2424 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | 2391 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); |
2425 | else | 2392 | else |
2426 | WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); | 2393 | WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); |
2427 | 2394 | ||
2428 | val = obj_priv->gtt_offset; | 2395 | val = obj->gtt_offset; |
2429 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2396 | if (obj->tiling_mode == I915_TILING_Y) |
2430 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 2397 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
2431 | val |= I915_FENCE_SIZE_BITS(size); | 2398 | val |= I915_FENCE_SIZE_BITS(size); |
2432 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; | 2399 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; |
2433 | val |= I830_FENCE_REG_VALID; | 2400 | val |= I830_FENCE_REG_VALID; |
2434 | 2401 | ||
2435 | fence_reg = obj_priv->fence_reg; | 2402 | fence_reg = obj->fence_reg; |
2436 | if (fence_reg < 8) | 2403 | if (fence_reg < 8) |
2437 | fence_reg = FENCE_REG_830_0 + fence_reg * 4; | 2404 | fence_reg = FENCE_REG_830_0 + fence_reg * 4; |
2438 | else | 2405 | else |
@@ -2440,30 +2407,29 @@ static void i915_write_fence_reg(struct drm_gem_object *obj) | |||
2440 | I915_WRITE(fence_reg, val); | 2407 | I915_WRITE(fence_reg, val); |
2441 | } | 2408 | } |
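The pitch encoding in i915_write_fence_reg() relies on the stride being a power-of-two multiple of the tile width: ffs() returns the 1-based index of the lowest set bit, so for a power of two ffs(x) - 1 equals log2(x), which is the value the register wants. For example:

  #include <strings.h>   /* ffs() */

  /* stride 4096, 512-byte tiles: 4096/512 = 8, ffs(8) - 1 = 3 = log2(8). */
  static int fence_pitch_bits(unsigned int stride, unsigned int tile_width)
  {
          return ffs((int)(stride / tile_width)) - 1;
  }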
2442 | 2409 | ||
2443 | static void i830_write_fence_reg(struct drm_gem_object *obj) | 2410 | static void i830_write_fence_reg(struct drm_i915_gem_object *obj) |
2444 | { | 2411 | { |
2445 | struct drm_device *dev = obj->dev; | 2412 | struct drm_device *dev = obj->base.dev; |
2446 | drm_i915_private_t *dev_priv = dev->dev_private; | 2413 | drm_i915_private_t *dev_priv = dev->dev_private; |
2447 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2414 | u32 size = obj->gtt_space->size; |
2448 | u32 size = i915_gem_get_gtt_size(obj_priv); | 2415 | int regnum = obj->fence_reg; |
2449 | int regnum = obj_priv->fence_reg; | ||
2450 | uint32_t val; | 2416 | uint32_t val; |
2451 | uint32_t pitch_val; | 2417 | uint32_t pitch_val; |
2452 | uint32_t fence_size_bits; | 2418 | uint32_t fence_size_bits; |
2453 | 2419 | ||
2454 | if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) || | 2420 | if ((obj->gtt_offset & ~I830_FENCE_START_MASK) || |
2455 | (obj_priv->gtt_offset & (obj->size - 1))) { | 2421 | (obj->gtt_offset & (obj->base.size - 1))) { |
2456 | WARN(1, "%s: object 0x%08x not 512K or size aligned\n", | 2422 | WARN(1, "%s: object 0x%08x not 512K or size aligned\n", |
2457 | __func__, obj_priv->gtt_offset); | 2423 | __func__, obj->gtt_offset); |
2458 | return; | 2424 | return; |
2459 | } | 2425 | } |
2460 | 2426 | ||
2461 | pitch_val = obj_priv->stride / 128; | 2427 | pitch_val = obj->stride / 128; |
2462 | pitch_val = ffs(pitch_val) - 1; | 2428 | pitch_val = ffs(pitch_val) - 1; |
2463 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | 2429 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); |
2464 | 2430 | ||
2465 | val = obj_priv->gtt_offset; | 2431 | val = obj->gtt_offset; |
2466 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2432 | if (obj->tiling_mode == I915_TILING_Y) |
2467 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 2433 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
2468 | fence_size_bits = I830_FENCE_SIZE_BITS(size); | 2434 | fence_size_bits = I830_FENCE_SIZE_BITS(size); |
2469 | WARN_ON(fence_size_bits & ~0x00000f00); | 2435 | WARN_ON(fence_size_bits & ~0x00000f00); |
@@ -2479,7 +2445,7 @@ static int i915_find_fence_reg(struct drm_device *dev, | |||
2479 | { | 2445 | { |
2480 | struct drm_i915_private *dev_priv = dev->dev_private; | 2446 | struct drm_i915_private *dev_priv = dev->dev_private; |
2481 | struct drm_i915_fence_reg *reg; | 2447 | struct drm_i915_fence_reg *reg; |
2482 | struct drm_i915_gem_object *obj_priv = NULL; | 2448 | struct drm_i915_gem_object *obj = NULL; |
2483 | int i, avail, ret; | 2449 | int i, avail, ret; |
2484 | 2450 | ||
2485 | /* First try to find a free reg */ | 2451 | /* First try to find a free reg */ |
@@ -2489,9 +2455,8 @@ static int i915_find_fence_reg(struct drm_device *dev, | |||
2489 | if (!reg->obj) | 2455 | if (!reg->obj) |
2490 | return i; | 2456 | return i; |
2491 | 2457 | ||
2492 | obj_priv = to_intel_bo(reg->obj); | 2458 | if (!reg->obj->pin_count) |
2493 | if (!obj_priv->pin_count) | 2459 | avail++; |
2494 | avail++; | ||
2495 | } | 2460 | } |
2496 | 2461 | ||
2497 | if (avail == 0) | 2462 | if (avail == 0) |
@@ -2501,12 +2466,12 @@ static int i915_find_fence_reg(struct drm_device *dev, | |||
2501 | avail = I915_FENCE_REG_NONE; | 2466 | avail = I915_FENCE_REG_NONE; |
2502 | list_for_each_entry(reg, &dev_priv->mm.fence_list, | 2467 | list_for_each_entry(reg, &dev_priv->mm.fence_list, |
2503 | lru_list) { | 2468 | lru_list) { |
2504 | obj_priv = to_intel_bo(reg->obj); | 2469 | obj = reg->obj; |
2505 | if (obj_priv->pin_count) | 2470 | if (obj->pin_count) |
2506 | continue; | 2471 | continue; |
2507 | 2472 | ||
2508 | /* found one! */ | 2473 | /* found one! */ |
2509 | avail = obj_priv->fence_reg; | 2474 | avail = obj->fence_reg; |
2510 | break; | 2475 | break; |
2511 | } | 2476 | } |
2512 | 2477 | ||
@@ -2516,9 +2481,9 @@ static int i915_find_fence_reg(struct drm_device *dev, | |||
2516 | * might drop that one, causing a use-after-free in it. So hold a | 2481 | * might drop that one, causing a use-after-free in it. So hold a |
2517 | * private reference to obj like the other callers of put_fence_reg | 2482 | * private reference to obj like the other callers of put_fence_reg |
2518 | * (set_tiling ioctl) do. */ | 2483 | * (set_tiling ioctl) do. */ |
2519 | drm_gem_object_reference(&obj_priv->base); | 2484 | drm_gem_object_reference(&obj->base); |
2520 | ret = i915_gem_object_put_fence_reg(&obj_priv->base, interruptible); | 2485 | ret = i915_gem_object_put_fence_reg(obj, interruptible); |
2521 | drm_gem_object_unreference(&obj_priv->base); | 2486 | drm_gem_object_unreference(&obj->base); |
2522 | if (ret != 0) | 2487 | if (ret != 0) |
2523 | return ret; | 2488 | return ret; |
2524 | 2489 | ||
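For reference, the policy i915_find_fence_reg() implements across the hunks above: prefer a register with no owner at all, otherwise steal the least-recently-used register whose owner is not pinned, and fail when everything is pinned. A compact sketch with simplified bookkeeping arrays in place of the driver's fence_regs and LRU list:

  #include <stdbool.h>

  /* in_use[r]: register r has an owner; pin_count[r]: owner's pin count;
   * lru[i]: register indices ordered oldest-first. Returns -1 on failure. */
  static int pick_fence_reg(const bool *in_use, const int *pin_count,
                            const int *lru, int nregs)
  {
          for (int r = 0; r < nregs; r++)
                  if (!in_use[r])
                          return r;          /* free register: take it */
          for (int i = 0; i < nregs; i++)
                  if (pin_count[lru[i]] == 0)
                          return lru[i];     /* steal oldest unpinned fence */
          return -1;                         /* every fence is pinned */
  }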
@@ -2539,39 +2504,38 @@ static int i915_find_fence_reg(struct drm_device *dev, | |||
2539 | * and tiling format. | 2504 | * and tiling format. |
2540 | */ | 2505 | */ |
2541 | int | 2506 | int |
2542 | i915_gem_object_get_fence_reg(struct drm_gem_object *obj, | 2507 | i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, |
2543 | bool interruptible) | 2508 | bool interruptible) |
2544 | { | 2509 | { |
2545 | struct drm_device *dev = obj->dev; | 2510 | struct drm_device *dev = obj->base.dev; |
2546 | struct drm_i915_private *dev_priv = dev->dev_private; | 2511 | struct drm_i915_private *dev_priv = dev->dev_private; |
2547 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2548 | struct drm_i915_fence_reg *reg = NULL; | 2512 | struct drm_i915_fence_reg *reg = NULL; |
2549 | int ret; | 2513 | int ret; |
2550 | 2514 | ||
2551 | /* Just update our place in the LRU if our fence is getting used. */ | 2515 | /* Just update our place in the LRU if our fence is getting used. */ |
2552 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 2516 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
2553 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 2517 | reg = &dev_priv->fence_regs[obj->fence_reg]; |
2554 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); | 2518 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); |
2555 | return 0; | 2519 | return 0; |
2556 | } | 2520 | } |
2557 | 2521 | ||
2558 | switch (obj_priv->tiling_mode) { | 2522 | switch (obj->tiling_mode) { |
2559 | case I915_TILING_NONE: | 2523 | case I915_TILING_NONE: |
2560 | WARN(1, "allocating a fence for non-tiled object?\n"); | 2524 | WARN(1, "allocating a fence for non-tiled object?\n"); |
2561 | break; | 2525 | break; |
2562 | case I915_TILING_X: | 2526 | case I915_TILING_X: |
2563 | if (!obj_priv->stride) | 2527 | if (!obj->stride) |
2564 | return -EINVAL; | 2528 | return -EINVAL; |
2565 | WARN((obj_priv->stride & (512 - 1)), | 2529 | WARN((obj->stride & (512 - 1)), |
2566 | "object 0x%08x is X tiled but has non-512B pitch\n", | 2530 | "object 0x%08x is X tiled but has non-512B pitch\n", |
2567 | obj_priv->gtt_offset); | 2531 | obj->gtt_offset); |
2568 | break; | 2532 | break; |
2569 | case I915_TILING_Y: | 2533 | case I915_TILING_Y: |
2570 | if (!obj_priv->stride) | 2534 | if (!obj->stride) |
2571 | return -EINVAL; | 2535 | return -EINVAL; |
2572 | WARN((obj_priv->stride & (128 - 1)), | 2536 | WARN((obj->stride & (128 - 1)), |
2573 | "object 0x%08x is Y tiled but has non-128B pitch\n", | 2537 | "object 0x%08x is Y tiled but has non-128B pitch\n", |
2574 | obj_priv->gtt_offset); | 2538 | obj->gtt_offset); |
2575 | break; | 2539 | break; |
2576 | } | 2540 | } |
2577 | 2541 | ||
@@ -2579,8 +2543,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, | |||
2579 | if (ret < 0) | 2543 | if (ret < 0) |
2580 | return ret; | 2544 | return ret; |
2581 | 2545 | ||
2582 | obj_priv->fence_reg = ret; | 2546 | obj->fence_reg = ret; |
2583 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 2547 | reg = &dev_priv->fence_regs[obj->fence_reg]; |
2584 | list_add_tail(®->lru_list, &dev_priv->mm.fence_list); | 2548 | list_add_tail(®->lru_list, &dev_priv->mm.fence_list); |
2585 | 2549 | ||
2586 | reg->obj = obj; | 2550 | reg->obj = obj; |
@@ -2602,8 +2566,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, | |||
2602 | } | 2566 | } |
2603 | 2567 | ||
2604 | trace_i915_gem_object_get_fence(obj, | 2568 | trace_i915_gem_object_get_fence(obj, |
2605 | obj_priv->fence_reg, | 2569 | obj->fence_reg, |
2606 | obj_priv->tiling_mode); | 2570 | obj->tiling_mode); |
2607 | 2571 | ||
2608 | return 0; | 2572 | return 0; |
2609 | } | 2573 | } |
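The tiling checks above encode a simple pitch rule: a tiled object must have a non-zero stride, 512-byte aligned for X tiling and 128-byte aligned for Y tiling. As a predicate, a sketch:

  #include <stdbool.h>

  enum tiling { TILING_NONE, TILING_X, TILING_Y };

  static bool fence_stride_ok(enum tiling mode, unsigned int stride)
  {
          unsigned int align;

          if (mode == TILING_NONE)
                  return true;           /* no fence pitch to validate */
          align = (mode == TILING_X) ? 512 : 128;
          return stride != 0 && (stride & (align - 1)) == 0;
  }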
@@ -2613,40 +2577,38 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, | |||
2613 | * @obj: object to clear | 2577 | * @obj: object to clear |
2614 | * | 2578 | * |
2615 | * Zeroes out the fence register itself and clears out the associated | 2579 | * Zeroes out the fence register itself and clears out the associated |
2616 | * data structures in dev_priv and obj_priv. | 2580 | * data structures in dev_priv and obj. |
2617 | */ | 2581 | */ |
2618 | static void | 2582 | static void |
2619 | i915_gem_clear_fence_reg(struct drm_gem_object *obj) | 2583 | i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj) |
2620 | { | 2584 | { |
2621 | struct drm_device *dev = obj->dev; | 2585 | struct drm_device *dev = obj->base.dev; |
2622 | drm_i915_private_t *dev_priv = dev->dev_private; | 2586 | drm_i915_private_t *dev_priv = dev->dev_private; |
2623 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2587 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[obj->fence_reg]; |
2624 | struct drm_i915_fence_reg *reg = | ||
2625 | &dev_priv->fence_regs[obj_priv->fence_reg]; | ||
2626 | uint32_t fence_reg; | 2588 | uint32_t fence_reg; |
2627 | 2589 | ||
2628 | switch (INTEL_INFO(dev)->gen) { | 2590 | switch (INTEL_INFO(dev)->gen) { |
2629 | case 6: | 2591 | case 6: |
2630 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + | 2592 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + |
2631 | (obj_priv->fence_reg * 8), 0); | 2593 | (obj->fence_reg * 8), 0); |
2632 | break; | 2594 | break; |
2633 | case 5: | 2595 | case 5: |
2634 | case 4: | 2596 | case 4: |
2635 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); | 2597 | I915_WRITE64(FENCE_REG_965_0 + (obj->fence_reg * 8), 0); |
2636 | break; | 2598 | break; |
2637 | case 3: | 2599 | case 3: |
2638 | if (obj_priv->fence_reg >= 8) | 2600 | if (obj->fence_reg >= 8) |
2639 | fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; | 2601 | fence_reg = FENCE_REG_945_8 + (obj->fence_reg - 8) * 4; |
2640 | else | 2602 | else |
2641 | case 2: | 2603 | case 2: |
2642 | fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; | 2604 | fence_reg = FENCE_REG_830_0 + obj->fence_reg * 4; |
2643 | 2605 | ||
2644 | I915_WRITE(fence_reg, 0); | 2606 | I915_WRITE(fence_reg, 0); |
2645 | break; | 2607 | break; |
2646 | } | 2608 | } |
2647 | 2609 | ||
2648 | reg->obj = NULL; | 2610 | reg->obj = NULL; |
2649 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | 2611 | obj->fence_reg = I915_FENCE_REG_NONE; |
2650 | list_del_init(®->lru_list); | 2612 | list_del_init(®->lru_list); |
2651 | } | 2613 | } |
2652 | 2614 | ||
@@ -2657,18 +2619,17 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) | |||
2657 | * @interruptible: whether the wait upon the fence is interruptible | 2619 | * @interruptible: whether the wait upon the fence is interruptible |
2658 | * | 2620 | * |
2659 | * Zeroes out the fence register itself and clears out the associated | 2621 | * Zeroes out the fence register itself and clears out the associated |
2660 | * data structures in dev_priv and obj_priv. | 2622 | * data structures in dev_priv and obj. |
2661 | */ | 2623 | */ |
2662 | int | 2624 | int |
2663 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj, | 2625 | i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, |
2664 | bool interruptible) | 2626 | bool interruptible) |
2665 | { | 2627 | { |
2666 | struct drm_device *dev = obj->dev; | 2628 | struct drm_device *dev = obj->base.dev; |
2667 | struct drm_i915_private *dev_priv = dev->dev_private; | 2629 | struct drm_i915_private *dev_priv = dev->dev_private; |
2668 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2669 | struct drm_i915_fence_reg *reg; | 2630 | struct drm_i915_fence_reg *reg; |
2670 | 2631 | ||
2671 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) | 2632 | if (obj->fence_reg == I915_FENCE_REG_NONE) |
2672 | return 0; | 2633 | return 0; |
2673 | 2634 | ||
2674 | /* If we've changed tiling, GTT-mappings of the object | 2635 | /* If we've changed tiling, GTT-mappings of the object |
@@ -2681,7 +2642,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, | |||
2681 | * therefore we must wait for any outstanding access to complete | 2642 | * therefore we must wait for any outstanding access to complete |
2682 | * before clearing the fence. | 2643 | * before clearing the fence. |
2683 | */ | 2644 | */ |
2684 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 2645 | reg = &dev_priv->fence_regs[obj->fence_reg]; |
2685 | if (reg->gpu) { | 2646 | if (reg->gpu) { |
2686 | int ret; | 2647 | int ret; |
2687 | 2648 | ||
@@ -2706,27 +2667,26 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, | |||
2706 | * Finds free space in the GTT aperture and binds the object there. | 2667 | * Finds free space in the GTT aperture and binds the object there. |
2707 | */ | 2668 | */ |
2708 | static int | 2669 | static int |
2709 | i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | 2670 | i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, |
2710 | unsigned alignment, | 2671 | unsigned alignment, |
2711 | bool map_and_fenceable) | 2672 | bool map_and_fenceable) |
2712 | { | 2673 | { |
2713 | struct drm_device *dev = obj->dev; | 2674 | struct drm_device *dev = obj->base.dev; |
2714 | drm_i915_private_t *dev_priv = dev->dev_private; | 2675 | drm_i915_private_t *dev_priv = dev->dev_private; |
2715 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2716 | struct drm_mm_node *free_space; | 2676 | struct drm_mm_node *free_space; |
2717 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; | 2677 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; |
2718 | u32 size, fence_size, fence_alignment, unfenced_alignment; | 2678 | u32 size, fence_size, fence_alignment, unfenced_alignment; |
2719 | bool mappable, fenceable; | 2679 | bool mappable, fenceable; |
2720 | int ret; | 2680 | int ret; |
2721 | 2681 | ||
2722 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 2682 | if (obj->madv != I915_MADV_WILLNEED) { |
2723 | DRM_ERROR("Attempting to bind a purgeable object\n"); | 2683 | DRM_ERROR("Attempting to bind a purgeable object\n"); |
2724 | return -EINVAL; | 2684 | return -EINVAL; |
2725 | } | 2685 | } |
2726 | 2686 | ||
2727 | fence_size = i915_gem_get_gtt_size(obj_priv); | 2687 | fence_size = i915_gem_get_gtt_size(obj); |
2728 | fence_alignment = i915_gem_get_gtt_alignment(obj_priv); | 2688 | fence_alignment = i915_gem_get_gtt_alignment(obj); |
2729 | unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj_priv); | 2689 | unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj); |
2730 | 2690 | ||
2731 | if (alignment == 0) | 2691 | if (alignment == 0) |
2732 | alignment = map_and_fenceable ? fence_alignment : | 2692 | alignment = map_and_fenceable ? fence_alignment : |
@@ -2736,12 +2696,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | |||
2736 | return -EINVAL; | 2696 | return -EINVAL; |
2737 | } | 2697 | } |
2738 | 2698 | ||
2739 | size = map_and_fenceable ? fence_size : obj->size; | 2699 | size = map_and_fenceable ? fence_size : obj->base.size; |
2740 | 2700 | ||
2741 | /* If the object is bigger than the entire aperture, reject it early | 2701 | /* If the object is bigger than the entire aperture, reject it early |
2742 | * before evicting everything in a vain attempt to find space. | 2702 | * before evicting everything in a vain attempt to find space. |
2743 | */ | 2703 | */ |
2744 | if (obj->size > | 2704 | if (obj->base.size > |
2745 | (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { | 2705 | (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { |
2746 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); | 2706 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); |
2747 | return -E2BIG; | 2707 | return -E2BIG; |
@@ -2760,16 +2720,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | |||
2760 | 2720 | ||
2761 | if (free_space != NULL) { | 2721 | if (free_space != NULL) { |
2762 | if (map_and_fenceable) | 2722 | if (map_and_fenceable) |
2763 | obj_priv->gtt_space = | 2723 | obj->gtt_space = |
2764 | drm_mm_get_block_range_generic(free_space, | 2724 | drm_mm_get_block_range_generic(free_space, |
2765 | size, alignment, 0, | 2725 | size, alignment, 0, |
2766 | dev_priv->mm.gtt_mappable_end, | 2726 | dev_priv->mm.gtt_mappable_end, |
2767 | 0); | 2727 | 0); |
2768 | else | 2728 | else |
2769 | obj_priv->gtt_space = | 2729 | obj->gtt_space = |
2770 | drm_mm_get_block(free_space, size, alignment); | 2730 | drm_mm_get_block(free_space, size, alignment); |
2771 | } | 2731 | } |
2772 | if (obj_priv->gtt_space == NULL) { | 2732 | if (obj->gtt_space == NULL) { |
2773 | /* If the gtt is empty and we're still having trouble | 2733 | /* If the gtt is empty and we're still having trouble |
2774 | * fitting our object in, we're out of memory. | 2734 | * fitting our object in, we're out of memory. |
2775 | */ | 2735 | */ |
@@ -2783,8 +2743,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | |||
2783 | 2743 | ||
2784 | ret = i915_gem_object_get_pages_gtt(obj, gfpmask); | 2744 | ret = i915_gem_object_get_pages_gtt(obj, gfpmask); |
2785 | if (ret) { | 2745 | if (ret) { |
2786 | drm_mm_put_block(obj_priv->gtt_space); | 2746 | drm_mm_put_block(obj->gtt_space); |
2787 | obj_priv->gtt_space = NULL; | 2747 | obj->gtt_space = NULL; |
2788 | 2748 | ||
2789 | if (ret == -ENOMEM) { | 2749 | if (ret == -ENOMEM) { |
2790 | /* first try to clear up some space from the GTT */ | 2750 | /* first try to clear up some space from the GTT */ |
@@ -2810,8 +2770,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | |||
2810 | ret = i915_gem_gtt_bind_object(obj); | 2770 | ret = i915_gem_gtt_bind_object(obj); |
2811 | if (ret) { | 2771 | if (ret) { |
2812 | i915_gem_object_put_pages_gtt(obj); | 2772 | i915_gem_object_put_pages_gtt(obj); |
2813 | drm_mm_put_block(obj_priv->gtt_space); | 2773 | drm_mm_put_block(obj->gtt_space); |
2814 | obj_priv->gtt_space = NULL; | 2774 | obj->gtt_space = NULL; |
2815 | 2775 | ||
2816 | ret = i915_gem_evict_something(dev, size, | 2776 | ret = i915_gem_evict_something(dev, size, |
2817 | alignment, map_and_fenceable); | 2777 | alignment, map_and_fenceable); |
@@ -2821,65 +2781,61 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | |||
2821 | goto search_free; | 2781 | goto search_free; |
2822 | } | 2782 | } |
2823 | 2783 | ||
2824 | obj_priv->gtt_offset = obj_priv->gtt_space->start; | 2784 | obj->gtt_offset = obj->gtt_space->start; |
2825 | 2785 | ||
2826 | /* keep track of the bound object by adding it to the inactive list */ | 2786 | /* keep track of the bound object by adding it to the inactive list */ |
2827 | list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | 2787 | list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
2828 | i915_gem_info_add_gtt(dev_priv, obj_priv); | 2788 | i915_gem_info_add_gtt(dev_priv, obj); |
2829 | 2789 | ||
2830 | /* Assert that the object is not currently in any GPU domain. As it | 2790 | /* Assert that the object is not currently in any GPU domain. As it |
2831 | * wasn't in the GTT, there shouldn't be any way it could have been in | 2791 | * wasn't in the GTT, there shouldn't be any way it could have been in |
2832 | * a GPU cache | 2792 | * a GPU cache |
2833 | */ | 2793 | */ |
2834 | BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); | 2794 | BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); |
2835 | BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); | 2795 | BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); |
2836 | 2796 | ||
2837 | trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, map_and_fenceable); | 2797 | trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable); |
2838 | 2798 | ||
2839 | fenceable = | 2799 | fenceable = |
2840 | obj_priv->gtt_space->size == fence_size && | 2800 | obj->gtt_space->size == fence_size && |
2841 | (obj_priv->gtt_space->start & (fence_alignment - 1)) == 0; | 2801 | (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2842 | 2802 | ||
2843 | mappable = | 2803 | mappable = |
2844 | obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end; | 2804 | obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; |
2845 | 2805 | ||
2846 | obj_priv->map_and_fenceable = mappable && fenceable; | 2806 | obj->map_and_fenceable = mappable && fenceable; |
2847 | 2807 | ||
2848 | return 0; | 2808 | return 0; |
2849 | } | 2809 | } |
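The two predicates computed at the end of the bind path are what later gate fenced CPU access: the node is fenceable when it is exactly fence-sized and fence-aligned, and mappable when it ends below the mappable boundary of the aperture; their conjunction is the map_and_fenceable flag that unbind resets to true. As a sketch:

  #include <stdbool.h>
  #include <stdint.h>

  static bool compute_map_and_fenceable(uint64_t node_start,
                                        uint64_t node_size,
                                        uint64_t obj_size,
                                        uint64_t fence_size,
                                        uint64_t fence_alignment,
                                        uint64_t mappable_end)
  {
          bool fenceable = node_size == fence_size &&
                           (node_start & (fence_alignment - 1)) == 0;
          bool mappable  = node_start + obj_size <= mappable_end;

          return fenceable && mappable;
  }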
2850 | 2810 | ||
2851 | void | 2811 | void |
2852 | i915_gem_clflush_object(struct drm_gem_object *obj) | 2812 | i915_gem_clflush_object(struct drm_i915_gem_object *obj) |
2853 | { | 2813 | { |
2854 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2855 | |||
2856 | /* If we don't have a page list set up, then we're not pinned | 2814 | /* If we don't have a page list set up, then we're not pinned |
2857 | * to GPU, and we can ignore the cache flush because it'll happen | 2815 | * to GPU, and we can ignore the cache flush because it'll happen |
2858 | * again at bind time. | 2816 | * again at bind time. |
2859 | */ | 2817 | */ |
2860 | if (obj_priv->pages == NULL) | 2818 | if (obj->pages == NULL) |
2861 | return; | 2819 | return; |
2862 | 2820 | ||
2863 | trace_i915_gem_object_clflush(obj); | 2821 | trace_i915_gem_object_clflush(obj); |
2864 | 2822 | ||
2865 | drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); | 2823 | drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); |
2866 | } | 2824 | } |
2867 | 2825 | ||
2868 | /** Flushes any GPU write domain for the object if it's dirty. */ | 2826 | /** Flushes any GPU write domain for the object if it's dirty. */ |
2869 | static int | 2827 | static int |
2870 | i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, | 2828 | i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj, |
2871 | bool pipelined) | 2829 | bool pipelined) |
2872 | { | 2830 | { |
2873 | struct drm_device *dev = obj->dev; | 2831 | struct drm_device *dev = obj->base.dev; |
2874 | 2832 | ||
2875 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | 2833 | if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) |
2876 | return 0; | 2834 | return 0; |
2877 | 2835 | ||
2878 | /* Queue the GPU write cache flushing we need. */ | 2836 | /* Queue the GPU write cache flushing we need. */ |
2879 | i915_gem_flush_ring(dev, NULL, | 2837 | i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain); |
2880 | to_intel_bo(obj)->ring, | 2838 | BUG_ON(obj->base.write_domain); |
2881 | 0, obj->write_domain); | ||
2882 | BUG_ON(obj->write_domain); | ||
2883 | 2839 | ||
2884 | if (pipelined) | 2840 | if (pipelined) |
2885 | return 0; | 2841 | return 0; |
@@ -2889,11 +2845,11 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, | |||
2889 | 2845 | ||
2890 | /** Flushes the GTT write domain for the object if it's dirty. */ | 2846 | /** Flushes the GTT write domain for the object if it's dirty. */ |
2891 | static void | 2847 | static void |
2892 | i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) | 2848 | i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) |
2893 | { | 2849 | { |
2894 | uint32_t old_write_domain; | 2850 | uint32_t old_write_domain; |
2895 | 2851 | ||
2896 | if (obj->write_domain != I915_GEM_DOMAIN_GTT) | 2852 | if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) |
2897 | return; | 2853 | return; |
2898 | 2854 | ||
2899 | /* No actual flushing is required for the GTT write domain. Writes | 2855 | /* No actual flushing is required for the GTT write domain. Writes |
@@ -2902,30 +2858,30 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) | |||
2902 | */ | 2858 | */ |
2903 | i915_gem_release_mmap(obj); | 2859 | i915_gem_release_mmap(obj); |
2904 | 2860 | ||
2905 | old_write_domain = obj->write_domain; | 2861 | old_write_domain = obj->base.write_domain; |
2906 | obj->write_domain = 0; | 2862 | obj->base.write_domain = 0; |
2907 | 2863 | ||
2908 | trace_i915_gem_object_change_domain(obj, | 2864 | trace_i915_gem_object_change_domain(obj, |
2909 | obj->read_domains, | 2865 | obj->base.read_domains, |
2910 | old_write_domain); | 2866 | old_write_domain); |
2911 | } | 2867 | } |
2912 | 2868 | ||
2913 | /** Flushes the CPU write domain for the object if it's dirty. */ | 2869 | /** Flushes the CPU write domain for the object if it's dirty. */ |
2914 | static void | 2870 | static void |
2915 | i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | 2871 | i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) |
2916 | { | 2872 | { |
2917 | uint32_t old_write_domain; | 2873 | uint32_t old_write_domain; |
2918 | 2874 | ||
2919 | if (obj->write_domain != I915_GEM_DOMAIN_CPU) | 2875 | if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) |
2920 | return; | 2876 | return; |
2921 | 2877 | ||
2922 | i915_gem_clflush_object(obj); | 2878 | i915_gem_clflush_object(obj); |
2923 | intel_gtt_chipset_flush(); | 2879 | intel_gtt_chipset_flush(); |
2924 | old_write_domain = obj->write_domain; | 2880 | old_write_domain = obj->base.write_domain; |
2925 | obj->write_domain = 0; | 2881 | obj->base.write_domain = 0; |
2926 | 2882 | ||
2927 | trace_i915_gem_object_change_domain(obj, | 2883 | trace_i915_gem_object_change_domain(obj, |
2928 | obj->read_domains, | 2884 | obj->base.read_domains, |
2929 | old_write_domain); | 2885 | old_write_domain); |
2930 | } | 2886 | } |
2931 | 2887 | ||
@@ -2936,14 +2892,13 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | |||
2936 | * flushes to occur. | 2892 | * flushes to occur. |
2937 | */ | 2893 | */ |
2938 | int | 2894 | int |
2939 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | 2895 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write) |
2940 | { | 2896 | { |
2941 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2942 | uint32_t old_write_domain, old_read_domains; | 2897 | uint32_t old_write_domain, old_read_domains; |
2943 | int ret; | 2898 | int ret; |
2944 | 2899 | ||
2945 | /* Not valid to be called on unbound objects. */ | 2900 | /* Not valid to be called on unbound objects. */ |
2946 | if (obj_priv->gtt_space == NULL) | 2901 | if (obj->gtt_space == NULL) |
2947 | return -EINVAL; | 2902 | return -EINVAL; |
2948 | 2903 | ||
2949 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); | 2904 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); |
@@ -2958,18 +2913,18 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2958 | return ret; | 2913 | return ret; |
2959 | } | 2914 | } |
2960 | 2915 | ||
2961 | old_write_domain = obj->write_domain; | 2916 | old_write_domain = obj->base.write_domain; |
2962 | old_read_domains = obj->read_domains; | 2917 | old_read_domains = obj->base.read_domains; |
2963 | 2918 | ||
2964 | /* It should now be out of any other write domains, and we can update | 2919 | /* It should now be out of any other write domains, and we can update |
2965 | * the domain values for our changes. | 2920 | * the domain values for our changes. |
2966 | */ | 2921 | */ |
2967 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | 2922 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); |
2968 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | 2923 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; |
2969 | if (write) { | 2924 | if (write) { |
2970 | obj->read_domains = I915_GEM_DOMAIN_GTT; | 2925 | obj->base.read_domains = I915_GEM_DOMAIN_GTT; |
2971 | obj->write_domain = I915_GEM_DOMAIN_GTT; | 2926 | obj->base.write_domain = I915_GEM_DOMAIN_GTT; |
2972 | obj_priv->dirty = 1; | 2927 | obj->dirty = 1; |
2973 | } | 2928 | } |
2974 | 2929 | ||
2975 | trace_i915_gem_object_change_domain(obj, | 2930 | trace_i915_gem_object_change_domain(obj, |
@@ -2984,15 +2939,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2984 | * wait, as in modesetting process we're not supposed to be interrupted. | 2939 | * wait, as in modesetting process we're not supposed to be interrupted. |
2985 | */ | 2940 | */ |
2986 | int | 2941 | int |
2987 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, | 2942 | i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, |
2988 | bool pipelined) | 2943 | bool pipelined) |
2989 | { | 2944 | { |
2990 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2991 | uint32_t old_read_domains; | 2945 | uint32_t old_read_domains; |
2992 | int ret; | 2946 | int ret; |
2993 | 2947 | ||
2994 | /* Not valid to be called on unbound objects. */ | 2948 | /* Not valid to be called on unbound objects. */ |
2995 | if (obj_priv->gtt_space == NULL) | 2949 | if (obj->gtt_space == NULL) |
2996 | return -EINVAL; | 2950 | return -EINVAL; |
2997 | 2951 | ||
2998 | ret = i915_gem_object_flush_gpu_write_domain(obj, true); | 2952 | ret = i915_gem_object_flush_gpu_write_domain(obj, true); |
@@ -3008,12 +2962,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, | |||
3008 | 2962 | ||
3009 | i915_gem_object_flush_cpu_write_domain(obj); | 2963 | i915_gem_object_flush_cpu_write_domain(obj); |
3010 | 2964 | ||
3011 | old_read_domains = obj->read_domains; | 2965 | old_read_domains = obj->base.read_domains; |
3012 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | 2966 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; |
3013 | 2967 | ||
3014 | trace_i915_gem_object_change_domain(obj, | 2968 | trace_i915_gem_object_change_domain(obj, |
3015 | old_read_domains, | 2969 | old_read_domains, |
3016 | obj->write_domain); | 2970 | obj->base.write_domain); |
3017 | 2971 | ||
3018 | return 0; | 2972 | return 0; |
3019 | } | 2973 | } |
@@ -3026,10 +2980,10 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, | |||
3026 | return 0; | 2980 | return 0; |
3027 | 2981 | ||
3028 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) | 2982 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) |
3029 | i915_gem_flush_ring(obj->base.dev, NULL, obj->ring, | 2983 | i915_gem_flush_ring(obj->base.dev, obj->ring, |
3030 | 0, obj->base.write_domain); | 2984 | 0, obj->base.write_domain); |
3031 | 2985 | ||
3032 | return i915_gem_object_wait_rendering(&obj->base, interruptible); | 2986 | return i915_gem_object_wait_rendering(obj, interruptible); |
3033 | } | 2987 | } |
3034 | 2988 | ||
3035 | /** | 2989 | /** |
@@ -3039,7 +2993,7 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, | |||
3039 | * flushes to occur. | 2993 | * flushes to occur. |
3040 | */ | 2994 | */ |
3041 | static int | 2995 | static int |
3042 | i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | 2996 | i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, int write) |
3043 | { | 2997 | { |
3044 | uint32_t old_write_domain, old_read_domains; | 2998 | uint32_t old_write_domain, old_read_domains; |
3045 | int ret; | 2999 | int ret; |
@@ -3061,27 +3015,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
3061 | return ret; | 3015 | return ret; |
3062 | } | 3016 | } |
3063 | 3017 | ||
3064 | old_write_domain = obj->write_domain; | 3018 | old_write_domain = obj->base.write_domain; |
3065 | old_read_domains = obj->read_domains; | 3019 | old_read_domains = obj->base.read_domains; |
3066 | 3020 | ||
3067 | /* Flush the CPU cache if it's still invalid. */ | 3021 | /* Flush the CPU cache if it's still invalid. */ |
3068 | if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { | 3022 | if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { |
3069 | i915_gem_clflush_object(obj); | 3023 | i915_gem_clflush_object(obj); |
3070 | 3024 | ||
3071 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 3025 | obj->base.read_domains |= I915_GEM_DOMAIN_CPU; |
3072 | } | 3026 | } |
3073 | 3027 | ||
3074 | /* It should now be out of any other write domains, and we can update | 3028 | /* It should now be out of any other write domains, and we can update |
3075 | * the domain values for our changes. | 3029 | * the domain values for our changes. |
3076 | */ | 3030 | */ |
3077 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | 3031 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); |
3078 | 3032 | ||
3079 | /* If we're writing through the CPU, then the GPU read domains will | 3033 | /* If we're writing through the CPU, then the GPU read domains will |
3080 | * need to be invalidated at next use. | 3034 | * need to be invalidated at next use. |
3081 | */ | 3035 | */ |
3082 | if (write) { | 3036 | if (write) { |
3083 | obj->read_domains = I915_GEM_DOMAIN_CPU; | 3037 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; |
3084 | obj->write_domain = I915_GEM_DOMAIN_CPU; | 3038 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
3085 | } | 3039 | } |
3086 | 3040 | ||
3087 | trace_i915_gem_object_change_domain(obj, | 3041 | trace_i915_gem_object_change_domain(obj, |
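set_to_gtt_domain() and set_to_cpu_domain() above share one bookkeeping rule: a read adds the target domain to the read set, while a write collapses both sets to the target alone, since a fresh writer invalidates every other cached view. A sketch with hypothetical domain bits standing in for the I915_GEM_DOMAIN_* values:

  #include <stdint.h>

  #define DOM_CPU (1u << 0)   /* hypothetical stand-ins for the */
  #define DOM_GTT (1u << 1)   /* I915_GEM_DOMAIN_* bits         */

  static void enter_domain(uint32_t *read_domains, uint32_t *write_domain,
                           uint32_t target, int write)
  {
          *read_domains |= target;
          if (write) {
                  *read_domains = target;   /* sole reader... */
                  *write_domain = target;   /* ...and sole writer */
          }
  }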
@@ -3203,20 +3157,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
3203 | * drm_agp_chipset_flush | 3157 | * drm_agp_chipset_flush |
3204 | */ | 3158 | */ |
3205 | static void | 3159 | static void |
3206 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | 3160 | i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, |
3207 | struct intel_ring_buffer *ring, | 3161 | struct intel_ring_buffer *ring, |
3208 | struct change_domains *cd) | 3162 | struct change_domains *cd) |
3209 | { | 3163 | { |
3210 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 3164 | uint32_t invalidate_domains = 0, flush_domains = 0; |
3211 | uint32_t invalidate_domains = 0; | ||
3212 | uint32_t flush_domains = 0; | ||
3213 | 3165 | ||
3214 | /* | 3166 | /* |
3215 | * If the object isn't moving to a new write domain, | 3167 | * If the object isn't moving to a new write domain, |
3216 | * let the object stay in multiple read domains | 3168 | * let the object stay in multiple read domains |
3217 | */ | 3169 | */ |
3218 | if (obj->pending_write_domain == 0) | 3170 | if (obj->base.pending_write_domain == 0) |
3219 | obj->pending_read_domains |= obj->read_domains; | 3171 | obj->base.pending_read_domains |= obj->base.read_domains; |
3220 | 3172 | ||
3221 | /* | 3173 | /* |
3222 | * Flush the current write domain if | 3174 | * Flush the current write domain if |
@@ -3224,18 +3176,18 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | |||
3224 | * any read domains which differ from the old | 3176 | * any read domains which differ from the old |
3225 | * write domain | 3177 | * write domain |
3226 | */ | 3178 | */ |
3227 | if (obj->write_domain && | 3179 | if (obj->base.write_domain && |
3228 | (obj->write_domain != obj->pending_read_domains || | 3180 | (obj->base.write_domain != obj->base.pending_read_domains || |
3229 | obj_priv->ring != ring)) { | 3181 | obj->ring != ring)) { |
3230 | flush_domains |= obj->write_domain; | 3182 | flush_domains |= obj->base.write_domain; |
3231 | invalidate_domains |= | 3183 | invalidate_domains |= |
3232 | obj->pending_read_domains & ~obj->write_domain; | 3184 | obj->base.pending_read_domains & ~obj->base.write_domain; |
3233 | } | 3185 | } |
3234 | /* | 3186 | /* |
3235 | * Invalidate any read caches which may have | 3187 | * Invalidate any read caches which may have |
3236 | * stale data. That is, any new read domains. | 3188 | * stale data. That is, any new read domains. |
3237 | */ | 3189 | */ |
3238 | invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; | 3190 | invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains; |
3239 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) | 3191 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) |
3240 | i915_gem_clflush_object(obj); | 3192 | i915_gem_clflush_object(obj); |
3241 | 3193 | ||
@@ -3249,13 +3201,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | |||
3249 | * write_domains). So if we have a current write domain that we | 3201 | * write_domains). So if we have a current write domain that we |
3250 | * aren't changing, set pending_write_domain to that. | 3202 | * aren't changing, set pending_write_domain to that. |
3251 | */ | 3203 | */ |
3252 | if (flush_domains == 0 && obj->pending_write_domain == 0) | 3204 | if (flush_domains == 0 && obj->base.pending_write_domain == 0) |
3253 | obj->pending_write_domain = obj->write_domain; | 3205 | obj->base.pending_write_domain = obj->base.write_domain; |
3254 | 3206 | ||
3255 | cd->invalidate_domains |= invalidate_domains; | 3207 | cd->invalidate_domains |= invalidate_domains; |
3256 | cd->flush_domains |= flush_domains; | 3208 | cd->flush_domains |= flush_domains; |
3257 | if (flush_domains & I915_GEM_GPU_DOMAINS) | 3209 | if (flush_domains & I915_GEM_GPU_DOMAINS) |
3258 | cd->flush_rings |= obj_priv->ring->id; | 3210 | cd->flush_rings |= obj->ring->id; |
3259 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) | 3211 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) |
3260 | cd->flush_rings |= ring->id; | 3212 | cd->flush_rings |= ring->id; |
3261 | } | 3213 | } |
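
The function above is pure bookkeeping: for each object it computes which caches need flushing (a write domain the new readers do not share) and which need invalidating (read domains the object did not already hold), then ORs both into the caller's struct change_domains so one combined flush covers the whole batch. A stripped-down sketch of that accumulation, ignoring rings, with the domain bits reduced to two hypothetical flags:

#include <stdio.h>

#define DOM_CPU (1u << 0)  /* hypothetical stand-ins for I915_GEM_DOMAIN_* */
#define DOM_GTT (1u << 1)

struct obj {
        unsigned read_domains, write_domain;
        unsigned pending_read_domains;
};

struct change_domains { unsigned invalidate, flush; };

static void accumulate(const struct obj *o, struct change_domains *cd)
{
        unsigned invalidate = 0, flush = 0;

        /* flush a write domain the new readers will not share */
        if (o->write_domain && o->write_domain != o->pending_read_domains) {
                flush |= o->write_domain;
                invalidate |= o->pending_read_domains & ~o->write_domain;
        }
        /* any brand-new read domain starts out stale */
        invalidate |= o->pending_read_domains & ~o->read_domains;

        cd->invalidate |= invalidate;
        cd->flush |= flush;
}

int main(void)
{
        struct obj a = { .read_domains = DOM_CPU, .write_domain = DOM_CPU,
                         .pending_read_domains = DOM_GTT };
        struct change_domains cd = { 0, 0 };

        accumulate(&a, &cd);
        printf("invalidate=%#x flush=%#x\n", cd.invalidate, cd.flush);
        return 0;
}
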
@@ -3267,30 +3219,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | |||
3267 | * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). | 3219 | * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). |
3268 | */ | 3220 | */ |
3269 | static void | 3221 | static void |
3270 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | 3222 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj) |
3271 | { | 3223 | { |
3272 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 3224 | if (!obj->page_cpu_valid) |
3273 | |||
3274 | if (!obj_priv->page_cpu_valid) | ||
3275 | return; | 3225 | return; |
3276 | 3226 | ||
3277 | /* If we're partially in the CPU read domain, finish moving it in. | 3227 | /* If we're partially in the CPU read domain, finish moving it in. |
3278 | */ | 3228 | */ |
3279 | if (obj->read_domains & I915_GEM_DOMAIN_CPU) { | 3229 | if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) { |
3280 | int i; | 3230 | int i; |
3281 | 3231 | ||
3282 | for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { | 3232 | for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) { |
3283 | if (obj_priv->page_cpu_valid[i]) | 3233 | if (obj->page_cpu_valid[i]) |
3284 | continue; | 3234 | continue; |
3285 | drm_clflush_pages(obj_priv->pages + i, 1); | 3235 | drm_clflush_pages(obj->pages + i, 1); |
3286 | } | 3236 | } |
3287 | } | 3237 | } |
3288 | 3238 | ||
3289 | /* Free the page_cpu_valid mappings which are now stale, whether | 3239 | /* Free the page_cpu_valid mappings which are now stale, whether |
3290 | * or not we've got I915_GEM_DOMAIN_CPU. | 3240 | * or not we've got I915_GEM_DOMAIN_CPU. |
3291 | */ | 3241 | */ |
3292 | kfree(obj_priv->page_cpu_valid); | 3242 | kfree(obj->page_cpu_valid); |
3293 | obj_priv->page_cpu_valid = NULL; | 3243 | obj->page_cpu_valid = NULL; |
3294 | } | 3244 | } |
3295 | 3245 | ||
3296 | /** | 3246 | /** |
@@ -3306,14 +3256,13 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | |||
3306 | * flushes to occur. | 3256 | * flushes to occur. |
3307 | */ | 3257 | */ |
3308 | static int | 3258 | static int |
3309 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 3259 | i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, |
3310 | uint64_t offset, uint64_t size) | 3260 | uint64_t offset, uint64_t size) |
3311 | { | 3261 | { |
3312 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
3313 | uint32_t old_read_domains; | 3262 | uint32_t old_read_domains; |
3314 | int i, ret; | 3263 | int i, ret; |
3315 | 3264 | ||
3316 | if (offset == 0 && size == obj->size) | 3265 | if (offset == 0 && size == obj->base.size) |
3317 | return i915_gem_object_set_to_cpu_domain(obj, 0); | 3266 | return i915_gem_object_set_to_cpu_domain(obj, 0); |
3318 | 3267 | ||
3319 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); | 3268 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); |
@@ -3322,45 +3271,45 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
3322 | i915_gem_object_flush_gtt_write_domain(obj); | 3271 | i915_gem_object_flush_gtt_write_domain(obj); |
3323 | 3272 | ||
3324 | /* If we're already fully in the CPU read domain, we're done. */ | 3273 | /* If we're already fully in the CPU read domain, we're done. */ |
3325 | if (obj_priv->page_cpu_valid == NULL && | 3274 | if (obj->page_cpu_valid == NULL && |
3326 | (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0) | 3275 | (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) |
3327 | return 0; | 3276 | return 0; |
3328 | 3277 | ||
3329 | /* Otherwise, create/clear the per-page CPU read domain flag if we're | 3278 | /* Otherwise, create/clear the per-page CPU read domain flag if we're |
3330 | * newly adding I915_GEM_DOMAIN_CPU | 3279 | * newly adding I915_GEM_DOMAIN_CPU |
3331 | */ | 3280 | */ |
3332 | if (obj_priv->page_cpu_valid == NULL) { | 3281 | if (obj->page_cpu_valid == NULL) { |
3333 | obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE, | 3282 | obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE, |
3334 | GFP_KERNEL); | 3283 | GFP_KERNEL); |
3335 | if (obj_priv->page_cpu_valid == NULL) | 3284 | if (obj->page_cpu_valid == NULL) |
3336 | return -ENOMEM; | 3285 | return -ENOMEM; |
3337 | } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) | 3286 | } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) |
3338 | memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); | 3287 | memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE); |
3339 | 3288 | ||
3340 | /* Flush the cache on any pages that are still invalid from the CPU's | 3289 | /* Flush the cache on any pages that are still invalid from the CPU's |
3341 | * perspective. | 3290 | * perspective. |
3342 | */ | 3291 | */ |
3343 | for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; | 3292 | for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; |
3344 | i++) { | 3293 | i++) { |
3345 | if (obj_priv->page_cpu_valid[i]) | 3294 | if (obj->page_cpu_valid[i]) |
3346 | continue; | 3295 | continue; |
3347 | 3296 | ||
3348 | drm_clflush_pages(obj_priv->pages + i, 1); | 3297 | drm_clflush_pages(obj->pages + i, 1); |
3349 | 3298 | ||
3350 | obj_priv->page_cpu_valid[i] = 1; | 3299 | obj->page_cpu_valid[i] = 1; |
3351 | } | 3300 | } |
3352 | 3301 | ||
3353 | /* It should now be out of any other write domains, and we can update | 3302 | /* It should now be out of any other write domains, and we can update |
3354 | * the domain values for our changes. | 3303 | * the domain values for our changes. |
3355 | */ | 3304 | */ |
3356 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | 3305 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); |
3357 | 3306 | ||
3358 | old_read_domains = obj->read_domains; | 3307 | old_read_domains = obj->base.read_domains; |
3359 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 3308 | obj->base.read_domains |= I915_GEM_DOMAIN_CPU; |
3360 | 3309 | ||
3361 | trace_i915_gem_object_change_domain(obj, | 3310 | trace_i915_gem_object_change_domain(obj, |
3362 | old_read_domains, | 3311 | old_read_domains, |
3363 | obj->write_domain); | 3312 | obj->base.write_domain); |
3364 | 3313 | ||
3365 | return 0; | 3314 | return 0; |
3366 | } | 3315 | } |
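
page_cpu_valid, used by both functions above, is a byte-per-page map of which pages have already been clflushed into the CPU domain, so a partial readback only flushes the pages it actually touches and a later full transition can finish the rest. A userspace sketch of the range walk, with a print stub standing in for drm_clflush_pages():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

static void clflush_page(size_t idx)  /* stand-in for drm_clflush_pages() */
{
        printf("clflush page %zu\n", idx);
}

/* Make [offset, offset + size) valid for CPU reads, flushing lazily. */
static int set_cpu_read_range(unsigned char **valid, size_t obj_size,
                              size_t offset, size_t size)
{
        size_t i;

        if (*valid == NULL) {
                *valid = calloc(obj_size / PAGE_SIZE, 1);
                if (*valid == NULL)
                        return -1;      /* -ENOMEM */
        }
        for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
                if ((*valid)[i])
                        continue;       /* this page is already coherent */
                clflush_page(i);
                (*valid)[i] = 1;
        }
        return 0;
}

int main(void)
{
        unsigned char *valid = NULL;

        set_cpu_read_range(&valid, 16 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE);
        set_cpu_read_range(&valid, 16 * PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
        free(valid);    /* the second call flushed nothing: pages were valid */
        return 0;
}
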
@@ -3490,7 +3439,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
3490 | uint32_t __iomem *reloc_entry; | 3439 | uint32_t __iomem *reloc_entry; |
3491 | void __iomem *reloc_page; | 3440 | void __iomem *reloc_page; |
3492 | 3441 | ||
3493 | ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1); | 3442 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); |
3494 | if (ret) | 3443 | if (ret) |
3495 | goto err; | 3444 | goto err; |
3496 | 3445 | ||
@@ -3564,14 +3513,14 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, | |||
3564 | static int | 3513 | static int |
3565 | i915_gem_execbuffer_relocate(struct drm_device *dev, | 3514 | i915_gem_execbuffer_relocate(struct drm_device *dev, |
3566 | struct drm_file *file, | 3515 | struct drm_file *file, |
3567 | struct drm_gem_object **object_list, | 3516 | struct drm_i915_gem_object **object_list, |
3568 | struct drm_i915_gem_exec_object2 *exec_list, | 3517 | struct drm_i915_gem_exec_object2 *exec_list, |
3569 | int count) | 3518 | int count) |
3570 | { | 3519 | { |
3571 | int i, ret; | 3520 | int i, ret; |
3572 | 3521 | ||
3573 | for (i = 0; i < count; i++) { | 3522 | for (i = 0; i < count; i++) { |
3574 | struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); | 3523 | struct drm_i915_gem_object *obj = object_list[i]; |
3575 | obj->base.pending_read_domains = 0; | 3524 | obj->base.pending_read_domains = 0; |
3576 | obj->base.pending_write_domain = 0; | 3525 | obj->base.pending_write_domain = 0; |
3577 | ret = i915_gem_execbuffer_relocate_object(obj, file, | 3526 | ret = i915_gem_execbuffer_relocate_object(obj, file, |
@@ -3586,7 +3535,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, | |||
3586 | static int | 3535 | static int |
3587 | i915_gem_execbuffer_reserve(struct drm_device *dev, | 3536 | i915_gem_execbuffer_reserve(struct drm_device *dev, |
3588 | struct drm_file *file, | 3537 | struct drm_file *file, |
3589 | struct drm_gem_object **object_list, | 3538 | struct drm_i915_gem_object **object_list, |
3590 | struct drm_i915_gem_exec_object2 *exec_list, | 3539 | struct drm_i915_gem_exec_object2 *exec_list, |
3591 | int count) | 3540 | int count) |
3592 | { | 3541 | { |
@@ -3599,7 +3548,7 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, | |||
3599 | ret = 0; | 3548 | ret = 0; |
3600 | for (i = 0; i < count; i++) { | 3549 | for (i = 0; i < count; i++) { |
3601 | struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; | 3550 | struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; |
3602 | struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); | 3551 | struct drm_i915_gem_object *obj = object_list[i]; |
3603 | bool need_fence = | 3552 | bool need_fence = |
3604 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | 3553 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && |
3605 | obj->tiling_mode != I915_TILING_NONE; | 3554 | obj->tiling_mode != I915_TILING_NONE; |
@@ -3610,12 +3559,12 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, | |||
3610 | 3559 | ||
3611 | /* Check fence reg constraints and rebind if necessary */ | 3560 | /* Check fence reg constraints and rebind if necessary */ |
3612 | if (need_mappable && !obj->map_and_fenceable) { | 3561 | if (need_mappable && !obj->map_and_fenceable) { |
3613 | ret = i915_gem_object_unbind(&obj->base); | 3562 | ret = i915_gem_object_unbind(obj); |
3614 | if (ret) | 3563 | if (ret) |
3615 | break; | 3564 | break; |
3616 | } | 3565 | } |
3617 | 3566 | ||
3618 | ret = i915_gem_object_pin(&obj->base, | 3567 | ret = i915_gem_object_pin(obj, |
3619 | entry->alignment, | 3568 | entry->alignment, |
3620 | need_mappable); | 3569 | need_mappable); |
3621 | if (ret) | 3570 | if (ret) |
@@ -3626,9 +3575,9 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, | |||
3626 | * to properly handle blits to/from tiled surfaces. | 3575 | * to properly handle blits to/from tiled surfaces. |
3627 | */ | 3576 | */ |
3628 | if (need_fence) { | 3577 | if (need_fence) { |
3629 | ret = i915_gem_object_get_fence_reg(&obj->base, true); | 3578 | ret = i915_gem_object_get_fence_reg(obj, true); |
3630 | if (ret) { | 3579 | if (ret) { |
3631 | i915_gem_object_unpin(&obj->base); | 3580 | i915_gem_object_unpin(obj); |
3632 | break; | 3581 | break; |
3633 | } | 3582 | } |
3634 | 3583 | ||
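
The reservation loop above follows a strict acquire/unwind discipline: rebind if the current placement no longer satisfies the request, pin, then take the fence register, and if the fence cannot be had, release the pin taken for this object before bailing, so the caller only unwinds fully reserved entries. The shape of that, with trivial stand-in stubs:

#include <stdio.h>

struct obj { int pinned, need_fence; };

static int pin(struct obj *o)       { o->pinned = 1; return 0; }
static void unpin(struct obj *o)    { o->pinned = 0; }
static int get_fence(struct obj *o) { (void)o; return 0; /* may fail: -ENOSPC */ }

static int reserve(struct obj *objs, int count)
{
        int i, ret = 0;

        for (i = 0; i < count; i++) {
                struct obj *o = &objs[i];

                ret = pin(o);
                if (ret)
                        break;
                if (o->need_fence) {
                        ret = get_fence(o);
                        if (ret) {
                                unpin(o);  /* undo this object's pin only */
                                break;
                        }
                }
        }
        return ret;  /* the caller unwinds the fully reserved objects */
}

int main(void)
{
        struct obj objs[2] = { { 0, 1 }, { 0, 0 } };

        printf("reserve=%d\n", reserve(objs, 2));
        return 0;
}
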
@@ -3658,17 +3607,15 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, | |||
3658 | static int | 3607 | static int |
3659 | i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | 3608 | i915_gem_execbuffer_relocate_slow(struct drm_device *dev, |
3660 | struct drm_file *file, | 3609 | struct drm_file *file, |
3661 | struct drm_gem_object **object_list, | 3610 | struct drm_i915_gem_object **object_list, |
3662 | struct drm_i915_gem_exec_object2 *exec_list, | 3611 | struct drm_i915_gem_exec_object2 *exec_list, |
3663 | int count) | 3612 | int count) |
3664 | { | 3613 | { |
3665 | struct drm_i915_gem_relocation_entry *reloc; | 3614 | struct drm_i915_gem_relocation_entry *reloc; |
3666 | int i, total, ret; | 3615 | int i, total, ret; |
3667 | 3616 | ||
3668 | for (i = 0; i < count; i++) { | 3617 | for (i = 0; i < count; i++) |
3669 | struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); | 3618 | object_list[i]->in_execbuffer = false; |
3670 | obj->in_execbuffer = false; | ||
3671 | } | ||
3672 | 3619 | ||
3673 | mutex_unlock(&dev->struct_mutex); | 3620 | mutex_unlock(&dev->struct_mutex); |
3674 | 3621 | ||
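
The slow path exists because copying relocation entries from user space may fault and sleep, which is forbidden under struct_mutex; so the busy flags are cleared, the lock dropped, the copy done, and the lock retaken before everything is re-reserved. The locking shape in a pthread sketch; copy_relocs() is a hypothetical stand-in for copy_from_user():

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for copy_from_user(): may fault and sleep, so no lock held */
static int copy_relocs(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);
        return 0;
}

static int relocate_slow(void *dst, const void *src, size_t len)
{
        int ret;

        pthread_mutex_unlock(&dev_mutex);  /* never fault under the lock */
        ret = copy_relocs(dst, src, len);
        pthread_mutex_lock(&dev_mutex);    /* state must be revalidated now */
        return ret;
}

int main(void)
{
        char dst[8];

        pthread_mutex_lock(&dev_mutex);
        printf("ret=%d\n", relocate_slow(dst, "relocs", 7));
        pthread_mutex_unlock(&dev_mutex);
        return 0;
}
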
@@ -3713,7 +3660,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
3713 | 3660 | ||
3714 | total = 0; | 3661 | total = 0; |
3715 | for (i = 0; i < count; i++) { | 3662 | for (i = 0; i < count; i++) { |
3716 | struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); | 3663 | struct drm_i915_gem_object *obj = object_list[i]; |
3717 | obj->base.pending_read_domains = 0; | 3664 | obj->base.pending_read_domains = 0; |
3718 | obj->base.pending_write_domain = 0; | 3665 | obj->base.pending_write_domain = 0; |
3719 | ret = i915_gem_execbuffer_relocate_object_slow(obj, file, | 3666 | ret = i915_gem_execbuffer_relocate_object_slow(obj, file, |
@@ -3740,7 +3687,7 @@ static int | |||
3740 | i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, | 3687 | i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, |
3741 | struct drm_file *file, | 3688 | struct drm_file *file, |
3742 | struct intel_ring_buffer *ring, | 3689 | struct intel_ring_buffer *ring, |
3743 | struct drm_gem_object **objects, | 3690 | struct drm_i915_gem_object **objects, |
3744 | int count) | 3691 | int count) |
3745 | { | 3692 | { |
3746 | struct change_domains cd; | 3693 | struct change_domains cd; |
@@ -3759,17 +3706,17 @@ i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, | |||
3759 | cd.invalidate_domains, | 3706 | cd.invalidate_domains, |
3760 | cd.flush_domains); | 3707 | cd.flush_domains); |
3761 | #endif | 3708 | #endif |
3762 | i915_gem_flush(dev, file, | 3709 | i915_gem_flush(dev, |
3763 | cd.invalidate_domains, | 3710 | cd.invalidate_domains, |
3764 | cd.flush_domains, | 3711 | cd.flush_domains, |
3765 | cd.flush_rings); | 3712 | cd.flush_rings); |
3766 | } | 3713 | } |
3767 | 3714 | ||
3768 | for (i = 0; i < count; i++) { | 3715 | for (i = 0; i < count; i++) { |
3769 | struct drm_i915_gem_object *obj = to_intel_bo(objects[i]); | 3716 | struct drm_i915_gem_object *obj = objects[i]; |
3770 | /* XXX replace with semaphores */ | 3717 | /* XXX replace with semaphores */ |
3771 | if (obj->ring && ring != obj->ring) { | 3718 | if (obj->ring && ring != obj->ring) { |
3772 | ret = i915_gem_object_wait_rendering(&obj->base, true); | 3719 | ret = i915_gem_object_wait_rendering(obj, true); |
3773 | if (ret) | 3720 | if (ret) |
3774 | return ret; | 3721 | return ret; |
3775 | } | 3722 | } |
@@ -3891,8 +3838,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3891 | struct drm_i915_gem_exec_object2 *exec_list) | 3838 | struct drm_i915_gem_exec_object2 *exec_list) |
3892 | { | 3839 | { |
3893 | drm_i915_private_t *dev_priv = dev->dev_private; | 3840 | drm_i915_private_t *dev_priv = dev->dev_private; |
3894 | struct drm_gem_object **object_list = NULL; | 3841 | struct drm_i915_gem_object **object_list = NULL; |
3895 | struct drm_gem_object *batch_obj; | 3842 | struct drm_i915_gem_object *batch_obj; |
3896 | struct drm_clip_rect *cliprects = NULL; | 3843 | struct drm_clip_rect *cliprects = NULL; |
3897 | struct drm_i915_gem_request *request = NULL; | 3844 | struct drm_i915_gem_request *request = NULL; |
3898 | int ret, i, flips; | 3845 | int ret, i, flips; |
@@ -3987,29 +3934,29 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3987 | 3934 | ||
3988 | /* Look up object handles */ | 3935 | /* Look up object handles */ |
3989 | for (i = 0; i < args->buffer_count; i++) { | 3936 | for (i = 0; i < args->buffer_count; i++) { |
3990 | struct drm_i915_gem_object *obj_priv; | 3937 | struct drm_i915_gem_object *obj; |
3991 | 3938 | ||
3992 | object_list[i] = drm_gem_object_lookup(dev, file, | 3939 | obj = to_intel_bo (drm_gem_object_lookup(dev, file, |
3993 | exec_list[i].handle); | 3940 | exec_list[i].handle)); |
3994 | if (object_list[i] == NULL) { | 3941 | if (obj == NULL) { |
3995 | DRM_ERROR("Invalid object handle %d at index %d\n", | 3942 | DRM_ERROR("Invalid object handle %d at index %d\n", |
3996 | exec_list[i].handle, i); | 3943 | exec_list[i].handle, i); |
3997 | /* prevent error path from reading uninitialized data */ | 3944 | /* prevent error path from reading uninitialized data */ |
3998 | args->buffer_count = i + 1; | 3945 | args->buffer_count = i; |
3999 | ret = -ENOENT; | 3946 | ret = -ENOENT; |
4000 | goto err; | 3947 | goto err; |
4001 | } | 3948 | } |
3949 | object_list[i] = obj; | ||
4002 | 3950 | ||
4003 | obj_priv = to_intel_bo(object_list[i]); | 3951 | if (obj->in_execbuffer) { |
4004 | if (obj_priv->in_execbuffer) { | ||
4005 | DRM_ERROR("Object %p appears more than once in object list\n", | 3952 | DRM_ERROR("Object %p appears more than once in object list\n", |
4006 | object_list[i]); | 3953 | obj); |
4007 | /* prevent error path from reading uninitialized data */ | 3954 | /* prevent error path from reading uninitialized data */ |
4008 | args->buffer_count = i + 1; | 3955 | args->buffer_count = i + 1; |
4009 | ret = -EINVAL; | 3956 | ret = -EINVAL; |
4010 | goto err; | 3957 | goto err; |
4011 | } | 3958 | } |
4012 | obj_priv->in_execbuffer = true; | 3959 | obj->in_execbuffer = true; |
4013 | } | 3960 | } |
4014 | 3961 | ||
4015 | /* Move the objects en-masse into the GTT, evicting if necessary. */ | 3962 | /* Move the objects en-masse into the GTT, evicting if necessary. */ |
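
Note the asymmetric error accounting in the lookup loop above, a small behavioral fix folded into the conversion: a failed lookup now truncates buffer_count to i, since slot i never received a reference, while a duplicate handle still counts i + 1 entries because slot i does hold a reference the error path must drop. A small sketch of that bookkeeping, with a hypothetical handle table:

#include <stdio.h>

struct obj { int in_execbuffer; };

static struct obj pool[3];

static struct obj *lookup(int handle)  /* hypothetical handle table */
{
        return (handle >= 0 && handle < 3) ? &pool[handle] : NULL;
}

static int gather(struct obj **list, const int *handles, int *count)
{
        int i;

        for (i = 0; i < *count; i++) {
                struct obj *o = lookup(handles[i]);

                if (o == NULL) {
                        *count = i;      /* slot i holds no reference */
                        return -1;       /* -ENOENT */
                }
                list[i] = o;

                if (o->in_execbuffer) {
                        *count = i + 1;  /* slot i must still be released */
                        return -2;       /* -EINVAL */
                }
                o->in_execbuffer = 1;
        }
        return 0;
}

int main(void)
{
        struct obj *list[3];
        int handles[3] = { 0, 1, 9 };    /* handle 9 does not exist */
        int count = 3;

        printf("ret=%d count=%d\n", gather(list, handles, &count), count);
        return 0;
}
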
@@ -4037,15 +3984,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
4037 | 3984 | ||
4038 | /* Set the pending read domains for the batch buffer to COMMAND */ | 3985 | /* Set the pending read domains for the batch buffer to COMMAND */ |
4039 | batch_obj = object_list[args->buffer_count-1]; | 3986 | batch_obj = object_list[args->buffer_count-1]; |
4040 | if (batch_obj->pending_write_domain) { | 3987 | if (batch_obj->base.pending_write_domain) { |
4041 | DRM_ERROR("Attempting to use self-modifying batch buffer\n"); | 3988 | DRM_ERROR("Attempting to use self-modifying batch buffer\n"); |
4042 | ret = -EINVAL; | 3989 | ret = -EINVAL; |
4043 | goto err; | 3990 | goto err; |
4044 | } | 3991 | } |
4045 | batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; | 3992 | batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; |
4046 | 3993 | ||
4047 | /* Sanity check the batch buffer */ | 3994 | /* Sanity check the batch buffer */ |
4048 | exec_offset = to_intel_bo(batch_obj)->gtt_offset; | 3995 | exec_offset = batch_obj->gtt_offset; |
4049 | ret = i915_gem_check_execbuffer(args, exec_offset); | 3996 | ret = i915_gem_check_execbuffer(args, exec_offset); |
4050 | if (ret != 0) { | 3997 | if (ret != 0) { |
4051 | DRM_ERROR("execbuf with invalid offset/length\n"); | 3998 | DRM_ERROR("execbuf with invalid offset/length\n"); |
@@ -4077,8 +4024,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
4077 | */ | 4024 | */ |
4078 | flips = 0; | 4025 | flips = 0; |
4079 | for (i = 0; i < args->buffer_count; i++) { | 4026 | for (i = 0; i < args->buffer_count; i++) { |
4080 | if (object_list[i]->write_domain) | 4027 | if (object_list[i]->base.write_domain) |
4081 | flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip); | 4028 | flips |= atomic_read(&object_list[i]->pending_flip); |
4082 | } | 4029 | } |
4083 | if (flips) { | 4030 | if (flips) { |
4084 | int plane, flip_mask; | 4031 | int plane, flip_mask; |
@@ -4110,23 +4057,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
4110 | } | 4057 | } |
4111 | 4058 | ||
4112 | for (i = 0; i < args->buffer_count; i++) { | 4059 | for (i = 0; i < args->buffer_count; i++) { |
4113 | struct drm_gem_object *obj = object_list[i]; | 4060 | struct drm_i915_gem_object *obj = object_list[i]; |
4114 | 4061 | ||
4115 | obj->read_domains = obj->pending_read_domains; | 4062 | obj->base.read_domains = obj->base.pending_read_domains; |
4116 | obj->write_domain = obj->pending_write_domain; | 4063 | obj->base.write_domain = obj->base.pending_write_domain; |
4117 | 4064 | ||
4118 | i915_gem_object_move_to_active(obj, ring); | 4065 | i915_gem_object_move_to_active(obj, ring); |
4119 | if (obj->write_domain) { | 4066 | if (obj->base.write_domain) { |
4120 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 4067 | obj->dirty = 1; |
4121 | obj_priv->dirty = 1; | 4068 | list_move_tail(&obj->gpu_write_list, |
4122 | list_move_tail(&obj_priv->gpu_write_list, | ||
4123 | &ring->gpu_write_list); | 4069 | &ring->gpu_write_list); |
4124 | intel_mark_busy(dev, obj); | 4070 | intel_mark_busy(dev, obj); |
4125 | } | 4071 | } |
4126 | 4072 | ||
4127 | trace_i915_gem_object_change_domain(obj, | 4073 | trace_i915_gem_object_change_domain(obj, |
4128 | obj->read_domains, | 4074 | obj->base.read_domains, |
4129 | obj->write_domain); | 4075 | obj->base.write_domain); |
4130 | } | 4076 | } |
4131 | 4077 | ||
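
Once submission can no longer fail, each object's pending domains are committed wholesale; any object left with a write domain is marked dirty and queued on the ring's gpu_write_list so later flushes know what to chase. The commit step in miniature:

#include <stdio.h>

struct obj {
        unsigned read_domains, write_domain;
        unsigned pending_read_domains, pending_write_domain;
        int dirty;
};

/* submission can no longer fail: make the pending domains the real ones */
static void commit_domains(struct obj *o)
{
        o->read_domains = o->pending_read_domains;
        o->write_domain = o->pending_write_domain;
        if (o->write_domain)
                o->dirty = 1;  /* contents must reach backing store later */
}

int main(void)
{
        struct obj o = { .pending_read_domains = 0x2,
                         .pending_write_domain = 0x2 };

        commit_domains(&o);
        printf("read=%#x write=%#x dirty=%d\n",
               o.read_domains, o.write_domain, o.dirty);
        return 0;
}
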
4132 | /* | 4078 | /* |
@@ -4142,11 +4088,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
4142 | 4088 | ||
4143 | err: | 4089 | err: |
4144 | for (i = 0; i < args->buffer_count; i++) { | 4090 | for (i = 0; i < args->buffer_count; i++) { |
4145 | if (object_list[i] == NULL) | 4091 | object_list[i]->in_execbuffer = false; |
4146 | break; | 4092 | drm_gem_object_unreference(&object_list[i]->base); |
4147 | |||
4148 | to_intel_bo(object_list[i])->in_execbuffer = false; | ||
4149 | drm_gem_object_unreference(object_list[i]); | ||
4150 | } | 4093 | } |
4151 | 4094 | ||
4152 | mutex_unlock(&dev->struct_mutex); | 4095 | mutex_unlock(&dev->struct_mutex); |
@@ -4165,7 +4108,7 @@ pre_mutex_err: | |||
4165 | */ | 4108 | */ |
4166 | int | 4109 | int |
4167 | i915_gem_execbuffer(struct drm_device *dev, void *data, | 4110 | i915_gem_execbuffer(struct drm_device *dev, void *data, |
4168 | struct drm_file *file_priv) | 4111 | struct drm_file *file) |
4169 | { | 4112 | { |
4170 | struct drm_i915_gem_execbuffer *args = data; | 4113 | struct drm_i915_gem_execbuffer *args = data; |
4171 | struct drm_i915_gem_execbuffer2 exec2; | 4114 | struct drm_i915_gem_execbuffer2 exec2; |
@@ -4227,7 +4170,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
4227 | exec2.cliprects_ptr = args->cliprects_ptr; | 4170 | exec2.cliprects_ptr = args->cliprects_ptr; |
4228 | exec2.flags = I915_EXEC_RENDER; | 4171 | exec2.flags = I915_EXEC_RENDER; |
4229 | 4172 | ||
4230 | ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); | 4173 | ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); |
4231 | if (!ret) { | 4174 | if (!ret) { |
4232 | /* Copy the new buffer offsets back to the user's exec list. */ | 4175 | /* Copy the new buffer offsets back to the user's exec list. */ |
4233 | for (i = 0; i < args->buffer_count; i++) | 4176 | for (i = 0; i < args->buffer_count; i++) |
@@ -4252,7 +4195,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
4252 | 4195 | ||
4253 | int | 4196 | int |
4254 | i915_gem_execbuffer2(struct drm_device *dev, void *data, | 4197 | i915_gem_execbuffer2(struct drm_device *dev, void *data, |
4255 | struct drm_file *file_priv) | 4198 | struct drm_file *file) |
4256 | { | 4199 | { |
4257 | struct drm_i915_gem_execbuffer2 *args = data; | 4200 | struct drm_i915_gem_execbuffer2 *args = data; |
4258 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | 4201 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; |
@@ -4285,7 +4228,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, | |||
4285 | return -EFAULT; | 4228 | return -EFAULT; |
4286 | } | 4229 | } |
4287 | 4230 | ||
4288 | ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list); | 4231 | ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); |
4289 | if (!ret) { | 4232 | if (!ret) { |
4290 | /* Copy the new buffer offsets back to the user's exec list. */ | 4233 | /* Copy the new buffer offsets back to the user's exec list. */ |
4291 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | 4234 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) |
@@ -4305,109 +4248,106 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, | |||
4305 | } | 4248 | } |
4306 | 4249 | ||
4307 | int | 4250 | int |
4308 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, | 4251 | i915_gem_object_pin(struct drm_i915_gem_object *obj, |
4252 | uint32_t alignment, | ||
4309 | bool map_and_fenceable) | 4253 | bool map_and_fenceable) |
4310 | { | 4254 | { |
4311 | struct drm_device *dev = obj->dev; | 4255 | struct drm_device *dev = obj->base.dev; |
4312 | struct drm_i915_private *dev_priv = dev->dev_private; | 4256 | struct drm_i915_private *dev_priv = dev->dev_private; |
4313 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
4314 | int ret; | 4257 | int ret; |
4315 | 4258 | ||
4316 | BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); | 4259 | BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); |
4317 | BUG_ON(map_and_fenceable && !map_and_fenceable); | 4260 | BUG_ON(map_and_fenceable && !map_and_fenceable); |
4318 | WARN_ON(i915_verify_lists(dev)); | 4261 | WARN_ON(i915_verify_lists(dev)); |
4319 | 4262 | ||
4320 | if (obj_priv->gtt_space != NULL) { | 4263 | if (obj->gtt_space != NULL) { |
4321 | if ((alignment && obj_priv->gtt_offset & (alignment - 1)) || | 4264 | if ((alignment && obj->gtt_offset & (alignment - 1)) || |
4322 | (map_and_fenceable && !obj_priv->map_and_fenceable)) { | 4265 | (map_and_fenceable && !obj->map_and_fenceable)) { |
4323 | WARN(obj_priv->pin_count, | 4266 | WARN(obj->pin_count, |
4324 | "bo is already pinned with incorrect alignment:" | 4267 | "bo is already pinned with incorrect alignment:" |
4325 | " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," | 4268 | " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," |
4326 | " obj->map_and_fenceable=%d\n", | 4269 | " obj->map_and_fenceable=%d\n", |
4327 | obj_priv->gtt_offset, alignment, | 4270 | obj->gtt_offset, alignment, |
4328 | map_and_fenceable, | 4271 | map_and_fenceable, |
4329 | obj_priv->map_and_fenceable); | 4272 | obj->map_and_fenceable); |
4330 | ret = i915_gem_object_unbind(obj); | 4273 | ret = i915_gem_object_unbind(obj); |
4331 | if (ret) | 4274 | if (ret) |
4332 | return ret; | 4275 | return ret; |
4333 | } | 4276 | } |
4334 | } | 4277 | } |
4335 | 4278 | ||
4336 | if (obj_priv->gtt_space == NULL) { | 4279 | if (obj->gtt_space == NULL) { |
4337 | ret = i915_gem_object_bind_to_gtt(obj, alignment, | 4280 | ret = i915_gem_object_bind_to_gtt(obj, alignment, |
4338 | map_and_fenceable); | 4281 | map_and_fenceable); |
4339 | if (ret) | 4282 | if (ret) |
4340 | return ret; | 4283 | return ret; |
4341 | } | 4284 | } |
4342 | 4285 | ||
4343 | if (obj_priv->pin_count++ == 0) { | 4286 | if (obj->pin_count++ == 0) { |
4344 | i915_gem_info_add_pin(dev_priv, obj_priv, map_and_fenceable); | 4287 | i915_gem_info_add_pin(dev_priv, obj, map_and_fenceable); |
4345 | if (!obj_priv->active) | 4288 | if (!obj->active) |
4346 | list_move_tail(&obj_priv->mm_list, | 4289 | list_move_tail(&obj->mm_list, |
4347 | &dev_priv->mm.pinned_list); | 4290 | &dev_priv->mm.pinned_list); |
4348 | } | 4291 | } |
4349 | BUG_ON(!obj_priv->pin_mappable && map_and_fenceable); | 4292 | BUG_ON(!obj->pin_mappable && map_and_fenceable); |
4350 | 4293 | ||
4351 | WARN_ON(i915_verify_lists(dev)); | 4294 | WARN_ON(i915_verify_lists(dev)); |
4352 | return 0; | 4295 | return 0; |
4353 | } | 4296 | } |
4354 | 4297 | ||
4355 | void | 4298 | void |
4356 | i915_gem_object_unpin(struct drm_gem_object *obj) | 4299 | i915_gem_object_unpin(struct drm_i915_gem_object *obj) |
4357 | { | 4300 | { |
4358 | struct drm_device *dev = obj->dev; | 4301 | struct drm_device *dev = obj->base.dev; |
4359 | drm_i915_private_t *dev_priv = dev->dev_private; | 4302 | drm_i915_private_t *dev_priv = dev->dev_private; |
4360 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
4361 | 4303 | ||
4362 | WARN_ON(i915_verify_lists(dev)); | 4304 | WARN_ON(i915_verify_lists(dev)); |
4363 | BUG_ON(obj_priv->pin_count == 0); | 4305 | BUG_ON(obj->pin_count == 0); |
4364 | BUG_ON(obj_priv->gtt_space == NULL); | 4306 | BUG_ON(obj->gtt_space == NULL); |
4365 | 4307 | ||
4366 | if (--obj_priv->pin_count == 0) { | 4308 | if (--obj->pin_count == 0) { |
4367 | if (!obj_priv->active) | 4309 | if (!obj->active) |
4368 | list_move_tail(&obj_priv->mm_list, | 4310 | list_move_tail(&obj->mm_list, |
4369 | &dev_priv->mm.inactive_list); | 4311 | &dev_priv->mm.inactive_list); |
4370 | i915_gem_info_remove_pin(dev_priv, obj_priv); | 4312 | i915_gem_info_remove_pin(dev_priv, obj); |
4371 | } | 4313 | } |
4372 | WARN_ON(i915_verify_lists(dev)); | 4314 | WARN_ON(i915_verify_lists(dev)); |
4373 | } | 4315 | } |
4374 | 4316 | ||
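
pin_count is a plain reference count taken under struct_mutex: the 0 to 1 transition parks an inactive object on the pinned list, out of the shrinker's reach, and the final unpin moves it back to the inactive list. A compact sketch of the pairing:

#include <assert.h>
#include <stdio.h>

enum list_id { INACTIVE, PINNED };

struct obj { int pin_count, active; enum list_id list; };

static void pin(struct obj *o)
{
        if (o->pin_count++ == 0 && !o->active)
                o->list = PINNED;    /* hidden from the shrinker */
}

static void unpin(struct obj *o)
{
        assert(o->pin_count > 0);
        if (--o->pin_count == 0 && !o->active)
                o->list = INACTIVE;  /* eligible for eviction again */
}

int main(void)
{
        struct obj o = { 0, 0, INACTIVE };

        pin(&o);
        pin(&o);
        unpin(&o);
        printf("count=%d list=%d\n", o.pin_count, o.list);  /* 1, PINNED */
        unpin(&o);
        printf("count=%d list=%d\n", o.pin_count, o.list);  /* 0, INACTIVE */
        return 0;
}
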
4375 | int | 4317 | int |
4376 | i915_gem_pin_ioctl(struct drm_device *dev, void *data, | 4318 | i915_gem_pin_ioctl(struct drm_device *dev, void *data, |
4377 | struct drm_file *file_priv) | 4319 | struct drm_file *file) |
4378 | { | 4320 | { |
4379 | struct drm_i915_gem_pin *args = data; | 4321 | struct drm_i915_gem_pin *args = data; |
4380 | struct drm_gem_object *obj; | 4322 | struct drm_i915_gem_object *obj; |
4381 | struct drm_i915_gem_object *obj_priv; | ||
4382 | int ret; | 4323 | int ret; |
4383 | 4324 | ||
4384 | ret = i915_mutex_lock_interruptible(dev); | 4325 | ret = i915_mutex_lock_interruptible(dev); |
4385 | if (ret) | 4326 | if (ret) |
4386 | return ret; | 4327 | return ret; |
4387 | 4328 | ||
4388 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4329 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
4389 | if (obj == NULL) { | 4330 | if (obj == NULL) { |
4390 | ret = -ENOENT; | 4331 | ret = -ENOENT; |
4391 | goto unlock; | 4332 | goto unlock; |
4392 | } | 4333 | } |
4393 | obj_priv = to_intel_bo(obj); | ||
4394 | 4334 | ||
4395 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 4335 | if (obj->madv != I915_MADV_WILLNEED) { |
4396 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); | 4336 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); |
4397 | ret = -EINVAL; | 4337 | ret = -EINVAL; |
4398 | goto out; | 4338 | goto out; |
4399 | } | 4339 | } |
4400 | 4340 | ||
4401 | if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { | 4341 | if (obj->pin_filp != NULL && obj->pin_filp != file) { |
4402 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", | 4342 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", |
4403 | args->handle); | 4343 | args->handle); |
4404 | ret = -EINVAL; | 4344 | ret = -EINVAL; |
4405 | goto out; | 4345 | goto out; |
4406 | } | 4346 | } |
4407 | 4347 | ||
4408 | obj_priv->user_pin_count++; | 4348 | obj->user_pin_count++; |
4409 | obj_priv->pin_filp = file_priv; | 4349 | obj->pin_filp = file; |
4410 | if (obj_priv->user_pin_count == 1) { | 4350 | if (obj->user_pin_count == 1) { |
4411 | ret = i915_gem_object_pin(obj, args->alignment, true); | 4351 | ret = i915_gem_object_pin(obj, args->alignment, true); |
4412 | if (ret) | 4352 | if (ret) |
4413 | goto out; | 4353 | goto out; |
@@ -4417,9 +4357,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
4417 | * as the X server doesn't manage domains yet | 4357 | * as the X server doesn't manage domains yet |
4418 | */ | 4358 | */ |
4419 | i915_gem_object_flush_cpu_write_domain(obj); | 4359 | i915_gem_object_flush_cpu_write_domain(obj); |
4420 | args->offset = obj_priv->gtt_offset; | 4360 | args->offset = obj->gtt_offset; |
4421 | out: | 4361 | out: |
4422 | drm_gem_object_unreference(obj); | 4362 | drm_gem_object_unreference(&obj->base); |
4423 | unlock: | 4363 | unlock: |
4424 | mutex_unlock(&dev->struct_mutex); | 4364 | mutex_unlock(&dev->struct_mutex); |
4425 | return ret; | 4365 | return ret; |
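
The pin ioctl layers a per-file count on top of the kernel-internal one: user_pin_count, owned by pin_filp, nests arbitrarily, and only its first increment actually pins the object into the GTT. Sketched below with a dummy struct file; the hardware pin itself is reduced to a counter:

#include <stdio.h>

struct file { int id; };

struct obj { int user_pin_count; const struct file *pin_filp; };

static int hw_pins;  /* counts calls into the real pin path */

static int user_pin(struct obj *o, const struct file *f)
{
        if (o->pin_filp != NULL && o->pin_filp != f)
                return -1;         /* pinned by another file: -EINVAL */
        o->user_pin_count++;
        o->pin_filp = f;
        if (o->user_pin_count == 1)
                hw_pins++;         /* only the first pin touches the GTT */
        return 0;
}

static void user_unpin(struct obj *o)
{
        if (--o->user_pin_count == 0)
                o->pin_filp = NULL;  /* the last pin releases ownership */
}

int main(void)
{
        struct file f = { 1 };
        struct obj o = { 0, NULL };

        user_pin(&o, &f);
        user_pin(&o, &f);   /* nests: no second hardware pin */
        user_unpin(&o);
        user_unpin(&o);
        printf("hardware pins taken: %d\n", hw_pins);  /* 1 */
        return 0;
}
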
@@ -4427,38 +4367,36 @@ unlock: | |||
4427 | 4367 | ||
4428 | int | 4368 | int |
4429 | i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | 4369 | i915_gem_unpin_ioctl(struct drm_device *dev, void *data, |
4430 | struct drm_file *file_priv) | 4370 | struct drm_file *file) |
4431 | { | 4371 | { |
4432 | struct drm_i915_gem_pin *args = data; | 4372 | struct drm_i915_gem_pin *args = data; |
4433 | struct drm_gem_object *obj; | 4373 | struct drm_i915_gem_object *obj; |
4434 | struct drm_i915_gem_object *obj_priv; | ||
4435 | int ret; | 4374 | int ret; |
4436 | 4375 | ||
4437 | ret = i915_mutex_lock_interruptible(dev); | 4376 | ret = i915_mutex_lock_interruptible(dev); |
4438 | if (ret) | 4377 | if (ret) |
4439 | return ret; | 4378 | return ret; |
4440 | 4379 | ||
4441 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4380 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
4442 | if (obj == NULL) { | 4381 | if (obj == NULL) { |
4443 | ret = -ENOENT; | 4382 | ret = -ENOENT; |
4444 | goto unlock; | 4383 | goto unlock; |
4445 | } | 4384 | } |
4446 | obj_priv = to_intel_bo(obj); | ||
4447 | 4385 | ||
4448 | if (obj_priv->pin_filp != file_priv) { | 4386 | if (obj->pin_filp != file) { |
4449 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", | 4387 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", |
4450 | args->handle); | 4388 | args->handle); |
4451 | ret = -EINVAL; | 4389 | ret = -EINVAL; |
4452 | goto out; | 4390 | goto out; |
4453 | } | 4391 | } |
4454 | obj_priv->user_pin_count--; | 4392 | obj->user_pin_count--; |
4455 | if (obj_priv->user_pin_count == 0) { | 4393 | if (obj->user_pin_count == 0) { |
4456 | obj_priv->pin_filp = NULL; | 4394 | obj->pin_filp = NULL; |
4457 | i915_gem_object_unpin(obj); | 4395 | i915_gem_object_unpin(obj); |
4458 | } | 4396 | } |
4459 | 4397 | ||
4460 | out: | 4398 | out: |
4461 | drm_gem_object_unreference(obj); | 4399 | drm_gem_object_unreference(&obj->base); |
4462 | unlock: | 4400 | unlock: |
4463 | mutex_unlock(&dev->struct_mutex); | 4401 | mutex_unlock(&dev->struct_mutex); |
4464 | return ret; | 4402 | return ret; |
@@ -4466,52 +4404,49 @@ unlock: | |||
4466 | 4404 | ||
4467 | int | 4405 | int |
4468 | i915_gem_busy_ioctl(struct drm_device *dev, void *data, | 4406 | i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
4469 | struct drm_file *file_priv) | 4407 | struct drm_file *file) |
4470 | { | 4408 | { |
4471 | struct drm_i915_gem_busy *args = data; | 4409 | struct drm_i915_gem_busy *args = data; |
4472 | struct drm_gem_object *obj; | 4410 | struct drm_i915_gem_object *obj; |
4473 | struct drm_i915_gem_object *obj_priv; | ||
4474 | int ret; | 4411 | int ret; |
4475 | 4412 | ||
4476 | ret = i915_mutex_lock_interruptible(dev); | 4413 | ret = i915_mutex_lock_interruptible(dev); |
4477 | if (ret) | 4414 | if (ret) |
4478 | return ret; | 4415 | return ret; |
4479 | 4416 | ||
4480 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4417 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
4481 | if (obj == NULL) { | 4418 | if (obj == NULL) { |
4482 | ret = -ENOENT; | 4419 | ret = -ENOENT; |
4483 | goto unlock; | 4420 | goto unlock; |
4484 | } | 4421 | } |
4485 | obj_priv = to_intel_bo(obj); | ||
4486 | 4422 | ||
4487 | /* Count all active objects as busy, even if they are currently not used | 4423 | /* Count all active objects as busy, even if they are currently not used |
4488 | * by the gpu. Users of this interface expect objects to eventually | 4424 | * by the gpu. Users of this interface expect objects to eventually |
4489 | * become non-busy without any further actions, therefore emit any | 4425 | * become non-busy without any further actions, therefore emit any |
4490 | * necessary flushes here. | 4426 | * necessary flushes here. |
4491 | */ | 4427 | */ |
4492 | args->busy = obj_priv->active; | 4428 | args->busy = obj->active; |
4493 | if (args->busy) { | 4429 | if (args->busy) { |
4494 | /* Unconditionally flush objects, even when the gpu still uses this | 4430 | /* Unconditionally flush objects, even when the gpu still uses this |
4495 | * object. Userspace calling this function indicates that it wants to | 4431 | * object. Userspace calling this function indicates that it wants to |
4496 | * use this buffer rather sooner than later, so issuing the required | 4432 | * use this buffer rather sooner than later, so issuing the required |
4497 | * flush earlier is beneficial. | 4433 | * flush earlier is beneficial. |
4498 | */ | 4434 | */ |
4499 | if (obj->write_domain & I915_GEM_GPU_DOMAINS) | 4435 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) |
4500 | i915_gem_flush_ring(dev, file_priv, | 4436 | i915_gem_flush_ring(dev, obj->ring, |
4501 | obj_priv->ring, | 4437 | 0, obj->base.write_domain); |
4502 | 0, obj->write_domain); | ||
4503 | 4438 | ||
4504 | /* Update the active list for the hardware's current position. | 4439 | /* Update the active list for the hardware's current position. |
4505 | * Otherwise this only updates on a delayed timer or when irqs | 4440 | * Otherwise this only updates on a delayed timer or when irqs |
4506 | * are actually unmasked, and our working set ends up being | 4441 | * are actually unmasked, and our working set ends up being |
4507 | * larger than required. | 4442 | * larger than required. |
4508 | */ | 4443 | */ |
4509 | i915_gem_retire_requests_ring(dev, obj_priv->ring); | 4444 | i915_gem_retire_requests_ring(dev, obj->ring); |
4510 | 4445 | ||
4511 | args->busy = obj_priv->active; | 4446 | args->busy = obj->active; |
4512 | } | 4447 | } |
4513 | 4448 | ||
4514 | drm_gem_object_unreference(obj); | 4449 | drm_gem_object_unreference(&obj->base); |
4515 | unlock: | 4450 | unlock: |
4516 | mutex_unlock(&dev->struct_mutex); | 4451 | mutex_unlock(&dev->struct_mutex); |
4517 | return ret; | 4452 | return ret; |
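
A rough model of the busy ioctl's convergence argument above. Retire is collapsed into a single flag here, which is a simplification: the real object stays active until its request's seqno actually completes, but the flush-then-retire ordering is the same.

#include <stdio.h>

struct obj { int active, request_done; unsigned gpu_write_domain; };

static void flush_ring(struct obj *o)      { o->gpu_write_domain = 0; }
static void retire_requests(struct obj *o) { if (o->request_done) o->active = 0; }

static int busy(struct obj *o)
{
        if (!o->active)
                return 0;
        if (o->gpu_write_domain)
                flush_ring(o);     /* userspace wants the buffer soon */
        retire_requests(o);        /* pick up already-finished work now */
        return o->active;
}

int main(void)
{
        struct obj o = { 1, 0, 0x2 };

        printf("busy=%d\n", busy(&o));  /* 1: request still in flight */
        o.request_done = 1;             /* the GPU finished meanwhile */
        printf("busy=%d\n", busy(&o));  /* 0: retire noticed */
        return 0;
}
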
@@ -4529,8 +4464,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4529 | struct drm_file *file_priv) | 4464 | struct drm_file *file_priv) |
4530 | { | 4465 | { |
4531 | struct drm_i915_gem_madvise *args = data; | 4466 | struct drm_i915_gem_madvise *args = data; |
4532 | struct drm_gem_object *obj; | 4467 | struct drm_i915_gem_object *obj; |
4533 | struct drm_i915_gem_object *obj_priv; | ||
4534 | int ret; | 4468 | int ret; |
4535 | 4469 | ||
4536 | switch (args->madv) { | 4470 | switch (args->madv) { |
@@ -4545,37 +4479,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4545 | if (ret) | 4479 | if (ret) |
4546 | return ret; | 4480 | return ret; |
4547 | 4481 | ||
4548 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4482 | obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); |
4549 | if (obj == NULL) { | 4483 | if (obj == NULL) { |
4550 | ret = -ENOENT; | 4484 | ret = -ENOENT; |
4551 | goto unlock; | 4485 | goto unlock; |
4552 | } | 4486 | } |
4553 | obj_priv = to_intel_bo(obj); | ||
4554 | 4487 | ||
4555 | if (obj_priv->pin_count) { | 4488 | if (obj->pin_count) { |
4556 | ret = -EINVAL; | 4489 | ret = -EINVAL; |
4557 | goto out; | 4490 | goto out; |
4558 | } | 4491 | } |
4559 | 4492 | ||
4560 | if (obj_priv->madv != __I915_MADV_PURGED) | 4493 | if (obj->madv != __I915_MADV_PURGED) |
4561 | obj_priv->madv = args->madv; | 4494 | obj->madv = args->madv; |
4562 | 4495 | ||
4563 | /* if the object is no longer bound, discard its backing storage */ | 4496 | /* if the object is no longer bound, discard its backing storage */ |
4564 | if (i915_gem_object_is_purgeable(obj_priv) && | 4497 | if (i915_gem_object_is_purgeable(obj) && |
4565 | obj_priv->gtt_space == NULL) | 4498 | obj->gtt_space == NULL) |
4566 | i915_gem_object_truncate(obj); | 4499 | i915_gem_object_truncate(obj); |
4567 | 4500 | ||
4568 | args->retained = obj_priv->madv != __I915_MADV_PURGED; | 4501 | args->retained = obj->madv != __I915_MADV_PURGED; |
4569 | 4502 | ||
4570 | out: | 4503 | out: |
4571 | drm_gem_object_unreference(obj); | 4504 | drm_gem_object_unreference(&obj->base); |
4572 | unlock: | 4505 | unlock: |
4573 | mutex_unlock(&dev->struct_mutex); | 4506 | mutex_unlock(&dev->struct_mutex); |
4574 | return ret; | 4507 | return ret; |
4575 | } | 4508 | } |
4576 | 4509 | ||
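
madvise records intent rather than acting directly: DONTNEED marks the pages purgeable, the storage is dropped immediately only if the object is unbound (otherwise the shrinker gets to it later), and once truncation sets __I915_MADV_PURGED no WILLNEED can bring the pages back, which is what args->retained reports. A toy model; gem_madvise() and its fields are hypothetical stand-ins:

#include <stdio.h>

enum madv { WILLNEED, DONTNEED, PURGED };

struct obj { enum madv madv; int bound, pages; };

static int gem_madvise(struct obj *o, enum madv advice)
{
        if (o->madv != PURGED)
                o->madv = advice;       /* a purge is irreversible */
        if (o->madv == DONTNEED && !o->bound) {
                o->pages = 0;           /* truncate backing storage now */
                o->madv = PURGED;
        }
        return o->madv != PURGED;       /* what args->retained reports */
}

int main(void)
{
        struct obj o = { WILLNEED, 0, 1 };
        int retained = gem_madvise(&o, DONTNEED);

        printf("retained=%d pages=%d\n", retained, o.pages);
        printf("retained=%d\n", gem_madvise(&o, WILLNEED));  /* stays purged */
        return 0;
}
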
4577 | struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | 4510 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
4578 | size_t size) | 4511 | size_t size) |
4579 | { | 4512 | { |
4580 | struct drm_i915_private *dev_priv = dev->dev_private; | 4513 | struct drm_i915_private *dev_priv = dev->dev_private; |
4581 | struct drm_i915_gem_object *obj; | 4514 | struct drm_i915_gem_object *obj; |
@@ -4605,7 +4538,7 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | |||
4605 | /* Avoid an unnecessary call to unbind on the first bind. */ | 4538 | /* Avoid an unnecessary call to unbind on the first bind. */ |
4606 | obj->map_and_fenceable = true; | 4539 | obj->map_and_fenceable = true; |
4607 | 4540 | ||
4608 | return &obj->base; | 4541 | return obj; |
4609 | } | 4542 | } |
4610 | 4543 | ||
4611 | int i915_gem_init_object(struct drm_gem_object *obj) | 4544 | int i915_gem_init_object(struct drm_gem_object *obj) |
@@ -4615,42 +4548,41 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
4615 | return 0; | 4548 | return 0; |
4616 | } | 4549 | } |
4617 | 4550 | ||
4618 | static void i915_gem_free_object_tail(struct drm_gem_object *obj) | 4551 | static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) |
4619 | { | 4552 | { |
4620 | struct drm_device *dev = obj->dev; | 4553 | struct drm_device *dev = obj->base.dev; |
4621 | drm_i915_private_t *dev_priv = dev->dev_private; | 4554 | drm_i915_private_t *dev_priv = dev->dev_private; |
4622 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
4623 | int ret; | 4555 | int ret; |
4624 | 4556 | ||
4625 | ret = i915_gem_object_unbind(obj); | 4557 | ret = i915_gem_object_unbind(obj); |
4626 | if (ret == -ERESTARTSYS) { | 4558 | if (ret == -ERESTARTSYS) { |
4627 | list_move(&obj_priv->mm_list, | 4559 | list_move(&obj->mm_list, |
4628 | &dev_priv->mm.deferred_free_list); | 4560 | &dev_priv->mm.deferred_free_list); |
4629 | return; | 4561 | return; |
4630 | } | 4562 | } |
4631 | 4563 | ||
4632 | if (obj->map_list.map) | 4564 | if (obj->base.map_list.map) |
4633 | i915_gem_free_mmap_offset(obj); | 4565 | i915_gem_free_mmap_offset(obj); |
4634 | 4566 | ||
4635 | drm_gem_object_release(obj); | 4567 | drm_gem_object_release(&obj->base); |
4636 | i915_gem_info_remove_obj(dev_priv, obj->size); | 4568 | i915_gem_info_remove_obj(dev_priv, obj->base.size); |
4637 | 4569 | ||
4638 | kfree(obj_priv->page_cpu_valid); | 4570 | kfree(obj->page_cpu_valid); |
4639 | kfree(obj_priv->bit_17); | 4571 | kfree(obj->bit_17); |
4640 | kfree(obj_priv); | 4572 | kfree(obj); |
4641 | } | 4573 | } |
4642 | 4574 | ||
4643 | void i915_gem_free_object(struct drm_gem_object *obj) | 4575 | void i915_gem_free_object(struct drm_gem_object *gem_obj) |
4644 | { | 4576 | { |
4645 | struct drm_device *dev = obj->dev; | 4577 | struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); |
4646 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 4578 | struct drm_device *dev = obj->base.dev; |
4647 | 4579 | ||
4648 | trace_i915_gem_object_destroy(obj); | 4580 | trace_i915_gem_object_destroy(obj); |
4649 | 4581 | ||
4650 | while (obj_priv->pin_count > 0) | 4582 | while (obj->pin_count > 0) |
4651 | i915_gem_object_unpin(obj); | 4583 | i915_gem_object_unpin(obj); |
4652 | 4584 | ||
4653 | if (obj_priv->phys_obj) | 4585 | if (obj->phys_obj) |
4654 | i915_gem_detach_phys_object(dev, obj); | 4586 | i915_gem_detach_phys_object(dev, obj); |
4655 | 4587 | ||
4656 | i915_gem_free_object_tail(obj); | 4588 | i915_gem_free_object_tail(obj); |
@@ -4710,8 +4642,7 @@ static int | |||
4710 | i915_gem_init_pipe_control(struct drm_device *dev) | 4642 | i915_gem_init_pipe_control(struct drm_device *dev) |
4711 | { | 4643 | { |
4712 | drm_i915_private_t *dev_priv = dev->dev_private; | 4644 | drm_i915_private_t *dev_priv = dev->dev_private; |
4713 | struct drm_gem_object *obj; | 4645 | struct drm_i915_gem_object *obj; |
4714 | struct drm_i915_gem_object *obj_priv; | ||
4715 | int ret; | 4646 | int ret; |
4716 | 4647 | ||
4717 | obj = i915_gem_alloc_object(dev, 4096); | 4648 | obj = i915_gem_alloc_object(dev, 4096); |
@@ -4720,15 +4651,14 @@ i915_gem_init_pipe_control(struct drm_device *dev) | |||
4720 | ret = -ENOMEM; | 4651 | ret = -ENOMEM; |
4721 | goto err; | 4652 | goto err; |
4722 | } | 4653 | } |
4723 | obj_priv = to_intel_bo(obj); | 4654 | obj->agp_type = AGP_USER_CACHED_MEMORY; |
4724 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | ||
4725 | 4655 | ||
4726 | ret = i915_gem_object_pin(obj, 4096, true); | 4656 | ret = i915_gem_object_pin(obj, 4096, true); |
4727 | if (ret) | 4657 | if (ret) |
4728 | goto err_unref; | 4658 | goto err_unref; |
4729 | 4659 | ||
4730 | dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; | 4660 | dev_priv->seqno_gfx_addr = obj->gtt_offset; |
4731 | dev_priv->seqno_page = kmap(obj_priv->pages[0]); | 4661 | dev_priv->seqno_page = kmap(obj->pages[0]); |
4732 | if (dev_priv->seqno_page == NULL) | 4662 | if (dev_priv->seqno_page == NULL) |
4733 | goto err_unpin; | 4663 | goto err_unpin; |
4734 | 4664 | ||
@@ -4740,7 +4670,7 @@ i915_gem_init_pipe_control(struct drm_device *dev) | |||
4740 | err_unpin: | 4670 | err_unpin: |
4741 | i915_gem_object_unpin(obj); | 4671 | i915_gem_object_unpin(obj); |
4742 | err_unref: | 4672 | err_unref: |
4743 | drm_gem_object_unreference(obj); | 4673 | drm_gem_object_unreference(&obj->base); |
4744 | err: | 4674 | err: |
4745 | return ret; | 4675 | return ret; |
4746 | } | 4676 | } |
@@ -4750,14 +4680,12 @@ static void | |||
4750 | i915_gem_cleanup_pipe_control(struct drm_device *dev) | 4680 | i915_gem_cleanup_pipe_control(struct drm_device *dev) |
4751 | { | 4681 | { |
4752 | drm_i915_private_t *dev_priv = dev->dev_private; | 4682 | drm_i915_private_t *dev_priv = dev->dev_private; |
4753 | struct drm_gem_object *obj; | 4683 | struct drm_i915_gem_object *obj; |
4754 | struct drm_i915_gem_object *obj_priv; | ||
4755 | 4684 | ||
4756 | obj = dev_priv->seqno_obj; | 4685 | obj = dev_priv->seqno_obj; |
4757 | obj_priv = to_intel_bo(obj); | 4686 | kunmap(obj->pages[0]); |
4758 | kunmap(obj_priv->pages[0]); | ||
4759 | i915_gem_object_unpin(obj); | 4687 | i915_gem_object_unpin(obj); |
4760 | drm_gem_object_unreference(obj); | 4688 | drm_gem_object_unreference(&obj->base); |
4761 | dev_priv->seqno_obj = NULL; | 4689 | dev_priv->seqno_obj = NULL; |
4762 | 4690 | ||
4763 | dev_priv->seqno_page = NULL; | 4691 | dev_priv->seqno_page = NULL; |
@@ -5035,20 +4963,18 @@ void i915_gem_free_all_phys_object(struct drm_device *dev) | |||
5035 | } | 4963 | } |
5036 | 4964 | ||
5037 | void i915_gem_detach_phys_object(struct drm_device *dev, | 4965 | void i915_gem_detach_phys_object(struct drm_device *dev, |
5038 | struct drm_gem_object *obj) | 4966 | struct drm_i915_gem_object *obj) |
5039 | { | 4967 | { |
5040 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 4968 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
5041 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
5042 | char *vaddr; | 4969 | char *vaddr; |
5043 | int i; | 4970 | int i; |
5044 | int page_count; | 4971 | int page_count; |
5045 | 4972 | ||
5046 | if (!obj_priv->phys_obj) | 4973 | if (!obj->phys_obj) |
5047 | return; | 4974 | return; |
5048 | vaddr = obj_priv->phys_obj->handle->vaddr; | 4975 | vaddr = obj->phys_obj->handle->vaddr; |
5049 | |||
5050 | page_count = obj->size / PAGE_SIZE; | ||
5051 | 4976 | ||
4977 | page_count = obj->base.size / PAGE_SIZE; | ||
5052 | for (i = 0; i < page_count; i++) { | 4978 | for (i = 0; i < page_count; i++) { |
5053 | struct page *page = read_cache_page_gfp(mapping, i, | 4979 | struct page *page = read_cache_page_gfp(mapping, i, |
5054 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | 4980 | GFP_HIGHUSER | __GFP_RECLAIMABLE); |
@@ -5066,19 +4992,18 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
5066 | } | 4992 | } |
5067 | intel_gtt_chipset_flush(); | 4993 | intel_gtt_chipset_flush(); |
5068 | 4994 | ||
5069 | obj_priv->phys_obj->cur_obj = NULL; | 4995 | obj->phys_obj->cur_obj = NULL; |
5070 | obj_priv->phys_obj = NULL; | 4996 | obj->phys_obj = NULL; |
5071 | } | 4997 | } |
5072 | 4998 | ||
5073 | int | 4999 | int |
5074 | i915_gem_attach_phys_object(struct drm_device *dev, | 5000 | i915_gem_attach_phys_object(struct drm_device *dev, |
5075 | struct drm_gem_object *obj, | 5001 | struct drm_i915_gem_object *obj, |
5076 | int id, | 5002 | int id, |
5077 | int align) | 5003 | int align) |
5078 | { | 5004 | { |
5079 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 5005 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
5080 | drm_i915_private_t *dev_priv = dev->dev_private; | 5006 | drm_i915_private_t *dev_priv = dev->dev_private; |
5081 | struct drm_i915_gem_object *obj_priv; | ||
5082 | int ret = 0; | 5007 | int ret = 0; |
5083 | int page_count; | 5008 | int page_count; |
5084 | int i; | 5009 | int i; |
@@ -5086,10 +5011,8 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
5086 | if (id > I915_MAX_PHYS_OBJECT) | 5011 | if (id > I915_MAX_PHYS_OBJECT) |
5087 | return -EINVAL; | 5012 | return -EINVAL; |
5088 | 5013 | ||
5089 | obj_priv = to_intel_bo(obj); | 5014 | if (obj->phys_obj) { |
5090 | 5015 | if (obj->phys_obj->id == id) | |
5091 | if (obj_priv->phys_obj) { | ||
5092 | if (obj_priv->phys_obj->id == id) | ||
5093 | return 0; | 5016 | return 0; |
5094 | i915_gem_detach_phys_object(dev, obj); | 5017 | i915_gem_detach_phys_object(dev, obj); |
5095 | } | 5018 | } |
@@ -5097,18 +5020,19 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
5097 | /* create a new object */ | 5020 | /* create a new object */ |
5098 | if (!dev_priv->mm.phys_objs[id - 1]) { | 5021 | if (!dev_priv->mm.phys_objs[id - 1]) { |
5099 | ret = i915_gem_init_phys_object(dev, id, | 5022 | ret = i915_gem_init_phys_object(dev, id, |
5100 | obj->size, align); | 5023 | obj->base.size, align); |
5101 | if (ret) { | 5024 | if (ret) { |
5102 | DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); | 5025 | DRM_ERROR("failed to init phys object %d size: %zu\n", |
5026 | id, obj->base.size); | ||
5103 | return ret; | 5027 | return ret; |
5104 | } | 5028 | } |
5105 | } | 5029 | } |
5106 | 5030 | ||
5107 | /* bind to the object */ | 5031 | /* bind to the object */ |
5108 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | 5032 | obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; |
5109 | obj_priv->phys_obj->cur_obj = obj; | 5033 | obj->phys_obj->cur_obj = obj; |
5110 | 5034 | ||
5111 | page_count = obj->size / PAGE_SIZE; | 5035 | page_count = obj->base.size / PAGE_SIZE; |
5112 | 5036 | ||
5113 | for (i = 0; i < page_count; i++) { | 5037 | for (i = 0; i < page_count; i++) { |
5114 | struct page *page; | 5038 | struct page *page; |
@@ -5120,7 +5044,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
5120 | return PTR_ERR(page); | 5044 | return PTR_ERR(page); |
5121 | 5045 | ||
5122 | src = kmap_atomic(page); | 5046 | src = kmap_atomic(page); |
5123 | dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | 5047 | dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE); |
5124 | memcpy(dst, src, PAGE_SIZE); | 5048 | memcpy(dst, src, PAGE_SIZE); |
5125 | kunmap_atomic(src); | 5049 | kunmap_atomic(src); |
5126 | 5050 | ||
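
Attaching a phys object means the hardware will read one physically contiguous allocation, so every page of the object is mapped and copied into its slot of that buffer, as the loop above does with kmap_atomic(). The copy in userspace miniature; plain mallocs stand in for the page cache and the contiguous handle:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096u
#define NPAGES    4u

int main(void)
{
        /* phys_obj->handle->vaddr: one physically contiguous allocation */
        char *contig = malloc(NPAGES * PAGE_SIZE);
        /* stand-ins for the object's individual shmem pages */
        char *pages[NPAGES];
        unsigned i;

        if (contig == NULL)
                return 1;
        for (i = 0; i < NPAGES; i++) {
                pages[i] = malloc(PAGE_SIZE);
                if (pages[i] == NULL)
                        return 1;
                memset(pages[i], i, PAGE_SIZE);
        }

        /* attach: copy each page into its slot of the contiguous buffer */
        for (i = 0; i < NPAGES; i++)
                memcpy(contig + i * PAGE_SIZE, pages[i], PAGE_SIZE);

        printf("byte from page 2: %d\n", contig[2 * PAGE_SIZE]);

        for (i = 0; i < NPAGES; i++)
                free(pages[i]);
        free(contig);
        return 0;
}
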
@@ -5132,16 +5056,14 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
5132 | } | 5056 | } |
5133 | 5057 | ||
5134 | static int | 5058 | static int |
5135 | i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 5059 | i915_gem_phys_pwrite(struct drm_device *dev, |
5060 | struct drm_i915_gem_object *obj, | ||
5136 | struct drm_i915_gem_pwrite *args, | 5061 | struct drm_i915_gem_pwrite *args, |
5137 | struct drm_file *file_priv) | 5062 | struct drm_file *file_priv) |
5138 | { | 5063 | { |
5139 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 5064 | void *vaddr = obj->phys_obj->handle->vaddr + args->offset; |
5140 | void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset; | ||
5141 | char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; | 5065 | char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; |
5142 | 5066 | ||
5143 | DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size); | ||
5144 | |||
5145 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { | 5067 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { |
5146 | unsigned long unwritten; | 5068 | unsigned long unwritten; |
5147 | 5069 | ||
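
phys_pwrite first tries __copy_from_user_inatomic_nocache(), which cannot sleep and may bail on an unfaulted user page, leaving a slower, sleeping copy as the fallback. The retry shape, with hypothetical copy helpers; the fast path here arbitrarily refuses odd sizes just to force the fallback:

#include <stdio.h>
#include <string.h>

/* fast path: not allowed to sleep, may fail without making progress */
static size_t copy_inatomic(void *dst, const void *src, size_t n)
{
        if (n & 1)
                return n;      /* bytes left uncopied */
        memcpy(dst, src, n);
        return 0;
}

/* slow path: allowed to fault and sleep */
static size_t copy_sleeping(void *dst, const void *src, size_t n)
{
        memcpy(dst, src, n);
        return 0;
}

static int phys_pwrite(void *vaddr, const void *user, size_t n)
{
        if (copy_inatomic(vaddr, user, n)) {
                size_t unwritten = copy_sleeping(vaddr, user, n);

                if (unwritten)
                        return -14;  /* -EFAULT */
        }
        return 0;
}

int main(void)
{
        char dst[8];
        int ret = phys_pwrite(dst, "abcd", 5);  /* odd size: takes fallback */

        printf("ret=%d dst=%s\n", ret, dst);
        return 0;
}
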
@@ -5228,7 +5150,7 @@ rescan: | |||
5228 | &dev_priv->mm.inactive_list, | 5150 | &dev_priv->mm.inactive_list, |
5229 | mm_list) { | 5151 | mm_list) { |
5230 | if (i915_gem_object_is_purgeable(obj)) { | 5152 | if (i915_gem_object_is_purgeable(obj)) { |
5231 | i915_gem_object_unbind(&obj->base); | 5153 | i915_gem_object_unbind(obj); |
5232 | if (--nr_to_scan == 0) | 5154 | if (--nr_to_scan == 0) |
5233 | break; | 5155 | break; |
5234 | } | 5156 | } |
@@ -5240,7 +5162,7 @@ rescan: | |||
5240 | &dev_priv->mm.inactive_list, | 5162 | &dev_priv->mm.inactive_list, |
5241 | mm_list) { | 5163 | mm_list) { |
5242 | if (nr_to_scan) { | 5164 | if (nr_to_scan) { |
5243 | i915_gem_object_unbind(&obj->base); | 5165 | i915_gem_object_unbind(obj); |
5244 | nr_to_scan--; | 5166 | nr_to_scan--; |
5245 | } else | 5167 | } else |
5246 | cnt++; | 5168 | cnt++; |