author      Chris Wilson <chris@chris-wilson.co.uk>    2010-11-08 14:18:58 -0500
committer   Chris Wilson <chris@chris-wilson.co.uk>    2010-11-23 15:19:10 -0500
commit      05394f3975dceb107a5e1393e2244946e5b43660 (patch)
tree        2af73b6efec503ed4cd9c932018619bd28a1fe60 /drivers
parent      185cbcb304ba4dee55e39593fd86dcd7813f62ec (diff)
drm/i915: Use drm_i915_gem_object as the preferred type
A glorified s/obj_priv/obj/ with a net reduction of over 100 lines and
many characters!
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
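The conversion follows one shape throughout the driver. The sketch below is illustrative only — the helper name is made up and not taken from the patch — and assumes the i915 types declared in i915_drv.h: functions that used to take the generic struct drm_gem_object and call to_intel_bo() on every use now take struct drm_i915_gem_object directly and reach the generic GEM fields through the embedded obj->base.

```c
/* Illustrative only: a hypothetical helper showing the before/after shape
 * of this patch, assuming the types from i915_drv.h.
 */

/* Before: callers pass the base GEM object; the driver-private wrapper is
 * looked up with to_intel_bo() wherever it is needed.
 */
static u32 example_gtt_offset_old(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	return obj_priv->gtt_offset;
}

/* After: the driver-private type is passed around directly; generic GEM
 * fields (size, name, read_domains, ...) are reached via the embedded
 * obj->base, and the extra local variable disappears.
 */
static u32 example_gtt_offset_new(struct drm_i915_gem_object *obj)
{
	return obj->gtt_offset;
}
```

Handle lookups and reference drops change accordingly in the patch, e.g. obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)) and drm_gem_object_unreference(&obj->base).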
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/gpu/drm/i915/i915_debugfs.c     |   64
-rw-r--r--   drivers/gpu/drm/i915/i915_dma.c         |    2
-rw-r--r--   drivers/gpu/drm/i915/i915_drv.h         |   84
-rw-r--r--   drivers/gpu/drm/i915/i915_gem.c         | 1268
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_debug.c   |   23
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_evict.c   |   67
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_gtt.c     |   68
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_tiling.c  |  104
-rw-r--r--   drivers/gpu/drm/i915/i915_irq.c         |   67
-rw-r--r--   drivers/gpu/drm/i915/i915_trace.h       |   41
-rw-r--r--   drivers/gpu/drm/i915/intel_display.c    |  242
-rw-r--r--   drivers/gpu/drm/i915/intel_drv.h        |   15
-rw-r--r--   drivers/gpu/drm/i915/intel_fb.c         |   25
-rw-r--r--   drivers/gpu/drm/i915/intel_overlay.c    |   48
-rw-r--r--   drivers/gpu/drm/i915/intel_ringbuffer.c |   54
-rw-r--r--   drivers/gpu/drm/i915/intel_ringbuffer.h |    4
16 files changed, 1019 insertions, 1157 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4fe49e0228ef..1e8cd74d18d5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -87,19 +87,19 @@ static int i915_capabilities(struct seq_file *m, void *data) | |||
87 | return 0; | 87 | return 0; |
88 | } | 88 | } |
89 | 89 | ||
90 | static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv) | 90 | static const char *get_pin_flag(struct drm_i915_gem_object *obj) |
91 | { | 91 | { |
92 | if (obj_priv->user_pin_count > 0) | 92 | if (obj->user_pin_count > 0) |
93 | return "P"; | 93 | return "P"; |
94 | else if (obj_priv->pin_count > 0) | 94 | else if (obj->pin_count > 0) |
95 | return "p"; | 95 | return "p"; |
96 | else | 96 | else |
97 | return " "; | 97 | return " "; |
98 | } | 98 | } |
99 | 99 | ||
100 | static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv) | 100 | static const char *get_tiling_flag(struct drm_i915_gem_object *obj) |
101 | { | 101 | { |
102 | switch (obj_priv->tiling_mode) { | 102 | switch (obj->tiling_mode) { |
103 | default: | 103 | default: |
104 | case I915_TILING_NONE: return " "; | 104 | case I915_TILING_NONE: return " "; |
105 | case I915_TILING_X: return "X"; | 105 | case I915_TILING_X: return "X"; |
@@ -140,7 +140,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
140 | struct list_head *head; | 140 | struct list_head *head; |
141 | struct drm_device *dev = node->minor->dev; | 141 | struct drm_device *dev = node->minor->dev; |
142 | drm_i915_private_t *dev_priv = dev->dev_private; | 142 | drm_i915_private_t *dev_priv = dev->dev_private; |
143 | struct drm_i915_gem_object *obj_priv; | 143 | struct drm_i915_gem_object *obj; |
144 | size_t total_obj_size, total_gtt_size; | 144 | size_t total_obj_size, total_gtt_size; |
145 | int count, ret; | 145 | int count, ret; |
146 | 146 | ||
@@ -175,12 +175,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
175 | } | 175 | } |
176 | 176 | ||
177 | total_obj_size = total_gtt_size = count = 0; | 177 | total_obj_size = total_gtt_size = count = 0; |
178 | list_for_each_entry(obj_priv, head, mm_list) { | 178 | list_for_each_entry(obj, head, mm_list) { |
179 | seq_printf(m, " "); | 179 | seq_printf(m, " "); |
180 | describe_obj(m, obj_priv); | 180 | describe_obj(m, obj); |
181 | seq_printf(m, "\n"); | 181 | seq_printf(m, "\n"); |
182 | total_obj_size += obj_priv->base.size; | 182 | total_obj_size += obj->base.size; |
183 | total_gtt_size += obj_priv->gtt_space->size; | 183 | total_gtt_size += obj->gtt_space->size; |
184 | count++; | 184 | count++; |
185 | } | 185 | } |
186 | mutex_unlock(&dev->struct_mutex); | 186 | mutex_unlock(&dev->struct_mutex); |
@@ -251,14 +251,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) | |||
251 | seq_printf(m, "%d prepares\n", work->pending); | 251 | seq_printf(m, "%d prepares\n", work->pending); |
252 | 252 | ||
253 | if (work->old_fb_obj) { | 253 | if (work->old_fb_obj) { |
254 | struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj); | 254 | struct drm_i915_gem_object *obj = work->old_fb_obj; |
255 | if(obj_priv) | 255 | if (obj) |
256 | seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); | 256 | seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); |
257 | } | 257 | } |
258 | if (work->pending_flip_obj) { | 258 | if (work->pending_flip_obj) { |
259 | struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj); | 259 | struct drm_i915_gem_object *obj = work->pending_flip_obj; |
260 | if(obj_priv) | 260 | if (obj) |
261 | seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); | 261 | seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); |
262 | } | 262 | } |
263 | } | 263 | } |
264 | spin_unlock_irqrestore(&dev->event_lock, flags); | 264 | spin_unlock_irqrestore(&dev->event_lock, flags); |
@@ -421,17 +421,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) | |||
421 | seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); | 421 | seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); |
422 | seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); | 422 | seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); |
423 | for (i = 0; i < dev_priv->num_fence_regs; i++) { | 423 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
424 | struct drm_gem_object *obj = dev_priv->fence_regs[i].obj; | 424 | struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; |
425 | 425 | ||
426 | seq_printf(m, "Fenced object[%2d] = ", i); | 426 | seq_printf(m, "Fenced object[%2d] = ", i); |
427 | if (obj == NULL) | 427 | if (obj == NULL) |
428 | seq_printf(m, "unused"); | 428 | seq_printf(m, "unused"); |
429 | else | 429 | else |
430 | describe_obj(m, to_intel_bo(obj)); | 430 | describe_obj(m, obj); |
431 | seq_printf(m, "\n"); | 431 | seq_printf(m, "\n"); |
432 | } | 432 | } |
433 | mutex_unlock(&dev->struct_mutex); | ||
434 | 433 | ||
434 | mutex_unlock(&dev->struct_mutex); | ||
435 | return 0; | 435 | return 0; |
436 | } | 436 | } |
437 | 437 | ||
@@ -465,14 +465,14 @@ static int i915_hws_info(struct seq_file *m, void *data) | |||
465 | 465 | ||
466 | static void i915_dump_object(struct seq_file *m, | 466 | static void i915_dump_object(struct seq_file *m, |
467 | struct io_mapping *mapping, | 467 | struct io_mapping *mapping, |
468 | struct drm_i915_gem_object *obj_priv) | 468 | struct drm_i915_gem_object *obj) |
469 | { | 469 | { |
470 | int page, page_count, i; | 470 | int page, page_count, i; |
471 | 471 | ||
472 | page_count = obj_priv->base.size / PAGE_SIZE; | 472 | page_count = obj->base.size / PAGE_SIZE; |
473 | for (page = 0; page < page_count; page++) { | 473 | for (page = 0; page < page_count; page++) { |
474 | u32 *mem = io_mapping_map_wc(mapping, | 474 | u32 *mem = io_mapping_map_wc(mapping, |
475 | obj_priv->gtt_offset + page * PAGE_SIZE); | 475 | obj->gtt_offset + page * PAGE_SIZE); |
476 | for (i = 0; i < PAGE_SIZE; i += 4) | 476 | for (i = 0; i < PAGE_SIZE; i += 4) |
477 | seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); | 477 | seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); |
478 | io_mapping_unmap(mem); | 478 | io_mapping_unmap(mem); |
@@ -484,25 +484,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) | |||
484 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 484 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
485 | struct drm_device *dev = node->minor->dev; | 485 | struct drm_device *dev = node->minor->dev; |
486 | drm_i915_private_t *dev_priv = dev->dev_private; | 486 | drm_i915_private_t *dev_priv = dev->dev_private; |
487 | struct drm_gem_object *obj; | 487 | struct drm_i915_gem_object *obj; |
488 | struct drm_i915_gem_object *obj_priv; | ||
489 | int ret; | 488 | int ret; |
490 | 489 | ||
491 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 490 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
492 | if (ret) | 491 | if (ret) |
493 | return ret; | 492 | return ret; |
494 | 493 | ||
495 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { | 494 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
496 | obj = &obj_priv->base; | 495 | if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) { |
497 | if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { | 496 | seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); |
498 | seq_printf(m, "--- gtt_offset = 0x%08x\n", | 497 | i915_dump_object(m, dev_priv->mm.gtt_mapping, obj); |
499 | obj_priv->gtt_offset); | ||
500 | i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv); | ||
501 | } | 498 | } |
502 | } | 499 | } |
503 | 500 | ||
504 | mutex_unlock(&dev->struct_mutex); | 501 | mutex_unlock(&dev->struct_mutex); |
505 | |||
506 | return 0; | 502 | return 0; |
507 | } | 503 | } |
508 | 504 | ||
@@ -525,7 +521,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) | |||
525 | if (ret) | 521 | if (ret) |
526 | return ret; | 522 | return ret; |
527 | 523 | ||
528 | if (!ring->gem_object) { | 524 | if (!ring->obj) { |
529 | seq_printf(m, "No ringbuffer setup\n"); | 525 | seq_printf(m, "No ringbuffer setup\n"); |
530 | } else { | 526 | } else { |
531 | u8 *virt = ring->virtual_start; | 527 | u8 *virt = ring->virtual_start; |
@@ -983,7 +979,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) | |||
983 | fb->base.height, | 979 | fb->base.height, |
984 | fb->base.depth, | 980 | fb->base.depth, |
985 | fb->base.bits_per_pixel); | 981 | fb->base.bits_per_pixel); |
986 | describe_obj(m, to_intel_bo(fb->obj)); | 982 | describe_obj(m, fb->obj); |
987 | seq_printf(m, "\n"); | 983 | seq_printf(m, "\n"); |
988 | 984 | ||
989 | list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { | 985 | list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { |
@@ -995,7 +991,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) | |||
995 | fb->base.height, | 991 | fb->base.height, |
996 | fb->base.depth, | 992 | fb->base.depth, |
997 | fb->base.bits_per_pixel); | 993 | fb->base.bits_per_pixel); |
998 | describe_obj(m, to_intel_bo(fb->obj)); | 994 | describe_obj(m, fb->obj); |
999 | seq_printf(m, "\n"); | 995 | seq_printf(m, "\n"); |
1000 | } | 996 | } |
1001 | 997 | ||
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7084de7c4c55..7960fd63ecb1 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -157,7 +157,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
157 | } | 157 | } |
158 | 158 | ||
159 | if (init->ring_size != 0) { | 159 | if (init->ring_size != 0) { |
160 | if (dev_priv->render_ring.gem_object != NULL) { | 160 | if (dev_priv->render_ring.obj != NULL) { |
161 | i915_dma_cleanup(dev); | 161 | i915_dma_cleanup(dev); |
162 | DRM_ERROR("Client tried to initialize ringbuffer in " | 162 | DRM_ERROR("Client tried to initialize ringbuffer in " |
163 | "GEM mode\n"); | 163 | "GEM mode\n"); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index dc371d987aa7..22d6388b331f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,7 +32,6 @@ | |||
32 | 32 | ||
33 | #include "i915_reg.h" | 33 | #include "i915_reg.h" |
34 | #include "intel_bios.h" | 34 | #include "intel_bios.h" |
35 | #include "i915_trace.h" | ||
36 | #include "intel_ringbuffer.h" | 35 | #include "intel_ringbuffer.h" |
37 | #include <linux/io-mapping.h> | 36 | #include <linux/io-mapping.h> |
38 | #include <linux/i2c.h> | 37 | #include <linux/i2c.h> |
@@ -90,7 +89,7 @@ struct drm_i915_gem_phys_object { | |||
90 | int id; | 89 | int id; |
91 | struct page **page_list; | 90 | struct page **page_list; |
92 | drm_dma_handle_t *handle; | 91 | drm_dma_handle_t *handle; |
93 | struct drm_gem_object *cur_obj; | 92 | struct drm_i915_gem_object *cur_obj; |
94 | }; | 93 | }; |
95 | 94 | ||
96 | struct mem_block { | 95 | struct mem_block { |
@@ -125,7 +124,7 @@ struct drm_i915_master_private { | |||
125 | #define I915_FENCE_REG_NONE -1 | 124 | #define I915_FENCE_REG_NONE -1 |
126 | 125 | ||
127 | struct drm_i915_fence_reg { | 126 | struct drm_i915_fence_reg { |
128 | struct drm_gem_object *obj; | 127 | struct drm_i915_gem_object *obj; |
129 | struct list_head lru_list; | 128 | struct list_head lru_list; |
130 | bool gpu; | 129 | bool gpu; |
131 | }; | 130 | }; |
@@ -280,9 +279,9 @@ typedef struct drm_i915_private { | |||
280 | uint32_t counter; | 279 | uint32_t counter; |
281 | unsigned int seqno_gfx_addr; | 280 | unsigned int seqno_gfx_addr; |
282 | drm_local_map_t hws_map; | 281 | drm_local_map_t hws_map; |
283 | struct drm_gem_object *seqno_obj; | 282 | struct drm_i915_gem_object *seqno_obj; |
284 | struct drm_gem_object *pwrctx; | 283 | struct drm_i915_gem_object *pwrctx; |
285 | struct drm_gem_object *renderctx; | 284 | struct drm_i915_gem_object *renderctx; |
286 | 285 | ||
287 | struct resource mch_res; | 286 | struct resource mch_res; |
288 | 287 | ||
@@ -690,14 +689,14 @@ typedef struct drm_i915_private { | |||
690 | u8 fmax; | 689 | u8 fmax; |
691 | u8 fstart; | 690 | u8 fstart; |
692 | 691 | ||
693 | u64 last_count1; | 692 | u64 last_count1; |
694 | unsigned long last_time1; | 693 | unsigned long last_time1; |
695 | u64 last_count2; | 694 | u64 last_count2; |
696 | struct timespec last_time2; | 695 | struct timespec last_time2; |
697 | unsigned long gfx_power; | 696 | unsigned long gfx_power; |
698 | int c_m; | 697 | int c_m; |
699 | int r_t; | 698 | int r_t; |
700 | u8 corr; | 699 | u8 corr; |
701 | spinlock_t *mchdev_lock; | 700 | spinlock_t *mchdev_lock; |
702 | 701 | ||
703 | enum no_fbc_reason no_fbc_reason; | 702 | enum no_fbc_reason no_fbc_reason; |
@@ -711,7 +710,6 @@ typedef struct drm_i915_private { | |||
711 | struct intel_fbdev *fbdev; | 710 | struct intel_fbdev *fbdev; |
712 | } drm_i915_private_t; | 711 | } drm_i915_private_t; |
713 | 712 | ||
714 | /** driver private structure attached to each drm_gem_object */ | ||
715 | struct drm_i915_gem_object { | 713 | struct drm_i915_gem_object { |
716 | struct drm_gem_object base; | 714 | struct drm_gem_object base; |
717 | 715 | ||
@@ -918,7 +916,7 @@ enum intel_chip_family { | |||
918 | #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) | 916 | #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) |
919 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) | 917 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
920 | 918 | ||
921 | #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) | 919 | #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) |
922 | #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) | 920 | #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) |
923 | 921 | ||
924 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | 922 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
@@ -947,6 +945,8 @@ enum intel_chip_family { | |||
947 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) | 945 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) |
948 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) | 946 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) |
949 | 947 | ||
948 | #include "i915_trace.h" | ||
949 | |||
950 | extern struct drm_ioctl_desc i915_ioctls[]; | 950 | extern struct drm_ioctl_desc i915_ioctls[]; |
951 | extern int i915_max_ioctl; | 951 | extern int i915_max_ioctl; |
952 | extern unsigned int i915_fbpercrtc; | 952 | extern unsigned int i915_fbpercrtc; |
@@ -1085,14 +1085,15 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
1085 | struct drm_file *file_priv); | 1085 | struct drm_file *file_priv); |
1086 | void i915_gem_load(struct drm_device *dev); | 1086 | void i915_gem_load(struct drm_device *dev); |
1087 | int i915_gem_init_object(struct drm_gem_object *obj); | 1087 | int i915_gem_init_object(struct drm_gem_object *obj); |
1088 | struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | 1088 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
1089 | size_t size); | 1089 | size_t size); |
1090 | void i915_gem_free_object(struct drm_gem_object *obj); | 1090 | void i915_gem_free_object(struct drm_gem_object *obj); |
1091 | int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, | 1091 | int i915_gem_object_pin(struct drm_i915_gem_object *obj, |
1092 | uint32_t alignment, | ||
1092 | bool map_and_fenceable); | 1093 | bool map_and_fenceable); |
1093 | void i915_gem_object_unpin(struct drm_gem_object *obj); | 1094 | void i915_gem_object_unpin(struct drm_i915_gem_object *obj); |
1094 | int i915_gem_object_unbind(struct drm_gem_object *obj); | 1095 | int i915_gem_object_unbind(struct drm_i915_gem_object *obj); |
1095 | void i915_gem_release_mmap(struct drm_gem_object *obj); | 1096 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
1096 | void i915_gem_lastclose(struct drm_device *dev); | 1097 | void i915_gem_lastclose(struct drm_device *dev); |
1097 | 1098 | ||
1098 | /** | 1099 | /** |
@@ -1104,14 +1105,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) | |||
1104 | return (int32_t)(seq1 - seq2) >= 0; | 1105 | return (int32_t)(seq1 - seq2) >= 0; |
1105 | } | 1106 | } |
1106 | 1107 | ||
1107 | int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, | 1108 | int i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, |
1108 | bool interruptible); | 1109 | bool interruptible); |
1109 | int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, | 1110 | int i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, |
1110 | bool interruptible); | 1111 | bool interruptible); |
1111 | void i915_gem_retire_requests(struct drm_device *dev); | 1112 | void i915_gem_retire_requests(struct drm_device *dev); |
1112 | void i915_gem_reset(struct drm_device *dev); | 1113 | void i915_gem_reset(struct drm_device *dev); |
1113 | void i915_gem_clflush_object(struct drm_gem_object *obj); | 1114 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj); |
1114 | int i915_gem_object_set_domain(struct drm_gem_object *obj, | 1115 | int i915_gem_object_set_domain(struct drm_i915_gem_object *obj, |
1115 | uint32_t read_domains, | 1116 | uint32_t read_domains, |
1116 | uint32_t write_domain); | 1117 | uint32_t write_domain); |
1117 | int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, | 1118 | int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, |
@@ -1131,23 +1132,23 @@ int i915_do_wait_request(struct drm_device *dev, | |||
1131 | bool interruptible, | 1132 | bool interruptible, |
1132 | struct intel_ring_buffer *ring); | 1133 | struct intel_ring_buffer *ring); |
1133 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 1134 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
1134 | int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, | 1135 | int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, |
1135 | int write); | 1136 | int write); |
1136 | int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, | 1137 | int i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, |
1137 | bool pipelined); | 1138 | bool pipelined); |
1138 | int i915_gem_attach_phys_object(struct drm_device *dev, | 1139 | int i915_gem_attach_phys_object(struct drm_device *dev, |
1139 | struct drm_gem_object *obj, | 1140 | struct drm_i915_gem_object *obj, |
1140 | int id, | 1141 | int id, |
1141 | int align); | 1142 | int align); |
1142 | void i915_gem_detach_phys_object(struct drm_device *dev, | 1143 | void i915_gem_detach_phys_object(struct drm_device *dev, |
1143 | struct drm_gem_object *obj); | 1144 | struct drm_i915_gem_object *obj); |
1144 | void i915_gem_free_all_phys_object(struct drm_device *dev); | 1145 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
1145 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); | 1146 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
1146 | 1147 | ||
1147 | /* i915_gem_gtt.c */ | 1148 | /* i915_gem_gtt.c */ |
1148 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); | 1149 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); |
1149 | int i915_gem_gtt_bind_object(struct drm_gem_object *obj); | 1150 | int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); |
1150 | void i915_gem_gtt_unbind_object(struct drm_gem_object *obj); | 1151 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); |
1151 | 1152 | ||
1152 | /* i915_gem_evict.c */ | 1153 | /* i915_gem_evict.c */ |
1153 | int i915_gem_evict_something(struct drm_device *dev, int min_size, | 1154 | int i915_gem_evict_something(struct drm_device *dev, int min_size, |
@@ -1157,19 +1158,20 @@ int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only); | |||
1157 | 1158 | ||
1158 | /* i915_gem_tiling.c */ | 1159 | /* i915_gem_tiling.c */ |
1159 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); | 1160 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
1160 | void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); | 1161 | void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); |
1161 | void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); | 1162 | void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); |
1162 | 1163 | ||
1163 | /* i915_gem_debug.c */ | 1164 | /* i915_gem_debug.c */ |
1164 | void i915_gem_dump_object(struct drm_gem_object *obj, int len, | 1165 | void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, |
1165 | const char *where, uint32_t mark); | 1166 | const char *where, uint32_t mark); |
1166 | #if WATCH_LISTS | 1167 | #if WATCH_LISTS |
1167 | int i915_verify_lists(struct drm_device *dev); | 1168 | int i915_verify_lists(struct drm_device *dev); |
1168 | #else | 1169 | #else |
1169 | #define i915_verify_lists(dev) 0 | 1170 | #define i915_verify_lists(dev) 0 |
1170 | #endif | 1171 | #endif |
1171 | void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); | 1172 | void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, |
1172 | void i915_gem_dump_object(struct drm_gem_object *obj, int len, | 1173 | int handle); |
1174 | void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, | ||
1173 | const char *where, uint32_t mark); | 1175 | const char *where, uint32_t mark); |
1174 | 1176 | ||
1175 | /* i915_debugfs.c */ | 1177 | /* i915_debugfs.c */ |
@@ -1251,10 +1253,10 @@ extern void intel_display_print_error_state(struct seq_file *m, | |||
1251 | * In that case, we don't need to do it when GEM is initialized as nobody else | 1253 | * In that case, we don't need to do it when GEM is initialized as nobody else |
1252 | * has access to the ring. | 1254 | * has access to the ring. |
1253 | */ | 1255 | */ |
1254 | #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ | 1256 | #define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ |
1255 | if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \ | 1257 | if (((drm_i915_private_t *)dev->dev_private)->render_ring.obj \ |
1256 | == NULL) \ | 1258 | == NULL) \ |
1257 | LOCK_TEST_WITH_RETURN(dev, file_priv); \ | 1259 | LOCK_TEST_WITH_RETURN(dev, file); \ |
1258 | } while (0) | 1260 | } while (0) |
1259 | 1261 | ||
1260 | 1262 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3cac366b3053..d196895527a6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -41,29 +41,30 @@ struct change_domains { | |||
41 | uint32_t flush_rings; | 41 | uint32_t flush_rings; |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv); | 44 | static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj); |
45 | static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv); | 45 | static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj); |
46 | 46 | ||
47 | static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, | 47 | static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj, |
48 | bool pipelined); | 48 | bool pipelined); |
49 | static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); | 49 | static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); |
50 | static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); | 50 | static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); |
51 | static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, | 51 | static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, |
52 | int write); | 52 | int write); |
53 | static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 53 | static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, |
54 | uint64_t offset, | 54 | uint64_t offset, |
55 | uint64_t size); | 55 | uint64_t size); |
56 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); | 56 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj); |
57 | static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, | 57 | static int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, |
58 | bool interruptible); | 58 | bool interruptible); |
59 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | 59 | static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, |
60 | unsigned alignment, | 60 | unsigned alignment, |
61 | bool map_and_fenceable); | 61 | bool map_and_fenceable); |
62 | static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); | 62 | static void i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj); |
63 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 63 | static int i915_gem_phys_pwrite(struct drm_device *dev, |
64 | struct drm_i915_gem_object *obj, | ||
64 | struct drm_i915_gem_pwrite *args, | 65 | struct drm_i915_gem_pwrite *args, |
65 | struct drm_file *file_priv); | 66 | struct drm_file *file); |
66 | static void i915_gem_free_object_tail(struct drm_gem_object *obj); | 67 | static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); |
67 | 68 | ||
68 | static int i915_gem_inactive_shrink(struct shrinker *shrinker, | 69 | static int i915_gem_inactive_shrink(struct shrinker *shrinker, |
69 | int nr_to_scan, | 70 | int nr_to_scan, |
@@ -212,11 +213,9 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev) | |||
212 | } | 213 | } |
213 | 214 | ||
214 | static inline bool | 215 | static inline bool |
215 | i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) | 216 | i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) |
216 | { | 217 | { |
217 | return obj_priv->gtt_space && | 218 | return obj->gtt_space && !obj->active && obj->pin_count == 0; |
218 | !obj_priv->active && | ||
219 | obj_priv->pin_count == 0; | ||
220 | } | 219 | } |
221 | 220 | ||
222 | int i915_gem_do_init(struct drm_device *dev, | 221 | int i915_gem_do_init(struct drm_device *dev, |
@@ -244,7 +243,7 @@ int i915_gem_do_init(struct drm_device *dev, | |||
244 | 243 | ||
245 | int | 244 | int |
246 | i915_gem_init_ioctl(struct drm_device *dev, void *data, | 245 | i915_gem_init_ioctl(struct drm_device *dev, void *data, |
247 | struct drm_file *file_priv) | 246 | struct drm_file *file) |
248 | { | 247 | { |
249 | struct drm_i915_gem_init *args = data; | 248 | struct drm_i915_gem_init *args = data; |
250 | int ret; | 249 | int ret; |
@@ -258,7 +257,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data, | |||
258 | 257 | ||
259 | int | 258 | int |
260 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | 259 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, |
261 | struct drm_file *file_priv) | 260 | struct drm_file *file) |
262 | { | 261 | { |
263 | struct drm_i915_private *dev_priv = dev->dev_private; | 262 | struct drm_i915_private *dev_priv = dev->dev_private; |
264 | struct drm_i915_gem_get_aperture *args = data; | 263 | struct drm_i915_gem_get_aperture *args = data; |
@@ -280,10 +279,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
280 | */ | 279 | */ |
281 | int | 280 | int |
282 | i915_gem_create_ioctl(struct drm_device *dev, void *data, | 281 | i915_gem_create_ioctl(struct drm_device *dev, void *data, |
283 | struct drm_file *file_priv) | 282 | struct drm_file *file) |
284 | { | 283 | { |
285 | struct drm_i915_gem_create *args = data; | 284 | struct drm_i915_gem_create *args = data; |
286 | struct drm_gem_object *obj; | 285 | struct drm_i915_gem_object *obj; |
287 | int ret; | 286 | int ret; |
288 | u32 handle; | 287 | u32 handle; |
289 | 288 | ||
@@ -294,29 +293,28 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, | |||
294 | if (obj == NULL) | 293 | if (obj == NULL) |
295 | return -ENOMEM; | 294 | return -ENOMEM; |
296 | 295 | ||
297 | ret = drm_gem_handle_create(file_priv, obj, &handle); | 296 | ret = drm_gem_handle_create(file, &obj->base, &handle); |
298 | if (ret) { | 297 | if (ret) { |
299 | drm_gem_object_release(obj); | 298 | drm_gem_object_release(&obj->base); |
300 | i915_gem_info_remove_obj(dev->dev_private, obj->size); | 299 | i915_gem_info_remove_obj(dev->dev_private, obj->base.size); |
301 | kfree(obj); | 300 | kfree(obj); |
302 | return ret; | 301 | return ret; |
303 | } | 302 | } |
304 | 303 | ||
305 | /* drop reference from allocate - handle holds it now */ | 304 | /* drop reference from allocate - handle holds it now */ |
306 | drm_gem_object_unreference(obj); | 305 | drm_gem_object_unreference(&obj->base); |
307 | trace_i915_gem_object_create(obj); | 306 | trace_i915_gem_object_create(obj); |
308 | 307 | ||
309 | args->handle = handle; | 308 | args->handle = handle; |
310 | return 0; | 309 | return 0; |
311 | } | 310 | } |
312 | 311 | ||
313 | static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) | 312 | static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) |
314 | { | 313 | { |
315 | drm_i915_private_t *dev_priv = obj->dev->dev_private; | 314 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
316 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
317 | 315 | ||
318 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && | 316 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
319 | obj_priv->tiling_mode != I915_TILING_NONE; | 317 | obj->tiling_mode != I915_TILING_NONE; |
320 | } | 318 | } |
321 | 319 | ||
322 | static inline void | 320 | static inline void |
@@ -392,12 +390,12 @@ slow_shmem_bit17_copy(struct page *gpu_page, | |||
392 | * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). | 390 | * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). |
393 | */ | 391 | */ |
394 | static int | 392 | static int |
395 | i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | 393 | i915_gem_shmem_pread_fast(struct drm_device *dev, |
394 | struct drm_i915_gem_object *obj, | ||
396 | struct drm_i915_gem_pread *args, | 395 | struct drm_i915_gem_pread *args, |
397 | struct drm_file *file_priv) | 396 | struct drm_file *file) |
398 | { | 397 | { |
399 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 398 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
400 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | ||
401 | ssize_t remain; | 399 | ssize_t remain; |
402 | loff_t offset; | 400 | loff_t offset; |
403 | char __user *user_data; | 401 | char __user *user_data; |
@@ -406,7 +404,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
406 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 404 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
407 | remain = args->size; | 405 | remain = args->size; |
408 | 406 | ||
409 | obj_priv = to_intel_bo(obj); | ||
410 | offset = args->offset; | 407 | offset = args->offset; |
411 | 408 | ||
412 | while (remain > 0) { | 409 | while (remain > 0) { |
@@ -455,12 +452,12 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
455 | * and not take page faults. | 452 | * and not take page faults. |
456 | */ | 453 | */ |
457 | static int | 454 | static int |
458 | i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | 455 | i915_gem_shmem_pread_slow(struct drm_device *dev, |
456 | struct drm_i915_gem_object *obj, | ||
459 | struct drm_i915_gem_pread *args, | 457 | struct drm_i915_gem_pread *args, |
460 | struct drm_file *file_priv) | 458 | struct drm_file *file) |
461 | { | 459 | { |
462 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 460 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
463 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
464 | struct mm_struct *mm = current->mm; | 461 | struct mm_struct *mm = current->mm; |
465 | struct page **user_pages; | 462 | struct page **user_pages; |
466 | ssize_t remain; | 463 | ssize_t remain; |
@@ -506,7 +503,6 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
506 | 503 | ||
507 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | 504 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); |
508 | 505 | ||
509 | obj_priv = to_intel_bo(obj); | ||
510 | offset = args->offset; | 506 | offset = args->offset; |
511 | 507 | ||
512 | while (remain > 0) { | 508 | while (remain > 0) { |
@@ -575,11 +571,10 @@ out: | |||
575 | */ | 571 | */ |
576 | int | 572 | int |
577 | i915_gem_pread_ioctl(struct drm_device *dev, void *data, | 573 | i915_gem_pread_ioctl(struct drm_device *dev, void *data, |
578 | struct drm_file *file_priv) | 574 | struct drm_file *file) |
579 | { | 575 | { |
580 | struct drm_i915_gem_pread *args = data; | 576 | struct drm_i915_gem_pread *args = data; |
581 | struct drm_gem_object *obj; | 577 | struct drm_i915_gem_object *obj; |
582 | struct drm_i915_gem_object *obj_priv; | ||
583 | int ret = 0; | 578 | int ret = 0; |
584 | 579 | ||
585 | if (args->size == 0) | 580 | if (args->size == 0) |
@@ -599,15 +594,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
599 | if (ret) | 594 | if (ret) |
600 | return ret; | 595 | return ret; |
601 | 596 | ||
602 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 597 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
603 | if (obj == NULL) { | 598 | if (obj == NULL) { |
604 | ret = -ENOENT; | 599 | ret = -ENOENT; |
605 | goto unlock; | 600 | goto unlock; |
606 | } | 601 | } |
607 | obj_priv = to_intel_bo(obj); | ||
608 | 602 | ||
609 | /* Bounds check source. */ | 603 | /* Bounds check source. */ |
610 | if (args->offset > obj->size || args->size > obj->size - args->offset) { | 604 | if (args->offset > obj->base.size || |
605 | args->size > obj->base.size - args->offset) { | ||
611 | ret = -EINVAL; | 606 | ret = -EINVAL; |
612 | goto out; | 607 | goto out; |
613 | } | 608 | } |
@@ -620,12 +615,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
620 | 615 | ||
621 | ret = -EFAULT; | 616 | ret = -EFAULT; |
622 | if (!i915_gem_object_needs_bit17_swizzle(obj)) | 617 | if (!i915_gem_object_needs_bit17_swizzle(obj)) |
623 | ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); | 618 | ret = i915_gem_shmem_pread_fast(dev, obj, args, file); |
624 | if (ret == -EFAULT) | 619 | if (ret == -EFAULT) |
625 | ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); | 620 | ret = i915_gem_shmem_pread_slow(dev, obj, args, file); |
626 | 621 | ||
627 | out: | 622 | out: |
628 | drm_gem_object_unreference(obj); | 623 | drm_gem_object_unreference(&obj->base); |
629 | unlock: | 624 | unlock: |
630 | mutex_unlock(&dev->struct_mutex); | 625 | mutex_unlock(&dev->struct_mutex); |
631 | return ret; | 626 | return ret; |
@@ -680,11 +675,11 @@ slow_kernel_write(struct io_mapping *mapping, | |||
680 | * user into the GTT, uncached. | 675 | * user into the GTT, uncached. |
681 | */ | 676 | */ |
682 | static int | 677 | static int |
683 | i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | 678 | i915_gem_gtt_pwrite_fast(struct drm_device *dev, |
679 | struct drm_i915_gem_object *obj, | ||
684 | struct drm_i915_gem_pwrite *args, | 680 | struct drm_i915_gem_pwrite *args, |
685 | struct drm_file *file_priv) | 681 | struct drm_file *file) |
686 | { | 682 | { |
687 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
688 | drm_i915_private_t *dev_priv = dev->dev_private; | 683 | drm_i915_private_t *dev_priv = dev->dev_private; |
689 | ssize_t remain; | 684 | ssize_t remain; |
690 | loff_t offset, page_base; | 685 | loff_t offset, page_base; |
@@ -694,8 +689,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
694 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 689 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
695 | remain = args->size; | 690 | remain = args->size; |
696 | 691 | ||
697 | obj_priv = to_intel_bo(obj); | 692 | offset = obj->gtt_offset + args->offset; |
698 | offset = obj_priv->gtt_offset + args->offset; | ||
699 | 693 | ||
700 | while (remain > 0) { | 694 | while (remain > 0) { |
701 | /* Operation in this page | 695 | /* Operation in this page |
@@ -735,11 +729,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
735 | * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). | 729 | * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). |
736 | */ | 730 | */ |
737 | static int | 731 | static int |
738 | i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | 732 | i915_gem_gtt_pwrite_slow(struct drm_device *dev, |
733 | struct drm_i915_gem_object *obj, | ||
739 | struct drm_i915_gem_pwrite *args, | 734 | struct drm_i915_gem_pwrite *args, |
740 | struct drm_file *file_priv) | 735 | struct drm_file *file) |
741 | { | 736 | { |
742 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
743 | drm_i915_private_t *dev_priv = dev->dev_private; | 737 | drm_i915_private_t *dev_priv = dev->dev_private; |
744 | ssize_t remain; | 738 | ssize_t remain; |
745 | loff_t gtt_page_base, offset; | 739 | loff_t gtt_page_base, offset; |
@@ -780,8 +774,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
780 | if (ret) | 774 | if (ret) |
781 | goto out_unpin_pages; | 775 | goto out_unpin_pages; |
782 | 776 | ||
783 | obj_priv = to_intel_bo(obj); | 777 | offset = obj->gtt_offset + args->offset; |
784 | offset = obj_priv->gtt_offset + args->offset; | ||
785 | 778 | ||
786 | while (remain > 0) { | 779 | while (remain > 0) { |
787 | /* Operation in this page | 780 | /* Operation in this page |
@@ -827,12 +820,12 @@ out_unpin_pages: | |||
827 | * copy_from_user into the kmapped pages backing the object. | 820 | * copy_from_user into the kmapped pages backing the object. |
828 | */ | 821 | */ |
829 | static int | 822 | static int |
830 | i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | 823 | i915_gem_shmem_pwrite_fast(struct drm_device *dev, |
824 | struct drm_i915_gem_object *obj, | ||
831 | struct drm_i915_gem_pwrite *args, | 825 | struct drm_i915_gem_pwrite *args, |
832 | struct drm_file *file_priv) | 826 | struct drm_file *file) |
833 | { | 827 | { |
834 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 828 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
835 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
836 | ssize_t remain; | 829 | ssize_t remain; |
837 | loff_t offset; | 830 | loff_t offset; |
838 | char __user *user_data; | 831 | char __user *user_data; |
@@ -841,9 +834,8 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
841 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 834 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
842 | remain = args->size; | 835 | remain = args->size; |
843 | 836 | ||
844 | obj_priv = to_intel_bo(obj); | ||
845 | offset = args->offset; | 837 | offset = args->offset; |
846 | obj_priv->dirty = 1; | 838 | obj->dirty = 1; |
847 | 839 | ||
848 | while (remain > 0) { | 840 | while (remain > 0) { |
849 | struct page *page; | 841 | struct page *page; |
@@ -898,12 +890,12 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
898 | * struct_mutex is held. | 890 | * struct_mutex is held. |
899 | */ | 891 | */ |
900 | static int | 892 | static int |
901 | i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | 893 | i915_gem_shmem_pwrite_slow(struct drm_device *dev, |
894 | struct drm_i915_gem_object *obj, | ||
902 | struct drm_i915_gem_pwrite *args, | 895 | struct drm_i915_gem_pwrite *args, |
903 | struct drm_file *file_priv) | 896 | struct drm_file *file) |
904 | { | 897 | { |
905 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 898 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
906 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
907 | struct mm_struct *mm = current->mm; | 899 | struct mm_struct *mm = current->mm; |
908 | struct page **user_pages; | 900 | struct page **user_pages; |
909 | ssize_t remain; | 901 | ssize_t remain; |
@@ -947,9 +939,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
947 | 939 | ||
948 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | 940 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); |
949 | 941 | ||
950 | obj_priv = to_intel_bo(obj); | ||
951 | offset = args->offset; | 942 | offset = args->offset; |
952 | obj_priv->dirty = 1; | 943 | obj->dirty = 1; |
953 | 944 | ||
954 | while (remain > 0) { | 945 | while (remain > 0) { |
955 | struct page *page; | 946 | struct page *page; |
@@ -1020,8 +1011,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1020 | struct drm_file *file) | 1011 | struct drm_file *file) |
1021 | { | 1012 | { |
1022 | struct drm_i915_gem_pwrite *args = data; | 1013 | struct drm_i915_gem_pwrite *args = data; |
1023 | struct drm_gem_object *obj; | 1014 | struct drm_i915_gem_object *obj; |
1024 | struct drm_i915_gem_object *obj_priv; | ||
1025 | int ret; | 1015 | int ret; |
1026 | 1016 | ||
1027 | if (args->size == 0) | 1017 | if (args->size == 0) |
@@ -1041,15 +1031,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1041 | if (ret) | 1031 | if (ret) |
1042 | return ret; | 1032 | return ret; |
1043 | 1033 | ||
1044 | obj = drm_gem_object_lookup(dev, file, args->handle); | 1034 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1045 | if (obj == NULL) { | 1035 | if (obj == NULL) { |
1046 | ret = -ENOENT; | 1036 | ret = -ENOENT; |
1047 | goto unlock; | 1037 | goto unlock; |
1048 | } | 1038 | } |
1049 | obj_priv = to_intel_bo(obj); | ||
1050 | 1039 | ||
1051 | /* Bounds check destination. */ | 1040 | /* Bounds check destination. */ |
1052 | if (args->offset > obj->size || args->size > obj->size - args->offset) { | 1041 | if (args->offset > obj->base.size || |
1042 | args->size > obj->base.size - args->offset) { | ||
1053 | ret = -EINVAL; | 1043 | ret = -EINVAL; |
1054 | goto out; | 1044 | goto out; |
1055 | } | 1045 | } |
@@ -1060,11 +1050,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1060 | * pread/pwrite currently are reading and writing from the CPU | 1050 | * pread/pwrite currently are reading and writing from the CPU |
1061 | * perspective, requiring manual detiling by the client. | 1051 | * perspective, requiring manual detiling by the client. |
1062 | */ | 1052 | */ |
1063 | if (obj_priv->phys_obj) | 1053 | if (obj->phys_obj) |
1064 | ret = i915_gem_phys_pwrite(dev, obj, args, file); | 1054 | ret = i915_gem_phys_pwrite(dev, obj, args, file); |
1065 | else if (obj_priv->tiling_mode == I915_TILING_NONE && | 1055 | else if (obj->tiling_mode == I915_TILING_NONE && |
1066 | obj_priv->gtt_space && | 1056 | obj->gtt_space && |
1067 | obj->write_domain != I915_GEM_DOMAIN_CPU) { | 1057 | obj->base.write_domain != I915_GEM_DOMAIN_CPU) { |
1068 | ret = i915_gem_object_pin(obj, 0, true); | 1058 | ret = i915_gem_object_pin(obj, 0, true); |
1069 | if (ret) | 1059 | if (ret) |
1070 | goto out; | 1060 | goto out; |
@@ -1092,7 +1082,7 @@ out_unpin: | |||
1092 | } | 1082 | } |
1093 | 1083 | ||
1094 | out: | 1084 | out: |
1095 | drm_gem_object_unreference(obj); | 1085 | drm_gem_object_unreference(&obj->base); |
1096 | unlock: | 1086 | unlock: |
1097 | mutex_unlock(&dev->struct_mutex); | 1087 | mutex_unlock(&dev->struct_mutex); |
1098 | return ret; | 1088 | return ret; |
@@ -1104,12 +1094,11 @@ unlock: | |||
1104 | */ | 1094 | */ |
1105 | int | 1095 | int |
1106 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | 1096 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
1107 | struct drm_file *file_priv) | 1097 | struct drm_file *file) |
1108 | { | 1098 | { |
1109 | struct drm_i915_private *dev_priv = dev->dev_private; | 1099 | struct drm_i915_private *dev_priv = dev->dev_private; |
1110 | struct drm_i915_gem_set_domain *args = data; | 1100 | struct drm_i915_gem_set_domain *args = data; |
1111 | struct drm_gem_object *obj; | 1101 | struct drm_i915_gem_object *obj; |
1112 | struct drm_i915_gem_object *obj_priv; | ||
1113 | uint32_t read_domains = args->read_domains; | 1102 | uint32_t read_domains = args->read_domains; |
1114 | uint32_t write_domain = args->write_domain; | 1103 | uint32_t write_domain = args->write_domain; |
1115 | int ret; | 1104 | int ret; |
@@ -1134,12 +1123,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1134 | if (ret) | 1123 | if (ret) |
1135 | return ret; | 1124 | return ret; |
1136 | 1125 | ||
1137 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1126 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1138 | if (obj == NULL) { | 1127 | if (obj == NULL) { |
1139 | ret = -ENOENT; | 1128 | ret = -ENOENT; |
1140 | goto unlock; | 1129 | goto unlock; |
1141 | } | 1130 | } |
1142 | obj_priv = to_intel_bo(obj); | ||
1143 | 1131 | ||
1144 | intel_mark_busy(dev, obj); | 1132 | intel_mark_busy(dev, obj); |
1145 | 1133 | ||
@@ -1149,9 +1137,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1149 | /* Update the LRU on the fence for the CPU access that's | 1137 | /* Update the LRU on the fence for the CPU access that's |
1150 | * about to occur. | 1138 | * about to occur. |
1151 | */ | 1139 | */ |
1152 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 1140 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
1153 | struct drm_i915_fence_reg *reg = | 1141 | struct drm_i915_fence_reg *reg = |
1154 | &dev_priv->fence_regs[obj_priv->fence_reg]; | 1142 | &dev_priv->fence_regs[obj->fence_reg]; |
1155 | list_move_tail(®->lru_list, | 1143 | list_move_tail(®->lru_list, |
1156 | &dev_priv->mm.fence_list); | 1144 | &dev_priv->mm.fence_list); |
1157 | } | 1145 | } |
@@ -1167,10 +1155,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1167 | } | 1155 | } |
1168 | 1156 | ||
1169 | /* Maintain LRU order of "inactive" objects */ | 1157 | /* Maintain LRU order of "inactive" objects */ |
1170 | if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) | 1158 | if (ret == 0 && i915_gem_object_is_inactive(obj)) |
1171 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | 1159 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
1172 | 1160 | ||
1173 | drm_gem_object_unreference(obj); | 1161 | drm_gem_object_unreference(&obj->base); |
1174 | unlock: | 1162 | unlock: |
1175 | mutex_unlock(&dev->struct_mutex); | 1163 | mutex_unlock(&dev->struct_mutex); |
1176 | return ret; | 1164 | return ret; |
@@ -1181,10 +1169,10 @@ unlock: | |||
1181 | */ | 1169 | */ |
1182 | int | 1170 | int |
1183 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | 1171 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
1184 | struct drm_file *file_priv) | 1172 | struct drm_file *file) |
1185 | { | 1173 | { |
1186 | struct drm_i915_gem_sw_finish *args = data; | 1174 | struct drm_i915_gem_sw_finish *args = data; |
1187 | struct drm_gem_object *obj; | 1175 | struct drm_i915_gem_object *obj; |
1188 | int ret = 0; | 1176 | int ret = 0; |
1189 | 1177 | ||
1190 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1178 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
@@ -1194,17 +1182,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1194 | if (ret) | 1182 | if (ret) |
1195 | return ret; | 1183 | return ret; |
1196 | 1184 | ||
1197 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1185 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1198 | if (obj == NULL) { | 1186 | if (obj == NULL) { |
1199 | ret = -ENOENT; | 1187 | ret = -ENOENT; |
1200 | goto unlock; | 1188 | goto unlock; |
1201 | } | 1189 | } |
1202 | 1190 | ||
1203 | /* Pinned buffers may be scanout, so flush the cache */ | 1191 | /* Pinned buffers may be scanout, so flush the cache */ |
1204 | if (to_intel_bo(obj)->pin_count) | 1192 | if (obj->pin_count) |
1205 | i915_gem_object_flush_cpu_write_domain(obj); | 1193 | i915_gem_object_flush_cpu_write_domain(obj); |
1206 | 1194 | ||
1207 | drm_gem_object_unreference(obj); | 1195 | drm_gem_object_unreference(&obj->base); |
1208 | unlock: | 1196 | unlock: |
1209 | mutex_unlock(&dev->struct_mutex); | 1197 | mutex_unlock(&dev->struct_mutex); |
1210 | return ret; | 1198 | return ret; |
@@ -1219,7 +1207,7 @@ unlock: | |||
1219 | */ | 1207 | */ |
1220 | int | 1208 | int |
1221 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | 1209 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
1222 | struct drm_file *file_priv) | 1210 | struct drm_file *file) |
1223 | { | 1211 | { |
1224 | struct drm_i915_private *dev_priv = dev->dev_private; | 1212 | struct drm_i915_private *dev_priv = dev->dev_private; |
1225 | struct drm_i915_gem_mmap *args = data; | 1213 | struct drm_i915_gem_mmap *args = data; |
@@ -1230,7 +1218,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1230 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1218 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
1231 | return -ENODEV; | 1219 | return -ENODEV; |
1232 | 1220 | ||
1233 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1221 | obj = drm_gem_object_lookup(dev, file, args->handle); |
1234 | if (obj == NULL) | 1222 | if (obj == NULL) |
1235 | return -ENOENT; | 1223 | return -ENOENT; |
1236 | 1224 | ||
@@ -1273,10 +1261,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1273 | */ | 1261 | */ |
1274 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1262 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
1275 | { | 1263 | { |
1276 | struct drm_gem_object *obj = vma->vm_private_data; | 1264 | struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); |
1277 | struct drm_device *dev = obj->dev; | 1265 | struct drm_device *dev = obj->base.dev; |
1278 | drm_i915_private_t *dev_priv = dev->dev_private; | 1266 | drm_i915_private_t *dev_priv = dev->dev_private; |
1279 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1280 | pgoff_t page_offset; | 1267 | pgoff_t page_offset; |
1281 | unsigned long pfn; | 1268 | unsigned long pfn; |
1282 | int ret = 0; | 1269 | int ret = 0; |
@@ -1288,17 +1275,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1288 | 1275 | ||
1289 | /* Now bind it into the GTT if needed */ | 1276 | /* Now bind it into the GTT if needed */ |
1290 | mutex_lock(&dev->struct_mutex); | 1277 | mutex_lock(&dev->struct_mutex); |
1291 | BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable); | 1278 | BUG_ON(obj->pin_count && !obj->pin_mappable); |
1292 | 1279 | ||
1293 | if (obj_priv->gtt_space) { | 1280 | if (obj->gtt_space) { |
1294 | if (!obj_priv->map_and_fenceable) { | 1281 | if (!obj->map_and_fenceable) { |
1295 | ret = i915_gem_object_unbind(obj); | 1282 | ret = i915_gem_object_unbind(obj); |
1296 | if (ret) | 1283 | if (ret) |
1297 | goto unlock; | 1284 | goto unlock; |
1298 | } | 1285 | } |
1299 | } | 1286 | } |
1300 | 1287 | ||
1301 | if (!obj_priv->gtt_space) { | 1288 | if (!obj->gtt_space) { |
1302 | ret = i915_gem_object_bind_to_gtt(obj, 0, true); | 1289 | ret = i915_gem_object_bind_to_gtt(obj, 0, true); |
1303 | if (ret) | 1290 | if (ret) |
1304 | goto unlock; | 1291 | goto unlock; |
@@ -1308,22 +1295,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1308 | if (ret) | 1295 | if (ret) |
1309 | goto unlock; | 1296 | goto unlock; |
1310 | 1297 | ||
1311 | if (!obj_priv->fault_mappable) { | 1298 | if (!obj->fault_mappable) { |
1312 | obj_priv->fault_mappable = true; | 1299 | obj->fault_mappable = true; |
1313 | i915_gem_info_update_mappable(dev_priv, obj_priv, true); | 1300 | i915_gem_info_update_mappable(dev_priv, obj, true); |
1314 | } | 1301 | } |
1315 | 1302 | ||
1316 | /* Need a new fence register? */ | 1303 | /* Need a new fence register? */ |
1317 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1304 | if (obj->tiling_mode != I915_TILING_NONE) { |
1318 | ret = i915_gem_object_get_fence_reg(obj, true); | 1305 | ret = i915_gem_object_get_fence_reg(obj, true); |
1319 | if (ret) | 1306 | if (ret) |
1320 | goto unlock; | 1307 | goto unlock; |
1321 | } | 1308 | } |
1322 | 1309 | ||
1323 | if (i915_gem_object_is_inactive(obj_priv)) | 1310 | if (i915_gem_object_is_inactive(obj)) |
1324 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | 1311 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
1325 | 1312 | ||
1326 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + | 1313 | pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) + |
1327 | page_offset; | 1314 | page_offset; |
1328 | 1315 | ||
1329 | /* Finally, remap it using the new GTT offset */ | 1316 | /* Finally, remap it using the new GTT offset */ |
@@ -1356,36 +1343,39 @@ unlock: | |||
1356 | * This routine allocates and attaches a fake offset for @obj. | 1343 | * This routine allocates and attaches a fake offset for @obj. |
1357 | */ | 1344 | */ |
1358 | static int | 1345 | static int |
1359 | i915_gem_create_mmap_offset(struct drm_gem_object *obj) | 1346 | i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj) |
1360 | { | 1347 | { |
1361 | struct drm_device *dev = obj->dev; | 1348 | struct drm_device *dev = obj->base.dev; |
1362 | struct drm_gem_mm *mm = dev->mm_private; | 1349 | struct drm_gem_mm *mm = dev->mm_private; |
1363 | struct drm_map_list *list; | 1350 | struct drm_map_list *list; |
1364 | struct drm_local_map *map; | 1351 | struct drm_local_map *map; |
1365 | int ret = 0; | 1352 | int ret = 0; |
1366 | 1353 | ||
1367 | /* Set the object up for mmap'ing */ | 1354 | /* Set the object up for mmap'ing */ |
1368 | list = &obj->map_list; | 1355 | list = &obj->base.map_list; |
1369 | list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); | 1356 | list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); |
1370 | if (!list->map) | 1357 | if (!list->map) |
1371 | return -ENOMEM; | 1358 | return -ENOMEM; |
1372 | 1359 | ||
1373 | map = list->map; | 1360 | map = list->map; |
1374 | map->type = _DRM_GEM; | 1361 | map->type = _DRM_GEM; |
1375 | map->size = obj->size; | 1362 | map->size = obj->base.size; |
1376 | map->handle = obj; | 1363 | map->handle = obj; |
1377 | 1364 | ||
1378 | /* Get a DRM GEM mmap offset allocated... */ | 1365 | /* Get a DRM GEM mmap offset allocated... */ |
1379 | list->file_offset_node = drm_mm_search_free(&mm->offset_manager, | 1366 | list->file_offset_node = drm_mm_search_free(&mm->offset_manager, |
1380 | obj->size / PAGE_SIZE, 0, 0); | 1367 | obj->base.size / PAGE_SIZE, |
1368 | 0, 0); | ||
1381 | if (!list->file_offset_node) { | 1369 | if (!list->file_offset_node) { |
1382 | DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); | 1370 | DRM_ERROR("failed to allocate offset for bo %d\n", |
1371 | obj->base.name); | ||
1383 | ret = -ENOSPC; | 1372 | ret = -ENOSPC; |
1384 | goto out_free_list; | 1373 | goto out_free_list; |
1385 | } | 1374 | } |
1386 | 1375 | ||
1387 | list->file_offset_node = drm_mm_get_block(list->file_offset_node, | 1376 | list->file_offset_node = drm_mm_get_block(list->file_offset_node, |
1388 | obj->size / PAGE_SIZE, 0); | 1377 | obj->base.size / PAGE_SIZE, |
1378 | 0); | ||
1389 | if (!list->file_offset_node) { | 1379 | if (!list->file_offset_node) { |
1390 | ret = -ENOMEM; | 1380 | ret = -ENOMEM; |
1391 | goto out_free_list; | 1381 | goto out_free_list; |
@@ -1424,29 +1414,28 @@ out_free_list: | |||
1424 | * fixup by i915_gem_fault(). | 1414 | * fixup by i915_gem_fault(). |
1425 | */ | 1415 | */ |
1426 | void | 1416 | void |
1427 | i915_gem_release_mmap(struct drm_gem_object *obj) | 1417 | i915_gem_release_mmap(struct drm_i915_gem_object *obj) |
1428 | { | 1418 | { |
1429 | struct drm_device *dev = obj->dev; | 1419 | struct drm_device *dev = obj->base.dev; |
1430 | struct drm_i915_private *dev_priv = dev->dev_private; | 1420 | struct drm_i915_private *dev_priv = dev->dev_private; |
1431 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1432 | 1421 | ||
1433 | if (unlikely(obj->map_list.map && dev->dev_mapping)) | 1422 | if (unlikely(obj->base.map_list.map && dev->dev_mapping)) |
1434 | unmap_mapping_range(dev->dev_mapping, | 1423 | unmap_mapping_range(dev->dev_mapping, |
1435 | (loff_t)obj->map_list.hash.key<<PAGE_SHIFT, | 1424 | (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, |
1436 | obj->size, 1); | 1425 | obj->base.size, 1); |
1437 | 1426 | ||
1438 | if (obj_priv->fault_mappable) { | 1427 | if (obj->fault_mappable) { |
1439 | obj_priv->fault_mappable = false; | 1428 | obj->fault_mappable = false; |
1440 | i915_gem_info_update_mappable(dev_priv, obj_priv, false); | 1429 | i915_gem_info_update_mappable(dev_priv, obj, false); |
1441 | } | 1430 | } |
1442 | } | 1431 | } |
1443 | 1432 | ||
1444 | static void | 1433 | static void |
1445 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) | 1434 | i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj) |
1446 | { | 1435 | { |
1447 | struct drm_device *dev = obj->dev; | 1436 | struct drm_device *dev = obj->base.dev; |
1448 | struct drm_gem_mm *mm = dev->mm_private; | 1437 | struct drm_gem_mm *mm = dev->mm_private; |
1449 | struct drm_map_list *list = &obj->map_list; | 1438 | struct drm_map_list *list = &obj->base.map_list; |
1450 | 1439 | ||
1451 | drm_ht_remove_item(&mm->offset_hash, &list->hash); | 1440 | drm_ht_remove_item(&mm->offset_hash, &list->hash); |
1452 | drm_mm_put_block(list->file_offset_node); | 1441 | drm_mm_put_block(list->file_offset_node); |
@@ -1462,23 +1451,23 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj) | |||
1462 | * potential fence register mapping. | 1451 | * potential fence register mapping. |
1463 | */ | 1452 | */ |
1464 | static uint32_t | 1453 | static uint32_t |
1465 | i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv) | 1454 | i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj) |
1466 | { | 1455 | { |
1467 | struct drm_device *dev = obj_priv->base.dev; | 1456 | struct drm_device *dev = obj->base.dev; |
1468 | 1457 | ||
1469 | /* | 1458 | /* |
1470 | * Minimum alignment is 4k (GTT page size), but might be greater | 1459 | * Minimum alignment is 4k (GTT page size), but might be greater |
1471 | * if a fence register is needed for the object. | 1460 | * if a fence register is needed for the object. |
1472 | */ | 1461 | */ |
1473 | if (INTEL_INFO(dev)->gen >= 4 || | 1462 | if (INTEL_INFO(dev)->gen >= 4 || |
1474 | obj_priv->tiling_mode == I915_TILING_NONE) | 1463 | obj->tiling_mode == I915_TILING_NONE) |
1475 | return 4096; | 1464 | return 4096; |
1476 | 1465 | ||
1477 | /* | 1466 | /* |
1478 | * Previous chips need to be aligned to the size of the smallest | 1467 | * Previous chips need to be aligned to the size of the smallest |
1479 | * fence register that can contain the object. | 1468 | * fence register that can contain the object. |
1480 | */ | 1469 | */ |
1481 | return i915_gem_get_gtt_size(obj_priv); | 1470 | return i915_gem_get_gtt_size(obj); |
1482 | } | 1471 | } |
1483 | 1472 | ||
1484 | /** | 1473 | /** |
@@ -1490,16 +1479,16 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv) | |||
1490 | * unfenced tiled surface requirements. | 1479 | * unfenced tiled surface requirements. |
1491 | */ | 1480 | */ |
1492 | static uint32_t | 1481 | static uint32_t |
1493 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv) | 1482 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) |
1494 | { | 1483 | { |
1495 | struct drm_device *dev = obj_priv->base.dev; | 1484 | struct drm_device *dev = obj->base.dev; |
1496 | int tile_height; | 1485 | int tile_height; |
1497 | 1486 | ||
1498 | /* | 1487 | /* |
1499 | * Minimum alignment is 4k (GTT page size) for sane hw. | 1488 | * Minimum alignment is 4k (GTT page size) for sane hw. |
1500 | */ | 1489 | */ |
1501 | if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || | 1490 | if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || |
1502 | obj_priv->tiling_mode == I915_TILING_NONE) | 1491 | obj->tiling_mode == I915_TILING_NONE) |
1503 | return 4096; | 1492 | return 4096; |
1504 | 1493 | ||
1505 | /* | 1494 | /* |
@@ -1508,18 +1497,18 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv) | |||
1508 | * placed in a fenced gtt region). | 1497 | * placed in a fenced gtt region). |
1509 | */ | 1498 | */ |
1510 | if (IS_GEN2(dev) || | 1499 | if (IS_GEN2(dev) || |
1511 | (obj_priv->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) | 1500 | (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) |
1512 | tile_height = 32; | 1501 | tile_height = 32; |
1513 | else | 1502 | else |
1514 | tile_height = 8; | 1503 | tile_height = 8; |
1515 | 1504 | ||
1516 | return tile_height * obj_priv->stride * 2; | 1505 | return tile_height * obj->stride * 2; |
1517 | } | 1506 | } |
1518 | 1507 | ||
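
Put concretely, the unfenced alignment above is two full tile rows of the surface: for a Y-tiled buffer on hardware with 128-byte Y tiles and a 2048-byte stride that is 32 * 2048 * 2 = 128KiB, while an X-tiled buffer with a 4096-byte stride needs 8 * 4096 * 2 = 64KiB (the strides are illustrative values, not taken from the patch).
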
1519 | static uint32_t | 1508 | static uint32_t |
1520 | i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) | 1509 | i915_gem_get_gtt_size(struct drm_i915_gem_object *obj) |
1521 | { | 1510 | { |
1522 | struct drm_device *dev = obj_priv->base.dev; | 1511 | struct drm_device *dev = obj->base.dev; |
1523 | uint32_t size; | 1512 | uint32_t size; |
1524 | 1513 | ||
1525 | /* | 1514 | /* |
@@ -1527,7 +1516,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) | |||
1527 | * if a fence register is needed for the object. | 1516 | * if a fence register is needed for the object. |
1528 | */ | 1517 | */ |
1529 | if (INTEL_INFO(dev)->gen >= 4) | 1518 | if (INTEL_INFO(dev)->gen >= 4) |
1530 | return obj_priv->base.size; | 1519 | return obj->base.size; |
1531 | 1520 | ||
1532 | /* | 1521 | /* |
1533 | * Previous chips need to be aligned to the size of the smallest | 1522 | * Previous chips need to be aligned to the size of the smallest |
@@ -1538,7 +1527,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) | |||
1538 | else | 1527 | else |
1539 | size = 512*1024; | 1528 | size = 512*1024; |
1540 | 1529 | ||
1541 | while (size < obj_priv->base.size) | 1530 | while (size < obj->base.size) |
1542 | size <<= 1; | 1531 | size <<= 1; |
1543 | 1532 | ||
1544 | return size; | 1533 | return size; |
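
The loop above simply rounds the object size up to the next power-of-two multiple of the chip's minimum fence size. A minimal standalone sketch of that rounding (the helper name and the example values are illustrative, not part of the patch):

    #include <stdint.h>

    /* Mirrors the pre-gen4 rounding in i915_gem_get_gtt_size();
     * min_size is the chip's minimum fence size (512KiB in the gen2
     * branch visible above).
     */
    static uint32_t fence_size_for(uint32_t obj_size, uint32_t min_size)
    {
        uint32_t size = min_size;

        while (size < obj_size)
            size <<= 1;

        /* e.g. a 1.5MiB object with a 1MiB minimum rounds up to 2MiB */
        return size;
    }
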
@@ -1548,7 +1537,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) | |||
1548 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing | 1537 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing |
1549 | * @dev: DRM device | 1538 | * @dev: DRM device |
1550 | * @data: GTT mapping ioctl data | 1539 | * @data: GTT mapping ioctl data |
1551 | * @file_priv: GEM object info | 1540 | * @file: GEM object info |
1552 | * | 1541 | * |
1553 | * Simply returns the fake offset to userspace so it can mmap it. | 1542 | * Simply returns the fake offset to userspace so it can mmap it. |
1554 | * The mmap call will end up in drm_gem_mmap(), which will set things | 1543 | * The mmap call will end up in drm_gem_mmap(), which will set things |
@@ -1561,12 +1550,11 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) | |||
1561 | */ | 1550 | */ |
1562 | int | 1551 | int |
1563 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | 1552 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, |
1564 | struct drm_file *file_priv) | 1553 | struct drm_file *file) |
1565 | { | 1554 | { |
1566 | struct drm_i915_private *dev_priv = dev->dev_private; | 1555 | struct drm_i915_private *dev_priv = dev->dev_private; |
1567 | struct drm_i915_gem_mmap_gtt *args = data; | 1556 | struct drm_i915_gem_mmap_gtt *args = data; |
1568 | struct drm_gem_object *obj; | 1557 | struct drm_i915_gem_object *obj; |
1569 | struct drm_i915_gem_object *obj_priv; | ||
1570 | int ret; | 1558 | int ret; |
1571 | 1559 | ||
1572 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1560 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
@@ -1576,44 +1564,42 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1576 | if (ret) | 1564 | if (ret) |
1577 | return ret; | 1565 | return ret; |
1578 | 1566 | ||
1579 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1567 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1580 | if (obj == NULL) { | 1568 | if (obj == NULL) { |
1581 | ret = -ENOENT; | 1569 | ret = -ENOENT; |
1582 | goto unlock; | 1570 | goto unlock; |
1583 | } | 1571 | } |
1584 | obj_priv = to_intel_bo(obj); | ||
1585 | 1572 | ||
1586 | if (obj->size > dev_priv->mm.gtt_mappable_end) { | 1573 | if (obj->base.size > dev_priv->mm.gtt_mappable_end) { |
1587 | ret = -E2BIG; | 1574 | ret = -E2BIG; |
1588 | goto unlock; | 1575 | goto unlock; |
1589 | } | 1576 | } |
1590 | 1577 | ||
1591 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 1578 | if (obj->madv != I915_MADV_WILLNEED) { |
1592 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | 1579 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); |
1593 | ret = -EINVAL; | 1580 | ret = -EINVAL; |
1594 | goto out; | 1581 | goto out; |
1595 | } | 1582 | } |
1596 | 1583 | ||
1597 | if (!obj->map_list.map) { | 1584 | if (!obj->base.map_list.map) { |
1598 | ret = i915_gem_create_mmap_offset(obj); | 1585 | ret = i915_gem_create_mmap_offset(obj); |
1599 | if (ret) | 1586 | if (ret) |
1600 | goto out; | 1587 | goto out; |
1601 | } | 1588 | } |
1602 | 1589 | ||
1603 | args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; | 1590 | args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; |
1604 | 1591 | ||
1605 | out: | 1592 | out: |
1606 | drm_gem_object_unreference(obj); | 1593 | drm_gem_object_unreference(&obj->base); |
1607 | unlock: | 1594 | unlock: |
1608 | mutex_unlock(&dev->struct_mutex); | 1595 | mutex_unlock(&dev->struct_mutex); |
1609 | return ret; | 1596 | return ret; |
1610 | } | 1597 | } |
1611 | 1598 | ||
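
For context, the fake offset returned above is only meaningful to a subsequent mmap() on the same DRM fd. A rough userspace sketch, assuming fd, handle and size come from earlier GEM calls and with error handling elided (not part of the patch):

    #include <stdint.h>
    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/i915_drm.h>

    /* Map a GEM object through the GTT using the fake offset handed back
     * by DRM_IOCTL_I915_GEM_MMAP_GTT.
     */
    static void *map_bo_gtt(int fd, uint32_t handle, size_t size)
    {
        struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

        if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
            return MAP_FAILED;

        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, (off_t)arg.offset);
    }
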
1612 | static int | 1599 | static int |
1613 | i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, | 1600 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, |
1614 | gfp_t gfpmask) | 1601 | gfp_t gfpmask) |
1615 | { | 1602 | { |
1616 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1617 | int page_count, i; | 1603 | int page_count, i; |
1618 | struct address_space *mapping; | 1604 | struct address_space *mapping; |
1619 | struct inode *inode; | 1605 | struct inode *inode; |
@@ -1622,13 +1608,13 @@ i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, | |||
1622 | /* Get the list of pages out of our struct file. They'll be pinned | 1608 | /* Get the list of pages out of our struct file. They'll be pinned |
1623 | * at this point until we release them. | 1609 | * at this point until we release them. |
1624 | */ | 1610 | */ |
1625 | page_count = obj->size / PAGE_SIZE; | 1611 | page_count = obj->base.size / PAGE_SIZE; |
1626 | BUG_ON(obj_priv->pages != NULL); | 1612 | BUG_ON(obj->pages != NULL); |
1627 | obj_priv->pages = drm_malloc_ab(page_count, sizeof(struct page *)); | 1613 | obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); |
1628 | if (obj_priv->pages == NULL) | 1614 | if (obj->pages == NULL) |
1629 | return -ENOMEM; | 1615 | return -ENOMEM; |
1630 | 1616 | ||
1631 | inode = obj->filp->f_path.dentry->d_inode; | 1617 | inode = obj->base.filp->f_path.dentry->d_inode; |
1632 | mapping = inode->i_mapping; | 1618 | mapping = inode->i_mapping; |
1633 | for (i = 0; i < page_count; i++) { | 1619 | for (i = 0; i < page_count; i++) { |
1634 | page = read_cache_page_gfp(mapping, i, | 1620 | page = read_cache_page_gfp(mapping, i, |
@@ -1639,51 +1625,50 @@ i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, | |||
1639 | if (IS_ERR(page)) | 1625 | if (IS_ERR(page)) |
1640 | goto err_pages; | 1626 | goto err_pages; |
1641 | 1627 | ||
1642 | obj_priv->pages[i] = page; | 1628 | obj->pages[i] = page; |
1643 | } | 1629 | } |
1644 | 1630 | ||
1645 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1631 | if (obj->tiling_mode != I915_TILING_NONE) |
1646 | i915_gem_object_do_bit_17_swizzle(obj); | 1632 | i915_gem_object_do_bit_17_swizzle(obj); |
1647 | 1633 | ||
1648 | return 0; | 1634 | return 0; |
1649 | 1635 | ||
1650 | err_pages: | 1636 | err_pages: |
1651 | while (i--) | 1637 | while (i--) |
1652 | page_cache_release(obj_priv->pages[i]); | 1638 | page_cache_release(obj->pages[i]); |
1653 | 1639 | ||
1654 | drm_free_large(obj_priv->pages); | 1640 | drm_free_large(obj->pages); |
1655 | obj_priv->pages = NULL; | 1641 | obj->pages = NULL; |
1656 | return PTR_ERR(page); | 1642 | return PTR_ERR(page); |
1657 | } | 1643 | } |
1658 | 1644 | ||
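
A note on the pair of page helpers here: each page returned by read_cache_page_gfp() comes back with an extra reference held, which is what keeps the shmem backing store resident while the object is in use; both the err_pages unwind above and i915_gem_object_put_pages_gtt() below drop that reference again via page_cache_release().
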
1659 | static void | 1645 | static void |
1660 | i915_gem_object_put_pages_gtt(struct drm_gem_object *obj) | 1646 | i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) |
1661 | { | 1647 | { |
1662 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1648 | int page_count = obj->base.size / PAGE_SIZE; |
1663 | int page_count = obj->size / PAGE_SIZE; | ||
1664 | int i; | 1649 | int i; |
1665 | 1650 | ||
1666 | BUG_ON(obj_priv->madv == __I915_MADV_PURGED); | 1651 | BUG_ON(obj->madv == __I915_MADV_PURGED); |
1667 | 1652 | ||
1668 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1653 | if (obj->tiling_mode != I915_TILING_NONE) |
1669 | i915_gem_object_save_bit_17_swizzle(obj); | 1654 | i915_gem_object_save_bit_17_swizzle(obj); |
1670 | 1655 | ||
1671 | if (obj_priv->madv == I915_MADV_DONTNEED) | 1656 | if (obj->madv == I915_MADV_DONTNEED) |
1672 | obj_priv->dirty = 0; | 1657 | obj->dirty = 0; |
1673 | 1658 | ||
1674 | for (i = 0; i < page_count; i++) { | 1659 | for (i = 0; i < page_count; i++) { |
1675 | if (obj_priv->dirty) | 1660 | if (obj->dirty) |
1676 | set_page_dirty(obj_priv->pages[i]); | 1661 | set_page_dirty(obj->pages[i]); |
1677 | 1662 | ||
1678 | if (obj_priv->madv == I915_MADV_WILLNEED) | 1663 | if (obj->madv == I915_MADV_WILLNEED) |
1679 | mark_page_accessed(obj_priv->pages[i]); | 1664 | mark_page_accessed(obj->pages[i]); |
1680 | 1665 | ||
1681 | page_cache_release(obj_priv->pages[i]); | 1666 | page_cache_release(obj->pages[i]); |
1682 | } | 1667 | } |
1683 | obj_priv->dirty = 0; | 1668 | obj->dirty = 0; |
1684 | 1669 | ||
1685 | drm_free_large(obj_priv->pages); | 1670 | drm_free_large(obj->pages); |
1686 | obj_priv->pages = NULL; | 1671 | obj->pages = NULL; |
1687 | } | 1672 | } |
1688 | 1673 | ||
1689 | static uint32_t | 1674 | static uint32_t |
@@ -1695,47 +1680,44 @@ i915_gem_next_request_seqno(struct drm_device *dev, | |||
1695 | } | 1680 | } |
1696 | 1681 | ||
1697 | static void | 1682 | static void |
1698 | i915_gem_object_move_to_active(struct drm_gem_object *obj, | 1683 | i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
1699 | struct intel_ring_buffer *ring) | 1684 | struct intel_ring_buffer *ring) |
1700 | { | 1685 | { |
1701 | struct drm_device *dev = obj->dev; | 1686 | struct drm_device *dev = obj->base.dev; |
1702 | struct drm_i915_private *dev_priv = dev->dev_private; | 1687 | struct drm_i915_private *dev_priv = dev->dev_private; |
1703 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1704 | uint32_t seqno = i915_gem_next_request_seqno(dev, ring); | 1688 | uint32_t seqno = i915_gem_next_request_seqno(dev, ring); |
1705 | 1689 | ||
1706 | BUG_ON(ring == NULL); | 1690 | BUG_ON(ring == NULL); |
1707 | obj_priv->ring = ring; | 1691 | obj->ring = ring; |
1708 | 1692 | ||
1709 | /* Add a reference if we're newly entering the active list. */ | 1693 | /* Add a reference if we're newly entering the active list. */ |
1710 | if (!obj_priv->active) { | 1694 | if (!obj->active) { |
1711 | drm_gem_object_reference(obj); | 1695 | drm_gem_object_reference(&obj->base); |
1712 | obj_priv->active = 1; | 1696 | obj->active = 1; |
1713 | } | 1697 | } |
1714 | 1698 | ||
1715 | /* Move from whatever list we were on to the tail of execution. */ | 1699 | /* Move from whatever list we were on to the tail of execution. */ |
1716 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list); | 1700 | list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); |
1717 | list_move_tail(&obj_priv->ring_list, &ring->active_list); | 1701 | list_move_tail(&obj->ring_list, &ring->active_list); |
1718 | obj_priv->last_rendering_seqno = seqno; | 1702 | obj->last_rendering_seqno = seqno; |
1719 | } | 1703 | } |
1720 | 1704 | ||
1721 | static void | 1705 | static void |
1722 | i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | 1706 | i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) |
1723 | { | 1707 | { |
1724 | struct drm_device *dev = obj->dev; | 1708 | struct drm_device *dev = obj->base.dev; |
1725 | drm_i915_private_t *dev_priv = dev->dev_private; | 1709 | drm_i915_private_t *dev_priv = dev->dev_private; |
1726 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1727 | 1710 | ||
1728 | BUG_ON(!obj_priv->active); | 1711 | BUG_ON(!obj->active); |
1729 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list); | 1712 | list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); |
1730 | list_del_init(&obj_priv->ring_list); | 1713 | list_del_init(&obj->ring_list); |
1731 | obj_priv->last_rendering_seqno = 0; | 1714 | obj->last_rendering_seqno = 0; |
1732 | } | 1715 | } |
1733 | 1716 | ||
1734 | /* Immediately discard the backing storage */ | 1717 | /* Immediately discard the backing storage */ |
1735 | static void | 1718 | static void |
1736 | i915_gem_object_truncate(struct drm_gem_object *obj) | 1719 | i915_gem_object_truncate(struct drm_i915_gem_object *obj) |
1737 | { | 1720 | { |
1738 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1739 | struct inode *inode; | 1721 | struct inode *inode; |
1740 | 1722 | ||
1741 | /* Our goal here is to return as much of the memory as | 1723 | /* Our goal here is to return as much of the memory as |
@@ -1744,40 +1726,39 @@ i915_gem_object_truncate(struct drm_gem_object *obj) | |||
1744 | * backing pages, *now*. Here we mirror the actions taken | 1726 | * backing pages, *now*. Here we mirror the actions taken |
1745 | * by shmem_delete_inode() to release the backing store. | 1727 | * by shmem_delete_inode() to release the backing store. |
1746 | */ | 1728 | */ |
1747 | inode = obj->filp->f_path.dentry->d_inode; | 1729 | inode = obj->base.filp->f_path.dentry->d_inode; |
1748 | truncate_inode_pages(inode->i_mapping, 0); | 1730 | truncate_inode_pages(inode->i_mapping, 0); |
1749 | if (inode->i_op->truncate_range) | 1731 | if (inode->i_op->truncate_range) |
1750 | inode->i_op->truncate_range(inode, 0, (loff_t)-1); | 1732 | inode->i_op->truncate_range(inode, 0, (loff_t)-1); |
1751 | 1733 | ||
1752 | obj_priv->madv = __I915_MADV_PURGED; | 1734 | obj->madv = __I915_MADV_PURGED; |
1753 | } | 1735 | } |
1754 | 1736 | ||
1755 | static inline int | 1737 | static inline int |
1756 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) | 1738 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) |
1757 | { | 1739 | { |
1758 | return obj_priv->madv == I915_MADV_DONTNEED; | 1740 | return obj->madv == I915_MADV_DONTNEED; |
1759 | } | 1741 | } |
1760 | 1742 | ||
1761 | static void | 1743 | static void |
1762 | i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | 1744 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) |
1763 | { | 1745 | { |
1764 | struct drm_device *dev = obj->dev; | 1746 | struct drm_device *dev = obj->base.dev; |
1765 | drm_i915_private_t *dev_priv = dev->dev_private; | 1747 | drm_i915_private_t *dev_priv = dev->dev_private; |
1766 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1767 | 1748 | ||
1768 | if (obj_priv->pin_count != 0) | 1749 | if (obj->pin_count != 0) |
1769 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); | 1750 | list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); |
1770 | else | 1751 | else |
1771 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | 1752 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
1772 | list_del_init(&obj_priv->ring_list); | 1753 | list_del_init(&obj->ring_list); |
1773 | 1754 | ||
1774 | BUG_ON(!list_empty(&obj_priv->gpu_write_list)); | 1755 | BUG_ON(!list_empty(&obj->gpu_write_list)); |
1775 | 1756 | ||
1776 | obj_priv->last_rendering_seqno = 0; | 1757 | obj->last_rendering_seqno = 0; |
1777 | obj_priv->ring = NULL; | 1758 | obj->ring = NULL; |
1778 | if (obj_priv->active) { | 1759 | if (obj->active) { |
1779 | obj_priv->active = 0; | 1760 | obj->active = 0; |
1780 | drm_gem_object_unreference(obj); | 1761 | drm_gem_object_unreference(&obj->base); |
1781 | } | 1762 | } |
1782 | WARN_ON(i915_verify_lists(dev)); | 1763 | WARN_ON(i915_verify_lists(dev)); |
1783 | } | 1764 | } |
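
Taken together, the move_to_* helpers above spell out the object lifecycle: a buffer joins the active list (taking an extra GEM reference) when a request that uses it is emitted, moves to the flushing list once its last request has retired while a GPU write domain is still outstanding, and lands on the inactive (or pinned) list when fully idle, at which point the extra reference is dropped.
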
@@ -1788,30 +1769,28 @@ i915_gem_process_flushing_list(struct drm_device *dev, | |||
1788 | struct intel_ring_buffer *ring) | 1769 | struct intel_ring_buffer *ring) |
1789 | { | 1770 | { |
1790 | drm_i915_private_t *dev_priv = dev->dev_private; | 1771 | drm_i915_private_t *dev_priv = dev->dev_private; |
1791 | struct drm_i915_gem_object *obj_priv, *next; | 1772 | struct drm_i915_gem_object *obj, *next; |
1792 | 1773 | ||
1793 | list_for_each_entry_safe(obj_priv, next, | 1774 | list_for_each_entry_safe(obj, next, |
1794 | &ring->gpu_write_list, | 1775 | &ring->gpu_write_list, |
1795 | gpu_write_list) { | 1776 | gpu_write_list) { |
1796 | struct drm_gem_object *obj = &obj_priv->base; | 1777 | if (obj->base.write_domain & flush_domains) { |
1778 | uint32_t old_write_domain = obj->base.write_domain; | ||
1797 | 1779 | ||
1798 | if (obj->write_domain & flush_domains) { | 1780 | obj->base.write_domain = 0; |
1799 | uint32_t old_write_domain = obj->write_domain; | 1781 | list_del_init(&obj->gpu_write_list); |
1800 | |||
1801 | obj->write_domain = 0; | ||
1802 | list_del_init(&obj_priv->gpu_write_list); | ||
1803 | i915_gem_object_move_to_active(obj, ring); | 1782 | i915_gem_object_move_to_active(obj, ring); |
1804 | 1783 | ||
1805 | /* update the fence lru list */ | 1784 | /* update the fence lru list */ |
1806 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 1785 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
1807 | struct drm_i915_fence_reg *reg = | 1786 | struct drm_i915_fence_reg *reg = |
1808 | &dev_priv->fence_regs[obj_priv->fence_reg]; | 1787 | &dev_priv->fence_regs[obj->fence_reg]; |
1809 | list_move_tail(®->lru_list, | 1788 | list_move_tail(®->lru_list, |
1810 | &dev_priv->mm.fence_list); | 1789 | &dev_priv->mm.fence_list); |
1811 | } | 1790 | } |
1812 | 1791 | ||
1813 | trace_i915_gem_object_change_domain(obj, | 1792 | trace_i915_gem_object_change_domain(obj, |
1814 | obj->read_domains, | 1793 | obj->base.read_domains, |
1815 | old_write_domain); | 1794 | old_write_domain); |
1816 | } | 1795 | } |
1817 | } | 1796 | } |
@@ -1912,22 +1891,22 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, | |||
1912 | } | 1891 | } |
1913 | 1892 | ||
1914 | while (!list_empty(&ring->active_list)) { | 1893 | while (!list_empty(&ring->active_list)) { |
1915 | struct drm_i915_gem_object *obj_priv; | 1894 | struct drm_i915_gem_object *obj; |
1916 | 1895 | ||
1917 | obj_priv = list_first_entry(&ring->active_list, | 1896 | obj = list_first_entry(&ring->active_list, |
1918 | struct drm_i915_gem_object, | 1897 | struct drm_i915_gem_object, |
1919 | ring_list); | 1898 | ring_list); |
1920 | 1899 | ||
1921 | obj_priv->base.write_domain = 0; | 1900 | obj->base.write_domain = 0; |
1922 | list_del_init(&obj_priv->gpu_write_list); | 1901 | list_del_init(&obj->gpu_write_list); |
1923 | i915_gem_object_move_to_inactive(&obj_priv->base); | 1902 | i915_gem_object_move_to_inactive(obj); |
1924 | } | 1903 | } |
1925 | } | 1904 | } |
1926 | 1905 | ||
1927 | void i915_gem_reset(struct drm_device *dev) | 1906 | void i915_gem_reset(struct drm_device *dev) |
1928 | { | 1907 | { |
1929 | struct drm_i915_private *dev_priv = dev->dev_private; | 1908 | struct drm_i915_private *dev_priv = dev->dev_private; |
1930 | struct drm_i915_gem_object *obj_priv; | 1909 | struct drm_i915_gem_object *obj; |
1931 | int i; | 1910 | int i; |
1932 | 1911 | ||
1933 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); | 1912 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); |
@@ -1939,23 +1918,23 @@ void i915_gem_reset(struct drm_device *dev) | |||
1939 | * lost bo to the inactive list. | 1918 | * lost bo to the inactive list. |
1940 | */ | 1919 | */ |
1941 | while (!list_empty(&dev_priv->mm.flushing_list)) { | 1920 | while (!list_empty(&dev_priv->mm.flushing_list)) { |
1942 | obj_priv = list_first_entry(&dev_priv->mm.flushing_list, | 1921 | obj = list_first_entry(&dev_priv->mm.flushing_list, |
1943 | struct drm_i915_gem_object, | 1922 | struct drm_i915_gem_object, |
1944 | mm_list); | 1923 | mm_list); |
1945 | 1924 | ||
1946 | obj_priv->base.write_domain = 0; | 1925 | obj->base.write_domain = 0; |
1947 | list_del_init(&obj_priv->gpu_write_list); | 1926 | list_del_init(&obj->gpu_write_list); |
1948 | i915_gem_object_move_to_inactive(&obj_priv->base); | 1927 | i915_gem_object_move_to_inactive(obj); |
1949 | } | 1928 | } |
1950 | 1929 | ||
1951 | /* Move everything out of the GPU domains to ensure we do any | 1930 | /* Move everything out of the GPU domains to ensure we do any |
1952 | * necessary invalidation upon reuse. | 1931 | * necessary invalidation upon reuse. |
1953 | */ | 1932 | */ |
1954 | list_for_each_entry(obj_priv, | 1933 | list_for_each_entry(obj, |
1955 | &dev_priv->mm.inactive_list, | 1934 | &dev_priv->mm.inactive_list, |
1956 | mm_list) | 1935 | mm_list) |
1957 | { | 1936 | { |
1958 | obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; | 1937 | obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; |
1959 | } | 1938 | } |
1960 | 1939 | ||
1961 | /* The fence registers are invalidated so clear them out */ | 1940 | /* The fence registers are invalidated so clear them out */ |
@@ -2008,18 +1987,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev, | |||
2008 | * by the ringbuffer to the flushing/inactive lists as appropriate. | 1987 | * by the ringbuffer to the flushing/inactive lists as appropriate. |
2009 | */ | 1988 | */ |
2010 | while (!list_empty(&ring->active_list)) { | 1989 | while (!list_empty(&ring->active_list)) { |
2011 | struct drm_gem_object *obj; | 1990 | struct drm_i915_gem_object *obj; |
2012 | struct drm_i915_gem_object *obj_priv; | ||
2013 | 1991 | ||
2014 | obj_priv = list_first_entry(&ring->active_list, | 1992 | obj = list_first_entry(&ring->active_list, |
2015 | struct drm_i915_gem_object, | 1993 | struct drm_i915_gem_object, |
2016 | ring_list); | 1994 | ring_list); |
2017 | 1995 | ||
2018 | if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) | 1996 | if (!i915_seqno_passed(seqno, obj->last_rendering_seqno)) |
2019 | break; | 1997 | break; |
2020 | 1998 | ||
2021 | obj = &obj_priv->base; | 1999 | if (obj->base.write_domain != 0) |
2022 | if (obj->write_domain != 0) | ||
2023 | i915_gem_object_move_to_flushing(obj); | 2000 | i915_gem_object_move_to_flushing(obj); |
2024 | else | 2001 | else |
2025 | i915_gem_object_move_to_inactive(obj); | 2002 | i915_gem_object_move_to_inactive(obj); |
@@ -2040,17 +2017,17 @@ i915_gem_retire_requests(struct drm_device *dev) | |||
2040 | drm_i915_private_t *dev_priv = dev->dev_private; | 2017 | drm_i915_private_t *dev_priv = dev->dev_private; |
2041 | 2018 | ||
2042 | if (!list_empty(&dev_priv->mm.deferred_free_list)) { | 2019 | if (!list_empty(&dev_priv->mm.deferred_free_list)) { |
2043 | struct drm_i915_gem_object *obj_priv, *tmp; | 2020 | struct drm_i915_gem_object *obj, *next; |
2044 | 2021 | ||
2045 | /* We must be careful that during unbind() we do not | 2022 | /* We must be careful that during unbind() we do not |
2046 | * accidentally infinitely recurse into retire requests. | 2023 | * accidentally infinitely recurse into retire requests. |
2047 | * Currently: | 2024 | * Currently: |
2048 | * retire -> free -> unbind -> wait -> retire_ring | 2025 | * retire -> free -> unbind -> wait -> retire_ring |
2049 | */ | 2026 | */ |
2050 | list_for_each_entry_safe(obj_priv, tmp, | 2027 | list_for_each_entry_safe(obj, next, |
2051 | &dev_priv->mm.deferred_free_list, | 2028 | &dev_priv->mm.deferred_free_list, |
2052 | mm_list) | 2029 | mm_list) |
2053 | i915_gem_free_object_tail(&obj_priv->base); | 2030 | i915_gem_free_object_tail(obj); |
2054 | } | 2031 | } |
2055 | 2032 | ||
2056 | i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); | 2033 | i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); |
@@ -2175,7 +2152,6 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno, | |||
2175 | 2152 | ||
2176 | static void | 2153 | static void |
2177 | i915_gem_flush_ring(struct drm_device *dev, | 2154 | i915_gem_flush_ring(struct drm_device *dev, |
2178 | struct drm_file *file_priv, | ||
2179 | struct intel_ring_buffer *ring, | 2155 | struct intel_ring_buffer *ring, |
2180 | uint32_t invalidate_domains, | 2156 | uint32_t invalidate_domains, |
2181 | uint32_t flush_domains) | 2157 | uint32_t flush_domains) |
@@ -2186,7 +2162,6 @@ i915_gem_flush_ring(struct drm_device *dev, | |||
2186 | 2162 | ||
2187 | static void | 2163 | static void |
2188 | i915_gem_flush(struct drm_device *dev, | 2164 | i915_gem_flush(struct drm_device *dev, |
2189 | struct drm_file *file_priv, | ||
2190 | uint32_t invalidate_domains, | 2165 | uint32_t invalidate_domains, |
2191 | uint32_t flush_domains, | 2166 | uint32_t flush_domains, |
2192 | uint32_t flush_rings) | 2167 | uint32_t flush_rings) |
@@ -2198,16 +2173,13 @@ i915_gem_flush(struct drm_device *dev, | |||
2198 | 2173 | ||
2199 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { | 2174 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { |
2200 | if (flush_rings & RING_RENDER) | 2175 | if (flush_rings & RING_RENDER) |
2201 | i915_gem_flush_ring(dev, file_priv, | 2176 | i915_gem_flush_ring(dev, &dev_priv->render_ring, |
2202 | &dev_priv->render_ring, | ||
2203 | invalidate_domains, flush_domains); | 2177 | invalidate_domains, flush_domains); |
2204 | if (flush_rings & RING_BSD) | 2178 | if (flush_rings & RING_BSD) |
2205 | i915_gem_flush_ring(dev, file_priv, | 2179 | i915_gem_flush_ring(dev, &dev_priv->bsd_ring, |
2206 | &dev_priv->bsd_ring, | ||
2207 | invalidate_domains, flush_domains); | 2180 | invalidate_domains, flush_domains); |
2208 | if (flush_rings & RING_BLT) | 2181 | if (flush_rings & RING_BLT) |
2209 | i915_gem_flush_ring(dev, file_priv, | 2182 | i915_gem_flush_ring(dev, &dev_priv->blt_ring, |
2210 | &dev_priv->blt_ring, | ||
2211 | invalidate_domains, flush_domains); | 2183 | invalidate_domains, flush_domains); |
2212 | } | 2184 | } |
2213 | } | 2185 | } |
@@ -2217,26 +2189,25 @@ i915_gem_flush(struct drm_device *dev, | |||
2217 | * safe to unbind from the GTT or access from the CPU. | 2189 | * safe to unbind from the GTT or access from the CPU. |
2218 | */ | 2190 | */ |
2219 | static int | 2191 | static int |
2220 | i915_gem_object_wait_rendering(struct drm_gem_object *obj, | 2192 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, |
2221 | bool interruptible) | 2193 | bool interruptible) |
2222 | { | 2194 | { |
2223 | struct drm_device *dev = obj->dev; | 2195 | struct drm_device *dev = obj->base.dev; |
2224 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2225 | int ret; | 2196 | int ret; |
2226 | 2197 | ||
2227 | /* This function only exists to support waiting for existing rendering, | 2198 | /* This function only exists to support waiting for existing rendering, |
2228 | * not for emitting required flushes. | 2199 | * not for emitting required flushes. |
2229 | */ | 2200 | */ |
2230 | BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); | 2201 | BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0); |
2231 | 2202 | ||
2232 | /* If there is rendering queued on the buffer being evicted, wait for | 2203 | /* If there is rendering queued on the buffer being evicted, wait for |
2233 | * it. | 2204 | * it. |
2234 | */ | 2205 | */ |
2235 | if (obj_priv->active) { | 2206 | if (obj->active) { |
2236 | ret = i915_do_wait_request(dev, | 2207 | ret = i915_do_wait_request(dev, |
2237 | obj_priv->last_rendering_seqno, | 2208 | obj->last_rendering_seqno, |
2238 | interruptible, | 2209 | interruptible, |
2239 | obj_priv->ring); | 2210 | obj->ring); |
2240 | if (ret) | 2211 | if (ret) |
2241 | return ret; | 2212 | return ret; |
2242 | } | 2213 | } |
@@ -2248,17 +2219,16 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj, | |||
2248 | * Unbinds an object from the GTT aperture. | 2219 | * Unbinds an object from the GTT aperture. |
2249 | */ | 2220 | */ |
2250 | int | 2221 | int |
2251 | i915_gem_object_unbind(struct drm_gem_object *obj) | 2222 | i915_gem_object_unbind(struct drm_i915_gem_object *obj) |
2252 | { | 2223 | { |
2253 | struct drm_device *dev = obj->dev; | 2224 | struct drm_device *dev = obj->base.dev; |
2254 | struct drm_i915_private *dev_priv = dev->dev_private; | 2225 | struct drm_i915_private *dev_priv = dev->dev_private; |
2255 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2256 | int ret = 0; | 2226 | int ret = 0; |
2257 | 2227 | ||
2258 | if (obj_priv->gtt_space == NULL) | 2228 | if (obj->gtt_space == NULL) |
2259 | return 0; | 2229 | return 0; |
2260 | 2230 | ||
2261 | if (obj_priv->pin_count != 0) { | 2231 | if (obj->pin_count != 0) { |
2262 | DRM_ERROR("Attempting to unbind pinned buffer\n"); | 2232 | DRM_ERROR("Attempting to unbind pinned buffer\n"); |
2263 | return -EINVAL; | 2233 | return -EINVAL; |
2264 | } | 2234 | } |
@@ -2281,27 +2251,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2281 | */ | 2251 | */ |
2282 | if (ret) { | 2252 | if (ret) { |
2283 | i915_gem_clflush_object(obj); | 2253 | i915_gem_clflush_object(obj); |
2284 | obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU; | 2254 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
2285 | } | 2255 | } |
2286 | 2256 | ||
2287 | /* release the fence reg _after_ flushing */ | 2257 | /* release the fence reg _after_ flushing */ |
2288 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | 2258 | if (obj->fence_reg != I915_FENCE_REG_NONE) |
2289 | i915_gem_clear_fence_reg(obj); | 2259 | i915_gem_clear_fence_reg(obj); |
2290 | 2260 | ||
2291 | i915_gem_gtt_unbind_object(obj); | 2261 | i915_gem_gtt_unbind_object(obj); |
2292 | 2262 | ||
2293 | i915_gem_object_put_pages_gtt(obj); | 2263 | i915_gem_object_put_pages_gtt(obj); |
2294 | 2264 | ||
2295 | i915_gem_info_remove_gtt(dev_priv, obj_priv); | 2265 | i915_gem_info_remove_gtt(dev_priv, obj); |
2296 | list_del_init(&obj_priv->mm_list); | 2266 | list_del_init(&obj->mm_list); |
2297 | /* Avoid an unnecessary call to unbind on rebind. */ | 2267 | /* Avoid an unnecessary call to unbind on rebind. */ |
2298 | obj_priv->map_and_fenceable = true; | 2268 | obj->map_and_fenceable = true; |
2299 | 2269 | ||
2300 | drm_mm_put_block(obj_priv->gtt_space); | 2270 | drm_mm_put_block(obj->gtt_space); |
2301 | obj_priv->gtt_space = NULL; | 2271 | obj->gtt_space = NULL; |
2302 | obj_priv->gtt_offset = 0; | 2272 | obj->gtt_offset = 0; |
2303 | 2273 | ||
2304 | if (i915_gem_object_is_purgeable(obj_priv)) | 2274 | if (i915_gem_object_is_purgeable(obj)) |
2305 | i915_gem_object_truncate(obj); | 2275 | i915_gem_object_truncate(obj); |
2306 | 2276 | ||
2307 | trace_i915_gem_object_unbind(obj); | 2277 | trace_i915_gem_object_unbind(obj); |
@@ -2315,7 +2285,7 @@ static int i915_ring_idle(struct drm_device *dev, | |||
2315 | if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) | 2285 | if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) |
2316 | return 0; | 2286 | return 0; |
2317 | 2287 | ||
2318 | i915_gem_flush_ring(dev, NULL, ring, | 2288 | i915_gem_flush_ring(dev, ring, |
2319 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 2289 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
2320 | return i915_wait_request(dev, | 2290 | return i915_wait_request(dev, |
2321 | i915_gem_next_request_seqno(dev, ring), | 2291 | i915_gem_next_request_seqno(dev, ring), |
@@ -2350,89 +2320,86 @@ i915_gpu_idle(struct drm_device *dev) | |||
2350 | return 0; | 2320 | return 0; |
2351 | } | 2321 | } |
2352 | 2322 | ||
2353 | static void sandybridge_write_fence_reg(struct drm_gem_object *obj) | 2323 | static void sandybridge_write_fence_reg(struct drm_i915_gem_object *obj) |
2354 | { | 2324 | { |
2355 | struct drm_device *dev = obj->dev; | 2325 | struct drm_device *dev = obj->base.dev; |
2356 | drm_i915_private_t *dev_priv = dev->dev_private; | 2326 | drm_i915_private_t *dev_priv = dev->dev_private; |
2357 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2327 | u32 size = obj->gtt_space->size; |
2358 | u32 size = i915_gem_get_gtt_size(obj_priv); | 2328 | int regnum = obj->fence_reg; |
2359 | int regnum = obj_priv->fence_reg; | ||
2360 | uint64_t val; | 2329 | uint64_t val; |
2361 | 2330 | ||
2362 | val = (uint64_t)((obj_priv->gtt_offset + size - 4096) & | 2331 | val = (uint64_t)((obj->gtt_offset + size - 4096) & |
2363 | 0xfffff000) << 32; | 2332 | 0xfffff000) << 32; |
2364 | val |= obj_priv->gtt_offset & 0xfffff000; | 2333 | val |= obj->gtt_offset & 0xfffff000; |
2365 | val |= (uint64_t)((obj_priv->stride / 128) - 1) << | 2334 | val |= (uint64_t)((obj->stride / 128) - 1) << |
2366 | SANDYBRIDGE_FENCE_PITCH_SHIFT; | 2335 | SANDYBRIDGE_FENCE_PITCH_SHIFT; |
2367 | 2336 | ||
2368 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2337 | if (obj->tiling_mode == I915_TILING_Y) |
2369 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | 2338 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; |
2370 | val |= I965_FENCE_REG_VALID; | 2339 | val |= I965_FENCE_REG_VALID; |
2371 | 2340 | ||
2372 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); | 2341 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); |
2373 | } | 2342 | } |
2374 | 2343 | ||
2375 | static void i965_write_fence_reg(struct drm_gem_object *obj) | 2344 | static void i965_write_fence_reg(struct drm_i915_gem_object *obj) |
2376 | { | 2345 | { |
2377 | struct drm_device *dev = obj->dev; | 2346 | struct drm_device *dev = obj->base.dev; |
2378 | drm_i915_private_t *dev_priv = dev->dev_private; | 2347 | drm_i915_private_t *dev_priv = dev->dev_private; |
2379 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2348 | u32 size = obj->gtt_space->size; |
2380 | u32 size = i915_gem_get_gtt_size(obj_priv); | 2349 | int regnum = obj->fence_reg; |
2381 | int regnum = obj_priv->fence_reg; | ||
2382 | uint64_t val; | 2350 | uint64_t val; |
2383 | 2351 | ||
2384 | val = (uint64_t)((obj_priv->gtt_offset + size - 4096) & | 2352 | val = (uint64_t)((obj->gtt_offset + size - 4096) & |
2385 | 0xfffff000) << 32; | 2353 | 0xfffff000) << 32; |
2386 | val |= obj_priv->gtt_offset & 0xfffff000; | 2354 | val |= obj->gtt_offset & 0xfffff000; |
2387 | val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; | 2355 | val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; |
2388 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2356 | if (obj->tiling_mode == I915_TILING_Y) |
2389 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | 2357 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; |
2390 | val |= I965_FENCE_REG_VALID; | 2358 | val |= I965_FENCE_REG_VALID; |
2391 | 2359 | ||
2392 | I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); | 2360 | I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); |
2393 | } | 2361 | } |
2394 | 2362 | ||
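
A reading note on the sandybridge and i965 fence writers above: the upper 32 bits of val carry the page-aligned address of the last page covered (gtt_offset + size - 4096), the lower 32 bits carry the start address, and the rest packs the pitch in 128-byte units minus one together with the Y-tiling and valid bits; in other words the register describes the whole range [gtt_offset, gtt_offset + size).
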
2395 | static void i915_write_fence_reg(struct drm_gem_object *obj) | 2363 | static void i915_write_fence_reg(struct drm_i915_gem_object *obj) |
2396 | { | 2364 | { |
2397 | struct drm_device *dev = obj->dev; | 2365 | struct drm_device *dev = obj->base.dev; |
2398 | drm_i915_private_t *dev_priv = dev->dev_private; | 2366 | drm_i915_private_t *dev_priv = dev->dev_private; |
2399 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2367 | u32 size = obj->gtt_space->size; |
2400 | u32 size = i915_gem_get_gtt_size(obj_priv); | ||
2401 | uint32_t fence_reg, val, pitch_val; | 2368 | uint32_t fence_reg, val, pitch_val; |
2402 | int tile_width; | 2369 | int tile_width; |
2403 | 2370 | ||
2404 | if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || | 2371 | if ((obj->gtt_offset & ~I915_FENCE_START_MASK) || |
2405 | (obj_priv->gtt_offset & (size - 1))) { | 2372 | (obj->gtt_offset & (size - 1))) { |
2406 | WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n", | 2373 | WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n", |
2407 | __func__, obj_priv->gtt_offset, obj_priv->map_and_fenceable, size, | 2374 | __func__, obj->gtt_offset, obj->map_and_fenceable, size, |
2408 | obj_priv->gtt_space->start, obj_priv->gtt_space->size); | 2375 | obj->gtt_space->start, obj->gtt_space->size); |
2409 | return; | 2376 | return; |
2410 | } | 2377 | } |
2411 | 2378 | ||
2412 | if (obj_priv->tiling_mode == I915_TILING_Y && | 2379 | if (obj->tiling_mode == I915_TILING_Y && |
2413 | HAS_128_BYTE_Y_TILING(dev)) | 2380 | HAS_128_BYTE_Y_TILING(dev)) |
2414 | tile_width = 128; | 2381 | tile_width = 128; |
2415 | else | 2382 | else |
2416 | tile_width = 512; | 2383 | tile_width = 512; |
2417 | 2384 | ||
2418 | /* Note: pitch better be a power of two tile widths */ | 2385 | /* Note: pitch better be a power of two tile widths */ |
2419 | pitch_val = obj_priv->stride / tile_width; | 2386 | pitch_val = obj->stride / tile_width; |
2420 | pitch_val = ffs(pitch_val) - 1; | 2387 | pitch_val = ffs(pitch_val) - 1; |
2421 | 2388 | ||
2422 | if (obj_priv->tiling_mode == I915_TILING_Y && | 2389 | if (obj->tiling_mode == I915_TILING_Y && |
2423 | HAS_128_BYTE_Y_TILING(dev)) | 2390 | HAS_128_BYTE_Y_TILING(dev)) |
2424 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | 2391 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); |
2425 | else | 2392 | else |
2426 | WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); | 2393 | WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); |
2427 | 2394 | ||
2428 | val = obj_priv->gtt_offset; | 2395 | val = obj->gtt_offset; |
2429 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2396 | if (obj->tiling_mode == I915_TILING_Y) |
2430 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 2397 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
2431 | val |= I915_FENCE_SIZE_BITS(size); | 2398 | val |= I915_FENCE_SIZE_BITS(size); |
2432 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; | 2399 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; |
2433 | val |= I830_FENCE_REG_VALID; | 2400 | val |= I830_FENCE_REG_VALID; |
2434 | 2401 | ||
2435 | fence_reg = obj_priv->fence_reg; | 2402 | fence_reg = obj->fence_reg; |
2436 | if (fence_reg < 8) | 2403 | if (fence_reg < 8) |
2437 | fence_reg = FENCE_REG_830_0 + fence_reg * 4; | 2404 | fence_reg = FENCE_REG_830_0 + fence_reg * 4; |
2438 | else | 2405 | else |
@@ -2440,30 +2407,29 @@ static void i915_write_fence_reg(struct drm_gem_object *obj) | |||
2440 | I915_WRITE(fence_reg, val); | 2407 | I915_WRITE(fence_reg, val); |
2441 | } | 2408 | } |
2442 | 2409 | ||
2443 | static void i830_write_fence_reg(struct drm_gem_object *obj) | 2410 | static void i830_write_fence_reg(struct drm_i915_gem_object *obj) |
2444 | { | 2411 | { |
2445 | struct drm_device *dev = obj->dev; | 2412 | struct drm_device *dev = obj->base.dev; |
2446 | drm_i915_private_t *dev_priv = dev->dev_private; | 2413 | drm_i915_private_t *dev_priv = dev->dev_private; |
2447 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2414 | u32 size = obj->gtt_space->size; |
2448 | u32 size = i915_gem_get_gtt_size(obj_priv); | 2415 | int regnum = obj->fence_reg; |
2449 | int regnum = obj_priv->fence_reg; | ||
2450 | uint32_t val; | 2416 | uint32_t val; |
2451 | uint32_t pitch_val; | 2417 | uint32_t pitch_val; |
2452 | uint32_t fence_size_bits; | 2418 | uint32_t fence_size_bits; |
2453 | 2419 | ||
2454 | if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) || | 2420 | if ((obj->gtt_offset & ~I830_FENCE_START_MASK) || |
2455 | (obj_priv->gtt_offset & (obj->size - 1))) { | 2421 | (obj->gtt_offset & (obj->base.size - 1))) { |
2456 | WARN(1, "%s: object 0x%08x not 512K or size aligned\n", | 2422 | WARN(1, "%s: object 0x%08x not 512K or size aligned\n", |
2457 | __func__, obj_priv->gtt_offset); | 2423 | __func__, obj->gtt_offset); |
2458 | return; | 2424 | return; |
2459 | } | 2425 | } |
2460 | 2426 | ||
2461 | pitch_val = obj_priv->stride / 128; | 2427 | pitch_val = obj->stride / 128; |
2462 | pitch_val = ffs(pitch_val) - 1; | 2428 | pitch_val = ffs(pitch_val) - 1; |
2463 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | 2429 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); |
2464 | 2430 | ||
2465 | val = obj_priv->gtt_offset; | 2431 | val = obj->gtt_offset; |
2466 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2432 | if (obj->tiling_mode == I915_TILING_Y) |
2467 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 2433 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
2468 | fence_size_bits = I830_FENCE_SIZE_BITS(size); | 2434 | fence_size_bits = I830_FENCE_SIZE_BITS(size); |
2469 | WARN_ON(fence_size_bits & ~0x00000f00); | 2435 | WARN_ON(fence_size_bits & ~0x00000f00); |
@@ -2479,7 +2445,7 @@ static int i915_find_fence_reg(struct drm_device *dev, | |||
2479 | { | 2445 | { |
2480 | struct drm_i915_private *dev_priv = dev->dev_private; | 2446 | struct drm_i915_private *dev_priv = dev->dev_private; |
2481 | struct drm_i915_fence_reg *reg; | 2447 | struct drm_i915_fence_reg *reg; |
2482 | struct drm_i915_gem_object *obj_priv = NULL; | 2448 | struct drm_i915_gem_object *obj = NULL; |
2483 | int i, avail, ret; | 2449 | int i, avail, ret; |
2484 | 2450 | ||
2485 | /* First try to find a free reg */ | 2451 | /* First try to find a free reg */ |
@@ -2489,9 +2455,8 @@ static int i915_find_fence_reg(struct drm_device *dev, | |||
2489 | if (!reg->obj) | 2455 | if (!reg->obj) |
2490 | return i; | 2456 | return i; |
2491 | 2457 | ||
2492 | obj_priv = to_intel_bo(reg->obj); | 2458 | if (!reg->obj->pin_count) |
2493 | if (!obj_priv->pin_count) | 2459 | avail++; |
2494 | avail++; | ||
2495 | } | 2460 | } |
2496 | 2461 | ||
2497 | if (avail == 0) | 2462 | if (avail == 0) |
@@ -2501,12 +2466,12 @@ static int i915_find_fence_reg(struct drm_device *dev, | |||
2501 | avail = I915_FENCE_REG_NONE; | 2466 | avail = I915_FENCE_REG_NONE; |
2502 | list_for_each_entry(reg, &dev_priv->mm.fence_list, | 2467 | list_for_each_entry(reg, &dev_priv->mm.fence_list, |
2503 | lru_list) { | 2468 | lru_list) { |
2504 | obj_priv = to_intel_bo(reg->obj); | 2469 | obj = reg->obj; |
2505 | if (obj_priv->pin_count) | 2470 | if (obj->pin_count) |
2506 | continue; | 2471 | continue; |
2507 | 2472 | ||
2508 | /* found one! */ | 2473 | /* found one! */ |
2509 | avail = obj_priv->fence_reg; | 2474 | avail = obj->fence_reg; |
2510 | break; | 2475 | break; |
2511 | } | 2476 | } |
2512 | 2477 | ||
@@ -2516,9 +2481,9 @@ static int i915_find_fence_reg(struct drm_device *dev, | |||
2516 | * might drop that one, causing a use-after-free in it. So hold a | 2481 | * might drop that one, causing a use-after-free in it. So hold a |
2517 | * private reference to obj like the other callers of put_fence_reg | 2482 | * private reference to obj like the other callers of put_fence_reg |
2518 | * (set_tiling ioctl) do. */ | 2483 | * (set_tiling ioctl) do. */ |
2519 | drm_gem_object_reference(&obj_priv->base); | 2484 | drm_gem_object_reference(&obj->base); |
2520 | ret = i915_gem_object_put_fence_reg(&obj_priv->base, interruptible); | 2485 | ret = i915_gem_object_put_fence_reg(obj, interruptible); |
2521 | drm_gem_object_unreference(&obj_priv->base); | 2486 | drm_gem_object_unreference(&obj->base); |
2522 | if (ret != 0) | 2487 | if (ret != 0) |
2523 | return ret; | 2488 | return ret; |
2524 | 2489 | ||
@@ -2539,39 +2504,38 @@ static int i915_find_fence_reg(struct drm_device *dev, | |||
2539 | * and tiling format. | 2504 | * and tiling format. |
2540 | */ | 2505 | */ |
2541 | int | 2506 | int |
2542 | i915_gem_object_get_fence_reg(struct drm_gem_object *obj, | 2507 | i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, |
2543 | bool interruptible) | 2508 | bool interruptible) |
2544 | { | 2509 | { |
2545 | struct drm_device *dev = obj->dev; | 2510 | struct drm_device *dev = obj->base.dev; |
2546 | struct drm_i915_private *dev_priv = dev->dev_private; | 2511 | struct drm_i915_private *dev_priv = dev->dev_private; |
2547 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2548 | struct drm_i915_fence_reg *reg = NULL; | 2512 | struct drm_i915_fence_reg *reg = NULL; |
2549 | int ret; | 2513 | int ret; |
2550 | 2514 | ||
2551 | /* Just update our place in the LRU if our fence is getting used. */ | 2515 | /* Just update our place in the LRU if our fence is getting used. */ |
2552 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 2516 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
2553 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 2517 | reg = &dev_priv->fence_regs[obj->fence_reg]; |
2554 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); | 2518 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); |
2555 | return 0; | 2519 | return 0; |
2556 | } | 2520 | } |
2557 | 2521 | ||
2558 | switch (obj_priv->tiling_mode) { | 2522 | switch (obj->tiling_mode) { |
2559 | case I915_TILING_NONE: | 2523 | case I915_TILING_NONE: |
2560 | WARN(1, "allocating a fence for non-tiled object?\n"); | 2524 | WARN(1, "allocating a fence for non-tiled object?\n"); |
2561 | break; | 2525 | break; |
2562 | case I915_TILING_X: | 2526 | case I915_TILING_X: |
2563 | if (!obj_priv->stride) | 2527 | if (!obj->stride) |
2564 | return -EINVAL; | 2528 | return -EINVAL; |
2565 | WARN((obj_priv->stride & (512 - 1)), | 2529 | WARN((obj->stride & (512 - 1)), |
2566 | "object 0x%08x is X tiled but has non-512B pitch\n", | 2530 | "object 0x%08x is X tiled but has non-512B pitch\n", |
2567 | obj_priv->gtt_offset); | 2531 | obj->gtt_offset); |
2568 | break; | 2532 | break; |
2569 | case I915_TILING_Y: | 2533 | case I915_TILING_Y: |
2570 | if (!obj_priv->stride) | 2534 | if (!obj->stride) |
2571 | return -EINVAL; | 2535 | return -EINVAL; |
2572 | WARN((obj_priv->stride & (128 - 1)), | 2536 | WARN((obj->stride & (128 - 1)), |
2573 | "object 0x%08x is Y tiled but has non-128B pitch\n", | 2537 | "object 0x%08x is Y tiled but has non-128B pitch\n", |
2574 | obj_priv->gtt_offset); | 2538 | obj->gtt_offset); |
2575 | break; | 2539 | break; |
2576 | } | 2540 | } |
2577 | 2541 | ||
@@ -2579,8 +2543,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, | |||
2579 | if (ret < 0) | 2543 | if (ret < 0) |
2580 | return ret; | 2544 | return ret; |
2581 | 2545 | ||
2582 | obj_priv->fence_reg = ret; | 2546 | obj->fence_reg = ret; |
2583 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 2547 | reg = &dev_priv->fence_regs[obj->fence_reg]; |
2584 | list_add_tail(®->lru_list, &dev_priv->mm.fence_list); | 2548 | list_add_tail(®->lru_list, &dev_priv->mm.fence_list); |
2585 | 2549 | ||
2586 | reg->obj = obj; | 2550 | reg->obj = obj; |
@@ -2602,8 +2566,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, | |||
2602 | } | 2566 | } |
2603 | 2567 | ||
2604 | trace_i915_gem_object_get_fence(obj, | 2568 | trace_i915_gem_object_get_fence(obj, |
2605 | obj_priv->fence_reg, | 2569 | obj->fence_reg, |
2606 | obj_priv->tiling_mode); | 2570 | obj->tiling_mode); |
2607 | 2571 | ||
2608 | return 0; | 2572 | return 0; |
2609 | } | 2573 | } |
@@ -2613,40 +2577,38 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, | |||
2613 | * @obj: object to clear | 2577 | * @obj: object to clear |
2614 | * | 2578 | * |
2615 | * Zeroes out the fence register itself and clears out the associated | 2579 | * Zeroes out the fence register itself and clears out the associated |
2616 | * data structures in dev_priv and obj_priv. | 2580 | * data structures in dev_priv and obj. |
2617 | */ | 2581 | */ |
2618 | static void | 2582 | static void |
2619 | i915_gem_clear_fence_reg(struct drm_gem_object *obj) | 2583 | i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj) |
2620 | { | 2584 | { |
2621 | struct drm_device *dev = obj->dev; | 2585 | struct drm_device *dev = obj->base.dev; |
2622 | drm_i915_private_t *dev_priv = dev->dev_private; | 2586 | drm_i915_private_t *dev_priv = dev->dev_private; |
2623 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2587 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[obj->fence_reg]; |
2624 | struct drm_i915_fence_reg *reg = | ||
2625 | &dev_priv->fence_regs[obj_priv->fence_reg]; | ||
2626 | uint32_t fence_reg; | 2588 | uint32_t fence_reg; |
2627 | 2589 | ||
2628 | switch (INTEL_INFO(dev)->gen) { | 2590 | switch (INTEL_INFO(dev)->gen) { |
2629 | case 6: | 2591 | case 6: |
2630 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + | 2592 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + |
2631 | (obj_priv->fence_reg * 8), 0); | 2593 | (obj->fence_reg * 8), 0); |
2632 | break; | 2594 | break; |
2633 | case 5: | 2595 | case 5: |
2634 | case 4: | 2596 | case 4: |
2635 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); | 2597 | I915_WRITE64(FENCE_REG_965_0 + (obj->fence_reg * 8), 0); |
2636 | break; | 2598 | break; |
2637 | case 3: | 2599 | case 3: |
2638 | if (obj_priv->fence_reg >= 8) | 2600 | if (obj->fence_reg >= 8) |
2639 | fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; | 2601 | fence_reg = FENCE_REG_945_8 + (obj->fence_reg - 8) * 4; |
2640 | else | 2602 | else |
2641 | case 2: | 2603 | case 2: |
2642 | fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; | 2604 | fence_reg = FENCE_REG_830_0 + obj->fence_reg * 4; |
2643 | 2605 | ||
2644 | I915_WRITE(fence_reg, 0); | 2606 | I915_WRITE(fence_reg, 0); |
2645 | break; | 2607 | break; |
2646 | } | 2608 | } |
2647 | 2609 | ||
2648 | reg->obj = NULL; | 2610 | reg->obj = NULL; |
2649 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | 2611 | obj->fence_reg = I915_FENCE_REG_NONE; |
2650 | list_del_init(®->lru_list); | 2612 | list_del_init(®->lru_list); |
2651 | } | 2613 | } |
2652 | 2614 | ||
@@ -2657,18 +2619,17 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) | |||
2657 | * @interruptible: whether the wait upon the fence is interruptible | 2619 | * @interruptible: whether the wait upon the fence is interruptible |
2658 | * | 2620 | * |
2659 | * Zeroes out the fence register itself and clears out the associated | 2621 | * Zeroes out the fence register itself and clears out the associated |
2660 | * data structures in dev_priv and obj_priv. | 2622 | * data structures in dev_priv and obj. |
2661 | */ | 2623 | */ |
2662 | int | 2624 | int |
2663 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj, | 2625 | i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, |
2664 | bool interruptible) | 2626 | bool interruptible) |
2665 | { | 2627 | { |
2666 | struct drm_device *dev = obj->dev; | 2628 | struct drm_device *dev = obj->base.dev; |
2667 | struct drm_i915_private *dev_priv = dev->dev_private; | 2629 | struct drm_i915_private *dev_priv = dev->dev_private; |
2668 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2669 | struct drm_i915_fence_reg *reg; | 2630 | struct drm_i915_fence_reg *reg; |
2670 | 2631 | ||
2671 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) | 2632 | if (obj->fence_reg == I915_FENCE_REG_NONE) |
2672 | return 0; | 2633 | return 0; |
2673 | 2634 | ||
2674 | /* If we've changed tiling, GTT-mappings of the object | 2635 | /* If we've changed tiling, GTT-mappings of the object |
@@ -2681,7 +2642,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, | |||
2681 | * therefore we must wait for any outstanding access to complete | 2642 | * therefore we must wait for any outstanding access to complete |
2682 | * before clearing the fence. | 2643 | * before clearing the fence. |
2683 | */ | 2644 | */ |
2684 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 2645 | reg = &dev_priv->fence_regs[obj->fence_reg]; |
2685 | if (reg->gpu) { | 2646 | if (reg->gpu) { |
2686 | int ret; | 2647 | int ret; |
2687 | 2648 | ||
@@ -2706,27 +2667,26 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, | |||
2706 | * Finds free space in the GTT aperture and binds the object there. | 2667 | * Finds free space in the GTT aperture and binds the object there. |
2707 | */ | 2668 | */ |
2708 | static int | 2669 | static int |
2709 | i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | 2670 | i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, |
2710 | unsigned alignment, | 2671 | unsigned alignment, |
2711 | bool map_and_fenceable) | 2672 | bool map_and_fenceable) |
2712 | { | 2673 | { |
2713 | struct drm_device *dev = obj->dev; | 2674 | struct drm_device *dev = obj->base.dev; |
2714 | drm_i915_private_t *dev_priv = dev->dev_private; | 2675 | drm_i915_private_t *dev_priv = dev->dev_private; |
2715 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2716 | struct drm_mm_node *free_space; | 2676 | struct drm_mm_node *free_space; |
2717 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; | 2677 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; |
2718 | u32 size, fence_size, fence_alignment, unfenced_alignment; | 2678 | u32 size, fence_size, fence_alignment, unfenced_alignment; |
2719 | bool mappable, fenceable; | 2679 | bool mappable, fenceable; |
2720 | int ret; | 2680 | int ret; |
2721 | 2681 | ||
2722 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 2682 | if (obj->madv != I915_MADV_WILLNEED) { |
2723 | DRM_ERROR("Attempting to bind a purgeable object\n"); | 2683 | DRM_ERROR("Attempting to bind a purgeable object\n"); |
2724 | return -EINVAL; | 2684 | return -EINVAL; |
2725 | } | 2685 | } |
2726 | 2686 | ||
2727 | fence_size = i915_gem_get_gtt_size(obj_priv); | 2687 | fence_size = i915_gem_get_gtt_size(obj); |
2728 | fence_alignment = i915_gem_get_gtt_alignment(obj_priv); | 2688 | fence_alignment = i915_gem_get_gtt_alignment(obj); |
2729 | unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj_priv); | 2689 | unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj); |
2730 | 2690 | ||
2731 | if (alignment == 0) | 2691 | if (alignment == 0) |
2732 | alignment = map_and_fenceable ? fence_alignment : | 2692 | alignment = map_and_fenceable ? fence_alignment : |
@@ -2736,12 +2696,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | |||
2736 | return -EINVAL; | 2696 | return -EINVAL; |
2737 | } | 2697 | } |
2738 | 2698 | ||
2739 | size = map_and_fenceable ? fence_size : obj->size; | 2699 | size = map_and_fenceable ? fence_size : obj->base.size; |
2740 | 2700 | ||
2741 | /* If the object is bigger than the entire aperture, reject it early | 2701 | /* If the object is bigger than the entire aperture, reject it early |
2742 | * before evicting everything in a vain attempt to find space. | 2702 | * before evicting everything in a vain attempt to find space. |
2743 | */ | 2703 | */ |
2744 | if (obj->size > | 2704 | if (obj->base.size > |
2745 | (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { | 2705 | (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { |
2746 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); | 2706 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); |
2747 | return -E2BIG; | 2707 | return -E2BIG; |
@@ -2760,16 +2720,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | |||
2760 | 2720 | ||
2761 | if (free_space != NULL) { | 2721 | if (free_space != NULL) { |
2762 | if (map_and_fenceable) | 2722 | if (map_and_fenceable) |
2763 | obj_priv->gtt_space = | 2723 | obj->gtt_space = |
2764 | drm_mm_get_block_range_generic(free_space, | 2724 | drm_mm_get_block_range_generic(free_space, |
2765 | size, alignment, 0, | 2725 | size, alignment, 0, |
2766 | dev_priv->mm.gtt_mappable_end, | 2726 | dev_priv->mm.gtt_mappable_end, |
2767 | 0); | 2727 | 0); |
2768 | else | 2728 | else |
2769 | obj_priv->gtt_space = | 2729 | obj->gtt_space = |
2770 | drm_mm_get_block(free_space, size, alignment); | 2730 | drm_mm_get_block(free_space, size, alignment); |
2771 | } | 2731 | } |
2772 | if (obj_priv->gtt_space == NULL) { | 2732 | if (obj->gtt_space == NULL) { |
2773 | /* If the gtt is empty and we're still having trouble | 2733 | /* If the gtt is empty and we're still having trouble |
2774 | * fitting our object in, we're out of memory. | 2734 | * fitting our object in, we're out of memory. |
2775 | */ | 2735 | */ |
@@ -2783,8 +2743,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | |||
2783 | 2743 | ||
2784 | ret = i915_gem_object_get_pages_gtt(obj, gfpmask); | 2744 | ret = i915_gem_object_get_pages_gtt(obj, gfpmask); |
2785 | if (ret) { | 2745 | if (ret) { |
2786 | drm_mm_put_block(obj_priv->gtt_space); | 2746 | drm_mm_put_block(obj->gtt_space); |
2787 | obj_priv->gtt_space = NULL; | 2747 | obj->gtt_space = NULL; |
2788 | 2748 | ||
2789 | if (ret == -ENOMEM) { | 2749 | if (ret == -ENOMEM) { |
2790 | /* first try to clear up some space from the GTT */ | 2750 | /* first try to clear up some space from the GTT */ |
@@ -2810,8 +2770,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | |||
2810 | ret = i915_gem_gtt_bind_object(obj); | 2770 | ret = i915_gem_gtt_bind_object(obj); |
2811 | if (ret) { | 2771 | if (ret) { |
2812 | i915_gem_object_put_pages_gtt(obj); | 2772 | i915_gem_object_put_pages_gtt(obj); |
2813 | drm_mm_put_block(obj_priv->gtt_space); | 2773 | drm_mm_put_block(obj->gtt_space); |
2814 | obj_priv->gtt_space = NULL; | 2774 | obj->gtt_space = NULL; |
2815 | 2775 | ||
2816 | ret = i915_gem_evict_something(dev, size, | 2776 | ret = i915_gem_evict_something(dev, size, |
2817 | alignment, map_and_fenceable); | 2777 | alignment, map_and_fenceable); |
@@ -2821,65 +2781,61 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | |||
2821 | goto search_free; | 2781 | goto search_free; |
2822 | } | 2782 | } |
2823 | 2783 | ||
2824 | obj_priv->gtt_offset = obj_priv->gtt_space->start; | 2784 | obj->gtt_offset = obj->gtt_space->start; |
2825 | 2785 | ||
2826 | /* keep track of bounds object by adding it to the inactive list */ | 2786 | /* keep track of bounds object by adding it to the inactive list */ |
2827 | list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | 2787 | list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
2828 | i915_gem_info_add_gtt(dev_priv, obj_priv); | 2788 | i915_gem_info_add_gtt(dev_priv, obj); |
2829 | 2789 | ||
2830 | /* Assert that the object is not currently in any GPU domain. As it | 2790 | /* Assert that the object is not currently in any GPU domain. As it |
2831 | * wasn't in the GTT, there shouldn't be any way it could have been in | 2791 | * wasn't in the GTT, there shouldn't be any way it could have been in |
2832 | * a GPU cache | 2792 | * a GPU cache |
2833 | */ | 2793 | */ |
2834 | BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); | 2794 | BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); |
2835 | BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); | 2795 | BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); |
2836 | 2796 | ||
2837 | trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, map_and_fenceable); | 2797 | trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable); |
2838 | 2798 | ||
2839 | fenceable = | 2799 | fenceable = |
2840 | obj_priv->gtt_space->size == fence_size && | 2800 | obj->gtt_space->size == fence_size && |
2841 | (obj_priv->gtt_space->start & (fence_alignment -1)) == 0; | 2801 | (obj->gtt_space->start & (fence_alignment -1)) == 0; |
2842 | 2802 | ||
2843 | mappable = | 2803 | mappable = |
2844 | obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end; | 2804 | obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; |
2845 | 2805 | ||
2846 | obj_priv->map_and_fenceable = mappable && fenceable; | 2806 | obj->map_and_fenceable = mappable && fenceable; |
2847 | 2807 | ||
2848 | return 0; | 2808 | return 0; |
2849 | } | 2809 | } |
2850 | 2810 | ||
2851 | void | 2811 | void |
2852 | i915_gem_clflush_object(struct drm_gem_object *obj) | 2812 | i915_gem_clflush_object(struct drm_i915_gem_object *obj) |
2853 | { | 2813 | { |
2854 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2855 | |||
2856 | /* If we don't have a page list set up, then we're not pinned | 2814 | /* If we don't have a page list set up, then we're not pinned |
2857 | * to GPU, and we can ignore the cache flush because it'll happen | 2815 | * to GPU, and we can ignore the cache flush because it'll happen |
2858 | * again at bind time. | 2816 | * again at bind time. |
2859 | */ | 2817 | */ |
2860 | if (obj_priv->pages == NULL) | 2818 | if (obj->pages == NULL) |
2861 | return; | 2819 | return; |
2862 | 2820 | ||
2863 | trace_i915_gem_object_clflush(obj); | 2821 | trace_i915_gem_object_clflush(obj); |
2864 | 2822 | ||
2865 | drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); | 2823 | drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); |
2866 | } | 2824 | } |
2867 | 2825 | ||
2868 | /** Flushes any GPU write domain for the object if it's dirty. */ | 2826 | /** Flushes any GPU write domain for the object if it's dirty. */ |
2869 | static int | 2827 | static int |
2870 | i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, | 2828 | i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj, |
2871 | bool pipelined) | 2829 | bool pipelined) |
2872 | { | 2830 | { |
2873 | struct drm_device *dev = obj->dev; | 2831 | struct drm_device *dev = obj->base.dev; |
2874 | 2832 | ||
2875 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | 2833 | if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) |
2876 | return 0; | 2834 | return 0; |
2877 | 2835 | ||
2878 | /* Queue the GPU write cache flushing we need. */ | 2836 | /* Queue the GPU write cache flushing we need. */ |
2879 | i915_gem_flush_ring(dev, NULL, | 2837 | i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain); |
2880 | to_intel_bo(obj)->ring, | 2838 | BUG_ON(obj->base.write_domain); |
2881 | 0, obj->write_domain); | ||
2882 | BUG_ON(obj->write_domain); | ||
2883 | 2839 | ||
2884 | if (pipelined) | 2840 | if (pipelined) |
2885 | return 0; | 2841 | return 0; |
@@ -2889,11 +2845,11 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, | |||
2889 | 2845 | ||
2890 | /** Flushes the GTT write domain for the object if it's dirty. */ | 2846 | /** Flushes the GTT write domain for the object if it's dirty. */ |
2891 | static void | 2847 | static void |
2892 | i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) | 2848 | i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) |
2893 | { | 2849 | { |
2894 | uint32_t old_write_domain; | 2850 | uint32_t old_write_domain; |
2895 | 2851 | ||
2896 | if (obj->write_domain != I915_GEM_DOMAIN_GTT) | 2852 | if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) |
2897 | return; | 2853 | return; |
2898 | 2854 | ||
2899 | /* No actual flushing is required for the GTT write domain. Writes | 2855 | /* No actual flushing is required for the GTT write domain. Writes |
@@ -2902,30 +2858,30 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) | |||
2902 | */ | 2858 | */ |
2903 | i915_gem_release_mmap(obj); | 2859 | i915_gem_release_mmap(obj); |
2904 | 2860 | ||
2905 | old_write_domain = obj->write_domain; | 2861 | old_write_domain = obj->base.write_domain; |
2906 | obj->write_domain = 0; | 2862 | obj->base.write_domain = 0; |
2907 | 2863 | ||
2908 | trace_i915_gem_object_change_domain(obj, | 2864 | trace_i915_gem_object_change_domain(obj, |
2909 | obj->read_domains, | 2865 | obj->base.read_domains, |
2910 | old_write_domain); | 2866 | old_write_domain); |
2911 | } | 2867 | } |
2912 | 2868 | ||
2913 | /** Flushes the CPU write domain for the object if it's dirty. */ | 2869 | /** Flushes the CPU write domain for the object if it's dirty. */ |
2914 | static void | 2870 | static void |
2915 | i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | 2871 | i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) |
2916 | { | 2872 | { |
2917 | uint32_t old_write_domain; | 2873 | uint32_t old_write_domain; |
2918 | 2874 | ||
2919 | if (obj->write_domain != I915_GEM_DOMAIN_CPU) | 2875 | if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) |
2920 | return; | 2876 | return; |
2921 | 2877 | ||
2922 | i915_gem_clflush_object(obj); | 2878 | i915_gem_clflush_object(obj); |
2923 | intel_gtt_chipset_flush(); | 2879 | intel_gtt_chipset_flush(); |
2924 | old_write_domain = obj->write_domain; | 2880 | old_write_domain = obj->base.write_domain; |
2925 | obj->write_domain = 0; | 2881 | obj->base.write_domain = 0; |
2926 | 2882 | ||
2927 | trace_i915_gem_object_change_domain(obj, | 2883 | trace_i915_gem_object_change_domain(obj, |
2928 | obj->read_domains, | 2884 | obj->base.read_domains, |
2929 | old_write_domain); | 2885 | old_write_domain); |
2930 | } | 2886 | } |
2931 | 2887 | ||
@@ -2936,14 +2892,13 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | |||
2936 | * flushes to occur. | 2892 | * flushes to occur. |
2937 | */ | 2893 | */ |
2938 | int | 2894 | int |
2939 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | 2895 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write) |
2940 | { | 2896 | { |
2941 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2942 | uint32_t old_write_domain, old_read_domains; | 2897 | uint32_t old_write_domain, old_read_domains; |
2943 | int ret; | 2898 | int ret; |
2944 | 2899 | ||
2945 | /* Not valid to be called on unbound objects. */ | 2900 | /* Not valid to be called on unbound objects. */ |
2946 | if (obj_priv->gtt_space == NULL) | 2901 | if (obj->gtt_space == NULL) |
2947 | return -EINVAL; | 2902 | return -EINVAL; |
2948 | 2903 | ||
2949 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); | 2904 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); |
@@ -2958,18 +2913,18 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2958 | return ret; | 2913 | return ret; |
2959 | } | 2914 | } |
2960 | 2915 | ||
2961 | old_write_domain = obj->write_domain; | 2916 | old_write_domain = obj->base.write_domain; |
2962 | old_read_domains = obj->read_domains; | 2917 | old_read_domains = obj->base.read_domains; |
2963 | 2918 | ||
2964 | /* It should now be out of any other write domains, and we can update | 2919 | /* It should now be out of any other write domains, and we can update |
2965 | * the domain values for our changes. | 2920 | * the domain values for our changes. |
2966 | */ | 2921 | */ |
2967 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | 2922 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); |
2968 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | 2923 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; |
2969 | if (write) { | 2924 | if (write) { |
2970 | obj->read_domains = I915_GEM_DOMAIN_GTT; | 2925 | obj->base.read_domains = I915_GEM_DOMAIN_GTT; |
2971 | obj->write_domain = I915_GEM_DOMAIN_GTT; | 2926 | obj->base.write_domain = I915_GEM_DOMAIN_GTT; |
2972 | obj_priv->dirty = 1; | 2927 | obj->dirty = 1; |
2973 | } | 2928 | } |
2974 | 2929 | ||
2975 | trace_i915_gem_object_change_domain(obj, | 2930 | trace_i915_gem_object_change_domain(obj, |
@@ -2984,15 +2939,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2984 | * wait, as in modesetting process we're not supposed to be interrupted. | 2939 | * wait, as in modesetting process we're not supposed to be interrupted. |
2985 | */ | 2940 | */ |
2986 | int | 2941 | int |
2987 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, | 2942 | i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, |
2988 | bool pipelined) | 2943 | bool pipelined) |
2989 | { | 2944 | { |
2990 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2991 | uint32_t old_read_domains; | 2945 | uint32_t old_read_domains; |
2992 | int ret; | 2946 | int ret; |
2993 | 2947 | ||
2994 | /* Not valid to be called on unbound objects. */ | 2948 | /* Not valid to be called on unbound objects. */ |
2995 | if (obj_priv->gtt_space == NULL) | 2949 | if (obj->gtt_space == NULL) |
2996 | return -EINVAL; | 2950 | return -EINVAL; |
2997 | 2951 | ||
2998 | ret = i915_gem_object_flush_gpu_write_domain(obj, true); | 2952 | ret = i915_gem_object_flush_gpu_write_domain(obj, true); |
@@ -3008,12 +2962,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, | |||
3008 | 2962 | ||
3009 | i915_gem_object_flush_cpu_write_domain(obj); | 2963 | i915_gem_object_flush_cpu_write_domain(obj); |
3010 | 2964 | ||
3011 | old_read_domains = obj->read_domains; | 2965 | old_read_domains = obj->base.read_domains; |
3012 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | 2966 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; |
3013 | 2967 | ||
3014 | trace_i915_gem_object_change_domain(obj, | 2968 | trace_i915_gem_object_change_domain(obj, |
3015 | old_read_domains, | 2969 | old_read_domains, |
3016 | obj->write_domain); | 2970 | obj->base.write_domain); |
3017 | 2971 | ||
3018 | return 0; | 2972 | return 0; |
3019 | } | 2973 | } |
@@ -3026,10 +2980,10 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, | |||
3026 | return 0; | 2980 | return 0; |
3027 | 2981 | ||
3028 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) | 2982 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) |
3029 | i915_gem_flush_ring(obj->base.dev, NULL, obj->ring, | 2983 | i915_gem_flush_ring(obj->base.dev, obj->ring, |
3030 | 0, obj->base.write_domain); | 2984 | 0, obj->base.write_domain); |
3031 | 2985 | ||
3032 | return i915_gem_object_wait_rendering(&obj->base, interruptible); | 2986 | return i915_gem_object_wait_rendering(obj, interruptible); |
3033 | } | 2987 | } |
3034 | 2988 | ||
3035 | /** | 2989 | /** |
@@ -3039,7 +2993,7 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, | |||
3039 | * flushes to occur. | 2993 | * flushes to occur. |
3040 | */ | 2994 | */ |
3041 | static int | 2995 | static int |
3042 | i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | 2996 | i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, int write) |
3043 | { | 2997 | { |
3044 | uint32_t old_write_domain, old_read_domains; | 2998 | uint32_t old_write_domain, old_read_domains; |
3045 | int ret; | 2999 | int ret; |
@@ -3061,27 +3015,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
3061 | return ret; | 3015 | return ret; |
3062 | } | 3016 | } |
3063 | 3017 | ||
3064 | old_write_domain = obj->write_domain; | 3018 | old_write_domain = obj->base.write_domain; |
3065 | old_read_domains = obj->read_domains; | 3019 | old_read_domains = obj->base.read_domains; |
3066 | 3020 | ||
3067 | /* Flush the CPU cache if it's still invalid. */ | 3021 | /* Flush the CPU cache if it's still invalid. */ |
3068 | if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { | 3022 | if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { |
3069 | i915_gem_clflush_object(obj); | 3023 | i915_gem_clflush_object(obj); |
3070 | 3024 | ||
3071 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 3025 | obj->base.read_domains |= I915_GEM_DOMAIN_CPU; |
3072 | } | 3026 | } |
3073 | 3027 | ||
3074 | /* It should now be out of any other write domains, and we can update | 3028 | /* It should now be out of any other write domains, and we can update |
3075 | * the domain values for our changes. | 3029 | * the domain values for our changes. |
3076 | */ | 3030 | */ |
3077 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | 3031 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); |
3078 | 3032 | ||
3079 | /* If we're writing through the CPU, then the GPU read domains will | 3033 | /* If we're writing through the CPU, then the GPU read domains will |
3080 | * need to be invalidated at next use. | 3034 | * need to be invalidated at next use. |
3081 | */ | 3035 | */ |
3082 | if (write) { | 3036 | if (write) { |
3083 | obj->read_domains = I915_GEM_DOMAIN_CPU; | 3037 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; |
3084 | obj->write_domain = I915_GEM_DOMAIN_CPU; | 3038 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
3085 | } | 3039 | } |
3086 | 3040 | ||
3087 | trace_i915_gem_object_change_domain(obj, | 3041 | trace_i915_gem_object_change_domain(obj, |
@@ -3203,20 +3157,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
3203 | * drm_agp_chipset_flush | 3157 | * drm_agp_chipset_flush |
3204 | */ | 3158 | */ |
3205 | static void | 3159 | static void |
3206 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | 3160 | i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, |
3207 | struct intel_ring_buffer *ring, | 3161 | struct intel_ring_buffer *ring, |
3208 | struct change_domains *cd) | 3162 | struct change_domains *cd) |
3209 | { | 3163 | { |
3210 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 3164 | uint32_t invalidate_domains = 0, flush_domains = 0; |
3211 | uint32_t invalidate_domains = 0; | ||
3212 | uint32_t flush_domains = 0; | ||
3213 | 3165 | ||
3214 | /* | 3166 | /* |
3215 | * If the object isn't moving to a new write domain, | 3167 | * If the object isn't moving to a new write domain, |
3216 | * let the object stay in multiple read domains | 3168 | * let the object stay in multiple read domains |
3217 | */ | 3169 | */ |
3218 | if (obj->pending_write_domain == 0) | 3170 | if (obj->base.pending_write_domain == 0) |
3219 | obj->pending_read_domains |= obj->read_domains; | 3171 | obj->base.pending_read_domains |= obj->base.read_domains; |
3220 | 3172 | ||
3221 | /* | 3173 | /* |
3222 | * Flush the current write domain if | 3174 | * Flush the current write domain if |
@@ -3224,18 +3176,18 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | |||
3224 | * any read domains which differ from the old | 3176 | * any read domains which differ from the old |
3225 | * write domain | 3177 | * write domain |
3226 | */ | 3178 | */ |
3227 | if (obj->write_domain && | 3179 | if (obj->base.write_domain && |
3228 | (obj->write_domain != obj->pending_read_domains || | 3180 | (obj->base.write_domain != obj->base.pending_read_domains || |
3229 | obj_priv->ring != ring)) { | 3181 | obj->ring != ring)) { |
3230 | flush_domains |= obj->write_domain; | 3182 | flush_domains |= obj->base.write_domain; |
3231 | invalidate_domains |= | 3183 | invalidate_domains |= |
3232 | obj->pending_read_domains & ~obj->write_domain; | 3184 | obj->base.pending_read_domains & ~obj->base.write_domain; |
3233 | } | 3185 | } |
3234 | /* | 3186 | /* |
3235 | * Invalidate any read caches which may have | 3187 | * Invalidate any read caches which may have |
3236 | * stale data. That is, any new read domains. | 3188 | * stale data. That is, any new read domains. |
3237 | */ | 3189 | */ |
3238 | invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; | 3190 | invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains; |
3239 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) | 3191 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) |
3240 | i915_gem_clflush_object(obj); | 3192 | i915_gem_clflush_object(obj); |
3241 | 3193 | ||
@@ -3249,13 +3201,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | |||
3249 | * write_domains). So if we have a current write domain that we | 3201 | * write_domains). So if we have a current write domain that we |
3250 | * aren't changing, set pending_write_domain to that. | 3202 | * aren't changing, set pending_write_domain to that. |
3251 | */ | 3203 | */ |
3252 | if (flush_domains == 0 && obj->pending_write_domain == 0) | 3204 | if (flush_domains == 0 && obj->base.pending_write_domain == 0) |
3253 | obj->pending_write_domain = obj->write_domain; | 3205 | obj->base.pending_write_domain = obj->base.write_domain; |
3254 | 3206 | ||
3255 | cd->invalidate_domains |= invalidate_domains; | 3207 | cd->invalidate_domains |= invalidate_domains; |
3256 | cd->flush_domains |= flush_domains; | 3208 | cd->flush_domains |= flush_domains; |
3257 | if (flush_domains & I915_GEM_GPU_DOMAINS) | 3209 | if (flush_domains & I915_GEM_GPU_DOMAINS) |
3258 | cd->flush_rings |= obj_priv->ring->id; | 3210 | cd->flush_rings |= obj->ring->id; |
3259 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) | 3211 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) |
3260 | cd->flush_rings |= ring->id; | 3212 | cd->flush_rings |= ring->id; |
3261 | } | 3213 | } |
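For concreteness, a worked example of the flush/invalidate computation above, with values chosen purely for illustration: an object whose current write domain is the render cache and whose pending read is through the sampler ends up flushing RENDER and invalidating SAMPLER.

	/* Illustrative only; the function and values are invented for the example. */
	static void example_change_domains(void)
	{
		uint32_t write_domain = I915_GEM_DOMAIN_RENDER;
		uint32_t read_domains = I915_GEM_DOMAIN_RENDER;
		uint32_t pending_read_domains = I915_GEM_DOMAIN_SAMPLER;
		uint32_t flush_domains = 0, invalidate_domains = 0;

		/* the write domain differs from the pending reads: flush it */
		if (write_domain && write_domain != pending_read_domains) {
			flush_domains |= write_domain;			/* RENDER */
			invalidate_domains |= pending_read_domains & ~write_domain;
		}
		/* any newly added read domain must be invalidated as well */
		invalidate_domains |= pending_read_domains & ~read_domains;
		/* result: flush_domains == RENDER, invalidate_domains == SAMPLER */
	}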
@@ -3267,30 +3219,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | |||
3267 | * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). | 3219 | * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). |
3268 | */ | 3220 | */ |
3269 | static void | 3221 | static void |
3270 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | 3222 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj) |
3271 | { | 3223 | { |
3272 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 3224 | if (!obj->page_cpu_valid) |
3273 | |||
3274 | if (!obj_priv->page_cpu_valid) | ||
3275 | return; | 3225 | return; |
3276 | 3226 | ||
3277 | /* If we're partially in the CPU read domain, finish moving it in. | 3227 | /* If we're partially in the CPU read domain, finish moving it in. |
3278 | */ | 3228 | */ |
3279 | if (obj->read_domains & I915_GEM_DOMAIN_CPU) { | 3229 | if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) { |
3280 | int i; | 3230 | int i; |
3281 | 3231 | ||
3282 | for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { | 3232 | for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) { |
3283 | if (obj_priv->page_cpu_valid[i]) | 3233 | if (obj->page_cpu_valid[i]) |
3284 | continue; | 3234 | continue; |
3285 | drm_clflush_pages(obj_priv->pages + i, 1); | 3235 | drm_clflush_pages(obj->pages + i, 1); |
3286 | } | 3236 | } |
3287 | } | 3237 | } |
3288 | 3238 | ||
3289 | /* Free the page_cpu_valid mappings which are now stale, whether | 3239 | /* Free the page_cpu_valid mappings which are now stale, whether |
3290 | * or not we've got I915_GEM_DOMAIN_CPU. | 3240 | * or not we've got I915_GEM_DOMAIN_CPU. |
3291 | */ | 3241 | */ |
3292 | kfree(obj_priv->page_cpu_valid); | 3242 | kfree(obj->page_cpu_valid); |
3293 | obj_priv->page_cpu_valid = NULL; | 3243 | obj->page_cpu_valid = NULL; |
3294 | } | 3244 | } |
3295 | 3245 | ||
3296 | /** | 3246 | /** |
@@ -3306,14 +3256,13 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | |||
3306 | * flushes to occur. | 3256 | * flushes to occur. |
3307 | */ | 3257 | */ |
3308 | static int | 3258 | static int |
3309 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 3259 | i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, |
3310 | uint64_t offset, uint64_t size) | 3260 | uint64_t offset, uint64_t size) |
3311 | { | 3261 | { |
3312 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
3313 | uint32_t old_read_domains; | 3262 | uint32_t old_read_domains; |
3314 | int i, ret; | 3263 | int i, ret; |
3315 | 3264 | ||
3316 | if (offset == 0 && size == obj->size) | 3265 | if (offset == 0 && size == obj->base.size) |
3317 | return i915_gem_object_set_to_cpu_domain(obj, 0); | 3266 | return i915_gem_object_set_to_cpu_domain(obj, 0); |
3318 | 3267 | ||
3319 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); | 3268 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); |
@@ -3322,45 +3271,45 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
3322 | i915_gem_object_flush_gtt_write_domain(obj); | 3271 | i915_gem_object_flush_gtt_write_domain(obj); |
3323 | 3272 | ||
3324 | /* If we're already fully in the CPU read domain, we're done. */ | 3273 | /* If we're already fully in the CPU read domain, we're done. */ |
3325 | if (obj_priv->page_cpu_valid == NULL && | 3274 | if (obj->page_cpu_valid == NULL && |
3326 | (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0) | 3275 | (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) |
3327 | return 0; | 3276 | return 0; |
3328 | 3277 | ||
3329 | /* Otherwise, create/clear the per-page CPU read domain flag if we're | 3278 | /* Otherwise, create/clear the per-page CPU read domain flag if we're |
3330 | * newly adding I915_GEM_DOMAIN_CPU | 3279 | * newly adding I915_GEM_DOMAIN_CPU |
3331 | */ | 3280 | */ |
3332 | if (obj_priv->page_cpu_valid == NULL) { | 3281 | if (obj->page_cpu_valid == NULL) { |
3333 | obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE, | 3282 | obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE, |
3334 | GFP_KERNEL); | 3283 | GFP_KERNEL); |
3335 | if (obj_priv->page_cpu_valid == NULL) | 3284 | if (obj->page_cpu_valid == NULL) |
3336 | return -ENOMEM; | 3285 | return -ENOMEM; |
3337 | } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) | 3286 | } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) |
3338 | memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); | 3287 | memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE); |
3339 | 3288 | ||
3340 | /* Flush the cache on any pages that are still invalid from the CPU's | 3289 | /* Flush the cache on any pages that are still invalid from the CPU's |
3341 | * perspective. | 3290 | * perspective. |
3342 | */ | 3291 | */ |
3343 | for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; | 3292 | for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; |
3344 | i++) { | 3293 | i++) { |
3345 | if (obj_priv->page_cpu_valid[i]) | 3294 | if (obj->page_cpu_valid[i]) |
3346 | continue; | 3295 | continue; |
3347 | 3296 | ||
3348 | drm_clflush_pages(obj_priv->pages + i, 1); | 3297 | drm_clflush_pages(obj->pages + i, 1); |
3349 | 3298 | ||
3350 | obj_priv->page_cpu_valid[i] = 1; | 3299 | obj->page_cpu_valid[i] = 1; |
3351 | } | 3300 | } |
3352 | 3301 | ||
3353 | /* It should now be out of any other write domains, and we can update | 3302 | /* It should now be out of any other write domains, and we can update |
3354 | * the domain values for our changes. | 3303 | * the domain values for our changes. |
3355 | */ | 3304 | */ |
3356 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | 3305 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); |
3357 | 3306 | ||
3358 | old_read_domains = obj->read_domains; | 3307 | old_read_domains = obj->base.read_domains; |
3359 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 3308 | obj->base.read_domains |= I915_GEM_DOMAIN_CPU; |
3360 | 3309 | ||
3361 | trace_i915_gem_object_change_domain(obj, | 3310 | trace_i915_gem_object_change_domain(obj, |
3362 | old_read_domains, | 3311 | old_read_domains, |
3363 | obj->write_domain); | 3312 | obj->base.write_domain); |
3364 | 3313 | ||
3365 | return 0; | 3314 | return 0; |
3366 | } | 3315 | } |
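The clflush loop above only touches the pages covered by the requested [offset, offset + size) range; a minimal sketch of that index arithmetic (helper name invented): with 4 KiB pages, offset 0x1800 and size 0x2000 flush pages 1 through 3.

	/* Illustrative helper; PAGE_SIZE is the usual 4 KiB page size. */
	static void cpu_read_range_pages(uint64_t offset, uint64_t size,
					 uint64_t *first, uint64_t *last)
	{
		*first = offset / PAGE_SIZE;			/* 0x1800 -> 1 */
		*last = (offset + size - 1) / PAGE_SIZE;	/* 0x37ff -> 3 */
	}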
@@ -3490,7 +3439,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
3490 | uint32_t __iomem *reloc_entry; | 3439 | uint32_t __iomem *reloc_entry; |
3491 | void __iomem *reloc_page; | 3440 | void __iomem *reloc_page; |
3492 | 3441 | ||
3493 | ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1); | 3442 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); |
3494 | if (ret) | 3443 | if (ret) |
3495 | goto err; | 3444 | goto err; |
3496 | 3445 | ||
@@ -3564,14 +3513,14 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, | |||
3564 | static int | 3513 | static int |
3565 | i915_gem_execbuffer_relocate(struct drm_device *dev, | 3514 | i915_gem_execbuffer_relocate(struct drm_device *dev, |
3566 | struct drm_file *file, | 3515 | struct drm_file *file, |
3567 | struct drm_gem_object **object_list, | 3516 | struct drm_i915_gem_object **object_list, |
3568 | struct drm_i915_gem_exec_object2 *exec_list, | 3517 | struct drm_i915_gem_exec_object2 *exec_list, |
3569 | int count) | 3518 | int count) |
3570 | { | 3519 | { |
3571 | int i, ret; | 3520 | int i, ret; |
3572 | 3521 | ||
3573 | for (i = 0; i < count; i++) { | 3522 | for (i = 0; i < count; i++) { |
3574 | struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); | 3523 | struct drm_i915_gem_object *obj = object_list[i]; |
3575 | obj->base.pending_read_domains = 0; | 3524 | obj->base.pending_read_domains = 0; |
3576 | obj->base.pending_write_domain = 0; | 3525 | obj->base.pending_write_domain = 0; |
3577 | ret = i915_gem_execbuffer_relocate_object(obj, file, | 3526 | ret = i915_gem_execbuffer_relocate_object(obj, file, |
@@ -3586,7 +3535,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, | |||
3586 | static int | 3535 | static int |
3587 | i915_gem_execbuffer_reserve(struct drm_device *dev, | 3536 | i915_gem_execbuffer_reserve(struct drm_device *dev, |
3588 | struct drm_file *file, | 3537 | struct drm_file *file, |
3589 | struct drm_gem_object **object_list, | 3538 | struct drm_i915_gem_object **object_list, |
3590 | struct drm_i915_gem_exec_object2 *exec_list, | 3539 | struct drm_i915_gem_exec_object2 *exec_list, |
3591 | int count) | 3540 | int count) |
3592 | { | 3541 | { |
@@ -3599,7 +3548,7 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, | |||
3599 | ret = 0; | 3548 | ret = 0; |
3600 | for (i = 0; i < count; i++) { | 3549 | for (i = 0; i < count; i++) { |
3601 | struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; | 3550 | struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; |
3602 | struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); | 3551 | struct drm_i915_gem_object *obj = object_list[i]; |
3603 | bool need_fence = | 3552 | bool need_fence = |
3604 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | 3553 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && |
3605 | obj->tiling_mode != I915_TILING_NONE; | 3554 | obj->tiling_mode != I915_TILING_NONE; |
@@ -3610,12 +3559,12 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, | |||
3610 | 3559 | ||
3611 | /* Check fence reg constraints and rebind if necessary */ | 3560 | /* Check fence reg constraints and rebind if necessary */ |
3612 | if (need_mappable && !obj->map_and_fenceable) { | 3561 | if (need_mappable && !obj->map_and_fenceable) { |
3613 | ret = i915_gem_object_unbind(&obj->base); | 3562 | ret = i915_gem_object_unbind(obj); |
3614 | if (ret) | 3563 | if (ret) |
3615 | break; | 3564 | break; |
3616 | } | 3565 | } |
3617 | 3566 | ||
3618 | ret = i915_gem_object_pin(&obj->base, | 3567 | ret = i915_gem_object_pin(obj, |
3619 | entry->alignment, | 3568 | entry->alignment, |
3620 | need_mappable); | 3569 | need_mappable); |
3621 | if (ret) | 3570 | if (ret) |
@@ -3626,9 +3575,9 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, | |||
3626 | * to properly handle blits to/from tiled surfaces. | 3575 | * to properly handle blits to/from tiled surfaces. |
3627 | */ | 3576 | */ |
3628 | if (need_fence) { | 3577 | if (need_fence) { |
3629 | ret = i915_gem_object_get_fence_reg(&obj->base, true); | 3578 | ret = i915_gem_object_get_fence_reg(obj, true); |
3630 | if (ret) { | 3579 | if (ret) { |
3631 | i915_gem_object_unpin(&obj->base); | 3580 | i915_gem_object_unpin(obj); |
3632 | break; | 3581 | break; |
3633 | } | 3582 | } |
3634 | 3583 | ||
@@ -3658,17 +3607,15 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, | |||
3658 | static int | 3607 | static int |
3659 | i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | 3608 | i915_gem_execbuffer_relocate_slow(struct drm_device *dev, |
3660 | struct drm_file *file, | 3609 | struct drm_file *file, |
3661 | struct drm_gem_object **object_list, | 3610 | struct drm_i915_gem_object **object_list, |
3662 | struct drm_i915_gem_exec_object2 *exec_list, | 3611 | struct drm_i915_gem_exec_object2 *exec_list, |
3663 | int count) | 3612 | int count) |
3664 | { | 3613 | { |
3665 | struct drm_i915_gem_relocation_entry *reloc; | 3614 | struct drm_i915_gem_relocation_entry *reloc; |
3666 | int i, total, ret; | 3615 | int i, total, ret; |
3667 | 3616 | ||
3668 | for (i = 0; i < count; i++) { | 3617 | for (i = 0; i < count; i++) |
3669 | struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); | 3618 | object_list[i]->in_execbuffer = false; |
3670 | obj->in_execbuffer = false; | ||
3671 | } | ||
3672 | 3619 | ||
3673 | mutex_unlock(&dev->struct_mutex); | 3620 | mutex_unlock(&dev->struct_mutex); |
3674 | 3621 | ||
@@ -3713,7 +3660,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
3713 | 3660 | ||
3714 | total = 0; | 3661 | total = 0; |
3715 | for (i = 0; i < count; i++) { | 3662 | for (i = 0; i < count; i++) { |
3716 | struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); | 3663 | struct drm_i915_gem_object *obj = object_list[i]; |
3717 | obj->base.pending_read_domains = 0; | 3664 | obj->base.pending_read_domains = 0; |
3718 | obj->base.pending_write_domain = 0; | 3665 | obj->base.pending_write_domain = 0; |
3719 | ret = i915_gem_execbuffer_relocate_object_slow(obj, file, | 3666 | ret = i915_gem_execbuffer_relocate_object_slow(obj, file, |
@@ -3740,7 +3687,7 @@ static int | |||
3740 | i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, | 3687 | i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, |
3741 | struct drm_file *file, | 3688 | struct drm_file *file, |
3742 | struct intel_ring_buffer *ring, | 3689 | struct intel_ring_buffer *ring, |
3743 | struct drm_gem_object **objects, | 3690 | struct drm_i915_gem_object **objects, |
3744 | int count) | 3691 | int count) |
3745 | { | 3692 | { |
3746 | struct change_domains cd; | 3693 | struct change_domains cd; |
@@ -3759,17 +3706,17 @@ i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, | |||
3759 | cd.invalidate_domains, | 3706 | cd.invalidate_domains, |
3760 | cd.flush_domains); | 3707 | cd.flush_domains); |
3761 | #endif | 3708 | #endif |
3762 | i915_gem_flush(dev, file, | 3709 | i915_gem_flush(dev, |
3763 | cd.invalidate_domains, | 3710 | cd.invalidate_domains, |
3764 | cd.flush_domains, | 3711 | cd.flush_domains, |
3765 | cd.flush_rings); | 3712 | cd.flush_rings); |
3766 | } | 3713 | } |
3767 | 3714 | ||
3768 | for (i = 0; i < count; i++) { | 3715 | for (i = 0; i < count; i++) { |
3769 | struct drm_i915_gem_object *obj = to_intel_bo(objects[i]); | 3716 | struct drm_i915_gem_object *obj = objects[i]; |
3770 | /* XXX replace with semaphores */ | 3717 | /* XXX replace with semaphores */ |
3771 | if (obj->ring && ring != obj->ring) { | 3718 | if (obj->ring && ring != obj->ring) { |
3772 | ret = i915_gem_object_wait_rendering(&obj->base, true); | 3719 | ret = i915_gem_object_wait_rendering(obj, true); |
3773 | if (ret) | 3720 | if (ret) |
3774 | return ret; | 3721 | return ret; |
3775 | } | 3722 | } |
@@ -3891,8 +3838,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3891 | struct drm_i915_gem_exec_object2 *exec_list) | 3838 | struct drm_i915_gem_exec_object2 *exec_list) |
3892 | { | 3839 | { |
3893 | drm_i915_private_t *dev_priv = dev->dev_private; | 3840 | drm_i915_private_t *dev_priv = dev->dev_private; |
3894 | struct drm_gem_object **object_list = NULL; | 3841 | struct drm_i915_gem_object **object_list = NULL; |
3895 | struct drm_gem_object *batch_obj; | 3842 | struct drm_i915_gem_object *batch_obj; |
3896 | struct drm_clip_rect *cliprects = NULL; | 3843 | struct drm_clip_rect *cliprects = NULL; |
3897 | struct drm_i915_gem_request *request = NULL; | 3844 | struct drm_i915_gem_request *request = NULL; |
3898 | int ret, i, flips; | 3845 | int ret, i, flips; |
@@ -3987,29 +3934,29 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3987 | 3934 | ||
3988 | /* Look up object handles */ | 3935 | /* Look up object handles */ |
3989 | for (i = 0; i < args->buffer_count; i++) { | 3936 | for (i = 0; i < args->buffer_count; i++) { |
3990 | struct drm_i915_gem_object *obj_priv; | 3937 | struct drm_i915_gem_object *obj; |
3991 | 3938 | ||
3992 | object_list[i] = drm_gem_object_lookup(dev, file, | 3939 | obj = to_intel_bo (drm_gem_object_lookup(dev, file, |
3993 | exec_list[i].handle); | 3940 | exec_list[i].handle)); |
3994 | if (object_list[i] == NULL) { | 3941 | if (obj == NULL) { |
3995 | DRM_ERROR("Invalid object handle %d at index %d\n", | 3942 | DRM_ERROR("Invalid object handle %d at index %d\n", |
3996 | exec_list[i].handle, i); | 3943 | exec_list[i].handle, i); |
3997 | /* prevent error path from reading uninitialized data */ | 3944 | /* prevent error path from reading uninitialized data */ |
3998 | args->buffer_count = i + 1; | 3945 | args->buffer_count = i; |
3999 | ret = -ENOENT; | 3946 | ret = -ENOENT; |
4000 | goto err; | 3947 | goto err; |
4001 | } | 3948 | } |
3949 | object_list[i] = obj; | ||
4002 | 3950 | ||
4003 | obj_priv = to_intel_bo(object_list[i]); | 3951 | if (obj->in_execbuffer) { |
4004 | if (obj_priv->in_execbuffer) { | ||
4005 | DRM_ERROR("Object %p appears more than once in object list\n", | 3952 | DRM_ERROR("Object %p appears more than once in object list\n", |
4006 | object_list[i]); | 3953 | obj); |
4007 | /* prevent error path from reading uninitialized data */ | 3954 | /* prevent error path from reading uninitialized data */ |
4008 | args->buffer_count = i + 1; | 3955 | args->buffer_count = i + 1; |
4009 | ret = -EINVAL; | 3956 | ret = -EINVAL; |
4010 | goto err; | 3957 | goto err; |
4011 | } | 3958 | } |
4012 | obj_priv->in_execbuffer = true; | 3959 | obj->in_execbuffer = true; |
4013 | } | 3960 | } |
4014 | 3961 | ||
4015 | /* Move the objects en-masse into the GTT, evicting if necessary. */ | 3962 | /* Move the objects en-masse into the GTT, evicting if necessary. */ |
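The handle lookup above still returns a struct drm_gem_object; to_intel_bo() recovers the driver object that embeds it, which is what lets the rest of the function use obj and obj->base.* instead of a separate obj_priv. A minimal sketch of that relationship, assuming the usual container_of() helper over the embedded base member:

	/* Sketch only; the real struct carries the full driver-private state. */
	struct drm_i915_gem_object {
		struct drm_gem_object base;	/* embedded GEM object */
		/* ... gtt_space, fence_reg, pin_count, madv, ... */
	};

	#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)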
@@ -4037,15 +3984,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
4037 | 3984 | ||
4038 | /* Set the pending read domains for the batch buffer to COMMAND */ | 3985 | /* Set the pending read domains for the batch buffer to COMMAND */ |
4039 | batch_obj = object_list[args->buffer_count-1]; | 3986 | batch_obj = object_list[args->buffer_count-1]; |
4040 | if (batch_obj->pending_write_domain) { | 3987 | if (batch_obj->base.pending_write_domain) { |
4041 | DRM_ERROR("Attempting to use self-modifying batch buffer\n"); | 3988 | DRM_ERROR("Attempting to use self-modifying batch buffer\n"); |
4042 | ret = -EINVAL; | 3989 | ret = -EINVAL; |
4043 | goto err; | 3990 | goto err; |
4044 | } | 3991 | } |
4045 | batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; | 3992 | batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; |
4046 | 3993 | ||
4047 | /* Sanity check the batch buffer */ | 3994 | /* Sanity check the batch buffer */ |
4048 | exec_offset = to_intel_bo(batch_obj)->gtt_offset; | 3995 | exec_offset = batch_obj->gtt_offset; |
4049 | ret = i915_gem_check_execbuffer(args, exec_offset); | 3996 | ret = i915_gem_check_execbuffer(args, exec_offset); |
4050 | if (ret != 0) { | 3997 | if (ret != 0) { |
4051 | DRM_ERROR("execbuf with invalid offset/length\n"); | 3998 | DRM_ERROR("execbuf with invalid offset/length\n"); |
@@ -4077,8 +4024,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
4077 | */ | 4024 | */ |
4078 | flips = 0; | 4025 | flips = 0; |
4079 | for (i = 0; i < args->buffer_count; i++) { | 4026 | for (i = 0; i < args->buffer_count; i++) { |
4080 | if (object_list[i]->write_domain) | 4027 | if (object_list[i]->base.write_domain) |
4081 | flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip); | 4028 | flips |= atomic_read(&object_list[i]->pending_flip); |
4082 | } | 4029 | } |
4083 | if (flips) { | 4030 | if (flips) { |
4084 | int plane, flip_mask; | 4031 | int plane, flip_mask; |
@@ -4110,23 +4057,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
4110 | } | 4057 | } |
4111 | 4058 | ||
4112 | for (i = 0; i < args->buffer_count; i++) { | 4059 | for (i = 0; i < args->buffer_count; i++) { |
4113 | struct drm_gem_object *obj = object_list[i]; | 4060 | struct drm_i915_gem_object *obj = object_list[i]; |
4114 | 4061 | ||
4115 | obj->read_domains = obj->pending_read_domains; | 4062 | obj->base.read_domains = obj->base.pending_read_domains; |
4116 | obj->write_domain = obj->pending_write_domain; | 4063 | obj->base.write_domain = obj->base.pending_write_domain; |
4117 | 4064 | ||
4118 | i915_gem_object_move_to_active(obj, ring); | 4065 | i915_gem_object_move_to_active(obj, ring); |
4119 | if (obj->write_domain) { | 4066 | if (obj->base.write_domain) { |
4120 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 4067 | obj->dirty = 1; |
4121 | obj_priv->dirty = 1; | 4068 | list_move_tail(&obj->gpu_write_list, |
4122 | list_move_tail(&obj_priv->gpu_write_list, | ||
4123 | &ring->gpu_write_list); | 4069 | &ring->gpu_write_list); |
4124 | intel_mark_busy(dev, obj); | 4070 | intel_mark_busy(dev, obj); |
4125 | } | 4071 | } |
4126 | 4072 | ||
4127 | trace_i915_gem_object_change_domain(obj, | 4073 | trace_i915_gem_object_change_domain(obj, |
4128 | obj->read_domains, | 4074 | obj->base.read_domains, |
4129 | obj->write_domain); | 4075 | obj->base.write_domain); |
4130 | } | 4076 | } |
4131 | 4077 | ||
4132 | /* | 4078 | /* |
@@ -4142,11 +4088,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
4142 | 4088 | ||
4143 | err: | 4089 | err: |
4144 | for (i = 0; i < args->buffer_count; i++) { | 4090 | for (i = 0; i < args->buffer_count; i++) { |
4145 | if (object_list[i] == NULL) | 4091 | object_list[i]->in_execbuffer = false; |
4146 | break; | 4092 | drm_gem_object_unreference(&object_list[i]->base); |
4147 | |||
4148 | to_intel_bo(object_list[i])->in_execbuffer = false; | ||
4149 | drm_gem_object_unreference(object_list[i]); | ||
4150 | } | 4093 | } |
4151 | 4094 | ||
4152 | mutex_unlock(&dev->struct_mutex); | 4095 | mutex_unlock(&dev->struct_mutex); |
@@ -4165,7 +4108,7 @@ pre_mutex_err: | |||
4165 | */ | 4108 | */ |
4166 | int | 4109 | int |
4167 | i915_gem_execbuffer(struct drm_device *dev, void *data, | 4110 | i915_gem_execbuffer(struct drm_device *dev, void *data, |
4168 | struct drm_file *file_priv) | 4111 | struct drm_file *file) |
4169 | { | 4112 | { |
4170 | struct drm_i915_gem_execbuffer *args = data; | 4113 | struct drm_i915_gem_execbuffer *args = data; |
4171 | struct drm_i915_gem_execbuffer2 exec2; | 4114 | struct drm_i915_gem_execbuffer2 exec2; |
@@ -4227,7 +4170,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
4227 | exec2.cliprects_ptr = args->cliprects_ptr; | 4170 | exec2.cliprects_ptr = args->cliprects_ptr; |
4228 | exec2.flags = I915_EXEC_RENDER; | 4171 | exec2.flags = I915_EXEC_RENDER; |
4229 | 4172 | ||
4230 | ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); | 4173 | ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); |
4231 | if (!ret) { | 4174 | if (!ret) { |
4232 | /* Copy the new buffer offsets back to the user's exec list. */ | 4175 | /* Copy the new buffer offsets back to the user's exec list. */ |
4233 | for (i = 0; i < args->buffer_count; i++) | 4176 | for (i = 0; i < args->buffer_count; i++) |
@@ -4252,7 +4195,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
4252 | 4195 | ||
4253 | int | 4196 | int |
4254 | i915_gem_execbuffer2(struct drm_device *dev, void *data, | 4197 | i915_gem_execbuffer2(struct drm_device *dev, void *data, |
4255 | struct drm_file *file_priv) | 4198 | struct drm_file *file) |
4256 | { | 4199 | { |
4257 | struct drm_i915_gem_execbuffer2 *args = data; | 4200 | struct drm_i915_gem_execbuffer2 *args = data; |
4258 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | 4201 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; |
@@ -4285,7 +4228,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, | |||
4285 | return -EFAULT; | 4228 | return -EFAULT; |
4286 | } | 4229 | } |
4287 | 4230 | ||
4288 | ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list); | 4231 | ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); |
4289 | if (!ret) { | 4232 | if (!ret) { |
4290 | /* Copy the new buffer offsets back to the user's exec list. */ | 4233 | /* Copy the new buffer offsets back to the user's exec list. */ |
4291 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | 4234 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) |
@@ -4305,109 +4248,106 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, | |||
4305 | } | 4248 | } |
4306 | 4249 | ||
4307 | int | 4250 | int |
4308 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, | 4251 | i915_gem_object_pin(struct drm_i915_gem_object *obj, |
4252 | uint32_t alignment, | ||
4309 | bool map_and_fenceable) | 4253 | bool map_and_fenceable) |
4310 | { | 4254 | { |
4311 | struct drm_device *dev = obj->dev; | 4255 | struct drm_device *dev = obj->base.dev; |
4312 | struct drm_i915_private *dev_priv = dev->dev_private; | 4256 | struct drm_i915_private *dev_priv = dev->dev_private; |
4313 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
4314 | int ret; | 4257 | int ret; |
4315 | 4258 | ||
4316 | BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); | 4259 | BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); |
4317 | BUG_ON(map_and_fenceable && !map_and_fenceable); | 4260 | BUG_ON(map_and_fenceable && !map_and_fenceable); |
4318 | WARN_ON(i915_verify_lists(dev)); | 4261 | WARN_ON(i915_verify_lists(dev)); |
4319 | 4262 | ||
4320 | if (obj_priv->gtt_space != NULL) { | 4263 | if (obj->gtt_space != NULL) { |
4321 | if ((alignment && obj_priv->gtt_offset & (alignment - 1)) || | 4264 | if ((alignment && obj->gtt_offset & (alignment - 1)) || |
4322 | (map_and_fenceable && !obj_priv->map_and_fenceable)) { | 4265 | (map_and_fenceable && !obj->map_and_fenceable)) { |
4323 | WARN(obj_priv->pin_count, | 4266 | WARN(obj->pin_count, |
4324 | "bo is already pinned with incorrect alignment:" | 4267 | "bo is already pinned with incorrect alignment:" |
4325 | " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," | 4268 | " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," |
4326 | " obj->map_and_fenceable=%d\n", | 4269 | " obj->map_and_fenceable=%d\n", |
4327 | obj_priv->gtt_offset, alignment, | 4270 | obj->gtt_offset, alignment, |
4328 | map_and_fenceable, | 4271 | map_and_fenceable, |
4329 | obj_priv->map_and_fenceable); | 4272 | obj->map_and_fenceable); |
4330 | ret = i915_gem_object_unbind(obj); | 4273 | ret = i915_gem_object_unbind(obj); |
4331 | if (ret) | 4274 | if (ret) |
4332 | return ret; | 4275 | return ret; |
4333 | } | 4276 | } |
4334 | } | 4277 | } |
4335 | 4278 | ||
4336 | if (obj_priv->gtt_space == NULL) { | 4279 | if (obj->gtt_space == NULL) { |
4337 | ret = i915_gem_object_bind_to_gtt(obj, alignment, | 4280 | ret = i915_gem_object_bind_to_gtt(obj, alignment, |
4338 | map_and_fenceable); | 4281 | map_and_fenceable); |
4339 | if (ret) | 4282 | if (ret) |
4340 | return ret; | 4283 | return ret; |
4341 | } | 4284 | } |
4342 | 4285 | ||
4343 | if (obj_priv->pin_count++ == 0) { | 4286 | if (obj->pin_count++ == 0) { |
4344 | i915_gem_info_add_pin(dev_priv, obj_priv, map_and_fenceable); | 4287 | i915_gem_info_add_pin(dev_priv, obj, map_and_fenceable); |
4345 | if (!obj_priv->active) | 4288 | if (!obj->active) |
4346 | list_move_tail(&obj_priv->mm_list, | 4289 | list_move_tail(&obj->mm_list, |
4347 | &dev_priv->mm.pinned_list); | 4290 | &dev_priv->mm.pinned_list); |
4348 | } | 4291 | } |
4349 | BUG_ON(!obj_priv->pin_mappable && map_and_fenceable); | 4292 | BUG_ON(!obj->pin_mappable && map_and_fenceable); |
4350 | 4293 | ||
4351 | WARN_ON(i915_verify_lists(dev)); | 4294 | WARN_ON(i915_verify_lists(dev)); |
4352 | return 0; | 4295 | return 0; |
4353 | } | 4296 | } |
4354 | 4297 | ||
4355 | void | 4298 | void |
4356 | i915_gem_object_unpin(struct drm_gem_object *obj) | 4299 | i915_gem_object_unpin(struct drm_i915_gem_object *obj) |
4357 | { | 4300 | { |
4358 | struct drm_device *dev = obj->dev; | 4301 | struct drm_device *dev = obj->base.dev; |
4359 | drm_i915_private_t *dev_priv = dev->dev_private; | 4302 | drm_i915_private_t *dev_priv = dev->dev_private; |
4360 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
4361 | 4303 | ||
4362 | WARN_ON(i915_verify_lists(dev)); | 4304 | WARN_ON(i915_verify_lists(dev)); |
4363 | BUG_ON(obj_priv->pin_count == 0); | 4305 | BUG_ON(obj->pin_count == 0); |
4364 | BUG_ON(obj_priv->gtt_space == NULL); | 4306 | BUG_ON(obj->gtt_space == NULL); |
4365 | 4307 | ||
4366 | if (--obj_priv->pin_count == 0) { | 4308 | if (--obj->pin_count == 0) { |
4367 | if (!obj_priv->active) | 4309 | if (!obj->active) |
4368 | list_move_tail(&obj_priv->mm_list, | 4310 | list_move_tail(&obj->mm_list, |
4369 | &dev_priv->mm.inactive_list); | 4311 | &dev_priv->mm.inactive_list); |
4370 | i915_gem_info_remove_pin(dev_priv, obj_priv); | 4312 | i915_gem_info_remove_pin(dev_priv, obj); |
4371 | } | 4313 | } |
4372 | WARN_ON(i915_verify_lists(dev)); | 4314 | WARN_ON(i915_verify_lists(dev)); |
4373 | } | 4315 | } |
4374 | 4316 | ||
4375 | int | 4317 | int |
4376 | i915_gem_pin_ioctl(struct drm_device *dev, void *data, | 4318 | i915_gem_pin_ioctl(struct drm_device *dev, void *data, |
4377 | struct drm_file *file_priv) | 4319 | struct drm_file *file) |
4378 | { | 4320 | { |
4379 | struct drm_i915_gem_pin *args = data; | 4321 | struct drm_i915_gem_pin *args = data; |
4380 | struct drm_gem_object *obj; | 4322 | struct drm_i915_gem_object *obj; |
4381 | struct drm_i915_gem_object *obj_priv; | ||
4382 | int ret; | 4323 | int ret; |
4383 | 4324 | ||
4384 | ret = i915_mutex_lock_interruptible(dev); | 4325 | ret = i915_mutex_lock_interruptible(dev); |
4385 | if (ret) | 4326 | if (ret) |
4386 | return ret; | 4327 | return ret; |
4387 | 4328 | ||
4388 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4329 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
4389 | if (obj == NULL) { | 4330 | if (obj == NULL) { |
4390 | ret = -ENOENT; | 4331 | ret = -ENOENT; |
4391 | goto unlock; | 4332 | goto unlock; |
4392 | } | 4333 | } |
4393 | obj_priv = to_intel_bo(obj); | ||
4394 | 4334 | ||
4395 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 4335 | if (obj->madv != I915_MADV_WILLNEED) { |
4396 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); | 4336 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); |
4397 | ret = -EINVAL; | 4337 | ret = -EINVAL; |
4398 | goto out; | 4338 | goto out; |
4399 | } | 4339 | } |
4400 | 4340 | ||
4401 | if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { | 4341 | if (obj->pin_filp != NULL && obj->pin_filp != file) { |
4402 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", | 4342 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", |
4403 | args->handle); | 4343 | args->handle); |
4404 | ret = -EINVAL; | 4344 | ret = -EINVAL; |
4405 | goto out; | 4345 | goto out; |
4406 | } | 4346 | } |
4407 | 4347 | ||
4408 | obj_priv->user_pin_count++; | 4348 | obj->user_pin_count++; |
4409 | obj_priv->pin_filp = file_priv; | 4349 | obj->pin_filp = file; |
4410 | if (obj_priv->user_pin_count == 1) { | 4350 | if (obj->user_pin_count == 1) { |
4411 | ret = i915_gem_object_pin(obj, args->alignment, true); | 4351 | ret = i915_gem_object_pin(obj, args->alignment, true); |
4412 | if (ret) | 4352 | if (ret) |
4413 | goto out; | 4353 | goto out; |
@@ -4417,9 +4357,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
4417 | * as the X server doesn't manage domains yet | 4357 | * as the X server doesn't manage domains yet |
4418 | */ | 4358 | */ |
4419 | i915_gem_object_flush_cpu_write_domain(obj); | 4359 | i915_gem_object_flush_cpu_write_domain(obj); |
4420 | args->offset = obj_priv->gtt_offset; | 4360 | args->offset = obj->gtt_offset; |
4421 | out: | 4361 | out: |
4422 | drm_gem_object_unreference(obj); | 4362 | drm_gem_object_unreference(&obj->base); |
4423 | unlock: | 4363 | unlock: |
4424 | mutex_unlock(&dev->struct_mutex); | 4364 | mutex_unlock(&dev->struct_mutex); |
4425 | return ret; | 4365 | return ret; |
@@ -4427,38 +4367,36 @@ unlock: | |||
4427 | 4367 | ||
4428 | int | 4368 | int |
4429 | i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | 4369 | i915_gem_unpin_ioctl(struct drm_device *dev, void *data, |
4430 | struct drm_file *file_priv) | 4370 | struct drm_file *file) |
4431 | { | 4371 | { |
4432 | struct drm_i915_gem_pin *args = data; | 4372 | struct drm_i915_gem_pin *args = data; |
4433 | struct drm_gem_object *obj; | 4373 | struct drm_i915_gem_object *obj; |
4434 | struct drm_i915_gem_object *obj_priv; | ||
4435 | int ret; | 4374 | int ret; |
4436 | 4375 | ||
4437 | ret = i915_mutex_lock_interruptible(dev); | 4376 | ret = i915_mutex_lock_interruptible(dev); |
4438 | if (ret) | 4377 | if (ret) |
4439 | return ret; | 4378 | return ret; |
4440 | 4379 | ||
4441 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4380 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
4442 | if (obj == NULL) { | 4381 | if (obj == NULL) { |
4443 | ret = -ENOENT; | 4382 | ret = -ENOENT; |
4444 | goto unlock; | 4383 | goto unlock; |
4445 | } | 4384 | } |
4446 | obj_priv = to_intel_bo(obj); | ||
4447 | 4385 | ||
4448 | if (obj_priv->pin_filp != file_priv) { | 4386 | if (obj->pin_filp != file) { |
4449 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", | 4387 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", |
4450 | args->handle); | 4388 | args->handle); |
4451 | ret = -EINVAL; | 4389 | ret = -EINVAL; |
4452 | goto out; | 4390 | goto out; |
4453 | } | 4391 | } |
4454 | obj_priv->user_pin_count--; | 4392 | obj->user_pin_count--; |
4455 | if (obj_priv->user_pin_count == 0) { | 4393 | if (obj->user_pin_count == 0) { |
4456 | obj_priv->pin_filp = NULL; | 4394 | obj->pin_filp = NULL; |
4457 | i915_gem_object_unpin(obj); | 4395 | i915_gem_object_unpin(obj); |
4458 | } | 4396 | } |
4459 | 4397 | ||
4460 | out: | 4398 | out: |
4461 | drm_gem_object_unreference(obj); | 4399 | drm_gem_object_unreference(&obj->base); |
4462 | unlock: | 4400 | unlock: |
4463 | mutex_unlock(&dev->struct_mutex); | 4401 | mutex_unlock(&dev->struct_mutex); |
4464 | return ret; | 4402 | return ret; |
@@ -4466,52 +4404,49 @@ unlock: | |||
4466 | 4404 | ||
4467 | int | 4405 | int |
4468 | i915_gem_busy_ioctl(struct drm_device *dev, void *data, | 4406 | i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
4469 | struct drm_file *file_priv) | 4407 | struct drm_file *file) |
4470 | { | 4408 | { |
4471 | struct drm_i915_gem_busy *args = data; | 4409 | struct drm_i915_gem_busy *args = data; |
4472 | struct drm_gem_object *obj; | 4410 | struct drm_i915_gem_object *obj; |
4473 | struct drm_i915_gem_object *obj_priv; | ||
4474 | int ret; | 4411 | int ret; |
4475 | 4412 | ||
4476 | ret = i915_mutex_lock_interruptible(dev); | 4413 | ret = i915_mutex_lock_interruptible(dev); |
4477 | if (ret) | 4414 | if (ret) |
4478 | return ret; | 4415 | return ret; |
4479 | 4416 | ||
4480 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4417 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
4481 | if (obj == NULL) { | 4418 | if (obj == NULL) { |
4482 | ret = -ENOENT; | 4419 | ret = -ENOENT; |
4483 | goto unlock; | 4420 | goto unlock; |
4484 | } | 4421 | } |
4485 | obj_priv = to_intel_bo(obj); | ||
4486 | 4422 | ||
4487 | /* Count all active objects as busy, even if they are currently not used | 4423 | /* Count all active objects as busy, even if they are currently not used |
4488 | * by the gpu. Users of this interface expect objects to eventually | 4424 | * by the gpu. Users of this interface expect objects to eventually |
4489 | * become non-busy without any further actions, therefore emit any | 4425 | * become non-busy without any further actions, therefore emit any |
4490 | * necessary flushes here. | 4426 | * necessary flushes here. |
4491 | */ | 4427 | */ |
4492 | args->busy = obj_priv->active; | 4428 | args->busy = obj->active; |
4493 | if (args->busy) { | 4429 | if (args->busy) { |
4494 | /* Unconditionally flush objects, even when the gpu still uses this | 4430 | /* Unconditionally flush objects, even when the gpu still uses this |
4495 | * object. Userspace calling this function indicates that it wants to | 4431 | * object. Userspace calling this function indicates that it wants to |
4496 | * use this buffer rather sooner than later, so issuing the required | 4432 | * use this buffer rather sooner than later, so issuing the required |
4497 | * flush earlier is beneficial. | 4433 | * flush earlier is beneficial. |
4498 | */ | 4434 | */ |
4499 | if (obj->write_domain & I915_GEM_GPU_DOMAINS) | 4435 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) |
4500 | i915_gem_flush_ring(dev, file_priv, | 4436 | i915_gem_flush_ring(dev, obj->ring, |
4501 | obj_priv->ring, | 4437 | 0, obj->base.write_domain); |
4502 | 0, obj->write_domain); | ||
4503 | 4438 | ||
4504 | /* Update the active list for the hardware's current position. | 4439 | /* Update the active list for the hardware's current position. |
4505 | * Otherwise this only updates on a delayed timer or when irqs | 4440 | * Otherwise this only updates on a delayed timer or when irqs |
4506 | * are actually unmasked, and our working set ends up being | 4441 | * are actually unmasked, and our working set ends up being |
4507 | * larger than required. | 4442 | * larger than required. |
4508 | */ | 4443 | */ |
4509 | i915_gem_retire_requests_ring(dev, obj_priv->ring); | 4444 | i915_gem_retire_requests_ring(dev, obj->ring); |
4510 | 4445 | ||
4511 | args->busy = obj_priv->active; | 4446 | args->busy = obj->active; |
4512 | } | 4447 | } |
4513 | 4448 | ||
4514 | drm_gem_object_unreference(obj); | 4449 | drm_gem_object_unreference(&obj->base); |
4515 | unlock: | 4450 | unlock: |
4516 | mutex_unlock(&dev->struct_mutex); | 4451 | mutex_unlock(&dev->struct_mutex); |
4517 | return ret; | 4452 | return ret; |
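
The busy ioctl above is consumed from userspace by polling. A minimal sketch, assuming a valid DRM fd and GEM handle; the struct and ioctl names come from the i915_drm.h UAPI header, while the helper name and error handling are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* libdrm / kernel UAPI header */

static bool gem_bo_busy(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy = { .handle = handle };

	/* As the comments above note, the kernel flushes and retires as a
	 * side effect, so a busy object eventually reports idle. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
		return false;	/* treat errors as idle in this sketch */

	return busy.busy != 0;
}
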
@@ -4529,8 +4464,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4529 | struct drm_file *file_priv) | 4464 | struct drm_file *file_priv) |
4530 | { | 4465 | { |
4531 | struct drm_i915_gem_madvise *args = data; | 4466 | struct drm_i915_gem_madvise *args = data; |
4532 | struct drm_gem_object *obj; | 4467 | struct drm_i915_gem_object *obj; |
4533 | struct drm_i915_gem_object *obj_priv; | ||
4534 | int ret; | 4468 | int ret; |
4535 | 4469 | ||
4536 | switch (args->madv) { | 4470 | switch (args->madv) { |
@@ -4545,37 +4479,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4545 | if (ret) | 4479 | if (ret) |
4546 | return ret; | 4480 | return ret; |
4547 | 4481 | ||
4548 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4482 | obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); |
4549 | if (obj == NULL) { | 4483 | if (obj == NULL) { |
4550 | ret = -ENOENT; | 4484 | ret = -ENOENT; |
4551 | goto unlock; | 4485 | goto unlock; |
4552 | } | 4486 | } |
4553 | obj_priv = to_intel_bo(obj); | ||
4554 | 4487 | ||
4555 | if (obj_priv->pin_count) { | 4488 | if (obj->pin_count) { |
4556 | ret = -EINVAL; | 4489 | ret = -EINVAL; |
4557 | goto out; | 4490 | goto out; |
4558 | } | 4491 | } |
4559 | 4492 | ||
4560 | if (obj_priv->madv != __I915_MADV_PURGED) | 4493 | if (obj->madv != __I915_MADV_PURGED) |
4561 | obj_priv->madv = args->madv; | 4494 | obj->madv = args->madv; |
4562 | 4495 | ||
4563 | /* if the object is no longer bound, discard its backing storage */ | 4496 | /* if the object is no longer bound, discard its backing storage */ |
4564 | if (i915_gem_object_is_purgeable(obj_priv) && | 4497 | if (i915_gem_object_is_purgeable(obj) && |
4565 | obj_priv->gtt_space == NULL) | 4498 | obj->gtt_space == NULL) |
4566 | i915_gem_object_truncate(obj); | 4499 | i915_gem_object_truncate(obj); |
4567 | 4500 | ||
4568 | args->retained = obj_priv->madv != __I915_MADV_PURGED; | 4501 | args->retained = obj->madv != __I915_MADV_PURGED; |
4569 | 4502 | ||
4570 | out: | 4503 | out: |
4571 | drm_gem_object_unreference(obj); | 4504 | drm_gem_object_unreference(&obj->base); |
4572 | unlock: | 4505 | unlock: |
4573 | mutex_unlock(&dev->struct_mutex); | 4506 | mutex_unlock(&dev->struct_mutex); |
4574 | return ret; | 4507 | return ret; |
4575 | } | 4508 | } |
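
For reference, a userspace buffer cache typically drives the madvise ioctl above like this: mark idle buffers purgeable, then reclaim them before reuse and fall back to a fresh allocation if the kernel already purged the pages. A hedged sketch only; the helper names are mine, while the struct, ioctl and I915_MADV_* names come from i915_drm.h:

#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Mark a buffer purgeable; the kernel may drop its backing storage. */
static void gem_madvise_dontneed(int fd, uint32_t handle)
{
	struct drm_i915_gem_madvise madv = {
		.handle = handle,
		.madv = I915_MADV_DONTNEED,
	};
	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
}

/* Reclaim before reuse; returns false if the pages were purged. */
static bool gem_madvise_willneed(int fd, uint32_t handle)
{
	struct drm_i915_gem_madvise madv = {
		.handle = handle,
		.madv = I915_MADV_WILLNEED,
	};
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) != 0)
		return false;
	return madv.retained != 0;
}
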
4576 | 4509 | ||
4577 | struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | 4510 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
4578 | size_t size) | 4511 | size_t size) |
4579 | { | 4512 | { |
4580 | struct drm_i915_private *dev_priv = dev->dev_private; | 4513 | struct drm_i915_private *dev_priv = dev->dev_private; |
4581 | struct drm_i915_gem_object *obj; | 4514 | struct drm_i915_gem_object *obj; |
@@ -4605,7 +4538,7 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | |||
4605 | /* Avoid an unnecessary call to unbind on the first bind. */ | 4538 | /* Avoid an unnecessary call to unbind on the first bind. */ |
4606 | obj->map_and_fenceable = true; | 4539 | obj->map_and_fenceable = true; |
4607 | 4540 | ||
4608 | return &obj->base; | 4541 | return obj; |
4609 | } | 4542 | } |
4610 | 4543 | ||
4611 | int i915_gem_init_object(struct drm_gem_object *obj) | 4544 | int i915_gem_init_object(struct drm_gem_object *obj) |
@@ -4615,42 +4548,41 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
4615 | return 0; | 4548 | return 0; |
4616 | } | 4549 | } |
4617 | 4550 | ||
4618 | static void i915_gem_free_object_tail(struct drm_gem_object *obj) | 4551 | static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) |
4619 | { | 4552 | { |
4620 | struct drm_device *dev = obj->dev; | 4553 | struct drm_device *dev = obj->base.dev; |
4621 | drm_i915_private_t *dev_priv = dev->dev_private; | 4554 | drm_i915_private_t *dev_priv = dev->dev_private; |
4622 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
4623 | int ret; | 4555 | int ret; |
4624 | 4556 | ||
4625 | ret = i915_gem_object_unbind(obj); | 4557 | ret = i915_gem_object_unbind(obj); |
4626 | if (ret == -ERESTARTSYS) { | 4558 | if (ret == -ERESTARTSYS) { |
4627 | list_move(&obj_priv->mm_list, | 4559 | list_move(&obj->mm_list, |
4628 | &dev_priv->mm.deferred_free_list); | 4560 | &dev_priv->mm.deferred_free_list); |
4629 | return; | 4561 | return; |
4630 | } | 4562 | } |
4631 | 4563 | ||
4632 | if (obj->map_list.map) | 4564 | if (obj->base.map_list.map) |
4633 | i915_gem_free_mmap_offset(obj); | 4565 | i915_gem_free_mmap_offset(obj); |
4634 | 4566 | ||
4635 | drm_gem_object_release(obj); | 4567 | drm_gem_object_release(&obj->base); |
4636 | i915_gem_info_remove_obj(dev_priv, obj->size); | 4568 | i915_gem_info_remove_obj(dev_priv, obj->base.size); |
4637 | 4569 | ||
4638 | kfree(obj_priv->page_cpu_valid); | 4570 | kfree(obj->page_cpu_valid); |
4639 | kfree(obj_priv->bit_17); | 4571 | kfree(obj->bit_17); |
4640 | kfree(obj_priv); | 4572 | kfree(obj); |
4641 | } | 4573 | } |
4642 | 4574 | ||
4643 | void i915_gem_free_object(struct drm_gem_object *obj) | 4575 | void i915_gem_free_object(struct drm_gem_object *gem_obj) |
4644 | { | 4576 | { |
4645 | struct drm_device *dev = obj->dev; | 4577 | struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); |
4646 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 4578 | struct drm_device *dev = obj->base.dev; |
4647 | 4579 | ||
4648 | trace_i915_gem_object_destroy(obj); | 4580 | trace_i915_gem_object_destroy(obj); |
4649 | 4581 | ||
4650 | while (obj_priv->pin_count > 0) | 4582 | while (obj->pin_count > 0) |
4651 | i915_gem_object_unpin(obj); | 4583 | i915_gem_object_unpin(obj); |
4652 | 4584 | ||
4653 | if (obj_priv->phys_obj) | 4585 | if (obj->phys_obj) |
4654 | i915_gem_detach_phys_object(dev, obj); | 4586 | i915_gem_detach_phys_object(dev, obj); |
4655 | 4587 | ||
4656 | i915_gem_free_object_tail(obj); | 4588 | i915_gem_free_object_tail(obj); |
@@ -4710,8 +4642,7 @@ static int | |||
4710 | i915_gem_init_pipe_control(struct drm_device *dev) | 4642 | i915_gem_init_pipe_control(struct drm_device *dev) |
4711 | { | 4643 | { |
4712 | drm_i915_private_t *dev_priv = dev->dev_private; | 4644 | drm_i915_private_t *dev_priv = dev->dev_private; |
4713 | struct drm_gem_object *obj; | 4645 | struct drm_i915_gem_object *obj; |
4714 | struct drm_i915_gem_object *obj_priv; | ||
4715 | int ret; | 4646 | int ret; |
4716 | 4647 | ||
4717 | obj = i915_gem_alloc_object(dev, 4096); | 4648 | obj = i915_gem_alloc_object(dev, 4096); |
@@ -4720,15 +4651,14 @@ i915_gem_init_pipe_control(struct drm_device *dev) | |||
4720 | ret = -ENOMEM; | 4651 | ret = -ENOMEM; |
4721 | goto err; | 4652 | goto err; |
4722 | } | 4653 | } |
4723 | obj_priv = to_intel_bo(obj); | 4654 | obj->agp_type = AGP_USER_CACHED_MEMORY; |
4724 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | ||
4725 | 4655 | ||
4726 | ret = i915_gem_object_pin(obj, 4096, true); | 4656 | ret = i915_gem_object_pin(obj, 4096, true); |
4727 | if (ret) | 4657 | if (ret) |
4728 | goto err_unref; | 4658 | goto err_unref; |
4729 | 4659 | ||
4730 | dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; | 4660 | dev_priv->seqno_gfx_addr = obj->gtt_offset; |
4731 | dev_priv->seqno_page = kmap(obj_priv->pages[0]); | 4661 | dev_priv->seqno_page = kmap(obj->pages[0]); |
4732 | if (dev_priv->seqno_page == NULL) | 4662 | if (dev_priv->seqno_page == NULL) |
4733 | goto err_unpin; | 4663 | goto err_unpin; |
4734 | 4664 | ||
@@ -4740,7 +4670,7 @@ i915_gem_init_pipe_control(struct drm_device *dev) | |||
4740 | err_unpin: | 4670 | err_unpin: |
4741 | i915_gem_object_unpin(obj); | 4671 | i915_gem_object_unpin(obj); |
4742 | err_unref: | 4672 | err_unref: |
4743 | drm_gem_object_unreference(obj); | 4673 | drm_gem_object_unreference(&obj->base); |
4744 | err: | 4674 | err: |
4745 | return ret; | 4675 | return ret; |
4746 | } | 4676 | } |
@@ -4750,14 +4680,12 @@ static void | |||
4750 | i915_gem_cleanup_pipe_control(struct drm_device *dev) | 4680 | i915_gem_cleanup_pipe_control(struct drm_device *dev) |
4751 | { | 4681 | { |
4752 | drm_i915_private_t *dev_priv = dev->dev_private; | 4682 | drm_i915_private_t *dev_priv = dev->dev_private; |
4753 | struct drm_gem_object *obj; | 4683 | struct drm_i915_gem_object *obj; |
4754 | struct drm_i915_gem_object *obj_priv; | ||
4755 | 4684 | ||
4756 | obj = dev_priv->seqno_obj; | 4685 | obj = dev_priv->seqno_obj; |
4757 | obj_priv = to_intel_bo(obj); | 4686 | kunmap(obj->pages[0]); |
4758 | kunmap(obj_priv->pages[0]); | ||
4759 | i915_gem_object_unpin(obj); | 4687 | i915_gem_object_unpin(obj); |
4760 | drm_gem_object_unreference(obj); | 4688 | drm_gem_object_unreference(&obj->base); |
4761 | dev_priv->seqno_obj = NULL; | 4689 | dev_priv->seqno_obj = NULL; |
4762 | 4690 | ||
4763 | dev_priv->seqno_page = NULL; | 4691 | dev_priv->seqno_page = NULL; |
@@ -5035,20 +4963,18 @@ void i915_gem_free_all_phys_object(struct drm_device *dev) | |||
5035 | } | 4963 | } |
5036 | 4964 | ||
5037 | void i915_gem_detach_phys_object(struct drm_device *dev, | 4965 | void i915_gem_detach_phys_object(struct drm_device *dev, |
5038 | struct drm_gem_object *obj) | 4966 | struct drm_i915_gem_object *obj) |
5039 | { | 4967 | { |
5040 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 4968 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
5041 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
5042 | char *vaddr; | 4969 | char *vaddr; |
5043 | int i; | 4970 | int i; |
5044 | int page_count; | 4971 | int page_count; |
5045 | 4972 | ||
5046 | if (!obj_priv->phys_obj) | 4973 | if (!obj->phys_obj) |
5047 | return; | 4974 | return; |
5048 | vaddr = obj_priv->phys_obj->handle->vaddr; | 4975 | vaddr = obj->phys_obj->handle->vaddr; |
5049 | |||
5050 | page_count = obj->size / PAGE_SIZE; | ||
5051 | 4976 | ||
4977 | page_count = obj->base.size / PAGE_SIZE; | ||
5052 | for (i = 0; i < page_count; i++) { | 4978 | for (i = 0; i < page_count; i++) { |
5053 | struct page *page = read_cache_page_gfp(mapping, i, | 4979 | struct page *page = read_cache_page_gfp(mapping, i, |
5054 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | 4980 | GFP_HIGHUSER | __GFP_RECLAIMABLE); |
@@ -5066,19 +4992,18 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
5066 | } | 4992 | } |
5067 | intel_gtt_chipset_flush(); | 4993 | intel_gtt_chipset_flush(); |
5068 | 4994 | ||
5069 | obj_priv->phys_obj->cur_obj = NULL; | 4995 | obj->phys_obj->cur_obj = NULL; |
5070 | obj_priv->phys_obj = NULL; | 4996 | obj->phys_obj = NULL; |
5071 | } | 4997 | } |
5072 | 4998 | ||
5073 | int | 4999 | int |
5074 | i915_gem_attach_phys_object(struct drm_device *dev, | 5000 | i915_gem_attach_phys_object(struct drm_device *dev, |
5075 | struct drm_gem_object *obj, | 5001 | struct drm_i915_gem_object *obj, |
5076 | int id, | 5002 | int id, |
5077 | int align) | 5003 | int align) |
5078 | { | 5004 | { |
5079 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 5005 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
5080 | drm_i915_private_t *dev_priv = dev->dev_private; | 5006 | drm_i915_private_t *dev_priv = dev->dev_private; |
5081 | struct drm_i915_gem_object *obj_priv; | ||
5082 | int ret = 0; | 5007 | int ret = 0; |
5083 | int page_count; | 5008 | int page_count; |
5084 | int i; | 5009 | int i; |
@@ -5086,10 +5011,8 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
5086 | if (id > I915_MAX_PHYS_OBJECT) | 5011 | if (id > I915_MAX_PHYS_OBJECT) |
5087 | return -EINVAL; | 5012 | return -EINVAL; |
5088 | 5013 | ||
5089 | obj_priv = to_intel_bo(obj); | 5014 | if (obj->phys_obj) { |
5090 | 5015 | if (obj->phys_obj->id == id) | |
5091 | if (obj_priv->phys_obj) { | ||
5092 | if (obj_priv->phys_obj->id == id) | ||
5093 | return 0; | 5016 | return 0; |
5094 | i915_gem_detach_phys_object(dev, obj); | 5017 | i915_gem_detach_phys_object(dev, obj); |
5095 | } | 5018 | } |
@@ -5097,18 +5020,19 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
5097 | /* create a new object */ | 5020 | /* create a new object */ |
5098 | if (!dev_priv->mm.phys_objs[id - 1]) { | 5021 | if (!dev_priv->mm.phys_objs[id - 1]) { |
5099 | ret = i915_gem_init_phys_object(dev, id, | 5022 | ret = i915_gem_init_phys_object(dev, id, |
5100 | obj->size, align); | 5023 | obj->base.size, align); |
5101 | if (ret) { | 5024 | if (ret) { |
5102 | DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); | 5025 | DRM_ERROR("failed to init phys object %d size: %zu\n", |
5026 | id, obj->base.size); | ||
5103 | return ret; | 5027 | return ret; |
5104 | } | 5028 | } |
5105 | } | 5029 | } |
5106 | 5030 | ||
5107 | /* bind to the object */ | 5031 | /* bind to the object */ |
5108 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | 5032 | obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; |
5109 | obj_priv->phys_obj->cur_obj = obj; | 5033 | obj->phys_obj->cur_obj = obj; |
5110 | 5034 | ||
5111 | page_count = obj->size / PAGE_SIZE; | 5035 | page_count = obj->base.size / PAGE_SIZE; |
5112 | 5036 | ||
5113 | for (i = 0; i < page_count; i++) { | 5037 | for (i = 0; i < page_count; i++) { |
5114 | struct page *page; | 5038 | struct page *page; |
@@ -5120,7 +5044,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
5120 | return PTR_ERR(page); | 5044 | return PTR_ERR(page); |
5121 | 5045 | ||
5122 | src = kmap_atomic(page); | 5046 | src = kmap_atomic(page); |
5123 | dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | 5047 | dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE); |
5124 | memcpy(dst, src, PAGE_SIZE); | 5048 | memcpy(dst, src, PAGE_SIZE); |
5125 | kunmap_atomic(src); | 5049 | kunmap_atomic(src); |
5126 | 5050 | ||
@@ -5132,16 +5056,14 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
5132 | } | 5056 | } |
5133 | 5057 | ||
5134 | static int | 5058 | static int |
5135 | i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 5059 | i915_gem_phys_pwrite(struct drm_device *dev, |
5060 | struct drm_i915_gem_object *obj, | ||
5136 | struct drm_i915_gem_pwrite *args, | 5061 | struct drm_i915_gem_pwrite *args, |
5137 | struct drm_file *file_priv) | 5062 | struct drm_file *file_priv) |
5138 | { | 5063 | { |
5139 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 5064 | void *vaddr = obj->phys_obj->handle->vaddr + args->offset; |
5140 | void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset; | ||
5141 | char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; | 5065 | char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; |
5142 | 5066 | ||
5143 | DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size); | ||
5144 | |||
5145 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { | 5067 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { |
5146 | unsigned long unwritten; | 5068 | unsigned long unwritten; |
5147 | 5069 | ||
@@ -5228,7 +5150,7 @@ rescan: | |||
5228 | &dev_priv->mm.inactive_list, | 5150 | &dev_priv->mm.inactive_list, |
5229 | mm_list) { | 5151 | mm_list) { |
5230 | if (i915_gem_object_is_purgeable(obj)) { | 5152 | if (i915_gem_object_is_purgeable(obj)) { |
5231 | i915_gem_object_unbind(&obj->base); | 5153 | i915_gem_object_unbind(obj); |
5232 | if (--nr_to_scan == 0) | 5154 | if (--nr_to_scan == 0) |
5233 | break; | 5155 | break; |
5234 | } | 5156 | } |
@@ -5240,7 +5162,7 @@ rescan: | |||
5240 | &dev_priv->mm.inactive_list, | 5162 | &dev_priv->mm.inactive_list, |
5241 | mm_list) { | 5163 | mm_list) { |
5242 | if (nr_to_scan) { | 5164 | if (nr_to_scan) { |
5243 | i915_gem_object_unbind(&obj->base); | 5165 | i915_gem_object_unbind(obj); |
5244 | nr_to_scan--; | 5166 | nr_to_scan--; |
5245 | } else | 5167 | } else |
5246 | cnt++; | 5168 | cnt++; |
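
Every hunk in this file follows the same pattern: struct drm_i915_gem_object embeds struct drm_gem_object as its "base" member, so to_intel_bo() is just a container_of() and &obj->base recovers the generic object wherever the DRM core still needs it. A standalone sketch of that embedding, with simplified stand-ins for the real definitions in i915_drv.h:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the real structures. */
struct drm_gem_object { size_t size; };

struct drm_i915_gem_object {
	struct drm_gem_object base;	/* embedded, not a separate allocation */
	unsigned int pin_count;
};

/* to_intel_bo() in the driver is a container_of() on the embedded base. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define to_intel_bo(gem) container_of(gem, struct drm_i915_gem_object, base)

int main(void)
{
	struct drm_i915_gem_object obj = { .base.size = 4096, .pin_count = 1 };
	struct drm_gem_object *gem = &obj.base;		/* what the old code passed around */
	struct drm_i915_gem_object *bo = to_intel_bo(gem);	/* what the new code passes */

	printf("size=%zu pinned=%u\n", bo->base.size, bo->pin_count);
	return 0;
}

Because the conversion is free, the "obj_priv = to_intel_bo(obj)" locals disappear and calls such as drm_gem_object_unreference(obj) simply become drm_gem_object_unreference(&obj->base).
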
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 48644b840a8d..29d014c48ca2 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c | |||
@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, | |||
152 | } | 152 | } |
153 | 153 | ||
154 | void | 154 | void |
155 | i915_gem_dump_object(struct drm_gem_object *obj, int len, | 155 | i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, |
156 | const char *where, uint32_t mark) | 156 | const char *where, uint32_t mark) |
157 | { | 157 | { |
158 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
159 | int page; | 158 | int page; |
160 | 159 | ||
161 | DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); | 160 | DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset); |
162 | for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { | 161 | for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { |
163 | int page_len, chunk, chunk_len; | 162 | int page_len, chunk, chunk_len; |
164 | 163 | ||
@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, | |||
170 | chunk_len = page_len - chunk; | 169 | chunk_len = page_len - chunk; |
171 | if (chunk_len > 128) | 170 | if (chunk_len > 128) |
172 | chunk_len = 128; | 171 | chunk_len = 128; |
173 | i915_gem_dump_page(obj_priv->pages[page], | 172 | i915_gem_dump_page(obj->pages[page], |
174 | chunk, chunk + chunk_len, | 173 | chunk, chunk + chunk_len, |
175 | obj_priv->gtt_offset + | 174 | obj->gtt_offset + |
176 | page * PAGE_SIZE, | 175 | page * PAGE_SIZE, |
177 | mark); | 176 | mark); |
178 | } | 177 | } |
@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, | |||
182 | 181 | ||
183 | #if WATCH_COHERENCY | 182 | #if WATCH_COHERENCY |
184 | void | 183 | void |
185 | i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | 184 | i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) |
186 | { | 185 | { |
187 | struct drm_device *dev = obj->dev; | 186 | struct drm_device *dev = obj->base.dev; |
188 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
189 | int page; | 187 | int page; |
190 | uint32_t *gtt_mapping; | 188 | uint32_t *gtt_mapping; |
191 | uint32_t *backing_map = NULL; | 189 | uint32_t *backing_map = NULL; |
192 | int bad_count = 0; | 190 | int bad_count = 0; |
193 | 191 | ||
194 | DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", | 192 | DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", |
195 | __func__, obj, obj_priv->gtt_offset, handle, | 193 | __func__, obj, obj->gtt_offset, handle, |
196 | obj->size / 1024); | 194 | obj->size / 1024); |
197 | 195 | ||
198 | gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, | 196 | gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size); |
199 | obj->size); | ||
200 | if (gtt_mapping == NULL) { | 197 | if (gtt_mapping == NULL) { |
201 | DRM_ERROR("failed to map GTT space\n"); | 198 | DRM_ERROR("failed to map GTT space\n"); |
202 | return; | 199 | return; |
@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | |||
205 | for (page = 0; page < obj->size / PAGE_SIZE; page++) { | 202 | for (page = 0; page < obj->size / PAGE_SIZE; page++) { |
206 | int i; | 203 | int i; |
207 | 204 | ||
208 | backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0); | 205 | backing_map = kmap_atomic(obj->pages[page], KM_USER0); |
209 | 206 | ||
210 | if (backing_map == NULL) { | 207 | if (backing_map == NULL) { |
211 | DRM_ERROR("failed to map backing page\n"); | 208 | DRM_ERROR("failed to map backing page\n"); |
@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | |||
220 | if (cpuval != gttval) { | 217 | if (cpuval != gttval) { |
221 | DRM_INFO("incoherent CPU vs GPU at 0x%08x: " | 218 | DRM_INFO("incoherent CPU vs GPU at 0x%08x: " |
222 | "0x%08x vs 0x%08x\n", | 219 | "0x%08x vs 0x%08x\n", |
223 | (int)(obj_priv->gtt_offset + | 220 | (int)(obj->gtt_offset + |
224 | page * PAGE_SIZE + i * 4), | 221 | page * PAGE_SIZE + i * 4), |
225 | cpuval, gttval); | 222 | cpuval, gttval); |
226 | if (bad_count++ >= 8) { | 223 | if (bad_count++ >= 8) { |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 3f6f336bbb4d..03e15d37b550 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -32,12 +32,11 @@ | |||
32 | #include "i915_drm.h" | 32 | #include "i915_drm.h" |
33 | 33 | ||
34 | static bool | 34 | static bool |
35 | mark_free(struct drm_i915_gem_object *obj_priv, | 35 | mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) |
36 | struct list_head *unwind) | ||
37 | { | 36 | { |
38 | list_add(&obj_priv->evict_list, unwind); | 37 | list_add(&obj->evict_list, unwind); |
39 | drm_gem_object_reference(&obj_priv->base); | 38 | drm_gem_object_reference(&obj->base); |
40 | return drm_mm_scan_add_block(obj_priv->gtt_space); | 39 | return drm_mm_scan_add_block(obj->gtt_space); |
41 | } | 40 | } |
42 | 41 | ||
43 | int | 42 | int |
@@ -46,7 +45,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | |||
46 | { | 45 | { |
47 | drm_i915_private_t *dev_priv = dev->dev_private; | 46 | drm_i915_private_t *dev_priv = dev->dev_private; |
48 | struct list_head eviction_list, unwind_list; | 47 | struct list_head eviction_list, unwind_list; |
49 | struct drm_i915_gem_object *obj_priv; | 48 | struct drm_i915_gem_object *obj; |
50 | int ret = 0; | 49 | int ret = 0; |
51 | 50 | ||
52 | i915_gem_retire_requests(dev); | 51 | i915_gem_retire_requests(dev); |
@@ -96,42 +95,42 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | |||
96 | drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); | 95 | drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); |
97 | 96 | ||
98 | /* First see if there is a large enough contiguous idle region... */ | 97 | /* First see if there is a large enough contiguous idle region... */ |
99 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { | 98 | list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { |
100 | if (mark_free(obj_priv, &unwind_list)) | 99 | if (mark_free(obj, &unwind_list)) |
101 | goto found; | 100 | goto found; |
102 | } | 101 | } |
103 | 102 | ||
104 | /* Now merge in the soon-to-be-expired objects... */ | 103 | /* Now merge in the soon-to-be-expired objects... */ |
105 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { | 104 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
106 | /* Does the object require an outstanding flush? */ | 105 | /* Does the object require an outstanding flush? */ |
107 | if (obj_priv->base.write_domain || obj_priv->pin_count) | 106 | if (obj->base.write_domain || obj->pin_count) |
108 | continue; | 107 | continue; |
109 | 108 | ||
110 | if (mark_free(obj_priv, &unwind_list)) | 109 | if (mark_free(obj, &unwind_list)) |
111 | goto found; | 110 | goto found; |
112 | } | 111 | } |
113 | 112 | ||
114 | /* Finally add anything with a pending flush (in order of retirement) */ | 113 | /* Finally add anything with a pending flush (in order of retirement) */ |
115 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { | 114 | list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { |
116 | if (obj_priv->pin_count) | 115 | if (obj->pin_count) |
117 | continue; | 116 | continue; |
118 | 117 | ||
119 | if (mark_free(obj_priv, &unwind_list)) | 118 | if (mark_free(obj, &unwind_list)) |
120 | goto found; | 119 | goto found; |
121 | } | 120 | } |
122 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { | 121 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
123 | if (! obj_priv->base.write_domain || obj_priv->pin_count) | 122 | if (! obj->base.write_domain || obj->pin_count) |
124 | continue; | 123 | continue; |
125 | 124 | ||
126 | if (mark_free(obj_priv, &unwind_list)) | 125 | if (mark_free(obj, &unwind_list)) |
127 | goto found; | 126 | goto found; |
128 | } | 127 | } |
129 | 128 | ||
130 | /* Nothing found, clean up and bail out! */ | 129 | /* Nothing found, clean up and bail out! */ |
131 | list_for_each_entry(obj_priv, &unwind_list, evict_list) { | 130 | list_for_each_entry(obj, &unwind_list, evict_list) { |
132 | ret = drm_mm_scan_remove_block(obj_priv->gtt_space); | 131 | ret = drm_mm_scan_remove_block(obj->gtt_space); |
133 | BUG_ON(ret); | 132 | BUG_ON(ret); |
134 | drm_gem_object_unreference(&obj_priv->base); | 133 | drm_gem_object_unreference(&obj->base); |
135 | } | 134 | } |
136 | 135 | ||
137 | /* We expect the caller to unpin, evict all and try again, or give up. | 136 | /* We expect the caller to unpin, evict all and try again, or give up. |
@@ -145,26 +144,26 @@ found: | |||
145 | * temporary list. */ | 144 | * temporary list. */ |
146 | INIT_LIST_HEAD(&eviction_list); | 145 | INIT_LIST_HEAD(&eviction_list); |
147 | while (!list_empty(&unwind_list)) { | 146 | while (!list_empty(&unwind_list)) { |
148 | obj_priv = list_first_entry(&unwind_list, | 147 | obj = list_first_entry(&unwind_list, |
149 | struct drm_i915_gem_object, | 148 | struct drm_i915_gem_object, |
150 | evict_list); | 149 | evict_list); |
151 | if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { | 150 | if (drm_mm_scan_remove_block(obj->gtt_space)) { |
152 | list_move(&obj_priv->evict_list, &eviction_list); | 151 | list_move(&obj->evict_list, &eviction_list); |
153 | continue; | 152 | continue; |
154 | } | 153 | } |
155 | list_del(&obj_priv->evict_list); | 154 | list_del(&obj->evict_list); |
156 | drm_gem_object_unreference(&obj_priv->base); | 155 | drm_gem_object_unreference(&obj->base); |
157 | } | 156 | } |
158 | 157 | ||
159 | /* Unbinding will emit any required flushes */ | 158 | /* Unbinding will emit any required flushes */ |
160 | while (!list_empty(&eviction_list)) { | 159 | while (!list_empty(&eviction_list)) { |
161 | obj_priv = list_first_entry(&eviction_list, | 160 | obj = list_first_entry(&eviction_list, |
162 | struct drm_i915_gem_object, | 161 | struct drm_i915_gem_object, |
163 | evict_list); | 162 | evict_list); |
164 | if (ret == 0) | 163 | if (ret == 0) |
165 | ret = i915_gem_object_unbind(&obj_priv->base); | 164 | ret = i915_gem_object_unbind(obj); |
166 | list_del(&obj_priv->evict_list); | 165 | list_del(&obj->evict_list); |
167 | drm_gem_object_unreference(&obj_priv->base); | 166 | drm_gem_object_unreference(&obj->base); |
168 | } | 167 | } |
169 | 168 | ||
170 | return ret; | 169 | return ret; |
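
The eviction loop above is easier to follow as three phases: mark candidates in priority order until the scanner finds a hole, unwind the marks keeping only the blocks the scanner still needs, then unbind what was kept. A self-contained control-flow sketch; scan_add(), scan_keep() and evict() are hypothetical stand-ins for drm_mm_scan_add_block(), drm_mm_scan_remove_block() and i915_gem_object_unbind():

#include <stdbool.h>
#include <stdio.h>

struct bo { int id; bool marked; bool keep; };

/* Pretend that marking object 3 completes a large enough hole. */
static bool scan_add(struct bo *bo)  { bo->marked = true; return bo->id == 3; }
/* Pretend the scanner still needs every marked block. */
static bool scan_keep(struct bo *bo) { return bo->marked; }
static void evict(struct bo *bo)     { printf("evicting %d\n", bo->id); }

int main(void)
{
	struct bo bos[] = { {1}, {2}, {3}, {4} };	/* candidates, already ordered */
	int n = sizeof(bos) / sizeof(bos[0]);
	int i, found = -1;

	/* 1. Mark candidates until a large enough contiguous hole appears. */
	for (i = 0; i < n; i++)
		if (scan_add(&bos[i])) { found = i; break; }
	if (found < 0)
		return 1;	/* caller unpins, evicts everything and retries */

	/* 2. Unwind: keep only what the scanner reports as part of the hole. */
	for (i = 0; i <= found; i++)
		bos[i].keep = scan_keep(&bos[i]);

	/* 3. Unbind the kept objects; unbinding emits any required flushes. */
	for (i = 0; i <= found; i++)
		if (bos[i].keep)
			evict(&bos[i]);
	return 0;
}
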
@@ -203,7 +202,7 @@ i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only) | |||
203 | list_for_each_entry_safe(obj, next, | 202 | list_for_each_entry_safe(obj, next, |
204 | &dev_priv->mm.inactive_list, mm_list) { | 203 | &dev_priv->mm.inactive_list, mm_list) { |
205 | if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { | 204 | if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { |
206 | int ret = i915_gem_object_unbind(&obj->base); | 205 | int ret = i915_gem_object_unbind(obj); |
207 | if (ret) | 206 | if (ret) |
208 | return ret; | 207 | return ret; |
209 | } | 208 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0b34a1aee9b6..71c2b0f3747b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -32,71 +32,67 @@ | |||
32 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) | 32 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) |
33 | { | 33 | { |
34 | struct drm_i915_private *dev_priv = dev->dev_private; | 34 | struct drm_i915_private *dev_priv = dev->dev_private; |
35 | struct drm_i915_gem_object *obj_priv; | 35 | struct drm_i915_gem_object *obj; |
36 | 36 | ||
37 | list_for_each_entry(obj_priv, | 37 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { |
38 | &dev_priv->mm.gtt_list, | ||
39 | gtt_list) { | ||
40 | if (dev_priv->mm.gtt->needs_dmar) { | 38 | if (dev_priv->mm.gtt->needs_dmar) { |
41 | BUG_ON(!obj_priv->sg_list); | 39 | BUG_ON(!obj->sg_list); |
42 | 40 | ||
43 | intel_gtt_insert_sg_entries(obj_priv->sg_list, | 41 | intel_gtt_insert_sg_entries(obj->sg_list, |
44 | obj_priv->num_sg, | 42 | obj->num_sg, |
45 | obj_priv->gtt_space->start | 43 | obj->gtt_space->start |
46 | >> PAGE_SHIFT, | 44 | >> PAGE_SHIFT, |
47 | obj_priv->agp_type); | 45 | obj->agp_type); |
48 | } else | 46 | } else |
49 | intel_gtt_insert_pages(obj_priv->gtt_space->start | 47 | intel_gtt_insert_pages(obj->gtt_space->start |
50 | >> PAGE_SHIFT, | 48 | >> PAGE_SHIFT, |
51 | obj_priv->base.size >> PAGE_SHIFT, | 49 | obj->base.size >> PAGE_SHIFT, |
52 | obj_priv->pages, | 50 | obj->pages, |
53 | obj_priv->agp_type); | 51 | obj->agp_type); |
54 | } | 52 | } |
55 | 53 | ||
56 | /* Be paranoid and flush the chipset cache. */ | 54 | /* Be paranoid and flush the chipset cache. */ |
57 | intel_gtt_chipset_flush(); | 55 | intel_gtt_chipset_flush(); |
58 | } | 56 | } |
59 | 57 | ||
60 | int i915_gem_gtt_bind_object(struct drm_gem_object *obj) | 58 | int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) |
61 | { | 59 | { |
62 | struct drm_device *dev = obj->dev; | 60 | struct drm_device *dev = obj->base.dev; |
63 | struct drm_i915_private *dev_priv = dev->dev_private; | 61 | struct drm_i915_private *dev_priv = dev->dev_private; |
64 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
65 | int ret; | 62 | int ret; |
66 | 63 | ||
67 | if (dev_priv->mm.gtt->needs_dmar) { | 64 | if (dev_priv->mm.gtt->needs_dmar) { |
68 | ret = intel_gtt_map_memory(obj_priv->pages, | 65 | ret = intel_gtt_map_memory(obj->pages, |
69 | obj->size >> PAGE_SHIFT, | 66 | obj->base.size >> PAGE_SHIFT, |
70 | &obj_priv->sg_list, | 67 | &obj->sg_list, |
71 | &obj_priv->num_sg); | 68 | &obj->num_sg); |
72 | if (ret != 0) | 69 | if (ret != 0) |
73 | return ret; | 70 | return ret; |
74 | 71 | ||
75 | intel_gtt_insert_sg_entries(obj_priv->sg_list, obj_priv->num_sg, | 72 | intel_gtt_insert_sg_entries(obj->sg_list, |
76 | obj_priv->gtt_space->start | 73 | obj->num_sg, |
77 | >> PAGE_SHIFT, | 74 | obj->gtt_space->start >> PAGE_SHIFT, |
78 | obj_priv->agp_type); | 75 | obj->agp_type); |
79 | } else | 76 | } else |
80 | intel_gtt_insert_pages(obj_priv->gtt_space->start >> PAGE_SHIFT, | 77 | intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, |
81 | obj->size >> PAGE_SHIFT, | 78 | obj->base.size >> PAGE_SHIFT, |
82 | obj_priv->pages, | 79 | obj->pages, |
83 | obj_priv->agp_type); | 80 | obj->agp_type); |
84 | 81 | ||
85 | return 0; | 82 | return 0; |
86 | } | 83 | } |
87 | 84 | ||
88 | void i915_gem_gtt_unbind_object(struct drm_gem_object *obj) | 85 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) |
89 | { | 86 | { |
90 | struct drm_device *dev = obj->dev; | 87 | struct drm_device *dev = obj->base.dev; |
91 | struct drm_i915_private *dev_priv = dev->dev_private; | 88 | struct drm_i915_private *dev_priv = dev->dev_private; |
92 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
93 | 89 | ||
94 | if (dev_priv->mm.gtt->needs_dmar) { | 90 | if (dev_priv->mm.gtt->needs_dmar) { |
95 | intel_gtt_unmap_memory(obj_priv->sg_list, obj_priv->num_sg); | 91 | intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); |
96 | obj_priv->sg_list = NULL; | 92 | obj->sg_list = NULL; |
97 | obj_priv->num_sg = 0; | 93 | obj->num_sg = 0; |
98 | } | 94 | } |
99 | 95 | ||
100 | intel_gtt_clear_range(obj_priv->gtt_space->start >> PAGE_SHIFT, | 96 | intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, |
101 | obj->size >> PAGE_SHIFT); | 97 | obj->base.size >> PAGE_SHIFT); |
102 | } | 98 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index a517b48d441d..1c5fdb30f272 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -234,25 +234,24 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
234 | 234 | ||
235 | /* Is the current GTT allocation valid for the change in tiling? */ | 235 | /* Is the current GTT allocation valid for the change in tiling? */ |
236 | static bool | 236 | static bool |
237 | i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) | 237 | i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) |
238 | { | 238 | { |
239 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
240 | u32 size; | 239 | u32 size; |
241 | 240 | ||
242 | if (tiling_mode == I915_TILING_NONE) | 241 | if (tiling_mode == I915_TILING_NONE) |
243 | return true; | 242 | return true; |
244 | 243 | ||
245 | if (INTEL_INFO(obj->dev)->gen >= 4) | 244 | if (INTEL_INFO(obj->base.dev)->gen >= 4) |
246 | return true; | 245 | return true; |
247 | 246 | ||
248 | if (!obj_priv->gtt_space) | 247 | if (!obj->gtt_space) |
249 | return true; | 248 | return true; |
250 | 249 | ||
251 | if (INTEL_INFO(obj->dev)->gen == 3) { | 250 | if (INTEL_INFO(obj->base.dev)->gen == 3) { |
252 | if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) | 251 | if (obj->gtt_offset & ~I915_FENCE_START_MASK) |
253 | return false; | 252 | return false; |
254 | } else { | 253 | } else { |
255 | if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) | 254 | if (obj->gtt_offset & ~I830_FENCE_START_MASK) |
256 | return false; | 255 | return false; |
257 | } | 256 | } |
258 | 257 | ||
@@ -260,18 +259,18 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) | |||
260 | * Previous chips need to be aligned to the size of the smallest | 259 | * Previous chips need to be aligned to the size of the smallest |
261 | * fence register that can contain the object. | 260 | * fence register that can contain the object. |
262 | */ | 261 | */ |
263 | if (INTEL_INFO(obj->dev)->gen == 3) | 262 | if (INTEL_INFO(obj->base.dev)->gen == 3) |
264 | size = 1024*1024; | 263 | size = 1024*1024; |
265 | else | 264 | else |
266 | size = 512*1024; | 265 | size = 512*1024; |
267 | 266 | ||
268 | while (size < obj_priv->base.size) | 267 | while (size < obj->base.size) |
269 | size <<= 1; | 268 | size <<= 1; |
270 | 269 | ||
271 | if (obj_priv->gtt_space->size != size) | 270 | if (obj->gtt_space->size != size) |
272 | return false; | 271 | return false; |
273 | 272 | ||
274 | if (obj_priv->gtt_offset & (size - 1)) | 273 | if (obj->gtt_offset & (size - 1)) |
275 | return false; | 274 | return false; |
276 | 275 | ||
277 | return true; | 276 | return true; |
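
The sizing rule above is worth a worked number: fences on these chips cover a power-of-two region, at least 1 MiB on gen3 and 512 KiB on gen2, so the object's GTT node must be exactly that rounded size and aligned to it. A small sketch mirroring the loop; the helper names are mine, not the driver's:

#include <stdbool.h>
#include <stdio.h>

static unsigned long fence_size(int gen, unsigned long obj_size)
{
	unsigned long size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

	while (size < obj_size)
		size <<= 1;
	return size;
}

static bool fence_placement_ok(int gen, unsigned long obj_size,
			       unsigned long gtt_size, unsigned long gtt_offset)
{
	unsigned long size = fence_size(gen, obj_size);

	/* The node must be exactly the fence size and aligned to it. */
	return gtt_size == size && (gtt_offset & (size - 1)) == 0;
}

int main(void)
{
	/* A 1.5 MiB tiled object on gen3 needs a 2 MiB node, 2 MiB aligned. */
	printf("%lu\n", fence_size(3, 1536 * 1024));	/* prints 2097152 */
	printf("%d\n", fence_placement_ok(3, 1536 * 1024, 2 * 1024 * 1024, 0));	/* prints 1 */
	return 0;
}
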
@@ -283,30 +282,29 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) | |||
283 | */ | 282 | */ |
284 | int | 283 | int |
285 | i915_gem_set_tiling(struct drm_device *dev, void *data, | 284 | i915_gem_set_tiling(struct drm_device *dev, void *data, |
286 | struct drm_file *file_priv) | 285 | struct drm_file *file) |
287 | { | 286 | { |
288 | struct drm_i915_gem_set_tiling *args = data; | 287 | struct drm_i915_gem_set_tiling *args = data; |
289 | drm_i915_private_t *dev_priv = dev->dev_private; | 288 | drm_i915_private_t *dev_priv = dev->dev_private; |
290 | struct drm_gem_object *obj; | 289 | struct drm_i915_gem_object *obj; |
291 | struct drm_i915_gem_object *obj_priv; | ||
292 | int ret; | 290 | int ret; |
293 | 291 | ||
294 | ret = i915_gem_check_is_wedged(dev); | 292 | ret = i915_gem_check_is_wedged(dev); |
295 | if (ret) | 293 | if (ret) |
296 | return ret; | 294 | return ret; |
297 | 295 | ||
298 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 296 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
299 | if (obj == NULL) | 297 | if (obj == NULL) |
300 | return -ENOENT; | 298 | return -ENOENT; |
301 | obj_priv = to_intel_bo(obj); | ||
302 | 299 | ||
303 | if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { | 300 | if (!i915_tiling_ok(dev, |
304 | drm_gem_object_unreference_unlocked(obj); | 301 | args->stride, obj->base.size, args->tiling_mode)) { |
302 | drm_gem_object_unreference_unlocked(&obj->base); | ||
305 | return -EINVAL; | 303 | return -EINVAL; |
306 | } | 304 | } |
307 | 305 | ||
308 | if (obj_priv->pin_count) { | 306 | if (obj->pin_count) { |
309 | drm_gem_object_unreference_unlocked(obj); | 307 | drm_gem_object_unreference_unlocked(&obj->base); |
310 | return -EBUSY; | 308 | return -EBUSY; |
311 | } | 309 | } |
312 | 310 | ||
@@ -340,8 +338,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
340 | } | 338 | } |
341 | 339 | ||
342 | mutex_lock(&dev->struct_mutex); | 340 | mutex_lock(&dev->struct_mutex); |
343 | if (args->tiling_mode != obj_priv->tiling_mode || | 341 | if (args->tiling_mode != obj->tiling_mode || |
344 | args->stride != obj_priv->stride) { | 342 | args->stride != obj->stride) { |
345 | /* We need to rebind the object if its current allocation | 343 | /* We need to rebind the object if its current allocation |
346 | * no longer meets the alignment restrictions for its new | 344 | * no longer meets the alignment restrictions for its new |
347 | * tiling mode. Otherwise we can just leave it alone, but | 345 | * tiling mode. Otherwise we can just leave it alone, but |
@@ -349,22 +347,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
349 | */ | 347 | */ |
350 | if (!i915_gem_object_fence_ok(obj, args->tiling_mode)) | 348 | if (!i915_gem_object_fence_ok(obj, args->tiling_mode)) |
351 | ret = i915_gem_object_unbind(obj); | 349 | ret = i915_gem_object_unbind(obj); |
352 | else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | 350 | else if (obj->fence_reg != I915_FENCE_REG_NONE) |
353 | ret = i915_gem_object_put_fence_reg(obj, true); | 351 | ret = i915_gem_object_put_fence_reg(obj, true); |
354 | else | 352 | else |
355 | i915_gem_release_mmap(obj); | 353 | i915_gem_release_mmap(obj); |
356 | 354 | ||
357 | if (ret != 0) { | 355 | if (ret != 0) { |
358 | args->tiling_mode = obj_priv->tiling_mode; | 356 | args->tiling_mode = obj->tiling_mode; |
359 | args->stride = obj_priv->stride; | 357 | args->stride = obj->stride; |
360 | goto err; | 358 | goto err; |
361 | } | 359 | } |
362 | 360 | ||
363 | obj_priv->tiling_mode = args->tiling_mode; | 361 | obj->tiling_mode = args->tiling_mode; |
364 | obj_priv->stride = args->stride; | 362 | obj->stride = args->stride; |
365 | } | 363 | } |
366 | err: | 364 | err: |
367 | drm_gem_object_unreference(obj); | 365 | drm_gem_object_unreference(&obj->base); |
368 | mutex_unlock(&dev->struct_mutex); | 366 | mutex_unlock(&dev->struct_mutex); |
369 | 367 | ||
370 | return ret; | 368 | return ret; |
@@ -375,22 +373,20 @@ err: | |||
375 | */ | 373 | */ |
376 | int | 374 | int |
377 | i915_gem_get_tiling(struct drm_device *dev, void *data, | 375 | i915_gem_get_tiling(struct drm_device *dev, void *data, |
378 | struct drm_file *file_priv) | 376 | struct drm_file *file) |
379 | { | 377 | { |
380 | struct drm_i915_gem_get_tiling *args = data; | 378 | struct drm_i915_gem_get_tiling *args = data; |
381 | drm_i915_private_t *dev_priv = dev->dev_private; | 379 | drm_i915_private_t *dev_priv = dev->dev_private; |
382 | struct drm_gem_object *obj; | 380 | struct drm_i915_gem_object *obj; |
383 | struct drm_i915_gem_object *obj_priv; | ||
384 | 381 | ||
385 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 382 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
386 | if (obj == NULL) | 383 | if (obj == NULL) |
387 | return -ENOENT; | 384 | return -ENOENT; |
388 | obj_priv = to_intel_bo(obj); | ||
389 | 385 | ||
390 | mutex_lock(&dev->struct_mutex); | 386 | mutex_lock(&dev->struct_mutex); |
391 | 387 | ||
392 | args->tiling_mode = obj_priv->tiling_mode; | 388 | args->tiling_mode = obj->tiling_mode; |
393 | switch (obj_priv->tiling_mode) { | 389 | switch (obj->tiling_mode) { |
394 | case I915_TILING_X: | 390 | case I915_TILING_X: |
395 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; | 391 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; |
396 | break; | 392 | break; |
@@ -410,7 +406,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, | |||
410 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) | 406 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) |
411 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; | 407 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; |
412 | 408 | ||
413 | drm_gem_object_unreference(obj); | 409 | drm_gem_object_unreference(&obj->base); |
414 | mutex_unlock(&dev->struct_mutex); | 410 | mutex_unlock(&dev->struct_mutex); |
415 | 411 | ||
416 | return 0; | 412 | return 0; |
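
Userspace drives these two ioctls when it wants fenced tiling: it requests a tiling mode and stride with set_tiling, then reads back the swizzle mode with get_tiling to know whether it must swizzle CPU accesses itself. A hedged sketch; the struct, ioctl and I915_TILING_*/I915_BIT_6_SWIZZLE_* names are from i915_drm.h, while the handle and stride are placeholders and real code must pick a stride the hardware accepts:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int gem_set_tiling_x(int fd, uint32_t handle, uint32_t stride)
{
	struct drm_i915_gem_set_tiling set = {
		.handle = handle,
		.tiling_mode = I915_TILING_X,
		.stride = stride,	/* must satisfy i915_tiling_ok() */
	};
	/* The kernel may rewrite tiling_mode/stride in the args on return. */
	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set);
}

static uint32_t gem_get_swizzle(int fd, uint32_t handle)
{
	struct drm_i915_gem_get_tiling get = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get) != 0)
		return I915_BIT_6_SWIZZLE_NONE;
	return get.swizzle_mode;
}
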
@@ -440,46 +436,44 @@ i915_gem_swizzle_page(struct page *page) | |||
440 | } | 436 | } |
441 | 437 | ||
442 | void | 438 | void |
443 | i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) | 439 | i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) |
444 | { | 440 | { |
445 | struct drm_device *dev = obj->dev; | 441 | struct drm_device *dev = obj->base.dev; |
446 | drm_i915_private_t *dev_priv = dev->dev_private; | 442 | drm_i915_private_t *dev_priv = dev->dev_private; |
447 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 443 | int page_count = obj->base.size >> PAGE_SHIFT; |
448 | int page_count = obj->size >> PAGE_SHIFT; | ||
449 | int i; | 444 | int i; |
450 | 445 | ||
451 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) | 446 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) |
452 | return; | 447 | return; |
453 | 448 | ||
454 | if (obj_priv->bit_17 == NULL) | 449 | if (obj->bit_17 == NULL) |
455 | return; | 450 | return; |
456 | 451 | ||
457 | for (i = 0; i < page_count; i++) { | 452 | for (i = 0; i < page_count; i++) { |
458 | char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; | 453 | char new_bit_17 = page_to_phys(obj->pages[i]) >> 17; |
459 | if ((new_bit_17 & 0x1) != | 454 | if ((new_bit_17 & 0x1) != |
460 | (test_bit(i, obj_priv->bit_17) != 0)) { | 455 | (test_bit(i, obj->bit_17) != 0)) { |
461 | i915_gem_swizzle_page(obj_priv->pages[i]); | 456 | i915_gem_swizzle_page(obj->pages[i]); |
462 | set_page_dirty(obj_priv->pages[i]); | 457 | set_page_dirty(obj->pages[i]); |
463 | } | 458 | } |
464 | } | 459 | } |
465 | } | 460 | } |
466 | 461 | ||
467 | void | 462 | void |
468 | i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) | 463 | i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) |
469 | { | 464 | { |
470 | struct drm_device *dev = obj->dev; | 465 | struct drm_device *dev = obj->base.dev; |
471 | drm_i915_private_t *dev_priv = dev->dev_private; | 466 | drm_i915_private_t *dev_priv = dev->dev_private; |
472 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 467 | int page_count = obj->base.size >> PAGE_SHIFT; |
473 | int page_count = obj->size >> PAGE_SHIFT; | ||
474 | int i; | 468 | int i; |
475 | 469 | ||
476 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) | 470 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) |
477 | return; | 471 | return; |
478 | 472 | ||
479 | if (obj_priv->bit_17 == NULL) { | 473 | if (obj->bit_17 == NULL) { |
480 | obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * | 474 | obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * |
481 | sizeof(long), GFP_KERNEL); | 475 | sizeof(long), GFP_KERNEL); |
482 | if (obj_priv->bit_17 == NULL) { | 476 | if (obj->bit_17 == NULL) { |
483 | DRM_ERROR("Failed to allocate memory for bit 17 " | 477 | DRM_ERROR("Failed to allocate memory for bit 17 " |
484 | "record\n"); | 478 | "record\n"); |
485 | return; | 479 | return; |
@@ -487,9 +481,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) | |||
487 | } | 481 | } |
488 | 482 | ||
489 | for (i = 0; i < page_count; i++) { | 483 | for (i = 0; i < page_count; i++) { |
490 | if (page_to_phys(obj_priv->pages[i]) & (1 << 17)) | 484 | if (page_to_phys(obj->pages[i]) & (1 << 17)) |
491 | __set_bit(i, obj_priv->bit_17); | 485 | __set_bit(i, obj->bit_17); |
492 | else | 486 | else |
493 | __clear_bit(i, obj_priv->bit_17); | 487 | __clear_bit(i, obj->bit_17); |
494 | } | 488 | } |
495 | } | 489 | } |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a8f55f061f6d..09ac3bbd8165 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -423,28 +423,23 @@ static void i915_error_work_func(struct work_struct *work) | |||
423 | #ifdef CONFIG_DEBUG_FS | 423 | #ifdef CONFIG_DEBUG_FS |
424 | static struct drm_i915_error_object * | 424 | static struct drm_i915_error_object * |
425 | i915_error_object_create(struct drm_device *dev, | 425 | i915_error_object_create(struct drm_device *dev, |
426 | struct drm_gem_object *src) | 426 | struct drm_i915_gem_object *src) |
427 | { | 427 | { |
428 | drm_i915_private_t *dev_priv = dev->dev_private; | 428 | drm_i915_private_t *dev_priv = dev->dev_private; |
429 | struct drm_i915_error_object *dst; | 429 | struct drm_i915_error_object *dst; |
430 | struct drm_i915_gem_object *src_priv; | ||
431 | int page, page_count; | 430 | int page, page_count; |
432 | u32 reloc_offset; | 431 | u32 reloc_offset; |
433 | 432 | ||
434 | if (src == NULL) | 433 | if (src == NULL || src->pages == NULL) |
435 | return NULL; | 434 | return NULL; |
436 | 435 | ||
437 | src_priv = to_intel_bo(src); | 436 | page_count = src->base.size / PAGE_SIZE; |
438 | if (src_priv->pages == NULL) | ||
439 | return NULL; | ||
440 | |||
441 | page_count = src->size / PAGE_SIZE; | ||
442 | 437 | ||
443 | dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); | 438 | dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); |
444 | if (dst == NULL) | 439 | if (dst == NULL) |
445 | return NULL; | 440 | return NULL; |
446 | 441 | ||
447 | reloc_offset = src_priv->gtt_offset; | 442 | reloc_offset = src->gtt_offset; |
448 | for (page = 0; page < page_count; page++) { | 443 | for (page = 0; page < page_count; page++) { |
449 | unsigned long flags; | 444 | unsigned long flags; |
450 | void __iomem *s; | 445 | void __iomem *s; |
@@ -466,7 +461,7 @@ i915_error_object_create(struct drm_device *dev, | |||
466 | reloc_offset += PAGE_SIZE; | 461 | reloc_offset += PAGE_SIZE; |
467 | } | 462 | } |
468 | dst->page_count = page_count; | 463 | dst->page_count = page_count; |
469 | dst->gtt_offset = src_priv->gtt_offset; | 464 | dst->gtt_offset = src->gtt_offset; |
470 | 465 | ||
471 | return dst; | 466 | return dst; |
472 | 467 | ||
@@ -598,9 +593,9 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err, | |||
598 | static void i915_capture_error_state(struct drm_device *dev) | 593 | static void i915_capture_error_state(struct drm_device *dev) |
599 | { | 594 | { |
600 | struct drm_i915_private *dev_priv = dev->dev_private; | 595 | struct drm_i915_private *dev_priv = dev->dev_private; |
601 | struct drm_i915_gem_object *obj_priv; | 596 | struct drm_i915_gem_object *obj; |
602 | struct drm_i915_error_state *error; | 597 | struct drm_i915_error_state *error; |
603 | struct drm_gem_object *batchbuffer[2]; | 598 | struct drm_i915_gem_object *batchbuffer[2]; |
604 | unsigned long flags; | 599 | unsigned long flags; |
605 | u32 bbaddr; | 600 | u32 bbaddr; |
606 | int count; | 601 | int count; |
@@ -668,34 +663,30 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
668 | batchbuffer[0] = NULL; | 663 | batchbuffer[0] = NULL; |
669 | batchbuffer[1] = NULL; | 664 | batchbuffer[1] = NULL; |
670 | count = 0; | 665 | count = 0; |
671 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { | 666 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
672 | struct drm_gem_object *obj = &obj_priv->base; | ||
673 | |||
674 | if (batchbuffer[0] == NULL && | 667 | if (batchbuffer[0] == NULL && |
675 | bbaddr >= obj_priv->gtt_offset && | 668 | bbaddr >= obj->gtt_offset && |
676 | bbaddr < obj_priv->gtt_offset + obj->size) | 669 | bbaddr < obj->gtt_offset + obj->base.size) |
677 | batchbuffer[0] = obj; | 670 | batchbuffer[0] = obj; |
678 | 671 | ||
679 | if (batchbuffer[1] == NULL && | 672 | if (batchbuffer[1] == NULL && |
680 | error->acthd >= obj_priv->gtt_offset && | 673 | error->acthd >= obj->gtt_offset && |
681 | error->acthd < obj_priv->gtt_offset + obj->size) | 674 | error->acthd < obj->gtt_offset + obj->base.size) |
682 | batchbuffer[1] = obj; | 675 | batchbuffer[1] = obj; |
683 | 676 | ||
684 | count++; | 677 | count++; |
685 | } | 678 | } |
686 | /* Scan the other lists for completeness for those bizarre errors. */ | 679 | /* Scan the other lists for completeness for those bizarre errors. */ |
687 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | 680 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { |
688 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { | 681 | list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { |
689 | struct drm_gem_object *obj = &obj_priv->base; | ||
690 | |||
691 | if (batchbuffer[0] == NULL && | 682 | if (batchbuffer[0] == NULL && |
692 | bbaddr >= obj_priv->gtt_offset && | 683 | bbaddr >= obj->gtt_offset && |
693 | bbaddr < obj_priv->gtt_offset + obj->size) | 684 | bbaddr < obj->gtt_offset + obj->base.size) |
694 | batchbuffer[0] = obj; | 685 | batchbuffer[0] = obj; |
695 | 686 | ||
696 | if (batchbuffer[1] == NULL && | 687 | if (batchbuffer[1] == NULL && |
697 | error->acthd >= obj_priv->gtt_offset && | 688 | error->acthd >= obj->gtt_offset && |
698 | error->acthd < obj_priv->gtt_offset + obj->size) | 689 | error->acthd < obj->gtt_offset + obj->base.size) |
699 | batchbuffer[1] = obj; | 690 | batchbuffer[1] = obj; |
700 | 691 | ||
701 | if (batchbuffer[0] && batchbuffer[1]) | 692 | if (batchbuffer[0] && batchbuffer[1]) |
@@ -703,17 +694,15 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
703 | } | 694 | } |
704 | } | 695 | } |
705 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | 696 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { |
706 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { | 697 | list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { |
707 | struct drm_gem_object *obj = &obj_priv->base; | ||
708 | |||
709 | if (batchbuffer[0] == NULL && | 698 | if (batchbuffer[0] == NULL && |
710 | bbaddr >= obj_priv->gtt_offset && | 699 | bbaddr >= obj->gtt_offset && |
711 | bbaddr < obj_priv->gtt_offset + obj->size) | 700 | bbaddr < obj->gtt_offset + obj->base.size) |
712 | batchbuffer[0] = obj; | 701 | batchbuffer[0] = obj; |
713 | 702 | ||
714 | if (batchbuffer[1] == NULL && | 703 | if (batchbuffer[1] == NULL && |
715 | error->acthd >= obj_priv->gtt_offset && | 704 | error->acthd >= obj->gtt_offset && |
716 | error->acthd < obj_priv->gtt_offset + obj->size) | 705 | error->acthd < obj->gtt_offset + obj->base.size) |
717 | batchbuffer[1] = obj; | 706 | batchbuffer[1] = obj; |
718 | 707 | ||
719 | if (batchbuffer[0] && batchbuffer[1]) | 708 | if (batchbuffer[0] && batchbuffer[1]) |
@@ -732,14 +721,14 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
732 | 721 | ||
733 | /* Record the ringbuffer */ | 722 | /* Record the ringbuffer */ |
734 | error->ringbuffer = i915_error_object_create(dev, | 723 | error->ringbuffer = i915_error_object_create(dev, |
735 | dev_priv->render_ring.gem_object); | 724 | dev_priv->render_ring.obj); |
736 | 725 | ||
737 | /* Record buffers on the active and pinned lists. */ | 726 | /* Record buffers on the active and pinned lists. */ |
738 | error->active_bo = NULL; | 727 | error->active_bo = NULL; |
739 | error->pinned_bo = NULL; | 728 | error->pinned_bo = NULL; |
740 | 729 | ||
741 | error->active_bo_count = count; | 730 | error->active_bo_count = count; |
742 | list_for_each_entry(obj_priv, &dev_priv->mm.pinned_list, mm_list) | 731 | list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) |
743 | count++; | 732 | count++; |
744 | error->pinned_bo_count = count - error->active_bo_count; | 733 | error->pinned_bo_count = count - error->active_bo_count; |
745 | 734 | ||
@@ -948,7 +937,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) | |||
948 | drm_i915_private_t *dev_priv = dev->dev_private; | 937 | drm_i915_private_t *dev_priv = dev->dev_private; |
949 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 938 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
950 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 939 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
951 | struct drm_i915_gem_object *obj_priv; | 940 | struct drm_i915_gem_object *obj; |
952 | struct intel_unpin_work *work; | 941 | struct intel_unpin_work *work; |
953 | unsigned long flags; | 942 | unsigned long flags; |
954 | bool stall_detected; | 943 | bool stall_detected; |
@@ -967,13 +956,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) | |||
967 | } | 956 | } |
968 | 957 | ||
969 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ | 958 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ |
970 | obj_priv = to_intel_bo(work->pending_flip_obj); | 959 | obj = work->pending_flip_obj; |
971 | if (INTEL_INFO(dev)->gen >= 4) { | 960 | if (INTEL_INFO(dev)->gen >= 4) { |
972 | int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; | 961 | int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; |
973 | stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; | 962 | stall_detected = I915_READ(dspsurf) == obj->gtt_offset; |
974 | } else { | 963 | } else { |
975 | int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; | 964 | int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; |
976 | stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset + | 965 | stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + |
977 | crtc->y * crtc->fb->pitch + | 966 | crtc->y * crtc->fb->pitch + |
978 | crtc->x * crtc->fb->bits_per_pixel/8); | 967 | crtc->x * crtc->fb->bits_per_pixel/8); |
979 | } | 968 | } |
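For reference, a minimal background sketch of the two types this rename moves between (layout assumed from how the hunks use &obj->base and to_intel_bo(); not part of the patch): the driver object embeds the DRM core object, so one direction is a field access and the other a container_of().

/* Background sketch only, not part of the patch.  The member list is
 * abridged; the field names are the ones used in the hunks above.
 */
struct drm_i915_gem_object {
	struct drm_gem_object base;	/* embedded DRM core object */

	/* driver-private state referenced throughout this diff */
	uint32_t gtt_offset;
	int fence_reg;
	unsigned int tiling_mode;
	/* ... */
};

/* core -> driver: recover the containing object from the embedded base */
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

/* driver -> core: take the address of the embedded base, e.g.
 *     drm_gem_object_unreference(&obj->base);
 *     size = obj->base.size;
 */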
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 34ef49fd0377..1df7262ae077 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/tracepoint.h> | 6 | #include <linux/tracepoint.h> |
7 | 7 | ||
8 | #include <drm/drmP.h> | 8 | #include <drm/drmP.h> |
9 | #include "i915_drv.h" | ||
9 | 10 | ||
10 | #undef TRACE_SYSTEM | 11 | #undef TRACE_SYSTEM |
11 | #define TRACE_SYSTEM i915 | 12 | #define TRACE_SYSTEM i915 |
@@ -16,18 +17,18 @@ | |||
16 | 17 | ||
17 | TRACE_EVENT(i915_gem_object_create, | 18 | TRACE_EVENT(i915_gem_object_create, |
18 | 19 | ||
19 | TP_PROTO(struct drm_gem_object *obj), | 20 | TP_PROTO(struct drm_i915_gem_object *obj), |
20 | 21 | ||
21 | TP_ARGS(obj), | 22 | TP_ARGS(obj), |
22 | 23 | ||
23 | TP_STRUCT__entry( | 24 | TP_STRUCT__entry( |
24 | __field(struct drm_gem_object *, obj) | 25 | __field(struct drm_i915_gem_object *, obj) |
25 | __field(u32, size) | 26 | __field(u32, size) |
26 | ), | 27 | ), |
27 | 28 | ||
28 | TP_fast_assign( | 29 | TP_fast_assign( |
29 | __entry->obj = obj; | 30 | __entry->obj = obj; |
30 | __entry->size = obj->size; | 31 | __entry->size = obj->base.size; |
31 | ), | 32 | ), |
32 | 33 | ||
33 | TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) | 34 | TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) |
@@ -35,12 +36,12 @@ TRACE_EVENT(i915_gem_object_create, | |||
35 | 36 | ||
36 | TRACE_EVENT(i915_gem_object_bind, | 37 | TRACE_EVENT(i915_gem_object_bind, |
37 | 38 | ||
38 | TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset, bool mappable), | 39 | TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable), |
39 | 40 | ||
40 | TP_ARGS(obj, gtt_offset, mappable), | 41 | TP_ARGS(obj, gtt_offset, mappable), |
41 | 42 | ||
42 | TP_STRUCT__entry( | 43 | TP_STRUCT__entry( |
43 | __field(struct drm_gem_object *, obj) | 44 | __field(struct drm_i915_gem_object *, obj) |
44 | __field(u32, gtt_offset) | 45 | __field(u32, gtt_offset) |
45 | __field(bool, mappable) | 46 | __field(bool, mappable) |
46 | ), | 47 | ), |
@@ -58,20 +59,20 @@ TRACE_EVENT(i915_gem_object_bind, | |||
58 | 59 | ||
59 | TRACE_EVENT(i915_gem_object_change_domain, | 60 | TRACE_EVENT(i915_gem_object_change_domain, |
60 | 61 | ||
61 | TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), | 62 | TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), |
62 | 63 | ||
63 | TP_ARGS(obj, old_read_domains, old_write_domain), | 64 | TP_ARGS(obj, old_read_domains, old_write_domain), |
64 | 65 | ||
65 | TP_STRUCT__entry( | 66 | TP_STRUCT__entry( |
66 | __field(struct drm_gem_object *, obj) | 67 | __field(struct drm_i915_gem_object *, obj) |
67 | __field(u32, read_domains) | 68 | __field(u32, read_domains) |
68 | __field(u32, write_domain) | 69 | __field(u32, write_domain) |
69 | ), | 70 | ), |
70 | 71 | ||
71 | TP_fast_assign( | 72 | TP_fast_assign( |
72 | __entry->obj = obj; | 73 | __entry->obj = obj; |
73 | __entry->read_domains = obj->read_domains | (old_read_domains << 16); | 74 | __entry->read_domains = obj->base.read_domains | (old_read_domains << 16); |
74 | __entry->write_domain = obj->write_domain | (old_write_domain << 16); | 75 | __entry->write_domain = obj->base.write_domain | (old_write_domain << 16); |
75 | ), | 76 | ), |
76 | 77 | ||
77 | TP_printk("obj=%p, read=%04x, write=%04x", | 78 | TP_printk("obj=%p, read=%04x, write=%04x", |
@@ -81,12 +82,12 @@ TRACE_EVENT(i915_gem_object_change_domain, | |||
81 | 82 | ||
82 | TRACE_EVENT(i915_gem_object_get_fence, | 83 | TRACE_EVENT(i915_gem_object_get_fence, |
83 | 84 | ||
84 | TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode), | 85 | TP_PROTO(struct drm_i915_gem_object *obj, int fence, int tiling_mode), |
85 | 86 | ||
86 | TP_ARGS(obj, fence, tiling_mode), | 87 | TP_ARGS(obj, fence, tiling_mode), |
87 | 88 | ||
88 | TP_STRUCT__entry( | 89 | TP_STRUCT__entry( |
89 | __field(struct drm_gem_object *, obj) | 90 | __field(struct drm_i915_gem_object *, obj) |
90 | __field(int, fence) | 91 | __field(int, fence) |
91 | __field(int, tiling_mode) | 92 | __field(int, tiling_mode) |
92 | ), | 93 | ), |
@@ -103,12 +104,12 @@ TRACE_EVENT(i915_gem_object_get_fence, | |||
103 | 104 | ||
104 | DECLARE_EVENT_CLASS(i915_gem_object, | 105 | DECLARE_EVENT_CLASS(i915_gem_object, |
105 | 106 | ||
106 | TP_PROTO(struct drm_gem_object *obj), | 107 | TP_PROTO(struct drm_i915_gem_object *obj), |
107 | 108 | ||
108 | TP_ARGS(obj), | 109 | TP_ARGS(obj), |
109 | 110 | ||
110 | TP_STRUCT__entry( | 111 | TP_STRUCT__entry( |
111 | __field(struct drm_gem_object *, obj) | 112 | __field(struct drm_i915_gem_object *, obj) |
112 | ), | 113 | ), |
113 | 114 | ||
114 | TP_fast_assign( | 115 | TP_fast_assign( |
@@ -120,21 +121,21 @@ DECLARE_EVENT_CLASS(i915_gem_object, | |||
120 | 121 | ||
121 | DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, | 122 | DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, |
122 | 123 | ||
123 | TP_PROTO(struct drm_gem_object *obj), | 124 | TP_PROTO(struct drm_i915_gem_object *obj), |
124 | 125 | ||
125 | TP_ARGS(obj) | 126 | TP_ARGS(obj) |
126 | ); | 127 | ); |
127 | 128 | ||
128 | DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, | 129 | DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, |
129 | 130 | ||
130 | TP_PROTO(struct drm_gem_object *obj), | 131 | TP_PROTO(struct drm_i915_gem_object *obj), |
131 | 132 | ||
132 | TP_ARGS(obj) | 133 | TP_ARGS(obj) |
133 | ); | 134 | ); |
134 | 135 | ||
135 | DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, | 136 | DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, |
136 | 137 | ||
137 | TP_PROTO(struct drm_gem_object *obj), | 138 | TP_PROTO(struct drm_i915_gem_object *obj), |
138 | 139 | ||
139 | TP_ARGS(obj) | 140 | TP_ARGS(obj) |
140 | ); | 141 | ); |
@@ -266,13 +267,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end, | |||
266 | ); | 267 | ); |
267 | 268 | ||
268 | TRACE_EVENT(i915_flip_request, | 269 | TRACE_EVENT(i915_flip_request, |
269 | TP_PROTO(int plane, struct drm_gem_object *obj), | 270 | TP_PROTO(int plane, struct drm_i915_gem_object *obj), |
270 | 271 | ||
271 | TP_ARGS(plane, obj), | 272 | TP_ARGS(plane, obj), |
272 | 273 | ||
273 | TP_STRUCT__entry( | 274 | TP_STRUCT__entry( |
274 | __field(int, plane) | 275 | __field(int, plane) |
275 | __field(struct drm_gem_object *, obj) | 276 | __field(struct drm_i915_gem_object *, obj) |
276 | ), | 277 | ), |
277 | 278 | ||
278 | TP_fast_assign( | 279 | TP_fast_assign( |
@@ -284,13 +285,13 @@ TRACE_EVENT(i915_flip_request, | |||
284 | ); | 285 | ); |
285 | 286 | ||
286 | TRACE_EVENT(i915_flip_complete, | 287 | TRACE_EVENT(i915_flip_complete, |
287 | TP_PROTO(int plane, struct drm_gem_object *obj), | 288 | TP_PROTO(int plane, struct drm_i915_gem_object *obj), |
288 | 289 | ||
289 | TP_ARGS(plane, obj), | 290 | TP_ARGS(plane, obj), |
290 | 291 | ||
291 | TP_STRUCT__entry( | 292 | TP_STRUCT__entry( |
292 | __field(int, plane) | 293 | __field(int, plane) |
293 | __field(struct drm_gem_object *, obj) | 294 | __field(struct drm_i915_gem_object *, obj) |
294 | ), | 295 | ), |
295 | 296 | ||
296 | TP_fast_assign( | 297 | TP_fast_assign( |
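Since the tracepoints now take the driver-private type, call sites simply pass the struct drm_i915_gem_object pointer they already hold. A sketch of a caller, assumed for illustration (the real callers live in i915_gem.c, which this commit updates in the same way):

/* Sketch of a tracepoint caller after the prototype change.  The
 * trace_i915_gem_object_create() stub is generated by TRACE_EVENT();
 * the surrounding allocation code is assumed for the example.
 */
struct drm_i915_gem_object *obj;

obj = i915_gem_alloc_object(dev, args->size);
if (obj == NULL)
	return -ENOMEM;

/* no to_intel_bo()/&obj->base conversion needed any more */
trace_i915_gem_object_create(obj);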
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d4bc443f43fc..ae7d4f55ce07 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1066,13 +1066,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1066 | struct drm_i915_private *dev_priv = dev->dev_private; | 1066 | struct drm_i915_private *dev_priv = dev->dev_private; |
1067 | struct drm_framebuffer *fb = crtc->fb; | 1067 | struct drm_framebuffer *fb = crtc->fb; |
1068 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1068 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1069 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1069 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1070 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1070 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1071 | int plane, i; | 1071 | int plane, i; |
1072 | u32 fbc_ctl, fbc_ctl2; | 1072 | u32 fbc_ctl, fbc_ctl2; |
1073 | 1073 | ||
1074 | if (fb->pitch == dev_priv->cfb_pitch && | 1074 | if (fb->pitch == dev_priv->cfb_pitch && |
1075 | obj_priv->fence_reg == dev_priv->cfb_fence && | 1075 | obj->fence_reg == dev_priv->cfb_fence && |
1076 | intel_crtc->plane == dev_priv->cfb_plane && | 1076 | intel_crtc->plane == dev_priv->cfb_plane && |
1077 | I915_READ(FBC_CONTROL) & FBC_CTL_EN) | 1077 | I915_READ(FBC_CONTROL) & FBC_CTL_EN) |
1078 | return; | 1078 | return; |
@@ -1086,7 +1086,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1086 | 1086 | ||
1087 | /* FBC_CTL wants 64B units */ | 1087 | /* FBC_CTL wants 64B units */ |
1088 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1088 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1089 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1089 | dev_priv->cfb_fence = obj->fence_reg; |
1090 | dev_priv->cfb_plane = intel_crtc->plane; | 1090 | dev_priv->cfb_plane = intel_crtc->plane; |
1091 | plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; | 1091 | plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; |
1092 | 1092 | ||
@@ -1096,7 +1096,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1096 | 1096 | ||
1097 | /* Set it up... */ | 1097 | /* Set it up... */ |
1098 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; | 1098 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; |
1099 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1099 | if (obj->tiling_mode != I915_TILING_NONE) |
1100 | fbc_ctl2 |= FBC_CTL_CPU_FENCE; | 1100 | fbc_ctl2 |= FBC_CTL_CPU_FENCE; |
1101 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); | 1101 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); |
1102 | I915_WRITE(FBC_FENCE_OFF, crtc->y); | 1102 | I915_WRITE(FBC_FENCE_OFF, crtc->y); |
@@ -1107,7 +1107,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1107 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ | 1107 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ |
1108 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | 1108 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; |
1109 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; | 1109 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; |
1110 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1110 | if (obj->tiling_mode != I915_TILING_NONE) |
1111 | fbc_ctl |= dev_priv->cfb_fence; | 1111 | fbc_ctl |= dev_priv->cfb_fence; |
1112 | I915_WRITE(FBC_CONTROL, fbc_ctl); | 1112 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
1113 | 1113 | ||
@@ -1150,7 +1150,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1150 | struct drm_i915_private *dev_priv = dev->dev_private; | 1150 | struct drm_i915_private *dev_priv = dev->dev_private; |
1151 | struct drm_framebuffer *fb = crtc->fb; | 1151 | struct drm_framebuffer *fb = crtc->fb; |
1152 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1152 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1153 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1153 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1154 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1154 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1155 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; | 1155 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
1156 | unsigned long stall_watermark = 200; | 1156 | unsigned long stall_watermark = 200; |
@@ -1159,7 +1159,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1159 | dpfc_ctl = I915_READ(DPFC_CONTROL); | 1159 | dpfc_ctl = I915_READ(DPFC_CONTROL); |
1160 | if (dpfc_ctl & DPFC_CTL_EN) { | 1160 | if (dpfc_ctl & DPFC_CTL_EN) { |
1161 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && | 1161 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && |
1162 | dev_priv->cfb_fence == obj_priv->fence_reg && | 1162 | dev_priv->cfb_fence == obj->fence_reg && |
1163 | dev_priv->cfb_plane == intel_crtc->plane && | 1163 | dev_priv->cfb_plane == intel_crtc->plane && |
1164 | dev_priv->cfb_y == crtc->y) | 1164 | dev_priv->cfb_y == crtc->y) |
1165 | return; | 1165 | return; |
@@ -1170,12 +1170,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1170 | } | 1170 | } |
1171 | 1171 | ||
1172 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1172 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1173 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1173 | dev_priv->cfb_fence = obj->fence_reg; |
1174 | dev_priv->cfb_plane = intel_crtc->plane; | 1174 | dev_priv->cfb_plane = intel_crtc->plane; |
1175 | dev_priv->cfb_y = crtc->y; | 1175 | dev_priv->cfb_y = crtc->y; |
1176 | 1176 | ||
1177 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; | 1177 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; |
1178 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1178 | if (obj->tiling_mode != I915_TILING_NONE) { |
1179 | dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; | 1179 | dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; |
1180 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); | 1180 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); |
1181 | } else { | 1181 | } else { |
@@ -1221,7 +1221,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1221 | struct drm_i915_private *dev_priv = dev->dev_private; | 1221 | struct drm_i915_private *dev_priv = dev->dev_private; |
1222 | struct drm_framebuffer *fb = crtc->fb; | 1222 | struct drm_framebuffer *fb = crtc->fb; |
1223 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1223 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1224 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1224 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1225 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1225 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1226 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; | 1226 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
1227 | unsigned long stall_watermark = 200; | 1227 | unsigned long stall_watermark = 200; |
@@ -1230,9 +1230,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1230 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | 1230 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); |
1231 | if (dpfc_ctl & DPFC_CTL_EN) { | 1231 | if (dpfc_ctl & DPFC_CTL_EN) { |
1232 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && | 1232 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && |
1233 | dev_priv->cfb_fence == obj_priv->fence_reg && | 1233 | dev_priv->cfb_fence == obj->fence_reg && |
1234 | dev_priv->cfb_plane == intel_crtc->plane && | 1234 | dev_priv->cfb_plane == intel_crtc->plane && |
1235 | dev_priv->cfb_offset == obj_priv->gtt_offset && | 1235 | dev_priv->cfb_offset == obj->gtt_offset && |
1236 | dev_priv->cfb_y == crtc->y) | 1236 | dev_priv->cfb_y == crtc->y) |
1237 | return; | 1237 | return; |
1238 | 1238 | ||
@@ -1242,14 +1242,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1242 | } | 1242 | } |
1243 | 1243 | ||
1244 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1244 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1245 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1245 | dev_priv->cfb_fence = obj->fence_reg; |
1246 | dev_priv->cfb_plane = intel_crtc->plane; | 1246 | dev_priv->cfb_plane = intel_crtc->plane; |
1247 | dev_priv->cfb_offset = obj_priv->gtt_offset; | 1247 | dev_priv->cfb_offset = obj->gtt_offset; |
1248 | dev_priv->cfb_y = crtc->y; | 1248 | dev_priv->cfb_y = crtc->y; |
1249 | 1249 | ||
1250 | dpfc_ctl &= DPFC_RESERVED; | 1250 | dpfc_ctl &= DPFC_RESERVED; |
1251 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); | 1251 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); |
1252 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1252 | if (obj->tiling_mode != I915_TILING_NONE) { |
1253 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); | 1253 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); |
1254 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); | 1254 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); |
1255 | } else { | 1255 | } else { |
@@ -1260,7 +1260,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1260 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | 1260 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | |
1261 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | 1261 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); |
1262 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); | 1262 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); |
1263 | I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); | 1263 | I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); |
1264 | /* enable it... */ | 1264 | /* enable it... */ |
1265 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | 1265 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); |
1266 | 1266 | ||
@@ -1345,7 +1345,7 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1345 | struct intel_crtc *intel_crtc; | 1345 | struct intel_crtc *intel_crtc; |
1346 | struct drm_framebuffer *fb; | 1346 | struct drm_framebuffer *fb; |
1347 | struct intel_framebuffer *intel_fb; | 1347 | struct intel_framebuffer *intel_fb; |
1348 | struct drm_i915_gem_object *obj_priv; | 1348 | struct drm_i915_gem_object *obj; |
1349 | 1349 | ||
1350 | DRM_DEBUG_KMS("\n"); | 1350 | DRM_DEBUG_KMS("\n"); |
1351 | 1351 | ||
@@ -1384,9 +1384,9 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1384 | intel_crtc = to_intel_crtc(crtc); | 1384 | intel_crtc = to_intel_crtc(crtc); |
1385 | fb = crtc->fb; | 1385 | fb = crtc->fb; |
1386 | intel_fb = to_intel_framebuffer(fb); | 1386 | intel_fb = to_intel_framebuffer(fb); |
1387 | obj_priv = to_intel_bo(intel_fb->obj); | 1387 | obj = intel_fb->obj; |
1388 | 1388 | ||
1389 | if (intel_fb->obj->size > dev_priv->cfb_size) { | 1389 | if (intel_fb->obj->base.size > dev_priv->cfb_size) { |
1390 | DRM_DEBUG_KMS("framebuffer too large, disabling " | 1390 | DRM_DEBUG_KMS("framebuffer too large, disabling " |
1391 | "compression\n"); | 1391 | "compression\n"); |
1392 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; | 1392 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; |
@@ -1410,7 +1410,7 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1410 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; | 1410 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; |
1411 | goto out_disable; | 1411 | goto out_disable; |
1412 | } | 1412 | } |
1413 | if (obj_priv->tiling_mode != I915_TILING_X) { | 1413 | if (obj->tiling_mode != I915_TILING_X) { |
1414 | DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); | 1414 | DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); |
1415 | dev_priv->no_fbc_reason = FBC_NOT_TILED; | 1415 | dev_priv->no_fbc_reason = FBC_NOT_TILED; |
1416 | goto out_disable; | 1416 | goto out_disable; |
@@ -1433,14 +1433,13 @@ out_disable: | |||
1433 | 1433 | ||
1434 | int | 1434 | int |
1435 | intel_pin_and_fence_fb_obj(struct drm_device *dev, | 1435 | intel_pin_and_fence_fb_obj(struct drm_device *dev, |
1436 | struct drm_gem_object *obj, | 1436 | struct drm_i915_gem_object *obj, |
1437 | bool pipelined) | 1437 | bool pipelined) |
1438 | { | 1438 | { |
1439 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1440 | u32 alignment; | 1439 | u32 alignment; |
1441 | int ret; | 1440 | int ret; |
1442 | 1441 | ||
1443 | switch (obj_priv->tiling_mode) { | 1442 | switch (obj->tiling_mode) { |
1444 | case I915_TILING_NONE: | 1443 | case I915_TILING_NONE: |
1445 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | 1444 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
1446 | alignment = 128 * 1024; | 1445 | alignment = 128 * 1024; |
@@ -1474,7 +1473,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
1474 | * framebuffer compression. For simplicity, we always install | 1473 | * framebuffer compression. For simplicity, we always install |
1475 | * a fence as the cost is not that onerous. | 1474 | * a fence as the cost is not that onerous. |
1476 | */ | 1475 | */ |
1477 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1476 | if (obj->tiling_mode != I915_TILING_NONE) { |
1478 | ret = i915_gem_object_get_fence_reg(obj, false); | 1477 | ret = i915_gem_object_get_fence_reg(obj, false); |
1479 | if (ret) | 1478 | if (ret) |
1480 | goto err_unpin; | 1479 | goto err_unpin; |
@@ -1496,8 +1495,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1496 | struct drm_i915_private *dev_priv = dev->dev_private; | 1495 | struct drm_i915_private *dev_priv = dev->dev_private; |
1497 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1496 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1498 | struct intel_framebuffer *intel_fb; | 1497 | struct intel_framebuffer *intel_fb; |
1499 | struct drm_i915_gem_object *obj_priv; | 1498 | struct drm_i915_gem_object *obj; |
1500 | struct drm_gem_object *obj; | ||
1501 | int plane = intel_crtc->plane; | 1499 | int plane = intel_crtc->plane; |
1502 | unsigned long Start, Offset; | 1500 | unsigned long Start, Offset; |
1503 | u32 dspcntr; | 1501 | u32 dspcntr; |
@@ -1514,7 +1512,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1514 | 1512 | ||
1515 | intel_fb = to_intel_framebuffer(fb); | 1513 | intel_fb = to_intel_framebuffer(fb); |
1516 | obj = intel_fb->obj; | 1514 | obj = intel_fb->obj; |
1517 | obj_priv = to_intel_bo(obj); | ||
1518 | 1515 | ||
1519 | reg = DSPCNTR(plane); | 1516 | reg = DSPCNTR(plane); |
1520 | dspcntr = I915_READ(reg); | 1517 | dspcntr = I915_READ(reg); |
@@ -1539,7 +1536,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1539 | return -EINVAL; | 1536 | return -EINVAL; |
1540 | } | 1537 | } |
1541 | if (INTEL_INFO(dev)->gen >= 4) { | 1538 | if (INTEL_INFO(dev)->gen >= 4) { |
1542 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1539 | if (obj->tiling_mode != I915_TILING_NONE) |
1543 | dspcntr |= DISPPLANE_TILED; | 1540 | dspcntr |= DISPPLANE_TILED; |
1544 | else | 1541 | else |
1545 | dspcntr &= ~DISPPLANE_TILED; | 1542 | dspcntr &= ~DISPPLANE_TILED; |
@@ -1551,7 +1548,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1551 | 1548 | ||
1552 | I915_WRITE(reg, dspcntr); | 1549 | I915_WRITE(reg, dspcntr); |
1553 | 1550 | ||
1554 | Start = obj_priv->gtt_offset; | 1551 | Start = obj->gtt_offset; |
1555 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); | 1552 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); |
1556 | 1553 | ||
1557 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", | 1554 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
@@ -1605,18 +1602,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1605 | 1602 | ||
1606 | if (old_fb) { | 1603 | if (old_fb) { |
1607 | struct drm_i915_private *dev_priv = dev->dev_private; | 1604 | struct drm_i915_private *dev_priv = dev->dev_private; |
1608 | struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj; | 1605 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; |
1609 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1610 | 1606 | ||
1611 | wait_event(dev_priv->pending_flip_queue, | 1607 | wait_event(dev_priv->pending_flip_queue, |
1612 | atomic_read(&obj_priv->pending_flip) == 0); | 1608 | atomic_read(&obj->pending_flip) == 0); |
1613 | 1609 | ||
1614 | /* Big Hammer, we also need to ensure that any pending | 1610 | /* Big Hammer, we also need to ensure that any pending |
1615 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | 1611 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the |
1616 | * current scanout is retired before unpinning the old | 1612 | * current scanout is retired before unpinning the old |
1617 | * framebuffer. | 1613 | * framebuffer. |
1618 | */ | 1614 | */ |
1619 | ret = i915_gem_object_flush_gpu(obj_priv, false); | 1615 | ret = i915_gem_object_flush_gpu(obj, false); |
1620 | if (ret) { | 1616 | if (ret) { |
1621 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | 1617 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); |
1622 | mutex_unlock(&dev->struct_mutex); | 1618 | mutex_unlock(&dev->struct_mutex); |
@@ -2010,16 +2006,16 @@ static void intel_clear_scanline_wait(struct drm_device *dev) | |||
2010 | 2006 | ||
2011 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | 2007 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
2012 | { | 2008 | { |
2013 | struct drm_i915_gem_object *obj_priv; | 2009 | struct drm_i915_gem_object *obj; |
2014 | struct drm_i915_private *dev_priv; | 2010 | struct drm_i915_private *dev_priv; |
2015 | 2011 | ||
2016 | if (crtc->fb == NULL) | 2012 | if (crtc->fb == NULL) |
2017 | return; | 2013 | return; |
2018 | 2014 | ||
2019 | obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj); | 2015 | obj = to_intel_framebuffer(crtc->fb)->obj; |
2020 | dev_priv = crtc->dev->dev_private; | 2016 | dev_priv = crtc->dev->dev_private; |
2021 | wait_event(dev_priv->pending_flip_queue, | 2017 | wait_event(dev_priv->pending_flip_queue, |
2022 | atomic_read(&obj_priv->pending_flip) == 0); | 2018 | atomic_read(&obj->pending_flip) == 0); |
2023 | } | 2019 | } |
2024 | 2020 | ||
2025 | static void ironlake_crtc_enable(struct drm_crtc *crtc) | 2021 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
@@ -4333,15 +4329,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
4333 | } | 4329 | } |
4334 | 4330 | ||
4335 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, | 4331 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, |
4336 | struct drm_file *file_priv, | 4332 | struct drm_file *file, |
4337 | uint32_t handle, | 4333 | uint32_t handle, |
4338 | uint32_t width, uint32_t height) | 4334 | uint32_t width, uint32_t height) |
4339 | { | 4335 | { |
4340 | struct drm_device *dev = crtc->dev; | 4336 | struct drm_device *dev = crtc->dev; |
4341 | struct drm_i915_private *dev_priv = dev->dev_private; | 4337 | struct drm_i915_private *dev_priv = dev->dev_private; |
4342 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4338 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4343 | struct drm_gem_object *bo; | 4339 | struct drm_i915_gem_object *obj; |
4344 | struct drm_i915_gem_object *obj_priv; | ||
4345 | uint32_t addr; | 4340 | uint32_t addr; |
4346 | int ret; | 4341 | int ret; |
4347 | 4342 | ||
@@ -4351,7 +4346,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4351 | if (!handle) { | 4346 | if (!handle) { |
4352 | DRM_DEBUG_KMS("cursor off\n"); | 4347 | DRM_DEBUG_KMS("cursor off\n"); |
4353 | addr = 0; | 4348 | addr = 0; |
4354 | bo = NULL; | 4349 | obj = NULL; |
4355 | mutex_lock(&dev->struct_mutex); | 4350 | mutex_lock(&dev->struct_mutex); |
4356 | goto finish; | 4351 | goto finish; |
4357 | } | 4352 | } |
@@ -4362,13 +4357,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4362 | return -EINVAL; | 4357 | return -EINVAL; |
4363 | } | 4358 | } |
4364 | 4359 | ||
4365 | bo = drm_gem_object_lookup(dev, file_priv, handle); | 4360 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); |
4366 | if (!bo) | 4361 | if (!obj) |
4367 | return -ENOENT; | 4362 | return -ENOENT; |
4368 | 4363 | ||
4369 | obj_priv = to_intel_bo(bo); | 4364 | if (obj->base.size < width * height * 4) { |
4370 | |||
4371 | if (bo->size < width * height * 4) { | ||
4372 | DRM_ERROR("buffer is to small\n"); | 4365 | DRM_ERROR("buffer is to small\n"); |
4373 | ret = -ENOMEM; | 4366 | ret = -ENOMEM; |
4374 | goto fail; | 4367 | goto fail; |
@@ -4377,29 +4370,29 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4377 | /* we only need to pin inside GTT if cursor is non-phy */ | 4370 | /* we only need to pin inside GTT if cursor is non-phy */ |
4378 | mutex_lock(&dev->struct_mutex); | 4371 | mutex_lock(&dev->struct_mutex); |
4379 | if (!dev_priv->info->cursor_needs_physical) { | 4372 | if (!dev_priv->info->cursor_needs_physical) { |
4380 | ret = i915_gem_object_pin(bo, PAGE_SIZE, true); | 4373 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true); |
4381 | if (ret) { | 4374 | if (ret) { |
4382 | DRM_ERROR("failed to pin cursor bo\n"); | 4375 | DRM_ERROR("failed to pin cursor bo\n"); |
4383 | goto fail_locked; | 4376 | goto fail_locked; |
4384 | } | 4377 | } |
4385 | 4378 | ||
4386 | ret = i915_gem_object_set_to_gtt_domain(bo, 0); | 4379 | ret = i915_gem_object_set_to_gtt_domain(obj, 0); |
4387 | if (ret) { | 4380 | if (ret) { |
4388 | DRM_ERROR("failed to move cursor bo into the GTT\n"); | 4381 | DRM_ERROR("failed to move cursor bo into the GTT\n"); |
4389 | goto fail_unpin; | 4382 | goto fail_unpin; |
4390 | } | 4383 | } |
4391 | 4384 | ||
4392 | addr = obj_priv->gtt_offset; | 4385 | addr = obj->gtt_offset; |
4393 | } else { | 4386 | } else { |
4394 | int align = IS_I830(dev) ? 16 * 1024 : 256; | 4387 | int align = IS_I830(dev) ? 16 * 1024 : 256; |
4395 | ret = i915_gem_attach_phys_object(dev, bo, | 4388 | ret = i915_gem_attach_phys_object(dev, obj, |
4396 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, | 4389 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, |
4397 | align); | 4390 | align); |
4398 | if (ret) { | 4391 | if (ret) { |
4399 | DRM_ERROR("failed to attach phys object\n"); | 4392 | DRM_ERROR("failed to attach phys object\n"); |
4400 | goto fail_locked; | 4393 | goto fail_locked; |
4401 | } | 4394 | } |
4402 | addr = obj_priv->phys_obj->handle->busaddr; | 4395 | addr = obj->phys_obj->handle->busaddr; |
4403 | } | 4396 | } |
4404 | 4397 | ||
4405 | if (IS_GEN2(dev)) | 4398 | if (IS_GEN2(dev)) |
@@ -4408,17 +4401,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4408 | finish: | 4401 | finish: |
4409 | if (intel_crtc->cursor_bo) { | 4402 | if (intel_crtc->cursor_bo) { |
4410 | if (dev_priv->info->cursor_needs_physical) { | 4403 | if (dev_priv->info->cursor_needs_physical) { |
4411 | if (intel_crtc->cursor_bo != bo) | 4404 | if (intel_crtc->cursor_bo != obj) |
4412 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | 4405 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); |
4413 | } else | 4406 | } else |
4414 | i915_gem_object_unpin(intel_crtc->cursor_bo); | 4407 | i915_gem_object_unpin(intel_crtc->cursor_bo); |
4415 | drm_gem_object_unreference(intel_crtc->cursor_bo); | 4408 | drm_gem_object_unreference(&intel_crtc->cursor_bo->base); |
4416 | } | 4409 | } |
4417 | 4410 | ||
4418 | mutex_unlock(&dev->struct_mutex); | 4411 | mutex_unlock(&dev->struct_mutex); |
4419 | 4412 | ||
4420 | intel_crtc->cursor_addr = addr; | 4413 | intel_crtc->cursor_addr = addr; |
4421 | intel_crtc->cursor_bo = bo; | 4414 | intel_crtc->cursor_bo = obj; |
4422 | intel_crtc->cursor_width = width; | 4415 | intel_crtc->cursor_width = width; |
4423 | intel_crtc->cursor_height = height; | 4416 | intel_crtc->cursor_height = height; |
4424 | 4417 | ||
@@ -4426,11 +4419,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4426 | 4419 | ||
4427 | return 0; | 4420 | return 0; |
4428 | fail_unpin: | 4421 | fail_unpin: |
4429 | i915_gem_object_unpin(bo); | 4422 | i915_gem_object_unpin(obj); |
4430 | fail_locked: | 4423 | fail_locked: |
4431 | mutex_unlock(&dev->struct_mutex); | 4424 | mutex_unlock(&dev->struct_mutex); |
4432 | fail: | 4425 | fail: |
4433 | drm_gem_object_unreference_unlocked(bo); | 4426 | drm_gem_object_unreference_unlocked(&obj->base); |
4434 | return ret; | 4427 | return ret; |
4435 | } | 4428 | } |
4436 | 4429 | ||
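The handle lookup still returns the embedded DRM core object, so call sites wrap it in to_intel_bo() and release references through &obj->base. A condensed sketch of the idiom used in the cursor path above (error handling trimmed, surrounding code assumed):

/* Lookup/refcount sketch; mirrors intel_crtc_cursor_set() above. */
struct drm_i915_gem_object *obj;

obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
if (!obj)
	return -ENOENT;

/* driver fields are reached directly ... */
addr = obj->gtt_offset;

/* ... while GEM core helpers still take the embedded base */
drm_gem_object_unreference_unlocked(&obj->base);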
@@ -4890,7 +4883,7 @@ static void intel_idle_update(struct work_struct *work) | |||
4890 | * buffer), we'll also mark the display as busy, so we know to increase its | 4883 | * buffer), we'll also mark the display as busy, so we know to increase its |
4891 | * clock frequency. | 4884 | * clock frequency. |
4892 | */ | 4885 | */ |
4893 | void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | 4886 | void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) |
4894 | { | 4887 | { |
4895 | drm_i915_private_t *dev_priv = dev->dev_private; | 4888 | drm_i915_private_t *dev_priv = dev->dev_private; |
4896 | struct drm_crtc *crtc = NULL; | 4889 | struct drm_crtc *crtc = NULL; |
@@ -4971,8 +4964,8 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
4971 | 4964 | ||
4972 | mutex_lock(&work->dev->struct_mutex); | 4965 | mutex_lock(&work->dev->struct_mutex); |
4973 | i915_gem_object_unpin(work->old_fb_obj); | 4966 | i915_gem_object_unpin(work->old_fb_obj); |
4974 | drm_gem_object_unreference(work->pending_flip_obj); | 4967 | drm_gem_object_unreference(&work->pending_flip_obj->base); |
4975 | drm_gem_object_unreference(work->old_fb_obj); | 4968 | drm_gem_object_unreference(&work->old_fb_obj->base); |
4976 | mutex_unlock(&work->dev->struct_mutex); | 4969 | mutex_unlock(&work->dev->struct_mutex); |
4977 | kfree(work); | 4970 | kfree(work); |
4978 | } | 4971 | } |
@@ -4983,7 +4976,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
4983 | drm_i915_private_t *dev_priv = dev->dev_private; | 4976 | drm_i915_private_t *dev_priv = dev->dev_private; |
4984 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4977 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4985 | struct intel_unpin_work *work; | 4978 | struct intel_unpin_work *work; |
4986 | struct drm_i915_gem_object *obj_priv; | 4979 | struct drm_i915_gem_object *obj; |
4987 | struct drm_pending_vblank_event *e; | 4980 | struct drm_pending_vblank_event *e; |
4988 | struct timeval now; | 4981 | struct timeval now; |
4989 | unsigned long flags; | 4982 | unsigned long flags; |
@@ -5015,10 +5008,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
5015 | 5008 | ||
5016 | spin_unlock_irqrestore(&dev->event_lock, flags); | 5009 | spin_unlock_irqrestore(&dev->event_lock, flags); |
5017 | 5010 | ||
5018 | obj_priv = to_intel_bo(work->old_fb_obj); | 5011 | obj = work->old_fb_obj; |
5019 | atomic_clear_mask(1 << intel_crtc->plane, | 5012 | atomic_clear_mask(1 << intel_crtc->plane, |
5020 | &obj_priv->pending_flip.counter); | 5013 | &obj->pending_flip.counter); |
5021 | if (atomic_read(&obj_priv->pending_flip) == 0) | 5014 | if (atomic_read(&obj->pending_flip) == 0) |
5022 | wake_up(&dev_priv->pending_flip_queue); | 5015 | wake_up(&dev_priv->pending_flip_queue); |
5023 | schedule_work(&work->work); | 5016 | schedule_work(&work->work); |
5024 | 5017 | ||
@@ -5065,8 +5058,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5065 | struct drm_device *dev = crtc->dev; | 5058 | struct drm_device *dev = crtc->dev; |
5066 | struct drm_i915_private *dev_priv = dev->dev_private; | 5059 | struct drm_i915_private *dev_priv = dev->dev_private; |
5067 | struct intel_framebuffer *intel_fb; | 5060 | struct intel_framebuffer *intel_fb; |
5068 | struct drm_i915_gem_object *obj_priv; | 5061 | struct drm_i915_gem_object *obj; |
5069 | struct drm_gem_object *obj; | ||
5070 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5062 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5071 | struct intel_unpin_work *work; | 5063 | struct intel_unpin_work *work; |
5072 | unsigned long flags, offset; | 5064 | unsigned long flags, offset; |
@@ -5105,8 +5097,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5105 | goto cleanup_work; | 5097 | goto cleanup_work; |
5106 | 5098 | ||
5107 | /* Reference the objects for the scheduled work. */ | 5099 | /* Reference the objects for the scheduled work. */ |
5108 | drm_gem_object_reference(work->old_fb_obj); | 5100 | drm_gem_object_reference(&work->old_fb_obj->base); |
5109 | drm_gem_object_reference(obj); | 5101 | drm_gem_object_reference(&obj->base); |
5110 | 5102 | ||
5111 | crtc->fb = fb; | 5103 | crtc->fb = fb; |
5112 | 5104 | ||
@@ -5134,7 +5126,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5134 | } | 5126 | } |
5135 | 5127 | ||
5136 | work->pending_flip_obj = obj; | 5128 | work->pending_flip_obj = obj; |
5137 | obj_priv = to_intel_bo(obj); | ||
5138 | 5129 | ||
5139 | work->enable_stall_check = true; | 5130 | work->enable_stall_check = true; |
5140 | 5131 | ||
@@ -5148,15 +5139,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5148 | /* Block clients from rendering to the new back buffer until | 5139 | /* Block clients from rendering to the new back buffer until |
5149 | * the flip occurs and the object is no longer visible. | 5140 | * the flip occurs and the object is no longer visible. |
5150 | */ | 5141 | */ |
5151 | atomic_add(1 << intel_crtc->plane, | 5142 | atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); |
5152 | &to_intel_bo(work->old_fb_obj)->pending_flip); | ||
5153 | 5143 | ||
5154 | switch (INTEL_INFO(dev)->gen) { | 5144 | switch (INTEL_INFO(dev)->gen) { |
5155 | case 2: | 5145 | case 2: |
5156 | OUT_RING(MI_DISPLAY_FLIP | | 5146 | OUT_RING(MI_DISPLAY_FLIP | |
5157 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5147 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5158 | OUT_RING(fb->pitch); | 5148 | OUT_RING(fb->pitch); |
5159 | OUT_RING(obj_priv->gtt_offset + offset); | 5149 | OUT_RING(obj->gtt_offset + offset); |
5160 | OUT_RING(MI_NOOP); | 5150 | OUT_RING(MI_NOOP); |
5161 | break; | 5151 | break; |
5162 | 5152 | ||
@@ -5164,7 +5154,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5164 | OUT_RING(MI_DISPLAY_FLIP_I915 | | 5154 | OUT_RING(MI_DISPLAY_FLIP_I915 | |
5165 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5155 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5166 | OUT_RING(fb->pitch); | 5156 | OUT_RING(fb->pitch); |
5167 | OUT_RING(obj_priv->gtt_offset + offset); | 5157 | OUT_RING(obj->gtt_offset + offset); |
5168 | OUT_RING(MI_NOOP); | 5158 | OUT_RING(MI_NOOP); |
5169 | break; | 5159 | break; |
5170 | 5160 | ||
@@ -5177,7 +5167,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5177 | OUT_RING(MI_DISPLAY_FLIP | | 5167 | OUT_RING(MI_DISPLAY_FLIP | |
5178 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5168 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5179 | OUT_RING(fb->pitch); | 5169 | OUT_RING(fb->pitch); |
5180 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | 5170 | OUT_RING(obj->gtt_offset | obj->tiling_mode); |
5181 | 5171 | ||
5182 | /* XXX Enabling the panel-fitter across page-flip is so far | 5172 | /* XXX Enabling the panel-fitter across page-flip is so far |
5183 | * untested on non-native modes, so ignore it for now. | 5173 | * untested on non-native modes, so ignore it for now. |
@@ -5191,8 +5181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5191 | case 6: | 5181 | case 6: |
5192 | OUT_RING(MI_DISPLAY_FLIP | | 5182 | OUT_RING(MI_DISPLAY_FLIP | |
5193 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5183 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5194 | OUT_RING(fb->pitch | obj_priv->tiling_mode); | 5184 | OUT_RING(fb->pitch | obj->tiling_mode); |
5195 | OUT_RING(obj_priv->gtt_offset); | 5185 | OUT_RING(obj->gtt_offset); |
5196 | 5186 | ||
5197 | pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | 5187 | pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; |
5198 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; | 5188 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; |
@@ -5208,8 +5198,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5208 | return 0; | 5198 | return 0; |
5209 | 5199 | ||
5210 | cleanup_objs: | 5200 | cleanup_objs: |
5211 | drm_gem_object_unreference(work->old_fb_obj); | 5201 | drm_gem_object_unreference(&work->old_fb_obj->base); |
5212 | drm_gem_object_unreference(obj); | 5202 | drm_gem_object_unreference(&obj->base); |
5213 | cleanup_work: | 5203 | cleanup_work: |
5214 | mutex_unlock(&dev->struct_mutex); | 5204 | mutex_unlock(&dev->struct_mutex); |
5215 | 5205 | ||
@@ -5295,7 +5285,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5295 | } | 5285 | } |
5296 | 5286 | ||
5297 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 5287 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
5298 | struct drm_file *file_priv) | 5288 | struct drm_file *file) |
5299 | { | 5289 | { |
5300 | drm_i915_private_t *dev_priv = dev->dev_private; | 5290 | drm_i915_private_t *dev_priv = dev->dev_private; |
5301 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; | 5291 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; |
@@ -5440,19 +5430,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
5440 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 5430 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
5441 | 5431 | ||
5442 | drm_framebuffer_cleanup(fb); | 5432 | drm_framebuffer_cleanup(fb); |
5443 | drm_gem_object_unreference_unlocked(intel_fb->obj); | 5433 | drm_gem_object_unreference_unlocked(&intel_fb->obj->base); |
5444 | 5434 | ||
5445 | kfree(intel_fb); | 5435 | kfree(intel_fb); |
5446 | } | 5436 | } |
5447 | 5437 | ||
5448 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, | 5438 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, |
5449 | struct drm_file *file_priv, | 5439 | struct drm_file *file, |
5450 | unsigned int *handle) | 5440 | unsigned int *handle) |
5451 | { | 5441 | { |
5452 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 5442 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
5453 | struct drm_gem_object *object = intel_fb->obj; | 5443 | struct drm_i915_gem_object *obj = intel_fb->obj; |
5454 | 5444 | ||
5455 | return drm_gem_handle_create(file_priv, object, handle); | 5445 | return drm_gem_handle_create(file, &obj->base, handle); |
5456 | } | 5446 | } |
5457 | 5447 | ||
5458 | static const struct drm_framebuffer_funcs intel_fb_funcs = { | 5448 | static const struct drm_framebuffer_funcs intel_fb_funcs = { |
@@ -5463,12 +5453,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { | |||
5463 | int intel_framebuffer_init(struct drm_device *dev, | 5453 | int intel_framebuffer_init(struct drm_device *dev, |
5464 | struct intel_framebuffer *intel_fb, | 5454 | struct intel_framebuffer *intel_fb, |
5465 | struct drm_mode_fb_cmd *mode_cmd, | 5455 | struct drm_mode_fb_cmd *mode_cmd, |
5466 | struct drm_gem_object *obj) | 5456 | struct drm_i915_gem_object *obj) |
5467 | { | 5457 | { |
5468 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
5469 | int ret; | 5458 | int ret; |
5470 | 5459 | ||
5471 | if (obj_priv->tiling_mode == I915_TILING_Y) | 5460 | if (obj->tiling_mode == I915_TILING_Y) |
5472 | return -EINVAL; | 5461 | return -EINVAL; |
5473 | 5462 | ||
5474 | if (mode_cmd->pitch & 63) | 5463 | if (mode_cmd->pitch & 63) |
@@ -5500,11 +5489,11 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
5500 | struct drm_file *filp, | 5489 | struct drm_file *filp, |
5501 | struct drm_mode_fb_cmd *mode_cmd) | 5490 | struct drm_mode_fb_cmd *mode_cmd) |
5502 | { | 5491 | { |
5503 | struct drm_gem_object *obj; | 5492 | struct drm_i915_gem_object *obj; |
5504 | struct intel_framebuffer *intel_fb; | 5493 | struct intel_framebuffer *intel_fb; |
5505 | int ret; | 5494 | int ret; |
5506 | 5495 | ||
5507 | obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); | 5496 | obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); |
5508 | if (!obj) | 5497 | if (!obj) |
5509 | return ERR_PTR(-ENOENT); | 5498 | return ERR_PTR(-ENOENT); |
5510 | 5499 | ||
@@ -5512,10 +5501,9 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
5512 | if (!intel_fb) | 5501 | if (!intel_fb) |
5513 | return ERR_PTR(-ENOMEM); | 5502 | return ERR_PTR(-ENOMEM); |
5514 | 5503 | ||
5515 | ret = intel_framebuffer_init(dev, intel_fb, | 5504 | ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); |
5516 | mode_cmd, obj); | ||
5517 | if (ret) { | 5505 | if (ret) { |
5518 | drm_gem_object_unreference_unlocked(obj); | 5506 | drm_gem_object_unreference_unlocked(&obj->base); |
5519 | kfree(intel_fb); | 5507 | kfree(intel_fb); |
5520 | return ERR_PTR(ret); | 5508 | return ERR_PTR(ret); |
5521 | } | 5509 | } |
@@ -5528,10 +5516,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { | |||
5528 | .output_poll_changed = intel_fb_output_poll_changed, | 5516 | .output_poll_changed = intel_fb_output_poll_changed, |
5529 | }; | 5517 | }; |
5530 | 5518 | ||
5531 | static struct drm_gem_object * | 5519 | static struct drm_i915_gem_object * |
5532 | intel_alloc_context_page(struct drm_device *dev) | 5520 | intel_alloc_context_page(struct drm_device *dev) |
5533 | { | 5521 | { |
5534 | struct drm_gem_object *ctx; | 5522 | struct drm_i915_gem_object *ctx; |
5535 | int ret; | 5523 | int ret; |
5536 | 5524 | ||
5537 | ctx = i915_gem_alloc_object(dev, 4096); | 5525 | ctx = i915_gem_alloc_object(dev, 4096); |
@@ -5559,7 +5547,7 @@ intel_alloc_context_page(struct drm_device *dev) | |||
5559 | err_unpin: | 5547 | err_unpin: |
5560 | i915_gem_object_unpin(ctx); | 5548 | i915_gem_object_unpin(ctx); |
5561 | err_unref: | 5549 | err_unref: |
5562 | drm_gem_object_unreference(ctx); | 5550 | drm_gem_object_unreference(&ctx->base); |
5563 | mutex_unlock(&dev->struct_mutex); | 5551 | mutex_unlock(&dev->struct_mutex); |
5564 | return NULL; | 5552 | return NULL; |
5565 | } | 5553 | } |
@@ -5886,20 +5874,17 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5886 | if (dev_priv->renderctx == NULL) | 5874 | if (dev_priv->renderctx == NULL) |
5887 | dev_priv->renderctx = intel_alloc_context_page(dev); | 5875 | dev_priv->renderctx = intel_alloc_context_page(dev); |
5888 | if (dev_priv->renderctx) { | 5876 | if (dev_priv->renderctx) { |
5889 | struct drm_i915_gem_object *obj_priv; | 5877 | struct drm_i915_gem_object *obj = dev_priv->renderctx; |
5890 | obj_priv = to_intel_bo(dev_priv->renderctx); | 5878 | if (BEGIN_LP_RING(4) == 0) { |
5891 | if (obj_priv) { | 5879 | OUT_RING(MI_SET_CONTEXT); |
5892 | if (BEGIN_LP_RING(4) == 0) { | 5880 | OUT_RING(obj->gtt_offset | |
5893 | OUT_RING(MI_SET_CONTEXT); | 5881 | MI_MM_SPACE_GTT | |
5894 | OUT_RING(obj_priv->gtt_offset | | 5882 | MI_SAVE_EXT_STATE_EN | |
5895 | MI_MM_SPACE_GTT | | 5883 | MI_RESTORE_EXT_STATE_EN | |
5896 | MI_SAVE_EXT_STATE_EN | | 5884 | MI_RESTORE_INHIBIT); |
5897 | MI_RESTORE_EXT_STATE_EN | | 5885 | OUT_RING(MI_NOOP); |
5898 | MI_RESTORE_INHIBIT); | 5886 | OUT_RING(MI_FLUSH); |
5899 | OUT_RING(MI_NOOP); | 5887 | ADVANCE_LP_RING(); |
5900 | OUT_RING(MI_FLUSH); | ||
5901 | ADVANCE_LP_RING(); | ||
5902 | } | ||
5903 | } | 5888 | } |
5904 | } else | 5889 | } else |
5905 | DRM_DEBUG_KMS("Failed to allocate render context." | 5890 | DRM_DEBUG_KMS("Failed to allocate render context." |
@@ -5907,22 +5892,11 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5907 | } | 5892 | } |
5908 | 5893 | ||
5909 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { | 5894 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { |
5910 | struct drm_i915_gem_object *obj_priv = NULL; | 5895 | if (dev_priv->pwrctx == NULL) |
5911 | 5896 | dev_priv->pwrctx = intel_alloc_context_page(dev); | |
5912 | if (dev_priv->pwrctx) { | 5897 | if (dev_priv->pwrctx) { |
5913 | obj_priv = to_intel_bo(dev_priv->pwrctx); | 5898 | struct drm_i915_gem_object *obj = dev_priv->pwrctx; |
5914 | } else { | 5899 | I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN); |
5915 | struct drm_gem_object *pwrctx; | ||
5916 | |||
5917 | pwrctx = intel_alloc_context_page(dev); | ||
5918 | if (pwrctx) { | ||
5919 | dev_priv->pwrctx = pwrctx; | ||
5920 | obj_priv = to_intel_bo(pwrctx); | ||
5921 | } | ||
5922 | } | ||
5923 | |||
5924 | if (obj_priv) { | ||
5925 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); | ||
5926 | I915_WRITE(MCHBAR_RENDER_STANDBY, | 5900 | I915_WRITE(MCHBAR_RENDER_STANDBY, |
5927 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | 5901 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); |
5928 | } | 5902 | } |
@@ -6197,23 +6171,25 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
6197 | dev_priv->display.disable_fbc(dev); | 6171 | dev_priv->display.disable_fbc(dev); |
6198 | 6172 | ||
6199 | if (dev_priv->renderctx) { | 6173 | if (dev_priv->renderctx) { |
6200 | struct drm_i915_gem_object *obj_priv; | 6174 | struct drm_i915_gem_object *obj = dev_priv->renderctx; |
6175 | |||
6176 | I915_WRITE(CCID, obj->gtt_offset &~ CCID_EN); | ||
6177 | POSTING_READ(CCID); | ||
6201 | 6178 | ||
6202 | obj_priv = to_intel_bo(dev_priv->renderctx); | 6179 | i915_gem_object_unpin(obj); |
6203 | I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN); | 6180 | drm_gem_object_unreference(&obj->base); |
6204 | I915_READ(CCID); | 6181 | dev_priv->renderctx = NULL; |
6205 | i915_gem_object_unpin(dev_priv->renderctx); | ||
6206 | drm_gem_object_unreference(dev_priv->renderctx); | ||
6207 | } | 6182 | } |
6208 | 6183 | ||
6209 | if (dev_priv->pwrctx) { | 6184 | if (dev_priv->pwrctx) { |
6210 | struct drm_i915_gem_object *obj_priv; | 6185 | struct drm_i915_gem_object *obj = dev_priv->pwrctx; |
6186 | |||
6187 | I915_WRITE(PWRCTXA, obj->gtt_offset &~ PWRCTX_EN); | ||
6188 | POSTING_READ(PWRCTXA); | ||
6211 | 6189 | ||
6212 | obj_priv = to_intel_bo(dev_priv->pwrctx); | 6190 | i915_gem_object_unpin(obj); |
6213 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); | 6191 | drm_gem_object_unreference(&obj->base); |
6214 | I915_READ(PWRCTXA); | 6192 | dev_priv->pwrctx = NULL; |
6215 | i915_gem_object_unpin(dev_priv->pwrctx); | ||
6216 | drm_gem_object_unreference(dev_priv->pwrctx); | ||
6217 | } | 6193 | } |
6218 | 6194 | ||
6219 | if (IS_IRONLAKE_M(dev)) | 6195 | if (IS_IRONLAKE_M(dev)) |
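The render and power context pages follow the same pattern: dev_priv->renderctx and dev_priv->pwrctx now hold the driver type, so setup reads obj->gtt_offset directly and teardown unpins and drops the reference via &obj->base. A condensed sketch (mirrors the cleanup hunk above):

/* Teardown sketch, condensed from intel_modeset_cleanup() above. */
if (dev_priv->pwrctx) {
	struct drm_i915_gem_object *obj = dev_priv->pwrctx;

	I915_WRITE(PWRCTXA, obj->gtt_offset & ~PWRCTX_EN);
	POSTING_READ(PWRCTXA);

	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	dev_priv->pwrctx = NULL;
}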
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 21551fe74541..5a4f14e36d6c 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) | |||
127 | 127 | ||
128 | struct intel_framebuffer { | 128 | struct intel_framebuffer { |
129 | struct drm_framebuffer base; | 129 | struct drm_framebuffer base; |
130 | struct drm_gem_object *obj; | 130 | struct drm_i915_gem_object *obj; |
131 | }; | 131 | }; |
132 | 132 | ||
133 | struct intel_fbdev { | 133 | struct intel_fbdev { |
@@ -166,7 +166,7 @@ struct intel_crtc { | |||
166 | struct intel_unpin_work *unpin_work; | 166 | struct intel_unpin_work *unpin_work; |
167 | int fdi_lanes; | 167 | int fdi_lanes; |
168 | 168 | ||
169 | struct drm_gem_object *cursor_bo; | 169 | struct drm_i915_gem_object *cursor_bo; |
170 | uint32_t cursor_addr; | 170 | uint32_t cursor_addr; |
171 | int16_t cursor_x, cursor_y; | 171 | int16_t cursor_x, cursor_y; |
172 | int16_t cursor_width, cursor_height; | 172 | int16_t cursor_width, cursor_height; |
@@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) | |||
220 | struct intel_unpin_work { | 220 | struct intel_unpin_work { |
221 | struct work_struct work; | 221 | struct work_struct work; |
222 | struct drm_device *dev; | 222 | struct drm_device *dev; |
223 | struct drm_gem_object *old_fb_obj; | 223 | struct drm_i915_gem_object *old_fb_obj; |
224 | struct drm_gem_object *pending_flip_obj; | 224 | struct drm_i915_gem_object *pending_flip_obj; |
225 | struct drm_pending_vblank_event *event; | 225 | struct drm_pending_vblank_event *event; |
226 | int pending; | 226 | int pending; |
227 | bool enable_stall_check; | 227 | bool enable_stall_check; |
@@ -236,7 +236,8 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); | |||
236 | extern bool intel_sdvo_init(struct drm_device *dev, int output_device); | 236 | extern bool intel_sdvo_init(struct drm_device *dev, int output_device); |
237 | extern void intel_dvo_init(struct drm_device *dev); | 237 | extern void intel_dvo_init(struct drm_device *dev); |
238 | extern void intel_tv_init(struct drm_device *dev); | 238 | extern void intel_tv_init(struct drm_device *dev); |
239 | extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj); | 239 | extern void intel_mark_busy(struct drm_device *dev, |
240 | struct drm_i915_gem_object *obj); | ||
240 | extern void intel_lvds_init(struct drm_device *dev); | 241 | extern void intel_lvds_init(struct drm_device *dev); |
241 | extern void intel_dp_init(struct drm_device *dev, int dp_reg); | 242 | extern void intel_dp_init(struct drm_device *dev, int dp_reg); |
242 | void | 243 | void |
@@ -299,13 +300,13 @@ extern void ironlake_disable_drps(struct drm_device *dev); | |||
299 | extern void intel_init_emon(struct drm_device *dev); | 300 | extern void intel_init_emon(struct drm_device *dev); |
300 | 301 | ||
301 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, | 302 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, |
302 | struct drm_gem_object *obj, | 303 | struct drm_i915_gem_object *obj, |
303 | bool pipelined); | 304 | bool pipelined); |
304 | 305 | ||
305 | extern int intel_framebuffer_init(struct drm_device *dev, | 306 | extern int intel_framebuffer_init(struct drm_device *dev, |
306 | struct intel_framebuffer *ifb, | 307 | struct intel_framebuffer *ifb, |
307 | struct drm_mode_fb_cmd *mode_cmd, | 308 | struct drm_mode_fb_cmd *mode_cmd, |
308 | struct drm_gem_object *obj); | 309 | struct drm_i915_gem_object *obj); |
309 | extern int intel_fbdev_init(struct drm_device *dev); | 310 | extern int intel_fbdev_init(struct drm_device *dev); |
310 | extern void intel_fbdev_fini(struct drm_device *dev); | 311 | extern void intel_fbdev_fini(struct drm_device *dev); |
311 | 312 | ||
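Note: the prototype and field changes above lean on the fact that the i915 object embeds the core GEM object as its first member, so the two pointer types convert in both directions without any allocation. A minimal sketch of that relationship, assuming the container_of-based to_intel_bo() helper from i915_drv.h (illustrative, not a verbatim quote of the header):

struct drm_i915_gem_object {
        struct drm_gem_object base;     /* embedded core GEM object, first member */
        /* ... driver-private state such as gtt_offset, tiling_mode, pin_count ... */
};

/* core GEM pointer -> i915 pointer */
static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
        return container_of(gem, struct drm_i915_gem_object, base);
}

/* i915 pointer -> core GEM pointer: take the address of the embedded base,
 * which is what the unreference calls throughout this patch do with &obj->base. */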
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index af2a1dddc28e..c2cffeb4fe89 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -65,8 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
65 | struct fb_info *info; | 65 | struct fb_info *info; |
66 | struct drm_framebuffer *fb; | 66 | struct drm_framebuffer *fb; |
67 | struct drm_mode_fb_cmd mode_cmd; | 67 | struct drm_mode_fb_cmd mode_cmd; |
68 | struct drm_gem_object *fbo = NULL; | 68 | struct drm_i915_gem_object *obj; |
69 | struct drm_i915_gem_object *obj_priv; | ||
70 | struct device *device = &dev->pdev->dev; | 69 | struct device *device = &dev->pdev->dev; |
71 | int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0; | 70 | int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0; |
72 | 71 | ||
@@ -83,18 +82,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
83 | 82 | ||
84 | size = mode_cmd.pitch * mode_cmd.height; | 83 | size = mode_cmd.pitch * mode_cmd.height; |
85 | size = ALIGN(size, PAGE_SIZE); | 84 | size = ALIGN(size, PAGE_SIZE); |
86 | fbo = i915_gem_alloc_object(dev, size); | 85 | obj = i915_gem_alloc_object(dev, size); |
87 | if (!fbo) { | 86 | if (!obj) { |
88 | DRM_ERROR("failed to allocate framebuffer\n"); | 87 | DRM_ERROR("failed to allocate framebuffer\n"); |
89 | ret = -ENOMEM; | 88 | ret = -ENOMEM; |
90 | goto out; | 89 | goto out; |
91 | } | 90 | } |
92 | obj_priv = to_intel_bo(fbo); | ||
93 | 91 | ||
94 | mutex_lock(&dev->struct_mutex); | 92 | mutex_lock(&dev->struct_mutex); |
95 | 93 | ||
96 | /* Flush everything out, we'll be doing GTT only from now on */ | 94 | /* Flush everything out, we'll be doing GTT only from now on */ |
97 | ret = intel_pin_and_fence_fb_obj(dev, fbo, false); | 95 | ret = intel_pin_and_fence_fb_obj(dev, obj, false); |
98 | if (ret) { | 96 | if (ret) { |
99 | DRM_ERROR("failed to pin fb: %d\n", ret); | 97 | DRM_ERROR("failed to pin fb: %d\n", ret); |
100 | goto out_unref; | 98 | goto out_unref; |
@@ -108,7 +106,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
108 | 106 | ||
109 | info->par = ifbdev; | 107 | info->par = ifbdev; |
110 | 108 | ||
111 | ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo); | 109 | ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); |
112 | if (ret) | 110 | if (ret) |
113 | goto out_unpin; | 111 | goto out_unpin; |
114 | 112 | ||
@@ -134,11 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
134 | else | 132 | else |
135 | info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); | 133 | info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); |
136 | 134 | ||
137 | info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; | 135 | info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; |
138 | info->fix.smem_len = size; | 136 | info->fix.smem_len = size; |
139 | 137 | ||
140 | info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset, | 138 | info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size); |
141 | size); | ||
142 | if (!info->screen_base) { | 139 | if (!info->screen_base) { |
143 | ret = -ENOSPC; | 140 | ret = -ENOSPC; |
144 | goto out_unpin; | 141 | goto out_unpin; |
@@ -168,7 +165,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
168 | 165 | ||
169 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", | 166 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", |
170 | fb->width, fb->height, | 167 | fb->width, fb->height, |
171 | obj_priv->gtt_offset, fbo); | 168 | obj->gtt_offset, obj); |
172 | 169 | ||
173 | 170 | ||
174 | mutex_unlock(&dev->struct_mutex); | 171 | mutex_unlock(&dev->struct_mutex); |
@@ -176,9 +173,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
176 | return 0; | 173 | return 0; |
177 | 174 | ||
178 | out_unpin: | 175 | out_unpin: |
179 | i915_gem_object_unpin(fbo); | 176 | i915_gem_object_unpin(obj); |
180 | out_unref: | 177 | out_unref: |
181 | drm_gem_object_unreference(fbo); | 178 | drm_gem_object_unreference(&obj->base); |
182 | mutex_unlock(&dev->struct_mutex); | 179 | mutex_unlock(&dev->struct_mutex); |
183 | out: | 180 | out: |
184 | return ret; | 181 | return ret; |
@@ -225,7 +222,7 @@ static void intel_fbdev_destroy(struct drm_device *dev, | |||
225 | 222 | ||
226 | drm_framebuffer_cleanup(&ifb->base); | 223 | drm_framebuffer_cleanup(&ifb->base); |
227 | if (ifb->obj) { | 224 | if (ifb->obj) { |
228 | drm_gem_object_unreference_unlocked(ifb->obj); | 225 | drm_gem_object_unreference_unlocked(&ifb->obj->base); |
229 | ifb->obj = NULL; | 226 | ifb->obj = NULL; |
230 | } | 227 | } |
231 | } | 228 | } |
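The fbdev path now carries the i915 type end to end: i915_gem_alloc_object() hands back a drm_i915_gem_object, gtt_offset is read off it directly, and only the unreference call still needs the embedded &obj->base. A condensed sketch of the resulting allocate/pin/cleanup flow, simplified from intelfb_create() (helper name, size handling and labels are illustrative):

static int example_alloc_fb_bo(struct drm_device *dev, size_t size)
{
        struct drm_i915_gem_object *obj;
        int ret;

        obj = i915_gem_alloc_object(dev, size); /* returns the i915 type after this patch */
        if (!obj)
                return -ENOMEM;

        mutex_lock(&dev->struct_mutex);
        ret = intel_pin_and_fence_fb_obj(dev, obj, false);
        if (ret)
                goto out_unref;

        /* driver-private fields are reached without a to_intel_bo() hop */
        DRM_DEBUG_KMS("fb bo at GTT offset 0x%08x\n", obj->gtt_offset);

        mutex_unlock(&dev->struct_mutex);
        return 0;

out_unref:
        drm_gem_object_unreference(&obj->base); /* core GEM API still takes the embedded base */
        mutex_unlock(&dev->struct_mutex);
        return ret;
}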
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index ec8ffaccbbdb..af715cc03ee0 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -376,24 +376,23 @@ static int intel_overlay_continue(struct intel_overlay *overlay, | |||
376 | 376 | ||
377 | static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) | 377 | static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) |
378 | { | 378 | { |
379 | struct drm_gem_object *obj = &overlay->old_vid_bo->base; | 379 | struct drm_i915_gem_object *obj = overlay->old_vid_bo; |
380 | 380 | ||
381 | i915_gem_object_unpin(obj); | 381 | i915_gem_object_unpin(obj); |
382 | drm_gem_object_unreference(obj); | 382 | drm_gem_object_unreference(&obj->base); |
383 | 383 | ||
384 | overlay->old_vid_bo = NULL; | 384 | overlay->old_vid_bo = NULL; |
385 | } | 385 | } |
386 | 386 | ||
387 | static void intel_overlay_off_tail(struct intel_overlay *overlay) | 387 | static void intel_overlay_off_tail(struct intel_overlay *overlay) |
388 | { | 388 | { |
389 | struct drm_gem_object *obj; | 389 | struct drm_i915_gem_object *obj = overlay->vid_bo; |
390 | 390 | ||
391 | /* never have the overlay hw on without showing a frame */ | 391 | /* never have the overlay hw on without showing a frame */ |
392 | BUG_ON(!overlay->vid_bo); | 392 | BUG_ON(!overlay->vid_bo); |
393 | obj = &overlay->vid_bo->base; | ||
394 | 393 | ||
395 | i915_gem_object_unpin(obj); | 394 | i915_gem_object_unpin(obj); |
396 | drm_gem_object_unreference(obj); | 395 | drm_gem_object_unreference(&obj->base); |
397 | overlay->vid_bo = NULL; | 396 | overlay->vid_bo = NULL; |
398 | 397 | ||
399 | overlay->crtc->overlay = NULL; | 398 | overlay->crtc->overlay = NULL; |
@@ -764,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params) | |||
764 | } | 763 | } |
765 | 764 | ||
766 | static int intel_overlay_do_put_image(struct intel_overlay *overlay, | 765 | static int intel_overlay_do_put_image(struct intel_overlay *overlay, |
767 | struct drm_gem_object *new_bo, | 766 | struct drm_i915_gem_object *new_bo, |
768 | struct put_image_params *params) | 767 | struct put_image_params *params) |
769 | { | 768 | { |
770 | int ret, tmp_width; | 769 | int ret, tmp_width; |
771 | struct overlay_registers *regs; | 770 | struct overlay_registers *regs; |
772 | bool scale_changed = false; | 771 | bool scale_changed = false; |
773 | struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo); | ||
774 | struct drm_device *dev = overlay->dev; | 772 | struct drm_device *dev = overlay->dev; |
775 | 773 | ||
776 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 774 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
@@ -825,7 +823,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
825 | regs->SWIDTHSW = calc_swidthsw(overlay->dev, | 823 | regs->SWIDTHSW = calc_swidthsw(overlay->dev, |
826 | params->offset_Y, tmp_width); | 824 | params->offset_Y, tmp_width); |
827 | regs->SHEIGHT = params->src_h; | 825 | regs->SHEIGHT = params->src_h; |
828 | regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y; | 826 | regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y; |
829 | regs->OSTRIDE = params->stride_Y; | 827 | regs->OSTRIDE = params->stride_Y; |
830 | 828 | ||
831 | if (params->format & I915_OVERLAY_YUV_PLANAR) { | 829 | if (params->format & I915_OVERLAY_YUV_PLANAR) { |
@@ -839,8 +837,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
839 | params->src_w/uv_hscale); | 837 | params->src_w/uv_hscale); |
840 | regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; | 838 | regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; |
841 | regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; | 839 | regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; |
842 | regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U; | 840 | regs->OBUF_0U = new_bo->gtt_offset + params->offset_U; |
843 | regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V; | 841 | regs->OBUF_0V = new_bo->gtt_offset + params->offset_V; |
844 | regs->OSTRIDE |= params->stride_UV << 16; | 842 | regs->OSTRIDE |= params->stride_UV << 16; |
845 | } | 843 | } |
846 | 844 | ||
@@ -857,7 +855,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
857 | goto out_unpin; | 855 | goto out_unpin; |
858 | 856 | ||
859 | overlay->old_vid_bo = overlay->vid_bo; | 857 | overlay->old_vid_bo = overlay->vid_bo; |
860 | overlay->vid_bo = to_intel_bo(new_bo); | 858 | overlay->vid_bo = new_bo; |
861 | 859 | ||
862 | return 0; | 860 | return 0; |
863 | 861 | ||
@@ -970,7 +968,7 @@ static int check_overlay_scaling(struct put_image_params *rec) | |||
970 | 968 | ||
971 | static int check_overlay_src(struct drm_device *dev, | 969 | static int check_overlay_src(struct drm_device *dev, |
972 | struct drm_intel_overlay_put_image *rec, | 970 | struct drm_intel_overlay_put_image *rec, |
973 | struct drm_gem_object *new_bo) | 971 | struct drm_i915_gem_object *new_bo) |
974 | { | 972 | { |
975 | int uv_hscale = uv_hsubsampling(rec->flags); | 973 | int uv_hscale = uv_hsubsampling(rec->flags); |
976 | int uv_vscale = uv_vsubsampling(rec->flags); | 974 | int uv_vscale = uv_vsubsampling(rec->flags); |
@@ -1055,7 +1053,7 @@ static int check_overlay_src(struct drm_device *dev, | |||
1055 | return -EINVAL; | 1053 | return -EINVAL; |
1056 | 1054 | ||
1057 | tmp = rec->stride_Y*rec->src_height; | 1055 | tmp = rec->stride_Y*rec->src_height; |
1058 | if (rec->offset_Y + tmp > new_bo->size) | 1056 | if (rec->offset_Y + tmp > new_bo->base.size) |
1059 | return -EINVAL; | 1057 | return -EINVAL; |
1060 | break; | 1058 | break; |
1061 | 1059 | ||
@@ -1066,12 +1064,12 @@ static int check_overlay_src(struct drm_device *dev, | |||
1066 | return -EINVAL; | 1064 | return -EINVAL; |
1067 | 1065 | ||
1068 | tmp = rec->stride_Y * rec->src_height; | 1066 | tmp = rec->stride_Y * rec->src_height; |
1069 | if (rec->offset_Y + tmp > new_bo->size) | 1067 | if (rec->offset_Y + tmp > new_bo->base.size) |
1070 | return -EINVAL; | 1068 | return -EINVAL; |
1071 | 1069 | ||
1072 | tmp = rec->stride_UV * (rec->src_height / uv_vscale); | 1070 | tmp = rec->stride_UV * (rec->src_height / uv_vscale); |
1073 | if (rec->offset_U + tmp > new_bo->size || | 1071 | if (rec->offset_U + tmp > new_bo->base.size || |
1074 | rec->offset_V + tmp > new_bo->size) | 1072 | rec->offset_V + tmp > new_bo->base.size) |
1075 | return -EINVAL; | 1073 | return -EINVAL; |
1076 | break; | 1074 | break; |
1077 | } | 1075 | } |
@@ -1114,7 +1112,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1114 | struct intel_overlay *overlay; | 1112 | struct intel_overlay *overlay; |
1115 | struct drm_mode_object *drmmode_obj; | 1113 | struct drm_mode_object *drmmode_obj; |
1116 | struct intel_crtc *crtc; | 1114 | struct intel_crtc *crtc; |
1117 | struct drm_gem_object *new_bo; | 1115 | struct drm_i915_gem_object *new_bo; |
1118 | struct put_image_params *params; | 1116 | struct put_image_params *params; |
1119 | int ret; | 1117 | int ret; |
1120 | 1118 | ||
@@ -1153,8 +1151,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1153 | } | 1151 | } |
1154 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); | 1152 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); |
1155 | 1153 | ||
1156 | new_bo = drm_gem_object_lookup(dev, file_priv, | 1154 | new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, |
1157 | put_image_rec->bo_handle); | 1155 | put_image_rec->bo_handle)); |
1158 | if (!new_bo) { | 1156 | if (!new_bo) { |
1159 | ret = -ENOENT; | 1157 | ret = -ENOENT; |
1160 | goto out_free; | 1158 | goto out_free; |
@@ -1245,7 +1243,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1245 | out_unlock: | 1243 | out_unlock: |
1246 | mutex_unlock(&dev->struct_mutex); | 1244 | mutex_unlock(&dev->struct_mutex); |
1247 | mutex_unlock(&dev->mode_config.mutex); | 1245 | mutex_unlock(&dev->mode_config.mutex); |
1248 | drm_gem_object_unreference_unlocked(new_bo); | 1246 | drm_gem_object_unreference_unlocked(&new_bo->base); |
1249 | out_free: | 1247 | out_free: |
1250 | kfree(params); | 1248 | kfree(params); |
1251 | 1249 | ||
@@ -1398,7 +1396,7 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1398 | { | 1396 | { |
1399 | drm_i915_private_t *dev_priv = dev->dev_private; | 1397 | drm_i915_private_t *dev_priv = dev->dev_private; |
1400 | struct intel_overlay *overlay; | 1398 | struct intel_overlay *overlay; |
1401 | struct drm_gem_object *reg_bo; | 1399 | struct drm_i915_gem_object *reg_bo; |
1402 | struct overlay_registers *regs; | 1400 | struct overlay_registers *regs; |
1403 | int ret; | 1401 | int ret; |
1404 | 1402 | ||
@@ -1413,7 +1411,7 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1413 | reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); | 1411 | reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); |
1414 | if (!reg_bo) | 1412 | if (!reg_bo) |
1415 | goto out_free; | 1413 | goto out_free; |
1416 | overlay->reg_bo = to_intel_bo(reg_bo); | 1414 | overlay->reg_bo = reg_bo; |
1417 | 1415 | ||
1418 | if (OVERLAY_NEEDS_PHYSICAL(dev)) { | 1416 | if (OVERLAY_NEEDS_PHYSICAL(dev)) { |
1419 | ret = i915_gem_attach_phys_object(dev, reg_bo, | 1417 | ret = i915_gem_attach_phys_object(dev, reg_bo, |
@@ -1423,14 +1421,14 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1423 | DRM_ERROR("failed to attach phys overlay regs\n"); | 1421 | DRM_ERROR("failed to attach phys overlay regs\n"); |
1424 | goto out_free_bo; | 1422 | goto out_free_bo; |
1425 | } | 1423 | } |
1426 | overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; | 1424 | overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; |
1427 | } else { | 1425 | } else { |
1428 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); | 1426 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); |
1429 | if (ret) { | 1427 | if (ret) { |
1430 | DRM_ERROR("failed to pin overlay register bo\n"); | 1428 | DRM_ERROR("failed to pin overlay register bo\n"); |
1431 | goto out_free_bo; | 1429 | goto out_free_bo; |
1432 | } | 1430 | } |
1433 | overlay->flip_addr = overlay->reg_bo->gtt_offset; | 1431 | overlay->flip_addr = reg_bo->gtt_offset; |
1434 | 1432 | ||
1435 | ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); | 1433 | ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); |
1436 | if (ret) { | 1434 | if (ret) { |
@@ -1462,7 +1460,7 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1462 | out_unpin_bo: | 1460 | out_unpin_bo: |
1463 | i915_gem_object_unpin(reg_bo); | 1461 | i915_gem_object_unpin(reg_bo); |
1464 | out_free_bo: | 1462 | out_free_bo: |
1465 | drm_gem_object_unreference(reg_bo); | 1463 | drm_gem_object_unreference(&reg_bo->base); |
1466 | out_free: | 1464 | out_free: |
1467 | kfree(overlay); | 1465 | kfree(overlay); |
1468 | return; | 1466 | return; |
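In the overlay code the core type now appears only at the handle lookup, which is immediately wrapped in to_intel_bo(); sizes that used to come from drm_gem_object are read through base.size. A small sketch of that lookup-and-validate pattern, mirroring intel_overlay_put_image() and check_overlay_src() (the helper itself and the single size check are illustrative):

static struct drm_i915_gem_object *
example_lookup_overlay_bo(struct drm_device *dev, struct drm_file *file_priv,
                          u32 handle, u32 needed_size)
{
        struct drm_i915_gem_object *new_bo;

        /* drm_gem_object_lookup() still returns the core type; convert once */
        new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, handle));
        if (!new_bo)
                return NULL;

        /* object size now lives on the embedded base */
        if (needed_size > new_bo->base.size) {
                drm_gem_object_unreference_unlocked(&new_bo->base);
                return NULL;
        }

        return new_bo;
}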
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 1db860d7989a..181aad31125d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -139,7 +139,7 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) | |||
139 | static int init_ring_common(struct intel_ring_buffer *ring) | 139 | static int init_ring_common(struct intel_ring_buffer *ring) |
140 | { | 140 | { |
141 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 141 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
142 | struct drm_i915_gem_object *obj_priv = to_intel_bo(ring->gem_object); | 142 | struct drm_i915_gem_object *obj = ring->obj; |
143 | u32 head; | 143 | u32 head; |
144 | 144 | ||
145 | /* Stop the ring if it's running. */ | 145 | /* Stop the ring if it's running. */ |
@@ -148,7 +148,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
148 | ring->write_tail(ring, 0); | 148 | ring->write_tail(ring, 0); |
149 | 149 | ||
150 | /* Initialize the ring. */ | 150 | /* Initialize the ring. */ |
151 | I915_WRITE_START(ring, obj_priv->gtt_offset); | 151 | I915_WRITE_START(ring, obj->gtt_offset); |
152 | head = I915_READ_HEAD(ring) & HEAD_ADDR; | 152 | head = I915_READ_HEAD(ring) & HEAD_ADDR; |
153 | 153 | ||
154 | /* G45 ring initialization fails to reset head to zero */ | 154 | /* G45 ring initialization fails to reset head to zero */ |
@@ -178,7 +178,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
178 | 178 | ||
179 | /* If the head is still not zero, the ring is dead */ | 179 | /* If the head is still not zero, the ring is dead */ |
180 | if ((I915_READ_CTL(ring) & RING_VALID) == 0 || | 180 | if ((I915_READ_CTL(ring) & RING_VALID) == 0 || |
181 | I915_READ_START(ring) != obj_priv->gtt_offset || | 181 | I915_READ_START(ring) != obj->gtt_offset || |
182 | (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { | 182 | (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { |
183 | DRM_ERROR("%s initialization failed " | 183 | DRM_ERROR("%s initialization failed " |
184 | "ctl %08x head %08x tail %08x start %08x\n", | 184 | "ctl %08x head %08x tail %08x start %08x\n", |
@@ -514,17 +514,15 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | |||
514 | static void cleanup_status_page(struct intel_ring_buffer *ring) | 514 | static void cleanup_status_page(struct intel_ring_buffer *ring) |
515 | { | 515 | { |
516 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 516 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
517 | struct drm_gem_object *obj; | 517 | struct drm_i915_gem_object *obj; |
518 | struct drm_i915_gem_object *obj_priv; | ||
519 | 518 | ||
520 | obj = ring->status_page.obj; | 519 | obj = ring->status_page.obj; |
521 | if (obj == NULL) | 520 | if (obj == NULL) |
522 | return; | 521 | return; |
523 | obj_priv = to_intel_bo(obj); | ||
524 | 522 | ||
525 | kunmap(obj_priv->pages[0]); | 523 | kunmap(obj->pages[0]); |
526 | i915_gem_object_unpin(obj); | 524 | i915_gem_object_unpin(obj); |
527 | drm_gem_object_unreference(obj); | 525 | drm_gem_object_unreference(&obj->base); |
528 | ring->status_page.obj = NULL; | 526 | ring->status_page.obj = NULL; |
529 | 527 | ||
530 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 528 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
@@ -534,8 +532,7 @@ static int init_status_page(struct intel_ring_buffer *ring) | |||
534 | { | 532 | { |
535 | struct drm_device *dev = ring->dev; | 533 | struct drm_device *dev = ring->dev; |
536 | drm_i915_private_t *dev_priv = dev->dev_private; | 534 | drm_i915_private_t *dev_priv = dev->dev_private; |
537 | struct drm_gem_object *obj; | 535 | struct drm_i915_gem_object *obj; |
538 | struct drm_i915_gem_object *obj_priv; | ||
539 | int ret; | 536 | int ret; |
540 | 537 | ||
541 | obj = i915_gem_alloc_object(dev, 4096); | 538 | obj = i915_gem_alloc_object(dev, 4096); |
@@ -544,16 +541,15 @@ static int init_status_page(struct intel_ring_buffer *ring) | |||
544 | ret = -ENOMEM; | 541 | ret = -ENOMEM; |
545 | goto err; | 542 | goto err; |
546 | } | 543 | } |
547 | obj_priv = to_intel_bo(obj); | 544 | obj->agp_type = AGP_USER_CACHED_MEMORY; |
548 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | ||
549 | 545 | ||
550 | ret = i915_gem_object_pin(obj, 4096, true); | 546 | ret = i915_gem_object_pin(obj, 4096, true); |
551 | if (ret != 0) { | 547 | if (ret != 0) { |
552 | goto err_unref; | 548 | goto err_unref; |
553 | } | 549 | } |
554 | 550 | ||
555 | ring->status_page.gfx_addr = obj_priv->gtt_offset; | 551 | ring->status_page.gfx_addr = obj->gtt_offset; |
556 | ring->status_page.page_addr = kmap(obj_priv->pages[0]); | 552 | ring->status_page.page_addr = kmap(obj->pages[0]); |
557 | if (ring->status_page.page_addr == NULL) { | 553 | if (ring->status_page.page_addr == NULL) { |
558 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 554 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
559 | goto err_unpin; | 555 | goto err_unpin; |
@@ -570,7 +566,7 @@ static int init_status_page(struct intel_ring_buffer *ring) | |||
570 | err_unpin: | 566 | err_unpin: |
571 | i915_gem_object_unpin(obj); | 567 | i915_gem_object_unpin(obj); |
572 | err_unref: | 568 | err_unref: |
573 | drm_gem_object_unreference(obj); | 569 | drm_gem_object_unreference(&obj->base); |
574 | err: | 570 | err: |
575 | return ret; | 571 | return ret; |
576 | } | 572 | } |
@@ -578,8 +574,7 @@ err: | |||
578 | int intel_init_ring_buffer(struct drm_device *dev, | 574 | int intel_init_ring_buffer(struct drm_device *dev, |
579 | struct intel_ring_buffer *ring) | 575 | struct intel_ring_buffer *ring) |
580 | { | 576 | { |
581 | struct drm_i915_gem_object *obj_priv; | 577 | struct drm_i915_gem_object *obj; |
582 | struct drm_gem_object *obj; | ||
583 | int ret; | 578 | int ret; |
584 | 579 | ||
585 | ring->dev = dev; | 580 | ring->dev = dev; |
@@ -600,15 +595,14 @@ int intel_init_ring_buffer(struct drm_device *dev, | |||
600 | goto err_hws; | 595 | goto err_hws; |
601 | } | 596 | } |
602 | 597 | ||
603 | ring->gem_object = obj; | 598 | ring->obj = obj; |
604 | 599 | ||
605 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true); | 600 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true); |
606 | if (ret) | 601 | if (ret) |
607 | goto err_unref; | 602 | goto err_unref; |
608 | 603 | ||
609 | obj_priv = to_intel_bo(obj); | ||
610 | ring->map.size = ring->size; | 604 | ring->map.size = ring->size; |
611 | ring->map.offset = dev->agp->base + obj_priv->gtt_offset; | 605 | ring->map.offset = dev->agp->base + obj->gtt_offset; |
612 | ring->map.type = 0; | 606 | ring->map.type = 0; |
613 | ring->map.flags = 0; | 607 | ring->map.flags = 0; |
614 | ring->map.mtrr = 0; | 608 | ring->map.mtrr = 0; |
@@ -632,8 +626,8 @@ err_unmap: | |||
632 | err_unpin: | 626 | err_unpin: |
633 | i915_gem_object_unpin(obj); | 627 | i915_gem_object_unpin(obj); |
634 | err_unref: | 628 | err_unref: |
635 | drm_gem_object_unreference(obj); | 629 | drm_gem_object_unreference(&obj->base); |
636 | ring->gem_object = NULL; | 630 | ring->obj = NULL; |
637 | err_hws: | 631 | err_hws: |
638 | cleanup_status_page(ring); | 632 | cleanup_status_page(ring); |
639 | return ret; | 633 | return ret; |
@@ -644,7 +638,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) | |||
644 | struct drm_i915_private *dev_priv; | 638 | struct drm_i915_private *dev_priv; |
645 | int ret; | 639 | int ret; |
646 | 640 | ||
647 | if (ring->gem_object == NULL) | 641 | if (ring->obj == NULL) |
648 | return; | 642 | return; |
649 | 643 | ||
650 | /* Disable the ring buffer. The ring must be idle at this point */ | 644 | /* Disable the ring buffer. The ring must be idle at this point */ |
@@ -654,9 +648,9 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) | |||
654 | 648 | ||
655 | drm_core_ioremapfree(&ring->map, ring->dev); | 649 | drm_core_ioremapfree(&ring->map, ring->dev); |
656 | 650 | ||
657 | i915_gem_object_unpin(ring->gem_object); | 651 | i915_gem_object_unpin(ring->obj); |
658 | drm_gem_object_unreference(ring->gem_object); | 652 | drm_gem_object_unreference(&ring->obj->base); |
659 | ring->gem_object = NULL; | 653 | ring->obj = NULL; |
660 | 654 | ||
661 | if (ring->cleanup) | 655 | if (ring->cleanup) |
662 | ring->cleanup(ring); | 656 | ring->cleanup(ring); |
@@ -902,11 +896,11 @@ static int blt_ring_init(struct intel_ring_buffer *ring) | |||
902 | u32 *ptr; | 896 | u32 *ptr; |
903 | int ret; | 897 | int ret; |
904 | 898 | ||
905 | obj = to_intel_bo(i915_gem_alloc_object(ring->dev, 4096)); | 899 | obj = i915_gem_alloc_object(ring->dev, 4096); |
906 | if (obj == NULL) | 900 | if (obj == NULL) |
907 | return -ENOMEM; | 901 | return -ENOMEM; |
908 | 902 | ||
909 | ret = i915_gem_object_pin(&obj->base, 4096, true); | 903 | ret = i915_gem_object_pin(obj, 4096, true); |
910 | if (ret) { | 904 | if (ret) { |
911 | drm_gem_object_unreference(&obj->base); | 905 | drm_gem_object_unreference(&obj->base); |
912 | return ret; | 906 | return ret; |
@@ -917,9 +911,9 @@ static int blt_ring_init(struct intel_ring_buffer *ring) | |||
917 | *ptr++ = MI_NOOP; | 911 | *ptr++ = MI_NOOP; |
918 | kunmap(obj->pages[0]); | 912 | kunmap(obj->pages[0]); |
919 | 913 | ||
920 | ret = i915_gem_object_set_to_gtt_domain(&obj->base, false); | 914 | ret = i915_gem_object_set_to_gtt_domain(obj, false); |
921 | if (ret) { | 915 | if (ret) { |
922 | i915_gem_object_unpin(&obj->base); | 916 | i915_gem_object_unpin(obj); |
923 | drm_gem_object_unreference(&obj->base); | 917 | drm_gem_object_unreference(&obj->base); |
924 | return ret; | 918 | return ret; |
925 | } | 919 | } |
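The ring code follows the same shape: the status-page and ring objects are stored as drm_i915_gem_object pointers, so agp_type, gtt_offset and pages[] are reached directly and only pin/unreference touch &obj->base. A condensed sketch of the status-page setup after this patch, simplified from init_status_page() (error handling trimmed; not a drop-in replacement):

static int example_init_status_page(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_gem_object *obj;
        int ret;

        obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL)
                return -ENOMEM;

        obj->agp_type = AGP_USER_CACHED_MEMORY; /* set on the i915 object directly */

        ret = i915_gem_object_pin(obj, 4096, true);
        if (ret)
                goto err_unref;

        ring->status_page.gfx_addr = obj->gtt_offset;
        ring->status_page.page_addr = kmap(obj->pages[0]);
        if (ring->status_page.page_addr == NULL) {
                ret = -ENOMEM;
                goto err_unpin;
        }
        ring->status_page.obj = obj;    /* field now holds the i915 type as well */
        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
        return ret;
}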
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2565d65a625b..1747e329ee94 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -4,7 +4,7 @@ | |||
4 | struct intel_hw_status_page { | 4 | struct intel_hw_status_page { |
5 | u32 __iomem *page_addr; | 5 | u32 __iomem *page_addr; |
6 | unsigned int gfx_addr; | 6 | unsigned int gfx_addr; |
7 | struct drm_gem_object *obj; | 7 | struct drm_i915_gem_object *obj; |
8 | }; | 8 | }; |
9 | 9 | ||
10 | #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) | 10 | #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) |
@@ -32,7 +32,7 @@ struct intel_ring_buffer { | |||
32 | u32 mmio_base; | 32 | u32 mmio_base; |
33 | void *virtual_start; | 33 | void *virtual_start; |
34 | struct drm_device *dev; | 34 | struct drm_device *dev; |
35 | struct drm_gem_object *gem_object; | 35 | struct drm_i915_gem_object *obj; |
36 | 36 | ||
37 | unsigned int head; | 37 | unsigned int head; |
38 | unsigned int tail; | 38 | unsigned int tail; |
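With the header storing the i915 type (and gem_object renamed to obj), ring users reach driver-private state in a single dereference. A minimal sketch of what the start-address programming in init_ring_common() reduces to, using the I915_WRITE_START macro and the dev_priv-in-scope convention already used in this file (the helper name is illustrative):

static void example_write_ring_start(struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private; /* register macros expect dev_priv in scope */
        struct drm_i915_gem_object *obj = ring->obj;           /* field was previously ring->gem_object */

        /* driver-private gtt_offset is reached without to_intel_bo() */
        I915_WRITE_START(ring, obj->gtt_offset);
}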