 drivers/gpu/drm/drm_mm.c                   |  41
 drivers/gpu/drm/i915/i915_dma.c            |   3
 drivers/gpu/drm/i915/i915_drv.h            |   8
 drivers/gpu/drm/i915/i915_gem.c            |  77
 drivers/gpu/drm/i915/i915_gem_dmabuf.c     |   2
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   2
 drivers/gpu/drm/i915/i915_irq.c            |  12
 drivers/gpu/drm/i915/i915_reg.h            |   4
 drivers/gpu/drm/i915/intel_display.c       |  23
 drivers/gpu/drm/i915/intel_pm.c            | 160
 drivers/gpu/drm/i915/intel_ringbuffer.c    |  76
 drivers/gpu/drm/i915/intel_ringbuffer.h    |   1
 include/drm/drm_mm.h                       |  25
 include/uapi/drm/i915_drm.h                |  10
 14 files changed, 356 insertions(+), 88 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 0761a03cdbb2..2bf9670ba29b 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -184,19 +184,27 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
  * -ENOSPC if no suitable free area is available. The preallocated memory node
  * must be cleared.
  */
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
-                       unsigned long size, unsigned alignment)
+int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+                               unsigned long size, unsigned alignment,
+                               unsigned long color)
 {
         struct drm_mm_node *hole_node;
 
-        hole_node = drm_mm_search_free(mm, size, alignment, false);
+        hole_node = drm_mm_search_free_generic(mm, size, alignment,
+                                               color, 0);
         if (!hole_node)
                 return -ENOSPC;
 
-        drm_mm_insert_helper(hole_node, node, size, alignment, 0);
-
+        drm_mm_insert_helper(hole_node, node, size, alignment, color);
         return 0;
 }
+EXPORT_SYMBOL(drm_mm_insert_node_generic);
+
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+                       unsigned long size, unsigned alignment)
+{
+        return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
+}
 EXPORT_SYMBOL(drm_mm_insert_node);
 
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
@@ -275,22 +283,31 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
  * -ENOSPC if no suitable free area is available. This is for range
  * restricted allocations. The preallocated memory node must be cleared.
  */
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
-                                unsigned long size, unsigned alignment,
-                                unsigned long start, unsigned long end)
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+                                        unsigned long size, unsigned alignment, unsigned long color,
+                                        unsigned long start, unsigned long end)
 {
         struct drm_mm_node *hole_node;
 
-        hole_node = drm_mm_search_free_in_range(mm, size, alignment,
-                                                start, end, false);
+        hole_node = drm_mm_search_free_in_range_generic(mm,
+                                                        size, alignment, color,
+                                                        start, end, 0);
         if (!hole_node)
                 return -ENOSPC;
 
-        drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
+        drm_mm_insert_helper_range(hole_node, node,
+                                   size, alignment, color,
                                    start, end);
-
         return 0;
 }
+EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
+
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+                                unsigned long size, unsigned alignment,
+                                unsigned long start, unsigned long end)
+{
+        return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
+}
 EXPORT_SYMBOL(drm_mm_insert_node_in_range);
 
 /**
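
For reference, a minimal sketch (not part of this patch) of how a caller might use the new color-aware helper exported above. The surrounding names (mm, obj, size, alignment) are placeholders; error handling mirrors the i915 conversion later in this series, where -ENOSPC is the cue to evict something and retry.

        struct drm_mm_node *node;
        int err;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        /* Place 'size' bytes at 'alignment', tagged with a coloring value so
         * the allocator keeps differently-colored neighbours apart. */
        err = drm_mm_insert_node_generic(mm, node, size, alignment,
                                         obj->cache_level);
        if (err) {
                kfree(node);
                return err;     /* typically -ENOSPC: evict and retry */
        }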
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 8f63cd5de4b4..99daa896105d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -989,6 +989,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
         case I915_PARAM_HAS_SECURE_BATCHES:
                 value = capable(CAP_SYS_ADMIN);
                 break;
+        case I915_PARAM_HAS_PINNED_BATCHES:
+                value = 1;
+                break;
         default:
                 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                  param->param);
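
As a hedged illustration of the userspace side (not part of this patch), a libdrm-style probe of the new parameter; the helper name and return convention are hypothetical, but drm_i915_getparam_t and DRM_IOCTL_I915_GETPARAM are the existing GETPARAM interface.

#include <errno.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Returns non-zero if the running kernel advertises pinned-batch support. */
static int has_pinned_batches(int fd)
{
        drm_i915_getparam_t gp;
        int value = 0;

        gp.param = I915_PARAM_HAS_PINNED_BATCHES;
        gp.value = &value;

        if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                return 0;       /* older kernel: parameter unknown */

        return value;
}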
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 557843dd4b2e..ed3059575576 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -780,6 +780,7 @@ typedef struct drm_i915_private {
         struct i915_hw_ppgtt *aliasing_ppgtt;
 
         struct shrinker inactive_shrinker;
+        bool shrinker_no_lock_stealing;
 
         /**
          * List of objects currently involved in rendering.
@@ -1100,6 +1101,7 @@ struct drm_i915_gem_object {
          */
         atomic_t pending_flip;
 };
+#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
@@ -1166,6 +1168,9 @@ struct drm_i915_file_private {
 #define IS_IVB_GT1(dev)         ((dev)->pci_device == 0x0156 || \
                                  (dev)->pci_device == 0x0152 || \
                                  (dev)->pci_device == 0x015a)
+#define IS_SNB_GT1(dev)         ((dev)->pci_device == 0x0102 || \
+                                 (dev)->pci_device == 0x0106 || \
+                                 (dev)->pci_device == 0x010A)
 #define IS_VALLEYVIEW(dev)      (INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev)         (INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)          (INTEL_INFO(dev)->is_mobile)
@@ -1196,6 +1201,9 @@ struct drm_i915_file_private {
 #define HAS_OVERLAY(dev)                (INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)     (INTEL_INFO(dev)->overlay_needs_physical)
 
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev)          (IS_I830(dev) || IS_845G(dev))
+
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 742206e45103..da3c82e301b1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1517,9 +1517,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
         if (obj->base.map_list.map)
                 return 0;
 
+        dev_priv->mm.shrinker_no_lock_stealing = true;
+
         ret = drm_gem_create_mmap_offset(&obj->base);
         if (ret != -ENOSPC)
-                return ret;
+                goto out;
 
         /* Badly fragmented mmap space? The only way we can recover
          * space is by destroying unwanted objects. We can't randomly release
@@ -1531,10 +1533,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
         i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
         ret = drm_gem_create_mmap_offset(&obj->base);
         if (ret != -ENOSPC)
-                return ret;
+                goto out;
 
         i915_gem_shrink_all(dev_priv);
-        return drm_gem_create_mmap_offset(&obj->base);
+        ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+        dev_priv->mm.shrinker_no_lock_stealing = false;
+
+        return ret;
 }
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -2890,7 +2896,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
         struct drm_device *dev = obj->base.dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
-        struct drm_mm_node *free_space;
+        struct drm_mm_node *node;
         u32 size, fence_size, fence_alignment, unfenced_alignment;
         bool mappable, fenceable;
         int ret;
@@ -2936,66 +2942,54 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
         i915_gem_object_pin_pages(obj);
 
+        node = kzalloc(sizeof(*node), GFP_KERNEL);
+        if (node == NULL) {
+                i915_gem_object_unpin_pages(obj);
+                return -ENOMEM;
+        }
+
  search_free:
         if (map_and_fenceable)
-                free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-                                                               size, alignment, obj->cache_level,
-                                                               0, dev_priv->mm.gtt_mappable_end,
-                                                               false);
+                ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+                                                          size, alignment, obj->cache_level,
+                                                          0, dev_priv->mm.gtt_mappable_end);
         else
-                free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
-                                                      size, alignment, obj->cache_level,
-                                                      false);
-
-        if (free_space != NULL) {
-                if (map_and_fenceable)
-                        free_space =
-                                drm_mm_get_block_range_generic(free_space,
-                                                               size, alignment, obj->cache_level,
-                                                               0, dev_priv->mm.gtt_mappable_end,
-                                                               false);
-                else
-                        free_space =
-                                drm_mm_get_block_generic(free_space,
-                                                         size, alignment, obj->cache_level,
-                                                         false);
-        }
-        if (free_space == NULL) {
+                ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+                                                 size, alignment, obj->cache_level);
+        if (ret) {
                 ret = i915_gem_evict_something(dev, size, alignment,
                                                obj->cache_level,
                                                map_and_fenceable,
                                                nonblocking);
-                if (ret) {
-                        i915_gem_object_unpin_pages(obj);
-                        return ret;
-                }
+                if (ret == 0)
+                        goto search_free;
 
-                goto search_free;
+                i915_gem_object_unpin_pages(obj);
+                kfree(node);
+                return ret;
         }
-        if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-                                              free_space,
-                                              obj->cache_level))) {
+        if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
                 i915_gem_object_unpin_pages(obj);
-                drm_mm_put_block(free_space);
+                drm_mm_put_block(node);
                 return -EINVAL;
         }
 
         ret = i915_gem_gtt_prepare_object(obj);
         if (ret) {
                 i915_gem_object_unpin_pages(obj);
-                drm_mm_put_block(free_space);
+                drm_mm_put_block(node);
                 return ret;
         }
 
         list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-        obj->gtt_space = free_space;
-        obj->gtt_offset = free_space->start;
+        obj->gtt_space = node;
+        obj->gtt_offset = node->start;
 
         fenceable =
-                free_space->size == fence_size &&
-                (free_space->start & (fence_alignment - 1)) == 0;
+                node->size == fence_size &&
+                (node->start & (fence_alignment - 1)) == 0;
 
         mappable =
                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@ -4392,6 +4386,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
                         return 0;
 
+                if (dev_priv->mm.shrinker_no_lock_stealing)
+                        return 0;
+
                 unlock = false;
         }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 773ef77b6c22..7be4241e8242 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -226,7 +226,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 {
         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 
-        return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
+        return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ee8f97f0539e..d6a994a07393 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -808,6 +808,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
                 flags |= I915_DISPATCH_SECURE;
         }
+        if (args->flags & I915_EXEC_IS_PINNED)
+                flags |= I915_DISPATCH_PINNED;
 
         switch (args->flags & I915_EXEC_RING_MASK) {
         case I915_EXEC_DEFAULT:
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a4dc97f8b9f0..2220dec3e5d9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1087,6 +1087,18 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
         if (!ring->get_seqno)
                 return NULL;
 
+        if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+                u32 acthd = I915_READ(ACTHD);
+
+                if (WARN_ON(ring->id != RCS))
+                        return NULL;
+
+                obj = ring->private;
+                if (acthd >= obj->gtt_offset &&
+                    acthd < obj->gtt_offset + obj->base.size)
+                        return i915_error_object_create(dev_priv, obj);
+        }
+
         seqno = ring->get_seqno(ring, false);
         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
                 if (obj->ring != ring)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3f75cfaf1c3f..186ee5c85b51 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -517,6 +517,7 @@
  * the enables for writing to the corresponding low bit.
  */
 #define _3D_CHICKEN     0x02084
+#define  _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB      (1 << 10)
 #define _3D_CHICKEN2    0x0208c
 /* Disables pipelining of read flushes past the SF-WIZ interface.
  * Required on all Ironlake steppings according to the B-Spec, but the
@@ -532,7 +533,8 @@
 # define MI_FLUSH_ENABLE                                (1 << 12)
 
 #define GEN6_GT_MODE    0x20d0
 #define   GEN6_GT_MODE_HI                               (1 << 9)
+#define   GEN6_TD_FOUR_ROW_DISPATCH_DISABLE             (1 << 5)
 
 #define GFX_MODE        0x02520
 #define GFX_MODE_GEN7   0x0229c
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5d127e068950..a9fb046b94a1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8144,10 +8144,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
                         DRM_DEBUG_KMS("encoder changed, full mode switch\n");
                         config->mode_changed = true;
                 }
-
-                /* Disable all disconnected encoders. */
-                if (connector->base.status == connector_status_disconnected)
-                        connector->new_encoder = NULL;
         }
         /* connector->new_encoder is now updated for all connectors. */
 
@@ -9167,6 +9163,23 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
          * the crtc fixup. */
 }
 
+static void i915_redisable_vga(struct drm_device *dev)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        u32 vga_reg;
+
+        if (HAS_PCH_SPLIT(dev))
+                vga_reg = CPU_VGACNTRL;
+        else
+                vga_reg = VGACNTRL;
+
+        if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+                DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+                I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+                POSTING_READ(vga_reg);
+        }
+}
+
 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm
  * and i915 state tracking structures. */
 void intel_modeset_setup_hw_state(struct drm_device *dev,
@@ -9275,6 +9288,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
                         intel_set_mode(&crtc->base, &crtc->base.mode,
                                        crtc->base.x, crtc->base.y, crtc->base.fb);
                 }
+
+                i915_redisable_vga(dev);
         } else {
                 intel_modeset_update_staged_output_state(dev);
         }
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 496caa73eb70..e6f54ffab3ba 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -405,7 +405,7 @@ void intel_update_fbc(struct drm_device *dev)
          *   - going to an unsupported config (interlace, pixel multiply, etc.)
          */
         list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
-                if (tmp_crtc->enabled &&
+                if (to_intel_crtc(tmp_crtc)->active &&
                     !to_intel_crtc(tmp_crtc)->primary_disabled &&
                     tmp_crtc->fb) {
                         if (crtc) {
@@ -992,7 +992,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
         struct drm_crtc *crtc, *enabled = NULL;
 
         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-                if (crtc->enabled && crtc->fb) {
+                if (to_intel_crtc(crtc)->active && crtc->fb) {
                         if (enabled)
                                 return NULL;
                         enabled = crtc;
@@ -1086,7 +1086,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
         int entries, tlb_miss;
 
         crtc = intel_get_crtc_for_plane(dev, plane);
-        if (crtc->fb == NULL || !crtc->enabled) {
+        if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
                 *cursor_wm = cursor->guard_size;
                 *plane_wm = display->guard_size;
                 return false;
@@ -1215,7 +1215,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
         int entries;
 
         crtc = intel_get_crtc_for_plane(dev, plane);
-        if (crtc->fb == NULL || !crtc->enabled)
+        if (crtc->fb == NULL || !to_intel_crtc(crtc)->active)
                 return false;
 
         clock = crtc->mode.clock;       /* VESA DOT Clock */
@@ -1286,6 +1286,7 @@ static void valleyview_update_wm(struct drm_device *dev)
         struct drm_i915_private *dev_priv = dev->dev_private;
         int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
         int plane_sr, cursor_sr;
+        int ignore_plane_sr, ignore_cursor_sr;
         unsigned int enabled = 0;
 
         vlv_update_drain_latency(dev);
@@ -1302,17 +1303,23 @@ static void valleyview_update_wm(struct drm_device *dev)
                             &planeb_wm, &cursorb_wm))
                 enabled |= 2;
 
-        plane_sr = cursor_sr = 0;
         if (single_plane_enabled(enabled) &&
             g4x_compute_srwm(dev, ffs(enabled) - 1,
                              sr_latency_ns,
                              &valleyview_wm_info,
                              &valleyview_cursor_wm_info,
-                             &plane_sr, &cursor_sr))
+                             &plane_sr, &ignore_cursor_sr) &&
+            g4x_compute_srwm(dev, ffs(enabled) - 1,
+                             2*sr_latency_ns,
+                             &valleyview_wm_info,
+                             &valleyview_cursor_wm_info,
+                             &ignore_plane_sr, &cursor_sr)) {
                 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
-        else
+        } else {
                 I915_WRITE(FW_BLC_SELF_VLV,
                            I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+                plane_sr = cursor_sr = 0;
+        }
 
         DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                       planea_wm, cursora_wm,
@@ -1352,17 +1359,18 @@ static void g4x_update_wm(struct drm_device *dev)
                             &planeb_wm, &cursorb_wm))
                 enabled |= 2;
 
-        plane_sr = cursor_sr = 0;
         if (single_plane_enabled(enabled) &&
             g4x_compute_srwm(dev, ffs(enabled) - 1,
                              sr_latency_ns,
                              &g4x_wm_info,
                              &g4x_cursor_wm_info,
-                             &plane_sr, &cursor_sr))
+                             &plane_sr, &cursor_sr)) {
                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
-        else
+        } else {
                 I915_WRITE(FW_BLC_SELF,
                            I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+                plane_sr = cursor_sr = 0;
+        }
 
         DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                       planea_wm, cursora_wm,
@@ -1468,7 +1476,7 @@ static void i9xx_update_wm(struct drm_device *dev)
 
         fifo_size = dev_priv->display.get_fifo_size(dev, 0);
         crtc = intel_get_crtc_for_plane(dev, 0);
-        if (crtc->enabled && crtc->fb) {
+        if (to_intel_crtc(crtc)->active && crtc->fb) {
                 int cpp = crtc->fb->bits_per_pixel / 8;
                 if (IS_GEN2(dev))
                         cpp = 4;
@@ -1482,7 +1490,7 @@ static void i9xx_update_wm(struct drm_device *dev)
 
         fifo_size = dev_priv->display.get_fifo_size(dev, 1);
         crtc = intel_get_crtc_for_plane(dev, 1);
-        if (crtc->enabled && crtc->fb) {
+        if (to_intel_crtc(crtc)->active && crtc->fb) {
                 int cpp = crtc->fb->bits_per_pixel / 8;
                 if (IS_GEN2(dev))
                         cpp = 4;
@@ -1811,8 +1819,110 @@ static void sandybridge_update_wm(struct drm_device *dev)
                 enabled |= 2;
         }
 
-        if ((dev_priv->num_pipe == 3) &&
-            g4x_compute_wm0(dev, 2,
+        /*
+         * Calculate and update the self-refresh watermark only when one
+         * display plane is used.
+         *
+         * SNB support 3 levels of watermark.
+         *
+         * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
+         * and disabled in the descending order
+         *
+         */
+        I915_WRITE(WM3_LP_ILK, 0);
+        I915_WRITE(WM2_LP_ILK, 0);
+        I915_WRITE(WM1_LP_ILK, 0);
+
+        if (!single_plane_enabled(enabled) ||
+            dev_priv->sprite_scaling_enabled)
+                return;
+        enabled = ffs(enabled) - 1;
+
+        /* WM1 */
+        if (!ironlake_compute_srwm(dev, 1, enabled,
+                                   SNB_READ_WM1_LATENCY() * 500,
+                                   &sandybridge_display_srwm_info,
+                                   &sandybridge_cursor_srwm_info,
+                                   &fbc_wm, &plane_wm, &cursor_wm))
+                return;
+
+        I915_WRITE(WM1_LP_ILK,
+                   WM1_LP_SR_EN |
+                   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+                   (fbc_wm << WM1_LP_FBC_SHIFT) |
+                   (plane_wm << WM1_LP_SR_SHIFT) |
+                   cursor_wm);
+
+        /* WM2 */
+        if (!ironlake_compute_srwm(dev, 2, enabled,
+                                   SNB_READ_WM2_LATENCY() * 500,
+                                   &sandybridge_display_srwm_info,
+                                   &sandybridge_cursor_srwm_info,
+                                   &fbc_wm, &plane_wm, &cursor_wm))
+                return;
+
+        I915_WRITE(WM2_LP_ILK,
+                   WM2_LP_EN |
+                   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+                   (fbc_wm << WM1_LP_FBC_SHIFT) |
+                   (plane_wm << WM1_LP_SR_SHIFT) |
+                   cursor_wm);
+
+        /* WM3 */
+        if (!ironlake_compute_srwm(dev, 3, enabled,
+                                   SNB_READ_WM3_LATENCY() * 500,
+                                   &sandybridge_display_srwm_info,
+                                   &sandybridge_cursor_srwm_info,
+                                   &fbc_wm, &plane_wm, &cursor_wm))
+                return;
+
+        I915_WRITE(WM3_LP_ILK,
+                   WM3_LP_EN |
+                   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+                   (fbc_wm << WM1_LP_FBC_SHIFT) |
+                   (plane_wm << WM1_LP_SR_SHIFT) |
+                   cursor_wm);
+}
+
+static void ivybridge_update_wm(struct drm_device *dev)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
+        u32 val;
+        int fbc_wm, plane_wm, cursor_wm;
+        int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
+        unsigned int enabled;
+
+        enabled = 0;
+        if (g4x_compute_wm0(dev, 0,
+                            &sandybridge_display_wm_info, latency,
+                            &sandybridge_cursor_wm_info, latency,
+                            &plane_wm, &cursor_wm)) {
+                val = I915_READ(WM0_PIPEA_ILK);
+                val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+                I915_WRITE(WM0_PIPEA_ILK, val |
+                           ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+                              " plane %d, " "cursor: %d\n",
+                              plane_wm, cursor_wm);
+                enabled |= 1;
+        }
+
+        if (g4x_compute_wm0(dev, 1,
+                            &sandybridge_display_wm_info, latency,
+                            &sandybridge_cursor_wm_info, latency,
+                            &plane_wm, &cursor_wm)) {
+                val = I915_READ(WM0_PIPEB_ILK);
+                val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+                I915_WRITE(WM0_PIPEB_ILK, val |
+                           ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+                              " plane %d, cursor: %d\n",
+                              plane_wm, cursor_wm);
+                enabled |= 2;
+        }
+
+        if (g4x_compute_wm0(dev, 2,
                             &sandybridge_display_wm_info, latency,
                             &sandybridge_cursor_wm_info, latency,
                             &plane_wm, &cursor_wm)) {
@@ -1875,12 +1985,17 @@ static void sandybridge_update_wm(struct drm_device *dev)
                    (plane_wm << WM1_LP_SR_SHIFT) |
                    cursor_wm);
 
-        /* WM3 */
+        /* WM3, note we have to correct the cursor latency */
         if (!ironlake_compute_srwm(dev, 3, enabled,
                                    SNB_READ_WM3_LATENCY() * 500,
                                    &sandybridge_display_srwm_info,
                                    &sandybridge_cursor_srwm_info,
-                                   &fbc_wm, &plane_wm, &cursor_wm))
+                                   &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
+            !ironlake_compute_srwm(dev, 3, enabled,
+                                   2 * SNB_READ_WM3_LATENCY() * 500,
+                                   &sandybridge_display_srwm_info,
+                                   &sandybridge_cursor_srwm_info,
+                                   &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
                 return;
 
         I915_WRITE(WM3_LP_ILK,
@@ -1929,7 +2044,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
         int entries, tlb_miss;
 
         crtc = intel_get_crtc_for_plane(dev, plane);
-        if (crtc->fb == NULL || !crtc->enabled) {
+        if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
                 *sprite_wm = display->guard_size;
                 return false;
         }
@@ -3471,6 +3586,15 @@ static void gen6_init_clock_gating(struct drm_device *dev)
                    I915_READ(ILK_DISPLAY_CHICKEN2) |
                    ILK_ELPIN_409_SELECT);
 
+        /* WaDisableHiZPlanesWhenMSAAEnabled */
+        I915_WRITE(_3D_CHICKEN,
+                   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
+
+        /* WaSetupGtModeTdRowDispatch */
+        if (IS_SNB_GT1(dev))
+                I915_WRITE(GEN6_GT_MODE,
+                           _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+
         I915_WRITE(WM3_LP_ILK, 0);
         I915_WRITE(WM2_LP_ILK, 0);
         I915_WRITE(WM1_LP_ILK, 0);
@@ -3999,7 +4123,7 @@ void intel_init_pm(struct drm_device *dev)
         } else if (IS_IVYBRIDGE(dev)) {
                 /* FIXME: detect B0+ stepping and use auto training */
                 if (SNB_READ_WM0_LATENCY()) {
-                        dev_priv->display.update_wm = sandybridge_update_wm;
+                        dev_priv->display.update_wm = ivybridge_update_wm;
                         dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
                 } else {
                         DRM_DEBUG_KMS("Failed to read display plane latency. "
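
A note on the _MASKED_BIT_ENABLE() writes in the gen6_init_clock_gating() hunk above: _3D_CHICKEN and GEN6_GT_MODE are masked registers, where the high 16 bits of a write select which of the low 16 bits actually change. The sketch below restates the existing i915 convention (from i915_reg.h) for clarity; it is not new code in this patch.

/* Masked-register write helpers: the bit value is mirrored into the
 * write-enable half of the register, so untouched bits keep their value. */
#define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)  ((a) << 16)

/* The WaSetupGtModeTdRowDispatch write above therefore sets bit 5 of
 * GEN6_GT_MODE while leaving every other bit of the register alone:
 *   I915_WRITE(GEN6_GT_MODE,
 *              _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
 */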
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2346b920bd86..ae253e04c391 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -547,9 +547,14 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 
 static void render_ring_cleanup(struct intel_ring_buffer *ring)
 {
+        struct drm_device *dev = ring->dev;
+
         if (!ring->private)
                 return;
 
+        if (HAS_BROKEN_CS_TLB(dev))
+                drm_gem_object_unreference(to_gem_object(ring->private));
+
         cleanup_pipe_control(ring);
 }
 
@@ -969,6 +974,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
         return 0;
 }
 
+/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
+#define I830_BATCH_LIMIT (256*1024)
 static int
 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
                                 u32 offset, u32 len,
@@ -976,15 +983,47 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 {
         int ret;
 
-        ret = intel_ring_begin(ring, 4);
-        if (ret)
-                return ret;
+        if (flags & I915_DISPATCH_PINNED) {
+                ret = intel_ring_begin(ring, 4);
+                if (ret)
+                        return ret;
 
-        intel_ring_emit(ring, MI_BATCH_BUFFER);
-        intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-        intel_ring_emit(ring, offset + len - 8);
-        intel_ring_emit(ring, 0);
-        intel_ring_advance(ring);
+                intel_ring_emit(ring, MI_BATCH_BUFFER);
+                intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+                intel_ring_emit(ring, offset + len - 8);
+                intel_ring_emit(ring, MI_NOOP);
+                intel_ring_advance(ring);
+        } else {
+                struct drm_i915_gem_object *obj = ring->private;
+                u32 cs_offset = obj->gtt_offset;
+
+                if (len > I830_BATCH_LIMIT)
+                        return -ENOSPC;
+
+                ret = intel_ring_begin(ring, 9+3);
+                if (ret)
+                        return ret;
+                /* Blit the batch (which has now all relocs applied) to the stable batch
+                 * scratch bo area (so that the CS never stumbles over its tlb
+                 * invalidation bug) ... */
+                intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+                                XY_SRC_COPY_BLT_WRITE_ALPHA |
+                                XY_SRC_COPY_BLT_WRITE_RGB);
+                intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+                intel_ring_emit(ring, 0);
+                intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+                intel_ring_emit(ring, cs_offset);
+                intel_ring_emit(ring, 0);
+                intel_ring_emit(ring, 4096);
+                intel_ring_emit(ring, offset);
+                intel_ring_emit(ring, MI_FLUSH);
+
+                /* ... and execute it. */
+                intel_ring_emit(ring, MI_BATCH_BUFFER);
+                intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+                intel_ring_emit(ring, cs_offset + len - 8);
+                intel_ring_advance(ring);
+        }
 
         return 0;
 }
@@ -1596,6 +1635,27 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
         ring->init = init_render_ring;
         ring->cleanup = render_ring_cleanup;
 
+        /* Workaround batchbuffer to combat CS tlb bug. */
+        if (HAS_BROKEN_CS_TLB(dev)) {
+                struct drm_i915_gem_object *obj;
+                int ret;
+
+                obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+                if (obj == NULL) {
+                        DRM_ERROR("Failed to allocate batch bo\n");
+                        return -ENOMEM;
+                }
+
+                ret = i915_gem_object_pin(obj, 0, true, false);
+                if (ret != 0) {
+                        drm_gem_object_unreference(&obj->base);
+                        DRM_ERROR("Failed to ping batch bo\n");
+                        return ret;
+                }
+
+                ring->private = obj;
+        }
+
         return intel_init_ring_buffer(dev, ring);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 526182ed0c6d..6af87cd05725 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -94,6 +94,7 @@ struct intel_ring_buffer {
                                         u32 offset, u32 length,
                                         unsigned flags);
 #define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
         void            (*cleanup)(struct intel_ring_buffer *ring);
         int             (*sync_to)(struct intel_ring_buffer *ring,
                                    struct intel_ring_buffer *to,
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 06d7f798a08c..0f4a366f6fa6 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -158,12 +158,29 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
         return drm_mm_get_block_range_generic(parent, size, alignment, 0,
                                               start, end, 1);
 }
-extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
-                              unsigned long size, unsigned alignment);
+
+extern int drm_mm_insert_node(struct drm_mm *mm,
+                              struct drm_mm_node *node,
+                              unsigned long size,
+                              unsigned alignment);
 extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
                                         struct drm_mm_node *node,
-                                        unsigned long size, unsigned alignment,
-                                        unsigned long start, unsigned long end);
+                                        unsigned long size,
+                                        unsigned alignment,
+                                        unsigned long start,
+                                        unsigned long end);
+extern int drm_mm_insert_node_generic(struct drm_mm *mm,
+                                      struct drm_mm_node *node,
+                                      unsigned long size,
+                                      unsigned alignment,
+                                      unsigned long color);
+extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
+                                               struct drm_mm_node *node,
+                                               unsigned long size,
+                                               unsigned alignment,
+                                               unsigned long color,
+                                               unsigned long start,
+                                               unsigned long end);
 extern void drm_mm_put_block(struct drm_mm_node *cur);
 extern void drm_mm_remove_node(struct drm_mm_node *node);
 extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index b746a3cf5fa9..c4d2e9c74002 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -307,6 +307,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH  21
 #define I915_PARAM_RSVD_FOR_FUTURE_USE   22
 #define I915_PARAM_HAS_SECURE_BATCHES    23
+#define I915_PARAM_HAS_PINNED_BATCHES    24
 
 typedef struct drm_i915_getparam {
         int param;
@@ -677,6 +678,15 @@ struct drm_i915_gem_execbuffer2 {
  */
 #define I915_EXEC_SECURE                (1<<9)
 
+/** Inform the kernel that the batch is and will always be pinned. This
+ * negates the requirement for a workaround to be performed to avoid
+ * an incoherent CS (such as can be found on 830/845). If this flag is
+ * not passed, the kernel will endeavour to make sure the batch is
+ * coherent with the CS before execution. If this flag is passed,
+ * userspace assumes the responsibility for ensuring the same.
+ */
+#define I915_EXEC_IS_PINNED             (1<<10)
+
 #define I915_EXEC_CONTEXT_ID_MASK       (0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
         (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
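
To close the loop on the new execbuffer flag, a hedged userspace sketch (not part of this patch) of a submission that promises the batch stays pinned, letting the kernel skip the 830/845 CS TLB blit workaround. It assumes the same libdrm headers as the GETPARAM sketch earlier, and fd, exec_objects, count and batch_len are assumed to be prepared as for any ordinary execbuffer2 call.

        struct drm_i915_gem_execbuffer2 execbuf;

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr = (uintptr_t)exec_objects;
        execbuf.buffer_count = count;
        execbuf.batch_len = batch_len;
        /* Promise the kernel the batch object never moves while it runs. */
        execbuf.flags = I915_EXEC_RENDER | I915_EXEC_IS_PINNED;

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
                return -errno;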