Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c            |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |  42
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            | 367
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c    | 218
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c      |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 132
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c        |  36
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c            |  18
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h            |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c          |  62
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h          |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       |  60
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c            |  76
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h           |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c         |  19
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c          |   9
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c       |  12
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c         |  13
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c            |  50
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c    |  54
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h    |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c          |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c        |   2
23 files changed, 677 insertions(+), 515 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 96177eec0a0e..eedb023af27d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev)
 	flush_workqueue(dev_priv->wq);

 	mutex_lock(&dev->struct_mutex);
-	i915_gem_free_all_phys_object(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	WARN_ON(dev_priv->mm.aliasing_ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0905cd915589..388c028e223c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -242,18 +242,6 @@ struct intel_ddi_plls {
 #define WATCH_LISTS 0
 #define WATCH_GTT 0

-#define I915_GEM_PHYS_CURSOR_0 1
-#define I915_GEM_PHYS_CURSOR_1 2
-#define I915_GEM_PHYS_OVERLAY_REGS 3
-#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
-
-struct drm_i915_gem_phys_object {
-	int id;
-	struct page **page_list;
-	drm_dma_handle_t *handle;
-	struct drm_i915_gem_object *cur_obj;
-};
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -1187,9 +1175,6 @@ struct i915_gem_mm {
 	/** Bit 6 swizzling required for Y tiling */
 	uint32_t bit_6_swizzle_y;

-	/* storage for physical objects */
-	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
 	/* accounting, useful for userland debugging */
 	spinlock_t object_stat_lock;
 	size_t object_memory;
@@ -1308,6 +1293,7 @@ struct intel_vbt_data {

 	struct {
 		u16 pwm_freq_hz;
+		bool present;
 		bool active_low_pwm;
 	} backlight;

@@ -1768,7 +1754,7 @@ struct drm_i915_gem_object {
 	struct drm_file *pin_filp;

 	/** for phy allocated objects */
-	struct drm_i915_gem_phys_object *phys_obj;
+	drm_dma_handle_t *phys_handle;
 };

 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -1953,6 +1939,9 @@ struct drm_i915_cmd_table {
 #define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
			 ((dev)->pdev->device & 0x00F0) == 0x0020)
+/* ULX machines are also considered ULT. */
+#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \
+			 (dev)->pdev->device == 0x0A1E)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)

 /*
@@ -2200,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
 #define PIN_MAPPABLE 0x1
 #define PIN_NONBLOCK 0x2
 #define PIN_GLOBAL 0x4
+#define PIN_OFFSET_BIAS 0x8
+#define PIN_OFFSET_MASK (~4095)
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm,
				     uint32_t alignment,
-				     unsigned flags);
+				     uint64_t flags);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2330,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
-int i915_gem_attach_phys_object(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				int id,
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 				int align);
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj);
-void i915_gem_free_all_phys_object(struct drm_device *dev);
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);

@@ -2431,20 +2417,18 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
 int i915_gem_context_enable(struct drm_i915_private *dev_priv);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct intel_ring_buffer *ring,
-			struct drm_file *file, struct i915_hw_context *to);
+			struct i915_hw_context *to);
 struct i915_hw_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
 static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
 {
-	if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
-		kref_get(&ctx->ref);
+	kref_get(&ctx->ref);
 }

 static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
 {
-	if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
-		kref_put(&ctx->ref, i915_gem_context_free);
+	kref_put(&ctx->ref, i915_gem_context_free);
 }

 static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
@@ -2463,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
+					  unsigned long start,
+					  unsigned long end,
					  unsigned flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
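
The PIN_OFFSET_BIAS flag added above packs a minimum search offset into the same 64-bit flags word that carries the boolean pin flags: the low bits stay booleans, and any page-aligned offset masked by PIN_OFFSET_MASK (~4095) rides in the upper bits, which is also why i915_gem_object_pin() widens its flags argument from unsigned to uint64_t. A minimal standalone C sketch of that packing (illustrative only, not kernel code; the constants mirror the defines above):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PIN_MAPPABLE    0x1ull
#define PIN_NONBLOCK    0x2ull
#define PIN_GLOBAL      0x4ull
#define PIN_OFFSET_BIAS 0x8ull
#define PIN_OFFSET_MASK (~4095ull)

int main(void)
{
        /* A page-aligned minimum offset shares the flags word with the
         * boolean pin flags; masking recovers either half. */
        uint64_t bias  = 256 * 1024;    /* must be page-aligned */
        uint64_t flags = PIN_GLOBAL | PIN_OFFSET_BIAS | (bias & PIN_OFFSET_MASK);

        assert((bias & 4095) == 0);
        if (flags & PIN_OFFSET_BIAS)
                printf("search range starts at 0x%llx\n",
                       (unsigned long long)(flags & PIN_OFFSET_MASK));
        return 0;
}
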
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6370a761d137..3326770c9ed2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
-static int i915_gem_phys_pwrite(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				struct drm_i915_gem_pwrite *args,
-				struct drm_file *file);

 static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
@@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }

+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+	drm_dma_handle_t *phys = obj->phys_handle;
+
+	if (!phys)
+		return;
+
+	if (obj->madv == I915_MADV_WILLNEED) {
+		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+		char *vaddr = phys->vaddr;
+		int i;
+
+		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+			struct page *page = shmem_read_mapping_page(mapping, i);
+			if (!IS_ERR(page)) {
+				char *dst = kmap_atomic(page);
+				memcpy(dst, vaddr, PAGE_SIZE);
+				drm_clflush_virt_range(dst, PAGE_SIZE);
+				kunmap_atomic(dst);
+
+				set_page_dirty(page);
+				mark_page_accessed(page);
+				page_cache_release(page);
+			}
+			vaddr += PAGE_SIZE;
+		}
+		i915_gem_chipset_flush(obj->base.dev);
+	}
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+	drm_pci_free(obj->base.dev, phys);
+	obj->phys_handle = NULL;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+			    int align)
+{
+	drm_dma_handle_t *phys;
+	struct address_space *mapping;
+	char *vaddr;
+	int i;
+
+	if (obj->phys_handle) {
+		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
+			return -EBUSY;
+
+		return 0;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
+	if (obj->base.filp == NULL)
+		return -EINVAL;
+
+	/* create a new object */
+	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+	if (!phys)
+		return -ENOMEM;
+
+	vaddr = phys->vaddr;
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		char *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+			drm_pci_free(obj->base.dev, phys);
+			return PTR_ERR(page);
+		}
+
+		src = kmap_atomic(page);
+		memcpy(vaddr, src, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+
+		vaddr += PAGE_SIZE;
+	}
+
+	obj->phys_handle = phys;
+	return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	struct drm_device *dev = obj->base.dev;
+	void *vaddr = obj->phys_handle->vaddr + args->offset;
+	char __user *user_data = to_user_ptr(args->data_ptr);
+
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
+
+	i915_gem_chipset_flush(dev);
+	return 0;
+}
+
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->phys_obj) {
-		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+	if (obj->phys_handle) {
+		ret = i915_gem_phys_pwrite(obj, args, file);
 		goto out;
 	}

@@ -2790,7 +2908,7 @@ int i915_gpu_idle(struct drm_device *dev)

 	/* Flush everything onto the inactive list. */
 	for_each_ring(ring, dev_priv, i) {
-		ret = i915_switch_context(ring, NULL, ring->default_context);
+		ret = i915_switch_context(ring, ring->default_context);
 		if (ret)
 			return ret;

@@ -3208,12 +3326,14 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
-			   unsigned flags)
+			   uint64_t flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	size_t gtt_max =
+	unsigned long start =
+		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+	unsigned long end =
 		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
@@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
 	 */
-	if (obj->base.size > gtt_max) {
-		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+	if (obj->base.size > end) {
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
			  obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
-			  gtt_max);
+			  end);
 		return ERR_PTR(-E2BIG);
 	}

@@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 search_free:
 	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
						  size, alignment,
-						  obj->cache_level, 0, gtt_max,
+						  obj->cache_level,
+						  start, end,
						  DRM_MM_SEARCH_DEFAULT,
						  DRM_MM_CREATE_DEFAULT);
 	if (ret) {
 		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level, flags);
+					       obj->cache_level,
+					       start, end,
+					       flags);
 		if (ret == 0)
			goto search_free;

@@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	return ret;
 }

+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (alignment &&
+	    vma->node.start & (alignment - 1))
+		return true;
+
+	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+		return true;
+
+	if (flags & PIN_OFFSET_BIAS &&
+	    vma->node.start < (flags & PIN_OFFSET_MASK))
+		return true;
+
+	return false;
+}
+
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
-		    unsigned flags)
+		    uint64_t flags)
 {
 	struct i915_vma *vma;
 	int ret;
@@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 		return -EBUSY;

-	if ((alignment &&
-	     vma->node.start & (alignment - 1)) ||
-	    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+	if (i915_vma_misplaced(vma, alignment, flags)) {
 		WARN(vma->pin_count,
		     "bo is already pinned with incorrect alignment:"
		     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
		     " obj->map_and_fenceable=%d\n",
		     i915_gem_obj_offset(obj, vm), alignment,
-		     flags & PIN_MAPPABLE,
+		     !!(flags & PIN_MAPPABLE),
		     obj->map_and_fenceable);
 		ret = i915_vma_unbind(vma);
 		if (ret)
@@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)

 	trace_i915_gem_object_destroy(obj);

-	if (obj->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
 		int ret;

@@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		}
 	}

+	i915_gem_object_detach_phys(obj);
+
 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
 	 * before progressing. */
 	if (obj->stolen)
@@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev)
 	register_shrinker(&dev_priv->mm.inactive_shrinker);
 }

-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-static int i915_gem_init_phys_object(struct drm_device *dev,
-				     int id, int size, int align)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-	int ret;
-
-	if (dev_priv->mm.phys_objs[id - 1] || !size)
-		return 0;
-
-	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
-	if (!phys_obj)
-		return -ENOMEM;
-
-	phys_obj->id = id;
-
-	phys_obj->handle = drm_pci_alloc(dev, size, align);
-	if (!phys_obj->handle) {
-		ret = -ENOMEM;
-		goto kfree_obj;
-	}
-#ifdef CONFIG_X86
-	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-
-	dev_priv->mm.phys_objs[id - 1] = phys_obj;
-
-	return 0;
-kfree_obj:
-	kfree(phys_obj);
-	return ret;
-}
-
-static void i915_gem_free_phys_object(struct drm_device *dev, int id)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-
-	if (!dev_priv->mm.phys_objs[id - 1])
-		return;
-
-	phys_obj = dev_priv->mm.phys_objs[id - 1];
-	if (phys_obj->cur_obj) {
-		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
-	}
-
-#ifdef CONFIG_X86
-	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-	drm_pci_free(dev, phys_obj->handle);
-	kfree(phys_obj);
-	dev_priv->mm.phys_objs[id - 1] = NULL;
-}
-
-void i915_gem_free_all_phys_object(struct drm_device *dev)
-{
-	int i;
-
-	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
-		i915_gem_free_phys_object(dev, i);
-}
-
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	char *vaddr;
-	int i;
-	int page_count;
-
-	if (!obj->phys_obj)
-		return;
-	vaddr = obj->phys_obj->handle->vaddr;
-
-	page_count = obj->base.size / PAGE_SIZE;
-	for (i = 0; i < page_count; i++) {
-		struct page *page = shmem_read_mapping_page(mapping, i);
-		if (!IS_ERR(page)) {
-			char *dst = kmap_atomic(page);
-			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
-			kunmap_atomic(dst);
-
-			drm_clflush_pages(&page, 1);
-
-			set_page_dirty(page);
-			mark_page_accessed(page);
-			page_cache_release(page);
-		}
-	}
-	i915_gem_chipset_flush(dev);
-
-	obj->phys_obj->cur_obj = NULL;
-	obj->phys_obj = NULL;
-}
-
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj,
-			    int id,
-			    int align)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-	int page_count;
-	int i;
-
-	if (id > I915_MAX_PHYS_OBJECT)
-		return -EINVAL;
-
-	if (obj->phys_obj) {
-		if (obj->phys_obj->id == id)
-			return 0;
-		i915_gem_detach_phys_object(dev, obj);
-	}
-
-	/* create a new object */
-	if (!dev_priv->mm.phys_objs[id - 1]) {
-		ret = i915_gem_init_phys_object(dev, id,
-						obj->base.size, align);
-		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %zu\n",
-				  id, obj->base.size);
-			return ret;
-		}
-	}
-
-	/* bind to the object */
-	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
-	obj->phys_obj->cur_obj = obj;
-
-	page_count = obj->base.size / PAGE_SIZE;
-
-	for (i = 0; i < page_count; i++) {
-		struct page *page;
-		char *dst, *src;
-
-		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-
-		src = kmap_atomic(page);
-		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(src);
-
-		mark_page_accessed(page);
-		page_cache_release(page);
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_phys_pwrite(struct drm_device *dev,
-		     struct drm_i915_gem_object *obj,
-		     struct drm_i915_gem_pwrite *args,
-		     struct drm_file *file_priv)
-{
-	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-	char __user *user_data = to_user_ptr(args->data_ptr);
-
-	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-		unsigned long unwritten;
-
-		/* The physical object once assigned is fixed for the lifetime
-		 * of the obj, so we can safely drop the lock and continue
-		 * to access vaddr.
-		 */
-		mutex_unlock(&dev->struct_mutex);
-		unwritten = copy_from_user(vaddr, user_data, args->size);
-		mutex_lock(&dev->struct_mutex);
-		if (unwritten)
-			return -EFAULT;
-	}
-
-	i915_gem_chipset_flush(dev);
-	return 0;
-}
-
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
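
The i915_gem.c rewrite above drops the fixed table of three global phys objects in favour of a drm_dma_handle_t owned by each GEM object: attach snapshots the shmem pages into one contiguous allocation, and the detach path (now called from i915_gem_free_object()) copies the possibly hardware-written contents back only when the object is still I915_MADV_WILLNEED. A hedged userspace sketch of that lifecycle, with malloc() standing in for drm_pci_alloc() and the cache flushing omitted:

#include <stdlib.h>
#include <string.h>

struct phys_handle {
        void  *vaddr;   /* contiguous backing, like drm_dma_handle_t */
        size_t size;
};

/* Attach: copy the object's pages into one contiguous allocation. */
struct phys_handle *attach_phys(const char *pages, size_t size)
{
        struct phys_handle *phys = malloc(sizeof(*phys));
        if (!phys)
                return NULL;
        phys->vaddr = malloc(size);     /* kernel: drm_pci_alloc() */
        if (!phys->vaddr) {
                free(phys);
                return NULL;
        }
        phys->size = size;
        memcpy(phys->vaddr, pages, size);
        return phys;
}

/* Detach: copy contents back only if the object is still wanted,
 * mirroring the I915_MADV_WILLNEED check, then free the backing. */
void detach_phys(struct phys_handle *phys, char *pages, int willneed)
{
        if (!phys)
                return;
        if (willneed)
                memcpy(pages, phys->vaddr, phys->size);
        free(phys->vaddr);
        free(phys);
}
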
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 6043062ffce7..d72db15afa02 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -96,9 +96,6 @@
 #define GEN6_CONTEXT_ALIGN (64<<10)
 #define GEN7_CONTEXT_ALIGN 4096

-static int do_switch(struct intel_ring_buffer *ring,
-		     struct i915_hw_context *to);
-
 static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_device *dev = ppgtt->base.dev;
@@ -185,13 +182,15 @@ void i915_gem_context_free(struct kref *ctx_ref)
					   typeof(*ctx), ref);
 	struct i915_hw_ppgtt *ppgtt = NULL;

-	/* We refcount even the aliasing PPGTT to keep the code symmetric */
-	if (USES_PPGTT(ctx->obj->base.dev))
-		ppgtt = ctx_to_ppgtt(ctx);
+	if (ctx->obj) {
+		/* We refcount even the aliasing PPGTT to keep the code symmetric */
+		if (USES_PPGTT(ctx->obj->base.dev))
+			ppgtt = ctx_to_ppgtt(ctx);

-	/* XXX: Free up the object before tearing down the address space, in
-	 * case we're bound in the PPGTT */
-	drm_gem_object_unreference(&ctx->obj->base);
+		/* XXX: Free up the object before tearing down the address space, in
+		 * case we're bound in the PPGTT */
+		drm_gem_object_unreference(&ctx->obj->base);
+	}

 	if (ppgtt)
 		kref_put(&ppgtt->ref, ppgtt_release);
@@ -232,32 +231,32 @@ __create_hw_context(struct drm_device *dev,
 		return ERR_PTR(-ENOMEM);

 	kref_init(&ctx->ref);
-	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
-	INIT_LIST_HEAD(&ctx->link);
-	if (ctx->obj == NULL) {
-		kfree(ctx);
-		DRM_DEBUG_DRIVER("Context object allocated failed\n");
-		return ERR_PTR(-ENOMEM);
-	}
+	list_add_tail(&ctx->link, &dev_priv->context_list);

-	if (INTEL_INFO(dev)->gen >= 7) {
-		ret = i915_gem_object_set_cache_level(ctx->obj,
-						      I915_CACHE_L3_LLC);
-		/* Failure shouldn't ever happen this early */
-		if (WARN_ON(ret))
-			goto err_out;
-	}
+	if (dev_priv->hw_context_size) {
+		ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+		if (ctx->obj == NULL) {
+			ret = -ENOMEM;
+			goto err_out;
+		}

-	list_add_tail(&ctx->link, &dev_priv->context_list);
-
+		if (INTEL_INFO(dev)->gen >= 7) {
+			ret = i915_gem_object_set_cache_level(ctx->obj,
+							      I915_CACHE_L3_LLC);
+			/* Failure shouldn't ever happen this early */
+			if (WARN_ON(ret))
+				goto err_out;
+		}
+	}

 	/* Default context will never have a file_priv */
-	if (file_priv == NULL)
-		return ctx;
-
-	ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0,
-			GFP_KERNEL);
-	if (ret < 0)
-		goto err_out;
+	if (file_priv != NULL) {
+		ret = idr_alloc(&file_priv->context_idr, ctx,
+				DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
+		if (ret < 0)
+			goto err_out;
+	} else
+		ret = DEFAULT_CONTEXT_ID;

 	ctx->file_priv = file_priv;
 	ctx->id = ret;
@@ -294,7 +293,7 @@ i915_gem_create_context(struct drm_device *dev,
 	if (IS_ERR(ctx))
 		return ctx;

-	if (is_global_default_ctx) {
+	if (is_global_default_ctx && ctx->obj) {
 		/* We may need to do things with the shrinker which
 		 * require us to immediately switch back to the default
 		 * context. This can cause a problem as pinning the
@@ -342,7 +341,7 @@ i915_gem_create_context(struct drm_device *dev,
 	return ctx;

 err_unpin:
-	if (is_global_default_ctx)
+	if (is_global_default_ctx && ctx->obj)
 		i915_gem_object_ggtt_unpin(ctx->obj);
 err_destroy:
 	i915_gem_context_unreference(ctx);
@@ -352,32 +351,22 @@ err_destroy:
 void i915_gem_context_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
 	int i;

-	if (!HAS_HW_CONTEXTS(dev))
-		return;
-
 	/* Prevent the hardware from restoring the last context (which hung) on
 	 * the next switch */
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct i915_hw_context *dctx;
-		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
-			continue;
+		struct intel_ring_buffer *ring = &dev_priv->ring[i];
+		struct i915_hw_context *dctx = ring->default_context;

 		/* Do a fake switch to the default context */
-		ring = &dev_priv->ring[i];
-		dctx = ring->default_context;
-		if (WARN_ON(!dctx))
+		if (ring->last_context == dctx)
			continue;

 		if (!ring->last_context)
			continue;

-		if (ring->last_context == dctx)
-			continue;
-
-		if (i == RCS) {
+		if (dctx->obj && i == RCS) {
			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
						      get_context_alignment(dev), 0));
			/* Fake a finish/inactive */
@@ -394,44 +383,35 @@ void i915_gem_context_reset(struct drm_device *dev)
 int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
+	struct i915_hw_context *ctx;
 	int i;

-	if (!HAS_HW_CONTEXTS(dev))
-		return 0;
-
 	/* Init should only be called once per module load. Eventually the
 	 * restriction on the context_disabled check can be loosened. */
 	if (WARN_ON(dev_priv->ring[RCS].default_context))
 		return 0;

-	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
-
-	if (dev_priv->hw_context_size > (1<<20)) {
-		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
-		return -E2BIG;
+	if (HAS_HW_CONTEXTS(dev)) {
+		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+		if (dev_priv->hw_context_size > (1<<20)) {
+			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
+					 dev_priv->hw_context_size);
+			dev_priv->hw_context_size = 0;
+		}
 	}

-	dev_priv->ring[RCS].default_context =
-		i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
-
-	if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) {
-		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n",
-				 PTR_ERR(dev_priv->ring[RCS].default_context));
-		return PTR_ERR(dev_priv->ring[RCS].default_context);
+	ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+	if (IS_ERR(ctx)) {
+		DRM_ERROR("Failed to create default global context (error %ld)\n",
+			  PTR_ERR(ctx));
+		return PTR_ERR(ctx);
 	}

-	for (i = RCS + 1; i < I915_NUM_RINGS; i++) {
-		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
-			continue;
-
-		ring = &dev_priv->ring[i];
+	/* NB: RCS will hold a ref for all rings */
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		dev_priv->ring[i].default_context = ctx;

-		/* NB: RCS will hold a ref for all rings */
-		ring->default_context = dev_priv->ring[RCS].default_context;
-	}
-
-	DRM_DEBUG_DRIVER("HW context support initialized\n");
+	DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
 	return 0;
 }

@@ -441,33 +421,30 @@ void i915_gem_context_fini(struct drm_device *dev)
 	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
 	int i;

-	if (!HAS_HW_CONTEXTS(dev))
-		return;
+	if (dctx->obj) {
+		/* The only known way to stop the gpu from accessing the hw context is
+		 * to reset it. Do this as the very last operation to avoid confusing
+		 * other code, leading to spurious errors. */
+		intel_gpu_reset(dev);

-	/* The only known way to stop the gpu from accessing the hw context is
-	 * to reset it. Do this as the very last operation to avoid confusing
-	 * other code, leading to spurious errors. */
-	intel_gpu_reset(dev);
-
-	/* When default context is created and switched to, base object refcount
-	 * will be 2 (+1 from object creation and +1 from do_switch()).
-	 * i915_gem_context_fini() will be called after gpu_idle() has switched
-	 * to default context. So we need to unreference the base object once
-	 * to offset the do_switch part, so that i915_gem_context_unreference()
-	 * can then free the base object correctly. */
-	WARN_ON(!dev_priv->ring[RCS].last_context);
-	if (dev_priv->ring[RCS].last_context == dctx) {
-		/* Fake switch to NULL context */
-		WARN_ON(dctx->obj->active);
-		i915_gem_object_ggtt_unpin(dctx->obj);
-		i915_gem_context_unreference(dctx);
-		dev_priv->ring[RCS].last_context = NULL;
+		/* When default context is created and switched to, base object refcount
+		 * will be 2 (+1 from object creation and +1 from do_switch()).
+		 * i915_gem_context_fini() will be called after gpu_idle() has switched
+		 * to default context. So we need to unreference the base object once
+		 * to offset the do_switch part, so that i915_gem_context_unreference()
+		 * can then free the base object correctly. */
+		WARN_ON(!dev_priv->ring[RCS].last_context);
+		if (dev_priv->ring[RCS].last_context == dctx) {
+			/* Fake switch to NULL context */
+			WARN_ON(dctx->obj->active);
+			i915_gem_object_ggtt_unpin(dctx->obj);
+			i915_gem_context_unreference(dctx);
+			dev_priv->ring[RCS].last_context = NULL;
+		}
 	}

 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_ring_buffer *ring = &dev_priv->ring[i];
-		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
-			continue;

 		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);
@@ -478,7 +455,6 @@ void i915_gem_context_fini(struct drm_device *dev)

 	i915_gem_object_ggtt_unpin(dctx->obj);
 	i915_gem_context_unreference(dctx);
-	dev_priv->mm.aliasing_ppgtt = NULL;
 }

 int i915_gem_context_enable(struct drm_i915_private *dev_priv)
@@ -486,9 +462,6 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
 	struct intel_ring_buffer *ring;
 	int ret, i;

-	if (!HAS_HW_CONTEXTS(dev_priv->dev))
-		return 0;
-
 	/* This is the only place the aliasing PPGTT gets enabled, which means
 	 * it has to happen before we bail on reset */
 	if (dev_priv->mm.aliasing_ppgtt) {
@@ -503,7 +476,7 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
 	BUG_ON(!dev_priv->ring[RCS].default_context);

 	for_each_ring(ring, dev_priv, i) {
-		ret = do_switch(ring, ring->default_context);
+		ret = i915_switch_context(ring, ring->default_context);
 		if (ret)
			return ret;
 	}
@@ -526,19 +499,6 @@ static int context_idr_cleanup(int id, void *p, void *data)
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!HAS_HW_CONTEXTS(dev)) {
-		/* Cheat for hang stats */
-		file_priv->private_default_ctx =
-			kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);
-
-		if (file_priv->private_default_ctx == NULL)
-			return -ENOMEM;
-
-		file_priv->private_default_ctx->vm = &dev_priv->gtt.base;
-		return 0;
-	}

 	idr_init(&file_priv->context_idr);

@@ -559,14 +519,10 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;

-	if (!HAS_HW_CONTEXTS(dev)) {
-		kfree(file_priv->private_default_ctx);
-		return;
-	}
-
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
-	i915_gem_context_unreference(file_priv->private_default_ctx);
 	idr_destroy(&file_priv->context_idr);
+
+	i915_gem_context_unreference(file_priv->private_default_ctx);
 }

 struct i915_hw_context *
@@ -574,9 +530,6 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 {
 	struct i915_hw_context *ctx;

-	if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
-		return file_priv->private_default_ctx;
-
 	ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
 	if (!ctx)
 		return ERR_PTR(-ENOENT);
@@ -758,7 +711,6 @@ unpin_out:
 /**
  * i915_switch_context() - perform a GPU context switch.
  * @ring: ring for which we'll execute the context switch
- * @file_priv: file_priv associated with the context, may be NULL
  * @to: the context to switch to
  *
  * The context life cycle is simple. The context refcount is incremented and
@@ -767,24 +719,30 @@ unpin_out:
  * object while letting the normal object tracking destroy the backing BO.
  */
 int i915_switch_context(struct intel_ring_buffer *ring,
-			struct drm_file *file,
			struct i915_hw_context *to)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;

 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

-	BUG_ON(file && to == NULL);
-
-	/* We have the fake context */
-	if (!HAS_HW_CONTEXTS(ring->dev)) {
-		ring->last_context = to;
+	if (to->obj == NULL) { /* We have the fake context */
+		if (to != ring->last_context) {
+			i915_gem_context_reference(to);
+			if (ring->last_context)
+				i915_gem_context_unreference(ring->last_context);
+			ring->last_context = to;
+		}
 		return 0;
 	}

 	return do_switch(ring, to);
 }

+static bool hw_context_enabled(struct drm_device *dev)
+{
+	return to_i915(dev)->hw_context_size;
+}
+
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
 {
@@ -793,7 +751,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	struct i915_hw_context *ctx;
 	int ret;

-	if (!HAS_HW_CONTEXTS(dev))
+	if (!hw_context_enabled(dev))
 		return -ENODEV;

 	ret = i915_mutex_lock_interruptible(dev);
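
With every HAS_HW_CONTEXTS() special case removed, a "fake" context (ctx->obj == NULL, created when dev_priv->hw_context_size is zero) now travels through exactly the same kref accounting as a real one; only the do_switch() GPU work is skipped. A toy C sketch of the last_context bookkeeping the new i915_switch_context() performs (a plain counter stands in for kref; not the kernel code itself):

#include <stdlib.h>

struct ctx {
        int refcount;
        void *obj;      /* NULL marks the fake context */
};

void ctx_ref(struct ctx *c)   { c->refcount++; }
void ctx_unref(struct ctx *c) { if (--c->refcount == 0) free(c); }

/* Fake contexts update last_context with full refcounting even
 * though no GPU switch is issued. */
void switch_context(struct ctx **last_context, struct ctx *to)
{
        if (to->obj == NULL) {
                if (to != *last_context) {
                        ctx_ref(to);
                        if (*last_context)
                                ctx_unref(*last_context);
                        *last_context = to;
                }
                return;
        }
        /* a real context would go through do_switch() here */
}
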
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 75fca63dc8c1..bbf4b12d842e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
+			 unsigned long start, unsigned long end,
			 unsigned flags)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
 	struct i915_vma *vma;
 	int ret = 0;
@@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	 */

 	INIT_LIST_HEAD(&unwind_list);
-	if (flags & PIN_MAPPABLE) {
-		BUG_ON(!i915_is_ggtt(vm));
+	if (start != 0 || end != vm->total) {
 		drm_mm_init_scan_with_range(&vm->mm, min_size,
-					    alignment, cache_level, 0,
-					    dev_priv->gtt.mappable_end);
+					    alignment, cache_level,
+					    start, end);
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7447160155a3..20fef6c50267 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,9 @@

 #define __EXEC_OBJECT_HAS_PIN (1<<31)
 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+
+#define BATCH_OFFSET_BIAS (256*1024)

 struct eb_vmas {
 	struct list_head vmas;
@@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence;
-	unsigned flags;
+	uint64_t flags;
 	int ret;

 	flags = 0;
@@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,

 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 		flags |= PIN_GLOBAL;
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

 	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
 	if (ret)
@@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }

+static bool
+eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+	bool need_fence, need_mappable;
+
+	need_fence =
+		has_fenced_gpu_access &&
+		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		obj->tiling_mode != I915_TILING_NONE;
+	need_mappable = need_fence || need_reloc_mappable(vma);
+
+	WARN_ON((need_mappable || need_fence) &&
+		!i915_is_ggtt(vma->vm));
+
+	if (entry->alignment &&
+	    vma->node.start & (entry->alignment - 1))
+		return true;
+
+	if (need_mappable && !obj->map_and_fenceable)
+		return true;
+
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+	    vma->node.start < BATCH_OFFSET_BIAS)
+		return true;
+
+	return false;
+}
+
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct list_head *vmas,
@@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,

 		/* Unbind any ill-fitting objects or pin. */
 		list_for_each_entry(vma, vmas, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-			bool need_fence, need_mappable;
-
-			obj = vma->obj;
-
			if (!drm_mm_node_allocated(&vma->node))
				continue;

-			need_fence =
-				has_fenced_gpu_access &&
-				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-				obj->tiling_mode != I915_TILING_NONE;
-			need_mappable = need_fence || need_reloc_mappable(vma);
-
-			WARN_ON((need_mappable || need_fence) &&
-				!i915_is_ggtt(vma->vm));
-
-			if ((entry->alignment &&
-			     vma->node.start & (entry->alignment - 1)) ||
-			    (need_mappable && !obj->map_and_fenceable))
+			if (eb_vma_misplaced(vma, has_fenced_gpu_access))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
-			if (copy_to_user(&user_relocs[j].presumed_offset,
-					 &invalid_offset,
-					 sizeof(invalid_offset))) {
+			if (__copy_to_user(&user_relocs[j].presumed_offset,
+					   &invalid_offset,
+					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
@@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 	return 0;
 }

+static struct drm_i915_gem_object *
+eb_get_batch(struct eb_vmas *eb)
+{
+	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+	return vma->obj;
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
@@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		goto err;

 	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
+	batch_obj = eb_get_batch(eb);

 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1221,7 +1259,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto err;

-	ret = i915_switch_context(ring, file, ctx);
+	ret = i915_switch_context(ring, ctx);
 	if (ret)
 		goto err;

@@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,

 	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
 	if (!ret) {
+		struct drm_i915_gem_exec_object __user *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+
 		/* Copy the new buffer offsets back to the user's exec list. */
-		for (i = 0; i < args->buffer_count; i++)
-			exec_list[i].offset = exec2_list[i].offset;
-		/* ... and back out to userspace */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user (%d)\n",
+					  args->buffer_count, ret);
+				break;
+			}
 		}
 	}

@@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
 	if (!ret) {
 		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec2_list,
-				   sizeof(*exec2_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		struct drm_i915_gem_exec_object2 *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+		int i;
+
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user\n",
+					  args->buffer_count);
+				break;
+			}
 		}
 	}

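
eb_get_batch() tags the batch buffer with __EXEC_OBJECT_NEEDS_BIAS, which reservation translates into PIN_OFFSET_BIAS with a 256 KiB floor, so a compressed SNA batch can never sit low enough in the GTT for its negative relocation deltas to wrap. The placement test reduces to the following standalone sketch of the bias clause of eb_vma_misplaced():

#include <stdbool.h>
#include <stdint.h>

#define BATCH_OFFSET_BIAS (256 * 1024)

/* A batch already bound below the bias must be unbound and
 * repinned higher up before execution. */
bool batch_needs_rebind(uint64_t node_start, bool needs_bias)
{
        return needs_bias && node_start < BATCH_OFFSET_BIAS;
}
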
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ab5e93c30aa2..5deb22864c52 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -34,25 +34,35 @@ static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv);

 bool intel_enable_ppgtt(struct drm_device *dev, bool full)
 {
-	if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+	if (i915.enable_ppgtt == 0)
 		return false;

 	if (i915.enable_ppgtt == 1 && full)
 		return false;

+	return true;
+}
+
+static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+{
+	if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+		return 0;
+
+	if (enable_ppgtt == 1)
+		return 1;
+
+	if (enable_ppgtt == 2 && HAS_PPGTT(dev))
+		return 2;
+
 #ifdef CONFIG_INTEL_IOMMU
 	/* Disable ppgtt on SNB if VT-d is on. */
 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
 		DRM_INFO("Disabling PPGTT because VT-d is on\n");
-		return false;
+		return 0;
 	}
 #endif

-	/* Full ppgtt disabled by default for now due to issues. */
-	if (full)
-		return false; /* HAS_PPGTT(dev) */
-	else
-		return HAS_ALIASING_PPGTT(dev);
+	return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
 }

 #define GEN6_PPGTT_PD_ENTRIES 512
@@ -1079,7 +1089,9 @@ alloc:
 	if (ret == -ENOSPC && !retried) {
 		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
-					       I915_CACHE_NONE, 0);
+					       I915_CACHE_NONE,
+					       0, dev_priv->gtt.base.total,
+					       0);
 		if (ret)
			return ret;

@@ -2031,6 +2043,14 @@ int i915_gem_gtt_init(struct drm_device *dev)
		 gtt->base.total >> 20);
 	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
 	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+	/*
+	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
+	 * user's requested state against the hardware/driver capabilities. We
+	 * do this now so that we can print out any log messages once rather
+	 * than every time we check intel_enable_ppgtt().
+	 */
+	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
+	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

 	return 0;
 }
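
Because i915.enable_ppgtt is read-only after module load, i915_gem_gtt_init() collapses the requested mode into an effective one exactly once, and intel_enable_ppgtt() is left comparing plain integers. A standalone condensation of that decision table (the bool parameters are stand-ins for HAS_ALIASING_PPGTT(), HAS_PPGTT() and the SNB-plus-VT-d quirk):

#include <stdbool.h>

/* Returns the effective ppgtt mode: 0 = off, 1 = aliasing, 2 = full. */
int sanitize_ppgtt_mode(int requested, bool has_aliasing,
                        bool has_full, bool snb_with_vtd)
{
        if (requested == 0 || !has_aliasing)
                return 0;
        if (requested == 1)
                return 1;
        if (requested == 2 && has_full)
                return 2;
        if (snb_with_vtd)
                return 0;       /* VT-d on SNB forces PPGTT off */
        return has_aliasing ? 1 : 0;
}
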
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7753249b3a95..f98ba4e6e70b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1362,10 +1362,20 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1362 spin_lock(&dev_priv->irq_lock); 1362 spin_lock(&dev_priv->irq_lock);
1363 for (i = 1; i < HPD_NUM_PINS; i++) { 1363 for (i = 1; i < HPD_NUM_PINS; i++) {
1364 1364
1365 WARN_ONCE(hpd[i] & hotplug_trigger && 1365 if (hpd[i] & hotplug_trigger &&
1366 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED, 1366 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1367 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", 1367 /*
1368 hotplug_trigger, i, hpd[i]); 1368 * On GMCH platforms the interrupt mask bits only
1369 * prevent irq generation, not the setting of the
1370 * hotplug bits themselves. So only WARN about unexpected
1371 * interrupts on saner platforms.
1372 */
1373 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
1374 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1375 hotplug_trigger, i, hpd[i]);
1376
1377 continue;
1378 }
1369 1379
1370 if (!(hpd[i] & hotplug_trigger) || 1380 if (!(hpd[i] & hotplug_trigger) ||
1371 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1381 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
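
The rewritten loop above turns a blanket WARN_ONCE into skip-plus-conditional-warning: interrupts on pins marked HPD_DISABLED are no longer processed at all, and the warning fires only on platforms (gen5+, non-VLV) whose mask bits actually gate the status bits. A rough userspace sketch of that control flow, with illustrative structures in place of the driver's hpd tables:

#include <stdio.h>

enum { HPD_DISABLED, HPD_ENABLED };

static void scan_pins(const unsigned *pin_bit, const int *mark,
		      int num_pins, unsigned trigger, int warn_ok)
{
	int i;

	for (i = 1; i < num_pins; i++) {
		if ((pin_bit[i] & trigger) && mark[i] == HPD_DISABLED) {
			if (warn_ok)	/* only where masks gate status */
				fprintf(stderr, "HPD on disabled pin %d\n", i);
			continue;	/* never process a disabled pin */
		}
		if (!(pin_bit[i] & trigger) || mark[i] != HPD_ENABLED)
			continue;
		printf("processing hotplug on pin %d\n", i);
	}
}

int main(void)
{
	unsigned pin_bit[3] = { 0, 1u << 0, 1u << 1 };
	int mark[3] = { 0, HPD_DISABLED, HPD_ENABLED };
	scan_pins(pin_bit, mark, 3, 0x3, /*warn_ok=*/1);
	return 0;
}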
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9f5b18d9d885..c77af69c2d8f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -827,6 +827,7 @@ enum punit_power_well {
827# define MI_FLUSH_ENABLE (1 << 12) 827# define MI_FLUSH_ENABLE (1 << 12)
828# define ASYNC_FLIP_PERF_DISABLE (1 << 14) 828# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
829# define MODE_IDLE (1 << 9) 829# define MODE_IDLE (1 << 9)
830# define STOP_RING (1 << 8)
830 831
831#define GEN6_GT_MODE 0x20d0 832#define GEN6_GT_MODE 0x20d0
832#define GEN7_GT_MODE 0x7008 833#define GEN7_GT_MODE 0x7008
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4867f4cc0938..aff4a113cda3 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -287,6 +287,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
287 const struct bdb_lfp_backlight_data *backlight_data; 287 const struct bdb_lfp_backlight_data *backlight_data;
288 const struct bdb_lfp_backlight_data_entry *entry; 288 const struct bdb_lfp_backlight_data_entry *entry;
289 289
290 /* Err on the side of enabling the backlight if there is no backlight block. */
291 dev_priv->vbt.backlight.present = true;
292
290 backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT); 293 backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
291 if (!backlight_data) 294 if (!backlight_data)
292 return; 295 return;
@@ -299,6 +302,13 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
299 302
300 entry = &backlight_data->data[panel_type]; 303 entry = &backlight_data->data[panel_type];
301 304
305 dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
306 if (!dev_priv->vbt.backlight.present) {
307 DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n",
308 entry->type);
309 return;
310 }
311
302 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; 312 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
303 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; 313 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
304 DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, " 314 DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
@@ -550,47 +560,71 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
550 560
551 dev_priv->vbt.edp_pps = *edp_pps; 561 dev_priv->vbt.edp_pps = *edp_pps;
552 562
553 dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 : 563 switch (edp_link_params->rate) {
554 DP_LINK_BW_1_62; 564 case EDP_RATE_1_62:
565 dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
566 break;
567 case EDP_RATE_2_7:
568 dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
569 break;
570 default:
571 DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
572 edp_link_params->rate);
573 break;
574 }
575
555 switch (edp_link_params->lanes) { 576 switch (edp_link_params->lanes) {
556 case 0: 577 case EDP_LANE_1:
557 dev_priv->vbt.edp_lanes = 1; 578 dev_priv->vbt.edp_lanes = 1;
558 break; 579 break;
559 case 1: 580 case EDP_LANE_2:
560 dev_priv->vbt.edp_lanes = 2; 581 dev_priv->vbt.edp_lanes = 2;
561 break; 582 break;
562 case 3: 583 case EDP_LANE_4:
563 default:
564 dev_priv->vbt.edp_lanes = 4; 584 dev_priv->vbt.edp_lanes = 4;
565 break; 585 break;
586 default:
587 DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
588 edp_link_params->lanes);
589 break;
566 } 590 }
591
567 switch (edp_link_params->preemphasis) { 592 switch (edp_link_params->preemphasis) {
568 case 0: 593 case EDP_PREEMPHASIS_NONE:
569 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0; 594 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
570 break; 595 break;
571 case 1: 596 case EDP_PREEMPHASIS_3_5dB:
572 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; 597 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
573 break; 598 break;
574 case 2: 599 case EDP_PREEMPHASIS_6dB:
575 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6; 600 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
576 break; 601 break;
577 case 3: 602 case EDP_PREEMPHASIS_9_5dB:
578 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; 603 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
579 break; 604 break;
605 default:
606 DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
607 edp_link_params->preemphasis);
608 break;
580 } 609 }
610
581 switch (edp_link_params->vswing) { 611 switch (edp_link_params->vswing) {
582 case 0: 612 case EDP_VSWING_0_4V:
583 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400; 613 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
584 break; 614 break;
585 case 1: 615 case EDP_VSWING_0_6V:
586 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600; 616 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
587 break; 617 break;
588 case 2: 618 case EDP_VSWING_0_8V:
589 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800; 619 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
590 break; 620 break;
591 case 3: 621 case EDP_VSWING_1_2V:
592 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200; 622 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
593 break; 623 break;
624 default:
625 DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
626 edp_link_params->vswing);
627 break;
594 } 628 }
595} 629}
596 630
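
The parse_edp() changes above replace bare case numbers with named VBT constants and add default branches that log unknown values instead of silently picking one; the previously assigned default stays in place. A small hedged sketch of that defensive-decode shape (constants and names here are illustrative, not the real VBT layout):

#include <stdio.h>

#define LINK_BW_1_62 0x06	/* illustrative DP bandwidth codes */
#define LINK_BW_2_7  0x0a

static int decode_rate(unsigned raw, int current_value)
{
	switch (raw) {
	case 0: return LINK_BW_1_62;
	case 1: return LINK_BW_2_7;
	default:
		fprintf(stderr, "unknown eDP link rate value %u\n", raw);
		return current_value;	/* leave the default untouched */
	}
}

int main(void)
{
	int rate = LINK_BW_1_62;	/* driver default */
	rate = decode_rate(7, rate);	/* warns, keeps the 1.62 GHz code */
	printf("rate code: 0x%02x\n", rate);
	return 0;
}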
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 83b7629e4367..f27f7b282465 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -374,6 +374,9 @@ struct bdb_lvds_lfp_data {
374 struct bdb_lvds_lfp_data_entry data[16]; 374 struct bdb_lvds_lfp_data_entry data[16];
375} __packed; 375} __packed;
376 376
377#define BDB_BACKLIGHT_TYPE_NONE 0
378#define BDB_BACKLIGHT_TYPE_PWM 2
379
377struct bdb_lfp_backlight_data_entry { 380struct bdb_lfp_backlight_data_entry {
378 u8 type:2; 381 u8 type:2;
379 u8 active_low_pwm:1; 382 u8 active_low_pwm:1;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dae976f51d83..5b60e25baa32 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7825 addr = i915_gem_obj_ggtt_offset(obj); 7825 addr = i915_gem_obj_ggtt_offset(obj);
7826 } else { 7826 } else {
7827 int align = IS_I830(dev) ? 16 * 1024 : 256; 7827 int align = IS_I830(dev) ? 16 * 1024 : 256;
7828 ret = i915_gem_attach_phys_object(dev, obj, 7828 ret = i915_gem_object_attach_phys(obj, align);
7829 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
7830 align);
7831 if (ret) { 7829 if (ret) {
7832 DRM_DEBUG_KMS("failed to attach phys object\n"); 7830 DRM_DEBUG_KMS("failed to attach phys object\n");
7833 goto fail_locked; 7831 goto fail_locked;
7834 } 7832 }
7835 addr = obj->phys_obj->handle->busaddr; 7833 addr = obj->phys_handle->busaddr;
7836 } 7834 }
7837 7835
7838 if (IS_GEN2(dev)) 7836 if (IS_GEN2(dev))
@@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7840 7838
7841 finish: 7839 finish:
7842 if (intel_crtc->cursor_bo) { 7840 if (intel_crtc->cursor_bo) {
7843 if (INTEL_INFO(dev)->cursor_needs_physical) { 7841 if (!INTEL_INFO(dev)->cursor_needs_physical)
7844 if (intel_crtc->cursor_bo != obj)
7845 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
7846 } else
7847 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); 7842 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
7848 drm_gem_object_unreference(&intel_crtc->cursor_bo->base); 7843 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
7849 } 7844 }
@@ -9654,11 +9649,22 @@ intel_pipe_config_compare(struct drm_device *dev,
9654 PIPE_CONF_CHECK_I(pipe_src_w); 9649 PIPE_CONF_CHECK_I(pipe_src_w);
9655 PIPE_CONF_CHECK_I(pipe_src_h); 9650 PIPE_CONF_CHECK_I(pipe_src_h);
9656 9651
9657 PIPE_CONF_CHECK_I(gmch_pfit.control); 9652 /*
9658 /* pfit ratios are autocomputed by the hw on gen4+ */ 9653 * FIXME: BIOS likes to set up a cloned config with lvds+external
9659 if (INTEL_INFO(dev)->gen < 4) 9654 * screen. Since we don't yet re-compute the pipe config when moving
9660 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); 9655 * just the lvds port away to another pipe, the sw tracking won't match.
9661 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); 9656 *
9657 * Proper atomic modesets with recomputed global state will fix this.
9658 * Until then just don't check gmch state for inherited modes.
9659 */
9660 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
9661 PIPE_CONF_CHECK_I(gmch_pfit.control);
9662 /* pfit ratios are autocomputed by the hw on gen4+ */
9663 if (INTEL_INFO(dev)->gen < 4)
9664 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
9665 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
9666 }
9667
9662 PIPE_CONF_CHECK_I(pch_pfit.enabled); 9668 PIPE_CONF_CHECK_I(pch_pfit.enabled);
9663 if (current_config->pch_pfit.enabled) { 9669 if (current_config->pch_pfit.enabled) {
9664 PIPE_CONF_CHECK_I(pch_pfit.pos); 9670 PIPE_CONF_CHECK_I(pch_pfit.pos);
@@ -11384,15 +11390,6 @@ void intel_modeset_init(struct drm_device *dev)
11384 } 11390 }
11385} 11391}
11386 11392
11387static void
11388intel_connector_break_all_links(struct intel_connector *connector)
11389{
11390 connector->base.dpms = DRM_MODE_DPMS_OFF;
11391 connector->base.encoder = NULL;
11392 connector->encoder->connectors_active = false;
11393 connector->encoder->base.crtc = NULL;
11394}
11395
11396static void intel_enable_pipe_a(struct drm_device *dev) 11393static void intel_enable_pipe_a(struct drm_device *dev)
11397{ 11394{
11398 struct intel_connector *connector; 11395 struct intel_connector *connector;
@@ -11474,8 +11471,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
11474 if (connector->encoder->base.crtc != &crtc->base) 11471 if (connector->encoder->base.crtc != &crtc->base)
11475 continue; 11472 continue;
11476 11473
11477 intel_connector_break_all_links(connector); 11474 connector->base.dpms = DRM_MODE_DPMS_OFF;
11475 connector->base.encoder = NULL;
11478 } 11476 }
11477 /* Multiple connectors may share the same encoder:
11478 * handle them and break the crtc link separately. */
11479 list_for_each_entry(connector, &dev->mode_config.connector_list,
11480 base.head)
11481 if (connector->encoder->base.crtc == &crtc->base) {
11482 connector->encoder->base.crtc = NULL;
11483 connector->encoder->connectors_active = false;
11484 }
11479 11485
11480 WARN_ON(crtc->active); 11486 WARN_ON(crtc->active);
11481 crtc->base.enabled = false; 11487 crtc->base.enabled = false;
@@ -11557,6 +11563,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
11557 drm_get_encoder_name(&encoder->base)); 11563 drm_get_encoder_name(&encoder->base));
11558 encoder->disable(encoder); 11564 encoder->disable(encoder);
11559 } 11565 }
11566 encoder->base.crtc = NULL;
11567 encoder->connectors_active = false;
11560 11568
11561 /* Inconsistent output/port/pipe state happens presumably due to 11569 /* Inconsistent output/port/pipe state happens presumably due to
11562 * a bug in one of the get_hw_state functions. Or someplace else 11570 * a bug in one of the get_hw_state functions. Or someplace else
@@ -11567,8 +11575,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
11567 base.head) { 11575 base.head) {
11568 if (connector->encoder != encoder) 11576 if (connector->encoder != encoder)
11569 continue; 11577 continue;
11570 11578 connector->base.dpms = DRM_MODE_DPMS_OFF;
11571 intel_connector_break_all_links(connector); 11579 connector->base.encoder = NULL;
11572 } 11580 }
11573 } 11581 }
11574 /* Enabled encoders without active connectors will be fixed in 11582 /* Enabled encoders without active connectors will be fixed in
@@ -11616,6 +11624,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
11616 base.head) { 11624 base.head) {
11617 memset(&crtc->config, 0, sizeof(crtc->config)); 11625 memset(&crtc->config, 0, sizeof(crtc->config));
11618 11626
11627 crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
11628
11619 crtc->active = dev_priv->display.get_pipe_config(crtc, 11629 crtc->active = dev_priv->display.get_pipe_config(crtc,
11620 &crtc->config); 11630 &crtc->config);
11621 11631
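
intel_connector_break_all_links() is dropped above because it broke the encoder link from inside the per-connector loop, which goes wrong when several connectors share one encoder. The sanitize paths now detach connectors first and then break each encoder's crtc link in a separate pass. A simplified sketch of that two-pass teardown, with illustrative structures in place of the drm/intel types:

#include <stddef.h>

struct crtc { int id; };
struct encoder { struct crtc *crtc; int connectors_active; };
struct connector { struct encoder *enc; int dpms_off; };

static void sanitize_crtc(struct crtc *crtc, struct connector *conn, size_t n)
{
	size_t i;

	/* Pass 1: turn off and detach every connector on this crtc. */
	for (i = 0; i < n; i++)
		if (conn[i].enc && conn[i].enc->crtc == crtc)
			conn[i].dpms_off = 1;

	/* Pass 2: break encoder->crtc links separately, so an encoder
	 * shared by several connectors is still reset exactly once. */
	for (i = 0; i < n; i++)
		if (conn[i].enc && conn[i].enc->crtc == crtc) {
			conn[i].enc->connectors_active = 0;
			conn[i].enc->crtc = NULL;
		}
}

int main(void)
{
	struct crtc c = { 0 };
	struct encoder enc = { &c, 1 };
	struct connector conn[2] = { { &enc, 0 }, { &enc, 0 } };
	sanitize_crtc(&c, conn, 2);	/* both connectors off, link broken once */
	return 0;
}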
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a0dad1a2f819..2a00cb828d20 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -105,7 +105,8 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
105 case DP_LINK_BW_2_7: 105 case DP_LINK_BW_2_7:
106 break; 106 break;
107 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ 107 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
108 if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) && 108 if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
109 INTEL_INFO(dev)->gen >= 8) &&
109 intel_dp->dpcd[DP_DPCD_REV] >= 0x12) 110 intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
110 max_link_bw = DP_LINK_BW_5_4; 111 max_link_bw = DP_LINK_BW_5_4;
111 else 112 else
@@ -120,6 +121,22 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
120 return max_link_bw; 121 return max_link_bw;
121} 122}
122 123
124static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
125{
126 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
127 struct drm_device *dev = intel_dig_port->base.base.dev;
128 u8 source_max, sink_max;
129
130 source_max = 4;
131 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
132 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
133 source_max = 2;
134
135 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
136
137 return min(source_max, sink_max);
138}
139
123/* 140/*
124 * The units on the numbers in the next two are... bizarre. Examples will 141 * The units on the numbers in the next two are... bizarre. Examples will
125 * make it clearer; this one parallels an example in the eDP spec. 142 * make it clearer; this one parallels an example in the eDP spec.
@@ -170,7 +187,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
170 } 187 }
171 188
172 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); 189 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
173 max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); 190 max_lanes = intel_dp_max_lane_count(intel_dp);
174 191
175 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 192 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
176 mode_rate = intel_dp_link_required(target_clock, 18); 193 mode_rate = intel_dp_link_required(target_clock, 18);
@@ -575,7 +592,8 @@ out:
575 return ret; 592 return ret;
576} 593}
577 594
578#define HEADER_SIZE 4 595#define BARE_ADDRESS_SIZE 3
596#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
579static ssize_t 597static ssize_t
580intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) 598intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
581{ 599{
@@ -592,7 +610,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
592 switch (msg->request & ~DP_AUX_I2C_MOT) { 610 switch (msg->request & ~DP_AUX_I2C_MOT) {
593 case DP_AUX_NATIVE_WRITE: 611 case DP_AUX_NATIVE_WRITE:
594 case DP_AUX_I2C_WRITE: 612 case DP_AUX_I2C_WRITE:
595 txsize = HEADER_SIZE + msg->size; 613 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
596 rxsize = 1; 614 rxsize = 1;
597 615
598 if (WARN_ON(txsize > 20)) 616 if (WARN_ON(txsize > 20))
@@ -611,7 +629,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
611 629
612 case DP_AUX_NATIVE_READ: 630 case DP_AUX_NATIVE_READ:
613 case DP_AUX_I2C_READ: 631 case DP_AUX_I2C_READ:
614 txsize = HEADER_SIZE; 632 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
615 rxsize = msg->size + 1; 633 rxsize = msg->size + 1;
616 634
617 if (WARN_ON(rxsize > 20)) 635 if (WARN_ON(rxsize > 20))
@@ -749,8 +767,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
749 struct intel_crtc *intel_crtc = encoder->new_crtc; 767 struct intel_crtc *intel_crtc = encoder->new_crtc;
750 struct intel_connector *intel_connector = intel_dp->attached_connector; 768 struct intel_connector *intel_connector = intel_dp->attached_connector;
751 int lane_count, clock; 769 int lane_count, clock;
752 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 770 int min_lane_count = 1;
771 int max_lane_count = intel_dp_max_lane_count(intel_dp);
753 /* Conveniently, the link BW constants become indices with a shift...*/ 772 /* Conveniently, the link BW constants become indices with a shift...*/
773 int min_clock = 0;
754 int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; 774 int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
755 int bpp, mode_rate; 775 int bpp, mode_rate;
756 static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; 776 static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
@@ -783,19 +803,38 @@ intel_dp_compute_config(struct intel_encoder *encoder,
783 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 803 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
784 * bpc in between. */ 804 * bpc in between. */
785 bpp = pipe_config->pipe_bpp; 805 bpp = pipe_config->pipe_bpp;
786 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && 806 if (is_edp(intel_dp)) {
787 dev_priv->vbt.edp_bpp < bpp) { 807 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
788 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", 808 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
789 dev_priv->vbt.edp_bpp); 809 dev_priv->vbt.edp_bpp);
790 bpp = dev_priv->vbt.edp_bpp; 810 bpp = dev_priv->vbt.edp_bpp;
811 }
812
813 if (IS_BROADWELL(dev)) {
814 /* Yes, it's an ugly hack. */
815 min_lane_count = max_lane_count;
816 DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
817 min_lane_count);
818 } else if (dev_priv->vbt.edp_lanes) {
819 min_lane_count = min(dev_priv->vbt.edp_lanes,
820 max_lane_count);
821 DRM_DEBUG_KMS("using min %u lanes per VBT\n",
822 min_lane_count);
823 }
824
825 if (dev_priv->vbt.edp_rate) {
826 min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
827 DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
828 bws[min_clock]);
829 }
791 } 830 }
792 831
793 for (; bpp >= 6*3; bpp -= 2*3) { 832 for (; bpp >= 6*3; bpp -= 2*3) {
794 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 833 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
795 bpp); 834 bpp);
796 835
797 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 836 for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
798 for (clock = 0; clock <= max_clock; clock++) { 837 for (clock = min_clock; clock <= max_clock; clock++) {
799 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); 838 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
800 link_avail = intel_dp_max_data_rate(link_clock, 839 link_avail = intel_dp_max_data_rate(link_clock,
801 lane_count); 840 lane_count);
@@ -3618,7 +3657,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3618{ 3657{
3619 struct drm_connector *connector = &intel_connector->base; 3658 struct drm_connector *connector = &intel_connector->base;
3620 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3659 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3621 struct drm_device *dev = intel_dig_port->base.base.dev; 3660 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3661 struct drm_device *dev = intel_encoder->base.dev;
3622 struct drm_i915_private *dev_priv = dev->dev_private; 3662 struct drm_i915_private *dev_priv = dev->dev_private;
3623 struct drm_display_mode *fixed_mode = NULL; 3663 struct drm_display_mode *fixed_mode = NULL;
3624 bool has_dpcd; 3664 bool has_dpcd;
@@ -3628,6 +3668,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3628 if (!is_edp(intel_dp)) 3668 if (!is_edp(intel_dp))
3629 return true; 3669 return true;
3630 3670
3671 /* The VDD bit needs a power domain reference, so if the bit is already
3672 * enabled when we boot, grab this reference. */
3673 if (edp_have_panel_vdd(intel_dp)) {
3674 enum intel_display_power_domain power_domain;
3675 power_domain = intel_display_port_power_domain(intel_encoder);
3676 intel_display_power_get(dev_priv, power_domain);
3677 }
3678
3631 /* Cache DPCD and EDID for edp. */ 3679 /* Cache DPCD and EDID for edp. */
3632 intel_edp_panel_vdd_on(intel_dp); 3680 intel_edp_panel_vdd_on(intel_dp);
3633 has_dpcd = intel_dp_get_dpcd(intel_dp); 3681 has_dpcd = intel_dp_get_dpcd(intel_dp);
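
Two related bounds appear in the intel_dp.c hunks above: intel_dp_max_lane_count() clamps the lane count to the minimum of what the source port wiring (e.g. a 2-lane eDP port A) and the sink's DPCD allow, and the compute loop now starts from VBT-provided minimums instead of always trying 1 lane at the lowest clock. A tiny sketch of the clamp, with illustrative values:

#include <stdio.h>

static unsigned max_lane_count(unsigned source_max, unsigned sink_max)
{
	return source_max < sink_max ? source_max : sink_max;
}

int main(void)
{
	/* e.g. an eDP port A wired with 2 lanes, panel advertising 4 */
	unsigned source_max = 2, sink_max = 4;
	printf("usable lanes: %u\n", max_lane_count(source_max, sink_max)); /* 2 */
	return 0;
}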
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0542de982260..328b1a70264b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -236,7 +236,8 @@ struct intel_crtc_config {
236 * tracked with quirk flags so that fastboot and state checker can act 236 * tracked with quirk flags so that fastboot and state checker can act
237 * accordingly. 237 * accordingly.
238 */ 238 */
239#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ 239#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
240#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
240 unsigned long quirks; 241 unsigned long quirks;
241 242
242 /* User requested mode, only valid as a starting point to 243 /* User requested mode, only valid as a starting point to
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index b4d44e62f0c7..f73ba5e6b7a8 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -132,6 +132,16 @@ static int intelfb_create(struct drm_fb_helper *helper,
132 132
133 mutex_lock(&dev->struct_mutex); 133 mutex_lock(&dev->struct_mutex);
134 134
135 if (intel_fb &&
136 (sizes->fb_width > intel_fb->base.width ||
137 sizes->fb_height > intel_fb->base.height)) {
138 DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
139 " releasing it\n",
140 intel_fb->base.width, intel_fb->base.height,
141 sizes->fb_width, sizes->fb_height);
142 drm_framebuffer_unreference(&intel_fb->base);
143 intel_fb = ifbdev->fb = NULL;
144 }
135 if (!intel_fb || WARN_ON(!intel_fb->obj)) { 145 if (!intel_fb || WARN_ON(!intel_fb->obj)) {
136 DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); 146 DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
137 ret = intelfb_alloc(helper, sizes); 147 ret = intelfb_alloc(helper, sizes);
@@ -377,6 +387,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
377 height); 387 height);
378 } 388 }
379 389
390 /* No preferred mode marked by the EDID? Are there any modes? */
391 if (!modes[i] && !list_empty(&connector->modes)) {
392 DRM_DEBUG_KMS("using first mode listed on connector %s\n",
393 drm_get_connector_name(connector));
394 modes[i] = list_first_entry(&connector->modes,
395 struct drm_display_mode,
396 head);
397 }
398
380 /* last resort: use current mode */ 399 /* last resort: use current mode */
381 if (!modes[i]) { 400 if (!modes[i]) {
382 /* 401 /*
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index b0413e190625..157267aa3561 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -821,11 +821,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
821 } 821 }
822} 822}
823 823
824static int hdmi_portclock_limit(struct intel_hdmi *hdmi) 824static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
825{ 825{
826 struct drm_device *dev = intel_hdmi_to_dev(hdmi); 826 struct drm_device *dev = intel_hdmi_to_dev(hdmi);
827 827
828 if (!hdmi->has_hdmi_sink || IS_G4X(dev)) 828 if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
829 return 165000; 829 return 165000;
830 else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) 830 else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
831 return 300000; 831 return 300000;
@@ -837,7 +837,8 @@ static enum drm_mode_status
837intel_hdmi_mode_valid(struct drm_connector *connector, 837intel_hdmi_mode_valid(struct drm_connector *connector,
838 struct drm_display_mode *mode) 838 struct drm_display_mode *mode)
839{ 839{
840 if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector))) 840 if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
841 true))
841 return MODE_CLOCK_HIGH; 842 return MODE_CLOCK_HIGH;
842 if (mode->clock < 20000) 843 if (mode->clock < 20000)
843 return MODE_CLOCK_LOW; 844 return MODE_CLOCK_LOW;
@@ -879,7 +880,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
879 struct drm_device *dev = encoder->base.dev; 880 struct drm_device *dev = encoder->base.dev;
880 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 881 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
881 int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2; 882 int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
882 int portclock_limit = hdmi_portclock_limit(intel_hdmi); 883 int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
883 int desired_bpp; 884 int desired_bpp;
884 885
885 if (intel_hdmi->color_range_auto) { 886 if (intel_hdmi->color_range_auto) {
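
hdmi_portclock_limit() above gains a respect_dvi_limit flag: mode validation keeps filtering at the 165 MHz single-link DVI limit, while intel_hdmi_compute_config() passes false so an explicitly requested mode is not rejected on that ground. A compact sketch of the two call sites' behavior, with the limits hard-coded for illustration:

#include <stdio.h>

static int portclock_limit(int has_hdmi_sink, int respect_dvi_limit)
{
	if (respect_dvi_limit && !has_hdmi_sink)
		return 165000;	/* kHz, single-link DVI */
	return 300000;		/* kHz, illustrative platform limit */
}

int main(void)
{
	/* DVI sink: mode_valid-style check filters, compute-style does not. */
	printf("filtering limit: %d\n", portclock_limit(0, 1)); /* 165000 */
	printf("config limit:    %d\n", portclock_limit(0, 0)); /* 300000 */
	return 0;
}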
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d8adc9104dca..129db0c7d835 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
193 struct overlay_registers __iomem *regs; 193 struct overlay_registers __iomem *regs;
194 194
195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; 196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
197 else 197 else
198 regs = io_mapping_map_wc(dev_priv->gtt.mappable, 198 regs = io_mapping_map_wc(dev_priv->gtt.mappable,
199 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 199 i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev)
1340 overlay->reg_bo = reg_bo; 1340 overlay->reg_bo = reg_bo;
1341 1341
1342 if (OVERLAY_NEEDS_PHYSICAL(dev)) { 1342 if (OVERLAY_NEEDS_PHYSICAL(dev)) {
1343 ret = i915_gem_attach_phys_object(dev, reg_bo, 1343 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
1344 I915_GEM_PHYS_OVERLAY_REGS,
1345 PAGE_SIZE);
1346 if (ret) { 1344 if (ret) {
1347 DRM_ERROR("failed to attach phys overlay regs\n"); 1345 DRM_ERROR("failed to attach phys overlay regs\n");
1348 goto out_free_bo; 1346 goto out_free_bo;
1349 } 1347 }
1350 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; 1348 overlay->flip_addr = reg_bo->phys_handle->busaddr;
1351 } else { 1349 } else {
1352 ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE); 1350 ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
1353 if (ret) { 1351 if (ret) {
@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1428 /* Cast to make sparse happy, but it's wc memory anyway, so 1426 /* Cast to make sparse happy, but it's wc memory anyway, so
1429 * equivalent to the wc io mapping on X86. */ 1427 * equivalent to the wc io mapping on X86. */
1430 regs = (struct overlay_registers __iomem *) 1428 regs = (struct overlay_registers __iomem *)
1431 overlay->reg_bo->phys_obj->handle->vaddr; 1429 overlay->reg_bo->phys_handle->vaddr;
1432 else 1430 else
1433 regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 1431 regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1434 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 1432 i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1462 error->dovsta = I915_READ(DOVSTA); 1460 error->dovsta = I915_READ(DOVSTA);
1463 error->isr = I915_READ(ISR); 1461 error->isr = I915_READ(ISR);
1464 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1462 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1465 error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; 1463 error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
1466 else 1464 else
1467 error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); 1465 error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
1468 1466
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index cb058408c70e..cb8cfb7e0974 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -492,6 +492,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
492 enum pipe pipe = intel_get_pipe_from_connector(connector); 492 enum pipe pipe = intel_get_pipe_from_connector(connector);
493 u32 freq; 493 u32 freq;
494 unsigned long flags; 494 unsigned long flags;
495 u64 n;
495 496
496 if (!panel->backlight.present || pipe == INVALID_PIPE) 497 if (!panel->backlight.present || pipe == INVALID_PIPE)
497 return; 498 return;
@@ -502,10 +503,9 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
502 503
503 /* scale to hardware max, but be careful to not overflow */ 504 /* scale to hardware max, but be careful to not overflow */
504 freq = panel->backlight.max; 505 freq = panel->backlight.max;
505 if (freq < max) 506 n = (u64)level * freq;
506 level = level * freq / max; 507 do_div(n, max);
507 else 508 level = n;
508 level = freq / max * level;
509 509
510 panel->backlight.level = level; 510 panel->backlight.level = level;
511 if (panel->backlight.device) 511 if (panel->backlight.device)
@@ -1065,6 +1065,11 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
1065 unsigned long flags; 1065 unsigned long flags;
1066 int ret; 1066 int ret;
1067 1067
1068 if (!dev_priv->vbt.backlight.present) {
1069 DRM_DEBUG_KMS("native backlight control not available per VBT\n");
1070 return 0;
1071 }
1072
1068 /* set level and max in panel struct */ 1073 /* set level and max in panel struct */
1069 spin_lock_irqsave(&dev_priv->backlight_lock, flags); 1074 spin_lock_irqsave(&dev_priv->backlight_lock, flags);
1070 ret = dev_priv->display.setup_backlight(intel_connector); 1075 ret = dev_priv->display.setup_backlight(intel_connector);
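
The backlight scaling fix above replaces a branchy 32-bit level * freq / max, whose freq >= max arm rounded freq / max down before multiplying and whose other arm could overflow, with a single 64-bit multiply followed by do_div(). A userspace equivalent using uint64_t (do_div() is the kernel's in-place 64-by-32 divide helper):

#include <stdint.h>
#include <stdio.h>

static uint32_t scale(uint32_t level, uint32_t freq, uint32_t max)
{
	uint64_t n = (uint64_t)level * freq;	/* widen before multiplying */
	return (uint32_t)(n / max);		/* stands in for do_div(n, max) */
}

int main(void)
{
	/* level=60000, freq=100000: the 32-bit product would wrap. */
	printf("%u\n", scale(60000, 100000, 65535));	/* prints 91554 */
	return 0;
}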
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 5874716774a7..d93dcf683e8c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1545,6 +1545,16 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1545 1545
1546 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 1546 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1547 1547
1548 if (IS_I915GM(dev) && enabled) {
1549 struct intel_framebuffer *fb;
1550
1551 fb = to_intel_framebuffer(enabled->primary->fb);
1552
1553 /* self-refresh seems busted with untiled */
1554 if (fb->obj->tiling_mode == I915_TILING_NONE)
1555 enabled = NULL;
1556 }
1557
1548 /* 1558 /*
1549 * Overlay gets an aggressive default since video jitter is bad. 1559 * Overlay gets an aggressive default since video jitter is bad.
1550 */ 1560 */
@@ -2085,6 +2095,43 @@ static void intel_print_wm_latency(struct drm_device *dev,
2085 } 2095 }
2086} 2096}
2087 2097
2098static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2099 uint16_t wm[5], uint16_t min)
2100{
2101 int level, max_level = ilk_wm_max_level(dev_priv->dev);
2102
2103 if (wm[0] >= min)
2104 return false;
2105
2106 wm[0] = max(wm[0], min);
2107 for (level = 1; level <= max_level; level++)
2108 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2109
2110 return true;
2111}
2112
2113static void snb_wm_latency_quirk(struct drm_device *dev)
2114{
2115 struct drm_i915_private *dev_priv = dev->dev_private;
2116 bool changed;
2117
2118 /*
2119 * The BIOS-provided WM memory latency values are often
2120 * inadequate for high-resolution displays. Adjust them.
2121 */
2122 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2123 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2124 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2125
2126 if (!changed)
2127 return;
2128
2129 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2130 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2131 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2132 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2133}
2134
2088static void ilk_setup_wm_latency(struct drm_device *dev) 2135static void ilk_setup_wm_latency(struct drm_device *dev)
2089{ 2136{
2090 struct drm_i915_private *dev_priv = dev->dev_private; 2137 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2102,6 +2149,9 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
2102 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); 2149 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2103 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); 2150 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2104 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); 2151 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2152
2153 if (IS_GEN6(dev))
2154 snb_wm_latency_quirk(dev);
2105} 2155}
2106 2156
2107static void ilk_compute_wm_parameters(struct drm_crtc *crtc, 2157static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
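
snb_wm_latency_quirk() above bumps suspiciously low BIOS watermark latencies: level 0 is raised to a floor of 12, every deeper level to at least DIV_ROUND_UP(12, 5) = 3, and the adjusted tables are logged once if anything changed. A standalone sketch of the bump with a worked example:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int increase_wm_latency(uint16_t wm[5], int max_level, uint16_t min)
{
	int level;

	if (wm[0] >= min)
		return 0;	/* already high enough, nothing changed */

	wm[0] = min;
	for (level = 1; level <= max_level; level++)
		wm[level] = wm[level] > DIV_ROUND_UP(min, 5) ?
			    wm[level] : DIV_ROUND_UP(min, 5);
	return 1;
}

int main(void)
{
	uint16_t wm[5] = { 7, 1, 2, 0, 0 };
	/* min=12: wm[0] -> 12, deeper levels -> at least ceil(12/5) = 3 */
	increase_wm_latency(wm, 3, 12);
	printf("%d %d %d %d\n", wm[0], wm[1], wm[2], wm[3]); /* 12 3 3 3 */
	return 0;
}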
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6bc68bdcf433..79fb4cc2137c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -437,32 +437,41 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
437 I915_WRITE(HWS_PGA, addr); 437 I915_WRITE(HWS_PGA, addr);
438} 438}
439 439
440static int init_ring_common(struct intel_ring_buffer *ring) 440static bool stop_ring(struct intel_ring_buffer *ring)
441{ 441{
442 struct drm_device *dev = ring->dev; 442 struct drm_i915_private *dev_priv = to_i915(ring->dev);
443 struct drm_i915_private *dev_priv = dev->dev_private;
444 struct drm_i915_gem_object *obj = ring->obj;
445 int ret = 0;
446 u32 head;
447 443
448 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 444 if (!IS_GEN2(ring->dev)) {
445 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
446 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
447 DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
448 return false;
449 }
450 }
449 451
450 /* Stop the ring if it's running. */
451 I915_WRITE_CTL(ring, 0); 452 I915_WRITE_CTL(ring, 0);
452 I915_WRITE_HEAD(ring, 0); 453 I915_WRITE_HEAD(ring, 0);
453 ring->write_tail(ring, 0); 454 ring->write_tail(ring, 0);
454 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
455 DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
456 455
457 if (I915_NEED_GFX_HWS(dev)) 456 if (!IS_GEN2(ring->dev)) {
458 intel_ring_setup_status_page(ring); 457 (void)I915_READ_CTL(ring);
459 else 458 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
460 ring_setup_phys_status_page(ring); 459 }
461 460
462 head = I915_READ_HEAD(ring) & HEAD_ADDR; 461 return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
462}
463 463
464 /* G45 ring initialization fails to reset head to zero */ 464static int init_ring_common(struct intel_ring_buffer *ring)
465 if (head != 0) { 465{
466 struct drm_device *dev = ring->dev;
467 struct drm_i915_private *dev_priv = dev->dev_private;
468 struct drm_i915_gem_object *obj = ring->obj;
469 int ret = 0;
470
471 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
472
473 if (!stop_ring(ring)) {
474 /* G45 ring initialization often fails to reset head to zero */
466 DRM_DEBUG_KMS("%s head not reset to zero " 475 DRM_DEBUG_KMS("%s head not reset to zero "
467 "ctl %08x head %08x tail %08x start %08x\n", 476 "ctl %08x head %08x tail %08x start %08x\n",
468 ring->name, 477 ring->name,
@@ -471,9 +480,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
471 I915_READ_TAIL(ring), 480 I915_READ_TAIL(ring),
472 I915_READ_START(ring)); 481 I915_READ_START(ring));
473 482
474 I915_WRITE_HEAD(ring, 0); 483 if (!stop_ring(ring)) {
475
476 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
477 DRM_ERROR("failed to set %s head to zero " 484 DRM_ERROR("failed to set %s head to zero "
478 "ctl %08x head %08x tail %08x start %08x\n", 485 "ctl %08x head %08x tail %08x start %08x\n",
479 ring->name, 486 ring->name,
@@ -481,9 +488,16 @@ static int init_ring_common(struct intel_ring_buffer *ring)
481 I915_READ_HEAD(ring), 488 I915_READ_HEAD(ring),
482 I915_READ_TAIL(ring), 489 I915_READ_TAIL(ring),
483 I915_READ_START(ring)); 490 I915_READ_START(ring));
491 ret = -EIO;
492 goto out;
484 } 493 }
485 } 494 }
486 495
496 if (I915_NEED_GFX_HWS(dev))
497 intel_ring_setup_status_page(ring);
498 else
499 ring_setup_phys_status_page(ring);
500
487 /* Initialize the ring. This must happen _after_ we've cleared the ring 501 /* Initialize the ring. This must happen _after_ we've cleared the ring
488 * registers with the above sequence (the readback of the HEAD registers 502 * registers with the above sequence (the readback of the HEAD registers
489 * also enforces ordering), otherwise the hw might lose the new ring 503 * also enforces ordering), otherwise the hw might lose the new ring
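
stop_ring() above relies on MI_MODE being a write-masked register: the high 16 bits select which of the low 16 bits the hardware will actually change, so _MASKED_BIT_ENABLE(STOP_RING) and _MASKED_BIT_DISABLE(STOP_RING) can set and clear a single bit with no read-modify-write. A sketch of the encoding; the macro bodies below match the common i915 definitions but are reproduced from memory, so treat them as an assumption:

#include <stdio.h>

#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))	/* mask bit + value bit */
#define MASKED_BIT_DISABLE(a) ((a) << 16)		/* mask bit, value 0 */
#define STOP_RING (1 << 8)

int main(void)
{
	printf("enable:  0x%08x\n", MASKED_BIT_ENABLE(STOP_RING));
	printf("disable: 0x%08x\n", MASKED_BIT_DISABLE(STOP_RING));
	/* enable:  0x01000100, disable: 0x01000000 */
	return 0;
}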
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 270a6a973438..2b91c4b4d34b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -34,6 +34,7 @@ struct intel_hw_status_page {
34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) 34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
35 35
36#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) 36#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
37#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
37 38
38enum intel_ring_hangcheck_action { 39enum intel_ring_hangcheck_action {
39 HANGCHECK_IDLE = 0, 40 HANGCHECK_IDLE = 0,
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d27155adf5db..46be00d66df3 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2424,8 +2424,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
2424 if (ret < 0) 2424 if (ret < 0)
2425 goto err1; 2425 goto err1;
2426 2426
2427 ret = sysfs_create_link(&encoder->ddc.dev.kobj, 2427 ret = sysfs_create_link(&drm_connector->kdev->kobj,
2428 &drm_connector->kdev->kobj, 2428 &encoder->ddc.dev.kobj,
2429 encoder->ddc.dev.kobj.name); 2429 encoder->ddc.dev.kobj.name);
2430 if (ret < 0) 2430 if (ret < 0)
2431 goto err2; 2431 goto err2;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index f729dc71d5be..d0c75779d3f6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -185,6 +185,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
185{ 185{
186 __raw_i915_write32(dev_priv, FORCEWAKE_VLV, 186 __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
187 _MASKED_BIT_DISABLE(0xffff)); 187 _MASKED_BIT_DISABLE(0xffff));
188 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
189 _MASKED_BIT_DISABLE(0xffff));
188 /* something from same cacheline, but !FORCEWAKE_VLV */ 190 /* something from same cacheline, but !FORCEWAKE_VLV */
189 __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); 191 __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
190} 192}