Diffstat (limited to 'drivers/gpu/drm')
 drivers/gpu/drm/armada/armada_drm.h | 1
 drivers/gpu/drm/armada/armada_drv.c | 7
 drivers/gpu/drm/armada/armada_fbdev.c | 20
 drivers/gpu/drm/armada/armada_gem.c | 7
 drivers/gpu/drm/drm_edid.c | 8
 drivers/gpu/drm/drm_modes.c | 2
 drivers/gpu/drm/drm_stub.c | 6
 drivers/gpu/drm/i915/Makefile | 3
 drivers/gpu/drm/i915/i915_debugfs.c | 80
 drivers/gpu/drm/i915/i915_dma.c | 22
 drivers/gpu/drm/i915/i915_drv.c | 10
 drivers/gpu/drm/i915/i915_drv.h | 17
 drivers/gpu/drm/i915/i915_gem.c | 50
 drivers/gpu/drm/i915/i915_gem_context.c | 16
 drivers/gpu/drm/i915/i915_gem_evict.c | 14
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 28
 drivers/gpu/drm/i915/i915_gem_gtt.c | 39
 drivers/gpu/drm/i915/i915_gem_stolen.c | 1
 drivers/gpu/drm/i915/i915_irq.c | 17
 drivers/gpu/drm/i915/i915_reg.h | 92
 drivers/gpu/drm/i915/i915_suspend.c | 4
 drivers/gpu/drm/i915/intel_bios.c | 29
 drivers/gpu/drm/i915/intel_bios.h | 16
 drivers/gpu/drm/i915/intel_ddi.c | 8
 drivers/gpu/drm/i915/intel_display.c | 156
 drivers/gpu/drm/i915/intel_drv.h | 6
 drivers/gpu/drm/i915/intel_fbdev.c | 2
 drivers/gpu/drm/i915/intel_overlay.c | 2
 drivers/gpu/drm/i915/intel_pm.c | 1051
 drivers/gpu/drm/i915/intel_ringbuffer.c | 23
 drivers/gpu/drm/i915/intel_sprite.c | 50
 drivers/gpu/drm/nouveau/core/core/subdev.c | 3
 drivers/gpu/drm/nouveau/core/engine/device/base.c | 2
 drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | 2
 drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 2
 drivers/gpu/drm/nouveau/core/include/subdev/fb.h | 5
 drivers/gpu/drm/nouveau/core/include/subdev/i2c.h | 2
 drivers/gpu/drm/nouveau/core/include/subdev/instmem.h | 7
 drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 14
 drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 4
 drivers/gpu/drm/nouveau/core/subdev/therm/ic.c | 10
 drivers/gpu/drm/nouveau/dispnv04/dfp.c | 2
 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 2
 drivers/gpu/drm/nouveau/nouveau_abi16.c | 2
 drivers/gpu/drm/nouveau/nouveau_acpi.c | 16
 drivers/gpu/drm/nouveau/nouveau_display.c | 2
 drivers/gpu/drm/nouveau/nouveau_drm.c | 6
 drivers/gpu/drm/radeon/atombios_crtc.c | 87
 drivers/gpu/drm/radeon/cik.c | 12
 drivers/gpu/drm/radeon/cik_sdma.c | 2
 drivers/gpu/drm/radeon/dce6_afmt.c | 8
 drivers/gpu/drm/radeon/evergreen_hdmi.c | 4
 drivers/gpu/drm/radeon/ni.c | 20
 drivers/gpu/drm/radeon/radeon.h | 4
 drivers/gpu/drm/radeon/radeon_asic.c | 4
 drivers/gpu/drm/radeon/radeon_atpx_handler.c | 16
 drivers/gpu/drm/radeon/radeon_drv.c | 13
 drivers/gpu/drm/radeon/radeon_kms.c | 9
 drivers/gpu/drm/radeon/radeon_uvd.c | 2
 drivers/gpu/drm/radeon/rs690.c | 10
 drivers/gpu/drm/radeon/rv770_dpm.c | 6
 drivers/gpu/drm/radeon/si.c | 12
 drivers/gpu/drm/ttm/ttm_bo_util.c | 3
 drivers/gpu/drm/ttm/ttm_bo_vm.c | 6
 64 files changed, 937 insertions(+), 1149 deletions(-)
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index eef09ec9a5ff..a72cae03b99b 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *,
 extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
 
 int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_lastclose(struct drm_device *);
 void armada_fbdev_fini(struct drm_device *);
 
 int armada_overlay_plane_create(struct drm_device *, unsigned long);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 069f64533ac3..acf3a36c9ebc 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -322,6 +322,11 @@ static struct drm_ioctl_desc armada_ioctls[] = {
 		DRM_UNLOCKED),
 };
 
+static void armada_drm_lastclose(struct drm_device *dev)
+{
+	armada_fbdev_lastclose(dev);
+}
+
 static const struct file_operations armada_drm_fops = {
 	.owner = THIS_MODULE,
 	.llseek = no_llseek,
@@ -338,7 +343,7 @@ static struct drm_driver armada_drm_driver = {
 	.open = NULL,
 	.preclose = NULL,
 	.postclose = NULL,
-	.lastclose = NULL,
+	.lastclose = armada_drm_lastclose,
 	.unload = armada_drm_unload,
 	.get_vblank_counter = drm_vblank_count,
 	.enable_vblank = armada_drm_enable_vblank,
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index dd5ea77dac96..948cb14c561e 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
 	drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
 	drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
 
-	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
-		dfb->fb.width, dfb->fb.height,
-		dfb->fb.bits_per_pixel, obj->phys_addr);
+	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
+		dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
+		(unsigned long long)obj->phys_addr);
 
 	return 0;
 
@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev)
 	return ret;
 }
 
+void armada_fbdev_lastclose(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	drm_modeset_lock_all(dev);
+	if (priv->fbdev)
+		drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+	drm_modeset_unlock_all(dev);
+}
+
 void armada_fbdev_fini(struct drm_device *dev)
 {
 	struct armada_private *priv = dev->dev_private;
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev)
 			framebuffer_release(info);
 		}
 
+		drm_fb_helper_fini(fbh);
+
 		if (fbh->fb)
 			fbh->fb->funcs->destroy(fbh->fb);
 
-		drm_fb_helper_fini(fbh);
-
 		priv->fbdev = NULL;
 	}
 }
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 9f2356bae7fd..887816f43476 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 		obj->dev_addr = obj->linear->start;
 	}
 
-	DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
-		obj, obj->phys_addr, obj->dev_addr);
+	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
+		(unsigned long long)obj->phys_addr,
+		(unsigned long long)obj->dev_addr);
 
 	return 0;
 }
@@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
 			 * refcount on the gem object itself.
 			 */
 			drm_gem_object_reference(obj);
-			dma_buf_put(buf);
 			return obj;
 		}
 	}
@@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
 	}
 
 	dobj->obj.import_attach = attach;
+	get_dma_buf(buf);
 
 	/*
 	 * Don't call dma_buf_map_attachment() here - it maps the
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0a1e4a5f4234..8835dcddfac3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -68,6 +68,8 @@
 #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
 /* Force reduced-blanking timings for detailed modes */
 #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
+/* Force 8bpc */
+#define EDID_QUIRK_FORCE_8BPC (1 << 8)
 
 struct detailed_mode_closure {
 	struct drm_connector *connector;
@@ -128,6 +130,9 @@ static struct edid_quirk {
 
 	/* Medion MD 30217 PG */
 	{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+
+	/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+	{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
 };
 
 /*
@@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 
 	drm_add_display_info(edid, &connector->display_info);
 
+	if (quirks & EDID_QUIRK_FORCE_8BPC)
+		connector->display_info.bpc = 8;
+
 	return num_modes;
 }
 EXPORT_SYMBOL(drm_add_edid_modes);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 85071a1c4547..b0733153dfd2 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1041,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
 			/* if equal delete the probed mode */
 			mode->status = pmode->status;
 			/* Merge type bits together */
-			mode->type = pmode->type;
+			mode->type |= pmode->type;
 			list_del(&pmode->head);
 			drm_mode_destroy(connector->dev, pmode);
 			break;
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index a4a5c6ac110a..98a33c580ca1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -564,11 +564,11 @@ err_unload:
 	if (dev->driver->unload)
 		dev->driver->unload(dev);
 err_primary_node:
-	drm_put_minor(dev->primary);
+	drm_unplug_minor(dev->primary);
 err_render_node:
-	drm_put_minor(dev->render);
+	drm_unplug_minor(dev->render);
 err_control_node:
-	drm_put_minor(dev->control);
+	drm_unplug_minor(dev->control);
 out_unlock:
 	mutex_unlock(&drm_global_mutex);
 	return ret;
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 41838eaa799c..da682cbcb806 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -4,7 +4,6 @@
 
 ccflags-y := -Iinclude/drm
 i915-y := i915_drv.o i915_dma.o i915_irq.o \
-	  i915_debugfs.o \
 	  i915_gpu_error.o \
 	  i915_suspend.o \
 	  i915_gem.o \
@@ -55,6 +54,8 @@ i915-$(CONFIG_ACPI) += intel_acpi.o
 
 i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
 
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+
 obj-$(CONFIG_DRM_I915) += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6badc1596ceb..b2b46c52294c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,8 +40,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-#if defined(CONFIG_DEBUG_FS)
-
 enum {
 	ACTIVE_LIST,
 	INACTIVE_LIST,
@@ -406,16 +404,26 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	seq_putc(m, '\n');
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct file_stats stats;
+		struct task_struct *task;
 
 		memset(&stats, 0, sizeof(stats));
 		idr_for_each(&file->object_idr, per_file_stats, &stats);
+		/*
+		 * Although we have a valid reference on file->pid, that does
+		 * not guarantee that the task_struct who called get_pid() is
+		 * still alive (e.g. get_pid(current) => fork() => exit()).
+		 * Therefore, we need to protect this ->comm access using RCU.
+		 */
+		rcu_read_lock();
+		task = pid_task(file->pid, PIDTYPE_PID);
 		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
-			   get_pid_task(file->pid, PIDTYPE_PID)->comm,
+			   task ? task->comm : "<unknown>",
 			   stats.count,
 			   stats.total,
 			   stats.active,
 			   stats.inactive,
 			   stats.unbound);
+		rcu_read_unlock();
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -1170,6 +1178,50 @@ static int ironlake_drpc_info(struct seq_file *m)
 	return 0;
 }
 
+static int vlv_drpc_info(struct seq_file *m)
+{
+
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rpmodectl1, rcctl1;
+	unsigned fw_rendercount = 0, fw_mediacount = 0;
+
+	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+	rcctl1 = I915_READ(GEN6_RC_CONTROL);
+
+	seq_printf(m, "Video Turbo Mode: %s\n",
+		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+	seq_printf(m, "Turbo enabled: %s\n",
+		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
+	seq_printf(m, "HW control enabled: %s\n",
+		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
+	seq_printf(m, "SW control enabled: %s\n",
+		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+			 GEN6_RP_MEDIA_SW_MODE));
+	seq_printf(m, "RC6 Enabled: %s\n",
+		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
+				   GEN6_RC_CTL_EI_MODE(1))));
+	seq_printf(m, "Render Power Well: %s\n",
+		   (I915_READ(VLV_GTLC_PW_STATUS) &
+		    VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+	seq_printf(m, "Media Power Well: %s\n",
+		   (I915_READ(VLV_GTLC_PW_STATUS) &
+		    VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+
+	spin_lock_irq(&dev_priv->uncore.lock);
+	fw_rendercount = dev_priv->uncore.fw_rendercount;
+	fw_mediacount = dev_priv->uncore.fw_mediacount;
+	spin_unlock_irq(&dev_priv->uncore.lock);
+
+	seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
+	seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
+
+
+	return 0;
+}
+
+
 static int gen6_drpc_info(struct seq_file *m)
 {
 
@@ -1275,7 +1327,9 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 
-	if (IS_GEN6(dev) || IS_GEN7(dev))
+	if (IS_VALLEYVIEW(dev))
+		return vlv_drpc_info(m);
+	else if (IS_GEN6(dev) || IS_GEN7(dev))
 		return gen6_drpc_info(m);
 	else
 		return ironlake_drpc_info(m);
@@ -1287,7 +1341,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (!I915_HAS_FBC(dev)) {
+	if (!HAS_FBC(dev)) {
 		seq_puts(m, "FBC unsupported on this chipset\n");
 		return 0;
 	}
@@ -1349,7 +1403,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 		return 0;
 	}
 
-	if (I915_READ(IPS_CTL) & IPS_ENABLE)
+	if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
 		seq_puts(m, "enabled\n");
 	else
 		seq_puts(m, "disabled\n");
@@ -2117,8 +2171,8 @@ static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
 	info->dev = dev;
 	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
 				  &i915_pipe_crc_fops);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
+	if (!ent)
+		return -ENOMEM;
 
 	return drm_add_fake_info_node(minor, ent, info);
 }
@@ -3133,8 +3187,8 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
 				    S_IRUSR,
 				    root, dev,
 				    &i915_forcewake_fops);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
+	if (!ent)
+		return -ENOMEM;
 
 	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
 }
@@ -3151,8 +3205,8 @@ static int i915_debugfs_create(struct dentry *root,
 				  S_IRUGO | S_IWUSR,
 				  root, dev,
 				  fops);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
+	if (!ent)
+		return -ENOMEM;
 
 	return drm_add_fake_info_node(minor, ent, fops);
 }
@@ -3282,5 +3336,3 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 		drm_debugfs_remove_files(info_list, 1, minor);
 	}
 }
-
-#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 750918c779c8..e177d021c444 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -85,6 +85,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 
+	/*
+	 * The dri breadcrumb update races against the drm master disappearing.
+	 * Instead of trying to fix this (this is by far not the only ums issue)
+	 * just don't do the update in kms mode.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	if (dev->primary->master) {
 		master_priv = dev->primary->master->driver_priv;
 		if (master_priv->sarea_priv)
@@ -1405,7 +1413,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
 	master->driver_priv = NULL;
 }
 
-#ifdef CONFIG_DRM_I915_FBDEV
+#if IS_ENABLED(CONFIG_FB)
 static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
 	struct apertures_struct *ap;
@@ -1496,16 +1504,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	mutex_init(&dev_priv->dpio_lock);
-	mutex_init(&dev_priv->rps.hw_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 
-	mutex_init(&dev_priv->pc8.lock);
-	dev_priv->pc8.requirements_met = false;
-	dev_priv->pc8.gpu_idle = false;
-	dev_priv->pc8.irqs_disabled = false;
-	dev_priv->pc8.enabled = false;
-	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
-	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+	intel_pm_setup(dev);
 
 	intel_display_crc_init(dev);
 
@@ -1609,7 +1610,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	}
 
 	intel_irq_init(dev);
-	intel_pm_init(dev);
 	intel_uncore_sanitize(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1855,8 +1855,10 @@ void i915_driver_lastclose(struct drm_device * dev)
 
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
+	mutex_lock(&dev->struct_mutex);
 	i915_gem_context_close(dev, file_priv);
 	i915_gem_release(dev, file_priv);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 74516930de7a..43245b3fd2a2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -480,12 +480,12 @@ check_next:
 bool i915_semaphore_is_enabled(struct drm_device *dev)
 {
 	if (INTEL_INFO(dev)->gen < 6)
-		return 0;
+		return false;
 
 	/* Until we get further testing... */
 	if (IS_GEN8(dev)) {
 		WARN_ON(!i915_preliminary_hw_support);
-		return 0;
+		return false;
 	}
 
 	if (i915_semaphores >= 0)
@@ -497,7 +497,7 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 		return false;
 #endif
 
-	return 1;
+	return true;
 }
 
 static int i915_drm_freeze(struct drm_device *dev)
@@ -657,6 +657,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 	intel_modeset_init_hw(dev);
 
 	drm_modeset_lock_all(dev);
+	drm_mode_config_reset(dev);
 	intel_modeset_setup_hw_state(dev, true);
 	drm_modeset_unlock_all(dev);
 
@@ -919,6 +920,9 @@ static int i915_runtime_suspend(struct device *device)
 
 	DRM_DEBUG_KMS("Suspending device\n");
 
+	i915_gem_release_all_mmaps(dev_priv);
+
+	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 	dev_priv->pm.suspended = true;
 	intel_opregion_notify_adapter(dev, PCI_D3cold);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ae2c80c1981b..ff6f870d6621 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1184,6 +1184,11 @@ struct intel_vbt_data {
 	int edp_bpp;
 	struct edp_power_seq edp_pps;
 
+	struct {
+		u16 pwm_freq_hz;
+		bool active_low_pwm;
+	} backlight;
+
 	/* MIPI DSI */
 	struct {
 		u16 panel_id;
@@ -1210,7 +1215,7 @@ struct intel_wm_level {
 	uint32_t fbc_val;
 };
 
-struct hsw_wm_values {
+struct ilk_wm_values {
 	uint32_t wm_pipe[3];
 	uint32_t wm_lp[3];
 	uint32_t wm_lp_spr[3];
@@ -1396,7 +1401,6 @@ typedef struct drm_i915_private {
 
 	/* overlay */
 	struct intel_overlay *overlay;
-	unsigned int sprite_scaling_enabled;
 
 	/* backlight registers and fields in struct intel_panel */
 	spinlock_t backlight_lock;
@@ -1517,7 +1521,7 @@ typedef struct drm_i915_private {
 	uint16_t cur_latency[5];
 
 	/* current hardware state */
-	struct hsw_wm_values hw;
+	struct ilk_wm_values hw;
 	} wm;
 
 	struct i915_package_c8 pc8;
@@ -1840,7 +1844,7 @@ struct drm_i915_file_private {
 
 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
 #define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
 
@@ -1848,7 +1852,7 @@ struct drm_i915_file_private {
 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
-#define HAS_RUNTIME_PM(dev) false
+#define HAS_RUNTIME_PM(dev) (IS_HASWELL(dev))
 
 #define INTEL_PCH_DEVICE_ID_MASK 0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -1933,9 +1937,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev);
@@ -2015,6 +2017,7 @@ void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2be904c704e9..32636a470367 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1465,6 +1465,22 @@ out:
 	return ret;
 }
 
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+	struct i915_vma *vma;
+
+	/*
+	 * Only the global gtt is relevant for gtt memory mappings, so restrict
+	 * list traversal to objects bound into the global address space. Note
+	 * that the active list should be empty, but better safe than sorry.
+	 */
+	WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
+	list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
+		i915_gem_release_mmap(vma->obj);
+	list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
+		i915_gem_release_mmap(vma->obj);
+}
+
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -2354,15 +2370,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
 	kfree(request);
 }
 
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
 				       struct intel_ring_buffer *ring)
 {
-	u32 completed_seqno;
-	u32 acthd;
+	u32 completed_seqno = ring->get_seqno(ring, false);
+	u32 acthd = intel_ring_get_active_head(ring);
+	struct drm_i915_gem_request *request;
+
+	list_for_each_entry(request, &ring->request_list, list) {
+		if (i915_seqno_passed(completed_seqno, request->seqno))
+			continue;
 
-	acthd = intel_ring_get_active_head(ring);
-	completed_seqno = ring->get_seqno(ring, false);
+		i915_set_reset_status(ring, request, acthd);
+	}
+}
 
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+					struct intel_ring_buffer *ring)
+{
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -2370,9 +2395,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (request->seqno > completed_seqno)
-			i915_set_reset_status(ring, request, acthd);
-
 		i915_gem_free_request(request);
 	}
 
@@ -2414,8 +2436,16 @@ void i915_gem_reset(struct drm_device *dev)
 	struct intel_ring_buffer *ring;
 	int i;
 
+	/*
+	 * Before we free the objects from the requests, we need to inspect
+	 * them for finding the guilty party. As the requests only borrow
+	 * their reference to the objects, the inspection must be done first.
+	 */
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_reset_ring_status(dev_priv, ring);
+
 	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_lists(dev_priv, ring);
+		i915_gem_reset_ring_cleanup(dev_priv, ring);
 
 	i915_gem_cleanup_ringbuffer(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 41877045a1a0..e08acaba5402 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -345,10 +345,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	mutex_lock(&dev->struct_mutex);
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static struct i915_hw_context *
@@ -421,11 +419,21 @@ static int do_switch(struct i915_hw_context *to)
 	if (ret)
 		return ret;
 
-	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
+	/*
+	 * Pin can switch back to the default context if we end up calling into
+	 * evict_everything - as a last ditch gtt defrag effort that also
+	 * switches to the default context. Hence we need to reload from here.
+	 */
+	from = ring->last_context;
+
+	/*
+	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
 	 * that thanks to write = false in this call and us not setting any gpu
 	 * write domains when putting a context object onto the active list
 	 * (when switching away from it), this won't block.
-	 * XXX: We need a real interface to do this instead of trickery. */
+	 *
+	 * XXX: We need a real interface to do this instead of trickery.
+	 */
 	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
 	if (ret) {
 		i915_gem_object_unpin(to->obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b7376533633d..8f3adc7d0dc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
+search_again:
 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
 		if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ none:
 		list_del_init(&vma->exec_list);
 	}
 
-	/* We expect the caller to unpin, evict all and try again, or give up.
-	 * So calling i915_gem_evict_vm() is unnecessary.
+	/* Can we unpin some objects such as idle hw contents,
+	 * or pending flips?
 	 */
-	return -ENOSPC;
+	ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	/* Only idle the GPU and repeat the search once */
+	i915_gem_retire_requests(dev);
+	nonblocking = true;
+	goto search_again;
 
 found:
 	/* drm_mm doesn't allow any other other operations while
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 87652fafeb49..8d795626a25e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -93,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 {
 	struct drm_i915_gem_object *obj;
 	struct list_head objects;
-	int i, ret = 0;
+	int i, ret;
 
 	INIT_LIST_HEAD(&objects);
 	spin_lock(&file->table_lock);
@@ -106,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 			DRM_DEBUG("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
 			ret = -ENOENT;
-			goto out;
+			goto err;
 		}
 
 		if (!list_empty(&obj->obj_exec_link)) {
@@ -114,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 				   obj, exec[i].handle, i);
 			ret = -EINVAL;
-			goto out;
+			goto err;
 		}
 
 		drm_gem_object_reference(&obj->base);
@@ -123,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
 	spin_unlock(&file->table_lock);
 
 	i = 0;
-	list_for_each_entry(obj, &objects, obj_exec_link) {
+	while (!list_empty(&objects)) {
 		struct i915_vma *vma;
 
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
+
 		/*
 		 * NOTE: We can leak any vmas created here when something fails
 		 * later on. But that's no issue since vma_unbind can deal with
@@ -138,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		if (IS_ERR(vma)) {
 			DRM_DEBUG("Failed to lookup VMA\n");
 			ret = PTR_ERR(vma);
-			goto out;
+			goto err;
 		}
 
+		/* Transfer ownership from the objects list to the vmas list. */
 		list_add_tail(&vma->exec_list, &eb->vmas);
+		list_del_init(&obj->obj_exec_link);
 
 		vma->exec_entry = &exec[i];
 		if (eb->and < 0) {
@@ -155,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		++i;
 	}
 
+	return 0;
+
 
-out:
+err:
 	while (!list_empty(&objects)) {
 		obj = list_first_entry(&objects,
 				       struct drm_i915_gem_object,
 				       obj_exec_link);
 		list_del_init(&obj->obj_exec_link);
-		if (ret)
-			drm_gem_object_unreference(&obj->base);
+		drm_gem_object_unreference(&obj->base);
 	}
+	/*
+	 * Objects already transfered to the vmas list will be unreferenced by
+	 * eb_destroy.
+	 */
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a54eaabb3a3e..6c3a6e60aeac 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -299,23 +299,23 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 	unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
 	struct sg_page_iter sg_iter;
 
-	pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+	pt_vaddr = NULL;
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
-		dma_addr_t page_addr;
+		if (pt_vaddr == NULL)
+			pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
 
-		page_addr = sg_dma_address(sg_iter.sg) +
-			(sg_iter.sg_pgoffset << PAGE_SHIFT);
-		pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level,
-						    true);
+		pt_vaddr[act_pte] =
+			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
+					cache_level, true);
 		if (++act_pte == GEN8_PTES_PER_PAGE) {
 			kunmap_atomic(pt_vaddr);
+			pt_vaddr = NULL;
 			act_pt++;
-			pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
 			act_pte = 0;
-
 		}
 	}
-	kunmap_atomic(pt_vaddr);
+	if (pt_vaddr)
+		kunmap_atomic(pt_vaddr);
 }
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -583,21 +583,23 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	struct sg_page_iter sg_iter;
 
-	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+	pt_vaddr = NULL;
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
-		dma_addr_t page_addr;
+		if (pt_vaddr == NULL)
+			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
 
-		page_addr = sg_page_iter_dma_address(&sg_iter);
-		pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
+		pt_vaddr[act_pte] =
+			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
+				       cache_level, true);
 		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
 			kunmap_atomic(pt_vaddr);
+			pt_vaddr = NULL;
 			act_pt++;
-			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
 			act_pte = 0;
-
 		}
 	}
-	kunmap_atomic(pt_vaddr);
+	if (pt_vaddr)
+		kunmap_atomic(pt_vaddr);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -918,14 +920,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	WARN_ON(readq(&gtt_entries[i-1])
 		!= gen8_pte_encode(addr, level, true));
 
-#if 0 /* TODO: Still needed on GEN8? */
 	/* This next bit makes the above posting read even more important. We
 	 * want to flush the TLBs only after we're certain all the PTE updates
 	 * have finished.
 	 */
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
-#endif
 }
 
 /*
@@ -1440,6 +1440,9 @@ static int i915_gmch_probe(struct drm_device *dev,
 	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
 	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
 
+	if (unlikely(dev_priv->gtt.do_idle_maps))
+		DRM_INFO("applying Ironlake quirks for intel_iommu\n");
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index d284d892ed94..fed87ec17211 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -420,6 +420,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+	i915_gem_object_pin_pages(obj);
 
 	return obj;
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1d44c793bdf4..6d11e253218a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -62,7 +62,7 @@ static const u32 hpd_mask_i915[] = {
 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
 };
 
-static const u32 hpd_status_gen4[] = {
+static const u32 hpd_status_g4x[] = {
 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
@@ -1233,9 +1233,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
 	spin_lock(&dev_priv->irq_lock);
 	for (i = 1; i < HPD_NUM_PINS; i++) {
 
-		WARN(((hpd[i] & hotplug_trigger) &&
-		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
-		     "Received HPD interrupt although disabled\n");
+		WARN_ONCE(hpd[i] & hotplug_trigger &&
+			  dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
+			  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+			  hotplug_trigger, i, hpd[i]);
 
 		if (!(hpd[i] & hotplug_trigger) ||
 		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
@@ -2714,6 +2715,8 @@ static void gen8_irq_preinstall(struct drm_device *dev)
 #undef GEN8_IRQ_INIT_NDX
 
 	POSTING_READ(GEN8_PCU_IIR);
+
+	ibx_irq_preinstall(dev);
 }
 
 static void ibx_hpd_irq_setup(struct drm_device *dev)
@@ -3220,7 +3223,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 
 	for_each_pipe(pipe) {
 		int plane = pipe;
-		if (IS_MOBILE(dev))
+		if (HAS_FBC(dev))
 			plane = !plane;
 
 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
@@ -3421,7 +3424,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 
 	for_each_pipe(pipe) {
 		int plane = pipe;
-		if (IS_MOBILE(dev))
+		if (HAS_FBC(dev))
 			plane = !plane;
 
 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
@@ -3658,7 +3661,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 				  hotplug_status);
 
 		intel_hpd_irq_handler(dev, hotplug_trigger,
-				      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
+				      IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
 
 		if (IS_G4X(dev) &&
 		    (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f1eece4a63d5..76126e0ae609 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -193,10 +193,13 @@
 #define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
 #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
 #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
+#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
+#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
+#define MI_ARB_ENABLE (1<<0)
+#define MI_ARB_DISABLE (0<<0)
 #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
 #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
 #define MI_SUSPEND_FLUSH_EN (1<<0)
-#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
 #define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
 #define MI_OVERLAY_CONTINUE (0x0<<21)
 #define MI_OVERLAY_ON (0x1<<21)
@@ -212,10 +215,24 @@
 #define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
 #define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
 #define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
-#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
-#define MI_ARB_ENABLE (1<<0)
-#define MI_ARB_DISABLE (0<<0)
-
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
+#define MI_SEMAPHORE_UPDATE (1<<21)
+#define MI_SEMAPHORE_COMPARE (1<<20)
+#define MI_SEMAPHORE_REGISTER (1<<18)
+#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
+#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
+#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
+#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
+#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
+#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
+#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
+#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
+#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
+#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
+#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
+#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
+#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
 #define MI_SET_CONTEXT MI_INSTR(0x18, 0)
 #define MI_MM_SPACE_GTT (1<<8)
 #define MI_MM_SPACE_PHYSICAL (0<<8)
@@ -235,7 +252,7 @@
  */
 #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
 #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
-#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
+#define MI_SRM_LRM_GLOBAL_GTT	(1<<22)
 #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
 #define MI_FLUSH_DW_STORE_INDEX (1<<21)
 #define MI_INVALIDATE_TLB (1<<18)
@@ -246,30 +263,13 @@
 #define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
 #define MI_BATCH_NON_SECURE (1)
 /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
-#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_NON_SECURE_I965	(1<<8)
 #define MI_BATCH_PPGTT_HSW (1<<8)
 #define MI_BATCH_NON_SECURE_HSW (1<<13)
 #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
 #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
-#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
-#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
-#define MI_SEMAPHORE_UPDATE (1<<21)
-#define MI_SEMAPHORE_COMPARE (1<<20)
-#define MI_SEMAPHORE_REGISTER (1<<18)
-#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
-#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
-#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
-#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
-#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
-#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
-#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
-#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
-#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
-#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
-#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
-#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
-#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
+
 
 #define MI_PREDICATE_RESULT_2 (0x2214)
 #define LOWER_SLICE_ENABLED (1<<0)
@@ -3430,42 +3430,6 @@
 /* the unit of memory self-refresh latency time is 0.5us */
 #define ILK_SRLT_MASK 0x3f
 
-/* define the fifo size on Ironlake */
-#define ILK_DISPLAY_FIFO 128
-#define ILK_DISPLAY_MAXWM 64
-#define ILK_DISPLAY_DFTWM 8
-#define ILK_CURSOR_FIFO 32
-#define ILK_CURSOR_MAXWM 16
-#define ILK_CURSOR_DFTWM 8
-
-#define ILK_DISPLAY_SR_FIFO 512
-#define ILK_DISPLAY_MAX_SRWM 0x1ff
-#define ILK_DISPLAY_DFT_SRWM 0x3f
-#define ILK_CURSOR_SR_FIFO 64
-#define ILK_CURSOR_MAX_SRWM 0x3f
-#define ILK_CURSOR_DFT_SRWM 8
-
-#define ILK_FIFO_LINE_SIZE 64
-
-/* define the WM info on Sandybridge */
-#define SNB_DISPLAY_FIFO 128
-#define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */
-#define SNB_DISPLAY_DFTWM 8
-#define SNB_CURSOR_FIFO 32
-#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */
-#define SNB_CURSOR_DFTWM 8
-
-#define SNB_DISPLAY_SR_FIFO 512
-#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */
-#define SNB_DISPLAY_DFT_SRWM 0x3f
-#define SNB_CURSOR_SR_FIFO 64
-#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */
-#define SNB_CURSOR_DFT_SRWM 8
-
-#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */
-
-#define SNB_FIFO_LINE_SIZE 64
-
 
 /* the address where we get all kinds of latency value */
 #define SSKPD 0x5d10
@@ -4148,6 +4112,8 @@
 #define DISP_ARB_CTL 0x45000
 #define DISP_TILE_SURFACE_SWIZZLING (1<<13)
 #define DISP_FBC_WM_DIS (1<<15)
+#define DISP_ARB_CTL2 0x45004
+#define DISP_DATA_PARTITION_5_6 (1<<6)
 #define GEN7_MSG_CTL 0x45010
 #define WAIT_FOR_PCH_RESET_ACK (1<<1)
 #define WAIT_FOR_PCH_FLR_ACK (1<<0)
@@ -4856,6 +4822,8 @@
4856#define FORCEWAKE_ACK 0x130090 4822#define FORCEWAKE_ACK 0x130090
4857#define VLV_GTLC_WAKE_CTRL 0x130090 4823#define VLV_GTLC_WAKE_CTRL 0x130090
4858#define VLV_GTLC_PW_STATUS 0x130094 4824#define VLV_GTLC_PW_STATUS 0x130094
4825#define VLV_GTLC_PW_RENDER_STATUS_MASK 0x80
4826#define VLV_GTLC_PW_MEDIA_STATUS_MASK 0x20
4859#define FORCEWAKE_MT 0xa188 /* multi-threaded */ 4827#define FORCEWAKE_MT 0xa188 /* multi-threaded */
4860#define FORCEWAKE_KERNEL 0x1 4828#define FORCEWAKE_KERNEL 0x1
4861#define FORCEWAKE_USER 0x2 4829#define FORCEWAKE_USER 0x2
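The new VLV_GTLC_PW_*_STATUS_MASK bits describe the render and media power wells reported in VLV_GTLC_PW_STATUS. A minimal sketch of how a caller might test them — the helper name is hypothetical, and the polarity (bit set == well still powered) is an assumption:

    /* hypothetical helper: true when both VLV power wells read as down.
     * Assumes a set status bit means the well is still powered up. */
    static bool vlv_power_wells_idle(struct drm_i915_private *dev_priv)
    {
            u32 status = I915_READ(VLV_GTLC_PW_STATUS);

            return (status & (VLV_GTLC_PW_RENDER_STATUS_MASK |
                              VLV_GTLC_PW_MEDIA_STATUS_MASK)) == 0;
    }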
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 6b8fef7fb3bb..8150fdc08d49 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -237,7 +237,7 @@ static void i915_save_display(struct drm_device *dev)
237 } 237 }
238 238
239 /* Only regfile.save FBC state on the platform that supports FBC */ 239 /* Only regfile.save FBC state on the platform that supports FBC */
240 if (I915_HAS_FBC(dev)) { 240 if (HAS_FBC(dev)) {
241 if (HAS_PCH_SPLIT(dev)) { 241 if (HAS_PCH_SPLIT(dev)) {
242 dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); 242 dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
243 } else if (IS_GM45(dev)) { 243 } else if (IS_GM45(dev)) {
@@ -300,7 +300,7 @@ static void i915_restore_display(struct drm_device *dev)
300 300
301 /* only restore FBC info on the platform that supports FBC*/ 301 /* only restore FBC info on the platform that supports FBC*/
302 intel_disable_fbc(dev); 302 intel_disable_fbc(dev);
303 if (I915_HAS_FBC(dev)) { 303 if (HAS_FBC(dev)) {
304 if (HAS_PCH_SPLIT(dev)) { 304 if (HAS_PCH_SPLIT(dev)) {
305 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); 305 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
306 } else if (IS_GM45(dev)) { 306 } else if (IS_GM45(dev)) {
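The I915_HAS_FBC() -> HAS_FBC() switch here is a pure rename; both sides presumably expand to the same device-info lookup. Assumed shape of the renamed macro (the real definition lives in i915_drv.h, which this diff does not show):

    /* assumed definition, following the driver's device-info convention */
    #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)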
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f88e5079a3f5..f22041973f3a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -281,6 +281,34 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
281 } 281 }
282} 282}
283 283
284static void
285parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
286{
287 const struct bdb_lfp_backlight_data *backlight_data;
288 const struct bdb_lfp_backlight_data_entry *entry;
289
290 backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
291 if (!backlight_data)
292 return;
293
294 if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
295 DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
296 backlight_data->entry_size);
297 return;
298 }
299
300 entry = &backlight_data->data[panel_type];
301
302 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
303 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
304 DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
305 "active %s, min brightness %u, level %u\n",
306 dev_priv->vbt.backlight.pwm_freq_hz,
307 dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
308 entry->min_brightness,
309 backlight_data->level[panel_type]);
310}
311
284/* Try to find sdvo panel data */ 312/* Try to find sdvo panel data */
285static void 313static void
286parse_sdvo_panel_data(struct drm_i915_private *dev_priv, 314parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
@@ -894,6 +922,7 @@ intel_parse_bios(struct drm_device *dev)
894 parse_general_features(dev_priv, bdb); 922 parse_general_features(dev_priv, bdb);
895 parse_general_definitions(dev_priv, bdb); 923 parse_general_definitions(dev_priv, bdb);
896 parse_lfp_panel_data(dev_priv, bdb); 924 parse_lfp_panel_data(dev_priv, bdb);
925 parse_lfp_backlight(dev_priv, bdb);
897 parse_sdvo_panel_data(dev_priv, bdb); 926 parse_sdvo_panel_data(dev_priv, bdb);
898 parse_sdvo_device_mapping(dev_priv, bdb); 927 parse_sdvo_device_mapping(dev_priv, bdb);
899 parse_device_mapping(dev_priv, bdb); 928 parse_device_mapping(dev_priv, bdb);
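Call order matters in intel_parse_bios(): parse_lfp_backlight() indexes backlight_data->data[panel_type], so it has to run after parse_lfp_panel_data() — assuming panel_type is the module-level index that function establishes, as the lookup suggests:

    /* sketch of the ordering dependency between the two parsers */
    parse_lfp_panel_data(dev_priv, bdb);   /* establishes panel_type */
    parse_lfp_backlight(dev_priv, bdb);    /* indexes data[panel_type] */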
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 81ed58cb7b31..282de5e9f39d 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -373,6 +373,22 @@ struct bdb_lvds_lfp_data {
373 struct bdb_lvds_lfp_data_entry data[16]; 373 struct bdb_lvds_lfp_data_entry data[16];
374} __packed; 374} __packed;
375 375
376struct bdb_lfp_backlight_data_entry {
377 u8 type:2;
378 u8 active_low_pwm:1;
379 u8 obsolete1:5;
380 u16 pwm_freq_hz;
381 u8 min_brightness;
382 u8 obsolete2;
383 u8 obsolete3;
384} __packed;
385
386struct bdb_lfp_backlight_data {
387 u8 entry_size;
388 struct bdb_lfp_backlight_data_entry data[16];
389 u8 level[16];
390} __packed;
391
376struct aimdb_header { 392struct aimdb_header {
377 char signature[16]; 393 char signature[16];
378 char oem_device[20]; 394 char oem_device[20];
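Because both structs are __packed, each bdb_lfp_backlight_data_entry occupies exactly six bytes: one byte of bitfield flags, the 16-bit PWM frequency, and three trailing bytes. That is the value the entry_size check in parse_lfp_backlight() compares against. A compile-time sketch of the invariant (the wrapper function is illustrative only):

    #include <linux/bug.h>

    /* sketch: the VBT entry must stay 6 bytes, or parse_lfp_backlight()
     * will reject the table */
    static inline void bdb_backlight_layout_check(void)
    {
            BUILD_BUG_ON(sizeof(struct bdb_lfp_backlight_data_entry) != 6);
    }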
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index d7d2683b89df..e06b9e017d6b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1136,12 +1136,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
1136 enum pipe pipe; 1136 enum pipe pipe;
1137 struct intel_crtc *intel_crtc; 1137 struct intel_crtc *intel_crtc;
1138 1138
1139 dev_priv->ddi_plls.spll_refcount = 0;
1140 dev_priv->ddi_plls.wrpll1_refcount = 0;
1141 dev_priv->ddi_plls.wrpll2_refcount = 0;
1142
1139 for_each_pipe(pipe) { 1143 for_each_pipe(pipe) {
1140 intel_crtc = 1144 intel_crtc =
1141 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 1145 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1142 1146
1143 if (!intel_crtc->active) 1147 if (!intel_crtc->active) {
1148 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
1144 continue; 1149 continue;
1150 }
1145 1151
1146 intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, 1152 intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
1147 pipe); 1153 pipe);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 72a83fabb105..e77d4b8856a7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1211,15 +1211,12 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1211 } 1211 }
1212} 1212}
1213 1213
1214static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) 1214static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1215{ 1215{
1216 u32 val; 1216 u32 val;
1217 bool enabled; 1217 bool enabled;
1218 1218
1219 if (HAS_PCH_LPT(dev_priv->dev)) { 1219 WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1220 DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
1221 return;
1222 }
1223 1220
1224 val = I915_READ(PCH_DREF_CONTROL); 1221 val = I915_READ(PCH_DREF_CONTROL);
1225 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 1222 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
@@ -1367,11 +1364,24 @@ static void intel_init_dpio(struct drm_device *dev)
1367 if (!IS_VALLEYVIEW(dev)) 1364 if (!IS_VALLEYVIEW(dev))
1368 return; 1365 return;
1369 1366
1370 /* Enable the CRI clock source so we can get at the display */ 1367 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1368}
1369
1370static void intel_reset_dpio(struct drm_device *dev)
1371{
1372 struct drm_i915_private *dev_priv = dev->dev_private;
1373
1374 if (!IS_VALLEYVIEW(dev))
1375 return;
1376
1377 /*
1378 * Enable the CRI clock source so we can get at the display and the
1379 * reference clock for VGA hotplug / manual detection.
1380 */
1371 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | 1381 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
1382 DPLL_REFA_CLK_ENABLE_VLV |
1372 DPLL_INTEGRATED_CRI_CLK_VLV); 1383 DPLL_INTEGRATED_CRI_CLK_VLV);
1373 1384
1374 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1375 /* 1385 /*
1376 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - 1386 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1377 * 6. De-assert cmn_reset/side_reset. Same as VLV X0. 1387 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
@@ -1498,9 +1508,12 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1498 /* Make sure the pipe isn't still relying on us */ 1508 /* Make sure the pipe isn't still relying on us */
1499 assert_pipe_disabled(dev_priv, pipe); 1509 assert_pipe_disabled(dev_priv, pipe);
1500 1510
1501 /* Leave integrated clock source enabled */ 1511 /*
1512 * Leave integrated clock source and reference clock enabled for pipe B.
1513 * The latter is needed for VGA hotplug / manual detection.
1514 */
1502 if (pipe == PIPE_B) 1515 if (pipe == PIPE_B)
1503 val = DPLL_INTEGRATED_CRI_CLK_VLV; 1516 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1504 I915_WRITE(DPLL(pipe), val); 1517 I915_WRITE(DPLL(pipe), val);
1505 POSTING_READ(DPLL(pipe)); 1518 POSTING_READ(DPLL(pipe));
1506} 1519}
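Spelled out, the value vlv_disable_pll() now leaves behind (assuming val starts at zero, as the unchanged context suggests):

    /* value written to DPLL(pipe) on disable, after this change:
     *   pipes A/C: 0
     *   pipe B:    DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV
     * i.e. pipe B keeps both the CRI clock and the reference clock alive
     * so VGA hotplug / manual detection keeps working.
     */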
@@ -2373,6 +2386,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2373 I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0); 2386 I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2374 I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0); 2387 I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2375 } 2388 }
2389 intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2390 intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2376 } 2391 }
2377 2392
2378 ret = dev_priv->display.update_plane(crtc, fb, x, y); 2393 ret = dev_priv->display.update_plane(crtc, fb, x, y);
@@ -3422,9 +3437,8 @@ void hsw_enable_ips(struct intel_crtc *crtc)
3422 mutex_unlock(&dev_priv->rps.hw_lock); 3437 mutex_unlock(&dev_priv->rps.hw_lock);
3423 /* Quoting Art Runyan: "its not safe to expect any particular 3438 /* Quoting Art Runyan: "its not safe to expect any particular
3424 * value in IPS_CTL bit 31 after enabling IPS through the 3439 * value in IPS_CTL bit 31 after enabling IPS through the
3425 * mailbox." Therefore we need to defer waiting on the state 3440 * mailbox." Moreover, the mailbox may return a bogus state,
3426 * change. 3441 * so we need to just enable it and continue on.
3427 * TODO: need to fix this for state checker
3428 */ 3442 */
3429 } else { 3443 } else {
3430 I915_WRITE(IPS_CTL, IPS_ENABLE); 3444 I915_WRITE(IPS_CTL, IPS_ENABLE);
@@ -3451,9 +3465,10 @@ void hsw_disable_ips(struct intel_crtc *crtc)
3451 mutex_lock(&dev_priv->rps.hw_lock); 3465 mutex_lock(&dev_priv->rps.hw_lock);
3452 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 3466 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3453 mutex_unlock(&dev_priv->rps.hw_lock); 3467 mutex_unlock(&dev_priv->rps.hw_lock);
3454 } else 3468 } else {
3455 I915_WRITE(IPS_CTL, 0); 3469 I915_WRITE(IPS_CTL, 0);
3456 POSTING_READ(IPS_CTL); 3470 POSTING_READ(IPS_CTL);
3471 }
3457 3472
3458 /* We need to wait for a vblank before we can disable the plane. */ 3473 /* We need to wait for a vblank before we can disable the plane. */
3459 intel_wait_for_vblank(dev, crtc->pipe); 3474 intel_wait_for_vblank(dev, crtc->pipe);
@@ -3488,7 +3503,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
3488 /* Workaround : Do not read or write the pipe palette/gamma data while 3503 /* Workaround : Do not read or write the pipe palette/gamma data while
3489 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 3504 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3490 */ 3505 */
3491 if (intel_crtc->config.ips_enabled && 3506 if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
3492 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) == 3507 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
3493 GAMMA_MODE_MODE_SPLIT)) { 3508 GAMMA_MODE_MODE_SPLIT)) {
3494 hsw_disable_ips(intel_crtc); 3509 hsw_disable_ips(intel_crtc);
@@ -4975,7 +4990,11 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4975 4990
4976 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 4991 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
4977 4992
4978 /* Enable DPIO clock input */ 4993 /*
4994 * Enable DPIO clock input. We should never disable the reference
4995 * clock for pipe B, since VGA hotplug / manual detection depends
4996 * on it.
4997 */
4979 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | 4998 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
4980 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; 4999 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
4981 /* We should never disable this, set it here for state tracking */ 5000 /* We should never disable this, set it here for state tracking */
@@ -5420,6 +5439,9 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
5420 struct drm_i915_private *dev_priv = dev->dev_private; 5439 struct drm_i915_private *dev_priv = dev->dev_private;
5421 uint32_t tmp; 5440 uint32_t tmp;
5422 5441
5442 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
5443 return;
5444
5423 tmp = I915_READ(PFIT_CONTROL); 5445 tmp = I915_READ(PFIT_CONTROL);
5424 if (!(tmp & PFIT_ENABLE)) 5446 if (!(tmp & PFIT_ENABLE))
5425 return; 5447 return;
@@ -6995,8 +7017,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
6995 if (intel_display_power_enabled(dev, pfit_domain)) 7017 if (intel_display_power_enabled(dev, pfit_domain))
6996 ironlake_get_pfit_config(crtc, pipe_config); 7018 ironlake_get_pfit_config(crtc, pipe_config);
6997 7019
6998 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && 7020 if (IS_HASWELL(dev))
6999 (I915_READ(IPS_CTL) & IPS_ENABLE); 7021 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
7022 (I915_READ(IPS_CTL) & IPS_ENABLE);
7000 7023
7001 pipe_config->pixel_multiplier = 1; 7024 pipe_config->pixel_multiplier = 1;
7002 7025
@@ -7951,7 +7974,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7951 else 7974 else
7952 i9xx_clock(refclk, &clock); 7975 i9xx_clock(refclk, &clock);
7953 } else { 7976 } else {
7954 u32 lvds = I915_READ(LVDS); 7977 u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
7955 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 7978 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
7956 7979
7957 if (is_lvds) { 7980 if (is_lvds) {
@@ -9326,7 +9349,9 @@ intel_pipe_config_compare(struct drm_device *dev,
9326 PIPE_CONF_CHECK_I(pch_pfit.size); 9349 PIPE_CONF_CHECK_I(pch_pfit.size);
9327 } 9350 }
9328 9351
9329 PIPE_CONF_CHECK_I(ips_enabled); 9352 /* BDW+ don't expose a synchronous way to read the state */
9353 if (IS_HASWELL(dev))
9354 PIPE_CONF_CHECK_I(ips_enabled);
9330 9355
9331 PIPE_CONF_CHECK_I(double_wide); 9356 PIPE_CONF_CHECK_I(double_wide);
9332 9357
@@ -9339,7 +9364,7 @@ intel_pipe_config_compare(struct drm_device *dev,
9339 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 9364 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
9340 PIPE_CONF_CHECK_I(pipe_bpp); 9365 PIPE_CONF_CHECK_I(pipe_bpp);
9341 9366
9342 if (!IS_HASWELL(dev)) { 9367 if (!HAS_DDI(dev)) {
9343 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); 9368 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
9344 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 9369 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9345 } 9370 }
@@ -9913,17 +9938,21 @@ intel_modeset_stage_output_state(struct drm_device *dev,
9913 /* Check for any encoders that needs to be disabled. */ 9938 /* Check for any encoders that needs to be disabled. */
9914 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 9939 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9915 base.head) { 9940 base.head) {
9941 int num_connectors = 0;
9916 list_for_each_entry(connector, 9942 list_for_each_entry(connector,
9917 &dev->mode_config.connector_list, 9943 &dev->mode_config.connector_list,
9918 base.head) { 9944 base.head) {
9919 if (connector->new_encoder == encoder) { 9945 if (connector->new_encoder == encoder) {
9920 WARN_ON(!connector->new_encoder->new_crtc); 9946 WARN_ON(!connector->new_encoder->new_crtc);
9921 9947 num_connectors++;
9922 goto next_encoder;
9923 } 9948 }
9924 } 9949 }
9925 encoder->new_crtc = NULL; 9950
9926next_encoder: 9951 if (num_connectors == 0)
9952 encoder->new_crtc = NULL;
9953 else if (num_connectors > 1)
9954 return -EINVAL;
9955
9927 /* Only now check for crtc changes so we don't miss encoders 9956 /* Only now check for crtc changes so we don't miss encoders
9928 * that will be disabled. */ 9957 * that will be disabled. */
9929 if (&encoder->new_crtc->base != encoder->base.crtc) { 9958 if (&encoder->new_crtc->base != encoder->base.crtc) {
@@ -9994,6 +10023,16 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
9994 10023
9995 ret = intel_pipe_set_base(set->crtc, 10024 ret = intel_pipe_set_base(set->crtc,
9996 set->x, set->y, set->fb); 10025 set->x, set->y, set->fb);
10026 /*
10027 * In the fastboot case this may be our only check of the
10028 * state after boot. It would be better to only do it on
10029 * the first update, but we don't have a nice way of doing that
10030 * (and really, set_config isn't used much for high freq page
10031 * flipping, so increasing its cost here shouldn't be a big
10032 * deal).
10033 */
10034 if (i915_fastboot && ret == 0)
10035 intel_modeset_check_state(set->crtc->dev);
9997 } 10036 }
9998 10037
9999 if (ret) { 10038 if (ret) {
@@ -10054,7 +10093,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
10054 struct intel_shared_dpll *pll) 10093 struct intel_shared_dpll *pll)
10055{ 10094{
10056 /* PCH refclock must be enabled first */ 10095 /* PCH refclock must be enabled first */
10057 assert_pch_refclk_enabled(dev_priv); 10096 ibx_assert_pch_refclk_enabled(dev_priv);
10058 10097
10059 I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll); 10098 I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
10060 10099
@@ -10122,8 +10161,6 @@ static void intel_shared_dpll_init(struct drm_device *dev)
10122 dev_priv->num_shared_dpll = 0; 10161 dev_priv->num_shared_dpll = 0;
10123 10162
10124 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); 10163 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
10125 DRM_DEBUG_KMS("%i shared PLLs initialized\n",
10126 dev_priv->num_shared_dpll);
10127} 10164}
10128 10165
10129static void intel_crtc_init(struct drm_device *dev, int pipe) 10166static void intel_crtc_init(struct drm_device *dev, int pipe)
@@ -10151,7 +10188,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
10151 */ 10188 */
10152 intel_crtc->pipe = pipe; 10189 intel_crtc->pipe = pipe;
10153 intel_crtc->plane = pipe; 10190 intel_crtc->plane = pipe;
10154 if (IS_MOBILE(dev) && INTEL_INFO(dev)->gen < 4) { 10191 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
10155 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 10192 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
10156 intel_crtc->plane = !pipe; 10193 intel_crtc->plane = !pipe;
10157 } 10194 }
@@ -10240,6 +10277,28 @@ static bool has_edp_a(struct drm_device *dev)
10240 return true; 10277 return true;
10241} 10278}
10242 10279
10280const char *intel_output_name(int output)
10281{
10282 static const char *names[] = {
10283 [INTEL_OUTPUT_UNUSED] = "Unused",
10284 [INTEL_OUTPUT_ANALOG] = "Analog",
10285 [INTEL_OUTPUT_DVO] = "DVO",
10286 [INTEL_OUTPUT_SDVO] = "SDVO",
10287 [INTEL_OUTPUT_LVDS] = "LVDS",
10288 [INTEL_OUTPUT_TVOUT] = "TV",
10289 [INTEL_OUTPUT_HDMI] = "HDMI",
10290 [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
10291 [INTEL_OUTPUT_EDP] = "eDP",
10292 [INTEL_OUTPUT_DSI] = "DSI",
10293 [INTEL_OUTPUT_UNKNOWN] = "Unknown",
10294 };
10295
10296 if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
10297 return "Invalid";
10298
10299 return names[output];
10300}
10301
10243static void intel_setup_outputs(struct drm_device *dev) 10302static void intel_setup_outputs(struct drm_device *dev)
10244{ 10303{
10245 struct drm_i915_private *dev_priv = dev->dev_private; 10304 struct drm_i915_private *dev_priv = dev->dev_private;
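The intel_output_name() helper added above is a guarded table lookup: any out-of-range or unnamed value falls back to "Invalid" rather than dereferencing a NULL entry. A usage sketch (the call site is hypothetical; INTEL_OUTPUT_HDMI is one of the existing enum values):

    /* e.g. from a debug path */
    DRM_DEBUG_KMS("encoder type: %s\n", intel_output_name(INTEL_OUTPUT_HDMI));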
@@ -10757,11 +10816,20 @@ static struct intel_quirk intel_quirks[] = {
10757 /* Sony Vaio Y cannot use SSC on LVDS */ 10816 /* Sony Vaio Y cannot use SSC on LVDS */
10758 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 10817 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
10759 10818
10760 /* 10819 /* Acer Aspire 5734Z must invert backlight brightness */
10761 * All GM45 Acer (and its brands eMachines and Packard Bell) laptops 10820 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
10762 * seem to use inverted backlight PWM. 10821
10763 */ 10822 /* Acer/eMachines G725 */
10764 { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness }, 10823 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
10824
10825 /* Acer/eMachines e725 */
10826 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
10827
10828 /* Acer/Packard Bell NCL20 */
10829 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
10830
10831 /* Acer Aspire 4736Z */
10832 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
10765}; 10833};
10766 10834
10767static void intel_init_quirks(struct drm_device *dev) 10835static void intel_init_quirks(struct drm_device *dev)
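Replacing the PCI_ANY_ID wildcard with per-board subsystem IDs narrows the brightness-inversion quirk to machines known to need it. For context, a sketch of the matching loop such a table feeds, assuming the conventional {device, subsystem_vendor, subsystem_device, hook} layout of struct intel_quirk (which the initializers above imply) and d being the struct pci_dev under probe:

    /* sketch: apply every quirk whose IDs match this device */
    for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
            struct intel_quirk *q = &intel_quirks[i];

            if (d->device == q->device &&
                (d->subsystem_vendor == q->subsystem_vendor ||
                 q->subsystem_vendor == PCI_ANY_ID) &&
                (d->subsystem_device == q->subsystem_device ||
                 q->subsystem_device == PCI_ANY_ID))
                    q->hook(dev);
    }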
@@ -10809,7 +10877,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
10809 10877
10810 intel_init_clock_gating(dev); 10878 intel_init_clock_gating(dev);
10811 10879
10812 intel_init_dpio(dev); 10880 intel_reset_dpio(dev);
10813 10881
10814 mutex_lock(&dev->struct_mutex); 10882 mutex_lock(&dev->struct_mutex);
10815 intel_enable_gt_powersave(dev); 10883 intel_enable_gt_powersave(dev);
@@ -10871,6 +10939,9 @@ void intel_modeset_init(struct drm_device *dev)
10871 } 10939 }
10872 } 10940 }
10873 10941
10942 intel_init_dpio(dev);
10943 intel_reset_dpio(dev);
10944
10874 intel_cpu_pll_init(dev); 10945 intel_cpu_pll_init(dev);
10875 intel_shared_dpll_init(dev); 10946 intel_shared_dpll_init(dev);
10876 10947
@@ -11218,7 +11289,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
11218 pll->on = false; 11289 pll->on = false;
11219 } 11290 }
11220 11291
11221 if (IS_HASWELL(dev)) 11292 if (HAS_PCH_SPLIT(dev))
11222 ilk_wm_get_hw_state(dev); 11293 ilk_wm_get_hw_state(dev);
11223 11294
11224 if (force_restore) { 11295 if (force_restore) {
@@ -11240,8 +11311,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
11240 } 11311 }
11241 11312
11242 intel_modeset_check_state(dev); 11313 intel_modeset_check_state(dev);
11243
11244 drm_mode_config_reset(dev);
11245} 11314}
11246 11315
11247void intel_modeset_gem_init(struct drm_device *dev) 11316void intel_modeset_gem_init(struct drm_device *dev)
@@ -11250,7 +11319,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
11250 11319
11251 intel_setup_overlay(dev); 11320 intel_setup_overlay(dev);
11252 11321
11322 mutex_lock(&dev->mode_config.mutex);
11323 drm_mode_config_reset(dev);
11253 intel_modeset_setup_hw_state(dev, false); 11324 intel_modeset_setup_hw_state(dev, false);
11325 mutex_unlock(&dev->mode_config.mutex);
11254} 11326}
11255 11327
11256void intel_modeset_cleanup(struct drm_device *dev) 11328void intel_modeset_cleanup(struct drm_device *dev)
@@ -11328,14 +11400,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
11328int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 11400int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
11329{ 11401{
11330 struct drm_i915_private *dev_priv = dev->dev_private; 11402 struct drm_i915_private *dev_priv = dev->dev_private;
11403 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
11331 u16 gmch_ctrl; 11404 u16 gmch_ctrl;
11332 11405
11333 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); 11406 pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
11334 if (state) 11407 if (state)
11335 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 11408 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
11336 else 11409 else
11337 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 11410 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
11338 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); 11411 pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
11339 return 0; 11412 return 0;
11340} 11413}
11341 11414
@@ -11445,7 +11518,8 @@ intel_display_capture_error_state(struct drm_device *dev)
11445 enum transcoder cpu_transcoder = transcoders[i]; 11518 enum transcoder cpu_transcoder = transcoders[i];
11446 11519
11447 error->transcoder[i].power_domain_on = 11520 error->transcoder[i].power_domain_on =
11448 intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i)); 11521 intel_display_power_enabled_sw(dev,
11522 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
11449 if (!error->transcoder[i].power_domain_on) 11523 if (!error->transcoder[i].power_domain_on)
11450 continue; 11524 continue;
11451 11525
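The error-capture fix keys the software power-well check off the transcoder domain instead of the pipe domain; the two diverge once the eDP transcoder is involved:

    /* the distinction, for a pipe driven via the eDP transcoder:
     *   old: POWER_DOMAIN_PIPE(PIPE_A)                 -- pipe domain
     *   new: POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP)   -- transcoder domain
     * only the latter reflects the power well the transcoder sits in, so
     * checking the pipe well could green-light reading powered-off registers.
     */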
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ea00068cced2..8754db9e3d52 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -65,8 +65,8 @@
65#define wait_for_atomic_us(COND, US) _wait_for((COND), \ 65#define wait_for_atomic_us(COND, US) _wait_for((COND), \
66 DIV_ROUND_UP((US), 1000), 0) 66 DIV_ROUND_UP((US), 1000), 0)
67 67
68#define KHz(x) (1000*x) 68#define KHz(x) (1000 * (x))
69#define MHz(x) KHz(1000*x) 69#define MHz(x) KHz(1000 * (x))
70 70
71/* 71/*
72 * Display related stuff 72 * Display related stuff
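The added parentheses in KHz()/MHz() matter for composite arguments, where operator precedence otherwise miscomputes silently. A worked expansion:

    /* before: #define KHz(x) (1000*x)
     *   KHz(1 + 2)  expands to  (1000*1 + 2)      == 1002   (wrong)
     * after:  #define KHz(x) (1000 * (x))
     *   KHz(1 + 2)  expands to  (1000 * (1 + 2))  == 3000   (intended)
     */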
@@ -625,6 +625,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
625 625
626 626
627/* intel_display.c */ 627/* intel_display.c */
628const char *intel_output_name(int output);
628int intel_pch_rawclk(struct drm_device *dev); 629int intel_pch_rawclk(struct drm_device *dev);
629void intel_mark_busy(struct drm_device *dev); 630void intel_mark_busy(struct drm_device *dev);
630void intel_mark_fb_busy(struct drm_i915_gem_object *obj, 631void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -838,6 +839,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
838 uint32_t sprite_width, int pixel_size, 839 uint32_t sprite_width, int pixel_size,
839 bool enabled, bool scaled); 840 bool enabled, bool scaled);
840void intel_init_pm(struct drm_device *dev); 841void intel_init_pm(struct drm_device *dev);
842void intel_pm_setup(struct drm_device *dev);
841bool intel_fbc_enabled(struct drm_device *dev); 843bool intel_fbc_enabled(struct drm_device *dev);
842void intel_update_fbc(struct drm_device *dev); 844void intel_update_fbc(struct drm_device *dev);
843void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 845void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 284c3eb066f6..39eac9937a4a 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -328,8 +328,6 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
328 fb_set_suspend(info, state); 328 fb_set_suspend(info, state);
329} 329}
330 330
331MODULE_LICENSE("GPL and additional rights");
332
333void intel_fbdev_output_poll_changed(struct drm_device *dev) 331void intel_fbdev_output_poll_changed(struct drm_device *dev)
334{ 332{
335 struct drm_i915_private *dev_priv = dev->dev_private; 333 struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a98a990fbab3..a759ecdb7a6e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1005,7 +1005,7 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
1005 u32 pfit_control; 1005 u32 pfit_control;
1006 1006
1007 /* i830 doesn't have a panel fitter */ 1007 /* i830 doesn't have a panel fitter */
1008 if (IS_I830(dev)) 1008 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
1009 return -1; 1009 return -1;
1010 1010
1011 pfit_control = I915_READ(PFIT_CONTROL); 1011 pfit_control = I915_READ(PFIT_CONTROL);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 04b28f906f9e..d77cc81900f9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -461,7 +461,7 @@ void intel_update_fbc(struct drm_device *dev)
461 const struct drm_display_mode *adjusted_mode; 461 const struct drm_display_mode *adjusted_mode;
462 unsigned int max_width, max_height; 462 unsigned int max_width, max_height;
463 463
464 if (!I915_HAS_FBC(dev)) { 464 if (!HAS_FBC(dev)) {
465 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED); 465 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
466 return; 466 return;
467 } 467 }
@@ -824,7 +824,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
824 return size; 824 return size;
825} 825}
826 826
827static int i85x_get_fifo_size(struct drm_device *dev, int plane) 827static int i830_get_fifo_size(struct drm_device *dev, int plane)
828{ 828{
829 struct drm_i915_private *dev_priv = dev->dev_private; 829 struct drm_i915_private *dev_priv = dev->dev_private;
830 uint32_t dsparb = I915_READ(DSPARB); 830 uint32_t dsparb = I915_READ(DSPARB);
@@ -857,21 +857,6 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
857 return size; 857 return size;
858} 858}
859 859
860static int i830_get_fifo_size(struct drm_device *dev, int plane)
861{
862 struct drm_i915_private *dev_priv = dev->dev_private;
863 uint32_t dsparb = I915_READ(DSPARB);
864 int size;
865
866 size = dsparb & 0x7f;
867 size >>= 1; /* Convert to cachelines */
868
869 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
870 plane ? "B" : "A", size);
871
872 return size;
873}
874
875/* Pineview has different values for various configs */ 860/* Pineview has different values for various configs */
876static const struct intel_watermark_params pineview_display_wm = { 861static const struct intel_watermark_params pineview_display_wm = {
877 PINEVIEW_DISPLAY_FIFO, 862 PINEVIEW_DISPLAY_FIFO,
@@ -950,14 +935,14 @@ static const struct intel_watermark_params i915_wm_info = {
950 2, 935 2,
951 I915_FIFO_LINE_SIZE 936 I915_FIFO_LINE_SIZE
952}; 937};
953static const struct intel_watermark_params i855_wm_info = { 938static const struct intel_watermark_params i830_wm_info = {
954 I855GM_FIFO_SIZE, 939 I855GM_FIFO_SIZE,
955 I915_MAX_WM, 940 I915_MAX_WM,
956 1, 941 1,
957 2, 942 2,
958 I830_FIFO_LINE_SIZE 943 I830_FIFO_LINE_SIZE
959}; 944};
960static const struct intel_watermark_params i830_wm_info = { 945static const struct intel_watermark_params i845_wm_info = {
961 I830_FIFO_SIZE, 946 I830_FIFO_SIZE,
962 I915_MAX_WM, 947 I915_MAX_WM,
963 1, 948 1,
@@ -965,65 +950,6 @@ static const struct intel_watermark_params i830_wm_info = {
965 I830_FIFO_LINE_SIZE 950 I830_FIFO_LINE_SIZE
966}; 951};
967 952
968static const struct intel_watermark_params ironlake_display_wm_info = {
969 ILK_DISPLAY_FIFO,
970 ILK_DISPLAY_MAXWM,
971 ILK_DISPLAY_DFTWM,
972 2,
973 ILK_FIFO_LINE_SIZE
974};
975static const struct intel_watermark_params ironlake_cursor_wm_info = {
976 ILK_CURSOR_FIFO,
977 ILK_CURSOR_MAXWM,
978 ILK_CURSOR_DFTWM,
979 2,
980 ILK_FIFO_LINE_SIZE
981};
982static const struct intel_watermark_params ironlake_display_srwm_info = {
983 ILK_DISPLAY_SR_FIFO,
984 ILK_DISPLAY_MAX_SRWM,
985 ILK_DISPLAY_DFT_SRWM,
986 2,
987 ILK_FIFO_LINE_SIZE
988};
989static const struct intel_watermark_params ironlake_cursor_srwm_info = {
990 ILK_CURSOR_SR_FIFO,
991 ILK_CURSOR_MAX_SRWM,
992 ILK_CURSOR_DFT_SRWM,
993 2,
994 ILK_FIFO_LINE_SIZE
995};
996
997static const struct intel_watermark_params sandybridge_display_wm_info = {
998 SNB_DISPLAY_FIFO,
999 SNB_DISPLAY_MAXWM,
1000 SNB_DISPLAY_DFTWM,
1001 2,
1002 SNB_FIFO_LINE_SIZE
1003};
1004static const struct intel_watermark_params sandybridge_cursor_wm_info = {
1005 SNB_CURSOR_FIFO,
1006 SNB_CURSOR_MAXWM,
1007 SNB_CURSOR_DFTWM,
1008 2,
1009 SNB_FIFO_LINE_SIZE
1010};
1011static const struct intel_watermark_params sandybridge_display_srwm_info = {
1012 SNB_DISPLAY_SR_FIFO,
1013 SNB_DISPLAY_MAX_SRWM,
1014 SNB_DISPLAY_DFT_SRWM,
1015 2,
1016 SNB_FIFO_LINE_SIZE
1017};
1018static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
1019 SNB_CURSOR_SR_FIFO,
1020 SNB_CURSOR_MAX_SRWM,
1021 SNB_CURSOR_DFT_SRWM,
1022 2,
1023 SNB_FIFO_LINE_SIZE
1024};
1025
1026
1027/** 953/**
1028 * intel_calculate_wm - calculate watermark level 954 * intel_calculate_wm - calculate watermark level
1029 * @clock_in_khz: pixel clock 955 * @clock_in_khz: pixel clock
@@ -1574,7 +1500,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1574 else if (!IS_GEN2(dev)) 1500 else if (!IS_GEN2(dev))
1575 wm_info = &i915_wm_info; 1501 wm_info = &i915_wm_info;
1576 else 1502 else
1577 wm_info = &i855_wm_info; 1503 wm_info = &i830_wm_info;
1578 1504
1579 fifo_size = dev_priv->display.get_fifo_size(dev, 0); 1505 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1580 crtc = intel_get_crtc_for_plane(dev, 0); 1506 crtc = intel_get_crtc_for_plane(dev, 0);
@@ -1622,7 +1548,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1622 if (IS_I945G(dev) || IS_I945GM(dev)) 1548 if (IS_I945G(dev) || IS_I945GM(dev))
1623 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); 1549 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1624 else if (IS_I915GM(dev)) 1550 else if (IS_I915GM(dev))
1625 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); 1551 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
1626 1552
1627 /* Calc sr entries for one plane configs */ 1553 /* Calc sr entries for one plane configs */
1628 if (HAS_FW_BLC(dev) && enabled) { 1554 if (HAS_FW_BLC(dev) && enabled) {
@@ -1674,14 +1600,14 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1674 I915_WRITE(FW_BLC_SELF, 1600 I915_WRITE(FW_BLC_SELF,
1675 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); 1601 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1676 else if (IS_I915GM(dev)) 1602 else if (IS_I915GM(dev))
1677 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); 1603 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
1678 DRM_DEBUG_KMS("memory self refresh enabled\n"); 1604 DRM_DEBUG_KMS("memory self refresh enabled\n");
1679 } else 1605 } else
1680 DRM_DEBUG_KMS("memory self refresh disabled\n"); 1606 DRM_DEBUG_KMS("memory self refresh disabled\n");
1681 } 1607 }
1682} 1608}
1683 1609
1684static void i830_update_wm(struct drm_crtc *unused_crtc) 1610static void i845_update_wm(struct drm_crtc *unused_crtc)
1685{ 1611{
1686 struct drm_device *dev = unused_crtc->dev; 1612 struct drm_device *dev = unused_crtc->dev;
1687 struct drm_i915_private *dev_priv = dev->dev_private; 1613 struct drm_i915_private *dev_priv = dev->dev_private;
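INSTPM, as touched above, is one of the "masked" registers where the high 16 bits select which of the low 16 bits a write affects, so _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() replace the old read-modify-write with a single write. Their assumed shape, matching the usual i915_reg.h definitions:

    /* set the mask bit, plus the value bit when enabling */
    #define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
    #define _MASKED_BIT_DISABLE(a)  ((a) << 16)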
@@ -1696,7 +1622,7 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
1696 1622
1697 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1623 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1698 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, 1624 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1699 &i830_wm_info, 1625 &i845_wm_info,
1700 dev_priv->display.get_fifo_size(dev, 0), 1626 dev_priv->display.get_fifo_size(dev, 0),
1701 4, latency_ns); 1627 4, latency_ns);
1702 fwater_lo = I915_READ(FW_BLC) & ~0xfff; 1628 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1707,423 +1633,6 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
1707 I915_WRITE(FW_BLC, fwater_lo); 1633 I915_WRITE(FW_BLC, fwater_lo);
1708} 1634}
1709 1635
1710/*
1711 * Check the wm result.
1712 *
1713 * If any calculated watermark values is larger than the maximum value that
1714 * can be programmed into the associated watermark register, that watermark
1715 * must be disabled.
1716 */
1717static bool ironlake_check_srwm(struct drm_device *dev, int level,
1718 int fbc_wm, int display_wm, int cursor_wm,
1719 const struct intel_watermark_params *display,
1720 const struct intel_watermark_params *cursor)
1721{
1722 struct drm_i915_private *dev_priv = dev->dev_private;
1723
1724 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1725 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1726
1727 if (fbc_wm > SNB_FBC_MAX_SRWM) {
1728 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1729 fbc_wm, SNB_FBC_MAX_SRWM, level);
1730
1731 /* fbc has it's own way to disable FBC WM */
1732 I915_WRITE(DISP_ARB_CTL,
1733 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1734 return false;
1735 } else if (INTEL_INFO(dev)->gen >= 6) {
1736 /* enable FBC WM (except on ILK, where it must remain off) */
1737 I915_WRITE(DISP_ARB_CTL,
1738 I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
1739 }
1740
1741 if (display_wm > display->max_wm) {
1742 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1743 display_wm, SNB_DISPLAY_MAX_SRWM, level);
1744 return false;
1745 }
1746
1747 if (cursor_wm > cursor->max_wm) {
1748 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1749 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
1750 return false;
1751 }
1752
1753 if (!(fbc_wm || display_wm || cursor_wm)) {
1754 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1755 return false;
1756 }
1757
1758 return true;
1759}
1760
1761/*
1762 * Compute watermark values of WM[1-3],
1763 */
1764static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1765 int latency_ns,
1766 const struct intel_watermark_params *display,
1767 const struct intel_watermark_params *cursor,
1768 int *fbc_wm, int *display_wm, int *cursor_wm)
1769{
1770 struct drm_crtc *crtc;
1771 const struct drm_display_mode *adjusted_mode;
1772 unsigned long line_time_us;
1773 int hdisplay, htotal, pixel_size, clock;
1774 int line_count, line_size;
1775 int small, large;
1776 int entries;
1777
1778 if (!latency_ns) {
1779 *fbc_wm = *display_wm = *cursor_wm = 0;
1780 return false;
1781 }
1782
1783 crtc = intel_get_crtc_for_plane(dev, plane);
1784 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1785 clock = adjusted_mode->crtc_clock;
1786 htotal = adjusted_mode->crtc_htotal;
1787 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1788 pixel_size = crtc->fb->bits_per_pixel / 8;
1789
1790 line_time_us = (htotal * 1000) / clock;
1791 line_count = (latency_ns / line_time_us + 1000) / 1000;
1792 line_size = hdisplay * pixel_size;
1793
1794 /* Use the minimum of the small and large buffer method for primary */
1795 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1796 large = line_count * line_size;
1797
1798 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1799 *display_wm = entries + display->guard_size;
1800
1801 /*
1802 * Spec says:
1803 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1804 */
1805 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
1806
1807 /* calculate the self-refresh watermark for display cursor */
1808 entries = line_count * pixel_size * 64;
1809 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1810 *cursor_wm = entries + cursor->guard_size;
1811
1812 return ironlake_check_srwm(dev, level,
1813 *fbc_wm, *display_wm, *cursor_wm,
1814 display, cursor);
1815}
1816
1817static void ironlake_update_wm(struct drm_crtc *crtc)
1818{
1819 struct drm_device *dev = crtc->dev;
1820 struct drm_i915_private *dev_priv = dev->dev_private;
1821 int fbc_wm, plane_wm, cursor_wm;
1822 unsigned int enabled;
1823
1824 enabled = 0;
1825 if (g4x_compute_wm0(dev, PIPE_A,
1826 &ironlake_display_wm_info,
1827 dev_priv->wm.pri_latency[0] * 100,
1828 &ironlake_cursor_wm_info,
1829 dev_priv->wm.cur_latency[0] * 100,
1830 &plane_wm, &cursor_wm)) {
1831 I915_WRITE(WM0_PIPEA_ILK,
1832 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1833 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1834 " plane %d, " "cursor: %d\n",
1835 plane_wm, cursor_wm);
1836 enabled |= 1 << PIPE_A;
1837 }
1838
1839 if (g4x_compute_wm0(dev, PIPE_B,
1840 &ironlake_display_wm_info,
1841 dev_priv->wm.pri_latency[0] * 100,
1842 &ironlake_cursor_wm_info,
1843 dev_priv->wm.cur_latency[0] * 100,
1844 &plane_wm, &cursor_wm)) {
1845 I915_WRITE(WM0_PIPEB_ILK,
1846 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1847 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1848 " plane %d, cursor: %d\n",
1849 plane_wm, cursor_wm);
1850 enabled |= 1 << PIPE_B;
1851 }
1852
1853 /*
1854 * Calculate and update the self-refresh watermark only when one
1855 * display plane is used.
1856 */
1857 I915_WRITE(WM3_LP_ILK, 0);
1858 I915_WRITE(WM2_LP_ILK, 0);
1859 I915_WRITE(WM1_LP_ILK, 0);
1860
1861 if (!single_plane_enabled(enabled))
1862 return;
1863 enabled = ffs(enabled) - 1;
1864
1865 /* WM1 */
1866 if (!ironlake_compute_srwm(dev, 1, enabled,
1867 dev_priv->wm.pri_latency[1] * 500,
1868 &ironlake_display_srwm_info,
1869 &ironlake_cursor_srwm_info,
1870 &fbc_wm, &plane_wm, &cursor_wm))
1871 return;
1872
1873 I915_WRITE(WM1_LP_ILK,
1874 WM1_LP_SR_EN |
1875 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
1876 (fbc_wm << WM1_LP_FBC_SHIFT) |
1877 (plane_wm << WM1_LP_SR_SHIFT) |
1878 cursor_wm);
1879
1880 /* WM2 */
1881 if (!ironlake_compute_srwm(dev, 2, enabled,
1882 dev_priv->wm.pri_latency[2] * 500,
1883 &ironlake_display_srwm_info,
1884 &ironlake_cursor_srwm_info,
1885 &fbc_wm, &plane_wm, &cursor_wm))
1886 return;
1887
1888 I915_WRITE(WM2_LP_ILK,
1889 WM2_LP_EN |
1890 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
1891 (fbc_wm << WM1_LP_FBC_SHIFT) |
1892 (plane_wm << WM1_LP_SR_SHIFT) |
1893 cursor_wm);
1894
1895 /*
1896 * WM3 is unsupported on ILK, probably because we don't have latency
1897 * data for that power state
1898 */
1899}
1900
1901static void sandybridge_update_wm(struct drm_crtc *crtc)
1902{
1903 struct drm_device *dev = crtc->dev;
1904 struct drm_i915_private *dev_priv = dev->dev_private;
1905 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
1906 u32 val;
1907 int fbc_wm, plane_wm, cursor_wm;
1908 unsigned int enabled;
1909
1910 enabled = 0;
1911 if (g4x_compute_wm0(dev, PIPE_A,
1912 &sandybridge_display_wm_info, latency,
1913 &sandybridge_cursor_wm_info, latency,
1914 &plane_wm, &cursor_wm)) {
1915 val = I915_READ(WM0_PIPEA_ILK);
1916 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1917 I915_WRITE(WM0_PIPEA_ILK, val |
1918 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1919 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1920 " plane %d, " "cursor: %d\n",
1921 plane_wm, cursor_wm);
1922 enabled |= 1 << PIPE_A;
1923 }
1924
1925 if (g4x_compute_wm0(dev, PIPE_B,
1926 &sandybridge_display_wm_info, latency,
1927 &sandybridge_cursor_wm_info, latency,
1928 &plane_wm, &cursor_wm)) {
1929 val = I915_READ(WM0_PIPEB_ILK);
1930 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1931 I915_WRITE(WM0_PIPEB_ILK, val |
1932 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1933 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1934 " plane %d, cursor: %d\n",
1935 plane_wm, cursor_wm);
1936 enabled |= 1 << PIPE_B;
1937 }
1938
1939 /*
1940 * Calculate and update the self-refresh watermark only when one
1941 * display plane is used.
1942 *
1943 * SNB support 3 levels of watermark.
1944 *
1945 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
1946 * and disabled in the descending order
1947 *
1948 */
1949 I915_WRITE(WM3_LP_ILK, 0);
1950 I915_WRITE(WM2_LP_ILK, 0);
1951 I915_WRITE(WM1_LP_ILK, 0);
1952
1953 if (!single_plane_enabled(enabled) ||
1954 dev_priv->sprite_scaling_enabled)
1955 return;
1956 enabled = ffs(enabled) - 1;
1957
1958 /* WM1 */
1959 if (!ironlake_compute_srwm(dev, 1, enabled,
1960 dev_priv->wm.pri_latency[1] * 500,
1961 &sandybridge_display_srwm_info,
1962 &sandybridge_cursor_srwm_info,
1963 &fbc_wm, &plane_wm, &cursor_wm))
1964 return;
1965
1966 I915_WRITE(WM1_LP_ILK,
1967 WM1_LP_SR_EN |
1968 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
1969 (fbc_wm << WM1_LP_FBC_SHIFT) |
1970 (plane_wm << WM1_LP_SR_SHIFT) |
1971 cursor_wm);
1972
1973 /* WM2 */
1974 if (!ironlake_compute_srwm(dev, 2, enabled,
1975 dev_priv->wm.pri_latency[2] * 500,
1976 &sandybridge_display_srwm_info,
1977 &sandybridge_cursor_srwm_info,
1978 &fbc_wm, &plane_wm, &cursor_wm))
1979 return;
1980
1981 I915_WRITE(WM2_LP_ILK,
1982 WM2_LP_EN |
1983 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
1984 (fbc_wm << WM1_LP_FBC_SHIFT) |
1985 (plane_wm << WM1_LP_SR_SHIFT) |
1986 cursor_wm);
1987
1988 /* WM3 */
1989 if (!ironlake_compute_srwm(dev, 3, enabled,
1990 dev_priv->wm.pri_latency[3] * 500,
1991 &sandybridge_display_srwm_info,
1992 &sandybridge_cursor_srwm_info,
1993 &fbc_wm, &plane_wm, &cursor_wm))
1994 return;
1995
1996 I915_WRITE(WM3_LP_ILK,
1997 WM3_LP_EN |
1998 (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
1999 (fbc_wm << WM1_LP_FBC_SHIFT) |
2000 (plane_wm << WM1_LP_SR_SHIFT) |
2001 cursor_wm);
2002}
2003
2004static void ivybridge_update_wm(struct drm_crtc *crtc)
2005{
2006 struct drm_device *dev = crtc->dev;
2007 struct drm_i915_private *dev_priv = dev->dev_private;
2008 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
2009 u32 val;
2010 int fbc_wm, plane_wm, cursor_wm;
2011 int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
2012 unsigned int enabled;
2013
2014 enabled = 0;
2015 if (g4x_compute_wm0(dev, PIPE_A,
2016 &sandybridge_display_wm_info, latency,
2017 &sandybridge_cursor_wm_info, latency,
2018 &plane_wm, &cursor_wm)) {
2019 val = I915_READ(WM0_PIPEA_ILK);
2020 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2021 I915_WRITE(WM0_PIPEA_ILK, val |
2022 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2023 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
2024 " plane %d, " "cursor: %d\n",
2025 plane_wm, cursor_wm);
2026 enabled |= 1 << PIPE_A;
2027 }
2028
2029 if (g4x_compute_wm0(dev, PIPE_B,
2030 &sandybridge_display_wm_info, latency,
2031 &sandybridge_cursor_wm_info, latency,
2032 &plane_wm, &cursor_wm)) {
2033 val = I915_READ(WM0_PIPEB_ILK);
2034 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2035 I915_WRITE(WM0_PIPEB_ILK, val |
2036 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2037 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
2038 " plane %d, cursor: %d\n",
2039 plane_wm, cursor_wm);
2040 enabled |= 1 << PIPE_B;
2041 }
2042
2043 if (g4x_compute_wm0(dev, PIPE_C,
2044 &sandybridge_display_wm_info, latency,
2045 &sandybridge_cursor_wm_info, latency,
2046 &plane_wm, &cursor_wm)) {
2047 val = I915_READ(WM0_PIPEC_IVB);
2048 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2049 I915_WRITE(WM0_PIPEC_IVB, val |
2050 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2051 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
2052 " plane %d, cursor: %d\n",
2053 plane_wm, cursor_wm);
2054 enabled |= 1 << PIPE_C;
2055 }
2056
2057 /*
2058 * Calculate and update the self-refresh watermark only when one
2059 * display plane is used.
2060 *
2061 * SNB support 3 levels of watermark.
2062 *
2063 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
2064 * and disabled in the descending order
2065 *
2066 */
2067 I915_WRITE(WM3_LP_ILK, 0);
2068 I915_WRITE(WM2_LP_ILK, 0);
2069 I915_WRITE(WM1_LP_ILK, 0);
2070
2071 if (!single_plane_enabled(enabled) ||
2072 dev_priv->sprite_scaling_enabled)
2073 return;
2074 enabled = ffs(enabled) - 1;
2075
2076 /* WM1 */
2077 if (!ironlake_compute_srwm(dev, 1, enabled,
2078 dev_priv->wm.pri_latency[1] * 500,
2079 &sandybridge_display_srwm_info,
2080 &sandybridge_cursor_srwm_info,
2081 &fbc_wm, &plane_wm, &cursor_wm))
2082 return;
2083
2084 I915_WRITE(WM1_LP_ILK,
2085 WM1_LP_SR_EN |
2086 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
2087 (fbc_wm << WM1_LP_FBC_SHIFT) |
2088 (plane_wm << WM1_LP_SR_SHIFT) |
2089 cursor_wm);
2090
2091 /* WM2 */
2092 if (!ironlake_compute_srwm(dev, 2, enabled,
2093 dev_priv->wm.pri_latency[2] * 500,
2094 &sandybridge_display_srwm_info,
2095 &sandybridge_cursor_srwm_info,
2096 &fbc_wm, &plane_wm, &cursor_wm))
2097 return;
2098
2099 I915_WRITE(WM2_LP_ILK,
2100 WM2_LP_EN |
2101 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
2102 (fbc_wm << WM1_LP_FBC_SHIFT) |
2103 (plane_wm << WM1_LP_SR_SHIFT) |
2104 cursor_wm);
2105
2106 /* WM3, note we have to correct the cursor latency */
2107 if (!ironlake_compute_srwm(dev, 3, enabled,
2108 dev_priv->wm.pri_latency[3] * 500,
2109 &sandybridge_display_srwm_info,
2110 &sandybridge_cursor_srwm_info,
2111 &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
2112 !ironlake_compute_srwm(dev, 3, enabled,
2113 dev_priv->wm.cur_latency[3] * 500,
2114 &sandybridge_display_srwm_info,
2115 &sandybridge_cursor_srwm_info,
2116 &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
2117 return;
2118
2119 I915_WRITE(WM3_LP_ILK,
2120 WM3_LP_EN |
2121 (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
2122 (fbc_wm << WM1_LP_FBC_SHIFT) |
2123 (plane_wm << WM1_LP_SR_SHIFT) |
2124 cursor_wm);
2125}
2126
2127static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev, 1636static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
2128 struct drm_crtc *crtc) 1637 struct drm_crtc *crtc)
2129{ 1638{
@@ -2192,7 +1701,7 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
2192 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; 1701 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
2193} 1702}
2194 1703
2195struct hsw_pipe_wm_parameters { 1704struct ilk_pipe_wm_parameters {
2196 bool active; 1705 bool active;
2197 uint32_t pipe_htotal; 1706 uint32_t pipe_htotal;
2198 uint32_t pixel_rate; 1707 uint32_t pixel_rate;
@@ -2201,7 +1710,7 @@ struct hsw_pipe_wm_parameters {
2201 struct intel_plane_wm_parameters cur; 1710 struct intel_plane_wm_parameters cur;
2202}; 1711};
2203 1712
2204struct hsw_wm_maximums { 1713struct ilk_wm_maximums {
2205 uint16_t pri; 1714 uint16_t pri;
2206 uint16_t spr; 1715 uint16_t spr;
2207 uint16_t cur; 1716 uint16_t cur;
@@ -2219,7 +1728,7 @@ struct intel_wm_config {
2219 * For both WM_PIPE and WM_LP. 1728 * For both WM_PIPE and WM_LP.
2220 * mem_value must be in 0.1us units. 1729 * mem_value must be in 0.1us units.
2221 */ 1730 */
2222static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params, 1731static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
2223 uint32_t mem_value, 1732 uint32_t mem_value,
2224 bool is_lp) 1733 bool is_lp)
2225{ 1734{
@@ -2248,7 +1757,7 @@ static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
2248 * For both WM_PIPE and WM_LP. 1757 * For both WM_PIPE and WM_LP.
2249 * mem_value must be in 0.1us units. 1758 * mem_value must be in 0.1us units.
2250 */ 1759 */
2251static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params, 1760static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
2252 uint32_t mem_value) 1761 uint32_t mem_value)
2253{ 1762{
2254 uint32_t method1, method2; 1763 uint32_t method1, method2;
@@ -2271,7 +1780,7 @@ static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
2271 * For both WM_PIPE and WM_LP. 1780 * For both WM_PIPE and WM_LP.
2272 * mem_value must be in 0.1us units. 1781 * mem_value must be in 0.1us units.
2273 */ 1782 */
2274static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params, 1783static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
2275 uint32_t mem_value) 1784 uint32_t mem_value)
2276{ 1785{
2277 if (!params->active || !params->cur.enabled) 1786 if (!params->active || !params->cur.enabled)
@@ -2285,7 +1794,7 @@ static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
2285} 1794}
2286 1795
2287/* Only for WM_LP. */ 1796/* Only for WM_LP. */
2288static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params, 1797static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
2289 uint32_t pri_val) 1798 uint32_t pri_val)
2290{ 1799{
2291 if (!params->active || !params->pri.enabled) 1800 if (!params->active || !params->pri.enabled)
@@ -2390,7 +1899,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev,
2390 int level, 1899 int level,
2391 const struct intel_wm_config *config, 1900 const struct intel_wm_config *config,
2392 enum intel_ddb_partitioning ddb_partitioning, 1901 enum intel_ddb_partitioning ddb_partitioning,
2393 struct hsw_wm_maximums *max) 1902 struct ilk_wm_maximums *max)
2394{ 1903{
2395 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); 1904 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2396 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); 1905 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
@@ -2399,7 +1908,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev,
2399} 1908}
2400 1909
2401static bool ilk_validate_wm_level(int level, 1910static bool ilk_validate_wm_level(int level,
2402 const struct hsw_wm_maximums *max, 1911 const struct ilk_wm_maximums *max,
2403 struct intel_wm_level *result) 1912 struct intel_wm_level *result)
2404{ 1913{
2405 bool ret; 1914 bool ret;
@@ -2441,7 +1950,7 @@ static bool ilk_validate_wm_level(int level,
2441 1950
2442static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, 1951static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
2443 int level, 1952 int level,
2444 const struct hsw_pipe_wm_parameters *p, 1953 const struct ilk_pipe_wm_parameters *p,
2445 struct intel_wm_level *result) 1954 struct intel_wm_level *result)
2446{ 1955{
2447 uint16_t pri_latency = dev_priv->wm.pri_latency[level]; 1956 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2489,7 +1998,7 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
2489{ 1998{
2490 struct drm_i915_private *dev_priv = dev->dev_private; 1999 struct drm_i915_private *dev_priv = dev->dev_private;
2491 2000
2492 if (IS_HASWELL(dev)) { 2001 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2493 uint64_t sskpd = I915_READ64(MCH_SSKPD); 2002 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2494 2003
2495 wm[0] = (sskpd >> 56) & 0xFF; 2004 wm[0] = (sskpd >> 56) & 0xFF;
@@ -2537,7 +2046,7 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2537static int ilk_wm_max_level(const struct drm_device *dev) 2046static int ilk_wm_max_level(const struct drm_device *dev)
2538{ 2047{
2539 /* how many WM levels are we expecting */ 2048 /* how many WM levels are we expecting */
2540 if (IS_HASWELL(dev)) 2049 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2541 return 4; 2050 return 4;
2542 else if (INTEL_INFO(dev)->gen >= 6) 2051 else if (INTEL_INFO(dev)->gen >= 6)
2543 return 3; 2052 return 3;
@@ -2589,8 +2098,8 @@ static void intel_setup_wm_latency(struct drm_device *dev)
2589 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); 2098 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2590} 2099}
2591 2100
2592static void hsw_compute_wm_parameters(struct drm_crtc *crtc, 2101static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2593 struct hsw_pipe_wm_parameters *p, 2102 struct ilk_pipe_wm_parameters *p,
2594 struct intel_wm_config *config) 2103 struct intel_wm_config *config)
2595{ 2104{
2596 struct drm_device *dev = crtc->dev; 2105 struct drm_device *dev = crtc->dev;
@@ -2600,7 +2109,7 @@ static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
2600 2109
2601 p->active = intel_crtc_active(crtc); 2110 p->active = intel_crtc_active(crtc);
2602 if (p->active) { 2111 if (p->active) {
2603 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal; 2112 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2604 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); 2113 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2605 p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8; 2114 p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
2606 p->cur.bytes_per_pixel = 4; 2115 p->cur.bytes_per_pixel = 4;
@@ -2627,7 +2136,7 @@ static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
2627 2136
2628/* Compute new watermarks for the pipe */ 2137/* Compute new watermarks for the pipe */
2629static bool intel_compute_pipe_wm(struct drm_crtc *crtc, 2138static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2630 const struct hsw_pipe_wm_parameters *params, 2139 const struct ilk_pipe_wm_parameters *params,
2631 struct intel_pipe_wm *pipe_wm) 2140 struct intel_pipe_wm *pipe_wm)
2632{ 2141{
2633 struct drm_device *dev = crtc->dev; 2142 struct drm_device *dev = crtc->dev;
@@ -2639,16 +2148,25 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2639 .sprites_enabled = params->spr.enabled, 2148 .sprites_enabled = params->spr.enabled,
2640 .sprites_scaled = params->spr.scaled, 2149 .sprites_scaled = params->spr.scaled,
2641 }; 2150 };
2642 struct hsw_wm_maximums max; 2151 struct ilk_wm_maximums max;
2643 2152
2644 /* LP0 watermarks always use 1/2 DDB partitioning */ 2153 /* LP0 watermarks always use 1/2 DDB partitioning */
2645 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); 2154 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2646 2155
2156 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2157 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2158 max_level = 1;
2159
2160 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2161 if (params->spr.scaled)
2162 max_level = 0;
2163
2647 for (level = 0; level <= max_level; level++) 2164 for (level = 0; level <= max_level; level++)
2648 ilk_compute_wm_level(dev_priv, level, params, 2165 ilk_compute_wm_level(dev_priv, level, params,
2649 &pipe_wm->wm[level]); 2166 &pipe_wm->wm[level]);
2650 2167
2651 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); 2168 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2169 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2652 2170
2653 /* At least LP0 must be valid */ 2171 /* At least LP0 must be valid */
2654 return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]); 2172 return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
@@ -2683,12 +2201,19 @@ static void ilk_merge_wm_level(struct drm_device *dev,
2683 * Merge all low power watermarks for all active pipes. 2201 * Merge all low power watermarks for all active pipes.
2684 */ 2202 */
2685static void ilk_wm_merge(struct drm_device *dev, 2203static void ilk_wm_merge(struct drm_device *dev,
2686 const struct hsw_wm_maximums *max, 2204 const struct intel_wm_config *config,
2205 const struct ilk_wm_maximums *max,
2687 struct intel_pipe_wm *merged) 2206 struct intel_pipe_wm *merged)
2688{ 2207{
2689 int level, max_level = ilk_wm_max_level(dev); 2208 int level, max_level = ilk_wm_max_level(dev);
2690 2209
2691 merged->fbc_wm_enabled = true; 2210 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2211 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2212 config->num_pipes_active > 1)
2213 return;
2214
 2215 /* ILK: the FBC WM must always be disabled */
2216 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2692 2217
2693 /* merge each WM1+ level */ 2218 /* merge each WM1+ level */
2694 for (level = 1; level <= max_level; level++) { 2219 for (level = 1; level <= max_level; level++) {
@@ -2708,6 +2233,20 @@ static void ilk_wm_merge(struct drm_device *dev,
2708 wm->fbc_val = 0; 2233 wm->fbc_val = 0;
2709 } 2234 }
2710 } 2235 }
2236
 2237 /* ILK: LP2+ must be disabled when the FBC WM is disabled but FBC is enabled */
2238 /*
2239 * FIXME this is racy. FBC might get enabled later.
2240 * What we should check here is whether FBC can be
2241 * enabled sometime later.
2242 */
2243 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2244 for (level = 2; level <= max_level; level++) {
2245 struct intel_wm_level *wm = &merged->wm[level];
2246
2247 wm->enable = false;
2248 }
2249 }
2711} 2250}
2712 2251
2713static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) 2252static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
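
The ilk_wm_merge() hunk above encodes three platform constraints: ILK/SNB/IVB keep LP1+ watermarks only with a single active pipe, the FBC watermark field is only usable on gen6+, and ILK must additionally drop the LP2+ levels whenever the FBC watermark is unusable while FBC itself is enabled. A minimal userspace sketch of that decision logic, using simplified stand-ins for the driver's config and merged-watermark types:

    #include <stdbool.h>
    #include <stdio.h>

    struct wm_config { int num_pipes_active; bool fbc_enabled; };
    struct merged_wm { bool fbc_wm_enabled; bool level_enabled[5]; };

    /* 'gen' and 'is_ivb' stand in for INTEL_INFO(dev)->gen and
     * IS_IVYBRIDGE(dev); levels arrive pre-merged and enabled. */
    static void merge_constraints(int gen, bool is_ivb,
                                  const struct wm_config *cfg,
                                  struct merged_wm *m, int max_level)
    {
        /* ILK/SNB/IVB: LP1+ watermarks only with a single active pipe */
        if ((gen <= 6 || is_ivb) && cfg->num_pipes_active > 1) {
            for (int level = 1; level <= max_level; level++)
                m->level_enabled[level] = false;
            return;
        }

        /* ILK: the FBC watermark field must never be used */
        m->fbc_wm_enabled = gen >= 6;

        /* ILK quirk: FBC WM unusable while FBC is on -> no LP2+ either */
        if (gen == 5 && !m->fbc_wm_enabled && cfg->fbc_enabled)
            for (int level = 2; level <= max_level; level++)
                m->level_enabled[level] = false;
    }

    int main(void)
    {
        struct wm_config cfg = { .num_pipes_active = 1, .fbc_enabled = true };
        struct merged_wm m = { .level_enabled = { true, true, true, true } };

        merge_constraints(5, false, &cfg, &m, 3);
        printf("ILK LP2 enabled: %d\n", m.level_enabled[2]); /* prints 0 */
        return 0;
    }
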
@@ -2716,10 +2255,21 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2716 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); 2255 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2717} 2256}
2718 2257
2719static void hsw_compute_wm_results(struct drm_device *dev, 2258/* The value we need to program into the WM_LPx latency field */
2259static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2260{
2261 struct drm_i915_private *dev_priv = dev->dev_private;
2262
2263 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2264 return 2 * level;
2265 else
2266 return dev_priv->wm.pri_latency[level];
2267}
2268
2269static void ilk_compute_wm_results(struct drm_device *dev,
2720 const struct intel_pipe_wm *merged, 2270 const struct intel_pipe_wm *merged,
2721 enum intel_ddb_partitioning partitioning, 2271 enum intel_ddb_partitioning partitioning,
2722 struct hsw_wm_values *results) 2272 struct ilk_wm_values *results)
2723{ 2273{
2724 struct intel_crtc *intel_crtc; 2274 struct intel_crtc *intel_crtc;
2725 int level, wm_lp; 2275 int level, wm_lp;
@@ -2738,7 +2288,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
2738 break; 2288 break;
2739 2289
2740 results->wm_lp[wm_lp - 1] = WM3_LP_EN | 2290 results->wm_lp[wm_lp - 1] = WM3_LP_EN |
2741 ((level * 2) << WM1_LP_LATENCY_SHIFT) | 2291 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2742 (r->pri_val << WM1_LP_SR_SHIFT) | 2292 (r->pri_val << WM1_LP_SR_SHIFT) |
2743 r->cur_val; 2293 r->cur_val;
2744 2294
@@ -2749,7 +2299,11 @@ static void hsw_compute_wm_results(struct drm_device *dev,
2749 results->wm_lp[wm_lp - 1] |= 2299 results->wm_lp[wm_lp - 1] |=
2750 r->fbc_val << WM1_LP_FBC_SHIFT; 2300 r->fbc_val << WM1_LP_FBC_SHIFT;
2751 2301
2752 results->wm_lp_spr[wm_lp - 1] = r->spr_val; 2302 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2303 WARN_ON(wm_lp != 1);
2304 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2305 } else
2306 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2753 } 2307 }
2754 2308
2755 /* LP0 register values */ 2309 /* LP0 register values */
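
ilk_wm_lp_latency() above captures the programming difference for the WM_LPx latency field: HSW/BDW write 2*level, while older parts write the raw primary latency read out at init; ilk_compute_wm_results() then packs latency, primary and cursor values into one register word. A hedged sketch of that packing; the shift values mirror WM1_LP_LATENCY_SHIFT (24) and WM1_LP_SR_SHIFT (8) from i915_reg.h but should be treated as illustrative here:

    #include <stdint.h>
    #include <stdio.h>

    #define WM_LP_EN            (1u << 31) /* WM3_LP_EN in the hunk */
    #define WM_LP_LATENCY_SHIFT 24
    #define WM_LP_SR_SHIFT      8

    /* HSW/BDW program 2*level; earlier gens program the latency value
     * kept in dev_priv->wm.pri_latency[level]. */
    static unsigned int wm_lp_latency(int is_hsw_bdw, int level,
                                      const uint16_t *pri_latency)
    {
        return is_hsw_bdw ? 2 * level : pri_latency[level];
    }

    static uint32_t pack_wm_lp(unsigned int latency, uint32_t pri, uint32_t cur)
    {
        return WM_LP_EN |
               (latency << WM_LP_LATENCY_SHIFT) |
               (pri << WM_LP_SR_SHIFT) |
               cur;
    }

    int main(void)
    {
        const uint16_t pri_latency[5] = { 7, 40, 120, 300, 0 }; /* made up */

        printf("HSW WM1_LP: 0x%08x\n",
               pack_wm_lp(wm_lp_latency(1, 1, pri_latency), 256, 32));
        printf("SNB WM1_LP: 0x%08x\n",
               pack_wm_lp(wm_lp_latency(0, 1, pri_latency), 256, 32));
        return 0;
    }
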
@@ -2772,7 +2326,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
2772 2326
2773/* Find the result with the highest level enabled. Check for enable_fbc_wm in 2327/* Find the result with the highest level enabled. Check for enable_fbc_wm in
2774 * case both are at the same level. Prefer r1 in case they're the same. */ 2328 * case both are at the same level. Prefer r1 in case they're the same. */
2775static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev, 2329static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2776 struct intel_pipe_wm *r1, 2330 struct intel_pipe_wm *r1,
2777 struct intel_pipe_wm *r2) 2331 struct intel_pipe_wm *r2)
2778{ 2332{
@@ -2807,8 +2361,8 @@ static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
2807#define WM_DIRTY_DDB (1 << 25) 2361#define WM_DIRTY_DDB (1 << 25)
2808 2362
2809static unsigned int ilk_compute_wm_dirty(struct drm_device *dev, 2363static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2810 const struct hsw_wm_values *old, 2364 const struct ilk_wm_values *old,
2811 const struct hsw_wm_values *new) 2365 const struct ilk_wm_values *new)
2812{ 2366{
2813 unsigned int dirty = 0; 2367 unsigned int dirty = 0;
2814 enum pipe pipe; 2368 enum pipe pipe;
@@ -2858,27 +2412,53 @@ static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2858 return dirty; 2412 return dirty;
2859} 2413}
2860 2414
2415static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2416 unsigned int dirty)
2417{
2418 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2419 bool changed = false;
2420
2421 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2422 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2423 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2424 changed = true;
2425 }
2426 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2427 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2428 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2429 changed = true;
2430 }
2431 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2432 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2433 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2434 changed = true;
2435 }
2436
2437 /*
2438 * Don't touch WM1S_LP_EN here.
2439 * Doing so could cause underruns.
2440 */
2441
2442 return changed;
2443}
2444
2861/* 2445/*
2862 * The spec says we shouldn't write when we don't need, because every write 2446 * The spec says we shouldn't write when we don't need, because every write
2863 * causes WMs to be re-evaluated, expending some power. 2447 * causes WMs to be re-evaluated, expending some power.
2864 */ 2448 */
2865static void hsw_write_wm_values(struct drm_i915_private *dev_priv, 2449static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2866 struct hsw_wm_values *results) 2450 struct ilk_wm_values *results)
2867{ 2451{
2868 struct hsw_wm_values *previous = &dev_priv->wm.hw; 2452 struct drm_device *dev = dev_priv->dev;
2453 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2869 unsigned int dirty; 2454 unsigned int dirty;
2870 uint32_t val; 2455 uint32_t val;
2871 2456
2872 dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results); 2457 dirty = ilk_compute_wm_dirty(dev, previous, results);
2873 if (!dirty) 2458 if (!dirty)
2874 return; 2459 return;
2875 2460
2876 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0) 2461 _ilk_disable_lp_wm(dev_priv, dirty);
2877 I915_WRITE(WM3_LP_ILK, 0);
2878 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
2879 I915_WRITE(WM2_LP_ILK, 0);
2880 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
2881 I915_WRITE(WM1_LP_ILK, 0);
2882 2462
2883 if (dirty & WM_DIRTY_PIPE(PIPE_A)) 2463 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2884 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); 2464 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
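
Note what _ilk_disable_lp_wm() above does not do: unlike the removed writes of 0, it clears only the WM1_LP_SR_EN bit on the dirty levels, so the programmed watermark values survive and re-enabling is a single bit flip; WM1S_LP_EN is deliberately left untouched. A small model of that idea, with an array standing in for the WM1..WM3_LP_ILK MMIO registers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WM_LP_SR_EN    (1u << 31)
    #define WM_DIRTY_LP(x) (1u << (x)) /* simplified dirty-bit layout */

    static uint32_t wm_lp_reg[3];      /* stands in for WM1..WM3_LP_ILK */

    /* Clear only the enable bit on dirty, currently-enabled levels;
     * the rest of each register is preserved for cheap re-enable. */
    static bool disable_lp_wm(unsigned int dirty)
    {
        bool changed = false;

        for (int lp = 3; lp >= 1; lp--) {
            if ((dirty & WM_DIRTY_LP(lp)) && (wm_lp_reg[lp - 1] & WM_LP_SR_EN)) {
                wm_lp_reg[lp - 1] &= ~WM_LP_SR_EN; /* the I915_WRITE() */
                changed = true;
            }
        }
        return changed;
    }

    int main(void)
    {
        wm_lp_reg[0] = WM_LP_SR_EN | 0x1234;
        disable_lp_wm(WM_DIRTY_LP(1));
        printf("WM1_LP: 0x%08x\n", wm_lp_reg[0]); /* 0x00001234, value kept */
        return 0;
    }
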
@@ -2895,12 +2475,21 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2895 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); 2475 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2896 2476
2897 if (dirty & WM_DIRTY_DDB) { 2477 if (dirty & WM_DIRTY_DDB) {
2898 val = I915_READ(WM_MISC); 2478 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2899 if (results->partitioning == INTEL_DDB_PART_1_2) 2479 val = I915_READ(WM_MISC);
2900 val &= ~WM_MISC_DATA_PARTITION_5_6; 2480 if (results->partitioning == INTEL_DDB_PART_1_2)
2901 else 2481 val &= ~WM_MISC_DATA_PARTITION_5_6;
2902 val |= WM_MISC_DATA_PARTITION_5_6; 2482 else
2903 I915_WRITE(WM_MISC, val); 2483 val |= WM_MISC_DATA_PARTITION_5_6;
2484 I915_WRITE(WM_MISC, val);
2485 } else {
2486 val = I915_READ(DISP_ARB_CTL2);
2487 if (results->partitioning == INTEL_DDB_PART_1_2)
2488 val &= ~DISP_DATA_PARTITION_5_6;
2489 else
2490 val |= DISP_DATA_PARTITION_5_6;
2491 I915_WRITE(DISP_ARB_CTL2, val);
2492 }
2904 } 2493 }
2905 2494
2906 if (dirty & WM_DIRTY_FBC) { 2495 if (dirty & WM_DIRTY_FBC) {
@@ -2912,37 +2501,48 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2912 I915_WRITE(DISP_ARB_CTL, val); 2501 I915_WRITE(DISP_ARB_CTL, val);
2913 } 2502 }
2914 2503
2915 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0]) 2504 if (dirty & WM_DIRTY_LP(1) &&
2505 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2916 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); 2506 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2917 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2918 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2919 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2920 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2921 2507
2922 if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0) 2508 if (INTEL_INFO(dev)->gen >= 7) {
2509 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2510 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2511 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2512 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2513 }
2514
2515 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2923 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); 2516 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2924 if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0) 2517 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2925 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); 2518 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2926 if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0) 2519 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2927 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); 2520 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2928 2521
2929 dev_priv->wm.hw = *results; 2522 dev_priv->wm.hw = *results;
2930} 2523}
2931 2524
2932static void haswell_update_wm(struct drm_crtc *crtc) 2525static bool ilk_disable_lp_wm(struct drm_device *dev)
2526{
2527 struct drm_i915_private *dev_priv = dev->dev_private;
2528
2529 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2530}
2531
2532static void ilk_update_wm(struct drm_crtc *crtc)
2933{ 2533{
2934 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2534 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2935 struct drm_device *dev = crtc->dev; 2535 struct drm_device *dev = crtc->dev;
2936 struct drm_i915_private *dev_priv = dev->dev_private; 2536 struct drm_i915_private *dev_priv = dev->dev_private;
2937 struct hsw_wm_maximums max; 2537 struct ilk_wm_maximums max;
2938 struct hsw_pipe_wm_parameters params = {}; 2538 struct ilk_pipe_wm_parameters params = {};
2939 struct hsw_wm_values results = {}; 2539 struct ilk_wm_values results = {};
2940 enum intel_ddb_partitioning partitioning; 2540 enum intel_ddb_partitioning partitioning;
2941 struct intel_pipe_wm pipe_wm = {}; 2541 struct intel_pipe_wm pipe_wm = {};
2942 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; 2542 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2943 struct intel_wm_config config = {}; 2543 struct intel_wm_config config = {};
2944 2544
2945 hsw_compute_wm_parameters(crtc, &params, &config); 2545 ilk_compute_wm_parameters(crtc, &params, &config);
2946 2546
2947 intel_compute_pipe_wm(crtc, &params, &pipe_wm); 2547 intel_compute_pipe_wm(crtc, &params, &pipe_wm);
2948 2548
@@ -2952,15 +2552,15 @@ static void haswell_update_wm(struct drm_crtc *crtc)
2952 intel_crtc->wm.active = pipe_wm; 2552 intel_crtc->wm.active = pipe_wm;
2953 2553
2954 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); 2554 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2955 ilk_wm_merge(dev, &max, &lp_wm_1_2); 2555 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
2956 2556
2957 /* 5/6 split only in single pipe config on IVB+ */ 2557 /* 5/6 split only in single pipe config on IVB+ */
2958 if (INTEL_INFO(dev)->gen >= 7 && 2558 if (INTEL_INFO(dev)->gen >= 7 &&
2959 config.num_pipes_active == 1 && config.sprites_enabled) { 2559 config.num_pipes_active == 1 && config.sprites_enabled) {
2960 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); 2560 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2961 ilk_wm_merge(dev, &max, &lp_wm_5_6); 2561 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
2962 2562
2963 best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); 2563 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2964 } else { 2564 } else {
2965 best_lp_wm = &lp_wm_1_2; 2565 best_lp_wm = &lp_wm_1_2;
2966 } 2566 }
@@ -2968,16 +2568,17 @@ static void haswell_update_wm(struct drm_crtc *crtc)
2968 partitioning = (best_lp_wm == &lp_wm_1_2) ? 2568 partitioning = (best_lp_wm == &lp_wm_1_2) ?
2969 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; 2569 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2970 2570
2971 hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results); 2571 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2972 2572
2973 hsw_write_wm_values(dev_priv, &results); 2573 ilk_write_wm_values(dev_priv, &results);
2974} 2574}
2975 2575
2976static void haswell_update_sprite_wm(struct drm_plane *plane, 2576static void ilk_update_sprite_wm(struct drm_plane *plane,
2977 struct drm_crtc *crtc, 2577 struct drm_crtc *crtc,
2978 uint32_t sprite_width, int pixel_size, 2578 uint32_t sprite_width, int pixel_size,
2979 bool enabled, bool scaled) 2579 bool enabled, bool scaled)
2980{ 2580{
2581 struct drm_device *dev = plane->dev;
2981 struct intel_plane *intel_plane = to_intel_plane(plane); 2582 struct intel_plane *intel_plane = to_intel_plane(plane);
2982 2583
2983 intel_plane->wm.enabled = enabled; 2584 intel_plane->wm.enabled = enabled;
@@ -2985,176 +2586,24 @@ static void haswell_update_sprite_wm(struct drm_plane *plane,
2985 intel_plane->wm.horiz_pixels = sprite_width; 2586 intel_plane->wm.horiz_pixels = sprite_width;
2986 intel_plane->wm.bytes_per_pixel = pixel_size; 2587 intel_plane->wm.bytes_per_pixel = pixel_size;
2987 2588
2988 haswell_update_wm(crtc); 2589 /*
2989} 2590 * IVB workaround: must disable low power watermarks for at least
2990 2591 * one frame before enabling scaling. LP watermarks can be re-enabled
2991static bool 2592 * when scaling is disabled.
2992sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, 2593 *
2993 uint32_t sprite_width, int pixel_size, 2594 * WaCxSRDisabledForSpriteScaling:ivb
2994 const struct intel_watermark_params *display, 2595 */
2995 int display_latency_ns, int *sprite_wm) 2596 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
2996{ 2597 intel_wait_for_vblank(dev, intel_plane->pipe);
2997 struct drm_crtc *crtc;
2998 int clock;
2999 int entries, tlb_miss;
3000
3001 crtc = intel_get_crtc_for_plane(dev, plane);
3002 if (!intel_crtc_active(crtc)) {
3003 *sprite_wm = display->guard_size;
3004 return false;
3005 }
3006
3007 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3008
3009 /* Use the small buffer method to calculate the sprite watermark */
3010 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
3011 tlb_miss = display->fifo_size*display->cacheline_size -
3012 sprite_width * 8;
3013 if (tlb_miss > 0)
3014 entries += tlb_miss;
3015 entries = DIV_ROUND_UP(entries, display->cacheline_size);
3016 *sprite_wm = entries + display->guard_size;
3017 if (*sprite_wm > (int)display->max_wm)
3018 *sprite_wm = display->max_wm;
3019
3020 return true;
3021}
3022
3023static bool
3024sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
3025 uint32_t sprite_width, int pixel_size,
3026 const struct intel_watermark_params *display,
3027 int latency_ns, int *sprite_wm)
3028{
3029 struct drm_crtc *crtc;
3030 unsigned long line_time_us;
3031 int clock;
3032 int line_count, line_size;
3033 int small, large;
3034 int entries;
3035
3036 if (!latency_ns) {
3037 *sprite_wm = 0;
3038 return false;
3039 }
3040
3041 crtc = intel_get_crtc_for_plane(dev, plane);
3042 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3043 if (!clock) {
3044 *sprite_wm = 0;
3045 return false;
3046 }
3047
3048 line_time_us = (sprite_width * 1000) / clock;
3049 if (!line_time_us) {
3050 *sprite_wm = 0;
3051 return false;
3052 }
3053
3054 line_count = (latency_ns / line_time_us + 1000) / 1000;
3055 line_size = sprite_width * pixel_size;
3056
3057 /* Use the minimum of the small and large buffer method for primary */
3058 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
3059 large = line_count * line_size;
3060
3061 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
3062 *sprite_wm = entries + display->guard_size;
3063 2598
3064 return *sprite_wm > 0x3ff ? false : true; 2599 ilk_update_wm(crtc);
3065}
3066
3067static void sandybridge_update_sprite_wm(struct drm_plane *plane,
3068 struct drm_crtc *crtc,
3069 uint32_t sprite_width, int pixel_size,
3070 bool enabled, bool scaled)
3071{
3072 struct drm_device *dev = plane->dev;
3073 struct drm_i915_private *dev_priv = dev->dev_private;
3074 int pipe = to_intel_plane(plane)->pipe;
3075 int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */
3076 u32 val;
3077 int sprite_wm, reg;
3078 int ret;
3079
3080 if (!enabled)
3081 return;
3082
3083 switch (pipe) {
3084 case 0:
3085 reg = WM0_PIPEA_ILK;
3086 break;
3087 case 1:
3088 reg = WM0_PIPEB_ILK;
3089 break;
3090 case 2:
3091 reg = WM0_PIPEC_IVB;
3092 break;
3093 default:
3094 return; /* bad pipe */
3095 }
3096
3097 ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
3098 &sandybridge_display_wm_info,
3099 latency, &sprite_wm);
3100 if (!ret) {
3101 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
3102 pipe_name(pipe));
3103 return;
3104 }
3105
3106 val = I915_READ(reg);
3107 val &= ~WM0_PIPE_SPRITE_MASK;
3108 I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
3109 DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm);
3110
3111
3112 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3113 pixel_size,
3114 &sandybridge_display_srwm_info,
3115 dev_priv->wm.spr_latency[1] * 500,
3116 &sprite_wm);
3117 if (!ret) {
3118 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
3119 pipe_name(pipe));
3120 return;
3121 }
3122 I915_WRITE(WM1S_LP_ILK, sprite_wm);
3123
3124 /* Only IVB has two more LP watermarks for sprite */
3125 if (!IS_IVYBRIDGE(dev))
3126 return;
3127
3128 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3129 pixel_size,
3130 &sandybridge_display_srwm_info,
3131 dev_priv->wm.spr_latency[2] * 500,
3132 &sprite_wm);
3133 if (!ret) {
3134 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
3135 pipe_name(pipe));
3136 return;
3137 }
3138 I915_WRITE(WM2S_LP_IVB, sprite_wm);
3139
3140 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3141 pixel_size,
3142 &sandybridge_display_srwm_info,
3143 dev_priv->wm.spr_latency[3] * 500,
3144 &sprite_wm);
3145 if (!ret) {
3146 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
3147 pipe_name(pipe));
3148 return;
3149 }
3150 I915_WRITE(WM3S_LP_IVB, sprite_wm);
3151} 2600}
3152 2601
3153static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) 2602static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3154{ 2603{
3155 struct drm_device *dev = crtc->dev; 2604 struct drm_device *dev = crtc->dev;
3156 struct drm_i915_private *dev_priv = dev->dev_private; 2605 struct drm_i915_private *dev_priv = dev->dev_private;
3157 struct hsw_wm_values *hw = &dev_priv->wm.hw; 2606 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3158 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2607 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3159 struct intel_pipe_wm *active = &intel_crtc->wm.active; 2608 struct intel_pipe_wm *active = &intel_crtc->wm.active;
3160 enum pipe pipe = intel_crtc->pipe; 2609 enum pipe pipe = intel_crtc->pipe;
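
With the sandybridge sprite paths gone, ilk_update_sprite_wm() above carries the WaCxSRDisabledForSpriteScaling:ivb sequence itself: when a scaled sprite is being enabled on IVB, the LP watermarks are force-disabled and, only if that actually changed hardware state, the code waits one vblank before the normal watermark update runs. The control flow in miniature (stub functions stand in for the driver calls):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs for ilk_disable_lp_wm(), intel_wait_for_vblank(), ilk_update_wm() */
    static bool disable_lp_wm(void) { puts("LP watermarks off"); return true; }
    static void wait_vblank(void)   { puts("waited one vblank"); }
    static void update_wm(void)     { puts("watermarks reprogrammed"); }

    /* WaCxSRDisabledForSpriteScaling:ivb -- LP watermarks must be off for
     * at least one frame before the sprite scaler is enabled. */
    static void update_sprite_wm(bool is_ivb, bool scaled)
    {
        if (is_ivb && scaled && disable_lp_wm())
            wait_vblank();

        update_wm();
    }

    int main(void)
    {
        update_sprite_wm(true, true);
        return 0;
    }
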
@@ -3165,7 +2614,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3165 }; 2614 };
3166 2615
3167 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); 2616 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
3168 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); 2617 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2618 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3169 2619
3170 if (intel_crtc_active(crtc)) { 2620 if (intel_crtc_active(crtc)) {
3171 u32 tmp = hw->wm_pipe[pipe]; 2621 u32 tmp = hw->wm_pipe[pipe];
@@ -3197,7 +2647,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3197void ilk_wm_get_hw_state(struct drm_device *dev) 2647void ilk_wm_get_hw_state(struct drm_device *dev)
3198{ 2648{
3199 struct drm_i915_private *dev_priv = dev->dev_private; 2649 struct drm_i915_private *dev_priv = dev->dev_private;
3200 struct hsw_wm_values *hw = &dev_priv->wm.hw; 2650 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3201 struct drm_crtc *crtc; 2651 struct drm_crtc *crtc;
3202 2652
3203 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 2653 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -3211,8 +2661,12 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
3211 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); 2661 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
3212 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); 2662 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
3213 2663
3214 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? 2664 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3215 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 2665 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2666 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2667 else if (IS_IVYBRIDGE(dev))
2668 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
2669 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3216 2670
3217 hw->enable_fbc_wm = 2671 hw->enable_fbc_wm =
3218 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); 2672 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
@@ -3583,9 +3037,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3583 3037
3584void gen6_rps_idle(struct drm_i915_private *dev_priv) 3038void gen6_rps_idle(struct drm_i915_private *dev_priv)
3585{ 3039{
3040 struct drm_device *dev = dev_priv->dev;
3041
3586 mutex_lock(&dev_priv->rps.hw_lock); 3042 mutex_lock(&dev_priv->rps.hw_lock);
3587 if (dev_priv->rps.enabled) { 3043 if (dev_priv->rps.enabled) {
3588 if (dev_priv->info->is_valleyview) 3044 if (IS_VALLEYVIEW(dev))
3589 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay); 3045 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3590 else 3046 else
3591 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); 3047 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
@@ -3596,9 +3052,11 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
3596 3052
3597void gen6_rps_boost(struct drm_i915_private *dev_priv) 3053void gen6_rps_boost(struct drm_i915_private *dev_priv)
3598{ 3054{
3055 struct drm_device *dev = dev_priv->dev;
3056
3599 mutex_lock(&dev_priv->rps.hw_lock); 3057 mutex_lock(&dev_priv->rps.hw_lock);
3600 if (dev_priv->rps.enabled) { 3058 if (dev_priv->rps.enabled) {
3601 if (dev_priv->info->is_valleyview) 3059 if (IS_VALLEYVIEW(dev))
3602 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay); 3060 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3603 else 3061 else
3604 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay); 3062 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
@@ -4972,6 +4430,20 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
4972 } 4430 }
4973} 4431}
4974 4432
4433static void ilk_init_lp_watermarks(struct drm_device *dev)
4434{
4435 struct drm_i915_private *dev_priv = dev->dev_private;
4436
4437 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
4438 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
4439 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
4440
4441 /*
4442 * Don't touch WM1S_LP_EN here.
4443 * Doing so could cause underruns.
4444 */
4445}
4446
4975static void ironlake_init_clock_gating(struct drm_device *dev) 4447static void ironlake_init_clock_gating(struct drm_device *dev)
4976{ 4448{
4977 struct drm_i915_private *dev_priv = dev->dev_private; 4449 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5005,9 +4477,8 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
5005 I915_WRITE(DISP_ARB_CTL, 4477 I915_WRITE(DISP_ARB_CTL,
5006 (I915_READ(DISP_ARB_CTL) | 4478 (I915_READ(DISP_ARB_CTL) |
5007 DISP_FBC_WM_DIS)); 4479 DISP_FBC_WM_DIS));
5008 I915_WRITE(WM3_LP_ILK, 0); 4480
5009 I915_WRITE(WM2_LP_ILK, 0); 4481 ilk_init_lp_watermarks(dev);
5010 I915_WRITE(WM1_LP_ILK, 0);
5011 4482
5012 /* 4483 /*
5013 * Based on the document from hardware guys the following bits 4484 * Based on the document from hardware guys the following bits
@@ -5114,9 +4585,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
5114 I915_WRITE(GEN6_GT_MODE, 4585 I915_WRITE(GEN6_GT_MODE,
5115 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); 4586 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
5116 4587
5117 I915_WRITE(WM3_LP_ILK, 0); 4588 ilk_init_lp_watermarks(dev);
5118 I915_WRITE(WM2_LP_ILK, 0);
5119 I915_WRITE(WM1_LP_ILK, 0);
5120 4589
5121 I915_WRITE(CACHE_MODE_0, 4590 I915_WRITE(CACHE_MODE_0,
5122 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 4591 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
@@ -5290,9 +4759,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
5290{ 4759{
5291 struct drm_i915_private *dev_priv = dev->dev_private; 4760 struct drm_i915_private *dev_priv = dev->dev_private;
5292 4761
5293 I915_WRITE(WM3_LP_ILK, 0); 4762 ilk_init_lp_watermarks(dev);
5294 I915_WRITE(WM2_LP_ILK, 0);
5295 I915_WRITE(WM1_LP_ILK, 0);
5296 4763
5297 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. 4764 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5298 * This implements the WaDisableRCZUnitClockGating:hsw workaround. 4765 * This implements the WaDisableRCZUnitClockGating:hsw workaround.
@@ -5341,9 +4808,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
5341 struct drm_i915_private *dev_priv = dev->dev_private; 4808 struct drm_i915_private *dev_priv = dev->dev_private;
5342 uint32_t snpcr; 4809 uint32_t snpcr;
5343 4810
5344 I915_WRITE(WM3_LP_ILK, 0); 4811 ilk_init_lp_watermarks(dev);
5345 I915_WRITE(WM2_LP_ILK, 0);
5346 I915_WRITE(WM1_LP_ILK, 0);
5347 4812
5348 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 4813 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
5349 4814
@@ -6062,7 +5527,7 @@ void intel_init_pm(struct drm_device *dev)
6062{ 5527{
6063 struct drm_i915_private *dev_priv = dev->dev_private; 5528 struct drm_i915_private *dev_priv = dev->dev_private;
6064 5529
6065 if (I915_HAS_FBC(dev)) { 5530 if (HAS_FBC(dev)) {
6066 if (INTEL_INFO(dev)->gen >= 7) { 5531 if (INTEL_INFO(dev)->gen >= 7) {
6067 dev_priv->display.fbc_enabled = ironlake_fbc_enabled; 5532 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
6068 dev_priv->display.enable_fbc = gen7_enable_fbc; 5533 dev_priv->display.enable_fbc = gen7_enable_fbc;
@@ -6095,58 +5560,27 @@ void intel_init_pm(struct drm_device *dev)
6095 if (HAS_PCH_SPLIT(dev)) { 5560 if (HAS_PCH_SPLIT(dev)) {
6096 intel_setup_wm_latency(dev); 5561 intel_setup_wm_latency(dev);
6097 5562
6098 if (IS_GEN5(dev)) { 5563 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
6099 if (dev_priv->wm.pri_latency[1] && 5564 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
6100 dev_priv->wm.spr_latency[1] && 5565 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
6101 dev_priv->wm.cur_latency[1]) 5566 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
6102 dev_priv->display.update_wm = ironlake_update_wm; 5567 dev_priv->display.update_wm = ilk_update_wm;
6103 else { 5568 dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
6104 DRM_DEBUG_KMS("Failed to get proper latency. " 5569 } else {
6105 "Disable CxSR\n"); 5570 DRM_DEBUG_KMS("Failed to read display plane latency. "
6106 dev_priv->display.update_wm = NULL; 5571 "Disable CxSR\n");
6107 } 5572 }
5573
5574 if (IS_GEN5(dev))
6108 dev_priv->display.init_clock_gating = ironlake_init_clock_gating; 5575 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
6109 } else if (IS_GEN6(dev)) { 5576 else if (IS_GEN6(dev))
6110 if (dev_priv->wm.pri_latency[0] &&
6111 dev_priv->wm.spr_latency[0] &&
6112 dev_priv->wm.cur_latency[0]) {
6113 dev_priv->display.update_wm = sandybridge_update_wm;
6114 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
6115 } else {
6116 DRM_DEBUG_KMS("Failed to read display plane latency. "
6117 "Disable CxSR\n");
6118 dev_priv->display.update_wm = NULL;
6119 }
6120 dev_priv->display.init_clock_gating = gen6_init_clock_gating; 5577 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
6121 } else if (IS_IVYBRIDGE(dev)) { 5578 else if (IS_IVYBRIDGE(dev))
6122 if (dev_priv->wm.pri_latency[0] &&
6123 dev_priv->wm.spr_latency[0] &&
6124 dev_priv->wm.cur_latency[0]) {
6125 dev_priv->display.update_wm = ivybridge_update_wm;
6126 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
6127 } else {
6128 DRM_DEBUG_KMS("Failed to read display plane latency. "
6129 "Disable CxSR\n");
6130 dev_priv->display.update_wm = NULL;
6131 }
6132 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 5579 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
6133 } else if (IS_HASWELL(dev)) { 5580 else if (IS_HASWELL(dev))
6134 if (dev_priv->wm.pri_latency[0] &&
6135 dev_priv->wm.spr_latency[0] &&
6136 dev_priv->wm.cur_latency[0]) {
6137 dev_priv->display.update_wm = haswell_update_wm;
6138 dev_priv->display.update_sprite_wm =
6139 haswell_update_sprite_wm;
6140 } else {
6141 DRM_DEBUG_KMS("Failed to read display plane latency. "
6142 "Disable CxSR\n");
6143 dev_priv->display.update_wm = NULL;
6144 }
6145 dev_priv->display.init_clock_gating = haswell_init_clock_gating; 5581 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
6146 } else if (INTEL_INFO(dev)->gen == 8) { 5582 else if (INTEL_INFO(dev)->gen == 8)
6147 dev_priv->display.init_clock_gating = gen8_init_clock_gating; 5583 dev_priv->display.init_clock_gating = gen8_init_clock_gating;
6148 } else
6149 dev_priv->display.update_wm = NULL;
6150 } else if (IS_VALLEYVIEW(dev)) { 5584 } else if (IS_VALLEYVIEW(dev)) {
6151 dev_priv->display.update_wm = valleyview_update_wm; 5585 dev_priv->display.update_wm = valleyview_update_wm;
6152 dev_priv->display.init_clock_gating = 5586 dev_priv->display.init_clock_gating =
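
The intel_init_pm() hunk above folds the per-platform branches into one latency check: gen5 validates its WM1 latencies (index 1), later PCH platforms validate WM0 (index 0), and only when the check passes are ilk_update_wm/ilk_update_sprite_wm installed. The predicate in isolation, with a stand-in struct for dev_priv->wm:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct wm_latencies { uint16_t pri[5], spr[5], cur[5]; };

    /* gen5 checks level 1; gen6+ checks level 0 */
    static bool latencies_usable(int gen, const struct wm_latencies *wm)
    {
        int l = (gen == 5) ? 1 : 0;

        return wm->pri[l] && wm->spr[l] && wm->cur[l];
    }

    int main(void)
    {
        /* WM0 present, WM1 missing: usable on gen6+, not on gen5 */
        struct wm_latencies wm = { .pri = { 7 }, .spr = { 7 }, .cur = { 7 } };

        printf("gen5: %d, gen6: %d\n",
               latencies_usable(5, &wm), latencies_usable(6, &wm));
        return 0;
    }
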
@@ -6180,21 +5614,21 @@ void intel_init_pm(struct drm_device *dev)
6180 dev_priv->display.update_wm = i9xx_update_wm; 5614 dev_priv->display.update_wm = i9xx_update_wm;
6181 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 5615 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
6182 dev_priv->display.init_clock_gating = gen3_init_clock_gating; 5616 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6183 } else if (IS_I865G(dev)) { 5617 } else if (IS_GEN2(dev)) {
6184 dev_priv->display.update_wm = i830_update_wm; 5618 if (INTEL_INFO(dev)->num_pipes == 1) {
6185 dev_priv->display.init_clock_gating = i85x_init_clock_gating; 5619 dev_priv->display.update_wm = i845_update_wm;
6186 dev_priv->display.get_fifo_size = i830_get_fifo_size;
6187 } else if (IS_I85X(dev)) {
6188 dev_priv->display.update_wm = i9xx_update_wm;
6189 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
6190 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
6191 } else {
6192 dev_priv->display.update_wm = i830_update_wm;
6193 dev_priv->display.init_clock_gating = i830_init_clock_gating;
6194 if (IS_845G(dev))
6195 dev_priv->display.get_fifo_size = i845_get_fifo_size; 5620 dev_priv->display.get_fifo_size = i845_get_fifo_size;
6196 else 5621 } else {
5622 dev_priv->display.update_wm = i9xx_update_wm;
6197 dev_priv->display.get_fifo_size = i830_get_fifo_size; 5623 dev_priv->display.get_fifo_size = i830_get_fifo_size;
5624 }
5625
5626 if (IS_I85X(dev) || IS_I865G(dev))
5627 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
5628 else
5629 dev_priv->display.init_clock_gating = i830_init_clock_gating;
5630 } else {
5631 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
6198 } 5632 }
6199} 5633}
6200 5634
@@ -6289,10 +5723,19 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
6289 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6; 5723 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
6290} 5724}
6291 5725
6292void intel_pm_init(struct drm_device *dev) 5726void intel_pm_setup(struct drm_device *dev)
6293{ 5727{
6294 struct drm_i915_private *dev_priv = dev->dev_private; 5728 struct drm_i915_private *dev_priv = dev->dev_private;
6295 5729
5730 mutex_init(&dev_priv->rps.hw_lock);
5731
5732 mutex_init(&dev_priv->pc8.lock);
5733 dev_priv->pc8.requirements_met = false;
5734 dev_priv->pc8.gpu_idle = false;
5735 dev_priv->pc8.irqs_disabled = false;
5736 dev_priv->pc8.enabled = false;
5737 dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
5738 INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
6296 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 5739 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
6297 intel_gen6_powersave_work); 5740 intel_gen6_powersave_work);
6298} 5741}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e05a0216cd9b..8fcb32a02cb4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -663,14 +663,15 @@ gen6_add_request(struct intel_ring_buffer *ring)
663 struct drm_device *dev = ring->dev; 663 struct drm_device *dev = ring->dev;
664 struct drm_i915_private *dev_priv = dev->dev_private; 664 struct drm_i915_private *dev_priv = dev->dev_private;
665 struct intel_ring_buffer *useless; 665 struct intel_ring_buffer *useless;
666 int i, ret; 666 int i, ret, num_dwords = 4;
667 667
668 ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) * 668 if (i915_semaphore_is_enabled(dev))
669 MBOX_UPDATE_DWORDS) + 669 num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
670 4); 670#undef MBOX_UPDATE_DWORDS
671
672 ret = intel_ring_begin(ring, num_dwords);
671 if (ret) 673 if (ret)
672 return ret; 674 return ret;
673#undef MBOX_UPDATE_DWORDS
674 675
675 for_each_ring(useless, dev_priv, i) { 676 for_each_ring(useless, dev_priv, i) {
676 u32 mbox_reg = ring->signal_mbox[i]; 677 u32 mbox_reg = ring->signal_mbox[i];
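
gen6_add_request() now sizes its reservation dynamically: the base emission is 4 dwords, and the per-ring mailbox updates are added only when semaphores are enabled. The arithmetic as a standalone check; the ring count and per-mailbox cost are stand-ins for I915_NUM_RINGS and MBOX_UPDATE_DWORDS:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_RINGS          3 /* stand-in for I915_NUM_RINGS */
    #define MBOX_UPDATE_DWORDS 4 /* stand-in per-mailbox cost */

    static int request_dwords(bool semaphores)
    {
        int num_dwords = 4; /* base add-request emission */

        if (semaphores)
            num_dwords += (NUM_RINGS - 1) * MBOX_UPDATE_DWORDS;
        return num_dwords;
    }

    int main(void)
    {
        printf("plain: %d dwords, with semaphores: %d dwords\n",
               request_dwords(false), request_dwords(true));
        return 0;
    }
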
@@ -1606,8 +1607,8 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1606 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); 1607 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
1607} 1608}
1608 1609
1609static int __intel_ring_begin(struct intel_ring_buffer *ring, 1610static int __intel_ring_prepare(struct intel_ring_buffer *ring,
1610 int bytes) 1611 int bytes)
1611{ 1612{
1612 int ret; 1613 int ret;
1613 1614
@@ -1623,7 +1624,6 @@ static int __intel_ring_begin(struct intel_ring_buffer *ring,
1623 return ret; 1624 return ret;
1624 } 1625 }
1625 1626
1626 ring->space -= bytes;
1627 return 0; 1627 return 0;
1628} 1628}
1629 1629
@@ -1638,12 +1638,17 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1638 if (ret) 1638 if (ret)
1639 return ret; 1639 return ret;
1640 1640
1641 ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
1642 if (ret)
1643 return ret;
1644
1641 /* Preallocate the olr before touching the ring */ 1645 /* Preallocate the olr before touching the ring */
1642 ret = intel_ring_alloc_seqno(ring); 1646 ret = intel_ring_alloc_seqno(ring);
1643 if (ret) 1647 if (ret)
1644 return ret; 1648 return ret;
1645 1649
1646 return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t)); 1650 ring->space -= num_dwords * sizeof(uint32_t);
1651 return 0;
1647} 1652}
1648 1653
1649void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) 1654void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
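
The intel_ring_begin() rework above separates concerns: __intel_ring_prepare() only guarantees space (wrapping or waiting as needed) and no longer decrements ring->space, the lazy seqno is preallocated next, and the space is consumed as the last step once nothing can fail anymore. A toy ring-space model of that ordering (errno values and the wrap/wait path are compressed away):

    #include <stdio.h>

    struct ring { int space; };

    /* Guarantee capacity without consuming it; the driver would wrap
     * the tail or wait for the GPU to retire requests here. */
    static int ring_prepare(struct ring *r, int bytes)
    {
        return (r->space >= bytes) ? 0 : -1;
    }

    static int alloc_seqno(void) { return 0; } /* can fail in the driver */

    static int ring_begin(struct ring *r, int num_dwords)
    {
        int bytes = num_dwords * 4;

        if (ring_prepare(r, bytes)) /* 1: make room */
            return -1;
        if (alloc_seqno())          /* 2: preallocate before touching ring */
            return -1;
        r->space -= bytes;          /* 3: consume only when success is sure */
        return 0;
    }

    int main(void)
    {
        struct ring r = { .space = 64 };

        printf("begin: %d, space left: %d\n", ring_begin(&r, 8), r.space);
        return 0;
    }
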
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 90a3f6db8288..fe4de89c374c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -230,7 +230,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
230 u32 sprctl, sprscale = 0; 230 u32 sprctl, sprscale = 0;
231 unsigned long sprsurf_offset, linear_offset; 231 unsigned long sprsurf_offset, linear_offset;
232 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 232 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
233 bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
234 233
235 sprctl = I915_READ(SPRCTL(pipe)); 234 sprctl = I915_READ(SPRCTL(pipe));
236 235
@@ -291,21 +290,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
291 crtc_w--; 290 crtc_w--;
292 crtc_h--; 291 crtc_h--;
293 292
294 /* 293 if (crtc_w != src_w || crtc_h != src_h)
295 * IVB workaround: must disable low power watermarks for at least
296 * one frame before enabling scaling. LP watermarks can be re-enabled
297 * when scaling is disabled.
298 */
299 if (crtc_w != src_w || crtc_h != src_h) {
300 dev_priv->sprite_scaling_enabled |= 1 << pipe;
301
302 if (!scaling_was_enabled) {
303 intel_update_watermarks(crtc);
304 intel_wait_for_vblank(dev, pipe);
305 }
306 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; 294 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
307 } else
308 dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
309 295
310 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 296 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
311 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); 297 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -332,10 +318,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
332 I915_MODIFY_DISPBASE(SPRSURF(pipe), 318 I915_MODIFY_DISPBASE(SPRSURF(pipe),
333 i915_gem_obj_ggtt_offset(obj) + sprsurf_offset); 319 i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
334 POSTING_READ(SPRSURF(pipe)); 320 POSTING_READ(SPRSURF(pipe));
335
336 /* potentially re-enable LP watermarks */
337 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
338 intel_update_watermarks(crtc);
339} 321}
340 322
341static void 323static void
@@ -345,7 +327,6 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
345 struct drm_i915_private *dev_priv = dev->dev_private; 327 struct drm_i915_private *dev_priv = dev->dev_private;
346 struct intel_plane *intel_plane = to_intel_plane(plane); 328 struct intel_plane *intel_plane = to_intel_plane(plane);
347 int pipe = intel_plane->pipe; 329 int pipe = intel_plane->pipe;
348 bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
349 330
350 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); 331 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
351 /* Can't leave the scaler enabled... */ 332 /* Can't leave the scaler enabled... */
@@ -355,13 +336,13 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
355 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); 336 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
356 POSTING_READ(SPRSURF(pipe)); 337 POSTING_READ(SPRSURF(pipe));
357 338
358 dev_priv->sprite_scaling_enabled &= ~(1 << pipe); 339 /*
340 * Avoid underruns when disabling the sprite.
341 * FIXME remove once watermark updates are done properly.
342 */
343 intel_wait_for_vblank(dev, pipe);
359 344
360 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); 345 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
361
362 /* potentially re-enable LP watermarks */
363 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
364 intel_update_watermarks(crtc);
365} 346}
366 347
367static int 348static int
@@ -488,7 +469,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
488 crtc_h--; 469 crtc_h--;
489 470
490 dvsscale = 0; 471 dvsscale = 0;
491 if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h) 472 if (crtc_w != src_w || crtc_h != src_h)
492 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; 473 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
493 474
494 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); 475 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -528,6 +509,12 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
528 I915_MODIFY_DISPBASE(DVSSURF(pipe), 0); 509 I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
529 POSTING_READ(DVSSURF(pipe)); 510 POSTING_READ(DVSSURF(pipe));
530 511
512 /*
513 * Avoid underruns when disabling the sprite.
514 * FIXME remove once watermark updates are done properly.
515 */
516 intel_wait_for_vblank(dev, pipe);
517
531 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); 518 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
532} 519}
533 520
@@ -661,6 +648,15 @@ format_is_yuv(uint32_t format)
661 } 648 }
662} 649}
663 650
651static bool colorkey_enabled(struct intel_plane *intel_plane)
652{
653 struct drm_intel_sprite_colorkey key;
654
655 intel_plane->get_colorkey(&intel_plane->base, &key);
656
657 return key.flags != I915_SET_COLORKEY_NONE;
658}
659
664static int 660static int
665intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, 661intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
666 struct drm_framebuffer *fb, int crtc_x, int crtc_y, 662 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
@@ -846,7 +842,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
846 * If the sprite is completely covering the primary plane, 842 * If the sprite is completely covering the primary plane,
847 * we can disable the primary and save power. 843 * we can disable the primary and save power.
848 */ 844 */
849 disable_primary = drm_rect_equals(&dst, &clip); 845 disable_primary = drm_rect_equals(&dst, &clip) && !colorkey_enabled(intel_plane);
850 WARN_ON(disable_primary && !visible && intel_crtc->active); 846 WARN_ON(disable_primary && !visible && intel_crtc->active);
851 847
852 mutex_lock(&dev->struct_mutex); 848 mutex_lock(&dev->struct_mutex);
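
The primary plane may only be switched off when the sprite genuinely hides it, and an active colorkey means sprite pixels can become transparent, so the last hunk adds colorkey_enabled() to the condition. The decision reduces to the following; drm_rect_equals() and the colorkey query are modeled, not the real API:

    #include <stdbool.h>
    #include <stdio.h>

    struct rect { int x1, y1, x2, y2; };

    static bool rect_equals(const struct rect *a, const struct rect *b)
    {
        return a->x1 == b->x1 && a->y1 == b->y1 &&
               a->x2 == b->x2 && a->y2 == b->y2;
    }

    /* Disable the primary only if the sprite covers the whole clip AND
     * no colorkey can punch holes through it. */
    static bool can_disable_primary(const struct rect *dst,
                                    const struct rect *clip, bool colorkey)
    {
        return rect_equals(dst, clip) && !colorkey;
    }

    int main(void)
    {
        struct rect full = { 0, 0, 1920, 1080 };

        printf("covered, no key: %d\n", can_disable_primary(&full, &full, false));
        printf("covered, keyed:  %d\n", can_disable_primary(&full, &full, true));
        return 0;
    }
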
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
index 48f06378d3f9..2ea5568b6cf5 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -104,11 +104,8 @@ nouveau_subdev_create_(struct nouveau_object *parent,
104 104
105 if (parent) { 105 if (parent) {
106 struct nouveau_device *device = nv_device(parent); 106 struct nouveau_device *device = nv_device(parent);
107 int subidx = nv_hclass(subdev) & 0xff;
108
109 subdev->debug = nouveau_dbgopt(device->dbgopt, subname); 107 subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
110 subdev->mmio = nv_subdev(device)->mmio; 108 subdev->mmio = nv_subdev(device)->mmio;
111 device->subdev[subidx] = *pobject;
112 } 109 }
113 110
114 return 0; 111 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 9135b25a29d0..dd01c6c435d6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -268,6 +268,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
268 if (ret) 268 if (ret)
269 return ret; 269 return ret;
270 270
271 device->subdev[i] = devobj->subdev[i];
272
271 /* note: can't init *any* subdevs until devinit has been run 273 /* note: can't init *any* subdevs until devinit has been run
272 * due to not knowing exactly what the vbios init tables will 274 * due to not knowing exactly what the vbios init tables will
273 * mess with. devinit also can't be run until all of its 275 * mess with. devinit also can't be run until all of its
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 8d06eef2b9ee..dbc5e33de94f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -161,7 +161,7 @@ nvc0_identify(struct nouveau_device *device)
161 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 161 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
162 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 162 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
163 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 163 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
164 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; 164 device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 434bb4b0fa2e..5c8a63dc506a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -334,7 +334,7 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds)
334 while ((mthd = &mthds[i++]) && (init = mthd->init)) { 334 while ((mthd = &mthds[i++]) && (init = mthd->init)) {
335 u32 addr = 0x80000000 | mthd->oclass; 335 u32 addr = 0x80000000 | mthd->oclass;
336 for (data = 0; init->count; init++) { 336 for (data = 0; init->count; init++) {
337 if (data != init->data) { 337 if (init == mthd->init || data != init->data) {
338 nv_wr32(priv, 0x40448c, init->data); 338 nv_wr32(priv, 0x40448c, init->data);
339 data = init->data; 339 data = init->data;
340 } 340 }
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 8541aa382ff2..d89dbdf39b0d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -75,6 +75,11 @@ struct nouveau_fb {
75static inline struct nouveau_fb * 75static inline struct nouveau_fb *
76nouveau_fb(void *obj) 76nouveau_fb(void *obj)
77{ 77{
 78 /* fbram uses this before the device's subdev pointer is valid */
79 if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
80 nv_subidx(obj) == NVDEV_SUBDEV_FB)
81 return obj;
82
78 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB]; 83 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
79} 84}
80 85
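
nouveau_fb() above (and nouveau_instmem() below) gain an early return for the case where the caller is that subdev itself: during construction the device's subdev[] slot is not filled in yet, the same window the base.c hunk narrows by copying devobj->subdev[i] into device->subdev[i]. The pattern in isolation, with a toy object model in place of nouveau's class system:

    #include <stdio.h>

    enum subdev_idx { SUBDEV_FB, SUBDEV_MAX };

    struct object { int is_subdev; enum subdev_idx idx; };
    struct device { struct object *subdev[SUBDEV_MAX]; };

    static struct device dev; /* subdev[] is filled in late */

    /* If the caller *is* the FB subdev, return it directly; the device's
     * pointer may not be valid yet while the subdev is constructed. */
    static struct object *get_fb(struct object *obj)
    {
        if (obj->is_subdev && obj->idx == SUBDEV_FB)
            return obj;
        return dev.subdev[SUBDEV_FB];
    }

    int main(void)
    {
        struct object fb = { .is_subdev = 1, .idx = SUBDEV_FB };

        /* dev.subdev[SUBDEV_FB] is still NULL, yet the lookup works */
        printf("self-lookup ok: %d\n", get_fb(&fb) == &fb);
        return 0;
    }
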
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 9fa5da723871..7f50a858b16f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -73,7 +73,7 @@ struct nouveau_i2c {
73 int (*identify)(struct nouveau_i2c *, int index, 73 int (*identify)(struct nouveau_i2c *, int index,
74 const char *what, struct nouveau_i2c_board_info *, 74 const char *what, struct nouveau_i2c_board_info *,
75 bool (*match)(struct nouveau_i2c_port *, 75 bool (*match)(struct nouveau_i2c_port *,
76 struct i2c_board_info *)); 76 struct i2c_board_info *, void *), void *);
77 struct list_head ports; 77 struct list_head ports;
78}; 78};
79 79
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
index ec7a54e91a08..4aca33887aaa 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -50,6 +50,13 @@ struct nouveau_instmem {
50static inline struct nouveau_instmem * 50static inline struct nouveau_instmem *
51nouveau_instmem(void *obj) 51nouveau_instmem(void *obj)
52{ 52{
53 /* nv04/nv40 impls need to create objects in their constructor,
54 * which is before the subdev pointer is valid
55 */
56 if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
57 nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
58 return obj;
59
53 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM]; 60 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
54} 61}
55 62
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 420908cb82b6..df1b1b423093 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -365,13 +365,13 @@ static u16
365init_script(struct nouveau_bios *bios, int index) 365init_script(struct nouveau_bios *bios, int index)
366{ 366{
367 struct nvbios_init init = { .bios = bios }; 367 struct nvbios_init init = { .bios = bios };
368 u16 data; 368 u16 bmp_ver = bmp_version(bios), data;
369 369
370 if (bmp_version(bios) && bmp_version(bios) < 0x0510) { 370 if (bmp_ver && bmp_ver < 0x0510) {
371 if (index > 1) 371 if (index > 1 || bmp_ver < 0x0100)
372 return 0x0000; 372 return 0x0000;
373 373
374 data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18); 374 data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
375 return nv_ro16(bios, data + (index * 2)); 375 return nv_ro16(bios, data + (index * 2));
376 } 376 }
377 377
@@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init)
1294 u16 offset = nv_ro16(bios, init->offset + 1); 1294 u16 offset = nv_ro16(bios, init->offset + 1);
1295 1295
1296 trace("JUMP\t0x%04x\n", offset); 1296 trace("JUMP\t0x%04x\n", offset);
1297 init->offset = offset; 1297
1298 if (init_exec(init))
1299 init->offset = offset;
1300 else
1301 init->offset += 3;
1298} 1302}
1299 1303
1300/** 1304/**
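
The init_jump() fix makes JUMP respect the interpreter's execution state like every other opcode: when init_exec() reports that execution is suppressed (inside a failed condition), the branch must not be taken and the pointer simply advances past the 3-byte instruction. In miniature, with a toy context in place of struct nvbios_init:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct init_ctx { unsigned int offset; bool exec; };

    /* JUMP is 1 opcode byte + 2 offset bytes; follow the branch only
     * while execution is enabled, otherwise step over it. */
    static void op_jump(struct init_ctx *init, uint16_t target)
    {
        if (init->exec)
            init->offset = target;
        else
            init->offset += 3;
    }

    int main(void)
    {
        struct init_ctx live = { .offset = 0x100, .exec = true };
        struct init_ctx dead = { .offset = 0x100, .exec = false };

        op_jump(&live, 0x200);
        op_jump(&dead, 0x200);
        printf("taken: 0x%x, suppressed: 0x%x\n", live.offset, dead.offset);
        return 0;
    }
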
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 041fd5edaebf..c33c03d2f4af 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -197,7 +197,7 @@ static int
197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, 197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
198 struct nouveau_i2c_board_info *info, 198 struct nouveau_i2c_board_info *info,
199 bool (*match)(struct nouveau_i2c_port *, 199 bool (*match)(struct nouveau_i2c_port *,
200 struct i2c_board_info *)) 200 struct i2c_board_info *, void *), void *data)
201{ 201{
202 struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index); 202 struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index);
203 int i; 203 int i;
@@ -221,7 +221,7 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
221 } 221 }
222 222
223 if (nv_probe_i2c(port, info[i].dev.addr) && 223 if (nv_probe_i2c(port, info[i].dev.addr) &&
224 (!match || match(port, &info[i].dev))) { 224 (!match || match(port, &info[i].dev, data))) {
225 nv_info(i2c, "detected %s: %s\n", what, 225 nv_info(i2c, "detected %s: %s\n", what,
226 info[i].dev.type); 226 info[i].dev.type);
227 return i; 227 return i;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index e44ed7b93c6d..7610fc5f8fa2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -29,9 +29,9 @@
29 29
30static bool 30static bool
31probe_monitoring_device(struct nouveau_i2c_port *i2c, 31probe_monitoring_device(struct nouveau_i2c_port *i2c,
32 struct i2c_board_info *info) 32 struct i2c_board_info *info, void *data)
33{ 33{
34 struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); 34 struct nouveau_therm_priv *priv = data;
35 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 35 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
36 struct i2c_client *client; 36 struct i2c_client *client;
37 37
@@ -96,7 +96,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
96 }; 96 };
97 97
98 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 98 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
99 board, probe_monitoring_device); 99 board, probe_monitoring_device, therm);
100 if (priv->ic) 100 if (priv->ic)
101 return; 101 return;
102 } 102 }
@@ -108,7 +108,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
108 }; 108 };
109 109
110 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 110 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
111 board, probe_monitoring_device); 111 board, probe_monitoring_device, therm);
112 if (priv->ic) 112 if (priv->ic)
113 return; 113 return;
114 } 114 }
@@ -117,5 +117,5 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
117 device. Let's try our static list. 117 device. Let's try our static list.
118 */ 118 */
119 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 119 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
120 nv_board_infos, probe_monitoring_device); 120 nv_board_infos, probe_monitoring_device, therm);
121} 121}
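
The identify() signature change is a standard closure-argument fix: instead of probe_monitoring_device() reaching back from the port to the therm object via nouveau_therm(i2c), which is fragile while therm is still being constructed, the caller now threads an opaque void * through to the match callback. The shape of the pattern, with toy types in place of the nouveau structs:

    #include <stdbool.h>
    #include <stdio.h>

    struct port { int addr; };
    struct therm { int expected_addr; };

    typedef bool (*match_fn)(struct port *, void *data);

    /* The iterator passes the caller's context through untouched */
    static int identify(struct port *ports, int n, match_fn match, void *data)
    {
        for (int i = 0; i < n; i++)
            if (match(&ports[i], data))
                return i;
        return -1;
    }

    static bool probe_device(struct port *p, void *data)
    {
        struct therm *therm = data; /* context arrives as an argument */

        return p->addr == therm->expected_addr;
    }

    int main(void)
    {
        struct port ports[2] = { { 0x2d }, { 0x4c } };
        struct therm therm = { .expected_addr = 0x4c };

        printf("matched index: %d\n", identify(ports, 2, probe_device, &therm));
        return 0;
    }
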
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 936a71c59080..7fdc51e2a571 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -643,7 +643,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
643 get_tmds_slave(encoder)) 643 get_tmds_slave(encoder))
644 return; 644 return;
645 645
646 type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL); 646 type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL);
647 if (type < 0) 647 if (type < 0)
648 return; 648 return;
649 649
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index cc4b208ce546..244822df8ffc 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -59,7 +59,7 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
59 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 59 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
60 60
61 return i2c->identify(i2c, i2c_index, "TV encoder", 61 return i2c->identify(i2c, i2c_index, "TV encoder",
62 nv04_tv_encoder_info, NULL); 62 nv04_tv_encoder_info, NULL, NULL);
63} 63}
64 64
65 65
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 6828d81ed7b9..900fae01793e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -447,6 +447,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
447 if (ret) 447 if (ret)
448 goto done; 448 goto done;
449 449
450 info->offset = ntfy->node->offset;
451
450done: 452done:
451 if (ret) 453 if (ret)
452 nouveau_abi16_ntfy_fini(chan, ntfy); 454 nouveau_abi16_ntfy_fini(chan, ntfy);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 95c740454049..ba0183fb84f3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -51,6 +51,7 @@ static struct nouveau_dsm_priv {
51 bool dsm_detected; 51 bool dsm_detected;
52 bool optimus_detected; 52 bool optimus_detected;
53 acpi_handle dhandle; 53 acpi_handle dhandle;
54 acpi_handle other_handle;
54 acpi_handle rom_handle; 55 acpi_handle rom_handle;
55} nouveau_dsm_priv; 56} nouveau_dsm_priv;
56 57
@@ -260,9 +261,10 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
 	if (!dhandle)
 		return false;
 
-	if (!acpi_has_method(dhandle, "_DSM"))
+	if (!acpi_has_method(dhandle, "_DSM")) {
+		nouveau_dsm_priv.other_handle = dhandle;
 		return false;
-
+	}
 	if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
 		retval |= NOUVEAU_DSM_HAS_MUX;
 
@@ -338,6 +340,16 @@ static bool nouveau_dsm_detect(void)
 		printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
 		       acpi_method_name);
 		nouveau_dsm_priv.dsm_detected = true;
+		/*
+		 * On some systems hotplug events are generated for the device
+		 * being switched off when _DSM is executed. They cause ACPI
+		 * hotplug to trigger and attempt to remove the device from
+		 * the system, which causes it to break down. Prevent that from
+		 * happening by setting the no_hotplug flag for the involved
+		 * ACPI device objects.
+		 */
+		acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
+		acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
 		ret = true;
 	}
 
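
The comment block above is the heart of the fix: the handle that fails the _DSM probe still belongs to one of the two GPUs involved in switching, so it is remembered as other_handle and later flagged alongside dhandle. A standalone sketch of that record-both-handles idea, with invented types (the real code uses acpi_handle and acpi_bus_no_hotplug()):

#include <stdbool.h>
#include <stdio.h>

struct dev { const char *name; bool has_dsm; bool no_hotplug; };

static struct dev *dsm_handle, *other_handle;

static bool probe(struct dev *d)
{
	if (!d->has_dsm) {
		other_handle = d;	/* remember the non-DSM GPU too */
		return false;
	}
	dsm_handle = d;
	return true;
}

int main(void)
{
	struct dev igp = { "igp", false, false };
	struct dev dgpu = { "dgpu", true, false };

	probe(&igp);
	probe(&dgpu);
	/* flag both devices so ACPI hotplug leaves them alone */
	if (dsm_handle)
		dsm_handle->no_hotplug = true;
	if (other_handle)
		other_handle->no_hotplug = true;
	printf("%s:%d %s:%d\n", igp.name, igp.no_hotplug,
	       dgpu.name, dgpu.no_hotplug);
	return 0;
}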
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 29c3efdfc7dd..25ea82f8def3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -610,7 +610,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	ret = nouveau_fence_sync(fence, chan);
 	nouveau_fence_unref(&fence);
 	if (ret)
-		return ret;
+		goto fail_free;
 
 	if (new_bo != old_bo) {
 		ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
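
Returning directly after nouveau_fence_sync() fails would leak the flip state allocated earlier in the function; jumping to the existing fail_free label unwinds it. The kernel's usual cleanup-ladder shape, sketched standalone with a placeholder resource:

#include <stdio.h>
#include <stdlib.h>

/* "state" stands in for the page-flip bookkeeping that
 * nouveau_crtc_page_flip() allocates before this point. */
static int page_flip(int fail_sync)
{
	int ret = 0;
	char *state = malloc(64);

	if (!state)
		return -1;

	if (fail_sync) {
		ret = -5;		/* e.g. nouveau_fence_sync() failed */
		goto fail_free;		/* was "return ret": leaked "state" */
	}

	printf("flip programmed\n");
fail_free:
	free(state);
	return ret;
}

int main(void)
{
	return page_flip(1) == -5 ? 0 : 1;
}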
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7a3759f1c41a..98a22e6e27a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
 	if (nouveau_runtime_pm == 0)
 		return -EINVAL;
 
+	/* are we optimus enabled? */
+	if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+		DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+		return -EINVAL;
+	}
+
 	nv_debug_level(SILENT);
 	drm_kms_helper_poll_disable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
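
The added guard refuses runtime power-off when neither Optimus nor the v1 DSM mux is present, since powering down the only usable GPU would leave no way to bring it back. The decision reduces to a simple predicate; the function names below mirror the nouveau helpers, but the bodies are invented for this sketch:

#include <stdbool.h>
#include <stdio.h>

static int runtime_pm = -1;	/* -1 = auto, 0 = never, 1 = forced on */
static bool is_optimus(void) { return false; }	/* invented stand-ins */
static bool is_v1_dsm(void)  { return false; }

/* Mirrors the guard added above: in "auto" mode, only allow runtime
 * suspend when some switching method exists to restore the GPU. */
static int runtime_suspend(void)
{
	if (runtime_pm == 0)
		return -22;	/* -EINVAL */
	if (runtime_pm == -1 && !is_optimus() && !is_v1_dsm()) {
		printf("failing to power off - not optimus\n");
		return -22;
	}
	printf("powering off\n");
	return 0;
}

int main(void)
{
	return runtime_suspend() == -22 ? 0 : 1;
}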
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 80a20120e625..0b9621c9aeea 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
 	}
 
 	if (tiling_flags & RADEON_TILING_MACRO) {
-		if (rdev->family >= CHIP_BONAIRE)
-			tmp = rdev->config.cik.tile_config;
-		else if (rdev->family >= CHIP_TAHITI)
-			tmp = rdev->config.si.tile_config;
-		else if (rdev->family >= CHIP_CAYMAN)
-			tmp = rdev->config.cayman.tile_config;
-		else
-			tmp = rdev->config.evergreen.tile_config;
+		evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
 
-		switch ((tmp & 0xf0) >> 4) {
-		case 0: /* 4 banks */
-			fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
-			break;
-		case 1: /* 8 banks */
-		default:
-			fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
-			break;
-		case 2: /* 16 banks */
-			fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
-			break;
+		/* Set NUM_BANKS. */
+		if (rdev->family >= CHIP_BONAIRE) {
+			unsigned tileb, index, num_banks, tile_split_bytes;
+
+			/* Calculate the macrotile mode index. */
+			tile_split_bytes = 64 << tile_split;
+			tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+			tileb = min(tile_split_bytes, tileb);
+
+			for (index = 0; tileb > 64; index++) {
+				tileb >>= 1;
+			}
+
+			if (index >= 16) {
+				DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+					  target_fb->bits_per_pixel, tile_split);
+				return -EINVAL;
+			}
+
+			num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+			fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
+		} else {
+			/* SI and older. */
+			if (rdev->family >= CHIP_TAHITI)
+				tmp = rdev->config.si.tile_config;
+			else if (rdev->family >= CHIP_CAYMAN)
+				tmp = rdev->config.cayman.tile_config;
+			else
+				tmp = rdev->config.evergreen.tile_config;
+
+			switch ((tmp & 0xf0) >> 4) {
+			case 0: /* 4 banks */
+				fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+				break;
+			case 1: /* 8 banks */
+			default:
+				fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+				break;
+			case 2: /* 16 banks */
+				fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+				break;
+			}
 		}
 
 		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
-
-		evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
 		fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
 		fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
 		fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
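
The macrotile index computation above is log2 of the tile size in bytes past 64, clamped by the tile split: an 8x8 macro tile at a given bpp occupies 8*8*bpp/8 bytes, and each halving past 64 bytes bumps the index. A standalone check of that arithmetic (the shift-and-mask into macrotile_mode_array is hardware-specific and omitted here):

#include <stdio.h>

/* Reproduces the index calculation from the hunk above. */
static int macrotile_index(unsigned bpp, unsigned tile_split)
{
	unsigned tile_split_bytes = 64u << tile_split;
	unsigned tileb = 8 * 8 * bpp / 8;	/* bytes in an 8x8 tile */
	unsigned index;

	if (tileb > tile_split_bytes)
		tileb = tile_split_bytes;
	for (index = 0; tileb > 64; index++)
		tileb >>= 1;
	return index >= 16 ? -1 : (int)index;
}

int main(void)
{
	/* 32 bpp, tile_split 2 (256 bytes): tileb = min(256, 256) = 256,
	 * two halvings reach 64, so the index is 2. */
	printf("%d\n", macrotile_index(32, 2));
	return 0;
}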
@@ -1180,23 +1202,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
 		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
 	if (rdev->family >= CHIP_BONAIRE) {
-		u32 num_pipe_configs = rdev->config.cik.max_tile_pipes;
-		u32 num_rb = rdev->config.cik.max_backends_per_se;
-		if (num_pipe_configs > 8)
-			num_pipe_configs = 8;
-		if (num_pipe_configs == 8)
-			fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16);
-		else if (num_pipe_configs == 4) {
-			if (num_rb == 4)
-				fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
-			else if (num_rb < 4)
-				fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
-		} else if (num_pipe_configs == 2)
-			fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
+		/* Read the pipe config from the 2D TILED SCANOUT mode.
+		 * It should be the same for the other modes too, but not all
+		 * modes set the pipe config field. */
+		u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
+
+		fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
 	} else if ((rdev->family == CHIP_TAHITI) ||
 		   (rdev->family == CHIP_PITCAIRN))
 		fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
-	else if (rdev->family == CHIP_VERDE)
+	else if ((rdev->family == CHIP_VERDE) ||
+		 (rdev->family == CHIP_OLAND) ||
+		 (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
 		fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
 
 	switch (radeon_crtc->crtc_id) {
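
Rather than re-deriving the pipe configuration from pipe and RB counts, the new code reads it back from a tile mode the kernel already programmed: per the hunk's comment, tile_mode_array[10] is the 2D TILED SCANOUT entry, and the 0x1f mask at shift 6 selects the five-bit pipe config field. The extraction is a plain shift-and-mask:

#include <stdio.h>

/* Extract the PIPE_CONFIG field (bits 6..10) from a tile mode word, as
 * the hunk above does with tile_mode_array[10]. The value used here is
 * made up for illustration. */
static unsigned pipe_config(unsigned tile_mode)
{
	return (tile_mode >> 6) & 0x1f;
}

int main(void)
{
	unsigned tile_mode = 0x4 << 6;	/* hypothetical register value */
	printf("pipe_config = %u\n", pipe_config(tile_mode));
	return 0;
}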
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 9e50dd5d0e42..e7f6334138a1 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width)
  * Returns the disabled RB bitmask.
  */
 static u32 cik_get_rb_disabled(struct radeon_device *rdev,
-			       u32 max_rb_num, u32 se_num,
+			       u32 max_rb_num_per_se,
 			       u32 sh_per_se)
 {
 	u32 data, mask;
@@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
 
 	data >>= BACKEND_DISABLE_SHIFT;
 
-	mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
+	mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
 
 	return data & mask;
 }
@@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
  */
 static void cik_setup_rb(struct radeon_device *rdev,
 			 u32 se_num, u32 sh_per_se,
-			 u32 max_rb_num)
+			 u32 max_rb_num_per_se)
 {
 	int i, j;
 	u32 data, mask;
@@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
 	for (i = 0; i < se_num; i++) {
 		for (j = 0; j < sh_per_se; j++) {
 			cik_select_se_sh(rdev, i, j);
-			data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+			data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
 			if (rdev->family == CHIP_HAWAII)
 				disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
 			else
@@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev,
 	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
 
 	mask = 1;
-	for (i = 0; i < max_rb_num; i++) {
+	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
 		if (!(disabled_rbs & mask))
 			enabled_rbs |= mask;
 		mask <<= 1;
 	}
 
+	rdev->config.cik.backend_enable_mask = enabled_rbs;
+
 	for (i = 0; i < se_num; i++) {
 		cik_select_se_sh(rdev, i, 0xffffffff);
 		data = 0;
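
The bug fixed here was dividing max_rb_num (already a per-SE count) by se_num a second time, shrinking the disable mask; the rename to max_rb_num_per_se makes the unit explicit, and the enable loop now walks per-SE count times SE count bits. A toy model of the mask construction, assuming a 2-bit bitmap width per shader array as in the SI/CIK code:

#include <stdio.h>

static unsigned create_bitmask(unsigned bit_width)
{
	return (1u << bit_width) - 1;
}

int main(void)
{
	unsigned se_num = 2, sh_per_se = 1, max_rb_num_per_se = 2;
	/* pretend RB 0 of SE 1 is fused off: per-SH disabled masks */
	unsigned disabled_per_sh[2] = { 0x0, 0x1 };
	unsigned disabled_rbs = 0, enabled_rbs = 0, mask = 1;
	unsigned i, j;

	for (i = 0; i < se_num; i++)
		for (j = 0; j < sh_per_se; j++)
			disabled_rbs |= (disabled_per_sh[i] &
				create_bitmask(max_rb_num_per_se / sh_per_se))
				<< ((i * sh_per_se + j) * 2);

	/* old loop bound was max_rb_num_per_se, missing SE 1 entirely */
	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}
	printf("enabled_rbs = 0x%x\n", enabled_rbs);	/* 0xb: RBs 0, 1, 3 */
	return 0;
}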
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index f0f9e1089409..af520d4d362b 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -527,7 +527,7 @@ int cik_copy_dma(struct radeon_device *rdev,
 		radeon_ring_write(ring, 0); /* src/dst endian swap */
 		radeon_ring_write(ring, src_offset & 0xffffffff);
 		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
-		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, dst_offset & 0xffffffff);
 		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
 		src_offset += cur_size_in_bytes;
 		dst_offset += cur_size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index de86493cbc44..713a5d359901 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -174,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
-	if (sad_count < 0) {
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
 		return;
 	}
@@ -235,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
-	if (sad_count < 0) {
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
 		return;
 	}
@@ -308,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev)
 	rdev->audio.enabled = true;
 
 	if (ASIC_IS_DCE8(rdev))
-		rdev->audio.num_pins = 7;
+		rdev->audio.num_pins = 6;
+	else if (ASIC_IS_DCE61(rdev))
+		rdev->audio.num_pins = 4;
 	else
 		rdev->audio.num_pins = 6;
 
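
The sad_count checks tightened above reflect the return convention of drm_edid_to_sad() and drm_edid_to_speaker_allocation(): a negative value is an error, but 0 means the EDID simply has no such block, and in that case nothing is allocated for the output pointer. Treating 0 as success and dereferencing the output is the bug being closed. The defensive shape, sketched with a stand-in function:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for drm_edid_to_sad(): returns the entry count, 0 if the EDID
 * has no SAD block (leaving *out untouched), or negative on error. */
static int edid_to_sad(int have_block, int **out)
{
	if (!have_block)
		return 0;
	*out = calloc(3, sizeof(**out));
	return *out ? 3 : -12;	/* -ENOMEM */
}

int main(void)
{
	int *sads = NULL;
	int count = edid_to_sad(0, &sads);

	if (count <= 0) {	/* "< 0" would fall through with NULL sads */
		fprintf(stderr, "Couldn't read SADs: %d\n", count);
		return 0;
	}
	/* ... program SAD registers from sads[0..count-1] ... */
	free(sads);
	return 0;
}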
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index aa695c4feb3d..0c6d5cef4cf1 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
-	if (sad_count < 0) {
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
 		return;
 	}
@@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
-	if (sad_count < 0) {
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
 		return;
 	}
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 9f11a55962b5..af45b23675ee 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -896,6 +896,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 			   (rdev->pdev->device == 0x999C)) {
 			rdev->config.cayman.max_simds_per_se = 6;
 			rdev->config.cayman.max_backends_per_se = 2;
+			rdev->config.cayman.max_hw_contexts = 8;
+			rdev->config.cayman.sx_max_export_size = 256;
+			rdev->config.cayman.sx_max_export_pos_size = 64;
+			rdev->config.cayman.sx_max_export_smx_size = 192;
 		} else if ((rdev->pdev->device == 0x9903) ||
 			   (rdev->pdev->device == 0x9904) ||
 			   (rdev->pdev->device == 0x990A) ||
@@ -906,6 +910,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 			   (rdev->pdev->device == 0x999D)) {
 			rdev->config.cayman.max_simds_per_se = 4;
 			rdev->config.cayman.max_backends_per_se = 2;
+			rdev->config.cayman.max_hw_contexts = 8;
+			rdev->config.cayman.sx_max_export_size = 256;
+			rdev->config.cayman.sx_max_export_pos_size = 64;
+			rdev->config.cayman.sx_max_export_smx_size = 192;
 		} else if ((rdev->pdev->device == 0x9919) ||
 			   (rdev->pdev->device == 0x9990) ||
 			   (rdev->pdev->device == 0x9991) ||
@@ -916,9 +924,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 			   (rdev->pdev->device == 0x99A0)) {
 			rdev->config.cayman.max_simds_per_se = 3;
 			rdev->config.cayman.max_backends_per_se = 1;
+			rdev->config.cayman.max_hw_contexts = 4;
+			rdev->config.cayman.sx_max_export_size = 128;
+			rdev->config.cayman.sx_max_export_pos_size = 32;
+			rdev->config.cayman.sx_max_export_smx_size = 96;
 		} else {
 			rdev->config.cayman.max_simds_per_se = 2;
 			rdev->config.cayman.max_backends_per_se = 1;
+			rdev->config.cayman.max_hw_contexts = 4;
+			rdev->config.cayman.sx_max_export_size = 128;
+			rdev->config.cayman.sx_max_export_pos_size = 32;
+			rdev->config.cayman.sx_max_export_smx_size = 96;
 		}
 		rdev->config.cayman.max_texture_channel_caches = 2;
 		rdev->config.cayman.max_gprs = 256;
@@ -926,10 +942,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 		rdev->config.cayman.max_gs_threads = 32;
 		rdev->config.cayman.max_stack_entries = 512;
 		rdev->config.cayman.sx_num_of_sets = 8;
-		rdev->config.cayman.sx_max_export_size = 256;
-		rdev->config.cayman.sx_max_export_pos_size = 64;
-		rdev->config.cayman.sx_max_export_smx_size = 192;
-		rdev->config.cayman.max_hw_contexts = 8;
 		rdev->config.cayman.sq_num_cf_insts = 2;
 
 		rdev->config.cayman.sc_prim_fifo_size = 0x40;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 746c0c6c269b..c5519ca4bbc4 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1949,7 +1949,7 @@ struct si_asic {
 	unsigned sc_earlyz_tile_fifo_size;
 
 	unsigned num_tile_pipes;
-	unsigned num_backends_per_se;
+	unsigned backend_enable_mask;
 	unsigned backend_disable_mask_per_asic;
 	unsigned backend_map;
 	unsigned num_texture_channel_caches;
@@ -1979,7 +1979,7 @@ struct cik_asic {
 	unsigned sc_earlyz_tile_fifo_size;
 
 	unsigned num_tile_pipes;
-	unsigned num_backends_per_se;
+	unsigned backend_enable_mask;
 	unsigned backend_disable_mask_per_asic;
 	unsigned backend_map;
 	unsigned num_texture_channel_caches;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f55879dd11c6..f74db43346fd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2030,7 +2030,7 @@ static struct radeon_asic ci_asic = {
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = NULL,
+		.blit = &cik_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &cik_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2132,7 +2132,7 @@ static struct radeon_asic kv_asic = {
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = NULL,
+		.blit = &cik_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &cik_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 9d302eaeea15..485848f889f5 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -33,6 +33,7 @@ static struct radeon_atpx_priv {
 	bool atpx_detected;
 	/* handle for device - and atpx */
 	acpi_handle dhandle;
+	acpi_handle other_handle;
 	struct radeon_atpx atpx;
 } radeon_atpx_priv;
 
@@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
 		return false;
 
 	status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
-	if (ACPI_FAILURE(status))
+	if (ACPI_FAILURE(status)) {
+		radeon_atpx_priv.other_handle = dhandle;
 		return false;
-
+	}
 	radeon_atpx_priv.dhandle = dhandle;
 	radeon_atpx_priv.atpx.handle = atpx_handle;
 	return true;
@@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void)
 		printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
 		       acpi_method_name);
 		radeon_atpx_priv.atpx_detected = true;
+		/*
+		 * On some systems hotplug events are generated for the device
+		 * being switched off when ATPX is executed. They cause ACPI
+		 * hotplug to trigger and attempt to remove the device from
+		 * the system, which causes it to break down. Prevent that from
+		 * happening by setting the no_hotplug flag for the involved
+		 * ACPI device objects.
+		 */
+		acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
+		acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
 		return true;
 	}
 	return false;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e91d548063ef..67fadcf4590f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -77,9 +77,10 @@
  * 2.33.0 - Add SI tiling mode array query
  * 2.34.0 - Add CIK tiling mode array query
  * 2.35.0 - Add CIK macrotile mode array query
+ * 2.36.0 - Fix CIK DCE tiling setup
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	35
+#define KMS_DRIVER_MINOR	36
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -512,15 +513,6 @@ static const struct file_operations radeon_driver_kms_fops = {
 #endif
 };
 
-
-static void
-radeon_pci_shutdown(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
-
-	radeon_driver_unload_kms(dev);
-}
-
 static struct drm_driver kms_driver = {
 	.driver_features =
 	    DRIVER_USE_AGP |
@@ -590,7 +582,6 @@ static struct pci_driver radeon_kms_pci_driver = {
 	.probe = radeon_pci_probe,
 	.remove = radeon_pci_remove,
 	.driver.pm = &radeon_pm_ops,
-	.shutdown = radeon_pci_shutdown,
 };
 
 static int __init radeon_init(void)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index c44574e248d1..5bf50cec017e 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -461,6 +461,15 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	case RADEON_INFO_SI_CP_DMA_COMPUTE:
 		*value = 1;
 		break;
+	case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
+		if (rdev->family >= CHIP_BONAIRE) {
+			*value = rdev->config.cik.backend_enable_mask;
+		} else if (rdev->family >= CHIP_TAHITI) {
+			*value = rdev->config.si.backend_enable_mask;
+		} else {
+			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
+		}
+		break;
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
 		return -EINVAL;
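
With the mask cached by si_setup_rb()/cik_setup_rb(), this new info query lets userspace (Mesa) see exactly which render backends survived harvesting. From the userspace side it is the usual radeon info ioctl pattern; a hedged sketch, where the struct layout and constants are copied from that era's <drm/radeon_drm.h> for self-containment and should be taken from the real header in practice:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct drm_radeon_info {
	uint32_t request;
	uint32_t pad;
	uint64_t value;		/* user pointer the kernel writes through */
};
#define DRM_IOCTL_RADEON_INFO _IOWR('d', 0x67, struct drm_radeon_info)
#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19

static int query_backend_mask(int fd, uint32_t *mask)
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_SI_BACKEND_ENABLED_MASK;
	info.value = (uintptr_t)mask;
	return ioctl(fd, DRM_IOCTL_RADEON_INFO, &info);
}

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* path may vary */
	uint32_t mask = 0;

	if (fd >= 0 && query_backend_mask(fd, &mask) == 0)
		printf("backend_enable_mask = 0x%x\n", mask);
	if (fd >= 0)
		close(fd);
	return 0;
}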
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 373d088bac66..b9c0529b4a2e 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 		return -EINVAL;
 	}
 
-	if ((start >> 28) != (end >> 28)) {
+	if ((start >> 28) != ((end - 1) >> 28)) {
 		DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
 			  start, end);
 		return -EINVAL;
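
The relocation check compares the high bits of start and end to ensure the buffer stays inside one 256MB window; since end is exclusive, a buffer running exactly up to a boundary previously tripped the check even though its last byte is still in the same window. Hence (end - 1). The arithmetic, checked standalone:

#include <stdio.h>

/* 1 if [start, end) crosses a 256MB (1 << 28) boundary. */
static int crosses_256mb(unsigned long long start, unsigned long long end)
{
	return (start >> 28) != ((end - 1) >> 28);
}

int main(void)
{
	/* Buffer ending exactly at the 256MB mark: the old "end >> 28"
	 * test rejected it, yet its last byte is at 0x0FFFFFFF, still
	 * inside the first window. */
	printf("%d\n", crosses_256mb(0x0FF00000ULL, 1ULL << 28));	/* 0 */
	printf("%d\n", crosses_256mb(0x0FF00000ULL, (1ULL << 28) + 1));	/* 1 */
	return 0;
}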
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index e461b45f29a9..35950738bd5e 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
 	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
 	base = G_000100_MC_FB_START(base) << 16;
 	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+	/* Some boards seem to be configured for 128MB of sideport memory,
+	 * but really only have 64MB. Just skip the sideport and use
+	 * UMA memory.
+	 */
+	if (rdev->mc.igp_sideport_enabled &&
+	    (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
+		base += 128 * 1024 * 1024;
+		rdev->mc.real_vram_size -= 128 * 1024 * 1024;
+		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	}
 
 	/* Use K8 direct mapping for fast fb access. */
 	rdev->fastfb_working = false;
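
The workaround keys off the telltale 384MB total, presumably UMA plus a phantom 128MB sideport region on boards that physically carry less. Skipping the sideport means advancing the base past it and shrinking the usable size by the same 128MB. The adjustment as plain arithmetic:

#include <stdio.h>

#define MiB (1024ULL * 1024ULL)

int main(void)
{
	unsigned long long base = 0;	/* hypothetical FB start */
	unsigned long long real_vram = 384 * MiB;
	int sideport_enabled = 1;

	/* Mirror of the rs690_mc_init() workaround above. */
	if (sideport_enabled && real_vram == 384 * MiB) {
		base += 128 * MiB;	/* skip the sideport area */
		real_vram -= 128 * MiB;	/* keep only the UMA portion */
	}
	printf("base=%lluM size=%lluM\n", base / MiB, real_vram / MiB);
	return 0;
}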
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 4aaeb118a3ff..b95267846ff2 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2335,6 +2335,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
 	pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
 						       ASIC_INTERNAL_MEMORY_SS, 0);
 
+	/* disable ss, causes hangs on some cayman boards */
+	if (rdev->family == CHIP_CAYMAN) {
+		pi->sclk_ss = false;
+		pi->mclk_ss = false;
+	}
+
 	if (pi->sclk_ss || pi->mclk_ss)
 		pi->dynamic_ss = true;
 	else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 626163ef483d..22d3517ed6ad 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2813,7 +2813,7 @@ static void si_setup_spi(struct radeon_device *rdev,
 }
 
 static u32 si_get_rb_disabled(struct radeon_device *rdev,
-			      u32 max_rb_num, u32 se_num,
+			      u32 max_rb_num_per_se,
 			      u32 sh_per_se)
 {
 	u32 data, mask;
@@ -2827,14 +2827,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
 
 	data >>= BACKEND_DISABLE_SHIFT;
 
-	mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+	mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
 
 	return data & mask;
 }
 
 static void si_setup_rb(struct radeon_device *rdev,
 			u32 se_num, u32 sh_per_se,
-			u32 max_rb_num)
+			u32 max_rb_num_per_se)
 {
 	int i, j;
 	u32 data, mask;
@@ -2844,19 +2844,21 @@ static void si_setup_rb(struct radeon_device *rdev,
 	for (i = 0; i < se_num; i++) {
 		for (j = 0; j < sh_per_se; j++) {
 			si_select_se_sh(rdev, i, j);
-			data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+			data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
 			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
 		}
 	}
 	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
 
 	mask = 1;
-	for (i = 0; i < max_rb_num; i++) {
+	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
 		if (!(disabled_rbs & mask))
 			enabled_rbs |= mask;
 		mask <<= 1;
 	}
 
+	rdev->config.si.backend_enable_mask = enabled_rbs;
+
 	for (i = 0; i < se_num; i++) {
 		si_select_se_sh(rdev, i, 0xffffffff);
 		data = 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 145f54f17b85..1df856f78568 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	 * Don't move nonexistent data. Clear destination instead.
 	 */
 	if (old_iomap == NULL &&
-	    (ttm == NULL || ttm->state == tt_unpopulated)) {
+	    (ttm == NULL || (ttm->state == tt_unpopulated &&
+			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
 		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
 		goto out2;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index cfcdf5b5440a..801231c9ae48 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -178,9 +178,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-		drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
-	page_last = vma_pages(vma) +
-		drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+	page_last = vma_pages(vma) + vma->vm_pgoff -
+		drm_vma_node_start(&bo->vma_node);
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
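
The two subtractions were in the wrong order: vm_pgoff is where this mapping starts within the DRM file offset space and drm_vma_node_start() is where the buffer object starts, so the page index inside the BO needs vm_pgoff minus the node start, not the reverse. The two agree only when a mapping begins exactly at the BO's base (vm_pgoff == node_start), which is why the bug could hide; for partial mappings the old ordering underflowed the unsigned arithmetic. A standalone check:

#include <stdio.h>

/* Page index within the BO for a faulting address, per the fixed code. */
static unsigned long bo_page(unsigned long address, unsigned long vm_start,
			     unsigned long vm_pgoff, unsigned long node_start,
			     unsigned page_shift)
{
	return ((address - vm_start) >> page_shift) + vm_pgoff - node_start;
}

int main(void)
{
	/* BO lives at fake-file page 1000; userspace maps starting 4 pages
	 * into it (vm_pgoff = 1004) and faults on its 2nd mapped page. */
	unsigned long node_start = 1000, vm_pgoff = 1004;
	unsigned long vm_start = 0x700000000000UL;
	unsigned long address = vm_start + 1 * 4096;

	printf("fixed: page %lu\n",
	       bo_page(address, vm_start, vm_pgoff, node_start, 12)); /* 5 */
	/* the old ordering computed 1 + 1000 - 1004, wrapping unsigned */
	return 0;
}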