author	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-01-16 16:06:30 -0500
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-01-16 16:06:30 -0500
commit	0d9d349d8788d30f3fc3bb39279c370f94d9dbec (patch)
tree	874f301d180bd2a80dee68ec4caf79ff64f9bed9 /drivers/gpu/drm
parent	cba1c07377132fb87b2c73b395ef386da7e03f60 (diff)
parent	145830dfb005961cb507a578c9d2e7622f0b3716 (diff)
Merge commit origin/master into drm-intel-next
Conflicts are getting out of hand, and now we have to shuffle even more in -next which was also shuffled in -fixes (the call for drm_mode_config_reset needs to move yet again). So do a proper backmerge. I wanted to wait with this for the 3.13 release, but alas let's just do this now.

Conflicts:
	drivers/gpu/drm/i915/i915_reg.h
	drivers/gpu/drm/i915/intel_ddi.c
	drivers/gpu/drm/i915/intel_display.c
	drivers/gpu/drm/i915/intel_pm.c

Besides the conflict around the forcewake get/put code (where we changed the called function in -fixes and added a new parameter in -next), all the current conflicts are of the "adjacent lines changed" type.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
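For reference, a backmerge like the one described above follows the usual git workflow; the commands below are only a sketch of that process, not part of the commit itself, and the branch and remote names are assumptions:

	git checkout drm-intel-next
	git merge origin/master
	# resolve the conflicts in the files listed above, then
	git add drivers/gpu/drm/i915/i915_reg.h drivers/gpu/drm/i915/intel_ddi.c \
	        drivers/gpu/drm/i915/intel_display.c drivers/gpu/drm/i915/intel_pm.c
	git commit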
Diffstat (limited to 'drivers/gpu/drm')
 drivers/gpu/drm/armada/armada_drm.h | 1
 drivers/gpu/drm/armada/armada_drv.c | 7
 drivers/gpu/drm/armada/armada_fbdev.c | 20
 drivers/gpu/drm/armada/armada_gem.c | 7
 drivers/gpu/drm/drm_edid.c | 12
 drivers/gpu/drm/drm_modes.c | 2
 drivers/gpu/drm/drm_stub.c | 6
 drivers/gpu/drm/exynos/exynos_drm_drv.c | 35
 drivers/gpu/drm/exynos/exynos_drm_fimd.c | 2
 drivers/gpu/drm/i915/i915_dma.c | 20
 drivers/gpu/drm/i915/i915_drv.c | 3
 drivers/gpu/drm/i915/i915_drv.h | 2
 drivers/gpu/drm/i915/i915_gem.c | 41
 drivers/gpu/drm/i915/i915_gem_context.c | 16
 drivers/gpu/drm/i915/i915_gem_dmabuf.c | 13
 drivers/gpu/drm/i915/i915_gem_evict.c | 14
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 88
 drivers/gpu/drm/i915/i915_gem_gtt.c | 8
 drivers/gpu/drm/i915/i915_irq.c | 2
 drivers/gpu/drm/i915/intel_ddi.c | 13
 drivers/gpu/drm/i915/intel_display.c | 45
 drivers/gpu/drm/i915/intel_dp.c | 34
 drivers/gpu/drm/i915/intel_drv.h | 3
 drivers/gpu/drm/i915/intel_pm.c | 24
 drivers/gpu/drm/nouveau/Makefile | 1
 drivers/gpu/drm/nouveau/core/core/subdev.c | 3
 drivers/gpu/drm/nouveau/core/engine/device/base.c | 2
 drivers/gpu/drm/nouveau/core/engine/device/nv50.c | 4
 drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | 2
 drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 3
 drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 3
 drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 2
 drivers/gpu/drm/nouveau/core/engine/software/nv50.c | 2
 drivers/gpu/drm/nouveau/core/include/subdev/clock.h | 4
 drivers/gpu/drm/nouveau/core/include/subdev/fb.h | 5
 drivers/gpu/drm/nouveau/core/include/subdev/i2c.h | 2
 drivers/gpu/drm/nouveau/core/include/subdev/instmem.h | 7
 drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 14
 drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c | 7
 drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c | 445
 drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 4
 drivers/gpu/drm/nouveau/core/subdev/therm/ic.c | 10
 drivers/gpu/drm/nouveau/dispnv04/dfp.c | 2
 drivers/gpu/drm/nouveau/dispnv04/overlay.c | 42
 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 2
 drivers/gpu/drm/nouveau/nouveau_abi16.c | 2
 drivers/gpu/drm/nouveau/nouveau_acpi.c | 16
 drivers/gpu/drm/nouveau/nouveau_display.c | 5
 drivers/gpu/drm/nouveau/nouveau_drm.c | 6
 drivers/gpu/drm/nouveau/nv50_display.c | 2
 drivers/gpu/drm/qxl/Kconfig | 1
 drivers/gpu/drm/qxl/qxl_display.c | 2
 drivers/gpu/drm/radeon/atombios_crtc.c | 87
 drivers/gpu/drm/radeon/atombios_i2c.c | 15
 drivers/gpu/drm/radeon/cik.c | 12
 drivers/gpu/drm/radeon/cik_sdma.c | 2
 drivers/gpu/drm/radeon/dce6_afmt.c | 20
 drivers/gpu/drm/radeon/evergreen_hdmi.c | 4
 drivers/gpu/drm/radeon/ni.c | 20
 drivers/gpu/drm/radeon/ni_dpm.c | 28
 drivers/gpu/drm/radeon/r600_hdmi.c | 8
 drivers/gpu/drm/radeon/radeon.h | 12
 drivers/gpu/drm/radeon/radeon_asic.c | 4
 drivers/gpu/drm/radeon/radeon_atombios.c | 2
 drivers/gpu/drm/radeon/radeon_atpx_handler.c | 16
 drivers/gpu/drm/radeon/radeon_cs.c | 4
 drivers/gpu/drm/radeon/radeon_drv.c | 13
 drivers/gpu/drm/radeon/radeon_drv.h | 3
 drivers/gpu/drm/radeon/radeon_gart.c | 16
 drivers/gpu/drm/radeon/radeon_kms.c | 9
 drivers/gpu/drm/radeon/radeon_pm.c | 55
 drivers/gpu/drm/radeon/radeon_trace.h | 33
 drivers/gpu/drm/radeon/radeon_uvd.c | 2
 drivers/gpu/drm/radeon/reg_srcs/cayman | 4
 drivers/gpu/drm/radeon/reg_srcs/evergreen | 4
 drivers/gpu/drm/radeon/rs690.c | 10
 drivers/gpu/drm/radeon/rv770_dpm.c | 6
 drivers/gpu/drm/radeon/si.c | 23
 drivers/gpu/drm/tegra/drm.c | 34
 drivers/gpu/drm/tegra/drm.h | 2
 drivers/gpu/drm/tegra/fb.c | 2
 drivers/gpu/drm/tegra/rgb.c | 11
 drivers/gpu/drm/ttm/ttm_bo_util.c | 3
 drivers/gpu/drm/ttm/ttm_bo_vm.c | 6
 drivers/gpu/drm/udl/udl_gem.c | 6
 drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 2
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
 drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 3
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 1
 drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 4
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 118
 drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 5
92 files changed, 1202 insertions, 426 deletions
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index eef09ec9a5ff..a72cae03b99b 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *,
 extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
 
 int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_lastclose(struct drm_device *);
 void armada_fbdev_fini(struct drm_device *);
 
 int armada_overlay_plane_create(struct drm_device *, unsigned long);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 4f2b28354915..62d0ff3efddf 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -321,6 +321,11 @@ static struct drm_ioctl_desc armada_ioctls[] = {
 		DRM_UNLOCKED),
 };
 
+static void armada_drm_lastclose(struct drm_device *dev)
+{
+	armada_fbdev_lastclose(dev);
+}
+
 static const struct file_operations armada_drm_fops = {
 	.owner			= THIS_MODULE,
 	.llseek			= no_llseek,
@@ -337,7 +342,7 @@ static struct drm_driver armada_drm_driver = {
 	.open			= NULL,
 	.preclose		= NULL,
 	.postclose		= NULL,
-	.lastclose		= NULL,
+	.lastclose		= armada_drm_lastclose,
 	.unload			= armada_drm_unload,
 	.get_vblank_counter	= drm_vblank_count,
 	.enable_vblank		= armada_drm_enable_vblank,
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index dd5ea77dac96..948cb14c561e 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
 	drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
 	drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
 
-	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
-		dfb->fb.width, dfb->fb.height,
-		dfb->fb.bits_per_pixel, obj->phys_addr);
+	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
+		dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
+		(unsigned long long)obj->phys_addr);
 
 	return 0;
 
@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev)
 	return ret;
 }
 
+void armada_fbdev_lastclose(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	drm_modeset_lock_all(dev);
+	if (priv->fbdev)
+		drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+	drm_modeset_unlock_all(dev);
+}
+
 void armada_fbdev_fini(struct drm_device *dev)
 {
 	struct armada_private *priv = dev->dev_private;
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev)
 			framebuffer_release(info);
 	}
 
+	drm_fb_helper_fini(fbh);
+
 	if (fbh->fb)
 		fbh->fb->funcs->destroy(fbh->fb);
 
-	drm_fb_helper_fini(fbh);
-
 	priv->fbdev = NULL;
 	}
 }
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 9f2356bae7fd..887816f43476 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 		obj->dev_addr = obj->linear->start;
 	}
 
-	DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
-			 obj, obj->phys_addr, obj->dev_addr);
+	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
+			 (unsigned long long)obj->phys_addr,
+			 (unsigned long long)obj->dev_addr);
 
 	return 0;
 }
@@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
 			 * refcount on the gem object itself.
 			 */
 			drm_gem_object_reference(obj);
-			dma_buf_put(buf);
 			return obj;
 		}
 	}
@@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
 	}
 
 	dobj->obj.import_attach = attach;
+	get_dma_buf(buf);
 
 	/*
 	 * Don't call dma_buf_map_attachment() here - it maps the
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fb7cf0e796f6..8835dcddfac3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -68,6 +68,8 @@
 #define EDID_QUIRK_DETAILED_SYNC_PP		(1 << 6)
 /* Force reduced-blanking timings for detailed modes */
 #define EDID_QUIRK_FORCE_REDUCED_BLANKING	(1 << 7)
+/* Force 8bpc */
+#define EDID_QUIRK_FORCE_8BPC			(1 << 8)
 
 struct detailed_mode_closure {
 	struct drm_connector *connector;
@@ -128,6 +130,9 @@ static struct edid_quirk {
 
 	/* Medion MD 30217 PG */
 	{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+
+	/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+	{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
 };
 
 /*
@@ -2674,7 +2679,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
 	int modes = 0;
 	u8 cea_mode;
 
-	if (video_db == NULL || video_index > video_len)
+	if (video_db == NULL || video_index >= video_len)
 		return 0;
 
 	/* CEA modes are numbered 1..127 */
@@ -2701,7 +2706,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
 	if (structure & (1 << 8)) {
 		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
 		if (newmode) {
-			newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+			newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
 			drm_mode_probed_add(connector, newmode);
 			modes++;
 		}
@@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 
 	drm_add_display_info(edid, &connector->display_info);
 
+	if (quirks & EDID_QUIRK_FORCE_8BPC)
+		connector->display_info.bpc = 8;
+
 	return num_modes;
 }
 EXPORT_SYMBOL(drm_add_edid_modes);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 85071a1c4547..b0733153dfd2 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1041,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
 				/* if equal delete the probed mode */
 				mode->status = pmode->status;
 				/* Merge type bits together */
-				mode->type = pmode->type;
+				mode->type |= pmode->type;
 				list_del(&pmode->head);
 				drm_mode_destroy(connector->dev, pmode);
 				break;
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index f53d5246979c..66dd3a001cf1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -566,11 +566,11 @@ err_unload:
 	if (dev->driver->unload)
 		dev->driver->unload(dev);
 err_primary_node:
-	drm_put_minor(dev->primary);
+	drm_unplug_minor(dev->primary);
 err_render_node:
-	drm_put_minor(dev->render);
+	drm_unplug_minor(dev->render);
 err_control_node:
-	drm_put_minor(dev->control);
+	drm_unplug_minor(dev->control);
 err_agp:
 	if (dev->driver->bus->agp_destroy)
 		dev->driver->bus->agp_destroy(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index b676006a95a0..22b8f5eced80 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -173,28 +173,37 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 static void exynos_drm_preclose(struct drm_device *dev,
 					struct drm_file *file)
 {
+	exynos_drm_subdrv_close(dev, file);
+}
+
+static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
+{
 	struct exynos_drm_private *private = dev->dev_private;
-	struct drm_pending_vblank_event *e, *t;
+	struct drm_pending_vblank_event *v, *vt;
+	struct drm_pending_event *e, *et;
 	unsigned long flags;
 
-	/* release events of current file */
+	if (!file->driver_priv)
+		return;
+
+	/* Release all events not unhandled by page flip handler. */
 	spin_lock_irqsave(&dev->event_lock, flags);
-	list_for_each_entry_safe(e, t, &private->pageflip_event_list,
+	list_for_each_entry_safe(v, vt, &private->pageflip_event_list,
 			base.link) {
-		if (e->base.file_priv == file) {
-			list_del(&e->base.link);
-			e->base.destroy(&e->base);
+		if (v->base.file_priv == file) {
+			list_del(&v->base.link);
+			drm_vblank_put(dev, v->pipe);
+			v->base.destroy(&v->base);
 		}
 	}
-	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	exynos_drm_subdrv_close(dev, file);
-}
+	/* Release all events handled by page flip handler but not freed. */
+	list_for_each_entry_safe(e, et, &file->event_list, link) {
+		list_del(&e->link);
+		e->destroy(e);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
-{
-	if (!file->driver_priv)
-		return;
 
 	kfree(file->driver_priv);
 	file->driver_priv = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 23da72b5eae9..a61878bf5dcd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -31,7 +31,7 @@
 #include "exynos_drm_iommu.h"
 
 /*
- * FIMD is stand for Fully Interactive Mobile Display and
+ * FIMD stands for Fully Interactive Mobile Display and
 * as a display controller, it transfers contents drawn on memory
 * to a LCD Panel through Display Interfaces such as RGB or
 * CPU Interface.
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1a25f9eaca59..35542eaabe89 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -85,6 +85,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 
+	/*
+	 * The dri breadcrumb update races against the drm master disappearing.
+	 * Instead of trying to fix this (this is by far not the only ums issue)
+	 * just don't do the update in kms mode.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	if (dev->primary->master) {
 		master_priv = dev->primary->master->driver_priv;
 		if (master_priv->sarea_priv)
@@ -1492,16 +1500,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	mutex_init(&dev_priv->dpio_lock);
-	mutex_init(&dev_priv->rps.hw_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 
-	mutex_init(&dev_priv->pc8.lock);
-	dev_priv->pc8.requirements_met = false;
-	dev_priv->pc8.gpu_idle = false;
-	dev_priv->pc8.irqs_disabled = false;
-	dev_priv->pc8.enabled = false;
-	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
-	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+	intel_pm_setup(dev);
 
 	intel_display_crc_init(dev);
 
@@ -1605,7 +1606,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	}
 
 	intel_irq_init(dev);
-	intel_pm_init(dev);
 	intel_uncore_sanitize(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1851,8 +1851,10 @@ void i915_driver_lastclose(struct drm_device * dev)
 
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
+	mutex_lock(&dev->struct_mutex);
 	i915_gem_context_close(dev, file_priv);
 	i915_gem_release(dev, file_priv);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 31ffe39d2b79..bb27f0dde03d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -540,8 +540,10 @@ static int i915_drm_freeze(struct drm_device *dev)
 		 * Disable CRTCs directly since we want to preserve sw state
 		 * for _thaw.
 		 */
+		mutex_lock(&dev->mode_config.mutex);
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
 			dev_priv->display.crtc_disable(crtc);
+		mutex_unlock(&dev->mode_config.mutex);
 
 		intel_modeset_suspend_hw(dev);
 	}
@@ -655,6 +657,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 		intel_modeset_init_hw(dev);
 
 		drm_modeset_lock_all(dev);
+		drm_mode_config_reset(dev);
 		intel_modeset_setup_hw_state(dev, true);
 		drm_modeset_unlock_all(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cf7922bdf87c..ff6f870d6621 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1937,9 +1937,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c5a99c46ca9c..32636a470367 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2370,15 +2370,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
 	kfree(request);
 }
 
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
-				      struct intel_ring_buffer *ring)
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+				       struct intel_ring_buffer *ring)
 {
-	u32 completed_seqno;
-	u32 acthd;
+	u32 completed_seqno = ring->get_seqno(ring, false);
+	u32 acthd = intel_ring_get_active_head(ring);
+	struct drm_i915_gem_request *request;
+
+	list_for_each_entry(request, &ring->request_list, list) {
+		if (i915_seqno_passed(completed_seqno, request->seqno))
+			continue;
 
-	acthd = intel_ring_get_active_head(ring);
-	completed_seqno = ring->get_seqno(ring, false);
+		i915_set_reset_status(ring, request, acthd);
+	}
+}
 
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+					struct intel_ring_buffer *ring)
+{
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -2386,9 +2395,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (request->seqno > completed_seqno)
-			i915_set_reset_status(ring, request, acthd);
-
 		i915_gem_free_request(request);
 	}
 
@@ -2430,8 +2436,16 @@ void i915_gem_reset(struct drm_device *dev)
 	struct intel_ring_buffer *ring;
 	int i;
 
+	/*
+	 * Before we free the objects from the requests, we need to inspect
+	 * them for finding the guilty party. As the requests only borrow
+	 * their reference to the objects, the inspection must be done first.
+	 */
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_reset_ring_status(dev_priv, ring);
+
 	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_lists(dev_priv, ring);
+		i915_gem_reset_ring_cleanup(dev_priv, ring);
 
 	i915_gem_cleanup_ringbuffer(dev);
 
@@ -4477,10 +4491,9 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (dev_priv->ellc_size)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
-	if (IS_HSW_GT3(dev))
-		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
-	else
-		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+	if (IS_HASWELL(dev))
+		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
 	if (HAS_PCH_NOP(dev)) {
 		u32 temp = I915_READ(GEN7_MSG_CTL);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 41877045a1a0..e08acaba5402 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -345,10 +345,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	mutex_lock(&dev->struct_mutex);
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static struct i915_hw_context *
@@ -421,11 +419,21 @@ static int do_switch(struct i915_hw_context *to)
 	if (ret)
 		return ret;
 
-	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
+	/*
+	 * Pin can switch back to the default context if we end up calling into
+	 * evict_everything - as a last ditch gtt defrag effort that also
+	 * switches to the default context. Hence we need to reload from here.
+	 */
+	from = ring->last_context;
+
+	/*
+	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
 	 * that thanks to write = false in this call and us not setting any gpu
 	 * write domains when putting a context object onto the active list
 	 * (when switching away from it), this won't block.
-	 * XXX: We need a real interface to do this instead of trickery. */
+	 *
+	 * XXX: We need a real interface to do this instead of trickery.
+	 */
 	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
 	if (ret) {
 		i915_gem_object_unpin(to->obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7d5752fda5f1..9bb533e0d762 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 
 	ret = i915_gem_object_get_pages(obj);
 	if (ret)
-		goto error;
+		goto err;
+
+	i915_gem_object_pin_pages(obj);
 
 	ret = -ENOMEM;
 
 	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
 	if (pages == NULL)
-		goto error;
+		goto err_unpin;
 
 	i = 0;
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
@@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 	drm_free_large(pages);
 
 	if (!obj->dma_buf_vmapping)
-		goto error;
+		goto err_unpin;
 
 	obj->vmapping_count = 1;
-	i915_gem_object_pin_pages(obj);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return obj->dma_buf_vmapping;
 
-error:
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err:
 	mutex_unlock(&dev->struct_mutex);
 	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b7376533633d..8f3adc7d0dc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
+search_again:
 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
 		if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ none:
 		list_del_init(&vma->exec_list);
 	}
 
-	/* We expect the caller to unpin, evict all and try again, or give up.
-	 * So calling i915_gem_evict_vm() is unnecessary.
+	/* Can we unpin some objects such as idle hw contents,
+	 * or pending flips?
 	 */
-	return -ENOSPC;
+	ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	/* Only idle the GPU and repeat the search once */
+	i915_gem_retire_requests(dev);
+	nonblocking = true;
+	goto search_again;
 
 found:
 	/* drm_mm doesn't allow any other other operations while
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index bceddf5a04bc..8d795626a25e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,6 +33,9 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
+#define  __EXEC_OBJECT_HAS_PIN (1<<31)
+#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+
 struct eb_vmas {
 	struct list_head vmas;
 	int and;
@@ -90,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 {
 	struct drm_i915_gem_object *obj;
 	struct list_head objects;
-	int i, ret = 0;
+	int i, ret;
 
 	INIT_LIST_HEAD(&objects);
 	spin_lock(&file->table_lock);
@@ -103,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 			DRM_DEBUG("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
 			ret = -ENOENT;
-			goto out;
+			goto err;
 		}
 
 		if (!list_empty(&obj->obj_exec_link)) {
@@ -111,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 				   obj, exec[i].handle, i);
 			ret = -EINVAL;
-			goto out;
+			goto err;
 		}
 
 		drm_gem_object_reference(&obj->base);
@@ -120,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
 	spin_unlock(&file->table_lock);
 
 	i = 0;
-	list_for_each_entry(obj, &objects, obj_exec_link) {
+	while (!list_empty(&objects)) {
 		struct i915_vma *vma;
 
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
+
 		/*
 		 * NOTE: We can leak any vmas created here when something fails
 		 * later on. But that's no issue since vma_unbind can deal with
@@ -135,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		if (IS_ERR(vma)) {
 			DRM_DEBUG("Failed to lookup VMA\n");
 			ret = PTR_ERR(vma);
-			goto out;
+			goto err;
 		}
 
+		/* Transfer ownership from the objects list to the vmas list. */
 		list_add_tail(&vma->exec_list, &eb->vmas);
+		list_del_init(&obj->obj_exec_link);
 
 		vma->exec_entry = &exec[i];
 		if (eb->and < 0) {
@@ -152,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		++i;
 	}
 
+	return 0;
 
-out:
+
+err:
 	while (!list_empty(&objects)) {
 		obj = list_first_entry(&objects,
 				       struct drm_i915_gem_object,
 				       obj_exec_link);
 		list_del_init(&obj->obj_exec_link);
-		if (ret)
-			drm_gem_object_unreference(&obj->base);
+		drm_gem_object_unreference(&obj->base);
 	}
+	/*
+	 * Objects already transfered to the vmas list will be unreferenced by
+	 * eb_destroy.
+	 */
+
 	return ret;
 }
 
@@ -187,7 +202,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 	}
 }
 
-static void eb_destroy(struct eb_vmas *eb) {
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (!drm_mm_node_allocated(&vma->node))
+		return;
+
+	entry = vma->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
 	while (!list_empty(&eb->vmas)) {
 		struct i915_vma *vma;
 
@@ -195,6 +231,7 @@ static void eb_destroy(struct eb_vmas *eb) {
 				       struct i915_vma,
 				       exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
 	kfree(eb);
@@ -477,9 +514,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 	return ret;
 }
 
-#define  __EXEC_OBJECT_HAS_PIN (1<<31)
-#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
-
 static int
 need_reloc_mappable(struct i915_vma *vma)
 {
@@ -551,26 +585,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }
 
-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
-{
-	struct drm_i915_gem_exec_object2 *entry;
-	struct drm_i915_gem_object *obj = vma->obj;
-
-	if (!drm_mm_node_allocated(&vma->node))
-		return;
-
-	entry = vma->exec_entry;
-
-	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-		i915_gem_object_unpin_fence(obj);
-
-	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		i915_gem_object_unpin(obj);
-
-	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct list_head *vmas,
@@ -669,13 +683,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			goto err;
 	}
 
-err:		/* Decrement pin count for bound objects */
-	list_for_each_entry(vma, vmas, exec_list)
-		i915_gem_execbuffer_unreserve_vma(vma);
-
+err:
 	if (ret != -ENOSPC || retry++)
 		return ret;
 
+	/* Decrement pin count for bound objects */
+	list_for_each_entry(vma, vmas, exec_list)
+		i915_gem_execbuffer_unreserve_vma(vma);
+
 	ret = i915_gem_evict_vm(vm, true);
 	if (ret)
 		return ret;
@@ -707,6 +722,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	while (!list_empty(&eb->vmas)) {
 		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8c7ebfa3bd56..6c3a6e60aeac 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 #define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
 #define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
 #define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
 #define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
 
 #define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
 #define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
@@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 	case I915_CACHE_NONE:
 		break;
 	case I915_CACHE_WT:
-		pte |= HSW_WT_ELLC_LLC_AGE0;
+		pte |= HSW_WT_ELLC_LLC_AGE3;
 		break;
 	default:
-		pte |= HSW_WB_ELLC_LLC_AGE0;
+		pte |= HSW_WB_ELLC_LLC_AGE3;
 		break;
 	}
 
@@ -918,14 +920,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 		WARN_ON(readq(&gtt_entries[i-1])
 			!= gen8_pte_encode(addr, level, true));
 
-#if 0 /* TODO: Still needed on GEN8? */
 	/* This next bit makes the above posting read even more important. We
 	 * want to flush the TLBs only after we're certain all the PTE updates
 	 * have finished.
 	 */
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
-#endif
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d64da4fe36e5..6d11e253218a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2715,6 +2715,8 @@ static void gen8_irq_preinstall(struct drm_device *dev)
 #undef GEN8_IRQ_INIT_NDX
 
 	POSTING_READ(GEN8_PCU_IIR);
+
+	ibx_irq_preinstall(dev);
 }
 
 static void ibx_hpd_irq_setup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cec06a5453cc..74749c6f897e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -173,7 +173,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 		ddi_translations = ddi_translations_dp;
 		break;
 	case PORT_D:
-		if (intel_dpd_is_edp(dev))
+		if (intel_dp_is_edp(dev, PORT_D))
 			ddi_translations = ddi_translations_edp;
 		else
 			ddi_translations = ddi_translations_dp;
@@ -1136,12 +1136,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
 	enum pipe pipe;
 	struct intel_crtc *intel_crtc;
 
+	dev_priv->ddi_plls.spll_refcount = 0;
+	dev_priv->ddi_plls.wrpll1_refcount = 0;
+	dev_priv->ddi_plls.wrpll2_refcount = 0;
+
 	for_each_pipe(pipe) {
 		intel_crtc =
 			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
-		if (!intel_crtc->active)
+		if (!intel_crtc->active) {
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
 			continue;
+		}
 
 		intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
 								 pipe);
@@ -1235,9 +1241,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 	if (wait)
 		intel_wait_ddi_buf_idle(dev_priv, port);
 
-	if (type == INTEL_OUTPUT_EDP) {
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 		ironlake_edp_panel_off(intel_dp);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9db009c55c88..e77d4b8856a7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6027,7 +6027,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
 	uint16_t postoff = 0;
 
 	if (intel_crtc->config.limited_color_range)
-		postoff = (16 * (1 << 13) / 255) & 0x1fff;
+		postoff = (16 * (1 << 12) / 255) & 0x1fff;
 
 	I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
 	I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
@@ -6614,7 +6614,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 
 	/* Make sure we're not on PC8 state before disabling PC8, otherwise
 	 * we'll hang the machine! */
-	dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	if (val & LCPLL_POWER_DOWN_ALLOW) {
 		val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6648,7 +6648,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 			DRM_ERROR("Switching back to LCPLL failed\n");
 	}
 
-	dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 void hsw_enable_pc8_work(struct work_struct *__work)
@@ -8581,7 +8581,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
 					DERRMR_PIPEB_PRI_FLIP_DONE |
 					DERRMR_PIPEC_PRI_FLIP_DONE));
-		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
+				      MI_SRM_LRM_GLOBAL_GTT);
 		intel_ring_emit(ring, DERRMR);
 		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
 	}
@@ -9363,7 +9364,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
 		PIPE_CONF_CHECK_I(pipe_bpp);
 
-	if (!IS_HASWELL(dev)) {
+	if (!HAS_DDI(dev)) {
 		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
 		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
 	}
@@ -10330,7 +10331,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 			intel_ddi_init(dev, PORT_D);
 	} else if (HAS_PCH_SPLIT(dev)) {
 		int found;
-		dpd_is_edp = intel_dpd_is_edp(dev);
+		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
 
 		if (has_edp_a(dev))
 			intel_dp_init(dev, DP_A, PORT_A);
@@ -10367,8 +10368,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
 					PORT_C);
 			if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
-				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
-					      PORT_C);
+				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
 		}
 
 		intel_dsi_init(dev);
@@ -10816,11 +10816,20 @@ static struct intel_quirk intel_quirks[] = {
 	/* Sony Vaio Y cannot use SSC on LVDS */
 	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
 
-	/*
-	 * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
-	 * seem to use inverted backlight PWM.
-	 */
-	{ 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
+	/* Acer Aspire 5734Z must invert backlight brightness */
+	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+	/* Acer/eMachines G725 */
+	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+	/* Acer/eMachines e725 */
+	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+	/* Acer/Packard Bell NCL20 */
+	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+	/* Acer Aspire 4736Z */
+	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
@@ -11302,8 +11311,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 	}
 
 	intel_modeset_check_state(dev);
-
-	drm_mode_config_reset(dev);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -11312,7 +11319,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
 
 	intel_setup_overlay(dev);
 
+	mutex_lock(&dev->mode_config.mutex);
+	drm_mode_config_reset(dev);
 	intel_modeset_setup_hw_state(dev, false);
+	mutex_unlock(&dev->mode_config.mutex);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
@@ -11390,14 +11400,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
 	u16 gmch_ctrl;
 
-	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+	pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
 	if (state)
 		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
 	else
 		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+	pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8f17f8fbd0b1..9b40113f4fa1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3324,11 +3324,19 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
 }
 
 /* check the VBT to see whether the eDP is on DP-D port */
-bool intel_dpd_is_edp(struct drm_device *dev)
+bool intel_dp_is_edp(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	union child_device_config *p_child;
 	int i;
+	static const short port_mapping[] = {
+		[PORT_B] = PORT_IDPB,
+		[PORT_C] = PORT_IDPC,
+		[PORT_D] = PORT_IDPD,
+	};
+
+	if (port == PORT_A)
+		return true;
 
 	if (!dev_priv->vbt.child_dev_num)
 		return false;
@@ -3336,7 +3344,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
 		p_child = dev_priv->vbt.child_dev + i;
 
-		if (p_child->common.dvo_port == PORT_IDPD &&
+		if (p_child->common.dvo_port == port_mapping[port] &&
 		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
 		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
 			return true;
@@ -3614,26 +3622,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	intel_dp->DP = I915_READ(intel_dp->output_reg);
 	intel_dp->attached_connector = intel_connector;
 
-	type = DRM_MODE_CONNECTOR_DisplayPort;
-	/*
-	 * FIXME : We need to initialize built-in panels before external panels.
-	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
-	 */
-	switch (port) {
-	case PORT_A:
+	if (intel_dp_is_edp(dev, port))
 		type = DRM_MODE_CONNECTOR_eDP;
-		break;
-	case PORT_C:
-		if (IS_VALLEYVIEW(dev))
-			type = DRM_MODE_CONNECTOR_eDP;
-		break;
-	case PORT_D:
-		if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
-			type = DRM_MODE_CONNECTOR_eDP;
-		break;
-	default:	/* silence GCC warning */
-		break;
-	}
+	else
+		type = DRM_MODE_CONNECTOR_DisplayPort;
 
 	/*
 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4cbf49051b9c..8754db9e3d52 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -722,7 +722,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 void intel_dp_check_link_status(struct intel_dp *intel_dp);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
 			     struct intel_crtc_config *pipe_config);
-bool intel_dpd_is_edp(struct drm_device *dev);
+bool intel_dp_is_edp(struct drm_device *dev, enum port port);
 void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
 void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
 void ironlake_edp_panel_on(struct intel_dp *intel_dp);
@@ -839,6 +839,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
 				    uint32_t sprite_width, int pixel_size,
 				    bool enabled, bool scaled);
 void intel_init_pm(struct drm_device *dev);
+void intel_pm_setup(struct drm_device *dev);
 bool intel_fbc_enabled(struct drm_device *dev);
 void intel_update_fbc(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 9998185fdb22..d77cc81900f9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1113,7 +1113,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1113 1113
1114 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1114 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1115 clock = adjusted_mode->crtc_clock; 1115 clock = adjusted_mode->crtc_clock;
1116 htotal = adjusted_mode->htotal; 1116 htotal = adjusted_mode->crtc_htotal;
1117 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1117 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1118 pixel_size = crtc->fb->bits_per_pixel / 8; 1118 pixel_size = crtc->fb->bits_per_pixel / 8;
1119 1119
@@ -1200,7 +1200,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
1200 crtc = intel_get_crtc_for_plane(dev, plane); 1200 crtc = intel_get_crtc_for_plane(dev, plane);
1201 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1201 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1202 clock = adjusted_mode->crtc_clock; 1202 clock = adjusted_mode->crtc_clock;
1203 htotal = adjusted_mode->htotal; 1203 htotal = adjusted_mode->crtc_htotal;
1204 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1204 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1205 pixel_size = crtc->fb->bits_per_pixel / 8; 1205 pixel_size = crtc->fb->bits_per_pixel / 8;
1206 1206
@@ -1431,7 +1431,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1431 const struct drm_display_mode *adjusted_mode = 1431 const struct drm_display_mode *adjusted_mode =
1432 &to_intel_crtc(crtc)->config.adjusted_mode; 1432 &to_intel_crtc(crtc)->config.adjusted_mode;
1433 int clock = adjusted_mode->crtc_clock; 1433 int clock = adjusted_mode->crtc_clock;
1434 int htotal = adjusted_mode->htotal; 1434 int htotal = adjusted_mode->crtc_htotal;
1435 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1435 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1436 int pixel_size = crtc->fb->bits_per_pixel / 8; 1436 int pixel_size = crtc->fb->bits_per_pixel / 8;
1437 unsigned long line_time_us; 1437 unsigned long line_time_us;
@@ -1557,7 +1557,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1557 const struct drm_display_mode *adjusted_mode = 1557 const struct drm_display_mode *adjusted_mode =
1558 &to_intel_crtc(enabled)->config.adjusted_mode; 1558 &to_intel_crtc(enabled)->config.adjusted_mode;
1559 int clock = adjusted_mode->crtc_clock; 1559 int clock = adjusted_mode->crtc_clock;
1560 int htotal = adjusted_mode->htotal; 1560 int htotal = adjusted_mode->crtc_htotal;
1561 int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w; 1561 int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
1562 int pixel_size = enabled->fb->bits_per_pixel / 8; 1562 int pixel_size = enabled->fb->bits_per_pixel / 8;
1563 unsigned long line_time_us; 1563 unsigned long line_time_us;
@@ -1985,8 +1985,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
1985 /* The WM are computed with base on how long it takes to fill a single 1985 /* The WM are computed with base on how long it takes to fill a single
1986 * row at the given clock rate, multiplied by 8. 1986 * row at the given clock rate, multiplied by 8.
1987 * */ 1987 * */
1988 linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock); 1988 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
1989 ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, 1989 mode->crtc_clock);
1990 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
1990 intel_ddi_get_cdclk_freq(dev_priv)); 1991 intel_ddi_get_cdclk_freq(dev_priv));
1991 1992
1992 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | 1993 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
@@ -5722,10 +5723,19 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
5722 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6; 5723 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
5723} 5724}
5724 5725
5725void intel_pm_init(struct drm_device *dev) 5726void intel_pm_setup(struct drm_device *dev)
5726{ 5727{
5727 struct drm_i915_private *dev_priv = dev->dev_private; 5728 struct drm_i915_private *dev_priv = dev->dev_private;
5728 5729
5730 mutex_init(&dev_priv->rps.hw_lock);
5731
5732 mutex_init(&dev_priv->pc8.lock);
5733 dev_priv->pc8.requirements_met = false;
5734 dev_priv->pc8.gpu_idle = false;
5735 dev_priv->pc8.irqs_disabled = false;
5736 dev_priv->pc8.enabled = false;
5737 dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
5738 INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
5729 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 5739 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5730 intel_gen6_powersave_work); 5740 intel_gen6_powersave_work);
5731} 5741}
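
The intel_pm.c hunks above consistently move the watermark math from the mode's requested htotal/clock onto the hardware-adjusted crtc_htotal/crtc_clock, and hsw_compute_linetime_wm() keeps using closest-integer division. As a standalone illustration of that arithmetic only (not driver code; the timings below are made-up 1080p-style values and DIV_ROUND_CLOSEST is re-implemented locally):

#include <stdio.h>

/* Round-to-nearest integer division, mirroring the kernel's DIV_ROUND_CLOSEST(). */
static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	/* Hypothetical timings: a 2200-pixel total line and a 148.5 MHz
	 * pixel clock (the clock is in kHz, as in the driver). */
	unsigned int crtc_htotal = 2200;
	unsigned int crtc_clock  = 148500;

	/* Time to scan one line, multiplied by 8 (i.e. in 1/8 us units). */
	unsigned int linetime = div_round_closest(crtc_htotal * 1000 * 8, crtc_clock);

	printf("linetime = %u (eighths of a microsecond)\n", linetime);
	return 0;
}
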
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index edcf801613e6..b3fa1ba191b7 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -59,6 +59,7 @@ nouveau-y += core/subdev/clock/nv40.o
59nouveau-y += core/subdev/clock/nv50.o 59nouveau-y += core/subdev/clock/nv50.o
60nouveau-y += core/subdev/clock/nv84.o 60nouveau-y += core/subdev/clock/nv84.o
61nouveau-y += core/subdev/clock/nva3.o 61nouveau-y += core/subdev/clock/nva3.o
62nouveau-y += core/subdev/clock/nvaa.o
62nouveau-y += core/subdev/clock/nvc0.o 63nouveau-y += core/subdev/clock/nvc0.o
63nouveau-y += core/subdev/clock/nve0.o 64nouveau-y += core/subdev/clock/nve0.o
64nouveau-y += core/subdev/clock/pllnv04.o 65nouveau-y += core/subdev/clock/pllnv04.o
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
index 48f06378d3f9..2ea5568b6cf5 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -104,11 +104,8 @@ nouveau_subdev_create_(struct nouveau_object *parent,
104 104
105 if (parent) { 105 if (parent) {
106 struct nouveau_device *device = nv_device(parent); 106 struct nouveau_device *device = nv_device(parent);
107 int subidx = nv_hclass(subdev) & 0xff;
108
109 subdev->debug = nouveau_dbgopt(device->dbgopt, subname); 107 subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
110 subdev->mmio = nv_subdev(device)->mmio; 108 subdev->mmio = nv_subdev(device)->mmio;
111 device->subdev[subidx] = *pobject;
112 } 109 }
113 110
114 return 0; 111 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 9135b25a29d0..dd01c6c435d6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -268,6 +268,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
268 if (ret) 268 if (ret)
269 return ret; 269 return ret;
270 270
271 device->subdev[i] = devobj->subdev[i];
272
271 /* note: can't init *any* subdevs until devinit has been run 273 /* note: can't init *any* subdevs until devinit has been run
272 * due to not knowing exactly what the vbios init tables will 274 * due to not knowing exactly what the vbios init tables will
273 * mess with. devinit also can't be run until all of its 275 * mess with. devinit also can't be run until all of its
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index db139827047c..db3fc7be856a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -283,7 +283,7 @@ nv50_identify(struct nouveau_device *device)
283 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 283 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
284 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 284 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
285 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 285 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
286 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; 286 device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
287 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 287 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
288 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 288 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
289 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 289 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
@@ -311,7 +311,7 @@ nv50_identify(struct nouveau_device *device)
311 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 311 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
312 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 312 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
313 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 313 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
314 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; 314 device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
315 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 315 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
316 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 316 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
317 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 317 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 8d06eef2b9ee..dbc5e33de94f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -161,7 +161,7 @@ nvc0_identify(struct nouveau_device *device)
161 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 161 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
162 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 162 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
163 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 163 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
164 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; 164 device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 5f555788121c..e6352bd5b4ff 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -33,6 +33,7 @@
33#include <engine/dmaobj.h> 33#include <engine/dmaobj.h>
34#include <engine/fifo.h> 34#include <engine/fifo.h>
35 35
36#include "nv04.h"
36#include "nv50.h" 37#include "nv50.h"
37 38
38/******************************************************************************* 39/*******************************************************************************
@@ -460,6 +461,8 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
460 nv_subdev(priv)->intr = nv04_fifo_intr; 461 nv_subdev(priv)->intr = nv04_fifo_intr;
461 nv_engine(priv)->cclass = &nv50_fifo_cclass; 462 nv_engine(priv)->cclass = &nv50_fifo_cclass;
462 nv_engine(priv)->sclass = nv50_fifo_sclass; 463 nv_engine(priv)->sclass = nv50_fifo_sclass;
464 priv->base.pause = nv04_fifo_pause;
465 priv->base.start = nv04_fifo_start;
463 return 0; 466 return 0;
464} 467}
465 468
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 0908dc834c84..fe0f41e65d9b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -35,6 +35,7 @@
35#include <engine/dmaobj.h> 35#include <engine/dmaobj.h>
36#include <engine/fifo.h> 36#include <engine/fifo.h>
37 37
38#include "nv04.h"
38#include "nv50.h" 39#include "nv50.h"
39 40
40/******************************************************************************* 41/*******************************************************************************
@@ -432,6 +433,8 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
432 nv_subdev(priv)->intr = nv04_fifo_intr; 433 nv_subdev(priv)->intr = nv04_fifo_intr;
433 nv_engine(priv)->cclass = &nv84_fifo_cclass; 434 nv_engine(priv)->cclass = &nv84_fifo_cclass;
434 nv_engine(priv)->sclass = nv84_fifo_sclass; 435 nv_engine(priv)->sclass = nv84_fifo_sclass;
436 priv->base.pause = nv04_fifo_pause;
437 priv->base.start = nv04_fifo_start;
435 return 0; 438 return 0;
436} 439}
437 440
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 434bb4b0fa2e..5c8a63dc506a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -334,7 +334,7 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds)
334 while ((mthd = &mthds[i++]) && (init = mthd->init)) { 334 while ((mthd = &mthds[i++]) && (init = mthd->init)) {
335 u32 addr = 0x80000000 | mthd->oclass; 335 u32 addr = 0x80000000 | mthd->oclass;
336 for (data = 0; init->count; init++) { 336 for (data = 0; init->count; init++) {
337 if (data != init->data) { 337 if (init == mthd->init || data != init->data) {
338 nv_wr32(priv, 0x40448c, init->data); 338 nv_wr32(priv, 0x40448c, init->data);
339 data = init->data; 339 data = init->data;
340 } 340 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index b574dd4bb828..5ce686ee729e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -176,7 +176,7 @@ nv50_software_context_ctor(struct nouveau_object *parent,
176 if (ret) 176 if (ret)
177 return ret; 177 return ret;
178 178
179 chan->vblank.nr_event = pdisp->vblank->index_nr; 179 chan->vblank.nr_event = pdisp ? pdisp->vblank->index_nr : 0;
180 chan->vblank.event = kzalloc(chan->vblank.nr_event * 180 chan->vblank.event = kzalloc(chan->vblank.nr_event *
181 sizeof(*chan->vblank.event), GFP_KERNEL); 181 sizeof(*chan->vblank.event), GFP_KERNEL);
182 if (!chan->vblank.event) 182 if (!chan->vblank.event)
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index e2675bc0edba..8f4ced75444a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -14,6 +14,9 @@ enum nv_clk_src {
14 nv_clk_src_hclk, 14 nv_clk_src_hclk,
15 nv_clk_src_hclkm3, 15 nv_clk_src_hclkm3,
16 nv_clk_src_hclkm3d2, 16 nv_clk_src_hclkm3d2,
17 nv_clk_src_hclkm2d3, /* NVAA */
18 nv_clk_src_hclkm4, /* NVAA */
19 nv_clk_src_cclk, /* NVAA */
17 20
18 nv_clk_src_host, 21 nv_clk_src_host,
19 22
@@ -127,6 +130,7 @@ extern struct nouveau_oclass nv04_clock_oclass;
127extern struct nouveau_oclass nv40_clock_oclass; 130extern struct nouveau_oclass nv40_clock_oclass;
128extern struct nouveau_oclass *nv50_clock_oclass; 131extern struct nouveau_oclass *nv50_clock_oclass;
129extern struct nouveau_oclass *nv84_clock_oclass; 132extern struct nouveau_oclass *nv84_clock_oclass;
133extern struct nouveau_oclass *nvaa_clock_oclass;
130extern struct nouveau_oclass nva3_clock_oclass; 134extern struct nouveau_oclass nva3_clock_oclass;
131extern struct nouveau_oclass nvc0_clock_oclass; 135extern struct nouveau_oclass nvc0_clock_oclass;
132extern struct nouveau_oclass nve0_clock_oclass; 136extern struct nouveau_oclass nve0_clock_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 8541aa382ff2..d89dbdf39b0d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -75,6 +75,11 @@ struct nouveau_fb {
75static inline struct nouveau_fb * 75static inline struct nouveau_fb *
76nouveau_fb(void *obj) 76nouveau_fb(void *obj)
77{ 77{
78 /* fbram uses this before device subdev pointer is valid */
79 if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
80 nv_subidx(obj) == NVDEV_SUBDEV_FB)
81 return obj;
82
78 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB]; 83 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
79} 84}
80 85
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 9fa5da723871..7f50a858b16f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -73,7 +73,7 @@ struct nouveau_i2c {
73 int (*identify)(struct nouveau_i2c *, int index, 73 int (*identify)(struct nouveau_i2c *, int index,
74 const char *what, struct nouveau_i2c_board_info *, 74 const char *what, struct nouveau_i2c_board_info *,
75 bool (*match)(struct nouveau_i2c_port *, 75 bool (*match)(struct nouveau_i2c_port *,
76 struct i2c_board_info *)); 76 struct i2c_board_info *, void *), void *);
77 struct list_head ports; 77 struct list_head ports;
78}; 78};
79 79
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
index ec7a54e91a08..4aca33887aaa 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -50,6 +50,13 @@ struct nouveau_instmem {
50static inline struct nouveau_instmem * 50static inline struct nouveau_instmem *
51nouveau_instmem(void *obj) 51nouveau_instmem(void *obj)
52{ 52{
53 /* nv04/nv40 impls need to create objects in their constructor,
54 * which is before the subdev pointer is valid
55 */
56 if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
57 nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
58 return obj;
59
53 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM]; 60 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
54} 61}
55 62
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 420908cb82b6..df1b1b423093 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -365,13 +365,13 @@ static u16
365init_script(struct nouveau_bios *bios, int index) 365init_script(struct nouveau_bios *bios, int index)
366{ 366{
367 struct nvbios_init init = { .bios = bios }; 367 struct nvbios_init init = { .bios = bios };
368 u16 data; 368 u16 bmp_ver = bmp_version(bios), data;
369 369
370 if (bmp_version(bios) && bmp_version(bios) < 0x0510) { 370 if (bmp_ver && bmp_ver < 0x0510) {
371 if (index > 1) 371 if (index > 1 || bmp_ver < 0x0100)
372 return 0x0000; 372 return 0x0000;
373 373
374 data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18); 374 data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
375 return nv_ro16(bios, data + (index * 2)); 375 return nv_ro16(bios, data + (index * 2));
376 } 376 }
377 377
@@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init)
1294 u16 offset = nv_ro16(bios, init->offset + 1); 1294 u16 offset = nv_ro16(bios, init->offset + 1);
1295 1295
1296 trace("JUMP\t0x%04x\n", offset); 1296 trace("JUMP\t0x%04x\n", offset);
1297 init->offset = offset; 1297
1298 if (init_exec(init))
1299 init->offset = offset;
1300 else
1301 init->offset += 3;
1298} 1302}
1299 1303
1300/** 1304/**
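
The init_jump() fix above makes the VBIOS interpreter honour its execution flag: a JUMP is only followed while the script is actually executing, otherwise the 3-byte opcode is stepped over. A toy sketch of that rule in isolation (the opcode byte and script layout are illustrative, beyond the 1-byte opcode plus 16-bit offset implied by the hunk):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct script {
	const uint8_t *data;
	unsigned int offset;
	bool exec;		/* currently executing, or only parsing? */
};

/* JUMP opcode: one opcode byte followed by a little-endian 16-bit target. */
static void op_jump(struct script *s)
{
	uint16_t target = s->data[s->offset + 1] | (s->data[s->offset + 2] << 8);

	if (s->exec)
		s->offset = target;	/* follow the jump */
	else
		s->offset += 3;		/* skip the opcode, keep parsing linearly */
}

int main(void)
{
	/* Arbitrary opcode byte, jump target of 8. */
	static const uint8_t rom[16] = { [0] = 0x5c, [1] = 0x08, [2] = 0x00 };
	struct script s = { rom, 0, false };

	op_jump(&s);
	printf("exec=0: offset -> %u\n", s.offset);	/* 3 */

	s.offset = 0;
	s.exec = true;
	op_jump(&s);
	printf("exec=1: offset -> %u\n", s.offset);	/* 8 */
	return 0;
}
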
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index da50c1b12928..30c1f3a4158e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -69,6 +69,11 @@ nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1,
69 return 0; 69 return 0;
70} 70}
71 71
72static struct nouveau_clocks
73nv04_domain[] = {
74 { nv_clk_src_max }
75};
76
72static int 77static int
73nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 78nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
74 struct nouveau_oclass *oclass, void *data, u32 size, 79 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -77,7 +82,7 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
77 struct nv04_clock_priv *priv; 82 struct nv04_clock_priv *priv;
78 int ret; 83 int ret;
79 84
80 ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv); 85 ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, &priv);
81 *pobject = nv_object(priv); 86 *pobject = nv_object(priv);
82 if (ret) 87 if (ret)
83 return ret; 88 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
new file mode 100644
index 000000000000..7a723b4f564d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
@@ -0,0 +1,445 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/fifo.h>
26#include <subdev/bios.h>
27#include <subdev/bios/pll.h>
28#include <subdev/timer.h>
29#include <subdev/clock.h>
30
31#include "pll.h"
32
33struct nvaa_clock_priv {
34 struct nouveau_clock base;
35 enum nv_clk_src csrc, ssrc, vsrc;
36 u32 cctrl, sctrl;
37 u32 ccoef, scoef;
38 u32 cpost, spost;
39 u32 vdiv;
40};
41
42static u32
43read_div(struct nouveau_clock *clk)
44{
45 return nv_rd32(clk, 0x004600);
46}
47
48static u32
49read_pll(struct nouveau_clock *clk, u32 base)
50{
51 u32 ctrl = nv_rd32(clk, base + 0);
52 u32 coef = nv_rd32(clk, base + 4);
53 u32 ref = clk->read(clk, nv_clk_src_href);
54 u32 post_div = 0;
55 u32 clock = 0;
56 int N1, M1;
57
58 switch (base){
59 case 0x4020:
60 post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
61 break;
62 case 0x4028:
63 post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
64 break;
65 default:
66 break;
67 }
68
69 N1 = (coef & 0x0000ff00) >> 8;
70 M1 = (coef & 0x000000ff);
71 if ((ctrl & 0x80000000) && M1) {
72 clock = ref * N1 / M1;
73 clock = clock / post_div;
74 }
75
76 return clock;
77}
78
79static int
80nvaa_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
81{
82 struct nvaa_clock_priv *priv = (void *)clk;
83 u32 mast = nv_rd32(clk, 0x00c054);
84 u32 P = 0;
85
86 switch (src) {
87 case nv_clk_src_crystal:
88 return nv_device(priv)->crystal;
89 case nv_clk_src_href:
90 return 100000; /* PCIE reference clock */
91 case nv_clk_src_hclkm4:
92 return clk->read(clk, nv_clk_src_href) * 4;
93 case nv_clk_src_hclkm2d3:
94 return clk->read(clk, nv_clk_src_href) * 2 / 3;
95 case nv_clk_src_host:
96 switch (mast & 0x000c0000) {
97 case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
98 case 0x00040000: break;
99 case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
100 case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
101 }
102 break;
103 case nv_clk_src_core:
104 P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
105
106 switch (mast & 0x00000003) {
107 case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
108 case 0x00000001: return 0;
109 case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
110 case 0x00000003: return read_pll(clk, 0x004028) >> P;
111 }
112 break;
113 case nv_clk_src_cclk:
114 if ((mast & 0x03000000) != 0x03000000)
115 return clk->read(clk, nv_clk_src_core);
116
117 if ((mast & 0x00000200) == 0x00000000)
118 return clk->read(clk, nv_clk_src_core);
119
120 switch (mast & 0x00000c00) {
121 case 0x00000000: return clk->read(clk, nv_clk_src_href);
122 case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
123 case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
124 default: return 0;
125 }
126 case nv_clk_src_shader:
127 P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
128 switch (mast & 0x00000030) {
129 case 0x00000000:
130 if (mast & 0x00000040)
131 return clk->read(clk, nv_clk_src_href) >> P;
132 return clk->read(clk, nv_clk_src_crystal) >> P;
133 case 0x00000010: break;
134 case 0x00000020: return read_pll(clk, 0x004028) >> P;
135 case 0x00000030: return read_pll(clk, 0x004020) >> P;
136 }
137 break;
138 case nv_clk_src_mem:
139 return 0;
140 break;
141 case nv_clk_src_vdec:
142 P = (read_div(clk) & 0x00000700) >> 8;
143
144 switch (mast & 0x00400000) {
145 case 0x00400000:
146 return clk->read(clk, nv_clk_src_core) >> P;
147 break;
148 default:
149 return 500000 >> P;
150 break;
151 }
152 break;
153 default:
154 break;
155 }
156
157 nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
158 return 0;
159}
160
161static u32
162calc_pll(struct nvaa_clock_priv *priv, u32 reg,
163 u32 clock, int *N, int *M, int *P)
164{
165 struct nouveau_bios *bios = nouveau_bios(priv);
166 struct nvbios_pll pll;
167 struct nouveau_clock *clk = &priv->base;
168 int ret;
169
170 ret = nvbios_pll_parse(bios, reg, &pll);
171 if (ret)
172 return 0;
173
174 pll.vco2.max_freq = 0;
175 pll.refclk = clk->read(clk, nv_clk_src_href);
176 if (!pll.refclk)
177 return 0;
178
179 return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
180}
181
182static inline u32
183calc_P(u32 src, u32 target, int *div)
184{
185 u32 clk0 = src, clk1 = src;
186 for (*div = 0; *div <= 7; (*div)++) {
187 if (clk0 <= target) {
188 clk1 = clk0 << (*div ? 1 : 0);
189 break;
190 }
191 clk0 >>= 1;
192 }
193
194 if (target - clk0 <= clk1 - target)
195 return clk0;
196 (*div)--;
197 return clk1;
198}
199
200static int
201nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
202{
203 struct nvaa_clock_priv *priv = (void *)clk;
204 const int shader = cstate->domain[nv_clk_src_shader];
205 const int core = cstate->domain[nv_clk_src_core];
206 const int vdec = cstate->domain[nv_clk_src_vdec];
207 u32 out = 0, clock = 0;
208 int N, M, P1, P2 = 0;
209 int divs = 0;
210
211 /* cclk: find suitable source, disable PLL if we can */
212 if (core < clk->read(clk, nv_clk_src_hclkm4))
213 out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
214
215 /* Calculate clock * 2, so shader clock can use it too */
216 clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
217
218 if (abs(core - out) <=
219 abs(core - (clock >> 1))) {
220 priv->csrc = nv_clk_src_hclkm4;
221 priv->cctrl = divs << 16;
222 } else {
223 /* NVCTRL is actually used _after_ NVPOST, and after what we
224 * call NVPLL. To make matters worse, NVPOST is an integer
225 * divider instead of a right-shift number. */
226 if(P1 > 2) {
227 P2 = P1 - 2;
228 P1 = 2;
229 }
230
231 priv->csrc = nv_clk_src_core;
232 priv->ccoef = (N << 8) | M;
233
234 priv->cctrl = (P2 + 1) << 16;
235 priv->cpost = (1 << P1) << 16;
236 }
237
238 /* sclk: nvpll + divisor, href or spll */
239 out = 0;
240 if (shader == clk->read(clk, nv_clk_src_href)) {
241 priv->ssrc = nv_clk_src_href;
242 } else {
243 clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
244 if (priv->csrc == nv_clk_src_core) {
245 out = calc_P((core << 1), shader, &divs);
246 }
247
248 if (abs(shader - out) <=
249 abs(shader - clock) &&
250 (divs + P2) <= 7) {
251 priv->ssrc = nv_clk_src_core;
252 priv->sctrl = (divs + P2) << 16;
253 } else {
254 priv->ssrc = nv_clk_src_shader;
255 priv->scoef = (N << 8) | M;
256 priv->sctrl = P1 << 16;
257 }
258 }
259
260 /* vclk */
261 out = calc_P(core, vdec, &divs);
262 clock = calc_P(500000, vdec, &P1);
263 if(abs(vdec - out) <=
264 abs(vdec - clock)) {
265 priv->vsrc = nv_clk_src_cclk;
266 priv->vdiv = divs << 16;
267 } else {
268 priv->vsrc = nv_clk_src_vdec;
269 priv->vdiv = P1 << 16;
270 }
271
272 /* Print strategy! */
273 nv_debug(priv, "nvpll: %08x %08x %08x\n",
274 priv->ccoef, priv->cpost, priv->cctrl);
275 nv_debug(priv, " spll: %08x %08x %08x\n",
276 priv->scoef, priv->spost, priv->sctrl);
277 nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
278 if (priv->csrc == nv_clk_src_hclkm4)
279 nv_debug(priv, "core: hrefm4\n");
280 else
281 nv_debug(priv, "core: nvpll\n");
282
283 if (priv->ssrc == nv_clk_src_hclkm4)
284 nv_debug(priv, "shader: hrefm4\n");
285 else if (priv->ssrc == nv_clk_src_core)
286 nv_debug(priv, "shader: nvpll\n");
287 else
288 nv_debug(priv, "shader: spll\n");
289
290 if (priv->vsrc == nv_clk_src_hclkm4)
291 nv_debug(priv, "vdec: 500MHz\n");
292 else
293 nv_debug(priv, "vdec: core\n");
294
295 return 0;
296}
297
298static int
299nvaa_clock_prog(struct nouveau_clock *clk)
300{
301 struct nvaa_clock_priv *priv = (void *)clk;
302 struct nouveau_fifo *pfifo = nouveau_fifo(clk);
303 unsigned long flags;
304 u32 pllmask = 0, mast, ptherm_gate;
305 int ret = -EBUSY;
306
307 /* halt and idle execution engines */
308 ptherm_gate = nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
309 nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
310 /* Wait until the interrupt handler is finished */
311 if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
312 goto resume;
313
314 if (pfifo)
315 pfifo->pause(pfifo, &flags);
316
317 if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
318 goto resume;
319 if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
320 goto resume;
321
322 /* First switch to safe clocks: href */
323 mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
324 mast &= ~0x00400e73;
325 mast |= 0x03000000;
326
327 switch (priv->csrc) {
328 case nv_clk_src_hclkm4:
329 nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
330 mast |= 0x00000002;
331 break;
332 case nv_clk_src_core:
333 nv_wr32(clk, 0x402c, priv->ccoef);
334 nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
335 nv_wr32(clk, 0x4040, priv->cpost);
336 pllmask |= (0x3 << 8);
337 mast |= 0x00000003;
338 break;
339 default:
340 nv_warn(priv,"Reclocking failed: unknown core clock\n");
341 goto resume;
342 }
343
344 switch (priv->ssrc) {
345 case nv_clk_src_href:
346 nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
347 /* mast |= 0x00000000; */
348 break;
349 case nv_clk_src_core:
350 nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
351 mast |= 0x00000020;
352 break;
353 case nv_clk_src_shader:
354 nv_wr32(clk, 0x4024, priv->scoef);
355 nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
356 nv_wr32(clk, 0x4070, priv->spost);
357 pllmask |= (0x3 << 12);
358 mast |= 0x00000030;
359 break;
360 default:
361 nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
362 goto resume;
363 }
364
365 if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
366 nv_warn(priv,"Reclocking failed: unstable PLLs\n");
367 goto resume;
368 }
369
370 switch (priv->vsrc) {
371 case nv_clk_src_cclk:
372 mast |= 0x00400000;
373 default:
374 nv_wr32(clk, 0x4600, priv->vdiv);
375 }
376
377 nv_wr32(clk, 0xc054, mast);
378 ret = 0;
379
380resume:
381 if (pfifo)
382 pfifo->start(pfifo, &flags);
383
384 nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
385 nv_wr32(clk, 0x020060, ptherm_gate);
386
387 /* Disable some PLLs and dividers when unused */
388 if (priv->csrc != nv_clk_src_core) {
389 nv_wr32(clk, 0x4040, 0x00000000);
390 nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
391 }
392
393 if (priv->ssrc != nv_clk_src_shader) {
394 nv_wr32(clk, 0x4070, 0x00000000);
395 nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
396 }
397
398 return ret;
399}
400
401static void
402nvaa_clock_tidy(struct nouveau_clock *clk)
403{
404}
405
406static struct nouveau_clocks
407nvaa_domains[] = {
408 { nv_clk_src_crystal, 0xff },
409 { nv_clk_src_href , 0xff },
410 { nv_clk_src_core , 0xff, 0, "core", 1000 },
411 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
412 { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 },
413 { nv_clk_src_max }
414};
415
416static int
417nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
418 struct nouveau_oclass *oclass, void *data, u32 size,
419 struct nouveau_object **pobject)
420{
421 struct nvaa_clock_priv *priv;
422 int ret;
423
424 ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, &priv);
425 *pobject = nv_object(priv);
426 if (ret)
427 return ret;
428
429 priv->base.read = nvaa_clock_read;
430 priv->base.calc = nvaa_clock_calc;
431 priv->base.prog = nvaa_clock_prog;
432 priv->base.tidy = nvaa_clock_tidy;
433 return 0;
434}
435
436struct nouveau_oclass *
437nvaa_clock_oclass = &(struct nouveau_oclass) {
438 .handle = NV_SUBDEV(CLOCK, 0xaa),
439 .ofuncs = &(struct nouveau_ofuncs) {
440 .ctor = nvaa_clock_ctor,
441 .dtor = _nouveau_clock_dtor,
442 .init = _nouveau_clock_init,
443 .fini = _nouveau_clock_fini,
444 },
445};
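
The new nvaa.c reclocking code repeatedly has to pick a power-of-two post-divider that brings a source clock as close as possible to a requested frequency (calc_P() above). A minimal standalone sketch of that selection, using a plain brute-force search and invented kHz values rather than the driver's early-exit loop:

#include <stdio.h>
#include <stdlib.h>

/* Pick the right-shift divider (0..7) that brings src closest to target.
 * Returns the resulting clock and stores the chosen shift in *div. */
static unsigned int pick_post_div(unsigned int src, unsigned int target, int *div)
{
	unsigned int best = src;
	int shift;

	*div = 0;
	for (shift = 0; shift <= 7; shift++) {
		unsigned int clk = src >> shift;
		if (abs((int)clk - (int)target) < abs((int)best - (int)target)) {
			best = clk;
			*div = shift;
		}
	}
	return best;
}

int main(void)
{
	int div;
	/* Hypothetical: 800 MHz source, 230 MHz target (values in kHz). */
	unsigned int clk = pick_post_div(800000, 230000, &div);

	printf("chosen clock %u kHz with divider 1<<%d\n", clk, div);
	return 0;
}
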
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 041fd5edaebf..c33c03d2f4af 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -197,7 +197,7 @@ static int
197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, 197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
198 struct nouveau_i2c_board_info *info, 198 struct nouveau_i2c_board_info *info,
199 bool (*match)(struct nouveau_i2c_port *, 199 bool (*match)(struct nouveau_i2c_port *,
200 struct i2c_board_info *)) 200 struct i2c_board_info *, void *), void *data)
201{ 201{
202 struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index); 202 struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index);
203 int i; 203 int i;
@@ -221,7 +221,7 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
221 } 221 }
222 222
223 if (nv_probe_i2c(port, info[i].dev.addr) && 223 if (nv_probe_i2c(port, info[i].dev.addr) &&
224 (!match || match(port, &info[i].dev))) { 224 (!match || match(port, &info[i].dev, data))) {
225 nv_info(i2c, "detected %s: %s\n", what, 225 nv_info(i2c, "detected %s: %s\n", what,
226 info[i].dev.type); 226 info[i].dev.type);
227 return i; 227 return i;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index e44ed7b93c6d..7610fc5f8fa2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -29,9 +29,9 @@
29 29
30static bool 30static bool
31probe_monitoring_device(struct nouveau_i2c_port *i2c, 31probe_monitoring_device(struct nouveau_i2c_port *i2c,
32 struct i2c_board_info *info) 32 struct i2c_board_info *info, void *data)
33{ 33{
34 struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); 34 struct nouveau_therm_priv *priv = data;
35 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 35 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
36 struct i2c_client *client; 36 struct i2c_client *client;
37 37
@@ -96,7 +96,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
96 }; 96 };
97 97
98 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 98 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
99 board, probe_monitoring_device); 99 board, probe_monitoring_device, therm);
100 if (priv->ic) 100 if (priv->ic)
101 return; 101 return;
102 } 102 }
@@ -108,7 +108,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
108 }; 108 };
109 109
110 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 110 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
111 board, probe_monitoring_device); 111 board, probe_monitoring_device, therm);
112 if (priv->ic) 112 if (priv->ic)
113 return; 113 return;
114 } 114 }
@@ -117,5 +117,5 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
117 device. Let's try our static list. 117 device. Let's try our static list.
118 */ 118 */
119 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 119 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
120 nv_board_infos, probe_monitoring_device); 120 nv_board_infos, probe_monitoring_device, therm);
121} 121}
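
The i2c changes above thread an opaque void *data from identify() through to the match callback, so probe_monitoring_device() receives the therm object directly instead of deriving it from the port. A small userspace-style sketch of the same callback-with-context pattern (types and names below are invented for illustration, not the nouveau API):

#include <stdio.h>
#include <stdbool.h>

struct device_info { const char *type; int addr; };
struct therm_ctx  { int probes; };

/* The callback receives the candidate device plus whatever context the
 * caller passed through, mirroring the extra void *data argument above. */
typedef bool (*match_fn)(const struct device_info *info, void *data);

static int identify(const struct device_info *table, int n,
		    match_fn match, void *data)
{
	for (int i = 0; i < n; i++)
		if (!match || match(&table[i], data))
			return i;		/* first acceptable entry */
	return -1;
}

static bool match_monitoring_device(const struct device_info *info, void *data)
{
	struct therm_ctx *therm = data;		/* context arrives directly */
	therm->probes++;
	return info->addr == 0x2d;
}

int main(void)
{
	struct device_info board[] = { { "w83l785ts", 0x2e }, { "adt7473", 0x2d } };
	struct therm_ctx therm = { 0 };

	int idx = identify(board, 2, match_monitoring_device, &therm);
	printf("matched index %d after probing %d entries\n", idx, therm.probes);
	return 0;
}
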
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 936a71c59080..7fdc51e2a571 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -643,7 +643,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
643 get_tmds_slave(encoder)) 643 get_tmds_slave(encoder))
644 return; 644 return;
645 645
646 type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL); 646 type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL);
647 if (type < 0) 647 if (type < 0)
648 return; 648 return;
649 649
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 3618ac6b6316..32e7064b819b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -58,8 +58,8 @@ struct nouveau_plane {
58}; 58};
59 59
60static uint32_t formats[] = { 60static uint32_t formats[] = {
61 DRM_FORMAT_NV12,
62 DRM_FORMAT_UYVY, 61 DRM_FORMAT_UYVY,
62 DRM_FORMAT_NV12,
63}; 63};
64 64
65/* Sine can be approximated with 65/* Sine can be approximated with
@@ -99,13 +99,28 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
99 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 99 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
100 struct nouveau_bo *cur = nv_plane->cur; 100 struct nouveau_bo *cur = nv_plane->cur;
101 bool flip = nv_plane->flip; 101 bool flip = nv_plane->flip;
102 int format = ALIGN(src_w * 4, 0x100);
103 int soff = NV_PCRTC0_SIZE * nv_crtc->index; 102 int soff = NV_PCRTC0_SIZE * nv_crtc->index;
104 int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index; 103 int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
105 int ret; 104 int format, ret;
105
106 /* Source parameters given in 16.16 fixed point, ignore fractional. */
107 src_x >>= 16;
108 src_y >>= 16;
109 src_w >>= 16;
110 src_h >>= 16;
111
112 format = ALIGN(src_w * 4, 0x100);
106 113
107 if (format > 0xffff) 114 if (format > 0xffff)
108 return -EINVAL; 115 return -ERANGE;
116
117 if (dev->chipset >= 0x30) {
118 if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
119 return -ERANGE;
120 } else {
121 if (crtc_w < (src_w >> 3) || crtc_h < (src_h >> 3))
122 return -ERANGE;
123 }
109 124
110 ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM); 125 ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
111 if (ret) 126 if (ret)
@@ -113,12 +128,6 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
113 128
114 nv_plane->cur = nv_fb->nvbo; 129 nv_plane->cur = nv_fb->nvbo;
115 130
116 /* Source parameters given in 16.16 fixed point, ignore fractional. */
117 src_x = src_x >> 16;
118 src_y = src_y >> 16;
119 src_w = src_w >> 16;
120 src_h = src_h >> 16;
121
122 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY); 131 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
123 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0); 132 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
124 133
@@ -245,14 +254,25 @@ nv10_overlay_init(struct drm_device *device)
245{ 254{
246 struct nouveau_device *dev = nouveau_dev(device); 255 struct nouveau_device *dev = nouveau_dev(device);
247 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); 256 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
257 int num_formats = ARRAY_SIZE(formats);
248 int ret; 258 int ret;
249 259
250 if (!plane) 260 if (!plane)
251 return; 261 return;
252 262
263 switch (dev->chipset) {
264 case 0x10:
265 case 0x11:
266 case 0x15:
267 case 0x1a:
268 case 0x20:
269 num_formats = 1;
270 break;
271 }
272
253 ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */, 273 ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */,
254 &nv10_plane_funcs, 274 &nv10_plane_funcs,
255 formats, ARRAY_SIZE(formats), false); 275 formats, num_formats, false);
256 if (ret) 276 if (ret)
257 goto err; 277 goto err;
258 278
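
The overlay hunk above truncates the 16.16 fixed-point source rectangle before it is used, derives the pitch from the truncated width, and rejects over-aggressive downscaling with -ERANGE instead of -EINVAL. A standalone sketch of just that parameter checking (the downscale limit is passed in as a parameter here and the test values are illustrative, not taken from hardware documentation):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Source rectangle arrives in 16.16 fixed point; the destination is in
 * whole pixels.  Returns 0 on success or -ERANGE for unsupported setups. */
static int check_overlay(uint32_t src_w_fp, uint32_t src_h_fp,
			 uint32_t crtc_w, uint32_t crtc_h, int max_downscale_shift)
{
	uint32_t src_w = src_w_fp >> 16;	/* drop the fractional part */
	uint32_t src_h = src_h_fp >> 16;
	uint32_t pitch = ALIGN_UP(src_w * 4, 0x100);

	if (pitch > 0xffff)			/* pitch field is 16 bits wide */
		return -ERANGE;
	if (crtc_w < (src_w >> max_downscale_shift) ||
	    crtc_h < (src_h >> max_downscale_shift))
		return -ERANGE;			/* downscaling beyond the limit */
	return 0;
}

int main(void)
{
	/* Hypothetical 720x480 source shown at 360x240, with a 1:2 limit. */
	int ret = check_overlay(720u << 16, 480u << 16, 360, 240, 1);

	printf("overlay check: %d\n", ret);
	return 0;
}
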
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index cc4b208ce546..244822df8ffc 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -59,7 +59,7 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
59 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 59 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
60 60
61 return i2c->identify(i2c, i2c_index, "TV encoder", 61 return i2c->identify(i2c, i2c_index, "TV encoder",
62 nv04_tv_encoder_info, NULL); 62 nv04_tv_encoder_info, NULL, NULL);
63} 63}
64 64
65 65
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 6828d81ed7b9..900fae01793e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -447,6 +447,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
447 if (ret) 447 if (ret)
448 goto done; 448 goto done;
449 449
450 info->offset = ntfy->node->offset;
451
450done: 452done:
451 if (ret) 453 if (ret)
452 nouveau_abi16_ntfy_fini(chan, ntfy); 454 nouveau_abi16_ntfy_fini(chan, ntfy);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 95c740454049..ba0183fb84f3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -51,6 +51,7 @@ static struct nouveau_dsm_priv {
51 bool dsm_detected; 51 bool dsm_detected;
52 bool optimus_detected; 52 bool optimus_detected;
53 acpi_handle dhandle; 53 acpi_handle dhandle;
54 acpi_handle other_handle;
54 acpi_handle rom_handle; 55 acpi_handle rom_handle;
55} nouveau_dsm_priv; 56} nouveau_dsm_priv;
56 57
@@ -260,9 +261,10 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
260 if (!dhandle) 261 if (!dhandle)
261 return false; 262 return false;
262 263
263 if (!acpi_has_method(dhandle, "_DSM")) 264 if (!acpi_has_method(dhandle, "_DSM")) {
265 nouveau_dsm_priv.other_handle = dhandle;
264 return false; 266 return false;
265 267 }
266 if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) 268 if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
267 retval |= NOUVEAU_DSM_HAS_MUX; 269 retval |= NOUVEAU_DSM_HAS_MUX;
268 270
@@ -338,6 +340,16 @@ static bool nouveau_dsm_detect(void)
338 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", 340 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
339 acpi_method_name); 341 acpi_method_name);
340 nouveau_dsm_priv.dsm_detected = true; 342 nouveau_dsm_priv.dsm_detected = true;
343 /*
344 * On some systems hotplug events are generated for the device
345 * being switched off when _DSM is executed. They cause ACPI
346 * hotplug to trigger and attempt to remove the device from
347 * the system, which causes it to break down. Prevent that from
348 * happening by setting the no_hotplug flag for the involved
349 * ACPI device objects.
350 */
351 acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
352 acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
341 ret = true; 353 ret = true;
342 } 354 }
343 355
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7809d92183c4..25ea82f8def3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -608,8 +608,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
608 fence = nouveau_fence_ref(new_bo->bo.sync_obj); 608 fence = nouveau_fence_ref(new_bo->bo.sync_obj);
609 spin_unlock(&new_bo->bo.bdev->fence_lock); 609 spin_unlock(&new_bo->bo.bdev->fence_lock);
610 ret = nouveau_fence_sync(fence, chan); 610 ret = nouveau_fence_sync(fence, chan);
611 nouveau_fence_unref(&fence);
611 if (ret) 612 if (ret)
612 return ret; 613 goto fail_free;
613 614
614 if (new_bo != old_bo) { 615 if (new_bo != old_bo) {
615 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); 616 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
@@ -701,7 +702,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
701 702
702 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); 703 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
703 if (s->event) 704 if (s->event)
704 drm_send_vblank_event(dev, -1, s->event); 705 drm_send_vblank_event(dev, s->crtc, s->event);
705 706
706 list_del(&s->head); 707 list_del(&s->head);
707 if (ps) 708 if (ps)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7a3759f1c41a..98a22e6e27a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
858 if (nouveau_runtime_pm == 0) 858 if (nouveau_runtime_pm == 0)
859 return -EINVAL; 859 return -EINVAL;
860 860
861 /* are we optimus enabled? */
862 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
863 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
864 return -EINVAL;
865 }
866
861 nv_debug_level(SILENT); 867 nv_debug_level(SILENT);
862 drm_kms_helper_poll_disable(drm_dev); 868 drm_kms_helper_poll_disable(drm_dev);
863 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); 869 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f8e66c08b11a..4e384a2f99c3 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1265,7 +1265,7 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
1265 uint32_t start, uint32_t size) 1265 uint32_t start, uint32_t size)
1266{ 1266{
1267 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 1267 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
1268 u32 end = max(start + size, (u32)256); 1268 u32 end = min_t(u32, start + size, 256);
1269 u32 i; 1269 u32 i;
1270 1270
1271 for (i = start; i < end; i++) { 1271 for (i = start; i < end; i++) {
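
The nv50_display.c one-liner above fixes a classic clamp inversion: max(start + size, 256) can only ever push the end index past the 256-entry gamma LUT, while the intent was to cap it there. A tiny sketch of the difference (256 matches the LUT length used by the code; start and size are made up):

#include <stdio.h>

#define LUT_SIZE 256u

static unsigned int min_u32(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u32(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
	unsigned int start = 200, size = 100;

	/* Buggy form: never smaller than 256, so a large request overruns. */
	unsigned int end_bad  = max_u32(start + size, LUT_SIZE);
	/* Fixed form: clamp the upper bound to the table length. */
	unsigned int end_good = min_u32(start + size, LUT_SIZE);

	printf("buggy end = %u, clamped end = %u (table has %u entries)\n",
	       end_bad, end_good, LUT_SIZE);
	return 0;
}
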
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 037d324bf58f..66ac0ff95f5a 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -8,5 +8,6 @@ config DRM_QXL
8 select DRM_KMS_HELPER 8 select DRM_KMS_HELPER
9 select DRM_KMS_FB_HELPER 9 select DRM_KMS_FB_HELPER
10 select DRM_TTM 10 select DRM_TTM
11 select CRC32
11 help 12 help
12 QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting. 13 QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5e827c29d194..d70aafb83307 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -24,7 +24,7 @@
24 */ 24 */
25 25
26 26
27#include "linux/crc32.h" 27#include <linux/crc32.h>
28 28
29#include "qxl_drv.h" 29#include "qxl_drv.h"
30#include "qxl_object.h" 30#include "qxl_object.h"
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 80a20120e625..0b9621c9aeea 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1143 } 1143 }
1144 1144
1145 if (tiling_flags & RADEON_TILING_MACRO) { 1145 if (tiling_flags & RADEON_TILING_MACRO) {
1146 if (rdev->family >= CHIP_BONAIRE) 1146 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
1147 tmp = rdev->config.cik.tile_config;
1148 else if (rdev->family >= CHIP_TAHITI)
1149 tmp = rdev->config.si.tile_config;
1150 else if (rdev->family >= CHIP_CAYMAN)
1151 tmp = rdev->config.cayman.tile_config;
1152 else
1153 tmp = rdev->config.evergreen.tile_config;
1154 1147
1155 switch ((tmp & 0xf0) >> 4) { 1148 /* Set NUM_BANKS. */
1156 case 0: /* 4 banks */ 1149 if (rdev->family >= CHIP_BONAIRE) {
1157 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); 1150 unsigned tileb, index, num_banks, tile_split_bytes;
1158 break; 1151
1159 case 1: /* 8 banks */ 1152 /* Calculate the macrotile mode index. */
1160 default: 1153 tile_split_bytes = 64 << tile_split;
1161 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); 1154 tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
1162 break; 1155 tileb = min(tile_split_bytes, tileb);
1163 case 2: /* 16 banks */ 1156
1164 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); 1157 for (index = 0; tileb > 64; index++) {
1165 break; 1158 tileb >>= 1;
1159 }
1160
1161 if (index >= 16) {
1162 DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
1163 target_fb->bits_per_pixel, tile_split);
1164 return -EINVAL;
1165 }
1166
1167 num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
1168 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
1169 } else {
1170 /* SI and older. */
1171 if (rdev->family >= CHIP_TAHITI)
1172 tmp = rdev->config.si.tile_config;
1173 else if (rdev->family >= CHIP_CAYMAN)
1174 tmp = rdev->config.cayman.tile_config;
1175 else
1176 tmp = rdev->config.evergreen.tile_config;
1177
1178 switch ((tmp & 0xf0) >> 4) {
1179 case 0: /* 4 banks */
1180 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
1181 break;
1182 case 1: /* 8 banks */
1183 default:
1184 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
1185 break;
1186 case 2: /* 16 banks */
1187 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
1188 break;
1189 }
1166 } 1190 }
1167 1191
1168 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); 1192 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
1169
1170 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
1171 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); 1193 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
1172 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); 1194 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
1173 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); 1195 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
@@ -1180,23 +1202,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1180 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1202 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1181 1203
1182 if (rdev->family >= CHIP_BONAIRE) { 1204 if (rdev->family >= CHIP_BONAIRE) {
1183 u32 num_pipe_configs = rdev->config.cik.max_tile_pipes; 1205 /* Read the pipe config from the 2D TILED SCANOUT mode.
1184 u32 num_rb = rdev->config.cik.max_backends_per_se; 1206 * It should be the same for the other modes too, but not all
1185 if (num_pipe_configs > 8) 1207 * modes set the pipe config field. */
1186 num_pipe_configs = 8; 1208 u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
1187 if (num_pipe_configs == 8) 1209
1188 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16); 1210 fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
1189 else if (num_pipe_configs == 4) {
1190 if (num_rb == 4)
1191 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
1192 else if (num_rb < 4)
1193 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
1194 } else if (num_pipe_configs == 2)
1195 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
1196 } else if ((rdev->family == CHIP_TAHITI) || 1211 } else if ((rdev->family == CHIP_TAHITI) ||
1197 (rdev->family == CHIP_PITCAIRN)) 1212 (rdev->family == CHIP_PITCAIRN))
1198 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); 1213 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
1199 else if (rdev->family == CHIP_VERDE) 1214 else if ((rdev->family == CHIP_VERDE) ||
1215 (rdev->family == CHIP_OLAND) ||
1216 (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
1200 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); 1217 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
1201 1218
1202 switch (radeon_crtc->crtc_id) { 1219 switch (radeon_crtc->crtc_id) {
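
For CIK parts, the hunk above derives the number of banks from a macrotile mode index: the byte size of an 8x8 pixel tile at the framebuffer bpp, capped by the tile-split size, then halved until it reaches 64 bytes. A standalone sketch of that index computation with made-up inputs (the macrotile_mode_array itself is hardware-initialised state in rdev->config, so it is not reproduced here):

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b) { return a < b ? a : b; }

/* Compute the macrotile mode index the way the hunk above does:
 * bytes in an 8x8 tile at this bpp, capped by the tile split, then
 * halved until it reaches 64 bytes.  Returns -1 for bogus inputs. */
static int macrotile_index(unsigned int bits_per_pixel, unsigned int tile_split)
{
	unsigned int tile_split_bytes = 64u << tile_split;
	unsigned int tileb = 8 * 8 * bits_per_pixel / 8;
	unsigned int index;

	tileb = min_u32(tile_split_bytes, tileb);
	for (index = 0; tileb > 64; index++)
		tileb >>= 1;

	return index >= 16 ? -1 : (int)index;
}

int main(void)
{
	/* Hypothetical 32 bpp scanout with a 1KB tile split (tile_split = 4). */
	int index = macrotile_index(32, 4);

	printf("macrotile mode index = %d\n", index);
	return 0;
}
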
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index 0652ee0a2098..f685035dbe39 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -44,7 +44,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
44 PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; 44 PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
45 int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); 45 int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
46 unsigned char *base; 46 unsigned char *base;
47 u16 out; 47 u16 out = cpu_to_le16(0);
48 48
49 memset(&args, 0, sizeof(args)); 49 memset(&args, 0, sizeof(args));
50 50
@@ -55,11 +55,14 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
55 DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num); 55 DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
56 return -EINVAL; 56 return -EINVAL;
57 } 57 }
58 args.ucRegIndex = buf[0]; 58 if (buf == NULL)
59 if (num > 1) { 59 args.ucRegIndex = 0;
60 else
61 args.ucRegIndex = buf[0];
62 if (num)
60 num--; 63 num--;
64 if (num)
61 memcpy(&out, &buf[1], num); 65 memcpy(&out, &buf[1], num);
62 }
63 args.lpI2CDataOut = cpu_to_le16(out); 66 args.lpI2CDataOut = cpu_to_le16(out);
64 } else { 67 } else {
65 if (num > ATOM_MAX_HW_I2C_READ) { 68 if (num > ATOM_MAX_HW_I2C_READ) {
@@ -96,14 +99,14 @@ int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
96 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); 99 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
97 struct i2c_msg *p; 100 struct i2c_msg *p;
98 int i, remaining, current_count, buffer_offset, max_bytes, ret; 101 int i, remaining, current_count, buffer_offset, max_bytes, ret;
99 u8 buf = 0, flags; 102 u8 flags;
100 103
101 /* check for bus probe */ 104 /* check for bus probe */
102 p = &msgs[0]; 105 p = &msgs[0];
103 if ((num == 1) && (p->len == 0)) { 106 if ((num == 1) && (p->len == 0)) {
104 ret = radeon_process_i2c_ch(i2c, 107 ret = radeon_process_i2c_ch(i2c,
105 p->addr, HW_I2C_WRITE, 108 p->addr, HW_I2C_WRITE,
106 &buf, 1); 109 NULL, 0);
107 if (ret) 110 if (ret)
108 return ret; 111 return ret;
109 else 112 else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b43a3a3c9067..e950fabd7f5e 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width)
3057 * Returns the disabled RB bitmask. 3057 * Returns the disabled RB bitmask.
3058 */ 3058 */
3059static u32 cik_get_rb_disabled(struct radeon_device *rdev, 3059static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3060 u32 max_rb_num, u32 se_num, 3060 u32 max_rb_num_per_se,
3061 u32 sh_per_se) 3061 u32 sh_per_se)
3062{ 3062{
3063 u32 data, mask; 3063 u32 data, mask;
@@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3071 3071
3072 data >>= BACKEND_DISABLE_SHIFT; 3072 data >>= BACKEND_DISABLE_SHIFT;
3073 3073
3074 mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se); 3074 mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
3075 3075
3076 return data & mask; 3076 return data & mask;
3077} 3077}
@@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3088 */ 3088 */
3089static void cik_setup_rb(struct radeon_device *rdev, 3089static void cik_setup_rb(struct radeon_device *rdev,
3090 u32 se_num, u32 sh_per_se, 3090 u32 se_num, u32 sh_per_se,
3091 u32 max_rb_num) 3091 u32 max_rb_num_per_se)
3092{ 3092{
3093 int i, j; 3093 int i, j;
3094 u32 data, mask; 3094 u32 data, mask;
@@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
3098 for (i = 0; i < se_num; i++) { 3098 for (i = 0; i < se_num; i++) {
3099 for (j = 0; j < sh_per_se; j++) { 3099 for (j = 0; j < sh_per_se; j++) {
3100 cik_select_se_sh(rdev, i, j); 3100 cik_select_se_sh(rdev, i, j);
3101 data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); 3101 data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
3102 if (rdev->family == CHIP_HAWAII) 3102 if (rdev->family == CHIP_HAWAII)
3103 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); 3103 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
3104 else 3104 else
@@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev,
3108 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 3108 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3109 3109
3110 mask = 1; 3110 mask = 1;
3111 for (i = 0; i < max_rb_num; i++) { 3111 for (i = 0; i < max_rb_num_per_se * se_num; i++) {
3112 if (!(disabled_rbs & mask)) 3112 if (!(disabled_rbs & mask))
3113 enabled_rbs |= mask; 3113 enabled_rbs |= mask;
3114 mask <<= 1; 3114 mask <<= 1;
3115 } 3115 }
3116 3116
3117 rdev->config.cik.backend_enable_mask = enabled_rbs;
3118
3117 for (i = 0; i < se_num; i++) { 3119 for (i = 0; i < se_num; i++) {
3118 cik_select_se_sh(rdev, i, 0xffffffff); 3120 cik_select_se_sh(rdev, i, 0xffffffff);
3119 data = 0; 3121 data = 0;
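
The cik.c change above switches the RB accounting from a global count to a per-shader-engine count: the disabled-RB mask is sized per SE, the enable loop walks max_rb_num_per_se * se_num bits, and the result is stored in backend_enable_mask. A small standalone sketch of the arithmetic (hypothetical values stand in for the register reads):

#include <stdint.h>
#include <stdio.h>

static uint32_t create_bitmask(uint32_t bit_width)
{
	return (1u << bit_width) - 1;		/* mirrors cik_create_bitmask() */
}

int main(void)
{
	uint32_t se_num = 2, sh_per_se = 1, max_rb_num_per_se = 2;
	uint32_t disabled_rbs = 0x2;		/* pretend RB1 is fused off */
	uint32_t enabled_rbs = 0, mask = 1;
	uint32_t i;

	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	printf("per-SH disable mask width: 0x%x\n",
	       create_bitmask(max_rb_num_per_se / sh_per_se));
	printf("backend_enable_mask:       0x%x\n", enabled_rbs);
	return 0;
}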
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 0300727a4f70..d08b83c6267b 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -458,7 +458,7 @@ int cik_copy_dma(struct radeon_device *rdev,
458 radeon_ring_write(ring, 0); /* src/dst endian swap */ 458 radeon_ring_write(ring, 0); /* src/dst endian swap */
459 radeon_ring_write(ring, src_offset & 0xffffffff); 459 radeon_ring_write(ring, src_offset & 0xffffffff);
460 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff); 460 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
461 radeon_ring_write(ring, dst_offset & 0xfffffffc); 461 radeon_ring_write(ring, dst_offset & 0xffffffff);
462 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff); 462 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
463 src_offset += cur_size_in_bytes; 463 src_offset += cur_size_in_bytes;
464 dst_offset += cur_size_in_bytes; 464 dst_offset += cur_size_in_bytes;
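
The one-line cik_sdma.c fix emits the full low dword of the destination address instead of a 0xfffffffc-masked value, so the bottom two address bits are no longer dropped. For reference, a tiny sketch of the low/high dword split (the kernel helpers are redefined here for a standalone build):

#include <stdint.h>
#include <stdio.h>

/* local stand-ins for the kernel helpers */
#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
	uint64_t dst_offset = 0x123456789aULL;	/* hypothetical GPU address */

	printf("low  dword: 0x%08x\n", lower_32_bits(dst_offset));
	printf("high dword: 0x%08x\n", upper_32_bits(dst_offset));
	return 0;
}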
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 009f46e0ce72..713a5d359901 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,11 +93,13 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
93 struct radeon_device *rdev = encoder->dev->dev_private; 93 struct radeon_device *rdev = encoder->dev->dev_private;
94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
96 u32 offset = dig->afmt->offset; 96 u32 offset;
97 97
98 if (!dig->afmt->pin) 98 if (!dig || !dig->afmt || !dig->afmt->pin)
99 return; 99 return;
100 100
101 offset = dig->afmt->offset;
102
101 WREG32(AFMT_AUDIO_SRC_CONTROL + offset, 103 WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
102 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); 104 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
103} 105}
@@ -112,7 +114,7 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
112 struct radeon_connector *radeon_connector = NULL; 114 struct radeon_connector *radeon_connector = NULL;
113 u32 tmp = 0, offset; 115 u32 tmp = 0, offset;
114 116
115 if (!dig->afmt->pin) 117 if (!dig || !dig->afmt || !dig->afmt->pin)
116 return; 118 return;
117 119
118 offset = dig->afmt->pin->offset; 120 offset = dig->afmt->pin->offset;
@@ -156,7 +158,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
156 u8 *sadb; 158 u8 *sadb;
157 int sad_count; 159 int sad_count;
158 160
159 if (!dig->afmt->pin) 161 if (!dig || !dig->afmt || !dig->afmt->pin)
160 return; 162 return;
161 163
162 offset = dig->afmt->pin->offset; 164 offset = dig->afmt->pin->offset;
@@ -172,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
172 } 174 }
173 175
174 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); 176 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
175 if (sad_count < 0) { 177 if (sad_count <= 0) {
176 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 178 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
177 return; 179 return;
178 } 180 }
@@ -217,7 +219,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
217 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, 219 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
218 }; 220 };
219 221
220 if (!dig->afmt->pin) 222 if (!dig || !dig->afmt || !dig->afmt->pin)
221 return; 223 return;
222 224
223 offset = dig->afmt->pin->offset; 225 offset = dig->afmt->pin->offset;
@@ -233,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
233 } 235 }
234 236
235 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 237 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
236 if (sad_count < 0) { 238 if (sad_count <= 0) {
237 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 239 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
238 return; 240 return;
239 } 241 }
@@ -306,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev)
306 rdev->audio.enabled = true; 308 rdev->audio.enabled = true;
307 309
308 if (ASIC_IS_DCE8(rdev)) 310 if (ASIC_IS_DCE8(rdev))
309 rdev->audio.num_pins = 7; 311 rdev->audio.num_pins = 6;
312 else if (ASIC_IS_DCE61(rdev))
313 rdev->audio.num_pins = 4;
310 else 314 else
311 rdev->audio.num_pins = 6; 315 rdev->audio.num_pins = 6;
312 316
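
The dce6_afmt.c hunks defer reading dig->afmt->offset until the whole pointer chain has been validated, and treat a zero SAD count as an error. A minimal illustration of the reordering (hypothetical, simplified types):

#include <stddef.h>
#include <stdint.h>

struct afmt { uint32_t offset; void *pin; };
struct dig  { struct afmt *afmt; };

static int select_pin(const struct dig *dig, uint32_t *offset_out)
{
	if (!dig || !dig->afmt || !dig->afmt->pin)
		return -1;			/* bail out before touching offset */

	*offset_out = dig->afmt->offset;	/* safe: chain already validated */
	return 0;
}

int main(void)
{
	uint32_t offset = 0;

	return select_pin(NULL, &offset) ? 0 : 1;	/* a NULL dig must be rejected */
}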
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index aa695c4feb3d..0c6d5cef4cf1 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
118 } 118 }
119 119
120 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); 120 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
121 if (sad_count < 0) { 121 if (sad_count <= 0) {
122 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 122 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
123 return; 123 return;
124 } 124 }
@@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
173 } 173 }
174 174
175 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 175 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
176 if (sad_count < 0) { 176 if (sad_count <= 0) {
177 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 177 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
178 return; 178 return;
179 } 179 }
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 11aab2ab54ce..f59a9e9fccf8 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
895 (rdev->pdev->device == 0x999C)) { 895 (rdev->pdev->device == 0x999C)) {
896 rdev->config.cayman.max_simds_per_se = 6; 896 rdev->config.cayman.max_simds_per_se = 6;
897 rdev->config.cayman.max_backends_per_se = 2; 897 rdev->config.cayman.max_backends_per_se = 2;
898 rdev->config.cayman.max_hw_contexts = 8;
899 rdev->config.cayman.sx_max_export_size = 256;
900 rdev->config.cayman.sx_max_export_pos_size = 64;
901 rdev->config.cayman.sx_max_export_smx_size = 192;
898 } else if ((rdev->pdev->device == 0x9903) || 902 } else if ((rdev->pdev->device == 0x9903) ||
899 (rdev->pdev->device == 0x9904) || 903 (rdev->pdev->device == 0x9904) ||
900 (rdev->pdev->device == 0x990A) || 904 (rdev->pdev->device == 0x990A) ||
@@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
905 (rdev->pdev->device == 0x999D)) { 909 (rdev->pdev->device == 0x999D)) {
906 rdev->config.cayman.max_simds_per_se = 4; 910 rdev->config.cayman.max_simds_per_se = 4;
907 rdev->config.cayman.max_backends_per_se = 2; 911 rdev->config.cayman.max_backends_per_se = 2;
912 rdev->config.cayman.max_hw_contexts = 8;
913 rdev->config.cayman.sx_max_export_size = 256;
914 rdev->config.cayman.sx_max_export_pos_size = 64;
915 rdev->config.cayman.sx_max_export_smx_size = 192;
908 } else if ((rdev->pdev->device == 0x9919) || 916 } else if ((rdev->pdev->device == 0x9919) ||
909 (rdev->pdev->device == 0x9990) || 917 (rdev->pdev->device == 0x9990) ||
910 (rdev->pdev->device == 0x9991) || 918 (rdev->pdev->device == 0x9991) ||
@@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
915 (rdev->pdev->device == 0x99A0)) { 923 (rdev->pdev->device == 0x99A0)) {
916 rdev->config.cayman.max_simds_per_se = 3; 924 rdev->config.cayman.max_simds_per_se = 3;
917 rdev->config.cayman.max_backends_per_se = 1; 925 rdev->config.cayman.max_backends_per_se = 1;
926 rdev->config.cayman.max_hw_contexts = 4;
927 rdev->config.cayman.sx_max_export_size = 128;
928 rdev->config.cayman.sx_max_export_pos_size = 32;
929 rdev->config.cayman.sx_max_export_smx_size = 96;
918 } else { 930 } else {
919 rdev->config.cayman.max_simds_per_se = 2; 931 rdev->config.cayman.max_simds_per_se = 2;
920 rdev->config.cayman.max_backends_per_se = 1; 932 rdev->config.cayman.max_backends_per_se = 1;
933 rdev->config.cayman.max_hw_contexts = 4;
934 rdev->config.cayman.sx_max_export_size = 128;
935 rdev->config.cayman.sx_max_export_pos_size = 32;
936 rdev->config.cayman.sx_max_export_smx_size = 96;
921 } 937 }
922 rdev->config.cayman.max_texture_channel_caches = 2; 938 rdev->config.cayman.max_texture_channel_caches = 2;
923 rdev->config.cayman.max_gprs = 256; 939 rdev->config.cayman.max_gprs = 256;
@@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
925 rdev->config.cayman.max_gs_threads = 32; 941 rdev->config.cayman.max_gs_threads = 32;
926 rdev->config.cayman.max_stack_entries = 512; 942 rdev->config.cayman.max_stack_entries = 512;
927 rdev->config.cayman.sx_num_of_sets = 8; 943 rdev->config.cayman.sx_num_of_sets = 8;
928 rdev->config.cayman.sx_max_export_size = 256;
929 rdev->config.cayman.sx_max_export_pos_size = 64;
930 rdev->config.cayman.sx_max_export_smx_size = 192;
931 rdev->config.cayman.max_hw_contexts = 8;
932 rdev->config.cayman.sq_num_cf_insts = 2; 944 rdev->config.cayman.sq_num_cf_insts = 2;
933 945
934 rdev->config.cayman.sc_prim_fifo_size = 0x40; 946 rdev->config.cayman.sc_prim_fifo_size = 0x40;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index cdc003085a76..49c4d48f54d6 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -785,8 +785,8 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
785 struct ni_ps *ps = ni_get_ps(rps); 785 struct ni_ps *ps = ni_get_ps(rps);
786 struct radeon_clock_and_voltage_limits *max_limits; 786 struct radeon_clock_and_voltage_limits *max_limits;
787 bool disable_mclk_switching; 787 bool disable_mclk_switching;
788 u32 mclk, sclk; 788 u32 mclk;
789 u16 vddc, vddci; 789 u16 vddci;
790 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; 790 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
791 int i; 791 int i;
792 792
@@ -839,24 +839,14 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
839 839
840 /* XXX validate the min clocks required for display */ 840 /* XXX validate the min clocks required for display */
841 841
842 /* adjust low state */
842 if (disable_mclk_switching) { 843 if (disable_mclk_switching) {
843 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; 844 ps->performance_levels[0].mclk =
844 sclk = ps->performance_levels[0].sclk; 845 ps->performance_levels[ps->performance_level_count - 1].mclk;
845 vddc = ps->performance_levels[0].vddc; 846 ps->performance_levels[0].vddci =
846 vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; 847 ps->performance_levels[ps->performance_level_count - 1].vddci;
847 } else {
848 sclk = ps->performance_levels[0].sclk;
849 mclk = ps->performance_levels[0].mclk;
850 vddc = ps->performance_levels[0].vddc;
851 vddci = ps->performance_levels[0].vddci;
852 } 848 }
853 849
854 /* adjusted low state */
855 ps->performance_levels[0].sclk = sclk;
856 ps->performance_levels[0].mclk = mclk;
857 ps->performance_levels[0].vddc = vddc;
858 ps->performance_levels[0].vddci = vddci;
859
860 btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk, 850 btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
861 &ps->performance_levels[0].sclk, 851 &ps->performance_levels[0].sclk,
862 &ps->performance_levels[0].mclk); 852 &ps->performance_levels[0].mclk);
@@ -868,11 +858,15 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
868 ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; 858 ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
869 } 859 }
870 860
861 /* adjust remaining states */
871 if (disable_mclk_switching) { 862 if (disable_mclk_switching) {
872 mclk = ps->performance_levels[0].mclk; 863 mclk = ps->performance_levels[0].mclk;
864 vddci = ps->performance_levels[0].vddci;
873 for (i = 1; i < ps->performance_level_count; i++) { 865 for (i = 1; i < ps->performance_level_count; i++) {
874 if (mclk < ps->performance_levels[i].mclk) 866 if (mclk < ps->performance_levels[i].mclk)
875 mclk = ps->performance_levels[i].mclk; 867 mclk = ps->performance_levels[i].mclk;
868 if (vddci < ps->performance_levels[i].vddci)
869 vddci = ps->performance_levels[i].vddci;
876 } 870 }
877 for (i = 0; i < ps->performance_level_count; i++) { 871 for (i = 0; i < ps->performance_level_count; i++) {
878 ps->performance_levels[i].mclk = mclk; 872 ps->performance_levels[i].mclk = mclk;
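
In ni_dpm.c the adjusted-state logic now tracks the highest vddci alongside the highest mclk when memory-clock switching is disabled. A standalone sketch of that pass over the performance levels; it assumes vddci is propagated to every level the same way mclk is, which the visible diff only partially shows:

#include <stdint.h>
#include <stdio.h>

struct level { uint32_t mclk; uint16_t vddci; };

static void flatten_mclk_vddci(struct level *lv, unsigned count)
{
	uint32_t mclk = lv[0].mclk;
	uint16_t vddci = lv[0].vddci;
	unsigned i;

	for (i = 1; i < count; i++) {		/* find the state-wide maxima */
		if (mclk < lv[i].mclk)
			mclk = lv[i].mclk;
		if (vddci < lv[i].vddci)
			vddci = lv[i].vddci;
	}
	for (i = 0; i < count; i++) {		/* pin every level to them */
		lv[i].mclk = mclk;
		lv[i].vddci = vddci;
	}
}

int main(void)
{
	struct level lv[2] = { { 400, 900 }, { 800, 1000 } };

	flatten_mclk_vddci(lv, 2);
	printf("level0: mclk=%u vddci=%u\n",
	       (unsigned)lv[0].mclk, (unsigned)lv[0].vddci);
	return 0;
}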
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 4b89262f3f0e..b7d3ecba43e3 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -304,9 +304,9 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
304 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); 304 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
305 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ 305 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
306 } 306 }
307 } else if (ASIC_IS_DCE3(rdev)) { 307 } else {
308 /* according to the reg specs, this should DCE3.2 only, but in 308 /* according to the reg specs, this should DCE3.2 only, but in
309 * practice it seems to cover DCE3.0/3.1 as well. 309 * practice it seems to cover DCE2.0/3.0/3.1 as well.
310 */ 310 */
311 if (dig->dig_encoder == 0) { 311 if (dig->dig_encoder == 0) {
312 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); 312 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
@@ -317,10 +317,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
317 WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100); 317 WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
318 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ 318 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
319 } 319 }
320 } else {
321 /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
322 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
323 AUDIO_DTO_MODULE(clock / 10));
324 } 320 }
325} 321}
326 322
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ecf2a3960c07..45e1f447bc79 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1940,7 +1940,7 @@ struct si_asic {
1940 unsigned sc_earlyz_tile_fifo_size; 1940 unsigned sc_earlyz_tile_fifo_size;
1941 1941
1942 unsigned num_tile_pipes; 1942 unsigned num_tile_pipes;
1943 unsigned num_backends_per_se; 1943 unsigned backend_enable_mask;
1944 unsigned backend_disable_mask_per_asic; 1944 unsigned backend_disable_mask_per_asic;
1945 unsigned backend_map; 1945 unsigned backend_map;
1946 unsigned num_texture_channel_caches; 1946 unsigned num_texture_channel_caches;
@@ -1970,7 +1970,7 @@ struct cik_asic {
1970 unsigned sc_earlyz_tile_fifo_size; 1970 unsigned sc_earlyz_tile_fifo_size;
1971 1971
1972 unsigned num_tile_pipes; 1972 unsigned num_tile_pipes;
1973 unsigned num_backends_per_se; 1973 unsigned backend_enable_mask;
1974 unsigned backend_disable_mask_per_asic; 1974 unsigned backend_disable_mask_per_asic;
1975 unsigned backend_map; 1975 unsigned backend_map;
1976 unsigned num_texture_channel_caches; 1976 unsigned num_texture_channel_caches;
@@ -2710,10 +2710,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
2710 struct radeon_vm *vm, 2710 struct radeon_vm *vm,
2711 struct radeon_fence *fence); 2711 struct radeon_fence *fence);
2712uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); 2712uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
2713int radeon_vm_bo_update_pte(struct radeon_device *rdev, 2713int radeon_vm_bo_update(struct radeon_device *rdev,
2714 struct radeon_vm *vm, 2714 struct radeon_vm *vm,
2715 struct radeon_bo *bo, 2715 struct radeon_bo *bo,
2716 struct ttm_mem_reg *mem); 2716 struct ttm_mem_reg *mem);
2717void radeon_vm_bo_invalidate(struct radeon_device *rdev, 2717void radeon_vm_bo_invalidate(struct radeon_device *rdev,
2718 struct radeon_bo *bo); 2718 struct radeon_bo *bo);
2719struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, 2719struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e354ce94cdd1..c0425bb6223a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2021,7 +2021,7 @@ static struct radeon_asic ci_asic = {
2021 .hdmi_setmode = &evergreen_hdmi_setmode, 2021 .hdmi_setmode = &evergreen_hdmi_setmode,
2022 }, 2022 },
2023 .copy = { 2023 .copy = {
2024 .blit = NULL, 2024 .blit = &cik_copy_cpdma,
2025 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 2025 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2026 .dma = &cik_copy_dma, 2026 .dma = &cik_copy_dma,
2027 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 2027 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2122,7 +2122,7 @@ static struct radeon_asic kv_asic = {
2122 .hdmi_setmode = &evergreen_hdmi_setmode, 2122 .hdmi_setmode = &evergreen_hdmi_setmode,
2123 }, 2123 },
2124 .copy = { 2124 .copy = {
2125 .blit = NULL, 2125 .blit = &cik_copy_cpdma,
2126 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 2126 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2127 .dma = &cik_copy_dma, 2127 .dma = &cik_copy_dma,
2128 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 2128 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f79ee184ffd5..5c39bf7c3d88 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2918,7 +2918,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
2918 mpll_param->dll_speed = args.ucDllSpeed; 2918 mpll_param->dll_speed = args.ucDllSpeed;
2919 mpll_param->bwcntl = args.ucBWCntl; 2919 mpll_param->bwcntl = args.ucBWCntl;
2920 mpll_param->vco_mode = 2920 mpll_param->vco_mode =
2921 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK) ? 1 : 0; 2921 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
2922 mpll_param->yclk_sel = 2922 mpll_param->yclk_sel =
2923 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0; 2923 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
2924 mpll_param->qdr = 2924 mpll_param->qdr =
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 9d302eaeea15..485848f889f5 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -33,6 +33,7 @@ static struct radeon_atpx_priv {
33 bool atpx_detected; 33 bool atpx_detected;
34 /* handle for device - and atpx */ 34 /* handle for device - and atpx */
35 acpi_handle dhandle; 35 acpi_handle dhandle;
36 acpi_handle other_handle;
36 struct radeon_atpx atpx; 37 struct radeon_atpx atpx;
37} radeon_atpx_priv; 38} radeon_atpx_priv;
38 39
@@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
451 return false; 452 return false;
452 453
453 status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); 454 status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
454 if (ACPI_FAILURE(status)) 455 if (ACPI_FAILURE(status)) {
456 radeon_atpx_priv.other_handle = dhandle;
455 return false; 457 return false;
456 458 }
457 radeon_atpx_priv.dhandle = dhandle; 459 radeon_atpx_priv.dhandle = dhandle;
458 radeon_atpx_priv.atpx.handle = atpx_handle; 460 radeon_atpx_priv.atpx.handle = atpx_handle;
459 return true; 461 return true;
@@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void)
530 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", 532 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
531 acpi_method_name); 533 acpi_method_name);
532 radeon_atpx_priv.atpx_detected = true; 534 radeon_atpx_priv.atpx_detected = true;
535 /*
536 * On some systems hotplug events are generated for the device
537 * being switched off when ATPX is executed. They cause ACPI
538 * hotplug to trigger and attempt to remove the device from
539 * the system, which causes it to break down. Prevent that from
540 * happening by setting the no_hotplug flag for the involved
541 * ACPI device objects.
542 */
543 acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
544 acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
533 return true; 545 return true;
534 } 546 }
535 return false; 547 return false;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index f41594b2eeac..0b366169d64d 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -360,13 +360,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
360 struct radeon_bo *bo; 360 struct radeon_bo *bo;
361 int r; 361 int r;
362 362
363 r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem); 363 r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
364 if (r) { 364 if (r) {
365 return r; 365 return r;
366 } 366 }
367 list_for_each_entry(lobj, &parser->validated, tv.head) { 367 list_for_each_entry(lobj, &parser->validated, tv.head) {
368 bo = lobj->bo; 368 bo = lobj->bo;
369 r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem); 369 r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem);
370 if (r) { 370 if (r) {
371 return r; 371 return r;
372 } 372 }
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9f5ff28864f6..db39ea36bf22 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -77,9 +77,10 @@
77 * 2.33.0 - Add SI tiling mode array query 77 * 2.33.0 - Add SI tiling mode array query
78 * 2.34.0 - Add CIK tiling mode array query 78 * 2.34.0 - Add CIK tiling mode array query
79 * 2.35.0 - Add CIK macrotile mode array query 79 * 2.35.0 - Add CIK macrotile mode array query
80 * 2.36.0 - Fix CIK DCE tiling setup
80 */ 81 */
81#define KMS_DRIVER_MAJOR 2 82#define KMS_DRIVER_MAJOR 2
82#define KMS_DRIVER_MINOR 35 83#define KMS_DRIVER_MINOR 36
83#define KMS_DRIVER_PATCHLEVEL 0 84#define KMS_DRIVER_PATCHLEVEL 0
84int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 85int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
85int radeon_driver_unload_kms(struct drm_device *dev); 86int radeon_driver_unload_kms(struct drm_device *dev);
@@ -508,15 +509,6 @@ static const struct file_operations radeon_driver_kms_fops = {
508#endif 509#endif
509}; 510};
510 511
511
512static void
513radeon_pci_shutdown(struct pci_dev *pdev)
514{
515 struct drm_device *dev = pci_get_drvdata(pdev);
516
517 radeon_driver_unload_kms(dev);
518}
519
520static struct drm_driver kms_driver = { 512static struct drm_driver kms_driver = {
521 .driver_features = 513 .driver_features =
522 DRIVER_USE_AGP | 514 DRIVER_USE_AGP |
@@ -586,7 +578,6 @@ static struct pci_driver radeon_kms_pci_driver = {
586 .probe = radeon_pci_probe, 578 .probe = radeon_pci_probe,
587 .remove = radeon_pci_remove, 579 .remove = radeon_pci_remove,
588 .driver.pm = &radeon_pm_ops, 580 .driver.pm = &radeon_pm_ops,
589 .shutdown = radeon_pci_shutdown,
590}; 581};
591 582
592static int __init radeon_init(void) 583static int __init radeon_init(void)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 543dcfae7e6f..00e0d449021c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -108,9 +108,10 @@
108 * 1.31- Add support for num Z pipes from GET_PARAM 108 * 1.31- Add support for num Z pipes from GET_PARAM
109 * 1.32- fixes for rv740 setup 109 * 1.32- fixes for rv740 setup
110 * 1.33- Add r6xx/r7xx const buffer support 110 * 1.33- Add r6xx/r7xx const buffer support
111 * 1.34- fix evergreen/cayman GS register
111 */ 112 */
112#define DRIVER_MAJOR 1 113#define DRIVER_MAJOR 1
113#define DRIVER_MINOR 33 114#define DRIVER_MINOR 34
114#define DRIVER_PATCHLEVEL 0 115#define DRIVER_PATCHLEVEL 0
115 116
116long radeon_drm_ioctl(struct file *filp, 117long radeon_drm_ioctl(struct file *filp,
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 3044e504f4ec..96e440061bdb 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -29,6 +29,7 @@
29#include <drm/radeon_drm.h> 29#include <drm/radeon_drm.h>
30#include "radeon.h" 30#include "radeon.h"
31#include "radeon_reg.h" 31#include "radeon_reg.h"
32#include "radeon_trace.h"
32 33
33/* 34/*
34 * GART 35 * GART
@@ -737,6 +738,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
737 for (i = 0; i < 2; ++i) { 738 for (i = 0; i < 2; ++i) {
738 if (choices[i]) { 739 if (choices[i]) {
739 vm->id = choices[i]; 740 vm->id = choices[i];
741 trace_radeon_vm_grab_id(vm->id, ring);
740 return rdev->vm_manager.active[choices[i]]; 742 return rdev->vm_manager.active[choices[i]];
741 } 743 }
742 } 744 }
@@ -1116,7 +1118,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
1116} 1118}
1117 1119
1118/** 1120/**
1119 * radeon_vm_bo_update_pte - map a bo into the vm page table 1121 * radeon_vm_bo_update - map a bo into the vm page table
1120 * 1122 *
1121 * @rdev: radeon_device pointer 1123 * @rdev: radeon_device pointer
1122 * @vm: requested vm 1124 * @vm: requested vm
@@ -1128,10 +1130,10 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
1128 * 1130 *
1129 * Object have to be reserved & global and local mutex must be locked! 1131 * Object have to be reserved & global and local mutex must be locked!
1130 */ 1132 */
1131int radeon_vm_bo_update_pte(struct radeon_device *rdev, 1133int radeon_vm_bo_update(struct radeon_device *rdev,
1132 struct radeon_vm *vm, 1134 struct radeon_vm *vm,
1133 struct radeon_bo *bo, 1135 struct radeon_bo *bo,
1134 struct ttm_mem_reg *mem) 1136 struct ttm_mem_reg *mem)
1135{ 1137{
1136 struct radeon_ib ib; 1138 struct radeon_ib ib;
1137 struct radeon_bo_va *bo_va; 1139 struct radeon_bo_va *bo_va;
@@ -1176,6 +1178,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1176 bo_va->valid = false; 1178 bo_va->valid = false;
1177 } 1179 }
1178 1180
1181 trace_radeon_vm_bo_update(bo_va);
1182
1179 nptes = radeon_bo_ngpu_pages(bo); 1183 nptes = radeon_bo_ngpu_pages(bo);
1180 1184
1181 /* assume two extra pdes in case the mapping overlaps the borders */ 1185 /* assume two extra pdes in case the mapping overlaps the borders */
@@ -1257,7 +1261,7 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
1257 mutex_lock(&rdev->vm_manager.lock); 1261 mutex_lock(&rdev->vm_manager.lock);
1258 mutex_lock(&bo_va->vm->mutex); 1262 mutex_lock(&bo_va->vm->mutex);
1259 if (bo_va->soffset) { 1263 if (bo_va->soffset) {
1260 r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); 1264 r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
1261 } 1265 }
1262 mutex_unlock(&rdev->vm_manager.lock); 1266 mutex_unlock(&rdev->vm_manager.lock);
1263 list_del(&bo_va->vm_list); 1267 list_del(&bo_va->vm_list);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 55d0b474bd37..21d593c0ecaf 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -461,6 +461,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
461 case RADEON_INFO_SI_CP_DMA_COMPUTE: 461 case RADEON_INFO_SI_CP_DMA_COMPUTE:
462 *value = 1; 462 *value = 1;
463 break; 463 break;
464 case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
465 if (rdev->family >= CHIP_BONAIRE) {
466 *value = rdev->config.cik.backend_enable_mask;
467 } else if (rdev->family >= CHIP_TAHITI) {
468 *value = rdev->config.si.backend_enable_mask;
469 } else {
470 DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
471 }
472 break;
464 default: 473 default:
465 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 474 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
466 return -EINVAL; 475 return -EINVAL;
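
radeon_kms.c gains a RADEON_INFO_SI_BACKEND_ENABLED_MASK query exposing the mask computed in si_setup_rb()/cik_setup_rb(). A hedged userspace sketch of how such an info request is typically issued; it assumes the installed UAPI headers export the request define and uses an illustrative device node:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>	/* may live under libdrm/ depending on the distro */

int main(void)
{
	uint32_t mask = 0;
	struct drm_radeon_info info = {
		.request = RADEON_INFO_SI_BACKEND_ENABLED_MASK,
		.value = (uintptr_t)&mask,	/* kernel writes the result through this pointer */
	};
	int fd = open("/dev/dri/card0", O_RDWR);	/* illustrative node */

	if (fd < 0 || ioctl(fd, DRM_IOCTL_RADEON_INFO, &info))
		return 1;
	printf("backend_enable_mask: 0x%x\n", mask);
	return 0;
}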
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d1385ccc672c..984097b907ef 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -537,8 +537,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
537 struct device_attribute *attr, 537 struct device_attribute *attr,
538 char *buf) 538 char *buf)
539{ 539{
540 struct drm_device *ddev = dev_get_drvdata(dev); 540 struct radeon_device *rdev = dev_get_drvdata(dev);
541 struct radeon_device *rdev = ddev->dev_private;
542 int temp; 541 int temp;
543 542
544 if (rdev->asic->pm.get_temperature) 543 if (rdev->asic->pm.get_temperature)
@@ -553,8 +552,7 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
553 struct device_attribute *attr, 552 struct device_attribute *attr,
554 char *buf) 553 char *buf)
555{ 554{
556 struct drm_device *ddev = dev_get_drvdata(dev); 555 struct radeon_device *rdev = dev_get_drvdata(dev);
557 struct radeon_device *rdev = ddev->dev_private;
558 int hyst = to_sensor_dev_attr(attr)->index; 556 int hyst = to_sensor_dev_attr(attr)->index;
559 int temp; 557 int temp;
560 558
@@ -566,23 +564,14 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
566 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 564 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
567} 565}
568 566
569static ssize_t radeon_hwmon_show_name(struct device *dev,
570 struct device_attribute *attr,
571 char *buf)
572{
573 return sprintf(buf, "radeon\n");
574}
575
576static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 567static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
577static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); 568static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
578static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); 569static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
579static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
580 570
581static struct attribute *hwmon_attributes[] = { 571static struct attribute *hwmon_attributes[] = {
582 &sensor_dev_attr_temp1_input.dev_attr.attr, 572 &sensor_dev_attr_temp1_input.dev_attr.attr,
583 &sensor_dev_attr_temp1_crit.dev_attr.attr, 573 &sensor_dev_attr_temp1_crit.dev_attr.attr,
584 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 574 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
585 &sensor_dev_attr_name.dev_attr.attr,
586 NULL 575 NULL
587}; 576};
588 577
@@ -590,8 +579,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
590 struct attribute *attr, int index) 579 struct attribute *attr, int index)
591{ 580{
592 struct device *dev = container_of(kobj, struct device, kobj); 581 struct device *dev = container_of(kobj, struct device, kobj);
593 struct drm_device *ddev = dev_get_drvdata(dev); 582 struct radeon_device *rdev = dev_get_drvdata(dev);
594 struct radeon_device *rdev = ddev->dev_private;
595 583
596 /* Skip limit attributes if DPM is not enabled */ 584 /* Skip limit attributes if DPM is not enabled */
597 if (rdev->pm.pm_method != PM_METHOD_DPM && 585 if (rdev->pm.pm_method != PM_METHOD_DPM &&
@@ -607,11 +595,15 @@ static const struct attribute_group hwmon_attrgroup = {
607 .is_visible = hwmon_attributes_visible, 595 .is_visible = hwmon_attributes_visible,
608}; 596};
609 597
598static const struct attribute_group *hwmon_groups[] = {
599 &hwmon_attrgroup,
600 NULL
601};
602
610static int radeon_hwmon_init(struct radeon_device *rdev) 603static int radeon_hwmon_init(struct radeon_device *rdev)
611{ 604{
612 int err = 0; 605 int err = 0;
613 606 struct device *hwmon_dev;
614 rdev->pm.int_hwmon_dev = NULL;
615 607
616 switch (rdev->pm.int_thermal_type) { 608 switch (rdev->pm.int_thermal_type) {
617 case THERMAL_TYPE_RV6XX: 609 case THERMAL_TYPE_RV6XX:
@@ -624,20 +616,13 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
624 case THERMAL_TYPE_KV: 616 case THERMAL_TYPE_KV:
625 if (rdev->asic->pm.get_temperature == NULL) 617 if (rdev->asic->pm.get_temperature == NULL)
626 return err; 618 return err;
627 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 619 hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
628 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 620 "radeon", rdev,
629 err = PTR_ERR(rdev->pm.int_hwmon_dev); 621 hwmon_groups);
622 if (IS_ERR(hwmon_dev)) {
623 err = PTR_ERR(hwmon_dev);
630 dev_err(rdev->dev, 624 dev_err(rdev->dev,
631 "Unable to register hwmon device: %d\n", err); 625 "Unable to register hwmon device: %d\n", err);
632 break;
633 }
634 dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
635 err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
636 &hwmon_attrgroup);
637 if (err) {
638 dev_err(rdev->dev,
639 "Unable to create hwmon sysfs file: %d\n", err);
640 hwmon_device_unregister(rdev->dev);
641 } 626 }
642 break; 627 break;
643 default: 628 default:
@@ -647,14 +632,6 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
647 return err; 632 return err;
648} 633}
649 634
650static void radeon_hwmon_fini(struct radeon_device *rdev)
651{
652 if (rdev->pm.int_hwmon_dev) {
653 sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
654 hwmon_device_unregister(rdev->pm.int_hwmon_dev);
655 }
656}
657
658static void radeon_dpm_thermal_work_handler(struct work_struct *work) 635static void radeon_dpm_thermal_work_handler(struct work_struct *work)
659{ 636{
660 struct radeon_device *rdev = 637 struct radeon_device *rdev =
@@ -1337,8 +1314,6 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
1337 1314
1338 if (rdev->pm.power_state) 1315 if (rdev->pm.power_state)
1339 kfree(rdev->pm.power_state); 1316 kfree(rdev->pm.power_state);
1340
1341 radeon_hwmon_fini(rdev);
1342} 1317}
1343 1318
1344static void radeon_pm_fini_dpm(struct radeon_device *rdev) 1319static void radeon_pm_fini_dpm(struct radeon_device *rdev)
@@ -1358,8 +1333,6 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1358 1333
1359 if (rdev->pm.power_state) 1334 if (rdev->pm.power_state)
1360 kfree(rdev->pm.power_state); 1335 kfree(rdev->pm.power_state);
1361
1362 radeon_hwmon_fini(rdev);
1363} 1336}
1364 1337
1365void radeon_pm_fini(struct radeon_device *rdev) 1338void radeon_pm_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 9f0e18172b6e..0473257d4078 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -47,6 +47,39 @@ TRACE_EVENT(radeon_cs,
47 __entry->fences) 47 __entry->fences)
48); 48);
49 49
50TRACE_EVENT(radeon_vm_grab_id,
51 TP_PROTO(unsigned vmid, int ring),
52 TP_ARGS(vmid, ring),
53 TP_STRUCT__entry(
54 __field(u32, vmid)
55 __field(u32, ring)
56 ),
57
58 TP_fast_assign(
59 __entry->vmid = vmid;
60 __entry->ring = ring;
61 ),
62 TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
63);
64
65TRACE_EVENT(radeon_vm_bo_update,
66 TP_PROTO(struct radeon_bo_va *bo_va),
67 TP_ARGS(bo_va),
68 TP_STRUCT__entry(
69 __field(u64, soffset)
70 __field(u64, eoffset)
71 __field(u32, flags)
72 ),
73
74 TP_fast_assign(
75 __entry->soffset = bo_va->soffset;
76 __entry->eoffset = bo_va->eoffset;
77 __entry->flags = bo_va->flags;
78 ),
79 TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
80 __entry->soffset, __entry->eoffset, __entry->flags)
81);
82
50TRACE_EVENT(radeon_vm_set_page, 83TRACE_EVENT(radeon_vm_set_page,
51 TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, 84 TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
52 uint32_t incr, uint32_t flags), 85 uint32_t incr, uint32_t flags),
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 373d088bac66..b9c0529b4a2e 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
473 return -EINVAL; 473 return -EINVAL;
474 } 474 }
475 475
476 if ((start >> 28) != (end >> 28)) { 476 if ((start >> 28) != ((end - 1) >> 28)) {
477 DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n", 477 DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
478 start, end); 478 start, end);
479 return -EINVAL; 479 return -EINVAL;
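
The radeon_uvd.c change compares the last byte of the relocation (end - 1) against the 256MB window, so a buffer that ends exactly on a boundary is no longer rejected. A standalone demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

static int crosses_256mb(uint64_t start, uint64_t end)	/* end is exclusive */
{
	return (start >> 28) != ((end - 1) >> 28);
}

int main(void)
{
	uint64_t start = 0x0fff0000, end = 0x10000000;	/* ends exactly on a boundary */

	printf("old check flags a crossing: %d\n",
	       (int)((start >> 28) != (end >> 28)));
	printf("fixed check flags a crossing: %d\n", crosses_256mb(start, end));
	return 0;
}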
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index a072fa8c46b0..d46b58d078aa 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -21,7 +21,7 @@ cayman 0x9400
21 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE 21 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
22 0x000089B0 VGT_HS_OFFCHIP_PARAM 22 0x000089B0 VGT_HS_OFFCHIP_PARAM
23 0x00008A14 PA_CL_ENHANCE 23 0x00008A14 PA_CL_ENHANCE
24 0x00008A60 PA_SC_LINE_STIPPLE_VALUE 24 0x00008A60 PA_SU_LINE_STIPPLE_VALUE
25 0x00008B10 PA_SC_LINE_STIPPLE_STATE 25 0x00008B10 PA_SC_LINE_STIPPLE_STATE
26 0x00008BF0 PA_SC_ENHANCE 26 0x00008BF0 PA_SC_ENHANCE
27 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 27 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@@ -532,7 +532,7 @@ cayman 0x9400
532 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET 532 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
533 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE 533 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
534 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET 534 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
535 0x00028B74 VGT_GS_INSTANCE_CNT 535 0x00028B90 VGT_GS_INSTANCE_CNT
536 0x00028BD4 PA_SC_CENTROID_PRIORITY_0 536 0x00028BD4 PA_SC_CENTROID_PRIORITY_0
537 0x00028BD8 PA_SC_CENTROID_PRIORITY_1 537 0x00028BD8 PA_SC_CENTROID_PRIORITY_1
538 0x00028BDC PA_SC_LINE_CNTL 538 0x00028BDC PA_SC_LINE_CNTL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index b912a37689bf..57745c8761c8 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -22,7 +22,7 @@ evergreen 0x9400
22 0x000089A4 VGT_COMPUTE_START_Z 22 0x000089A4 VGT_COMPUTE_START_Z
23 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE 23 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
24 0x00008A14 PA_CL_ENHANCE 24 0x00008A14 PA_CL_ENHANCE
25 0x00008A60 PA_SC_LINE_STIPPLE_VALUE 25 0x00008A60 PA_SU_LINE_STIPPLE_VALUE
26 0x00008B10 PA_SC_LINE_STIPPLE_STATE 26 0x00008B10 PA_SC_LINE_STIPPLE_STATE
27 0x00008BF0 PA_SC_ENHANCE 27 0x00008BF0 PA_SC_ENHANCE
28 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 28 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@@ -545,7 +545,7 @@ evergreen 0x9400
545 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET 545 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
546 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE 546 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
547 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET 547 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
548 0x00028B74 VGT_GS_INSTANCE_CNT 548 0x00028B90 VGT_GS_INSTANCE_CNT
549 0x00028C00 PA_SC_LINE_CNTL 549 0x00028C00 PA_SC_LINE_CNTL
550 0x00028C08 PA_SU_VTX_CNTL 550 0x00028C08 PA_SU_VTX_CNTL
551 0x00028C0C PA_CL_GB_VERT_CLIP_ADJ 551 0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1c560629575a..e7dab069cccf 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
162 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); 162 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
163 base = G_000100_MC_FB_START(base) << 16; 163 base = G_000100_MC_FB_START(base) << 16;
164 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 164 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
165 /* Some boards seem to be configured for 128MB of sideport memory,
166 * but really only have 64MB. Just skip the sideport and use
167 * UMA memory.
168 */
169 if (rdev->mc.igp_sideport_enabled &&
170 (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
171 base += 128 * 1024 * 1024;
172 rdev->mc.real_vram_size -= 128 * 1024 * 1024;
173 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
174 }
165 175
166 /* Use K8 direct mapping for fast fb access. */ 176 /* Use K8 direct mapping for fast fb access. */
167 rdev->fastfb_working = false; 177 rdev->fastfb_working = false;
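
The rs690.c hunk works around boards that claim 384MB with sideport enabled by skipping the first 128MB window and shrinking the usable size, leaving only the UMA region. A plain-C restatement of that arithmetic (the base value is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define MiB (1024ULL * 1024ULL)

int main(void)
{
	uint64_t base = 0x20000000;		/* hypothetical MC_FB_START */
	uint64_t real_vram_size = 384 * MiB;	/* what the board claims */
	int sideport_enabled = 1;

	if (sideport_enabled && real_vram_size == 384 * MiB) {
		base += 128 * MiB;		/* skip the dubious sideport window */
		real_vram_size -= 128 * MiB;
	}
	printf("base=0x%llx usable=%lluMB\n",
	       (unsigned long long)base,
	       (unsigned long long)(real_vram_size / MiB));
	return 0;
}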
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 913b025ae9b3..374499db20c7 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
2328 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, 2328 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
2329 ASIC_INTERNAL_MEMORY_SS, 0); 2329 ASIC_INTERNAL_MEMORY_SS, 0);
2330 2330
2331 /* disable ss, causes hangs on some cayman boards */
2332 if (rdev->family == CHIP_CAYMAN) {
2333 pi->sclk_ss = false;
2334 pi->mclk_ss = false;
2335 }
2336
2331 if (pi->sclk_ss || pi->mclk_ss) 2337 if (pi->sclk_ss || pi->mclk_ss)
2332 pi->dynamic_ss = true; 2338 pi->dynamic_ss = true;
2333 else 2339 else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 6a64ccaa0695..85e1edfaa3be 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2811,7 +2811,7 @@ static void si_setup_spi(struct radeon_device *rdev,
2811} 2811}
2812 2812
2813static u32 si_get_rb_disabled(struct radeon_device *rdev, 2813static u32 si_get_rb_disabled(struct radeon_device *rdev,
2814 u32 max_rb_num, u32 se_num, 2814 u32 max_rb_num_per_se,
2815 u32 sh_per_se) 2815 u32 sh_per_se)
2816{ 2816{
2817 u32 data, mask; 2817 u32 data, mask;
@@ -2825,14 +2825,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
2825 2825
2826 data >>= BACKEND_DISABLE_SHIFT; 2826 data >>= BACKEND_DISABLE_SHIFT;
2827 2827
2828 mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); 2828 mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
2829 2829
2830 return data & mask; 2830 return data & mask;
2831} 2831}
2832 2832
2833static void si_setup_rb(struct radeon_device *rdev, 2833static void si_setup_rb(struct radeon_device *rdev,
2834 u32 se_num, u32 sh_per_se, 2834 u32 se_num, u32 sh_per_se,
2835 u32 max_rb_num) 2835 u32 max_rb_num_per_se)
2836{ 2836{
2837 int i, j; 2837 int i, j;
2838 u32 data, mask; 2838 u32 data, mask;
@@ -2842,19 +2842,21 @@ static void si_setup_rb(struct radeon_device *rdev,
2842 for (i = 0; i < se_num; i++) { 2842 for (i = 0; i < se_num; i++) {
2843 for (j = 0; j < sh_per_se; j++) { 2843 for (j = 0; j < sh_per_se; j++) {
2844 si_select_se_sh(rdev, i, j); 2844 si_select_se_sh(rdev, i, j);
2845 data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); 2845 data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
2846 disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); 2846 disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
2847 } 2847 }
2848 } 2848 }
2849 si_select_se_sh(rdev, 0xffffffff, 0xffffffff); 2849 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
2850 2850
2851 mask = 1; 2851 mask = 1;
2852 for (i = 0; i < max_rb_num; i++) { 2852 for (i = 0; i < max_rb_num_per_se * se_num; i++) {
2853 if (!(disabled_rbs & mask)) 2853 if (!(disabled_rbs & mask))
2854 enabled_rbs |= mask; 2854 enabled_rbs |= mask;
2855 mask <<= 1; 2855 mask <<= 1;
2856 } 2856 }
2857 2857
2858 rdev->config.si.backend_enable_mask = enabled_rbs;
2859
2858 for (i = 0; i < se_num; i++) { 2860 for (i = 0; i < se_num; i++) {
2859 si_select_se_sh(rdev, i, 0xffffffff); 2861 si_select_se_sh(rdev, i, 0xffffffff);
2860 data = 0; 2862 data = 0;
@@ -3882,8 +3884,15 @@ static int si_mc_init(struct radeon_device *rdev)
3882 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 3884 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
3883 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 3885 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
3884 /* size in MB on si */ 3886 /* size in MB on si */
3885 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; 3887 tmp = RREG32(CONFIG_MEMSIZE);
3886 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; 3888 /* some boards may have garbage in the upper 16 bits */
3889 if (tmp & 0xffff0000) {
3890 DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
3891 if (tmp & 0xffff)
3892 tmp &= 0xffff;
3893 }
3894 rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
3895 rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
3887 rdev->mc.visible_vram_size = rdev->mc.aper_size; 3896 rdev->mc.visible_vram_size = rdev->mc.aper_size;
3888 si_vram_gtt_location(rdev, &rdev->mc); 3897 si_vram_gtt_location(rdev, &rdev->mc);
3889 radeon_update_bandwidth_info(rdev); 3898 radeon_update_bandwidth_info(rdev);
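
The si_mc_init() hunk sanity-checks the size-in-MB register before scaling it to bytes, clamping to the low 16 bits when the upper half looks like garbage. A standalone sketch of the same check:

#include <stdint.h>
#include <stdio.h>

static uint64_t vram_bytes_from_reg(uint32_t tmp)
{
	if (tmp & 0xffff0000) {			/* upper half should be zero on SI */
		printf("Probable bad vram size: 0x%08x\n", tmp);
		if (tmp & 0xffff)		/* keep the low half if it is usable */
			tmp &= 0xffff;
	}
	return (uint64_t)tmp * 1024ULL * 1024ULL;
}

int main(void)
{
	printf("%llu MB\n", (unsigned long long)
	       (vram_bytes_from_reg(0xdead0800) >> 20));
	return 0;
}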
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 28e178137718..07eba596d458 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -135,11 +135,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
135 unsigned int num_relocs = args->num_relocs; 135 unsigned int num_relocs = args->num_relocs;
136 unsigned int num_waitchks = args->num_waitchks; 136 unsigned int num_waitchks = args->num_waitchks;
137 struct drm_tegra_cmdbuf __user *cmdbufs = 137 struct drm_tegra_cmdbuf __user *cmdbufs =
138 (void * __user)(uintptr_t)args->cmdbufs; 138 (void __user *)(uintptr_t)args->cmdbufs;
139 struct drm_tegra_reloc __user *relocs = 139 struct drm_tegra_reloc __user *relocs =
140 (void * __user)(uintptr_t)args->relocs; 140 (void __user *)(uintptr_t)args->relocs;
141 struct drm_tegra_waitchk __user *waitchks = 141 struct drm_tegra_waitchk __user *waitchks =
142 (void * __user)(uintptr_t)args->waitchks; 142 (void __user *)(uintptr_t)args->waitchks;
143 struct drm_tegra_syncpt syncpt; 143 struct drm_tegra_syncpt syncpt;
144 struct host1x_job *job; 144 struct host1x_job *job;
145 int err; 145 int err;
@@ -163,9 +163,10 @@ int tegra_drm_submit(struct tegra_drm_context *context,
163 struct drm_tegra_cmdbuf cmdbuf; 163 struct drm_tegra_cmdbuf cmdbuf;
164 struct host1x_bo *bo; 164 struct host1x_bo *bo;
165 165
166 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf)); 166 if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
167 if (err) 167 err = -EFAULT;
168 goto fail; 168 goto fail;
169 }
169 170
170 bo = host1x_bo_lookup(drm, file, cmdbuf.handle); 171 bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
171 if (!bo) { 172 if (!bo) {
@@ -178,10 +179,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
178 cmdbufs++; 179 cmdbufs++;
179 } 180 }
180 181
181 err = copy_from_user(job->relocarray, relocs, 182 if (copy_from_user(job->relocarray, relocs,
182 sizeof(*relocs) * num_relocs); 183 sizeof(*relocs) * num_relocs)) {
183 if (err) 184 err = -EFAULT;
184 goto fail; 185 goto fail;
186 }
185 187
186 while (num_relocs--) { 188 while (num_relocs--) {
187 struct host1x_reloc *reloc = &job->relocarray[num_relocs]; 189 struct host1x_reloc *reloc = &job->relocarray[num_relocs];
@@ -199,15 +201,17 @@ int tegra_drm_submit(struct tegra_drm_context *context,
199 } 201 }
200 } 202 }
201 203
202 err = copy_from_user(job->waitchk, waitchks, 204 if (copy_from_user(job->waitchk, waitchks,
203 sizeof(*waitchks) * num_waitchks); 205 sizeof(*waitchks) * num_waitchks)) {
204 if (err) 206 err = -EFAULT;
205 goto fail; 207 goto fail;
208 }
206 209
207 err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts, 210 if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
208 sizeof(syncpt)); 211 sizeof(syncpt))) {
209 if (err) 212 err = -EFAULT;
210 goto fail; 213 goto fail;
214 }
211 215
212 job->is_addr_reg = context->client->ops->is_addr_reg; 216 job->is_addr_reg = context->client->ops->is_addr_reg;
213 job->syncpt_incrs = syncpt.incrs; 217 job->syncpt_incrs = syncpt.incrs;
@@ -573,7 +577,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor)
573} 577}
574#endif 578#endif
575 579
576struct drm_driver tegra_drm_driver = { 580static struct drm_driver tegra_drm_driver = {
577 .driver_features = DRIVER_MODESET | DRIVER_GEM, 581 .driver_features = DRIVER_MODESET | DRIVER_GEM,
578 .load = tegra_drm_load, 582 .load = tegra_drm_load,
579 .unload = tegra_drm_unload, 583 .unload = tegra_drm_unload,
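
The tegra_drm_submit() hunks stop propagating the raw copy_from_user() return value, which is a count of uncopied bytes rather than an errno, and return -EFAULT instead. A plain-C analogue of the before/after behaviour (fake_copy stands in for copy_from_user):

#include <stdio.h>
#include <string.h>

#define EFAULT 14

/* stand-in for copy_from_user(): returns the number of bytes NOT copied */
static unsigned long fake_copy(void *dst, const void *src, unsigned long n)
{
	if (!src)
		return n;			/* simulate a faulting user pointer */
	memcpy(dst, src, n);
	return 0;
}

static int submit(const void *ubuf)
{
	char cmdbuf[16];

	if (fake_copy(cmdbuf, ubuf, sizeof(cmdbuf)))
		return -EFAULT;			/* was: err = fake_copy(...); goto fail; */
	return 0;
}

int main(void)
{
	char good[16] = "cmdbuf";

	printf("good pointer -> %d, bad pointer -> %d\n",
	       submit(good), submit(NULL));
	return 0;
}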
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index fdfe259ed7f8..7da0b923131f 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -116,7 +116,7 @@ host1x_client_to_dc(struct host1x_client *client)
116 116
117static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc) 117static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
118{ 118{
119 return container_of(crtc, struct tegra_dc, base); 119 return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
120} 120}
121 121
122static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value, 122static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 490f7719e317..a3835e7de184 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -247,7 +247,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
247 info->var.yoffset * fb->pitches[0]; 247 info->var.yoffset * fb->pitches[0];
248 248
249 drm->mode_config.fb_base = (resource_size_t)bo->paddr; 249 drm->mode_config.fb_base = (resource_size_t)bo->paddr;
250 info->screen_base = bo->vaddr + offset; 250 info->screen_base = (void __iomem *)bo->vaddr + offset;
251 info->screen_size = size; 251 info->screen_size = size;
252 info->fix.smem_start = (unsigned long)(bo->paddr + offset); 252 info->fix.smem_start = (unsigned long)(bo->paddr + offset);
253 info->fix.smem_len = size; 253 info->fix.smem_len = size;
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index ba47ca4fb880..3b29018913a5 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -14,6 +14,8 @@
14 14
15struct tegra_rgb { 15struct tegra_rgb {
16 struct tegra_output output; 16 struct tegra_output output;
17 struct tegra_dc *dc;
18
17 struct clk *clk_parent; 19 struct clk *clk_parent;
18 struct clk *clk; 20 struct clk *clk;
19}; 21};
@@ -84,18 +86,18 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
84 86
85static int tegra_output_rgb_enable(struct tegra_output *output) 87static int tegra_output_rgb_enable(struct tegra_output *output)
86{ 88{
87 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); 89 struct tegra_rgb *rgb = to_rgb(output);
88 90
89 tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable)); 91 tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
90 92
91 return 0; 93 return 0;
92} 94}
93 95
94static int tegra_output_rgb_disable(struct tegra_output *output) 96static int tegra_output_rgb_disable(struct tegra_output *output)
95{ 97{
96 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); 98 struct tegra_rgb *rgb = to_rgb(output);
97 99
98 tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable)); 100 tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
99 101
100 return 0; 102 return 0;
101} 103}
@@ -146,6 +148,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
146 148
147 rgb->output.dev = dc->dev; 149 rgb->output.dev = dc->dev;
148 rgb->output.of_node = np; 150 rgb->output.of_node = np;
151 rgb->dc = dc;
149 152
150 err = tegra_output_probe(&rgb->output); 153 err = tegra_output_probe(&rgb->output);
151 if (err < 0) 154 if (err < 0)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 15b86a94949d..406152152315 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
353 * Don't move nonexistent data. Clear destination instead. 353 * Don't move nonexistent data. Clear destination instead.
354 */ 354 */
355 if (old_iomap == NULL && 355 if (old_iomap == NULL &&
356 (ttm == NULL || ttm->state == tt_unpopulated)) { 356 (ttm == NULL || (ttm->state == tt_unpopulated &&
357 !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
357 memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); 358 memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
358 goto out2; 359 goto out2;
359 } 360 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index b249ab9b1eb2..6440eeac22d2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -169,9 +169,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
169 } 169 }
170 170
171 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + 171 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
172 drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; 172 vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
173 page_last = vma_pages(vma) + 173 page_last = vma_pages(vma) + vma->vm_pgoff -
174 drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; 174 drm_vma_node_start(&bo->vma_node);
175 175
176 if (unlikely(page_offset >= bo->num_pages)) { 176 if (unlikely(page_offset >= bo->num_pages)) {
177 retval = VM_FAULT_SIGBUS; 177 retval = VM_FAULT_SIGBUS;
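
The ttm_bo_vm_fault() fix recomputes the page offset inside the BO as the fault offset within the vma plus vm_pgoff minus the BO's start in the vma manager; the previous code had the last two terms swapped. A standalone sketch with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t vm_start   = 0x700000000000ULL;	/* mapping start (hypothetical) */
	uint64_t address    = vm_start + 5 * 4096;	/* faulting address */
	uint64_t vm_pgoff   = 1000;			/* mmap offset, in pages */
	uint64_t node_start = 998;			/* BO start in the vma manager */

	uint64_t page_offset = ((address - vm_start) >> PAGE_SHIFT)
			     + vm_pgoff - node_start;	/* fixed order of terms */

	printf("faulting page inside the BO: %llu\n",
	       (unsigned long long)page_offset);
	return 0;
}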
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 24ffbe990736..8d67b943ac05 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -125,6 +125,12 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
125 125
126static void udl_gem_put_pages(struct udl_gem_object *obj) 126static void udl_gem_put_pages(struct udl_gem_object *obj)
127{ 127{
128 if (obj->base.import_attach) {
129 drm_free_large(obj->pages);
130 obj->pages = NULL;
131 return;
132 }
133
128 drm_gem_put_pages(&obj->base, obj->pages, false, false); 134 drm_gem_put_pages(&obj->base, obj->pages, false, false);
129 obj->pages = NULL; 135 obj->pages = NULL;
130} 136}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 7776e6f0aef6..0489c6152482 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -150,6 +150,8 @@ struct vmw_ttm_tt {
150 bool mapped; 150 bool mapped;
151}; 151};
152 152
153const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
154
153/** 155/**
154 * Helper functions to advance a struct vmw_piter iterator. 156 * Helper functions to advance a struct vmw_piter iterator.
155 * 157 *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index db85985c7086..20890ad8408b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -615,6 +615,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
615 * TTM buffer object driver - vmwgfx_buffer.c 615 * TTM buffer object driver - vmwgfx_buffer.c
616 */ 616 */
617 617
618extern const size_t vmw_tt_size;
618extern struct ttm_placement vmw_vram_placement; 619extern struct ttm_placement vmw_vram_placement;
619extern struct ttm_placement vmw_vram_ne_placement; 620extern struct ttm_placement vmw_vram_ne_placement;
620extern struct ttm_placement vmw_vram_sys_placement; 621extern struct ttm_placement vmw_vram_sys_placement;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a51f48e3e917..45d5b5ab6ca9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -68,6 +68,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 			  SVGA_FIFO_3D_HWVERSION));
 		break;
 	}
+	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
+		param->value = dev_priv->memory_size;
+		break;
 	default:
 		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
 			  param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ecb3d867b426..03f1c2038631 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -75,6 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 	vmw_surface_unreference(&du->cursor_surface);
 	if (du->cursor_dmabuf)
 		vmw_dmabuf_unreference(&du->cursor_dmabuf);
+	drm_sysfs_connector_remove(&du->connector);
 	drm_crtc_cleanup(&du->crtc);
 	drm_encoder_cleanup(&du->encoder);
 	drm_connector_cleanup(&du->connector);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 79f7e8e60529..a055a26819c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -260,6 +260,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
 		connector->encoder = NULL;
 		encoder->crtc = NULL;
 		crtc->fb = NULL;
+		crtc->enabled = false;
 
 		vmw_ldu_del_active(dev_priv, ldu);
 
@@ -285,6 +286,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
 	crtc->x = set->x;
 	crtc->y = set->y;
 	crtc->mode = *mode;
+	crtc->enabled = true;
 
 	vmw_ldu_add_active(dev_priv, ldu, vfb);
 
@@ -369,6 +371,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 	encoder->possible_crtcs = (1 << unit);
 	encoder->possible_clones = 0;
 
+	(void) drm_sysfs_connector_add(connector);
+
 	drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
 
 	drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index efe2b74c5eb1..9b5ea2ac7ddf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -352,6 +352,38 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 /**
  * Buffer management.
  */
+
+/**
+ * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+ */
+static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
+				  bool user)
+{
+	static size_t struct_size, user_struct_size;
+	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+	if (unlikely(struct_size == 0)) {
+		size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+		struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_dma_buffer));
+		user_struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
+	}
+
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+		page_array_size +=
+			ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+	return ((user) ? user_struct_size : struct_size) +
+		page_array_size;
+}
+
 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 {
 	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
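The accounting above is plain arithmetic over power-of-two/page rounded sizes. The standalone sketch below mirrors that calculation with stand-in struct sizes and a simplified stand-in for ttm_round_pot(); the real inputs come from sizeof() and the device's map mode in the driver:

/* Illustrative sketch of the acc_size arithmetic; all sizes are stand-ins. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Simplified stand-in for ttm_round_pot(): next power of two below a page,
 * page-aligned above it. */
static size_t round_pot(size_t size)
{
	size_t tmp = 4;

	if ((size & (size - 1)) == 0)
		return size;
	if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	while (tmp < size)
		tmp <<= 1;
	return tmp;
}

int main(void)
{
	size_t tt_size      = 400;	/* stand-in for sizeof(struct vmw_ttm_tt)     */
	size_t dma_buf_size = 800;	/* stand-in for sizeof(struct vmw_dma_buffer) */
	size_t request      = 1 << 20;	/* 1 MiB buffer requested                     */
	int coherent        = 1;	/* vmw_dma_alloc_coherent map mode            */

	size_t num_pages  = PAGE_ALIGN(request) / PAGE_SIZE;
	size_t page_array = round_pot(num_pages * sizeof(void *));
	size_t acc = round_pot(tt_size) + round_pot(dma_buf_size) + page_array;

	if (coherent)	/* coherent mappings also track a dma address per page */
		acc += round_pot(num_pages * sizeof(unsigned long));

	printf("pages=%zu accounted=%zu bytes\n", num_pages, acc);
	return 0;
}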
@@ -359,6 +391,13 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 	kfree(vmw_bo);
 }
 
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+{
+	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+
+	ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
 int vmw_dmabuf_init(struct vmw_private *dev_priv,
 		    struct vmw_dma_buffer *vmw_bo,
 		    size_t size, struct ttm_placement *placement,
@@ -368,28 +407,23 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 	struct ttm_bo_device *bdev = &dev_priv->bdev;
 	size_t acc_size;
 	int ret;
+	bool user = (bo_free == &vmw_user_dmabuf_destroy);
 
-	BUG_ON(!bo_free);
+	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
 
-	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
+	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
 	memset(vmw_bo, 0, sizeof(*vmw_bo));
 
 	INIT_LIST_HEAD(&vmw_bo->res_list);
 
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
-			  ttm_bo_type_device, placement,
+			  (user) ? ttm_bo_type_device :
+			  ttm_bo_type_kernel, placement,
 			  0, interruptible,
 			  NULL, acc_size, NULL, bo_free);
 	return ret;
 }
 
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
-{
-	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-
-	ttm_prime_object_kfree(vmw_user_bo, prime);
-}
-
 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 {
 	struct vmw_user_dma_buffer *vmw_user_bo;
@@ -781,54 +815,55 @@ err_ref:
 }
 
 
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
+ * that the arguments have a different format.
+ */
 int vmw_dumb_create(struct drm_file *file_priv,
 		    struct drm_device *dev,
 		    struct drm_mode_create_dumb *args)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
-	struct vmw_user_dma_buffer *vmw_user_bo;
-	struct ttm_buffer_object *tmp;
+	struct vmw_dma_buffer *dma_buf;
 	int ret;
 
 	args->pitch = args->width * ((args->bpp + 7) / 8);
 	args->size = args->pitch * args->height;
 
-	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
-	if (vmw_user_bo == NULL)
-		return -ENOMEM;
-
 	ret = ttm_read_lock(&vmaster->lock, true);
-	if (ret != 0) {
-		kfree(vmw_user_bo);
+	if (unlikely(ret != 0))
 		return ret;
-	}
 
-	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
-			      &vmw_vram_sys_placement, true,
-			      &vmw_user_dmabuf_destroy);
-	if (ret != 0)
-		goto out_no_dmabuf;
-
-	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
-	ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile,
-				    args->size,
-				    &vmw_user_bo->prime,
-				    false,
-				    ttm_buffer_type,
-				    &vmw_user_dmabuf_release, NULL);
+	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+				    args->size, false, &args->handle,
+				    &dma_buf);
 	if (unlikely(ret != 0))
-		goto out_no_base_object;
-
-	args->handle = vmw_user_bo->prime.base.hash.key;
+		goto out_no_dmabuf;
 
-out_no_base_object:
-	ttm_bo_unref(&tmp);
+	vmw_dmabuf_unreference(&dma_buf);
 out_no_dmabuf:
 	ttm_read_unlock(&vmaster->lock);
 	return ret;
 }
 
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
 int vmw_dumb_map_offset(struct drm_file *file_priv,
 			struct drm_device *dev, uint32_t handle,
 			uint64_t *offset)
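The pitch/size computation in vmw_dumb_create() is the usual bytes-per-pixel rounding. A quick worked check with illustrative values only:

/* Illustrative worked example of the dumb-buffer sizing arithmetic. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Example mode: 1920x1080 at 32 bpp. */
	uint32_t width = 1920, height = 1080, bpp = 32;

	uint32_t pitch = width * ((bpp + 7) / 8);	/* 1920 * 4 = 7680 bytes     */
	uint32_t size  = pitch * height;		/* 7680 * 1080 = 8294400     */

	printf("pitch=%u size=%u\n", pitch, size);
	return 0;
}

The (bpp + 7) / 8 term rounds up to whole bytes per pixel, so odd depths such as 15 bpp still get a valid pitch.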
@@ -846,6 +881,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
 	return 0;
 }
 
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
 int vmw_dumb_destroy(struct drm_file *file_priv,
 		     struct drm_device *dev,
 		     uint32_t handle)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 26387c3d5a21..22406c8651ea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -310,6 +310,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
 		crtc->fb = NULL;
 		crtc->x = 0;
 		crtc->y = 0;
+		crtc->enabled = false;
 
 		vmw_sou_del_active(dev_priv, sou);
 
@@ -370,6 +371,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
 		crtc->fb = NULL;
 		crtc->x = 0;
 		crtc->y = 0;
+		crtc->enabled = false;
 
 		return ret;
 	}
@@ -382,6 +384,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
 	crtc->fb = fb;
 	crtc->x = set->x;
 	crtc->y = set->y;
+	crtc->enabled = true;
 
 	return 0;
 }
@@ -464,6 +467,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 	encoder->possible_crtcs = (1 << unit);
 	encoder->possible_clones = 0;
 
+	(void) drm_sysfs_connector_add(connector);
+
 	drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
 
 	drm_mode_crtc_set_gamma_size(crtc, 256);